.classpath | 9 .gitignore | 2 etc/agent-versioning/build.xml | 2 etc/dev-utils/TestLdapSettings.java | 640 etc/rhq-ircBot/src/main/java/org/rhq/etc/ircbot/RhqIrcBotListener.java | 5 etc/rhq-ircBot/src/main/resources/org/rhq/etc/ircbot/cacerts.jks |binary etc/samples/provisioning/sample-cli/resources/sample-bundle-v1.zip |binary etc/samples/provisioning/sample-cli/resources/sample-bundle-v2.zip |binary etc/scripts/rhq48-storage-patch/apache-cassandra-1.2.4-patch-1.jar |binary etc/scripts/rhq48-storage-patch/disable_compression.cql | 31 etc/scripts/rhq48-storage-patch/rhq48-storage-patch.bat | 104 etc/scripts/rhq48-storage-patch/rhq48-storage-patch.sh | 103 modules/common/ant-bundle/src/main/java/org/rhq/bundle/ant/AntLauncher.java | 39 modules/common/ant-bundle/src/main/java/org/rhq/bundle/ant/BundleAntProject.java | 12 modules/common/ant-bundle/src/main/java/org/rhq/bundle/ant/type/DeploymentUnitType.java | 68 modules/common/ant-bundle/src/test/java/org/rhq/bundle/ant/AntLauncherTest.java | 194 modules/common/ant-bundle/src/test/resources/legacy-test-bundle-audit.xml | 39 modules/common/ant-bundle/src/test/resources/legacy-test-bundle-compressed-archives-with-replace.xml | 21 modules/common/ant-bundle/src/test/resources/legacy-test-bundle-compressed-archives.xml | 14 modules/common/ant-bundle/src/test/resources/legacy-test-bundle-subdir.xml | 26 modules/common/ant-bundle/src/test/resources/legacy-test-bundle-url.xml | 26 modules/common/ant-bundle/src/test/resources/legacy-test-bundle-v1.xml | 45 modules/common/ant-bundle/src/test/resources/legacy-test-bundle-v2-noManageRootDir.xml | 46 modules/common/ant-bundle/src/test/resources/legacy-test-bundle-v2.xml | 45 modules/common/ant-bundle/src/test/resources/test-bundle-audit.xml | 23 modules/common/ant-bundle/src/test/resources/test-bundle-compressed-archives-with-replace.xml | 23 modules/common/ant-bundle/src/test/resources/test-bundle-compressed-archives.xml | 23 
modules/common/ant-bundle/src/test/resources/test-bundle-no-manage-root-dir-nor-compliance.xml | 64 modules/common/ant-bundle/src/test/resources/test-bundle-subdir.xml | 23 modules/common/ant-bundle/src/test/resources/test-bundle-url.xml | 23 modules/common/ant-bundle/src/test/resources/test-bundle-v1.xml | 23 modules/common/ant-bundle/src/test/resources/test-bundle-v2-filesAndDirectories.xml | 65 modules/common/ant-bundle/src/test/resources/test-bundle-v2-noManageRootDir.xml | 46 modules/common/ant-bundle/src/test/resources/test-bundle-v2.xml | 23 modules/common/ant-bundle/src/test/resources/test-bundle-with-manage-root-dir.xml | 64 modules/common/cassandra-auth/pom.xml | 27 modules/common/cassandra-auth/src/main/java/org/rhq/cassandra/auth/RhqInternodeAuthenticator.java | 78 modules/common/cassandra-auth/src/main/java/org/rhq/cassandra/auth/RhqInternodeAuthenticatorMBean.java | 10 modules/common/cassandra-ccm/cassandra-ccm-arquillian/src/main/java/org/rhq/cassandra/ccm/arquillian/CCMSuiteDeploymentExtension.java | 80 modules/common/cassandra-ccm/cassandra-ccm-core/pom.xml | 102 modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/bin/cassandra | 192 modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/cassandra-jvm.properties | 112 modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/rhq.cassandra-env.sh | 247 modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/rhq.cassandra.yaml | 4 modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/deploy.xml | 221 modules/common/cassandra-ccm/cassandra-ccm-core/src/main/cassandra/cli/dbsetup.script | 40 modules/common/cassandra-ccm/cassandra-ccm-core/src/main/cassandra/cql/create_keyspace.cql | 1 modules/common/cassandra-ccm/cassandra-ccm-core/src/main/cassandra/cql/dbsetup.cql | 42 modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java | 100 
modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/ClusterInitService.java | 117 modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java | 117 modules/common/cassandra-ccm/cassandra-ccm-core/src/test/java/org/rhq/cassandra/DeployerITest.java | 4 modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra-env.sh | 247 modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra-jvm.properties | 112 modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra.yaml | 4 modules/common/cassandra-ccm/cassandra-ccm-maven-plugin/src/main/java/org/rhq/cassandra/ccm/maven/DeployMojo.java | 6 modules/common/cassandra-ccm/cassandra-ccm-testng/src/main/java/org/rhq/cassandra/CCMTestNGListener.java | 17 modules/common/cassandra-installer/pom.xml | 5 modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java | 150 modules/common/cassandra-installer/src/main/resources/module/main/module.xml | 1 modules/common/cassandra-installer/src/test/java/org/rhq/storage/installer/StorageInstallerTest.java | 204 modules/common/cassandra-installer/src/test/resources/log4j.properties | 41 modules/common/cassandra-installer/src/test/resources/rhq48/storage/conf/cassandra-env.sh | 247 modules/common/cassandra-installer/src/test/resources/rhq48/storage/conf/cassandra.yaml | 690 modules/common/cassandra-installer/src/test/resources/rhq48/storage/conf/log4j-server.properties | 45 modules/common/cassandra-jmx/pom.xml | 28 modules/common/cassandra-jmx/src/main/java/org/rhq/cassandra/installer/RMIContextFactory.java | 51 modules/common/cassandra-schema/pom.xml | 28 modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/AbstractManager.java | 406 modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java | 111 
modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/TopologyManager.java | 149 modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/UpdateFile.java | 235 modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/UpdateFolder.java | 170 modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/VersionManager.java | 265 modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/exception/InstalledSchemaTooAdvancedException.java | 39 modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/exception/InstalledSchemaTooOldException.java | 40 modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/exception/SchemaNotInstalledException.java | 38 modules/common/cassandra-schema/src/main/resources/management/0001.xml | 25 modules/common/cassandra-schema/src/main/resources/schema/create/0001.xml | 17 modules/common/cassandra-schema/src/main/resources/schema/create/0002.xml | 9 modules/common/cassandra-schema/src/main/resources/schema/drop/0001.xml | 5 modules/common/cassandra-schema/src/main/resources/schema/drop/0002.xml | 5 modules/common/cassandra-schema/src/main/resources/schema/drop/0003.xml | 5 modules/common/cassandra-schema/src/main/resources/topology/0001.xml | 5 modules/common/cassandra-schema/src/main/resources/topology/0002.xml | 13 modules/common/cassandra-schema/src/test/java/org/rhq/cassandra/schema/UpdateFileTest.java | 170 modules/common/cassandra-schema/src/test/resources/bad_file_1.xml | 9 modules/common/cassandra-schema/src/test/resources/bad_file_2.xml | 9 modules/common/cassandra-schema/src/test/resources/bad_file_3.xml | 9 modules/common/cassandra-schema/src/test/resources/no_binding.xml | 9 modules/common/cassandra-schema/src/test/resources/no_binding_extra_tags.xml | 12 modules/common/cassandra-schema/src/test/resources/no_binding_named_steps.xml | 9 modules/common/cassandra-schema/src/test/resources/required_binding.xml | 9 
modules/common/cassandra-schema/src/test/resources/required_binding_named_steps.xml | 9 modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ClusterBuilder.java | 7 modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java | 164 modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditorException.java | 21 modules/common/cassandra-util/src/test/java/org/rhq/cassandra/util/ConfigEditorTest.java | 159 modules/common/cassandra-util/src/test/resources/cassandra.yaml | 690 modules/common/drift/pom.xml | 22 modules/common/filetemplate-bundle/pom.xml | 6 modules/common/filetemplate-bundle/src/main/java/org/rhq/bundle/filetemplate/recipe/BundleRecipeCommand.java | 4 modules/common/jboss-as/pom.xml | 7 modules/common/pom.xml | 8 modules/core/client-api/src/main/java/org/rhq/core/clientapi/agent/metadata/ConfigurationMetadataParser.java | 22 modules/core/client-api/src/main/java/org/rhq/core/clientapi/agent/metadata/PluginMetadataParser.java | 44 modules/core/client-api/src/main/resources/rhq-configuration.xsd | 18 modules/core/client-api/src/test/java/org/rhq/core/clientapi/agent/metadata/test/ExtensionModelTest.java | 2 modules/core/client-api/src/test/java/org/rhq/core/clientapi/agent/metadata/test/NestedSubCategoriesMetadataParserTest.java | 159 modules/core/client-api/src/test/resources/test-hibernate.xml | 8 modules/core/client-api/src/test/resources/test-jbossas.xml | 10 modules/core/client-api/src/test/resources/test-subcategories-nested.xml | 60 modules/core/dbutils/pom.xml | 21 modules/core/dbutils/src/main/java/org/rhq/core/db/upgrade/StorageNodeAddressUpgradeTask.java | 54 modules/core/dbutils/src/main/scripts/dbsetup/authz-data.xml | 22 modules/core/dbutils/src/main/scripts/dbsetup/config-schema.xml | 1 modules/core/dbutils/src/main/scripts/dbsetup/content-schema.xml | 36 modules/core/dbutils/src/main/scripts/dbsetup/measurement-schema.xml | 5 
modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml | 259 modules/core/domain/intentional-api-changes-since-4.8.0.xml | 20 modules/core/domain/src/main/java/org/rhq/core/domain/alert/Alert.java | 3 modules/core/domain/src/main/java/org/rhq/core/domain/alert/AlertDampening.java | 3 modules/core/domain/src/main/java/org/rhq/core/domain/alert/AlertDefinition.java | 4 modules/core/domain/src/main/java/org/rhq/core/domain/auth/Subject.java | 36 modules/core/domain/src/main/java/org/rhq/core/domain/authz/Permission.java | 86 modules/core/domain/src/main/java/org/rhq/core/domain/authz/Role.java | 61 modules/core/domain/src/main/java/org/rhq/core/domain/bundle/Bundle.java | 38 modules/core/domain/src/main/java/org/rhq/core/domain/bundle/BundleGroup.java | 245 modules/core/domain/src/main/java/org/rhq/core/domain/cloud/Server.java | 57 modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageClusterSettings.java | 56 modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java | 110 modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeConfigurationComposite.java | 179 modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeLoadComposite.java | 109 modules/core/domain/src/main/java/org/rhq/core/domain/common/composite/SystemSetting.java | 50 modules/core/domain/src/main/java/org/rhq/core/domain/configuration/Configuration.java | 247 modules/core/domain/src/main/java/org/rhq/core/domain/configuration/definition/PropertyOptionsSource.java | 69 modules/core/domain/src/main/java/org/rhq/core/domain/criteria/AlertDefinitionCriteria.java | 9 modules/core/domain/src/main/java/org/rhq/core/domain/criteria/BundleCriteria.java | 27 modules/core/domain/src/main/java/org/rhq/core/domain/criteria/BundleGroupCriteria.java | 102 modules/core/domain/src/main/java/org/rhq/core/domain/criteria/BundleResourceDeploymentCriteria.java | 9 modules/core/domain/src/main/java/org/rhq/core/domain/criteria/BundleVersionCriteria.java 
| 10 modules/core/domain/src/main/java/org/rhq/core/domain/criteria/RoleCriteria.java | 7 modules/core/domain/src/main/java/org/rhq/core/domain/measurement/MeasurementBaseline.java | 16 modules/core/domain/src/main/java/org/rhq/core/domain/measurement/MeasurementUnits.java | 13 modules/core/domain/src/main/java/org/rhq/core/domain/resource/Resource.java | 18 modules/core/domain/src/main/java/org/rhq/core/domain/util/ResourceUtility.java | 32 modules/core/domain/src/test/java/org/rhq/core/domain/cloud/StorageNodeTest.java | 111 modules/core/domain/src/test/java/org/rhq/core/domain/configuration/ConfigurationBuilderTest.java | 237 modules/core/domain/src/test/java/org/rhq/core/domain/util/ResourceUtilityTest.java | 84 modules/core/native-system/src/main/java/org/rhq/core/system/SigarAccess.java | 44 modules/core/native-system/src/main/java/org/rhq/core/system/SigarAccessHandler.java | 201 modules/core/native-system/src/test/java/org/rhq/core/system/SigarAccessHandlerTest.java | 172 modules/core/plugin-api/src/main/java/org/rhq/core/pluginapi/inventory/DiscoveredResourceDetails.java | 23 modules/core/plugin-api/src/main/java/org/rhq/core/pluginapi/inventory/ResourceContext.java | 2 modules/core/plugin-container/src/main/java/org/rhq/core/pc/inventory/InventoryManager.java | 43 modules/core/plugin-container/src/main/java/org/rhq/core/pc/inventory/ResourceContainer.java | 15 modules/core/util/src/main/java/org/rhq/core/util/updater/Deployer.java | 16 modules/core/util/src/main/java/org/rhq/core/util/updater/DeploymentData.java | 78 modules/core/util/src/main/java/org/rhq/core/util/updater/DeploymentProperties.java | 105 modules/core/util/src/main/java/org/rhq/core/util/updater/DestinationComplianceMode.java | 87 modules/core/util/src/test/java/org/rhq/core/util/updater/DeploymentPropertiesTest.java | 17 modules/core/util/src/test/java/org/rhq/core/util/updater/DeploymentsMetadataTest.java | 1 
modules/core/util/src/test/java/org/rhq/core/util/updater/ManageRootDirTest.java | 4 modules/enterprise/gui/coregui/.externalToolBuilders/org.eclipse.wst.jsdt.core.javascriptValidator.launch | 7 modules/enterprise/gui/coregui/pom.xml | 13 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/GraphMarker.java | 32 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/IconEnum.java | 6 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/ImageManager.java | 5 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/LoginView.java | 2 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/UserSessionManager.java | 2 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/AdministrationView.java | 6 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/PermissionsEditor.java | 139 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleBundleGroupSelector.java | 40 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleEditView.java | 34 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleLdapGroupSelector.java | 365 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RolesDataSource.java | 17 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/ClusterConfigurationEditor.java | 238 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java | 466 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAlertHistoryView.java | 251 
modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeConfigurationEditor.java | 305 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java | 248 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasourceField.java | 12 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java | 345 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeLoadComponent.java | 129 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java | 408 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/topology/ServerTableView.java | 8 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/AlertDataSource.java | 22 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/AlertDetailsView.java | 42 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/AlertHistoryView.java | 20 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/definitions/ConditionEditor.java | 38 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/definitions/GeneralPropertiesAlertDefinitionForm.java | 4 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/bundle/deployment/BundleDeploymentDataSource.java | 3 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/bundle/group/BundleGroupSelector.java | 83 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/bundle/group/BundleGroupsDataSource.java | 126 
modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/bundle/list/BundleView.java | 4 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/bundle/version/BundleVersionView.java | 4 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/configuration/ConfigurationEditor.java | 31 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/form/NumberWithUnitsValidator.java | 6 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/form/SimpleEditableFormItem.java | 2 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/form/ValueWithUnitsItem.java | 138 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/measurement/AbstractMeasurementRangeEditor.java | 18 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/selector/AbstractSelector.java | 4 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/table/AbstractTableSection.java | 21 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/table/Table.java | 40 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/trigger/JobTriggerEditor.java | 4 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/FavoriteGroupsPortlet.java | 2 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/GroupBundleDeploymentsPortlet.java | 2 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/GroupConfigurationUpdatesPortlet.java | 2 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/GroupEventsPortlet.java | 2 
modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/GroupMetricsPortlet.java | 7 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/GroupOobsPortlet.java | 2 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/GroupOperationsPortlet.java | 2 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/GroupPkgHistoryPortlet.java | 2 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/inventory/groups/graph/ResourceGroupD3GraphPortlet.java | 2 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/inventory/queue/AutodiscoveryPortlet.java | 2 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/inventory/resource/FavoriteResourcesPortlet.java | 2 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/inventory/resource/graph/ResourceD3GraphPortlet.java | 2 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/platform/PlatformSummaryPortlet.java | 2 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/recent/alerts/RecentAlertsPortlet.java | 2 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/recent/operations/OperationHistoryPortlet.java | 2 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/recent/operations/OperationSchedulePortlet.java | 12 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/recent/problems/ProblemResourcesPortlet.java | 2 
modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/resource/ResourceAlertsPortlet.java | 2 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/resource/ResourceBundleDeploymentsPortlet.java | 2 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/resource/ResourceConfigurationUpdatesPortlet.java | 2 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/resource/ResourceEventsPortlet.java | 2 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/resource/ResourceMetricsPortlet.java | 4 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/resource/ResourceOobsPortlet.java | 2 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/resource/ResourcePkgHistoryPortlet.java | 2 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/summary/InventorySummaryPortlet.java | 2 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/summary/TagCloudPortlet.java | 2 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/util/MashupPortlet.java | 2 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/util/MessagePortlet.java | 2 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/BundleGWTService.java | 11 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/ConfigurationGWTService.java | 23 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/LdapGWTService.java | 5 
modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/ResourceGWTService.java | 12 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java | 31 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/TopologyGWTService.java | 2 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/AbstractD3GraphListView.java | 14 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/detail/operation/schedule/AbstractOperationScheduleDetailsView.java | 11 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/detail/summary/AbstractActivityView.java | 28 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/AbstractMetricGraph.java | 6 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/ButtonBarDateTimeRangeEditor.java | 8 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/MetricGraphData.java | 30 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/RedrawGraphs.java | 29 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/Refreshable.java | 30 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityLineGraphType.java | 308 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java | 97 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java | 182 
modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/LineMetricGraph.java | 158 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/MetricNvd3MultiLineGraph.java | 92 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/StackedBarMetricGraphImpl.java | 46 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/definitions/GroupDefinitionDataSource.java | 8 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/definitions/SingleGroupDefinitionView.java | 17 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/D3GroupGraphListView.java | 126 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/inventory/ResourceGroupResourceSelector.java | 67 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3GraphListView.java | 240 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3MultiLineGraph.java | 324 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupMultiLineGraphListView.java | 2 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/GroupMeasurementTableView.java | 60 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/GroupMetricsTableDataSource.java | 5 
modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/MetricsTableDataSource.java | 333 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/DashboardLinkUtility.java | 258 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/OverviewForm.java | 4 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceDetailView.java | 38 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceErrorsView.java | 4 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceTitleBar.java | 129 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceTreeView.java | 204 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/configuration/ConfigurationFilter.java | 32 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/configuration/ResourceConfigurationEditView.java | 11 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/D3GraphListView.java | 322 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/MetricD3Graph.java | 32 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/avail/AvailabilityD3GraphView.java | 33 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/avail/ResourceAvailabilityView.java | 399 
modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/AddToDashboardComponent.java | 184 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MeasurementTableView.java | 237 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsResourceView.java | 167 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableDataSource.java | 166 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java | 393 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsViewDataSource.java | 307 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/ResourceMetricAvailabilityView.java | 222 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/factory/ResourceFactoryConfigurationStep.java | 8 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/factory/ResourceFactoryInfoStep.java | 16 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/selection/ResourceSelector.java | 8 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/searchbar/BasicSearchStrategy.java | 15 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/util/enhanced/Enhanced.java | 2 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/BundleGWTServiceImpl.java | 42 
modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/ConfigurationGWTServiceImpl.java | 25 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/LdapGWTServiceImpl.java | 22 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/ResourceGWTServiceImpl.java | 29 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java | 130 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/TopologyGWTServiceImpl.java | 6 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/listener/CoreGuiServletContextListener.java | 6 modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/CoreGUI.gwt.xml | 1 modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties | 87 modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties | 91 modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties | 97 modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties | 82 modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties | 92 modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties | 86 modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties | 77 modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties | 106 modules/enterprise/gui/coregui/src/main/webapp/CoreGUI.css | 4 modules/enterprise/gui/coregui/src/main/webapp/CoreGUI.html | 23 modules/enterprise/gui/coregui/src/main/webapp/WEB-INF/web.xml | 12 
modules/enterprise/gui/coregui/src/main/webapp/css/charts.css | 36 modules/enterprise/gui/coregui/src/main/webapp/images/subsystems/bundle/BundleGroup_16.png |binary modules/enterprise/gui/coregui/src/main/webapp/images/subsystems/bundle/BundleGroup_24.png |binary modules/enterprise/gui/coregui/src/main/webapp/js/jquery-1.9.1.js | 9597 ------- modules/enterprise/gui/coregui/src/main/webapp/js/jquery-1.9.1.min.js | 5 modules/enterprise/gui/coregui/src/main/webapp/js/nv.d3.js |13048 ---------- modules/enterprise/gui/coregui/src/main/webapp/js/nv.d3.min.js | 6 modules/enterprise/gui/coregui/src/main/webapp/js/rhq.js | 240 modules/enterprise/gui/portal-war/src/main/java/org/rhq/enterprise/gui/ha/ListServersUIBean.java | 20 modules/enterprise/gui/rest-war/src/main/java/org/rhq/enterprise/rest/JsonPFilter.java | 56 modules/enterprise/remoting/cli/src/etc/generate-jaxb-client-types.bat | 14 modules/enterprise/remoting/cli/src/etc/generate-jaxb-client-types.sh | 14 modules/enterprise/server/appserver/pom.xml | 11 modules/enterprise/server/appserver/src/main/bin-resources/bin/wrapper/rhq-storage-wrapper.conf | 36 modules/enterprise/server/appserver/src/main/bin-resources/bin/wrapper/rhq-storage-wrapper.env | 24 modules/enterprise/server/appserver/src/main/dev-resources/bin/wrapper/rhq-storage-wrapper.inc | 6 modules/enterprise/server/appserver/src/main/resources/etc/RHQ-mib.txt | 94 modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml | 21 modules/enterprise/server/appserver/src/main/scripts/storage_setup.groovy | 5 modules/enterprise/server/client-api/src/test/java/org/rhq/enterprise/client/test/LocalClientTest.java | 22 modules/enterprise/server/ear/pom.xml | 4 modules/enterprise/server/ear/src/main/application/META-INF/jboss-deployment-structure.xml | 2 modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java | 30 
modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/ServerInstallUtil.java | 85 modules/enterprise/server/itests-2/pom.xml | 14 modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/client/security/test/JndiAccessTest.java | 86 modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/alert/test/AlertConditionTest.java | 47 modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/BundleManagerBeanTest.java | 839 modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/TestBundlePluginComponent.java | 84 modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/TestBundleServerPluginService.java | 50 modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBeanTest.java | 94 modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/configuration/ConfigurationManagerBeanTest.java | 8 modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/discovery/DiscoveryBossBeanTest.java | 21 modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/measurement/MeasurementDataManagerBeanTest.java | 205 modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/measurement/test/MeasurementBaselineManagerTest.java | 6 modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/plugins/ant/RecipeValidationTest.java | 292 modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/resource/metadata/MetadataBeanTest.java | 3 modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/resource/metadata/ResourceMetadataManagerBeanTest.java | 18 modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/resource/metadata/test/UpdatePluginMetadataTestBase.java | 41 
modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/resource/test/GroupMemberCandidateResourcesTest.java | 190 modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/resource/test/ResourceManagerBeanTest.java | 181 modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/storage/FakeStorageClusterSettingsManagerBean.java | 27 modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/tagging/TagManagerBeanTest.java | 5 modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java | 81 modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/StrippedDownStartupBean.java | 50 modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/StrippedDownStartupBeanPreparation.java | 43 modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/ldap/FakeLdapContext.java | 37 modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/util/SessionTestHelper.java | 13 modules/enterprise/server/itests-2/src/test/resources/application.xml | 4 modules/enterprise/server/itests-2/src/test/resources/jboss-deployment-structure.xml | 2 modules/enterprise/server/itests-2/src/test/resources/org/rhq/enterprise/server/plugins/ant/recipe-no-manageRootDir.xml | 34 modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml | 186 modules/enterprise/server/jar/pom.xml | 38 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/AuthorizationManagerBean.java | 78 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/AuthorizationManagerLocal.java | 51 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/RoleManagerBean.java | 164 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/RoleManagerLocal.java | 98 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/RoleManagerRemote.java 
| 24 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerBean.java | 913 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerLocal.java | 157 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerRemote.java | 352 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleServerServiceImpl.java | 2 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java | 960 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java | 92 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java | 48 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/TopologyManagerBean.java | 27 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/TopologyManagerLocal.java | 13 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/instance/ServerManagerBean.java | 69 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/configuration/ConfigurationManagerBean.java | 83 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/configuration/ConfigurationManagerLocal.java | 8 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/CoreServerMBean.java | 2 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/StartupBean.java | 24 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/jaas/LdapLoginModule.java | 16 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/measurement/MeasurementBaselineManagerBean.java | 68 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/measurement/MeasurementBaselineManagerLocal.java | 7 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/measurement/MeasurementDataManagerBean.java | 27 
modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/measurement/MeasurementDataManagerRemote.java | 3 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/operation/OperationManagerBean.java | 6 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/plugin/pc/alert/AlertSender.java | 13 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/remote/RemoteSafeInvocationHandler.java | 10 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/ResourceManagerBean.java | 81 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/ResourceManagerLocal.java | 20 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java | 275 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerLocal.java | 2 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/definition/GroupDefinitionManagerBean.java | 29 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/AbstractRestBean.java | 6 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/OperationsHandlerBean.java | 15 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/ResourceHandlerBean.java | 57 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/helper/LinkDeserializer.java | 28 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/scheduler/jobs/StorageNodeMaintenanceJob.java | 298 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java | 76 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterHeartBeatJob.java | 126 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java | 52 
modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerBean.java | 58 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerLocal.java | 16 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java | 842 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java | 57 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/system/SystemManagerBean.java | 31 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/system/SystemManagerLocal.java | 11 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/CriteriaQueryGenerator.java | 114 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/LookupUtil.java | 22 modules/enterprise/server/jar/src/main/resources/server-comm-configuration-overrides.properties | 2 modules/enterprise/server/plugins/alert-snmp/src/main/java/org/rhq/enterprise/server/plugins/alertSnmp/SnmpInfo.java | 18 modules/enterprise/server/plugins/alert-snmp/src/main/java/org/rhq/enterprise/server/plugins/alertSnmp/SnmpSender.java | 10 modules/enterprise/server/plugins/alert-snmp/src/main/java/org/rhq/enterprise/server/plugins/alertSnmp/SnmpTrapSender.java | 47 modules/enterprise/server/plugins/alert-snmp/src/main/resources/META-INF/rhq-serverplugin.xml | 46 modules/enterprise/server/plugins/alert-snmp/src/test/java/org/rhq/enterprise/server/plugins/alertSnmp/SnmpInfoTest.java | 30 modules/enterprise/server/plugins/alert-snmp/src/test/java/org/rhq/enterprise/server/plugins/alertSnmp/SnmpSenderTest.java | 5 modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java | 171 
modules/enterprise/server/plugins/ant-bundle/src/main/java/org/rhq/enterprise/server/plugins/ant/AntBundleServerPluginComponent.java | 5 modules/enterprise/server/plugins/yum/pom.xml | 16 modules/enterprise/server/plugins/yum/src/main/java/org/rhq/enterprise/server/plugins/yum/DiskReader.java | 60 modules/enterprise/server/plugins/yum/src/main/java/org/rhq/enterprise/server/plugins/yum/HttpReader.java | 90 modules/enterprise/server/plugins/yum/src/main/java/org/rhq/enterprise/server/plugins/yum/RepoProvider.java | 42 modules/enterprise/server/plugins/yum/src/main/java/org/rhq/enterprise/server/plugins/yum/UrlReader.java | 111 modules/enterprise/server/plugins/yum/src/main/resources/META-INF/rhq-serverplugin.xml | 6 modules/enterprise/server/plugins/yum/src/test/java/Acme/Serve/UrlReaderTestServer.java | 64 modules/enterprise/server/plugins/yum/src/test/java/org/rhq/enterprise/server/plugins/yum/UrlReaderTest.java | 195 modules/enterprise/server/plugins/yum/src/test/resources/test.file | 1 modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/Commands.java | 2 modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/ControlCommand.java | 86 modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/AbstractInstall.java | 37 modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Install.java | 20 modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Restart.java | 69 modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Start.java | 10 modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Stop.java | 10 modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Upgrade.java | 2 modules/enterprise/server/server-metrics/pom.xml | 12 modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsBaselineCalculator.java | 21 
modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsDAO.java | 153 modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsServer.java | 26 modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/StorageResultSetFuture.java | 76 modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/StorageSession.java | 110 modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/StorageStateListener.java | 20 modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/domain/ListPagedResult.java | 6 modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/domain/SimplePagedResult.java | 11 modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/CassandraIntegrationTest.java | 35 modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/ClusterMonitorTest.java | 92 modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsBaselineCalculatorTest.java | 62 modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsDAOTest.java | 17 modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsServerTest.java | 11 modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/TimeoutTest.java | 5 modules/enterprise/server/server-metrics/src/test/resources/log4j.properties | 2 modules/enterprise/server/startup-subsystem/pom.xml | 4 modules/helpers/ldap-tool/pom.xml | 85 modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java | 1296 modules/helpers/metrics-simulator/pom.xml | 6 modules/helpers/metrics-simulator/src/main/java/org/rhq/metrics/simulator/Simulator.java | 56 modules/helpers/perftest-support/pom.xml | 8 modules/helpers/pluginAnnotations/pom.xml | 48 modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/DataType.java | 39 
modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/DisplayType.java | 39 modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/MeasurementType.java | 38 modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/Metric.java | 43 modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/Operation.java | 38 modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/Parameter.java | 41 modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/Units.java | 38 modules/helpers/pluginGen/log4j.properties | 12 modules/helpers/pluginGen/pom.xml | 88 modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/PluginGen.java | 382 modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Props.java | 603 modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/ResourceCategory.java | 89 modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Test.java | 87 modules/helpers/pluginGen/src/main/resources/component.ftl | 277 modules/helpers/pluginGen/src/main/resources/descriptor.ftl | 47 modules/helpers/pluginGen/src/main/resources/descriptorMain.ftl | 104 modules/helpers/pluginGen/src/main/resources/discovery.ftl | 106 modules/helpers/pluginGen/src/main/resources/eventPoller.ftl | 62 modules/helpers/pluginGen/src/main/resources/pom.ftl | 239 modules/helpers/pom.xml | 4 modules/helpers/rest-docs-generator/pom.xml | 12 modules/integration-tests/rest-api/pom.xml | 13 modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/ContentTest.java | 8 modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/GroupTest.java | 6 modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/OperationsTest.java | 337 
modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/ResourcesTest.java | 152 modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/d/Link.java | 4 modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/d/LinkDeserializer.java | 18 modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/d/LinkSerializer.java | 53 modules/plugins/ant-bundle/src/main/java/org/rhq/plugins/ant/AntBundlePluginComponent.java | 8 modules/plugins/cassandra/pom.xml | 13 modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java | 253 modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeDiscoveryComponent.java | 55 modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/StorageServiceComponent.java | 103 modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml | 19 modules/plugins/cassandra/src/test/resources/cassandra.yaml | 690 modules/plugins/cassandra/src/test/resources/log4j.properties | 41 modules/plugins/cassandra/src/test/resources/seeds.yaml | 4 modules/plugins/jboss-as-5/src/main/java/org/rhq/plugins/jbossas5/ApplicationServerComponent.java | 64 modules/plugins/jboss-as-5/src/main/java/org/rhq/plugins/jbossas5/ApplicationServerOperationsDelegate.java | 76 modules/plugins/jboss-as-5/src/main/java/org/rhq/plugins/jbossas5/ConnectorComponent.java | 67 modules/plugins/jboss-as-5/src/main/java/org/rhq/plugins/jbossas5/ConnectorDiscoveryComponent.java | 85 modules/plugins/jboss-as-5/src/main/resources/META-INF/rhq-plugin.xml | 3 modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/BaseProcessDiscovery.java | 119 modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/BaseServerComponent.java | 2 modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/HostControllerComponent.java | 18 
modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/JBossProductType.java | 11 modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/StandaloneASComponent.java | 25 modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/SubsystemDiscovery.java | 32 modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/helper/HostConfiguration.java | 24 modules/plugins/jboss-as-7/src/main/resources/META-INF/rhq-plugin.xml | 8 modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/domain/DomainServerComponentTest.java | 7 modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/domain/SecurityModuleOptionsTest.java | 12 modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/standalone/InterruptibleOperationsTest.java | 13 modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/standalone/StandaloneServerComponentTest.java | 7 modules/plugins/postfix/src/main/java/org/rhq/plugins/postfix/PostfixServerComponent.java | 61 modules/plugins/postfix/src/main/java/org/rhq/plugins/postfix/PostfixServerDiscoveryComponent.java | 41 modules/plugins/postfix/src/main/resources/META-INF/rhq-plugin.xml | 4 modules/plugins/postfix/src/test/java/org/rhq/plugins/postfix/PostfixComponentTest.java | 8 modules/plugins/rhq-storage/pom.xml | 233 modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/InternodeAuthConfUpdateException.java | 22 modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java | 424 modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java | 323 modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml | 130 modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java | 363 modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeConfigDelegateTest.java | 218 
modules/plugins/rhq-storage/src/test/resources/cassandra.yaml | 690 modules/plugins/rhq-storage/src/test/resources/log4j.xml | 40 modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatServerComponent.java | 41 modules/plugins/tomcat/src/main/resources/META-INF/rhq-plugin.xml | 8 pom.xml | 48 527 files changed, 30267 insertions(+), 34011 deletions(-)
New commits: commit adad71fdc7eae5d12e38c017a104428f1fee169b Author: Thomas Segismont tsegismo@redhat.com Date: Tue Aug 20 22:02:58 2013 +0200
Bug 998888 - Unable to install rhq server - ClassNotFoundException
diff --git a/modules/enterprise/server/jar/pom.xml b/modules/enterprise/server/jar/pom.xml index 76848a2..4d8a07d 100644 --- a/modules/enterprise/server/jar/pom.xml +++ b/modules/enterprise/server/jar/pom.xml @@ -246,6 +246,23 @@ <scope>provided</scope> </dependency>
+ <!-- Apache HC HTTP Client 4+ --> + + <dependency> + <groupId>org.apache.httpcomponents</groupId> + <artifactId>httpcore</artifactId> + </dependency> + + <dependency> + <groupId>org.apache.httpcomponents</groupId> + <artifactId>httpclient</artifactId> + </dependency> + + <dependency> + <groupId>org.apache.httpcomponents</groupId> + <artifactId>httpmime</artifactId> + </dependency> + <!-- Required by a couple APL and Lather classes - TODO: Remove this once APL and Lather have been excised. --> <dependency> <groupId>commons-beanutils</groupId> diff --git a/pom.xml b/pom.xml index e637a37..9f51a31 100644 --- a/pom.xml +++ b/pom.xml @@ -168,6 +168,7 @@ <commons-validator.version>1.1.4</commons-validator.version> <commons-collections.version>3.2.1</commons-collections.version> <commons-configuration.version>1.6</commons-configuration.version> + <apache.httpcomponents.version>4.2.5</apache.httpcomponents.version>
<junit.version>4.10</junit.version> <liquibase-core.version>2.0.3</liquibase-core.version> @@ -485,6 +486,36 @@ <version>1.8.2</version> </dependency>
+ <!-- Apache HC HTTP Client 4+ --> + + <dependency> + <groupId>org.apache.httpcomponents</groupId> + <artifactId>httpcore</artifactId> + <version>${apache.httpcomponents.version}</version> + </dependency> + + <dependency> + <groupId>org.apache.httpcomponents</groupId> + <artifactId>httpclient</artifactId> + <version>${apache.httpcomponents.version}</version> + <exclusions> + <exclusion> + <groupId>commons-codec</groupId> + <artifactId>commons-codec</artifactId> + </exclusion> + <exclusion> + <groupId>commons-logging</groupId> + <artifactId>commons-logging</artifactId> + </exclusion> + </exclusions> + </dependency> + + <dependency> + <groupId>org.apache.httpcomponents</groupId> + <artifactId>httpmime</artifactId> + <version>${apache.httpcomponents.version}</version> + </dependency> + <!-- GNU GetOpt (any modules that need to do command-line argument parsing should use this) --> <dependency> <groupId>gnu-getopt</groupId>
commit e1a4fd30809490e637ce50185300831dcdf85ac6 Author: Simeon Pinder spinder@fulliautomatix.conchfritter.com Date: Tue Aug 20 14:22:02 2013 -0400
fix merge issue.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 0670589..41f4b64 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -51,9 +51,9 @@ import org.apache.commons.logging.LogFactory; import org.rhq.core.domain.alert.Alert; import org.rhq.core.domain.auth.Subject; import org.rhq.core.domain.authz.Permission; +import org.rhq.core.domain.cloud.StorageClusterSettings; import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.cloud.StorageNode.OperationMode; -import org.rhq.core.domain.cloud.StorageClusterSettings; import org.rhq.core.domain.cloud.StorageNodeConfigurationComposite; import org.rhq.core.domain.cloud.StorageNodeLoadComposite; import org.rhq.core.domain.common.JobTrigger; @@ -685,12 +685,6 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
return configuration; } - - @Override - @Asynchronous - public void updateConfigurationAsync(Subject subject, StorageNodeConfigurationComposite storageNodeConfiguration) { - updateConfiguration(subject, storageNodeConfiguration); - }
@Override @Asynchronous
commit 35a34f2f8267a2dda7c947a72d89b15c232ad953 Merge: 98a9a7d 5cb55b4 Author: Simeon Pinder spinder@fulliautomatix.conchfritter.com Date: Tue Aug 20 10:54:29 2013 -0400
Merge branch 'master' into nightly_clone
commit 5cb55b4e9d8dc51582ba44f9b7c70ff998619f0b Author: Lukas Krejci lkrejci@redhat.com Date: Tue Aug 20 14:24:39 2013 +0200
[BZ 988881] - Removed missing i18n, cancel avail checking timers on logout.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceTitleBar.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceTitleBar.java index fbbf0a8..8fdaf6e 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceTitleBar.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceTitleBar.java @@ -395,9 +395,14 @@ public class ResourceTitleBar extends EnhancedVLayout { new AsyncCallback<List<ResourceError>>() { public void onFailure(Throwable caught) { pluginErrors.setVisible(false); - CoreGUI.getErrorHandler().handleError( - MSG.dataSource_resourceErrors_error_fetchFailure(String.valueOf(resourceComposite.getResource() - .getId())), caught); + + if (UserSessionManager.isLoggedOut()) { + resourceAvailAndErrorsRefreshTime.cancel(); + } else { + CoreGUI.getErrorHandler().handleError( + MSG.dataSource_resourceErrors_error_fetchFailure(String.valueOf(resourceComposite.getResource() + .getId())), caught); + }
if (latch != null) { latch.countDown(); @@ -427,9 +432,13 @@ public class ResourceTitleBar extends EnhancedVLayout { @Override public void onFailure(Throwable caught) { availabilityImage.setSrc(ImageManager.getAvailabilityLargeIconFromAvailType(currentAvail)); - CoreGUI.getErrorHandler().handleError("I18N: Failed to refresh the availability", caught); - //MSG.dataSource_resourceErrors_error_fetchFailure(String.valueOf(resourceComposite.getResource() - // .getId())), caught); + + if (UserSessionManager.isLoggedOut()) { + resourceAvailAndErrorsRefreshTime.cancel(); + } else { + CoreGUI.getErrorHandler().handleError(MSG.view_inventory_resource_loadFailed(String.valueOf(resource.getId())), caught); + } + if (latch != null) { latch.countDown(); } else {
commit 98a9a7d65386530dc985617124c5d370505bcad5 Merge: ee2e6c3 d566080 Author: Simeon Pinder spinder@fulliautomatix.conchfritter.com Date: Tue Aug 20 08:20:14 2013 -0400
Merge branch 'master' into nightly/rhq.jon
Conflicts: modules/common/cassandra-jmx/pom.xml
commit 09306ff274e4fc42e7eccd031be0535b8a0283d1 Author: John Sanda jsanda@redhat.com Date: Mon Aug 19 22:25:49 2013 -0400
update logic of updateSchemaIfNecessary to handle removal of nodes
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java index 99bd592..b511fe0 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java @@ -165,9 +165,8 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa storageNode = storageNodeOperationsHandler.setMode(storageNode, StorageNode.OperationMode.DECOMMISSION); List<StorageNode> storageNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, StorageNode.class).setParameter("operationMode", StorageNode.OperationMode.NORMAL).getResultList(); - storageNodes.add(storageNode);
- boolean runRepair = updateSchemaIfNecessary(storageNodes); + boolean runRepair = updateSchemaIfNecessary(storageNodes.size() + 1, storageNodes.size()); // This is a bit of a hack since the maintenancePending flag is really intended to // queue up storage nodes during cluster maintenance operations. storageNode.setMaintenancePending(runRepair); @@ -205,7 +204,6 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa
@Override public void performAddNodeMaintenance(Subject subject, StorageNode storageNode) { - storageNode.setOperationMode(StorageNode.OperationMode.ADD_MAINTENANCE); List<StorageNode> clusterNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, StorageNode.class).setParameter("operationMode", StorageNode.OperationMode.NORMAL) .getResultList(); @@ -214,7 +212,7 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa } storageNode.setMaintenancePending(true); clusterNodes.add(storageNode); - boolean runRepair = updateSchemaIfNecessary(clusterNodes); + boolean runRepair = updateSchemaIfNecessary(clusterNodes.size(), clusterNodes.size() + 1); performAddNodeMaintenance(subject, storageNode, runRepair, createPropertyListOfAddresses(SEEDS_LIST, clusterNodes)); } @@ -701,59 +699,92 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa resourceType.getPlugin().equals(STORAGE_NODE_PLUGIN_NAME); }
- private boolean updateSchemaIfNecessary(List<StorageNode> storageNodes) { - // The previous cluster size will be the current size - 1 since we currently only - // support deploying one node at a time. - int previousClusterSize = storageNodes.size() - 1; + private boolean updateSchemaIfNecessary(int previousClusterSize, int newClusterSize) { boolean isRepairNeeded; int replicationFactor = 1;
- if (previousClusterSize >= 4) { - // At 4 nodes we increase the RF to 3. We are not increasing the RF beyond - // that for additional nodes; so, there is no need to run repair if we are - // expanding from a 4 node cluster since the RF remains the same. + if (previousClusterSize == 0) { + throw new IllegalStateException("previousClusterSize cannot be 0"); + } + if (newClusterSize == 0) { + throw new IllegalStateException("newClusterSize cannot be 0"); + } + if (Math.abs(newClusterSize - previousClusterSize) != 1) { + throw new IllegalStateException("The absolute difference between previousClusterSize[" + + previousClusterSize + "] and newClusterSize[" + newClusterSize + "] must be 1"); + } + + if (newClusterSize == 1) { + isRepairNeeded = false; + replicationFactor = 1; + } else if (previousClusterSize > 4 && newClusterSize == 4) { isRepairNeeded = false; - } else if (previousClusterSize == 1) { - // The RF will increase since we are going from a single to a multi-node - // cluster; therefore, we want to run repair. + } else if (previousClusterSize == 4 && newClusterSize == 3) { isRepairNeeded = true; replicationFactor = 2; - } else if (previousClusterSize == 2) { - if (storageNodes.size() > 3) { - // If we go from 2 to > 3 nodes we will increase the RF to 3; therefore - // we want to run repair. - isRepairNeeded = true; - replicationFactor = 3; - } else { - // If we go from 2 to 3 nodes, we keep the RF at 2 so there is no need - // to run repair. - isRepairNeeded = false; - } - } else if (previousClusterSize == 3) { - // We are increasing the cluster size > 3 which means the RF will be - // updated to 3; therefore, we want to run repair. 
+ } else if (previousClusterSize == 3 && newClusterSize == 2) { + isRepairNeeded = false; + } else if (previousClusterSize == 1 && newClusterSize == 2) { + isRepairNeeded = true; + replicationFactor = 2; + } else if (previousClusterSize == 2 && newClusterSize == 3) { + isRepairNeeded = false; + } else if (previousClusterSize == 3 && newClusterSize == 4) { isRepairNeeded = true; replicationFactor = 3; + } else if (previousClusterSize == 4 && newClusterSize > 4) { + isRepairNeeded = false; } else { - // If we cluster size of zero, then something is really screwed up. It - // should always be > 0. - throw new RuntimeException("The previous cluster size should never be zero at this point"); + throw new IllegalStateException("previousClusterSize[" + previousClusterSize + "] and newClusterSize[" + + newClusterSize + "] is not supported"); }
- if (isRepairNeeded) { -// String username = getRequiredStorageProperty(USERNAME_PROPERTY); -// String password = getRequiredStorageProperty(PASSWORD_PROPERTY); -// SchemaManager schemaManager = new SchemaManager(username, password, storageNodes); -// try{ -// schemaManager.updateTopology(); -// } catch (Exception e) { -// log.error("An error occurred while applying schema topology changes", e); + + + +// if (newClusterSize == 1) { +// isRepairNeeded = false; +// replicationFactor = 1; +// } else if (previousClusterSize >= 4) { +// // At 4 nodes we increase the RF to 3. We are not increasing the RF beyond +// // that for additional nodes; so, there is no need to run repair if we are +// // expanding from a 4 node cluster since the RF remains the same. +// isRepairNeeded = false; +// } else if (previousClusterSize == 1) { +// // The RF will increase since we are going from a single to a multi-node +// // cluster; therefore, we want to run repair. +// isRepairNeeded = true; +// replicationFactor = 2; +// } else if (previousClusterSize == 2) { +// if (storageNodes.size() > 3) { +// // If we go from 2 to > 3 nodes we will increase the RF to 3; therefore +// // we want to run repair. +// isRepairNeeded = true; +// replicationFactor = 3; +// } else { +// // If we go from 2 to 3 nodes, we keep the RF at 2 so there is no need +// // to run repair. +// isRepairNeeded = false; // } +// } else if (previousClusterSize == 3) { +// // We are increasing the cluster size > 3 which means the RF will be +// // updated to 3; therefore, we want to run repair. +// isRepairNeeded = true; +// replicationFactor = 3; +// } else { +// // If we cluster size of zero, then something is really screwed up. It +// // should always be > 0. +// throw new RuntimeException("The previous cluster size should never be zero at this point"); +// }
+ if (isRepairNeeded) { updateReplicationFactor(replicationFactor); if (previousClusterSize == 1) { updateGCGraceSeconds(691200); // 8 days } + } else if (newClusterSize == 1) { + updateReplicationFactor(1); + updateGCGraceSeconds(0); }
return isRepairNeeded;
commit 1540e35d95bad7553824d76b39d7ed4d5c49bf2c Author: John Sanda jsanda@redhat.com Date: Mon Aug 19 21:21:22 2013 -0400
make sure the mode is updated at the end of each (un)deployment phase
The transaction demarcation has been updated to ensure that upon successful completion of a (un)deployment phase, the storage node's mode will be updated before starting the next phase. Previously it was done in the same transaction, and if starting the next phase failed, then the node's mode would still be set to the previously completed phase. This did not break anything, but it did result in extra work.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 4d4fd17..28723d7 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -238,6 +238,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
switch (storageNode.getOperationMode()) { case INSTALLED: + storageNode.setOperationMode(OperationMode.ANNOUNCE); case ANNOUNCE: reset(); storageNodeOperationsHandler.announceStorageNode(subject, storageNode); diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java index b068734..99bd592 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java @@ -82,7 +82,6 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa if (log.isInfoEnabled()) { log.info("Announcing " + storageNode + " to storage node cluster."); } - storageNode.setOperationMode(StorageNode.OperationMode.ANNOUNCE); List<StorageNode> clusterNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, StorageNode.class).setParameter("operationMode", StorageNode.OperationMode.NORMAL).getResultList(); List<StorageNode> allNodes = new ArrayList<StorageNode>(clusterNodes); @@ -113,7 +112,6 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa public void unannounceStorageNode(Subject subject, StorageNode storageNode) { log.info("Unannouncing " + storageNode);
- storageNode.setOperationMode(StorageNode.OperationMode.UNANNOUNCE); List<StorageNode> clusterNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, StorageNode.class).setParameter("operationMode", StorageNode.OperationMode.NORMAL).getResultList(); for (StorageNode clusterNode : clusterNodes) { @@ -133,8 +131,6 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa public void uninstall(Subject subject, StorageNode storageNode) { log.info("Uninstalling " + storageNode);
- storageNode.setOperationMode(StorageNode.OperationMode.UNINSTALL); - if (storageNode.getResource() == null) { finishUninstall(subject, storageNode); } else { @@ -142,6 +138,19 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa } }
+ private void finishUninstall(Subject subject, StorageNode storageNode) { + if (storageNode.getResource() != null) { + log.info("Removing storage node resource " + storageNode.getResource() + " from inventory"); + Resource resource = storageNode.getResource(); + storageNodeOperationsHandler.detachFromResource(storageNode); + resourceManager.uninventoryResource(subject, resource.getId()); + } + log.info("Removing storage node entity " + storageNode + " from database"); + entityManager.remove(storageNode); + + log.info(storageNode + " has been undeployed"); + } + @Override @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) public void detachFromResource(StorageNode storageNode) { @@ -153,7 +162,7 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa public void decommissionStorageNode(Subject subject, StorageNode storageNode) { log.info("Preparing to decommission " + storageNode);
- storageNode.setOperationMode(StorageNode.OperationMode.DECOMMISSION); + storageNode = storageNodeOperationsHandler.setMode(storageNode, StorageNode.OperationMode.DECOMMISSION); List<StorageNode> storageNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, StorageNode.class).setParameter("operationMode", StorageNode.OperationMode.NORMAL).getResultList(); storageNodes.add(storageNode); @@ -184,6 +193,10 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa StorageNode.class).setParameter("address", storageNodeAddress.getHostAddress()).getSingleResult();
if (storageNode.getOperationMode() == StorageNode.OperationMode.BOOTSTRAP) { + // TODO need to add support for HA deployments + // If multiple RHQ servers are running, they will all receive the event + // notification that the node is up and will all wind up calling this method. + storageNode = storageNodeOperationsHandler.setMode(storageNode, StorageNode.OperationMode.ADD_MAINTENANCE); performAddNodeMaintenance(subjectManager.getOverlord(), storageNode); } else { log.info(storageNode + " has already been bootstrapped. Skipping add node maintenance."); @@ -225,7 +238,11 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa StorageNode.class).setParameter("address", storageNodeAddress.getHostAddress()).getSingleResult();
if (storageNode.getOperationMode() == StorageNode.OperationMode.DECOMMISSION) { - storageNode.setOperationMode(StorageNode.OperationMode.REMOVE_MAINTENANCE); + // TODO need to add support for HA deployments + // If multiple RHQ servers are running, they will all receive the event + // notification that the node is up and will all wind up calling this method. + storageNode = storageNodeOperationsHandler.setMode(storageNode, + StorageNode.OperationMode.REMOVE_MAINTENANCE); performRemoveNodeMaintenance(subjectManager.getOverlord(), storageNode); } else { log.info("Remove node maintenance has already been run for " + storageNode); @@ -241,7 +258,7 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa node.setMaintenancePending(true); } boolean runRepair = storageNode.isMaintenancePending(); - performRemoveNodeMaintenance(subjectManager.getOverlord(), clusterNodes.get(0), runRepair, + performRemoveNodeMaintenance(subject, clusterNodes.get(0), runRepair, createPropertyListOfAddresses(SEEDS_LIST, clusterNodes)); }
@@ -357,8 +374,9 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa
if (nextNode == null) { log.info("Successfully announced new storage node to storage cluster"); - newStorageNode.setOperationMode(StorageNode.OperationMode.BOOTSTRAP); - prepareNodeForBootstrap(subject, newStorageNode, addresses.deepCopy(false)); + newStorageNode = storageNodeOperationsHandler.setMode(newStorageNode, + StorageNode.OperationMode.BOOTSTRAP); + storageNodeOperationsHandler.bootstrapStorageNode(subject, newStorageNode); } else { announceStorageNode(subject, newStorageNode, nextNode, addresses.deepCopy(false)); } @@ -369,32 +387,31 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) public void handleUnannounce(ResourceOperationHistory operationHistory) { StorageNode storageNode = findStorageNode(operationHistory.getResource()); - StorageNode removedStorageNode = null; + StorageNode removedStorageNode = findStorageNodeByMode(StorageNode.OperationMode.UNANNOUNCE); switch (operationHistory.getStatus()) { case INPROGRESS: // nothing to do here break; case CANCELED: - removedStorageNode = findStorageNodeByMode(StorageNode.OperationMode.UNANNOUNCE); undeploymentOperationCanceled(storageNode, operationHistory, removedStorageNode); break; case FAILURE: - removedStorageNode = findStorageNodeByMode(StorageNode.OperationMode.UNANNOUNCE); deploymentOperationFailed(storageNode, operationHistory, removedStorageNode); break; default: // SUCCESS storageNode.setMaintenancePending(false);
- removedStorageNode = findStorageNodeByMode(StorageNode.OperationMode.UNANNOUNCE); StorageNode nextNode = takeFromMaintenanceQueue(); Subject subject = getSubject(operationHistory); - Configuration params = operationHistory.getParameters(); - PropertyList addresses = params.getList("addresses");
if (nextNode == null) { log.info("Successfully unannounced " + removedStorageNode + " to storage cluster"); + removedStorageNode = storageNodeOperationsHandler.setMode(removedStorageNode, + StorageNode.OperationMode.UNINSTALL); uninstall(getSubject(operationHistory), removedStorageNode); } else { + Configuration params = operationHistory.getParameters(); + PropertyList addresses = params.getList("addresses"); unannounceStorageNode(subject, nextNode, addresses.deepCopy(false)); } } @@ -428,17 +445,15 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) public void handleAddNodeMaintenance(ResourceOperationHistory resourceOperationHistory) { StorageNode storageNode = findStorageNode(resourceOperationHistory.getResource()); - StorageNode newStorageNode = null; + StorageNode newStorageNode = findStorageNodeByMode(StorageNode.OperationMode.ADD_MAINTENANCE); switch (resourceOperationHistory.getStatus()) { case INPROGRESS: // nothing to do here return; case CANCELED: - newStorageNode = findStorageNodeByMode(StorageNode.OperationMode.ADD_MAINTENANCE); deploymentOperationCanceled(storageNode, resourceOperationHistory, newStorageNode); return; case FAILURE: - newStorageNode = findStorageNodeByMode(StorageNode.OperationMode.ADD_MAINTENANCE); deploymentOperationFailed(storageNode, resourceOperationHistory, newStorageNode); return; default: // SUCCESS @@ -448,9 +463,7 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa
if (nextNode == null) { log.info("Finished running add node maintenance on all cluster nodes"); - // TODO replace this with an UPDATE statement - newStorageNode = findStorageNodeByMode(StorageNode.OperationMode.ADD_MAINTENANCE); - newStorageNode.setOperationMode(StorageNode.OperationMode.NORMAL); + storageNodeOperationsHandler.setMode(newStorageNode, StorageNode.OperationMode.NORMAL); } else { Configuration parameters = resourceOperationHistory.getParameters(); boolean runRepair = parameters.getSimple(RUN_REPAIR_PROPERTY).getBooleanValue(); @@ -463,19 +476,39 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa
@Override @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) + public void bootstrapStorageNode(Subject subject, StorageNode storageNode) { + List<StorageNode> clusterNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, + StorageNode.class).setParameter("operationMode", StorageNode.OperationMode.NORMAL).getResultList(); + clusterNodes.add(storageNode); + prepareNodeForBootstrap(subject, storageNode, createPropertyListOfAddresses("addresses", clusterNodes)); + } + + private void prepareNodeForBootstrap(Subject subject, StorageNode storageNode, PropertyList addresses) { + if (log.isInfoEnabled()) { + log.info("Preparing to bootstrap " + storageNode + " into cluster..."); + } + StorageClusterSettings clusterSettings = storageClusterSettingsManager.getClusterSettings(subject); + Configuration parameters = new Configuration(); + parameters.put(new PropertySimple("cqlPort", clusterSettings.getCqlPort())); + parameters.put(new PropertySimple("gossipPort", clusterSettings.getGossipPort())); + parameters.put(addresses); + + scheduleOperation(subject, storageNode, parameters, "prepareForBootstrap"); + } + + @Override + @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) public void handleRemoveNodeMaintenance(ResourceOperationHistory operationHistory) { StorageNode storageNode = findStorageNode(operationHistory.getResource()); - StorageNode removedStorageNode = null; + StorageNode removedStorageNode = findStorageNodeByMode(StorageNode.OperationMode.REMOVE_MAINTENANCE); switch (operationHistory.getStatus()) { case INPROGRESS: // nothing to do here break; case CANCELED: - removedStorageNode = findStorageNodeByMode(StorageNode.OperationMode.REMOVE_MAINTENANCE); undeploymentOperationCanceled(storageNode, operationHistory, removedStorageNode); break; case FAILURE: - removedStorageNode = findStorageNodeByMode(StorageNode.OperationMode.REMOVE_MAINTENANCE); undeploymentOperationFailed(storageNode, operationHistory, removedStorageNode); break; 
default: // SUCCESS @@ -485,8 +518,8 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa
if (nextNode == null) { log.info("Finished running remove node maintenance on all cluster nodes"); - // TODO replace this with an UPDATE statement - removedStorageNode = findStorageNodeByMode(StorageNode.OperationMode.REMOVE_MAINTENANCE); + removedStorageNode = storageNodeOperationsHandler.setMode(removedStorageNode, + StorageNode.OperationMode.UNANNOUNCE); unannounceStorageNode(getSubject(operationHistory), removedStorageNode); } else { Configuration parameters = operationHistory.getParameters(); @@ -537,14 +570,11 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa } }
- private void finishUninstall(Subject subject, StorageNode storageNode) { - if (storageNode.getResource() != null) { - log.info("Removing storage node resource " + storageNode.getResource() + " from inventory"); - storageNodeOperationsHandler.detachFromResource(storageNode); - resourceManager.uninventoryResource(subject, storageNode.getResource().getId()); - } - log.info("Removing storage node entity " + storageNode + " from database"); - entityManager.remove(storageNode); + @Override + @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) + public StorageNode setMode(StorageNode storageNode, StorageNode.OperationMode newMode) { + storageNode.setOperationMode(newMode); + return entityManager.merge(storageNode); }
private Subject getSubject(ResourceOperationHistory resourceOperationHistory) { @@ -644,27 +674,6 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa return null; }
- @Override - public void bootstrapStorageNode(Subject subject, StorageNode storageNode) { - List<StorageNode> clusterNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, - StorageNode.class).setParameter("operationMode", StorageNode.OperationMode.NORMAL).getResultList(); - clusterNodes.add(storageNode); - prepareNodeForBootstrap(subject, storageNode, createPropertyListOfAddresses("addresses", clusterNodes)); - } - - private void prepareNodeForBootstrap(Subject subject, StorageNode storageNode, PropertyList addresses) { - if (log.isInfoEnabled()) { - log.info("Preparing to bootstrap " + storageNode + " into cluster..."); - } - StorageClusterSettings clusterSettings = storageClusterSettingsManager.getClusterSettings(subject); - Configuration parameters = new Configuration(); - parameters.put(new PropertySimple("cqlPort", clusterSettings.getCqlPort())); - parameters.put(new PropertySimple("gossipPort", clusterSettings.getGossipPort())); - parameters.put(addresses); - - scheduleOperation(subject, storageNode, parameters, "prepareForBootstrap"); - } - private StorageNode takeFromMaintenanceQueue() { List<StorageNode> storageNodes = entityManager.createQuery("SELECT s FROM StorageNode s WHERE " + "s.operationMode = :operationMode AND s.maintenancePending = :maintenancePending", StorageNode.class) diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java index 5d08dd8..f0a5b98 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java @@ -52,4 +52,6 @@ public interface StorageNodeOperationsHandlerLocal { void performRemoveNodeMaintenance(Subject subject, StorageNode 
storageNode);
void logError(StorageNode.OperationMode newStorageNodeOperationMode, String error, Exception e); + + StorageNode setMode(StorageNode storageNode, StorageNode.OperationMode newMode); }
commit d5660806b9af04345bbc7e54e3d21edebed7d641 Author: Stefan Negrea snegrea@redhat.com Date: Mon Aug 19 15:55:38 2013 -0500
[BZ 998049] Remove all core domain dependencies from Cassandra common modules.
This was causing maven build problems because core domain would need to be built before database util would run and setup the database.
diff --git a/.classpath b/.classpath index d590a22..f489722 100644 --- a/.classpath +++ b/.classpath @@ -218,6 +218,8 @@ <classpathentry kind="src" path="modules/helpers/ldap-tool/src/main/java"/> <classpathentry kind="src" path="modules/common/cassandra-schema/src/test/java"/> <classpathentry kind="src" path="modules/plugins/rhq-storage/src/test/java"/> + <classpathentry kind="src" path="modules/helpers/metrics-simulator/src/main/java"/> + <classpathentry kind="src" path="modules/common/cassandra-util/src/test/java"/> <classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.7"/> <classpathentry exported="true" kind="var" path="M2_REPO/org/apache/httpcomponents/httpclient/4.2.3/httpclient-4.2.3.jar" sourcepath="M2_REPO/org/apache/httpcomponents/httpclient/4.2.3/httpclient-4.2.3-sources.jar"/> <classpathentry exported="true" kind="var" path="M2_REPO/commons-io/commons-io/2.1/commons-io-2.1.jar" sourcepath="M2_REPO/commons-io/commons-io/2.1/commons-io-2.1-sources.jar"/> diff --git a/modules/common/cassandra-ccm/cassandra-ccm-arquillian/src/main/java/org/rhq/cassandra/ccm/arquillian/CCMSuiteDeploymentExtension.java b/modules/common/cassandra-ccm/cassandra-ccm-arquillian/src/main/java/org/rhq/cassandra/ccm/arquillian/CCMSuiteDeploymentExtension.java index 1aeef43..7c59114 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-arquillian/src/main/java/org/rhq/cassandra/ccm/arquillian/CCMSuiteDeploymentExtension.java +++ b/modules/common/cassandra-ccm/cassandra-ccm-arquillian/src/main/java/org/rhq/cassandra/ccm/arquillian/CCMSuiteDeploymentExtension.java @@ -26,7 +26,6 @@ package org.rhq.cassandra.ccm.arquillian;
import java.io.File; -import java.util.List; import java.util.concurrent.Callable;
import org.jboss.arquillian.config.descriptor.api.ArquillianDescriptor; @@ -62,7 +61,6 @@ import org.rhq.cassandra.ClusterInitService; import org.rhq.cassandra.DeploymentOptions; import org.rhq.cassandra.DeploymentOptionsFactory; import org.rhq.cassandra.schema.SchemaManager; -import org.rhq.core.domain.cloud.StorageNode;
/** * @author John Sanda @@ -114,7 +112,10 @@ public class CCMSuiteDeploymentExtension implements LoadableExtension {
SchemaManager schemaManager; ClusterInitService clusterInitService = new ClusterInitService(); - List<StorageNode> nodes = null; + + String[] nodes = null; + int[] jmxPorts = null; + int cqlPort = -1;
if (!Boolean.valueOf(System.getProperty("itest.use-external-storage-node", "false"))) {
@@ -131,13 +132,17 @@ public class CCMSuiteDeploymentExtension implements LoadableExtension { options.setStartRpc(true);
ccm = new CassandraClusterManager(options); - nodes = ccm.createCluster(); + ccm.createCluster(); + + nodes = ccm.getNodes(); + jmxPorts = ccm.getJmxPorts(); + cqlPort = ccm.getCqlPort();
ccm.startCluster(false);
try { - clusterInitService.waitForClusterToStart(nodes, nodes.size(), 1500, 20, 5); - schemaManager = new SchemaManager("rhqadmin", "rhqadmin", nodes); + clusterInitService.waitForClusterToStart(nodes, jmxPorts, nodes.length, 20, 5, 1500); + schemaManager = new SchemaManager("rhqadmin", "rhqadmin", nodes, cqlPort);
} catch (Exception e) { if (null != ccm) { @@ -148,7 +153,10 @@ public class CCMSuiteDeploymentExtension implements LoadableExtension { } else { try { String seed = System.getProperty("rhq.cassandra.seeds", "127.0.0.1|7299|9042"); - schemaManager = new SchemaManager("rhqadmin", "rhqadmin", seed); + nodes = parseNodeAddresses(seed); + cqlPort = parseNodeCqlPort(seed); + jmxPorts = parseNodeJmxPorts(seed); + schemaManager = new SchemaManager("rhqadmin", "rhqadmin", nodes, cqlPort);
} catch (Exception e) { throw new RuntimeException("External Cassandra initialization failed", e); @@ -157,7 +165,7 @@ public class CCMSuiteDeploymentExtension implements LoadableExtension {
try { schemaManager.install(); - clusterInitService.waitForSchemaAgreement(nodes); + clusterInitService.waitForSchemaAgreement(nodes, jmxPorts); schemaManager.updateTopology(); } catch (Exception e) { if (null != ccm) { @@ -260,5 +268,58 @@ public class CCMSuiteDeploymentExtension implements LoadableExtension { throw new RuntimeException("Could not load defined deploymentClass: " + className, e); } } + + private String[] parseNodeAddresses(String s) { + String[] unparsedNodes = s.split(","); + + String[] nodes = new String[unparsedNodes.length]; + + for (int index = 0; index < 0; index++) { + String[] params = unparsedNodes[index].split("\|"); + if (params.length != 3) { + throw new IllegalArgumentException( + "Expected string of the form, hostname|jmxPort|nativeTransportPort: [" + s + "]"); + } + + nodes[index] = params[0]; + } + + return nodes; + } + + private int[] parseNodeJmxPorts(String s) { + String[] unparsedNodes = s.split(","); + + int[] jmxPorts = new int[unparsedNodes.length]; + + for (int index = 0; index < 0; index++) { + String[] params = unparsedNodes[index].split("\|"); + if (params.length != 3) { + throw new IllegalArgumentException( + "Expected string of the form, hostname|jmxPort|nativeTransportPort: [" + s + "]"); + } + + jmxPorts[index] = Integer.parseInt(params[1]); + } + + return jmxPorts; + } + + private int parseNodeCqlPort(String s) { + String[] unparsedNodes = s.split(","); + + for (String unparsedNode : unparsedNodes) { + String[] params = unparsedNode.split("\|"); + if (params.length != 3) { + throw new IllegalArgumentException( + "Expected string of the form, hostname|jmxPort|nativeTransportPort: [" + s + "]"); + } + + return Integer.parseInt(params[2]); + } + + throw new IllegalArgumentException("Seed property is not valid [" + s + "]"); + } + } } \ No newline at end of file diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java 
b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java index c8bb2ef..4a02e1d 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java +++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java @@ -46,7 +46,6 @@ import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory;
-import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.pluginapi.util.ProcessExecutionUtility; import org.rhq.core.system.OperatingSystemType; import org.rhq.core.system.ProcessExecution; @@ -68,6 +67,11 @@ public class CassandraClusterManager { private List<File> installedNodeDirs = new ArrayList<File>(); private Map<Integer, Process> nodeProcessMap = new HashMap<Integer, Process>();
+ private String[] nodes; + private int[] jmxPorts; + private int cqlPort; + + public CassandraClusterManager() { this(new DeploymentOptionsFactory().newDeploymentOptions()); } @@ -90,7 +94,28 @@ public class CassandraClusterManager { } }
- public List<StorageNode> createCluster() { + /** + * @return addresses of storage cluster nodes + */ + public String[] getNodes() { + return nodes; + } + + /** + * @return the JMX ports + */ + public int[] getJmxPorts() { + return jmxPorts; + } + + /** + * @return the CQL Port + */ + public int getCqlPort() { + return cqlPort; + } + + public void createCluster() { if (log.isDebugEnabled()) { log.debug("Installing embedded " + deploymentOptions.getNumNodes() + " node cluster to " + deploymentOptions.getClusterDir()); @@ -104,11 +129,10 @@ public class CassandraClusterManager { if (installedMarker.exists()) { log.info("It appears that the cluster already exists in " + clusterDir); log.info("Skipping cluster creation."); - return calculateNodes(); + getStorageClusterConfiguration(); } FileUtil.purge(clusterDir, false);
- List<StorageNode> nodes = new ArrayList<StorageNode>(deploymentOptions.getNumNodes()); String seeds = collectionToString(calculateLocalIPAddresses(deploymentOptions.getNumNodes())); Set<InetAddress> ipAddresses = null;
@@ -118,6 +142,10 @@ public class CassandraClusterManager { throw new RuntimeException("Failed to get cluster IP addresses", e); }
+ this.nodes = new String[deploymentOptions.getNumNodes()]; + this.jmxPorts = new int[deploymentOptions.getNumNodes()]; + this.cqlPort = deploymentOptions.getNativeTransportPort(); + for (int i = 0; i < deploymentOptions.getNumNodes(); ++i) { File basedir = new File(deploymentOptions.getClusterDir(), "node" + i); String address = getLocalIPAddress(i + 1); @@ -142,15 +170,11 @@ public class CassandraClusterManager { deployer.unzipDistro(); deployer.applyConfigChanges(); deployer.updateFilePerms(); - - StorageNode storageNode = new StorageNode(); - storageNode.setAddress(address); - storageNode.setJmxPort(deploymentOptions.getJmxPort() + i); - storageNode.setCqlPort(nodeOptions.getNativeTransportPort()); - nodes.add(storageNode); - deployer.updateStorageAuthConf(ipAddresses);
+ this.nodes[i] = address; + this.jmxPorts[i] = deploymentOptions.getJmxPort() + i; + installedNodeDirs.add(basedir); } catch (Exception e) { log.error("Failed to install node at " + basedir); @@ -162,7 +186,6 @@ public class CassandraClusterManager { } catch (IOException e) { log.warn("Failed to write installed file marker to " + installedMarker, e); } - return nodes; }
private void updateStorageAuthConf(File basedir) { @@ -210,16 +233,14 @@ public class CassandraClusterManager { return ipAddresses; }
- private List<StorageNode> calculateNodes() { - List<StorageNode> nodes = new ArrayList<StorageNode>(deploymentOptions.getNumNodes()); + private void getStorageClusterConfiguration() { + this.nodes = new String[deploymentOptions.getNumNodes()]; + this.jmxPorts = new int[deploymentOptions.getNumNodes()]; for (int i = 0; i < deploymentOptions.getNumNodes(); ++i) { - StorageNode storageNode = new StorageNode(); - storageNode.setAddress(getLocalIPAddress(i + 1)); - storageNode.setJmxPort(deploymentOptions.getJmxPort() + i); - storageNode.setCqlPort(deploymentOptions.getNativeTransportPort()); - nodes.add(storageNode); + this.nodes[i] = getLocalIPAddress(i + 1); + this.jmxPorts[i] = deploymentOptions.getJmxPort() + i; } - return nodes; + this.cqlPort = deploymentOptions.getNativeTransportPort(); }
public void startCluster() { @@ -230,9 +251,9 @@ public class CassandraClusterManager { startCluster(getNodeIds());
if (waitForClusterToStart) { - List<StorageNode> nodes = calculateNodes(); + getStorageClusterConfiguration(); ClusterInitService clusterInitService = new ClusterInitService(); - clusterInitService.waitForClusterToStart(nodes, nodes.size(), 20); + clusterInitService.waitForClusterToStart(this.nodes, this.jmxPorts, this.nodes.length, 20); } }
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/ClusterInitService.java b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/ClusterInitService.java index 83851c5..cbbfad5 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/ClusterInitService.java +++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/ClusterInitService.java @@ -28,7 +28,6 @@ package org.rhq.cassandra; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedList; -import java.util.List; import java.util.Map; import java.util.Queue; import java.util.Set; @@ -42,8 +41,6 @@ import javax.management.remote.JMXServiceURL; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory;
-import org.rhq.core.domain.cloud.StorageNode; - /** * This class provides operations to ensure a cluster is initialized and in a consistent * state. It does not offer functionality for initializing a cluster but rather to make @@ -56,13 +53,25 @@ public final class ClusterInitService {
private final Log log = LogFactory.getLog(ClusterInitService.class);
- public boolean ping(List<StorageNode> storageNodes, int numHosts) { + private static final String JMX_CONNECTION_STRING = "service:jmx:rmi:///jndi/rmi://%s:%s/jmxrmi"; + + /** + * Pings the storage nodes to verify if they are available and native transport + * is running. + * + * @param storageNodes storage node addresses + * @param jmxPorts JMX ports + * @param numHosts minimum number of active hosts + * + * @return [true] cluster available with at least minimum number of hosts available, [false] otherwise + */ + public boolean ping(String[] storageNodes, int[] jmxPorts, int numHosts) { int connections = 0; long sleep = 100;
- for (StorageNode host : storageNodes) { + for (int index = 0; index < jmxPorts.length; index++) { try { - boolean isNativeTransportRunning = this.isNativeTransportRunning(host); + boolean isNativeTransportRunning = this.isNativeTransportRunning(storageNodes[index], jmxPorts[index]); if (isNativeTransportRunning) { ++connections; } @@ -71,7 +80,8 @@ public final class ClusterInitService { } } catch (Exception e) { if (log.isDebugEnabled()) { - log.debug("Unable to open JMX connection to cassandra node [" + host + "]", e); + log.debug("Unable to open JMX connection on port [" + jmxPorts[index] + "] to cassandra node [" + + storageNodes[index] + "]", e); } return false; } @@ -89,12 +99,12 @@ public final class ClusterInitService { * hosts. A runtime exception will be thrown after 10 failed retries. * <br/><br/> * After connecting to all nodes, this method will then sleep for a fixed delay. - * See {@link #waitForClusterToStart(java.util.List, int, int)} for details. - * - * @param hosts The cluster nodes to which a connection should be made + * See {@link #waitForClusterToStart(int, java.util.List, int)} for details. + * @param storageNodes The cluster nodes to which a connection should be made + * @param jmxPorts JMX port for each cluster node address */ - public void waitForClusterToStart(List<StorageNode> storageNodes) { - waitForClusterToStart(storageNodes, storageNodes.size(), 10); + public void waitForClusterToStart(String[] storageNodes, int jmxPorts[]) { + waitForClusterToStart(storageNodes, jmxPorts, storageNodes.length, 10); }
/** @@ -109,15 +119,15 @@ public final class ClusterInitService { * schema and to create the cassandra super user. Cassandra has a hard-coded delay of * 10 sceonds before it creates the super user, which means the rhq schema cannot be * created before that. - - * @param hosts The cluster nodes to which a connection should be made * @param numHosts The number of hosts to which a successful connection has to be made * before returning. * @param retries The number of times to retry connecting. A runtime exception will be * thrown when the number of failed connections exceeds this value. + * @param storageNodes The cluster nodes to which a connection should be made + * @param jmxPorts JMX port for each cluster node address */ - public void waitForClusterToStart(List<StorageNode> storageNodes, int numHosts, int retries) { - waitForClusterToStart(storageNodes, numHosts, 250, retries, 1); + public void waitForClusterToStart(String[] storageNodes, int jmxPorts[], int numHosts, int retries) { + waitForClusterToStart(storageNodes, jmxPorts, numHosts, 250, retries, 1); }
/** @@ -132,17 +141,17 @@ public final class ClusterInitService { * schema and to create the cassandra super user. Cassandra has a hard-coded delay of * 10 sceonds before it creates the super user, which means the rhq schema cannot be * created before that. - - * @param hosts The cluster nodes to which a connection should be made * @param numHosts The number of hosts to which a successful connection has to be made * before returning. * @param delay The amount of time wait between attempts to make a connection * @param retries The number of times to retry connecting. A runtime exception will be * thrown when the number of failed connections exceeds this value. * @param initialWait The amount of seconds before first try. + * @param storageNodes The cluster nodes to which a connection should be made + * @param jmxPorts JMX port for each cluster node address */ - public void waitForClusterToStart(List<StorageNode> storageNodes, int numHosts, long delay, int retries, - int initialWait) { + public void waitForClusterToStart(String[] storageNodes, int jmxPorts[], int numHosts, long delay, + int retries, int initialWait) { if (initialWait > 0) { try { if (log.isDebugEnabled()) { @@ -155,23 +163,28 @@ public final class ClusterInitService {
int connections = 0; int failedConnections = 0; - Queue<StorageNode> queue = new LinkedList<StorageNode>(storageNodes); - StorageNode storageNode = queue.poll(); + Queue<Integer> queue = new LinkedList<Integer>(); + for (int index = 0; index < storageNodes.length; index++) { + queue.add(index); + } + + Integer storageNodeIndex = queue.poll();
- while (storageNode != null) { + while (storageNodeIndex != null) { if (failedConnections >= retries) { throw new RuntimeException("Unable to verify that cluster nodes have started after " + failedConnections + " failed attempts"); } try { - boolean isNativeTransportRunning = this.isNativeTransportRunning(storageNode); + boolean isNativeTransportRunning = isNativeTransportRunning(storageNodes[storageNodeIndex], + jmxPorts[storageNodeIndex]); if (log.isDebugEnabled() && isNativeTransportRunning) { - log.debug("Successfully connected to cassandra node [" + storageNode + "]"); + log.debug("Successfully connected to cassandra node [" + storageNodes[storageNodeIndex] + "]"); } if (isNativeTransportRunning) { ++connections; } else { - queue.offer(storageNode); + queue.offer(storageNodeIndex); } if (connections == numHosts) { if (log.isDebugEnabled()) { @@ -186,9 +199,10 @@ public final class ClusterInitService { } } catch (Exception e) { ++failedConnections; - queue.offer(storageNode); + queue.offer(storageNodeIndex); if (log.isDebugEnabled()) { - log.debug("Unable to open JMX connection to cassandra node [" + storageNode + "].", e); + log.debug("Unable to open JMX connection on port [" + jmxPorts[storageNodeIndex] + + "] to cassandra node [" + storageNodes[storageNodeIndex] + "].", e); } else if (log.isInfoEnabled()) { log.debug("Unable to open connection to cassandra node."); } @@ -197,7 +211,7 @@ public final class ClusterInitService { Thread.sleep(delay); } catch (InterruptedException e) { } - storageNode = queue.poll(); + storageNodeIndex = queue.poll(); } }
@@ -209,8 +223,8 @@ public final class ClusterInitService { * * @param hosts The cluster nodes */ - public void waitForSchemaAgreement(List<StorageNode> storageNodes) throws Exception { - if (storageNodes == null) { + public void waitForSchemaAgreement(String[] storageNodes, int[] jmxPorts) throws Exception { + if (storageNodes == null || storageNodes.length == 0) { return; }
@@ -219,8 +233,8 @@ public final class ClusterInitService {
while (!schemaInAgreement) { Set<String> schemaVersions = new HashSet<String>(); - for (StorageNode host : storageNodes) { - String otherSchchemaVersion = getSchemaVersionForNode(host); + for (int index = 0; index < storageNodes.length; index++) { + String otherSchchemaVersion = getSchemaVersionForNode(storageNodes[index], jmxPorts[index]); if (otherSchchemaVersion != null) { schemaVersions.add(otherSchchemaVersion); } @@ -256,9 +270,9 @@ public final class ClusterInitService { } }
- public boolean isNativeTransportRunning(StorageNode storageNode) throws Exception { + public boolean isNativeTransportRunning(String storageNode, int jmxPort) throws Exception { Boolean nativeTransportRunning = false; - String url = storageNode.getJMXConnectionURL(); + String url = getJMXConnectionURL(storageNode, jmxPort); JMXServiceURL serviceURL = new JMXServiceURL(url); Map<String, String> env = new HashMap<String, String>(); JMXConnector connector = null; @@ -292,8 +306,8 @@ public final class ClusterInitService { return nativeTransportRunning; }
- private String getSchemaVersionForNode(StorageNode storageNode) throws Exception { - String url = storageNode.getJMXConnectionURL(); + private String getSchemaVersionForNode(String storageNode, int jmxPort) throws Exception { + String url = this.getJMXConnectionURL(storageNode, jmxPort); JMXServiceURL serviceURL = new JMXServiceURL(url); Map<String, String> env = new HashMap<String, String>(); JMXConnector connector = null; @@ -326,4 +340,17 @@ public final class ClusterInitService { } return null; } + + /** + * Constructs the JMX connection URL based on the node address and + * JMX port + * + * @param address + * @param jmxPort + * @return + */ + private String getJMXConnectionURL(String address, int jmxPort) { + String[] split = JMX_CONNECTION_STRING.split("%s"); + return split[0] + address + split[1] + jmxPort + split[2]; + } } \ No newline at end of file diff --git a/modules/common/cassandra-ccm/cassandra-ccm-maven-plugin/src/main/java/org/rhq/cassandra/ccm/maven/DeployMojo.java b/modules/common/cassandra-ccm/cassandra-ccm-maven-plugin/src/main/java/org/rhq/cassandra/ccm/maven/DeployMojo.java index f50535c..310d7a2 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-maven-plugin/src/main/java/org/rhq/cassandra/ccm/maven/DeployMojo.java +++ b/modules/common/cassandra-ccm/cassandra-ccm-maven-plugin/src/main/java/org/rhq/cassandra/ccm/maven/DeployMojo.java @@ -26,7 +26,6 @@ package org.rhq.cassandra.ccm.maven;
import java.io.File; -import java.util.List;
import org.apache.maven.plugin.AbstractMojo; import org.apache.maven.plugin.MojoExecutionException; @@ -38,7 +37,6 @@ import org.rhq.cassandra.CassandraClusterManager; import org.rhq.cassandra.DeploymentOptions; import org.rhq.cassandra.DeploymentOptionsFactory; import org.rhq.cassandra.schema.SchemaManager; -import org.rhq.core.domain.cloud.StorageNode;
/** * @author John Sanda @@ -63,14 +61,14 @@ public class DeployMojo extends AbstractMojo {
long start = System.currentTimeMillis(); getLog().info("Creating " + numNodes + " cluster in " + clusterDir); - List<StorageNode> nodes = ccm.createCluster(); + ccm.createCluster();
getLog().info("Starting cluster nodes"); ccm.startCluster();
getLog().info("Installing RHQ schema"); SchemaManager schemaManager = new SchemaManager(deploymentOptions.getUsername(), - deploymentOptions.getPassword(), nodes); + deploymentOptions.getPassword(), ccm.getNodes(), ccm.getCqlPort());
try { schemaManager.install(); diff --git a/modules/common/cassandra-ccm/cassandra-ccm-testng/src/main/java/org/rhq/cassandra/CCMTestNGListener.java b/modules/common/cassandra-ccm/cassandra-ccm-testng/src/main/java/org/rhq/cassandra/CCMTestNGListener.java index a9292f7..48d047d 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-testng/src/main/java/org/rhq/cassandra/CCMTestNGListener.java +++ b/modules/common/cassandra-ccm/cassandra-ccm-testng/src/main/java/org/rhq/cassandra/CCMTestNGListener.java @@ -27,7 +27,6 @@ package org.rhq.cassandra;
import java.io.File; import java.lang.reflect.Method; -import java.util.List;
import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -36,7 +35,6 @@ import org.testng.IInvokedMethodListener; import org.testng.ITestResult;
import org.rhq.cassandra.schema.SchemaManager; -import org.rhq.core.domain.cloud.StorageNode;
/** * @author John Sanda @@ -105,13 +103,15 @@ public class CCMTestNGListener implements IInvokedMethodListener { // we cannot initialize ccm here. ccm = new CassandraClusterManager(deploymentOptions); ClusterInitService clusterInitService = new ClusterInitService(); + ccm.createCluster();
- List<StorageNode> nodes = ccm.createCluster(); + String[] nodes = ccm.getNodes(); + int[] jmxPorts = ccm.getJmxPorts();
if (System.getProperty("rhq.cassandra.cluster.skip-shutdown") == null) { - for (StorageNode node : nodes) { + for (int index = 0; index < nodes.length; index++) { try { - if (clusterInitService.isNativeTransportRunning(node)) { + if (clusterInitService.isNativeTransportRunning(nodes[index], jmxPorts[index])) { throw new RuntimeException("A cluster is already running on the same ports."); } } catch (Exception e) { @@ -122,12 +122,13 @@ public class CCMTestNGListener implements IInvokedMethodListener { ccm.startCluster(false);
- clusterInitService.waitForClusterToStart(nodes, nodes.size(), 1500, 20, 2); + clusterInitService.waitForClusterToStart(nodes, jmxPorts, nodes.length, 1500, 20, 2);
- SchemaManager schemaManager = new SchemaManager(annotation.username(), annotation.password(), nodes); + SchemaManager schemaManager = new SchemaManager(annotation.username(), annotation.password(), nodes, + ccm.getCqlPort()); schemaManager.install(); if (annotation.waitForSchemaAgreement()) { - clusterInitService.waitForSchemaAgreement(nodes); + clusterInitService.waitForSchemaAgreement(nodes, jmxPorts); } schemaManager.updateTopology(); } diff --git a/modules/common/cassandra-schema/pom.xml b/modules/common/cassandra-schema/pom.xml index 6fb2915..077cdb9 100644 --- a/modules/common/cassandra-schema/pom.xml +++ b/modules/common/cassandra-schema/pom.xml @@ -13,16 +13,10 @@ <name>RHQ Cassandra Schema</name>
<dependencies> - <dependency> - <groupId>${project.groupId}</groupId> - <artifactId>rhq-cassandra-ccm-core</artifactId> - <version>${project.version}</version> - </dependency> - - <dependency> - <groupId>${project.groupId}</groupId> - <artifactId>rhq-cassandra-util</artifactId> - <version>${project.version}</version> + <dependency> + <groupId>${project.groupId}</groupId> + <artifactId>rhq-cassandra-util</artifactId> + <version>${project.version}</version> </dependency>
<dependency> @@ -36,13 +30,6 @@ </dependency>
<dependency> - <groupId>${project.groupId}</groupId> - <artifactId>rhq-core-domain</artifactId> - <version>${project.version}</version> - </dependency> - - - <dependency> <groupId>com.datastax.cassandra</groupId> <artifactId>cassandra-driver-core</artifactId> <version>${cassandra.driver.version}</version> diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/AbstractManager.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/AbstractManager.java index 7b8c520..7dcef1b 100644 --- a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/AbstractManager.java +++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/AbstractManager.java @@ -40,8 +40,6 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory;
import org.rhq.cassandra.util.ClusterBuilder; -import org.rhq.core.domain.cloud.StorageNode; -import org.rhq.core.util.StringUtil;
/** * @author Stefan Negrea @@ -71,13 +69,15 @@ abstract class AbstractManager { private Session session; private final String username; private final String password; - private List<StorageNode> nodes = new ArrayList<StorageNode>(); + private final int cqlPort; + private final String[] nodes; private final UpdateFile managementTasks;
- protected AbstractManager(String username, String password, List<StorageNode> nodes) { + protected AbstractManager(String username, String password, String[] nodes, int cqlPort) { try { this.username = username; this.password = password; + this.cqlPort = cqlPort; this.nodes = nodes; } catch (NoHostAvailableException e) { throw new RuntimeException("Unable create storage node session.", e); @@ -108,15 +108,11 @@ abstract class AbstractManager { protected void initClusterSession(String username, String password) { shutdownClusterConnection();
- String[] hostNames = new String[nodes.size()]; - for (int i = 0; i < hostNames.length; ++i) { - hostNames[i] = nodes.get(i).getAddress(); - }
- log.info("Initializing session to connect to " + StringUtil.arrayToString(hostNames)); + log.info("Initializing storage node session.");
- Cluster cluster = new ClusterBuilder().addContactPoints(hostNames).withCredentials(username, password) - .withPort(nodes.get(0).getCqlPort()).withCompression(Compression.NONE).build(); + Cluster cluster = new ClusterBuilder().addContactPoints(nodes).withCredentials(username, password) + .withPort(this.getCqlPort()).withCompression(Compression.NONE).build();
log.info("Cluster connection configured.");
@@ -140,7 +136,7 @@ abstract class AbstractManager { * @return cluster size */ protected int getClusterSize() { - return nodes.size(); + return nodes.length; }
/** @@ -158,6 +154,13 @@ abstract class AbstractManager { }
/** + * @return the cqlPort + */ + protected int getCqlPort() { + return cqlPort; + } + + /** * Runs a CQL query to check the existence of the RHQ user on the storage cluster. * * @return true if the RHQ user exists, false otherwise diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java index 1a82779..fdad697 100644 --- a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java +++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java @@ -25,7 +25,6 @@
package org.rhq.cassandra.schema;
-import java.util.ArrayList; import java.util.Arrays; import java.util.List;
@@ -34,8 +33,6 @@ import org.apache.log4j.Level; import org.apache.log4j.Logger; import org.apache.log4j.PatternLayout;
-import org.rhq.core.domain.cloud.StorageNode; - /** * @author John Sanda */ @@ -51,19 +48,29 @@ public class SchemaManager { */ private final String password;
- private final List<StorageNode> nodes = new ArrayList<StorageNode>(); + /** + * Node addresses + */ + private final String[] nodes;
/** * - * @param username The username RHQ will use to connect to the storage cluster. - * @param password The password RHQ will use to connect to the storage cluster. - * @param nodes A list of seeds nodes that are assumed to be already running and - * clustered prior to apply schema changes. The format for each node - * should be address|jmx_port|cql_port,address|jmx_port|cql_port. - * Each node consists of three fields that are pipe-delimited. */ - public SchemaManager(String username, String password, String... nodes) { - this(username, password, parseNodeInformation(nodes)); + private final int cqlPort; + + /** + * + * @param username The username RHQ will use to connect to the storage cluster + * @param password The password RHQ will use to connect to the storage cluster + * @param nodes A list of seeds nodes that are assumed to be already running and + * clustered prior to apply schema changes. + * @param cqlPort The native CQL port for the storage cluster + */ + public SchemaManager(String username, String password, String[] nodes, int cqlPort) { + this.username = username; + this.password = password; + this.cqlPort = cqlPort; + this.nodes = nodes; }
/** @@ -72,11 +79,13 @@ public class SchemaManager { * @param password The password RHQ will use to connect to the storage cluster. * @param nodes A list of seeds nodes that are assumed to be already running and * clustered prior to apply schema changes. + * @param cqlPort The native CQL port for the storage cluster */ - public SchemaManager(String username, String password, List<StorageNode> nodes) { + public SchemaManager(String username, String password, List<String> nodes, int cqlPort) { this.username = username; this.password = password; - this.nodes.addAll(nodes); + this.cqlPort = cqlPort; + this.nodes = nodes.toArray(new String[nodes.size()]); }
/** @@ -85,7 +94,7 @@ public class SchemaManager { * @throws Exception */ public void install() throws Exception { - VersionManager version = new VersionManager(username, password, nodes); + VersionManager version = new VersionManager(username, password, nodes, cqlPort); version.install(); }
@@ -96,7 +105,7 @@ public class SchemaManager { * @throws Exception */ public void checkCompatibility() throws Exception { - VersionManager version = new VersionManager(username, password, nodes); + VersionManager version = new VersionManager(username, password, nodes, cqlPort); version.checkCompatibility(); }
@@ -106,7 +115,7 @@ public class SchemaManager { * @throws Exception */ public void drop() throws Exception { - VersionManager version = new VersionManager(username, password, nodes); + VersionManager version = new VersionManager(username, password, nodes, cqlPort); version.drop(); }
@@ -118,7 +127,7 @@ public class SchemaManager { * @throws Exception */ public void updateTopology() throws Exception { - TopologyManager topology = new TopologyManager(username, password, nodes); + TopologyManager topology = new TopologyManager(username, password, nodes, cqlPort); topology.updateTopology(); }
@@ -127,28 +136,11 @@ public class SchemaManager { * * @return list of storage nodes */ - public List<StorageNode> getStorageNodes() { + protected String[] getStorageNodes() { return nodes; }
/** - * Parse raw string that contains the list of storage nodes. - * - * @param nodes list of storage nodes - * @return - */ - private static List<StorageNode> parseNodeInformation(String... nodes) { - List<StorageNode> parsedNodes = new ArrayList<StorageNode>(); - for (String node : nodes) { - StorageNode storageNode = new StorageNode(); - storageNode.parseNodeInformation(node); - parsedNodes.add(storageNode); - } - - return parsedNodes; - } - - /** * A main runner used for direct usage of the schema manager. * * @param args arguments @@ -164,19 +156,19 @@ public class SchemaManager { migratorLogging.setLevel(Level.ALL);
if (args.length < 4) { - System.out.println("Usage : command username password nodes..."); + System.out.println("Usage : command username password cqlPort nodes..."); System.out.println("\n"); System.out.println("Commands : install | drop | topology"); - System.out.println("Node format: hostname|jmxPort|cqlPort"); return; }
String command = args[0]; String username = args[1]; String password = args[2]; - String[] hosts = Arrays.copyOfRange(args, 3, args.length); + int cqlPort = Integer.parseInt(args[3]); + String[] hosts = Arrays.copyOfRange(args, 4, args.length);
- SchemaManager schemaManager = new SchemaManager(username, password, hosts); + SchemaManager schemaManager = new SchemaManager(username, password, hosts, cqlPort);
if ("install".equalsIgnoreCase(command)) { schemaManager.install(); diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/TopologyManager.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/TopologyManager.java index 6c08faa..481c006 100644 --- a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/TopologyManager.java +++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/TopologyManager.java @@ -1,37 +1,33 @@ /* * - * * RHQ Management Platform - * * Copyright (C) 2005-2012 Red Hat, Inc. - * * All rights reserved. - * * - * * This program is free software; you can redistribute it and/or modify - * * it under the terms of the GNU General Public License, version 2, as - * * published by the Free Software Foundation, and/or the GNU Lesser - * * General Public License, version 2.1, also as published by the Free - * * Software Foundation. - * * - * * This program is distributed in the hope that it will be useful, - * * but WITHOUT ANY WARRANTY; without even the implied warranty of - * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * * GNU General Public License and the GNU Lesser General Public License - * * for more details. - * * - * * You should have received a copy of the GNU General Public License - * * and the GNU Lesser General Public License along with this program; - * * if not, write to the Free Software Foundation, Inc., - * * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * RHQ Management Platform + * Copyright (C) 2005-2012 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation, and/or the GNU Lesser + * General Public License, version 2.1, also as published by the Free + * Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License and the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * and the GNU Lesser General Public License along with this program; + * if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * */
package org.rhq.cassandra.schema;
-import java.util.List; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory;
-import org.rhq.core.domain.cloud.StorageNode; - /** * @author Stefan Negrea */ @@ -56,8 +52,8 @@ class TopologyManager extends AbstractManager { } }
- public TopologyManager(String username, String password, List<StorageNode> nodes) { - super(username, password, nodes); + public TopologyManager(String username, String password, String[] nodes, int cqlPort) { + super(username, password, nodes, cqlPort); }
/** diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/VersionManager.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/VersionManager.java index fe6ddf9..05cee25 100644 --- a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/VersionManager.java +++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/VersionManager.java @@ -25,7 +25,6 @@
package org.rhq.cassandra.schema;
-import java.util.List; import java.util.Properties; import java.util.UUID;
@@ -37,7 +36,6 @@ import org.apache.commons.logging.LogFactory; import org.rhq.cassandra.schema.exception.InstalledSchemaTooAdvancedException; import org.rhq.cassandra.schema.exception.InstalledSchemaTooOldException; import org.rhq.cassandra.schema.exception.SchemaNotInstalledException; -import org.rhq.core.domain.cloud.StorageNode;
/** * @author Stefan Negrea @@ -64,8 +62,8 @@ class VersionManager extends AbstractManager { } }
- public VersionManager(String username, String password, List<StorageNode> nodes) throws Exception { - super(username, password, nodes); + public VersionManager(String username, String password, String[] nodes, int cqlPort) throws Exception { + super(username, password, nodes, cqlPort); }
/** diff --git a/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java b/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java index 9fc389c..42e531f 100644 --- a/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java +++ b/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java @@ -1,8 +1,11 @@ package org.rhq.cassandra.util;
+import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; import java.io.File; import java.io.FileInputStream; import java.io.FileNotFoundException; +import java.io.FileOutputStream; import java.io.FileWriter; import java.io.IOException; import java.util.List; @@ -11,9 +14,6 @@ import java.util.Map; import org.yaml.snakeyaml.DumperOptions; import org.yaml.snakeyaml.Yaml;
-import org.rhq.core.util.StringUtil; -import org.rhq.core.util.file.FileUtil; - /** * @author John Sanda */ @@ -57,7 +57,7 @@ public class ConfigEditor {
public void restore() { try { - FileUtil.copyFile(backupFile, configFile); + this.copyFile(backupFile, configFile); backupFile.delete(); yaml = null; config = null; @@ -70,7 +70,7 @@ public class ConfigEditor { private void createBackup() { backupFile = new File(configFile.getParent(), "." + configFile.getName() + ".bak"); try { - FileUtil.copyFile(configFile, backupFile); + this.copyFile(configFile, backupFile); } catch (IOException e) { throw new ConfigEditorException("Failed to create " + backupFile, e); } @@ -113,7 +113,16 @@ public class ConfigEditor { Map seedProvider = (Map) seedProviderList.get(0); List paramsList = (List) seedProvider.get("parameters"); Map params = (Map) paramsList.get(0); - params.put("seeds", StringUtil.arrayToString(seeds)); + + StringBuilder seedsString = new StringBuilder(); + for (int i = 0; i < seeds.length; i++) { + if (i > 0) { + seedsString.append(","); + } + + seedsString.append(seeds[i]); + } + params.put("seeds", seedsString.toString()); }
public Integer getNativeTransportPort() { @@ -132,4 +141,24 @@ public class ConfigEditor { config.put("storage_port", port); }
+ public static void copyFile(File inFile, File outFile) throws FileNotFoundException, IOException { + BufferedInputStream is = new BufferedInputStream(new FileInputStream(inFile)); + BufferedOutputStream os = new BufferedOutputStream(new FileOutputStream(outFile)); + + int bufferSize = 32768; + try { + is = new BufferedInputStream(is, bufferSize); + byte[] buffer = new byte[bufferSize]; + for (int bytesRead = is.read(buffer); bytesRead != -1; bytesRead = is.read(buffer)) { + os.write(buffer, 0, bytesRead); + } + os.flush(); + } catch (IOException ioe) { + throw new RuntimeException("Stream data cannot be copied", ioe); + } finally { + os.close(); + is.close(); + } + } + } diff --git a/modules/common/cassandra-util/src/test/java/org/rhq/cassandra/util/ConfigEditorTest.java b/modules/common/cassandra-util/src/test/java/org/rhq/cassandra/util/ConfigEditorTest.java index 9c3cc16..d101fc2 100644 --- a/modules/common/cassandra-util/src/test/java/org/rhq/cassandra/util/ConfigEditorTest.java +++ b/modules/common/cassandra-util/src/test/java/org/rhq/cassandra/util/ConfigEditorTest.java @@ -3,10 +3,14 @@ package org.rhq.cassandra.util; import static java.util.Arrays.asList; import static org.testng.Assert.assertEquals;
+import java.io.BufferedInputStream; import java.io.File; import java.io.FileInputStream; +import java.io.FileNotFoundException; import java.io.FileOutputStream; +import java.io.IOException; import java.io.InputStream; +import java.io.OutputStream; import java.lang.reflect.Method;
import org.apache.cassandra.config.Config; @@ -17,9 +21,6 @@ import org.yaml.snakeyaml.Loader; import org.yaml.snakeyaml.TypeDescription; import org.yaml.snakeyaml.Yaml;
-import org.rhq.core.util.file.FileUtil; -import org.rhq.core.util.stream.StreamUtil; - /** * @author John Sanda */ @@ -33,14 +34,14 @@ public class ConfigEditorTest { public void initTestDir(Method test) throws Exception { File dir = new File(getClass().getResource(".").toURI()); basedir = new File(dir, getClass().getSimpleName() + "/" + test.getName()); - FileUtil.purge(basedir, true); + purge(basedir, true); basedir.mkdirs();
configFile = new File(basedir, "cassandra.yaml");
InputStream inputStream = getClass().getResourceAsStream("/cassandra.yaml"); FileOutputStream outputStream = new FileOutputStream(configFile); - StreamUtil.copy(inputStream, outputStream); + copyStreams(inputStream, outputStream); }
@Test @@ -119,4 +120,40 @@ public class ConfigEditorTest { return (Config) yaml.load(inputStream); }
+ private static void purge(File dir, boolean deleteIt) { + if (dir != null) { + if (dir.isDirectory()) { + File[] doomedFiles = dir.listFiles(); + if (doomedFiles != null) { + for (File doomedFile : doomedFiles) { + purge(doomedFile, true); // call this method recursively + } + } + } + + if (deleteIt) { + dir.delete(); + } + } + + return; + } + + public static void copyStreams(InputStream is, OutputStream os) throws FileNotFoundException, IOException { + int bufferSize = 32768; + try { + is = new BufferedInputStream(is, bufferSize); + byte[] buffer = new byte[bufferSize]; + for (int bytesRead = is.read(buffer); bytesRead != -1; bytesRead = is.read(buffer)) { + os.write(buffer, 0, bytesRead); + } + os.flush(); + } catch (IOException ioe) { + throw new RuntimeException("Stream data cannot be copied", ioe); + } finally { + os.close(); + is.close(); + } + } + } diff --git a/modules/common/drift/pom.xml b/modules/common/drift/pom.xml index 9500e8d..b1347ab 100644 --- a/modules/common/drift/pom.xml +++ b/modules/common/drift/pom.xml @@ -16,15 +16,23 @@
<dependencies> <dependency> - <groupId>${project.groupId}</groupId> - <artifactId>test-utils</artifactId> - <version>${project.version}</version> - <scope>test</scope> + <groupId>${project.groupId}</groupId> + <artifactId>test-utils</artifactId> + <version>${project.version}</version> + <scope>test</scope> </dependency> + <dependency> - <groupId>commons-io</groupId> - <artifactId>commons-io</artifactId> - </dependency> + <groupId>${project.groupId}</groupId> + <artifactId>rhq-core-domain</artifactId> + <version>${project.version}</version> + <scope>provided</scope> + </dependency> + + <dependency> + <groupId>commons-io</groupId> + <artifactId>commons-io</artifactId> + </dependency> </dependencies>
<profiles> diff --git a/modules/common/filetemplate-bundle/pom.xml b/modules/common/filetemplate-bundle/pom.xml index b6c587e..8e60b26 100644 --- a/modules/common/filetemplate-bundle/pom.xml +++ b/modules/common/filetemplate-bundle/pom.xml @@ -15,6 +15,12 @@ <description>A library with the code common to the agent and server plugins for File Template Bundles</description>
<dependencies> + <dependency> + <groupId>${project.groupId}</groupId> + <artifactId>rhq-core-domain</artifactId> + <version>${project.version}</version> + <scope>provided</scope> + </dependency>
<dependency> <groupId>gnu-getopt</groupId> diff --git a/modules/common/jboss-as/pom.xml b/modules/common/jboss-as/pom.xml index a5d3255..16dd176 100644 --- a/modules/common/jboss-as/pom.xml +++ b/modules/common/jboss-as/pom.xml @@ -16,6 +16,13 @@
<dependencies> <dependency> + <groupId>${project.groupId}</groupId> + <artifactId>rhq-core-domain</artifactId> + <version>${project.version}</version> + <scope>provided</scope> + </dependency> + + <dependency> <groupId>ant</groupId> <artifactId>ant</artifactId> <version>1.6.5</version> diff --git a/modules/common/pom.xml b/modules/common/pom.xml index 45ba5c6..f957b0e 100644 --- a/modules/common/pom.xml +++ b/modules/common/pom.xml @@ -15,12 +15,6 @@ <description>parent POM for all RHQ common plugin libraries</description>
<dependencies> - <dependency> - <groupId>${project.groupId}</groupId> - <artifactId>rhq-core-domain</artifactId> - <version>${project.version}</version> - <scope>provided</scope> <!-- by PC --> - </dependency>
</dependencies>
diff --git a/modules/core/dbutils/pom.xml b/modules/core/dbutils/pom.xml index 1c66dd6..70783f4 100644 --- a/modules/core/dbutils/pom.xml +++ b/modules/core/dbutils/pom.xml @@ -204,11 +204,6 @@ <version>${project.version}</version> </dependency> <dependency> - <groupId>${project.groupId}</groupId> - <artifactId>rhq-core-domain</artifactId> - <version>${project.version}</version> - </dependency> - <dependency> <groupId>org.codehaus.groovy</groupId> <artifactId>groovy-all</artifactId> <version>2.1.3</version> diff --git a/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java b/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java index 5c8002a..56e2df0 100644 --- a/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java +++ b/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.net.InetAddress; import java.util.ArrayList; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Properties; import java.util.regex.Matcher; @@ -39,6 +40,7 @@ import org.rhq.common.jbossas.client.controller.DatasourceJBossASClient; import org.rhq.common.jbossas.client.controller.DeploymentJBossASClient; import org.rhq.common.jbossas.client.controller.WebJBossASClient; import org.rhq.core.db.DatabaseTypeFactory; +import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.util.PropertiesFileUpdate; import org.rhq.core.util.exception.ThrowableUtil; import org.rhq.enterprise.server.installer.ServerInstallUtil.ExistingSchemaOption; @@ -501,7 +503,7 @@ public class InstallerServiceImpl implements InstallerService { ServerInstallUtil.storeServerDetails(serverProperties, clearTextDbPassword, serverDetails);
ServerInstallUtil.persistStorageNodesIfNecessary(serverProperties, clearTextDbPassword, - storageNodeSchemaManager.getStorageNodes()); + parseNodeInformation(serverProperties)); }
@Override @@ -1154,12 +1156,31 @@ public class InstallerServiceImpl implements InstallerService { } }
+ private List<StorageNode> parseNodeInformation(HashMap<String, String> serverProps) { + String[] nodes = serverProps.get("rhq.cassandra.seeds").split(","); + + List<StorageNode> parsedNodes = new ArrayList<StorageNode>(); + for (String node : nodes) { + StorageNode storageNode = new StorageNode(); + storageNode.parseNodeInformation(node); + parsedNodes.add(storageNode); + } + + return parsedNodes; + } + private SchemaManager createStorageNodeSchemaManager(HashMap<String, String> serverProps) { - String[] hosts = serverProps.get("rhq.cassandra.seeds").split(","); String username = serverProps.get("rhq.cassandra.username"); String password = serverProps.get("rhq.cassandra.password");
- return new SchemaManager(username, password, hosts); + List<StorageNode> storageNodes = this.parseNodeInformation(serverProps); + String[] nodes = new String[storageNodes.size()]; + for (int index = 0; index < storageNodes.size(); index++) { + nodes[index] = storageNodes.get(index).getAddress(); + } + int cqlPort = storageNodes.get(0).getCqlPort(); + + return new SchemaManager(username, password, nodes, cqlPort); }
private void writeInstalledFileMarker() throws Exception { diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java index dbd599a..799abcc 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java @@ -121,7 +121,13 @@ public class StorageClientManagerBean { * @param storageNodes storage nodes */ private void checkSchemaCompability(String username, String password, List<StorageNode> storageNodes) { - SchemaManager schemaManager = new SchemaManager(username, password, storageNodes); + String[] nodes = new String[storageNodes.size()]; + for (int index = 0; index < storageNodes.size(); index++) { + nodes[index] = storageNodes.get(index).getAddress(); + } + int cqlPort = storageNodes.get(0).getCqlPort(); + + SchemaManager schemaManager = new SchemaManager(username, password, nodes, cqlPort); try { schemaManager.checkCompatibility(); } catch (Exception e) { diff --git a/modules/helpers/metrics-simulator/src/main/java/org/rhq/metrics/simulator/Simulator.java b/modules/helpers/metrics-simulator/src/main/java/org/rhq/metrics/simulator/Simulator.java index 40e00bf..a5a3994 100644 --- a/modules/helpers/metrics-simulator/src/main/java/org/rhq/metrics/simulator/Simulator.java +++ b/modules/helpers/metrics-simulator/src/main/java/org/rhq/metrics/simulator/Simulator.java @@ -28,7 +28,6 @@ package org.rhq.metrics.simulator; import java.io.File; import java.io.IOException; import java.util.HashSet; -import java.util.List; import java.util.PriorityQueue; import java.util.Set; import java.util.concurrent.Executors; @@ -52,7 +51,6 @@ import org.rhq.cassandra.DeploymentOptions; import org.rhq.cassandra.DeploymentOptionsFactory; import 
org.rhq.cassandra.schema.SchemaManager; import org.rhq.cassandra.util.ClusterBuilder; -import org.rhq.core.domain.cloud.StorageNode; import org.rhq.metrics.simulator.plan.ClusterConfig; import org.rhq.metrics.simulator.plan.ScheduleGroup; import org.rhq.metrics.simulator.plan.SimulationPlan; @@ -84,17 +82,16 @@ public class Simulator implements ShutdownManager { } });
- List<StorageNode> nodes = initCluster(plan); - - createSchema(nodes); + initCluster(plan); + createSchema();
Session session; if (plan.getClientCompression() == null) { - session = createSession(nodes); + session = createSession(); } else { ProtocolOptions.Compression compression = Enum.valueOf(ProtocolOptions.Compression.class, plan.getClientCompression().toUpperCase()); - session = createSession(nodes, compression); + session = createSession(compression); }
StorageSession storageSession = new StorageSession(session); @@ -172,17 +169,16 @@ public class Simulator implements ShutdownManager { log.info("Shut down complete"); }
- private List<StorageNode> initCluster(SimulationPlan plan) { + private void initCluster(SimulationPlan plan) { try { - List<StorageNode> nodes = deployCluster(plan.getClusterConfig()); - waitForClusterToInitialize(nodes); - return nodes; + deployCluster(plan.getClusterConfig()); + waitForClusterToInitialize(); } catch (Exception e) { throw new RuntimeException("Failed to start simulator. Cluster initialization failed.", e); } }
- private List<StorageNode> deployCluster(ClusterConfig clusterConfig) throws IOException { + private void deployCluster(ClusterConfig clusterConfig) throws IOException { File clusterDir = new File(clusterConfig.getClusterDir(), "cassandra"); log.info("Deploying cluster to " + clusterDir); clusterDir.mkdirs(); @@ -200,10 +196,8 @@ public class Simulator implements ShutdownManager { deploymentOptions.load();
ccm = new CassandraClusterManager(deploymentOptions); - List<StorageNode> nodes = ccm.createCluster(); + ccm.createCluster(); ccm.startCluster(false); - - return nodes; }
private void shutdownCluster() { @@ -211,26 +205,25 @@ public class Simulator implements ShutdownManager { ccm.shutdownCluster(); }
- private void waitForClusterToInitialize(List<StorageNode> nodes) { + private void waitForClusterToInitialize() { log.info("Waiting for cluster to initialize"); ClusterInitService clusterInitService = new ClusterInitService(); - clusterInitService.waitForClusterToStart(nodes, nodes.size(), 1500, 20, 2); + clusterInitService.waitForClusterToStart(ccm.getNodes(), ccm.getJmxPorts(), ccm.getNodes().length, 20, 2, 1500); }
- private void createSchema(List<StorageNode> nodes) { + private void createSchema() { try { log.info("Creating schema"); - SchemaManager schemaManager = new SchemaManager("rhqadmin", "rhqadmin", nodes); + SchemaManager schemaManager = new SchemaManager("rhqadmin", "rhqadmin", ccm.getNodes(), ccm.getCqlPort()); schemaManager.install(); } catch (Exception e) { throw new RuntimeException("Failed to start simulator. An error occurred during schema creation.", e); } }
- private Session createSession(List<StorageNode> nodes) throws NoHostAvailableException { + private Session createSession() throws NoHostAvailableException { try { - Cluster cluster = new ClusterBuilder() - .addContactPoints(getHostNames(nodes)) + Cluster cluster = new ClusterBuilder().addContactPoints(ccm.getNodes()).withPort(ccm.getCqlPort()) .withCredentials("rhqadmin", "rhqadmin") .build();
@@ -244,13 +237,12 @@ public class Simulator implements ShutdownManager { } }
- private Session createSession(List<StorageNode> nodes, ProtocolOptions.Compression compression) + private Session createSession(ProtocolOptions.Compression compression) throws NoHostAvailableException { try { log.debug("Creating session using " + compression.name() + " compression");
- Cluster cluster = new ClusterBuilder() - .addContactPoints(getHostNames(nodes)) + Cluster cluster = new ClusterBuilder().addContactPoints(ccm.getNodes()).withPort(ccm.getCqlPort()) .withCredentials("cassandra", "cassandra") .withCompression(compression) .build(); @@ -264,6 +256,7 @@ public class Simulator implements ShutdownManager { } }
+ @SuppressWarnings("deprecation") private Session initSession(Cluster cluster) { NodeFailureListener listener = new NodeFailureListener(); for (Host host : cluster.getMetadata().getAllHosts()) { @@ -273,14 +266,6 @@ public class Simulator implements ShutdownManager { return cluster.connect("rhq"); }
- private String[] getHostNames(List<StorageNode> nodes) { - String[] hostnames = new String[nodes.size()]; - for (int i = 0; i < hostnames.length; ++i) { - hostnames[i] = nodes.get(i).getAddress(); - } - return hostnames; - } - private Set<Schedule> initSchedules(ScheduleGroup scheduleSet) { long nextCollection = System.currentTimeMillis(); Set<Schedule> schedules = new HashSet<Schedule>(); diff --git a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java index 63517e1..50f8156 100644 --- a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java +++ b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java @@ -150,9 +150,11 @@ public class StorageNodeComponentITest { storageNode.parseNodeInformation("127.0.0.1|7399|9142");
ClusterInitService clusterInitService = new ClusterInitService(); - clusterInitService.waitForClusterToStart(asList(storageNode)); + clusterInitService.waitForClusterToStart(new String[] { storageNode.getAddress() }, + new int[] { storageNode.getJmxPort() });
- SchemaManager schemaManager = new SchemaManager("rhqadmin", "rhqadmin", "127.0.0.1|7399|9142"); + SchemaManager schemaManager = new SchemaManager("rhqadmin", "rhqadmin", + new String[] { storageNode.getAddress() }, storageNode.getCqlPort()); schemaManager.install(); schemaManager.updateTopology(); }
commit 10f975532af27603021a0063a305b9cae0f421a4 Author: John Sanda jsanda@redhat.com Date: Mon Aug 19 16:14:23 2013 -0400
shortening mode name so we now have ADD_MAINTENANCE and REMOVE_MAINTENANCE
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java index e0f278d..aac8cb6 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java @@ -247,7 +247,7 @@ public class StorageNode implements Serializable { return Status.INSTALLED; } if (operationMode == OperationMode.ANNOUNCE || operationMode == OperationMode.BOOTSTRAP || - operationMode == OperationMode.ADD_NODE_MAINTENANCE) { + operationMode == OperationMode.ADD_MAINTENANCE) { if (errorMessage == null && failedOperation == null) { return Status.JOINING; } else { @@ -280,7 +280,7 @@ public class StorageNode implements Serializable { "gossip from its IP address."), BOOTSTRAP("The storage is installed but not yet part of the cluster. It is getting bootstrapped into the " + "cluster"), - ADD_NODE_MAINTENANCE("The storage node is running and is preparing to undergo routine maintenance that is " + + ADD_MAINTENANCE("The storage node is running and is preparing to undergo routine maintenance that is " + "necessary when a new node joins the cluster."), REMOVE_MAINTENANCE("The storage node is no longer part of the cluster. 
Remaining storage node are " + "undergoing cluster maintenance due to the topology change."), diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 55593d3..4d4fd17 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -246,7 +246,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN reset(); storageNodeOperationsHandler.bootstrapStorageNode(subject, storageNode); break; - case ADD_NODE_MAINTENANCE: + case ADD_MAINTENANCE: reset(); storageNodeOperationsHandler.performAddNodeMaintenance(subject, storageNode); default: @@ -272,7 +272,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN reset(); storageNodeOperationsHandler.unannounceStorageNode(subject, storageNode); break; - case ADD_NODE_MAINTENANCE: + case ADD_MAINTENANCE: case NORMAL: case DECOMMISSION: reset(); diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java index b619e72..b068734 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java @@ -34,7 +34,6 @@ import org.rhq.enterprise.server.auth.SessionManager; import org.rhq.enterprise.server.auth.SubjectManagerLocal; import org.rhq.enterprise.server.cloud.StorageNodeManagerLocal; import org.rhq.enterprise.server.operation.OperationManagerLocal; -import 
org.rhq.enterprise.server.resource.ResourceFactoryManagerLocal; import org.rhq.enterprise.server.resource.ResourceManagerLocal; import org.rhq.server.metrics.StorageSession;
@@ -193,7 +192,7 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa
@Override public void performAddNodeMaintenance(Subject subject, StorageNode storageNode) { - storageNode.setOperationMode(StorageNode.OperationMode.ADD_NODE_MAINTENANCE); + storageNode.setOperationMode(StorageNode.OperationMode.ADD_MAINTENANCE); List<StorageNode> clusterNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, StorageNode.class).setParameter("operationMode", StorageNode.OperationMode.NORMAL) .getResultList(); @@ -294,7 +293,7 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa String msg = "Aborting storage node deployment due to unexpected error while performing add node " + "maintenance."; log.error(msg, e); - storageNodeOperationsHandler.logError(StorageNode.OperationMode.ADD_NODE_MAINTENANCE, msg, e); + storageNodeOperationsHandler.logError(StorageNode.OperationMode.ADD_MAINTENANCE, msg, e); } } else if (operationHistory.getOperationDefinition().getName().equals("decommission")) { try { @@ -435,11 +434,11 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa // nothing to do here return; case CANCELED: - newStorageNode = findStorageNodeByMode(StorageNode.OperationMode.ADD_NODE_MAINTENANCE); + newStorageNode = findStorageNodeByMode(StorageNode.OperationMode.ADD_MAINTENANCE); deploymentOperationCanceled(storageNode, resourceOperationHistory, newStorageNode); return; case FAILURE: - newStorageNode = findStorageNodeByMode(StorageNode.OperationMode.ADD_NODE_MAINTENANCE); + newStorageNode = findStorageNodeByMode(StorageNode.OperationMode.ADD_MAINTENANCE); deploymentOperationFailed(storageNode, resourceOperationHistory, newStorageNode); return; default: // SUCCESS @@ -450,7 +449,7 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa if (nextNode == null) { log.info("Finished running add node maintenance on all cluster nodes"); // TODO replace this with an UPDATE statement - newStorageNode = 
findStorageNodeByMode(StorageNode.OperationMode.ADD_NODE_MAINTENANCE); + newStorageNode = findStorageNodeByMode(StorageNode.OperationMode.ADD_MAINTENANCE); newStorageNode.setOperationMode(StorageNode.OperationMode.NORMAL); } else { Configuration parameters = resourceOperationHistory.getParameters();
commit 6bb94ed2094f2380b15aae91e4d3075daf8c81f9 Author: John Sanda jsanda@redhat.com Date: Mon Aug 19 15:51:14 2013 -0400
prepareForBootstrap operation should continue if the storage node is already stopped
Previously the prepareForBootstrap operation would fail if the shutdown operation failed, which will happen if the node is already stopped. There is no reason to fail the prepareForBootstrap operation here because we want the node shut down. And if C* bootstrapping previously failed, the storage node is likely down.
diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java index 7455f5e..2974beb 100644 --- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java @@ -290,6 +290,8 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper }
private OperationResult uninstall() { + log.info("Uninstalling storage node at " + getResourceContext().getResourceKey()); + OperationResult result = new OperationResult(); OperationResult shutdownResult = shutdownIfNecessary(); if (shutdownResult.getErrorMessage() != null) { @@ -363,14 +365,13 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper OperationResult result = new OperationResult();
log.info("Stopping storage node"); - OperationResult stopNodeResult = shutdownStorageNode(); - if (stopNodeResult.getErrorMessage() != null) { - log.error("Failed to stop storage node " + this + " Cannot prepare the node for bootstrap which means " + - "that the storage node cannot join the cluster. Make sure the storage node is not running and retry " + - "the operation"); - result.setErrorMessage("Failed to stop storage node. Cannot prepare the node for bootstrap which means " + - "that it cannot join the cluster. Make sure that the node is not running and retry the operation. " + - "Stopping the storage node failed with this error: " + stopNodeResult.getErrorMessage()); + OperationResult shutdownResult = shutdownIfNecessary(); + if (shutdownResult.getErrorMessage() != null) { + log.error("Failed to stop storage node " + getResourceContext().getResourceKey() + ". The storage node " + + "must be shut down in order for the changes made by this operation to take effect."); + result.setErrorMessage("Failed to stop the storage node. The storage node must be shut down in order " + + "for the changes made by this operation to take effect. The attempt to stop shut down the storage " + + "node failed with this error: " + shutdownResult.getErrorMessage()); return result; }
commit 229dcef27a21694a40fa23120358d4de5dd464a4 Author: John Sanda jsanda@redhat.com Date: Mon Aug 19 15:25:55 2013 -0400
fixing typo in api change justification
diff --git a/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml index 143ae14..5bfa334 100644 --- a/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml +++ b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml @@ -67,7 +67,7 @@ <difference> <className>org/rhq/enterprise/server/cloud/StorageNodeManagerRemote</className> <differenceType>7012</differenceType> <!-- method added to an interface --> - <method>void unDeployStorageNode(org.rhq.core.domain.auth.Subject, org.rhq.core.domain.cloud.StorageNode)</method> + <method>void undeployStorageNode(org.rhq.core.domain.auth.Subject, org.rhq.core.domain.cloud.StorageNode)</method> <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> </difference>
commit d7ea5f7940953162c32e951fe3a85556aa8f2e69 Author: John Sanda jsanda@redhat.com Date: Mon Aug 19 15:20:04 2013 -0400
refactor common operation scheduling code into a util method
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java index f2fc108..b619e72 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java @@ -103,16 +103,11 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa if (log.isInfoEnabled()) { log.info("Announcing " + newStorageNode + " to cluster node " + clusterNode); } - ResourceOperationSchedule schedule = new ResourceOperationSchedule(); - schedule.setResource(clusterNode.getResource()); - schedule.setJobTrigger(JobTrigger.createNowTrigger()); - schedule.setSubject(subject); - schedule.setOperationName("announce"); + Configuration parameters = new Configuration(); parameters.put(addresses); - schedule.setParameters(parameters);
- operationManager.scheduleResourceOperation(subject, schedule); + scheduleOperation(subject, clusterNode, parameters, "announce"); }
@Override @@ -129,16 +124,10 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa }
private void unannounceStorageNode(Subject subject, StorageNode clusterNode, PropertyList addresses) { - ResourceOperationSchedule schedule = new ResourceOperationSchedule(); - schedule.setResource(clusterNode.getResource()); - schedule.setJobTrigger(JobTrigger.createNowTrigger()); - schedule.setSubject(subject); - schedule.setOperationName("unannounce"); Configuration parameters = new Configuration(); parameters.put(addresses); - schedule.setParameters(parameters);
- operationManager.scheduleResourceOperation(subject, schedule); + scheduleOperation(subject, clusterNode, parameters, "unannounce"); }
@Override @@ -150,15 +139,7 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa if (storageNode.getResource() == null) { finishUninstall(subject, storageNode); } else { - ResourceOperationSchedule schedule = new ResourceOperationSchedule(); - schedule.setResource(storageNode.getResource()); - schedule.setJobTrigger(JobTrigger.createNowTrigger()); - schedule.setSubject(subject); - schedule.setOperationName("uninstall"); - Configuration parameters = new Configuration(); - schedule.setParameters(parameters); - - operationManager.scheduleResourceOperation(subject, schedule); + scheduleOperation(subject, storageNode, new Configuration(), "uninstall"); } }
@@ -183,14 +164,7 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa // queue up storage nodes during cluster maintenance operations. storageNode.setMaintenancePending(runRepair);
- ResourceOperationSchedule schedule = new ResourceOperationSchedule(); - schedule.setOperationName("decommission"); - schedule.setResource(storageNode.getResource()); - schedule.setJobTrigger(JobTrigger.createNowTrigger()); - schedule.setSubject(subject); - schedule.setParameters(new Configuration()); - - operationManager.scheduleResourceOperation(subject, schedule); + scheduleOperation(subject, storageNode, new Configuration(), "decommission"); }
@Override @@ -238,21 +212,12 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa if (log.isInfoEnabled()) { log.info("Running addNodeMaintenance for storage node " + storageNode); } + Configuration params = new Configuration(); + params.put(seedsList); + params.put(new PropertySimple(RUN_REPAIR_PROPERTY, runRepair)); + params.put(new PropertySimple(UPDATE_SEEDS_LIST, Boolean.TRUE));
- ResourceOperationSchedule schedule = new ResourceOperationSchedule(); - schedule.setResource(storageNode.getResource()); - schedule.setJobTrigger(JobTrigger.createNowTrigger()); - schedule.setSubject(subject); - schedule.setOperationName("addNodeMaintenance"); - - Configuration config = new Configuration(); - config.put(seedsList); - config.put(new PropertySimple(RUN_REPAIR_PROPERTY, runRepair)); - config.put(new PropertySimple(UPDATE_SEEDS_LIST, Boolean.TRUE)); - - schedule.setParameters(config); - - operationManager.scheduleResourceOperation(subject, schedule); + scheduleOperation(subject, storageNode, params, "addNodeMaintenance"); }
@Override @@ -286,21 +251,12 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa if (log.isInfoEnabled()) { log.info("Running remove node maintenance for storage node " + storageNode); } + Configuration params = new Configuration(); + params.put(seedsList); + params.put(new PropertySimple(RUN_REPAIR_PROPERTY, runRepair)); + params.put(new PropertySimple(UPDATE_SEEDS_LIST, true));
- ResourceOperationSchedule schedule = new ResourceOperationSchedule(); - schedule.setResource(storageNode.getResource()); - schedule.setJobTrigger(JobTrigger.createNowTrigger()); - schedule.setSubject(subject); - schedule.setOperationName("removeNodeMaintenance"); - - Configuration config = new Configuration(); - config.put(seedsList); - config.put(new PropertySimple(RUN_REPAIR_PROPERTY, runRepair)); - config.put(new PropertySimple(UPDATE_SEEDS_LIST, Boolean.TRUE)); - - schedule.setParameters(config); - - operationManager.scheduleResourceOperation(subject, schedule); + scheduleOperation(subject, storageNode, params, "removeNodeMaintenance"); }
@Override @@ -578,7 +534,7 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa break; default: // SUCCESS log.info("Successfully uninstalled " + storageNode + " from disk"); - uninstall(getSubject(operationHistory), storageNode); + finishUninstall(getSubject(operationHistory), storageNode); } }
@@ -701,22 +657,13 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa if (log.isInfoEnabled()) { log.info("Preparing to bootstrap " + storageNode + " into cluster..."); } - - ResourceOperationSchedule schedule = new ResourceOperationSchedule(); - schedule.setResource(storageNode.getResource()); - schedule.setJobTrigger(JobTrigger.createNowTrigger()); - schedule.setSubject(subject); - schedule.setOperationName("prepareForBootstrap"); - StorageClusterSettings clusterSettings = storageClusterSettingsManager.getClusterSettings(subject); Configuration parameters = new Configuration(); parameters.put(new PropertySimple("cqlPort", clusterSettings.getCqlPort())); parameters.put(new PropertySimple("gossipPort", clusterSettings.getGossipPort())); parameters.put(addresses);
- schedule.setParameters(parameters); - - operationManager.scheduleResourceOperation(subject, schedule); + scheduleOperation(subject, storageNode, parameters, "prepareForBootstrap"); }
private StorageNode takeFromMaintenanceQueue() { @@ -832,6 +779,19 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa return value; }
+ private void scheduleOperation(Subject subject, StorageNode storageNode, Configuration parameters, + String operation) { + ResourceOperationSchedule schedule = new ResourceOperationSchedule(); + schedule.setResource(storageNode.getResource()); + schedule.setJobTrigger(JobTrigger.createNowTrigger()); + schedule.setSubject(subject); + schedule.setOperationName(operation); + schedule.setParameters(parameters); + + operationManager.scheduleResourceOperation(subject, schedule); + + } + private PropertyList createPropertyListOfAddresses(String propertyName, List<StorageNode> nodes) { PropertyList list = new PropertyList(propertyName); for (StorageNode storageNode : nodes) {
commit d37ddf8580e2d5a08ac473b1cc5fe03c57ee3314 Author: John Sanda jsanda@redhat.com Date: Mon Aug 19 14:33:08 2013 -0400
fixing API check for new undeploy method in remote API
diff --git a/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml index 08e793c..143ae14 100644 --- a/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml +++ b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml @@ -65,6 +65,13 @@ </difference>
<difference> + <className>org/rhq/enterprise/server/cloud/StorageNodeManagerRemote</className> + <differenceType>7012</differenceType> <!-- method added to an interface --> + <method>void unDeployStorageNode(org.rhq.core.domain.auth.Subject, org.rhq.core.domain.cloud.StorageNode)</method> + <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> + </difference> + + <difference> <className>org/rhq/enterprise/server/bundle/BundleManagerRemote</className> <differenceType>7012</differenceType> <!-- method added to an interface --> <method>void assignBundlesToBundleGroup(org.rhq.core.domain.auth.Subject, int, int[])</method>
commit e9976eeee2677b67e7a4eb0ad75840b0151241da Author: Jirka Kremser jkremser@redhat.com Date: Mon Aug 19 19:41:08 2013 +0200
Adding the support for storage node (un)deployment in coregui.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java index d1ea625..c49f697 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java @@ -124,7 +124,7 @@ public class StorageNodeAdminView extends EnhancedVLayout implements Bookmarkabl public void onFailure(Throwable caught) { Message message = new Message("Unable to render storage node alert view: " + caught.getMessage(), Message.Severity.Warning); - CoreGUI.goToView(VIEW_ID.getName(), message); + CoreGUI.goToView(StorageNodeTableView.VIEW_PATH, message); }
@Override diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java index 685fb5d..9ee4f28 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java @@ -31,6 +31,7 @@ import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDat import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_MTIME; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_OPERATION_MODE; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_RESOURCE_ID; +import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_STATUS;
import java.util.ArrayList; import java.util.List; @@ -63,7 +64,6 @@ import org.rhq.enterprise.gui.coregui.client.LinkManager; import org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.StorageNodeLoadCompositeDatasourceField; import org.rhq.enterprise.gui.coregui.client.components.table.TimestampCellFormatter; import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; -import org.rhq.enterprise.gui.coregui.client.inventory.resource.AncestryUtil; import org.rhq.enterprise.gui.coregui.client.util.Log; import org.rhq.enterprise.gui.coregui.client.util.MeasurementConverterClient; import org.rhq.enterprise.gui.coregui.client.util.RPCDataSource; @@ -144,7 +144,7 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNodeLoadComposit // cqlField.setHidden(true); // fields.add(cqlField);
- field = FIELD_OPERATION_MODE.getListGridField("90"); + field = FIELD_STATUS.getListGridField("90"); field.setCellFormatter(new CellFormatter() { public String format(Object value, ListGridRecord listGridRecord, int i, int i1) { if (listGridRecord.getAttribute(FIELD_ERROR_MESSAGE.propertyName()) != null @@ -154,6 +154,7 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNodeLoadComposit return value.toString(); } }); + field.setShowHover(true); field.setHoverCustomizer(new HoverCustomizer() { public String hoverHTML(Object value, ListGridRecord record, int rowNum, int colNum) { @@ -235,6 +236,7 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNodeLoadComposit record.setAttribute(FIELD_JMX_PORT.propertyName(), node.getJmxPort()); record.setAttribute(FIELD_CQL_PORT.propertyName(), node.getCqlPort()); record.setAttribute(FIELD_OPERATION_MODE.propertyName(), node.getOperationMode()); + record.setAttribute(FIELD_STATUS.propertyName(), node.getStatus()); record.setAttribute(FIELD_ERROR_MESSAGE.propertyName(), node.getErrorMessage()); if (node.getFailedOperation() != null && node.getFailedOperation().getResource() != null) { ResourceOperationHistory operationHistory = node.getFailedOperation(); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasourceField.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasourceField.java index 04a1767..ada97fa 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasourceField.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasourceField.java @@ -40,6 +40,8 @@ public enum StorageNodeDatasourceField {
FIELD_OPERATION_MODE("operationMode", CoreGUI.getMessages().view_adminTopology_server_mode()),
+ FIELD_STATUS("status", "Status"), + FIELD_MEMORY("memory", "Memory"),
FIELD_DISK("disk", "Disk"), diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java index 4055e6f..f5de561 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java @@ -77,6 +77,9 @@ import org.rhq.enterprise.gui.coregui.client.util.message.Message; public class StorageNodeDetailView extends EnhancedVLayout implements BookmarkableView {
private final int storageNodeId; + +// String path = StorageNodeAdminView.VIEW_PATH + "/" + storageNodeId; +// CoreGUI.goToView(path, message);
private static final int SECTION_COUNT = 3; private final SectionStack sectionStack; @@ -121,9 +124,7 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab new AsyncCallback<PageList<StorageNode>>() { public void onSuccess(final PageList<StorageNode> storageNodes) { if (storageNodes == null || storageNodes.isEmpty() || storageNodes.size() != 1) { - CoreGUI.getErrorHandler().handleError( - MSG.view_adminTopology_message_fetchServerFail(String.valueOf(storageNodeId))); - initSectionCount = SECTION_COUNT; + onFailure(new Exception("No storage nodes have been found.")); } final StorageNode node = storageNodes.get(0); header.setContents("<div style='text-align: center; font-weight: bold; font-size: medium;'> Storage Node (" @@ -166,6 +167,7 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab Message message = new Message(MSG.view_configurationHistoryDetails_error_loadFailure(), Message.Severity.Warning); initSectionCount = SECTION_COUNT; + CoreGUI.getMessageCenter().notify(message); }
@Override @@ -199,7 +201,10 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab new AsyncCallback<Map<String, List<MeasurementDataNumericHighLowComposite>>>() { @Override public void onFailure(Throwable caught) { - + Message message = new Message("Unable to fetch storage node load data.", + Message.Severity.Warning); + initSectionCount = SECTION_COUNT; + CoreGUI.getMessageCenter().notify(message); }
@Override @@ -357,7 +362,6 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab ResourceOperationHistory operationHistory = storageNode.getFailedOperation(); String value = LinkManager.getSubsystemResourceOperationHistoryLink(operationHistory.getResource().getId(), operationHistory.getId()); - // String value = "#Resource/" + operationHistory.getResource().getId() + "/Operations/History/" + operationHistory.getId()); lastOperation = new StaticTextItem("lastOp", "Operation"); lastOperation.setValue(LinkManager.getHref(value, operationHistory.getOperationDefinition() .getDisplayName())); @@ -389,7 +393,6 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab loadLayout.setWidth100(); LayoutSpacer spacer = new LayoutSpacer(); spacer.setHeight(10); -// HTMLFlow loadLabel = new HTMLFlow("<span style='font-weight:bold'>Status</span>"); HTMLFlow loadLabel = new HTMLFlow("Status"); loadLabel.addStyleName("formTitle"); loadLabel.setTooltip("Contains selected metrics collected for last 8 hours."); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java index 84c1586..50cb614 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java @@ -20,7 +20,7 @@ package org.rhq.enterprise.gui.coregui.client.admin.storage;
import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_ADDRESS; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_ALERTS; -import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_RESOURCE_ID; +import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.*;
import java.util.ArrayList; import java.util.Arrays; @@ -42,6 +42,7 @@ import com.smartgwt.client.widgets.grid.ListGridField; import com.smartgwt.client.widgets.grid.ListGridRecord;
import org.rhq.core.domain.authz.Permission; +import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.cloud.StorageNode.OperationMode; import org.rhq.enterprise.gui.coregui.client.CoreGUI; import org.rhq.enterprise.gui.coregui.client.LinkManager; @@ -85,7 +86,7 @@ public class StorageNodeTableView extends TableSection<StorageNodeDatasource> { protected void doOnDraw() { super.doOnDraw(); // commenting out this call, because it caused UI to freeze -// scheduleUnacknowledgedAlertsPollingJob(getListGrid()); + // scheduleUnacknowledgedAlertsPollingJob(getListGrid()); }
@Override @@ -204,6 +205,80 @@ public class StorageNodeTableView extends TableSection<StorageNodeDatasource> {
private void showCommonActions() { addInvokeOperationsAction(); + addDeployAction(); + addUndeployAction(); + } + + private void addUndeployAction() { + final ParametrizedMessage question = new ParametrizedMessage() { + @Override + public String getMessage(String... param) { + return "Are you sure, you want to run the undeploy operation on selected nodes: " + param[0] + + " ? It may take a while to complete."; + } + }; + final ParametrizedMessage success = new ParametrizedMessage() { + @Override + public String getMessage(String... param) { + return "Starting the undeploy operation on storage nodes " + param[0]; + } + }; + final ParametrizedMessage failure = new ParametrizedMessage() { + @Override + public String getMessage(String... param) { + return "Invoking the undeploy operation failed for storage nodes " + param[0] + " ids: " + param[1]; + } + }; + + addTableAction("Undeploy Selected", null, new AuthorizedTableAction(this, TableActionEnablement.SINGLE, + Permission.MANAGE_SETTINGS) { + + @Override + public boolean isEnabled(ListGridRecord[] selection) { + return StorageNodeTableView.this.isUndeployable(super.isEnabled(selection), selection); + } + + @Override + public void executeAction(final ListGridRecord[] selections, Object actionValue) { + executeBulkAction(selections, actionValue, question, success, failure, StorageNodeOperation.UNDEPLOY); + } + }); + } + + private void addDeployAction() { + final ParametrizedMessage question = new ParametrizedMessage() { + @Override + public String getMessage(String... param) { + return "Are you sure, you want to run the deploy operation on selected nodes: " + param[0] + + " ? It may take a while to complete."; + } + }; + final ParametrizedMessage success = new ParametrizedMessage() { + @Override + public String getMessage(String... 
param) { + return "Starting the deploy operation on storage nodes " + param[0]; + } + }; + final ParametrizedMessage failure = new ParametrizedMessage() { + @Override + public String getMessage(String... param) { + return "Invoking the deploy operation failed for storage nodes " + param[0] + " ids: " + param[1]; + } + }; + + addTableAction("Deploy Selected", null, new AuthorizedTableAction(this, TableActionEnablement.SINGLE, + Permission.MANAGE_SETTINGS) { + + @Override + public boolean isEnabled(ListGridRecord[] selection) { + return StorageNodeTableView.this.isDeployable(super.isEnabled(selection), selection); + } + + @Override + public void executeAction(final ListGridRecord[] selections, Object actionValue) { + executeBulkAction(selections, actionValue, question, success, failure, StorageNodeOperation.DEPLOY); + } + }); }
private void addInvokeOperationsAction() { @@ -213,7 +288,6 @@ public class StorageNodeTableView extends TableSection<StorageNodeDatasource> { operationsMap.put("Restart", "restart"); operationsMap.put("Disable Debug Mode", "stopRPCServer"); operationsMap.put("Enable Debug Mode", "startRPCServer"); - // operationsMap.put("Decommission", "decommission");
addTableAction(MSG.common_title_operation(), null, operationsMap, new AuthorizedTableAction(this, TableActionEnablement.ANY, Permission.MANAGE_SETTINGS) { @@ -221,77 +295,129 @@ public class StorageNodeTableView extends TableSection<StorageNodeDatasource> { @Override public boolean isEnabled(ListGridRecord[] selection) { return StorageNodeTableView.this.isEnabled(super.isEnabled(selection), selection); - }; + }
@Override public void executeAction(final ListGridRecord[] selections, Object actionValue) { - final String operationName = (String) actionValue; - final List<String> selectedAddresses = getSelectedAddresses(selections); - // String message = MSG.view_adminTopology_message_setModeConfirm(selectedAddresses.toString(), mode.name()); - SC.ask("Are you sure, you want to run operation " + operationName + "?", new BooleanCallback() { - public void execute(Boolean confirmed) { - if (confirmed) { - final CountDownLatch latch = CountDownLatch.create(selections.length, new Command() { - @Override - public void execute() { - // Message msg = new Message(MSG.view_adminTopology_message_setMode( - // String.valueOf(selections.length), mode.name()), Message.Severity.Info); - Message msg = new Message("Operation" + operationName - + " was successfully scheduled for resources with ids" - + Arrays.asList(getSelectedIds(selections)), Message.Severity.Info); - CoreGUI.getMessageCenter().notify(msg); - refreshTableInfo(); - } - }); - boolean isStopStartOrRestart = Arrays.asList("start", "shutdown", "restart").contains( - operationName); - for (ListGridRecord storageNodeRecord : selections) { - // NFE should never happen, because of the condition for table action enablement - int resourceId = storageNodeRecord.getAttributeAsInt(FIELD_RESOURCE_ID.propertyName()); - if (isStopStartOrRestart) { - // start, stop or restart the storage node - GWTServiceLookup.getOperationService().scheduleResourceOperation(resourceId, - operationName, null, "Run by Storage Node Administrations UI", 0, - new AsyncCallback<Void>() { - public void onSuccess(Void result) { - latch.countDown(); - } - - public void onFailure(Throwable caught) { - CoreGUI.getErrorHandler().handleError( - "Scheduling operation " + operationName - + " failed for resources with ids" - + Arrays.asList(getSelectedIds(selections)) + " " - + caught.getMessage(), caught); - latch.countDown(); - refreshTableInfo(); - } - }); + 
ParametrizedMessage question = new ParametrizedMessage() { + @Override + public String getMessage(String... param) { + return "Are you sure, you want to run operation " + param[0] + "?"; + } + }; + ParametrizedMessage success = new ParametrizedMessage() { + @Override + public String getMessage(String... param) { + return "Operation" + param[0] + " was successfully scheduled for storage nodes " + param[1]; + } + }; + ParametrizedMessage failure = new ParametrizedMessage() { + @Override + public String getMessage(String... param) { + return "Scheduling operation " + param[0] + " failed for storage nodes " + param[1]; + } + }; + executeBulkAction(selections, actionValue, question, success, failure, StorageNodeOperation.OTHER); + } + }); + } + + private enum StorageNodeOperation { + DEPLOY, UNDEPLOY, OTHER + } + + private interface ParametrizedMessage { + String getMessage(String... param); + } + + private void executeBulkAction(final ListGridRecord[] selections, Object actionValue, ParametrizedMessage question, + final ParametrizedMessage success, final ParametrizedMessage failure, final StorageNodeOperation operationType) { + final String operationName = (String) actionValue; + final List<String> selectedAddresses = getSelectedAddresses(selections); + SC.ask(question.getMessage(selectedAddresses.toString()), new BooleanCallback() { + public void execute(Boolean confirmed) { + if (confirmed) { + final CountDownLatch latch = CountDownLatch.create(selections.length, new Command() { + @Override + public void execute() { + String msgString = null; + if (operationType == StorageNodeOperation.OTHER) { + msgString = success.getMessage(operationName, selectedAddresses.toString()); + } else { + msgString = success.getMessage(selectedAddresses.toString()); + } + Message msg = new Message(msgString, Message.Severity.Info); + CoreGUI.getMessageCenter().notify(msg); + refreshTableInfo(); + } + }); + boolean isStopStartOrRestart = Arrays.asList("start", "shutdown", "restart") + 
.contains(operationName); + for (ListGridRecord storageNodeRecord : selections) { + // NFE should never happen, because of the condition for table action enablement + int resourceId = storageNodeRecord.getAttributeAsInt(FIELD_RESOURCE_ID.propertyName()); + if (isStopStartOrRestart) { + // start, stop or restart the storage node + GWTServiceLookup.getOperationService().scheduleResourceOperation(resourceId, operationName, + null, "Run by Storage Node Administrations UI", 0, new AsyncCallback<Void>() { + public void onSuccess(Void result) { + latch.countDown(); + } + + public void onFailure(Throwable caught) { + String msg = failure.getMessage(operationName, + selectedAddresses + " " + caught.getMessage()); + CoreGUI.getErrorHandler().handleError(msg, caught); + latch.countDown(); + refreshTableInfo(); + } + }); + } else { + if (operationType != StorageNodeOperation.OTHER) { // (un)deploy + AsyncCallback<Void> callback = new AsyncCallback<Void>() { + public void onSuccess(Void result) { + latch.countDown(); + } + + public void onFailure(Throwable caught) { + String msg = failure.getMessage( + selectedAddresses.toString(), + Arrays.asList(getSelectedIds(selections)).toString() + " " + + caught.getMessage()); + CoreGUI.getErrorHandler().handleError(msg, caught); + latch.countDown(); + refreshTableInfo(); + } + }; + int storageNodeId = storageNodeRecord.getAttributeAsInt("id"); + StorageNode node = new StorageNode(storageNodeId); + if (operationType == StorageNodeOperation.DEPLOY) { + GWTServiceLookup.getStorageService().deployStorageNode(node, callback); } else { - // invoke the operation on the storage service resource - GWTServiceLookup.getStorageService().invokeOperationOnStorageService(resourceId, - operationName, new AsyncCallback<Void>() { - public void onSuccess(Void result) { - latch.countDown(); - } - - public void onFailure(Throwable caught) { - CoreGUI.getErrorHandler().handleError( - "Scheduling operation " + operationName - + " failed for resources with 
ids" - + Arrays.asList(getSelectedIds(selections)) + " " - + caught.getMessage(), caught); - latch.countDown(); - refreshTableInfo(); - } - }); + GWTServiceLookup.getStorageService().undeployStorageNode(node, callback); } + } else { + // invoke the operation on the storage service resource + GWTServiceLookup.getStorageService().invokeOperationOnStorageService(resourceId, + operationName, new AsyncCallback<Void>() { + public void onSuccess(Void result) { + latch.countDown(); + } + + public void onFailure(Throwable caught) { + String msg = failure.getMessage(operationName, selectedAddresses + " " + + caught.getMessage()); + CoreGUI.getErrorHandler().handleError(msg, caught); + latch.countDown(); + refreshTableInfo(); + } + }); } - } else { - refreshTableInfo(); } } - }); + } else { + refreshTableInfo(); + } } }); } @@ -331,6 +457,33 @@ public class StorageNodeTableView extends TableSection<StorageNodeDatasource> { return true; }
+ private boolean isDeployable(boolean parentsOpinion, ListGridRecord[] selection) { + if (!parentsOpinion || !isEnabled(parentsOpinion, selection)) { + return false; + } + for (ListGridRecord storageNodeRecord : selection) { + if ("NORMAL".equals(storageNodeRecord.getAttributeAsString(FIELD_STATUS.propertyName())) + || "JOINING".equals(storageNodeRecord.getAttributeAsString(FIELD_STATUS.propertyName())) + || "LEAVING".equals(storageNodeRecord.getAttributeAsString(FIELD_STATUS.propertyName()))) { + return false; + } + } + return true; + } + + private boolean isUndeployable(boolean parentsOpinion, ListGridRecord[] selection) { + if (!parentsOpinion || !isEnabled(parentsOpinion, selection)) { + return false; + } + for (ListGridRecord storageNodeRecord : selection) { + if ("JOINING".equals(storageNodeRecord.getAttributeAsString(FIELD_STATUS.propertyName())) + || "LEAVING".equals(storageNodeRecord.getAttributeAsString(FIELD_STATUS.propertyName()))) { + return false; + } + } + return true; + } + @Override protected String getBasePath() { return VIEW_PATH; diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java index 72f17b18..9470302 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java @@ -27,6 +27,7 @@ import java.util.Map;
import com.google.gwt.user.client.rpc.RemoteService;
+import org.rhq.core.domain.auth.Subject; import org.rhq.core.domain.cloud.StorageClusterSettings; import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.cloud.StorageNodeConfigurationComposite; @@ -92,4 +93,8 @@ public interface StorageGWTService extends RemoteService { StorageClusterSettings retrieveClusterSettings() throws RuntimeException;
void updateClusterSettings(StorageClusterSettings clusterSettings) throws RuntimeException; + + void undeployStorageNode(StorageNode storageNode) throws RuntimeException; + + void deployStorageNode(StorageNode storageNode) throws RuntimeException; } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java index ae18075..5548285 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java @@ -26,6 +26,7 @@ import java.util.ArrayList; import java.util.List; import java.util.Map;
+import org.rhq.core.domain.auth.Subject; import org.rhq.core.domain.cloud.StorageClusterSettings; import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.cloud.StorageNodeConfigurationComposite; @@ -201,5 +202,23 @@ public class StorageGWTServiceImpl extends AbstractGWTServiceImpl implements Sto } catch (Throwable t) { throw getExceptionToThrowToClient(t); } - } + } + + @Override + public void undeployStorageNode(StorageNode storageNode) throws RuntimeException { + try { + storageNodeManager.undeployStorageNode(getSessionSubject(), storageNode); + } catch (Throwable t) { + throw getExceptionToThrowToClient(t); + } + } + + @Override + public void deployStorageNode(StorageNode storageNode) throws RuntimeException { + try { + storageNodeManager.deployStorageNode(getSessionSubject(), storageNode); + } catch (Throwable t) { + throw getExceptionToThrowToClient(t); + } + } }
commit 39b3cc10edb3c228a26c0d110a6873eddd5a39b0 Author: Lukas Krejci lkrejci@redhat.com Date: Mon Aug 19 17:52:53 2013 +0200
[BZ 986491] - Yum content source plugin now handles HTTP basic auth
It also should be able to handle HTTPS and other URL schemes if support for them is available in the RHQ server's JVM.
diff --git a/modules/enterprise/server/plugins/yum/pom.xml b/modules/enterprise/server/plugins/yum/pom.xml index e52c04b..bef9d81 100644 --- a/modules/enterprise/server/plugins/yum/pom.xml +++ b/modules/enterprise/server/plugins/yum/pom.xml @@ -32,6 +32,20 @@ <scope>provided</scope> <!-- this version of jdom is included in the server, we'll juse reuse it --> </dependency>
+ <!-- Test deps --> + <dependency> + <groupId>org.rhq</groupId> + <artifactId>test-utils</artifactId> + <version>${project.version}</version> + <scope>test</scope> + </dependency> + + <dependency> + <groupId>org.jboss.resteasy</groupId> + <artifactId>tjws</artifactId> + <version>3.0.3.Final</version> + <scope>test</scope> + </dependency> </dependencies>
<build> @@ -128,4 +142,4 @@ </profile> </profiles>
-</project> \ No newline at end of file +</project> diff --git a/modules/enterprise/server/plugins/yum/src/main/java/org/rhq/enterprise/server/plugins/yum/DiskReader.java b/modules/enterprise/server/plugins/yum/src/main/java/org/rhq/enterprise/server/plugins/yum/DiskReader.java index f131471..6a2227f 100644 --- a/modules/enterprise/server/plugins/yum/src/main/java/org/rhq/enterprise/server/plugins/yum/DiskReader.java +++ b/modules/enterprise/server/plugins/yum/src/main/java/org/rhq/enterprise/server/plugins/yum/DiskReader.java @@ -1,6 +1,6 @@ /* * RHQ Management Platform - * Copyright (C) 2005-2008 Red Hat, Inc. + * Copyright (C) 2013 Red Hat, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify @@ -16,13 +16,13 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ + package org.rhq.enterprise.server.plugins.yum;
import java.io.File; -import java.io.FileInputStream; import java.io.IOException; -import java.io.InputStream; -import java.util.zip.GZIPInputStream; +import java.net.URISyntaxException; +import java.net.URL;
/** * The disk reader is a yum repo reader used to read metadata and bits from an existing yum repo that is located on a @@ -30,56 +30,24 @@ import java.util.zip.GZIPInputStream; * * @author jortel */ -public class DiskReader implements RepoReader { - /** - * The base or root directory path of a yum repo. - */ - private final String basepath; +public class DiskReader extends UrlReader {
- /** - * Constructor. - * - * @param basepath The base or root directory path of a yum repo. - */ - public DiskReader(String basepath) { - this.basepath = basepath; + public DiskReader(URL baseUrl) { + super(baseUrl); }
/** * Validate the reader. Validates that the base path is an existing directory that is readable. * - * @throws Exception When <i>basepath</i> is not a directory, does not exist, or is not readable. + * @throws IOException When <i>baseUrl</i> is not a directory, does not exist, or is not readable. */ - public void validate() throws Exception { - File file = new File(basepath); - if (file.exists() || file.canRead() || file.isDirectory()) { + @Override + public void validate() throws IOException, URISyntaxException { + File file = new File(baseUrl.toURI().getSchemeSpecificPart()); + if (file.exists() && file.canRead() && file.isDirectory()) { return; // good }
- throw new Exception("Path: '" + basepath + "' not found, not a directory or permission denied"); - } - - /** - * Open an input stream to specifed relative path. Prepends the basepath to the <i>path</i> and opens and opens and - * input stream. - * - * @param path A relative path to a file within the repo. - * - * @return An open input stream that <b>must</b> be closed by the caller. - * - * @throws IOException On all errors. - */ - public InputStream openStream(String path) throws IOException { - InputStream in = new FileInputStream(basepath + "/" + path); - if (path.endsWith(".gz")) { - return new GZIPInputStream(in); - } - - return in; - } - - @Override - public String toString() { - return "basepath: " + basepath; + throw new IOException("Path: '" + baseUrl + "' not found, not a directory or permission denied"); } -} \ No newline at end of file +} diff --git a/modules/enterprise/server/plugins/yum/src/main/java/org/rhq/enterprise/server/plugins/yum/HttpReader.java b/modules/enterprise/server/plugins/yum/src/main/java/org/rhq/enterprise/server/plugins/yum/HttpReader.java index 98e115c..99eecdd 100644 --- a/modules/enterprise/server/plugins/yum/src/main/java/org/rhq/enterprise/server/plugins/yum/HttpReader.java +++ b/modules/enterprise/server/plugins/yum/src/main/java/org/rhq/enterprise/server/plugins/yum/HttpReader.java @@ -21,80 +21,58 @@ package org.rhq.enterprise.server.plugins.yum; import java.io.IOException; import java.io.InputStream; import java.net.HttpURLConnection; +import java.net.URISyntaxException; import java.net.URL; import java.util.zip.GZIPInputStream;
+import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import org.rhq.core.util.Base64; + /** * The http reader is a yum repo reader used to read metadata and bits from an existing (remote) yum repo using yum's * native http interface. * * @author jortel */ -public class HttpReader implements RepoReader { - /** - * The base url of a yum repo. - */ - private final String baseurl; +public class HttpReader extends UrlReader {
- /** - * The current url connection - */ - HttpURLConnection connection; + private static final Log LOG = LogFactory.getLog(RepoProvider.class); + + private final String username; + private final String password;
/** * Constructor. * - * @param basepath The base url of a yum repo. + * @param baseUrl The base url of a yum repo. + * @param username the name of the user to authenticate with or null + * @param password the password to use or null */ - public HttpReader(String baseurl) { - this.baseurl = baseurl; + public HttpReader(URL baseUrl, String username, String password) { + super(baseUrl); + this.username = username; + this.password = password; }
- /** - * Validate the reader. Validates that the base url is valid. - * - * @throws Exception When <i>baseurl</i> is not valid. - */ - public void validate() throws Exception { - URL url = new URL(baseurl); - connection = (HttpURLConnection) url.openConnection(); - connection.setRequestMethod("GET"); - try { - if (connection.getHeaderField(0) == null) { - throw new IOException("Cannot validate connection - check URL"); - } - } finally { - connection.disconnect(); - } - } - - /** - * Open an input stream to specifed relative url. Prepends the baseurl to the <i>url</i> and opens and opens and - * input stream. Files with a .gz suffix will be unziped (inline). - * - * @param suffix A url that is relative to the <i>baseurl</i> and references a file within the repo. - * - * @return An open input stream that <b>must</b> be closed by the caller. - * - * @throws IOException On all errors. - */ - public InputStream openStream(String suffix) throws IOException { - URL url = new URL(baseurl + "/" + suffix); - connection = (HttpURLConnection) url.openConnection(); - connection.setRequestMethod("GET"); - InputStream in = connection.getInputStream(); - if (suffix.endsWith(".gz")) { - return new GZIPInputStream(in); + @Override + protected InputStream doOpen(URL url) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("open " + url); }
- return in; - } + HttpURLConnection connection = (HttpURLConnection) url.openConnection(); + connection.setInstanceFollowRedirects(true);
- /* - * (non-Javadoc) @see java.lang.Object#toString() - */ - @Override - public String toString() { - return baseurl; + if (username != null) { + String userInfo = username; + if (password != null) { + userInfo += ":" + password; + } + String basicAuth = "Basic " + Base64.encode(userInfo.getBytes("ISO-8859-1")); + connection.setRequestProperty("Authorization", basicAuth); + } + return connection.getInputStream(); } -} \ No newline at end of file +} diff --git a/modules/enterprise/server/plugins/yum/src/main/java/org/rhq/enterprise/server/plugins/yum/RepoProvider.java b/modules/enterprise/server/plugins/yum/src/main/java/org/rhq/enterprise/server/plugins/yum/RepoProvider.java index f065ba7..2e04453 100644 --- a/modules/enterprise/server/plugins/yum/src/main/java/org/rhq/enterprise/server/plugins/yum/RepoProvider.java +++ b/modules/enterprise/server/plugins/yum/src/main/java/org/rhq/enterprise/server/plugins/yum/RepoProvider.java @@ -19,6 +19,9 @@ package org.rhq.enterprise.server.plugins.yum;
import java.io.InputStream; +import java.net.MalformedURLException; +import java.net.URI; +import java.net.URL; import java.util.ArrayList; import java.util.Collection; import java.util.List; @@ -84,20 +87,19 @@ public class RepoProvider implements ContentProvider, PackageSource { throw new IllegalArgumentException("Missing required 'location' property"); }
- location = trim(location); - log.info("Initialized with location: " + location); - if (location.startsWith("http://")) { - reader = new HttpReader(location); - return; - } + location = location.trim(); + String username = configuration.getSimpleValue("username"); + String password = configuration.getSimpleValue("password");
- if (location.startsWith("file://")) { - location = location.substring(7); - reader = new DiskReader(location); - return; - } + URI uri = new URI(location);
- reader = new DiskReader(location); + log.info("Initialized with location: " + location); + try { + reader = UrlReader.fromUri(uri, username, password); + } catch (MalformedURLException e) { + log.error("Could not determine a reader for the URI [" + uri + "]"); + throw e; + } }
/** @@ -176,22 +178,6 @@ public class RepoProvider implements ContentProvider, PackageSource { reader.validate(); }
- /** - * Trim white space and trailing (/) characters. - * - * @param path A url/directory path string. - * - * @return A trimmed string. - */ - private String trim(String path) { - path = path.trim(); - while ((path.length() > 1) && path.endsWith("/")) { - path = path.substring(0, path.length() - 1); - } - - return path; - } - public SyncProgressWeight getSyncProgressWeight() { return SyncProgressWeight.DEFAULT_WEIGHTS; } diff --git a/modules/enterprise/server/plugins/yum/src/main/java/org/rhq/enterprise/server/plugins/yum/UrlReader.java b/modules/enterprise/server/plugins/yum/src/main/java/org/rhq/enterprise/server/plugins/yum/UrlReader.java new file mode 100644 index 0000000..682b319 --- /dev/null +++ b/modules/enterprise/server/plugins/yum/src/main/java/org/rhq/enterprise/server/plugins/yum/UrlReader.java @@ -0,0 +1,111 @@ +/* + * RHQ Management Platform + * Copyright (C) 2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +package org.rhq.enterprise.server.plugins.yum; + +import java.io.IOException; +import java.io.InputStream; +import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.net.URLConnection; +import java.util.zip.GZIPInputStream; + +/** + * @author Lukas Krejci + * @since 4.9 + */ +public class UrlReader implements RepoReader { + + protected final URL baseUrl; + + public static UrlReader fromUri(URI uri, String username, String password) throws MalformedURLException { + if (uri.getScheme() == null) { + try { + return new DiskReader(new URI("file", uri.getSchemeSpecificPart(), uri.getFragment()).toURL()); + } catch (URISyntaxException e) { + throw new IllegalStateException( + "URI syntax exception while adding the 'file' scheme to a path. This should not have happened.", e); + } + } else if (uri.getScheme().equals("file")) { + return new DiskReader(uri.toURL()); + } else if (uri.getScheme().startsWith("http")) { + return new HttpReader(uri.toURL(), username, password); + } else { + return new UrlReader(uri.toURL()); + } + } + + protected UrlReader(URL baseUrl) { + this.baseUrl = baseUrl; + + } + + public void validate() throws IOException, URISyntaxException { + InputStream content = doOpen(baseUrl); + content.close(); + } + + /** + * Open an input stream to specifed relative url. Prepends the baseurl to the <i>url</i> and opens and opens and + * input stream. Files with a .gz suffix will be unziped (inline). + * + * @param path A path that is relative to the <i>baseurl</i> and references a file within the repo. + * + * @return An open input stream that <b>must</b> be closed by the caller. + * + * @throws IOException On all errors. 
+ */ + @Override + public final InputStream openStream(String path) throws IOException { + URL url = extendBaseUrl(path); + + InputStream ret = doOpen(url); + if (path.endsWith(".gz")) { + ret = new GZIPInputStream(ret); + } + + return ret; + } + + protected InputStream doOpen(URL url) throws IOException { + return url.openStream(); + } + + /** + * Mainly used for test purposes, othewise not really useful. + */ + public URL getBaseURL() { + return baseUrl; + } + + protected URL extendBaseUrl(String suffix) throws MalformedURLException { + if (suffix != null) { + suffix = suffix.trim(); + } + + return suffix == null ? baseUrl : new URL(baseUrl + "/" + suffix); + } + + @Override + public String toString() { + return getClass().getSimpleName() + " " + baseUrl; + } +} diff --git a/modules/enterprise/server/plugins/yum/src/main/resources/META-INF/rhq-serverplugin.xml b/modules/enterprise/server/plugins/yum/src/main/resources/META-INF/rhq-serverplugin.xml index b501235..2ced81c 100644 --- a/modules/enterprise/server/plugins/yum/src/main/resources/META-INF/rhq-serverplugin.xml +++ b/modules/enterprise/server/plugins/yum/src/main/resources/META-INF/rhq-serverplugin.xml @@ -24,7 +24,11 @@ type="string" required="true" description="The URL or path to the Yum repository" /> + <c:simple-property name="username" type="string" required="false" + description="The optional user name to authenticate with"/> + <c:simple-property name="password" type="password" required="false" + description="The optional password to authenticate with"/> </configuration> </contentSourceType>
-</content-plugin> \ No newline at end of file +</content-plugin> diff --git a/modules/enterprise/server/plugins/yum/src/test/java/Acme/Serve/UrlReaderTestServer.java b/modules/enterprise/server/plugins/yum/src/test/java/Acme/Serve/UrlReaderTestServer.java new file mode 100644 index 0000000..43f3d33 --- /dev/null +++ b/modules/enterprise/server/plugins/yum/src/test/java/Acme/Serve/UrlReaderTestServer.java @@ -0,0 +1,64 @@ +/* + * RHQ Management Platform + * Copyright (C) 2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +package Acme.Serve; + +import java.io.PrintStream; +import java.util.Map; +import java.util.Properties; + +/** + * This needs to be in the {@code Acme.Serve} package so that authentication realm can be defined. 
+ * + * @author Lukas Krejci + * @since 4.9 + */ +public class UrlReaderTestServer extends Serve { + private static final long serialVersionUID = 1L; + + + public static class AuthRealm extends BasicAuthRealm { + + private static final long serialVersionUID = 1L; + + public AuthRealm(String name) { + super(name); + } + } + + public UrlReaderTestServer(Map arguments, PrintStream logStream) { + super(arguments, logStream); + } + + @Override + public void setMappingTable(PathTreeDictionary mappingTable) { + super.setMappingTable(mappingTable); + } + + @Override + protected void initMime() { + mime = new Properties(); + mime.put("file", "text/plain"); + } + + @Override + public void setRealms(PathTreeDictionary realms) { + super.setRealms(realms); + } +} diff --git a/modules/enterprise/server/plugins/yum/src/test/java/org/rhq/enterprise/server/plugins/yum/UrlReaderTest.java b/modules/enterprise/server/plugins/yum/src/test/java/org/rhq/enterprise/server/plugins/yum/UrlReaderTest.java new file mode 100644 index 0000000..62e9c18 --- /dev/null +++ b/modules/enterprise/server/plugins/yum/src/test/java/org/rhq/enterprise/server/plugins/yum/UrlReaderTest.java @@ -0,0 +1,195 @@ +package org.rhq.enterprise.server.plugins.yum;/* + * RHQ Management Platform + * Copyright (C) 2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +import static org.testng.Assert.assertEquals; + +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.Reader; +import java.net.InetAddress; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.HashMap; +import java.util.Map; + +import javax.servlet.ServletException; +import javax.servlet.http.HttpServlet; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.testng.Assert; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +import org.rhq.core.util.stream.StreamUtil; +import org.rhq.test.PortScout; + +import Acme.Serve.Serve; +import Acme.Serve.UrlReaderTestServer; + +/** + * @author Lukas Krejci + * @since 4.9 + */ +@Test +public class UrlReaderTest { + + private static final String TEST_USER = "testUser"; + private static final String TEST_PASSWORD = "password"; + + private static class AuthServlet extends HttpServlet { + + private static final long serialVersionUID = 1L; + + @Override + protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException { + String authType = req.getAuthType(); + String remoteUser = req.getRemoteUser(); + + assertEquals(authType, "BASIC", "Unexpected authentication type"); + assertEquals(remoteUser, TEST_USER, "Unexpected authenticated user."); + + String path = req.getPathTranslated(); + if (path != null) { + FileInputStream in = new FileInputStream(path); + try { + StreamUtil.copy(in, resp.getOutputStream(), false); + } finally { + in.close(); + } + } + } + } + + private UrlReaderTestServer httpServer; + private String rootUrl; + + @BeforeClass + public void startWebServer() throws IOException, URISyntaxException { + PortScout portScout = new PortScout(); + int httpPort = portScout.getNextFreePort(); + + Map<String, Object> 
params = new HashMap<String, Object>(); + params.put(Serve.ARG_PORT, httpPort); + params.put(Serve.ARG_NOHUP, "nohup"); + + httpServer = new UrlReaderTestServer(params, System.err); + + Serve.PathTreeDictionary aliases = new Serve.PathTreeDictionary(); + File root = getRoot(); + aliases.put("/", root); + aliases.put("/*", root); + + httpServer.setMappingTable(aliases); + httpServer.addDefaultServlets(null); + + httpServer.addServlet("/auth", new AuthServlet()); + + UrlReaderTestServer.AuthRealm authRealm = new UrlReaderTestServer.AuthRealm("auth"); + authRealm.put(TEST_USER, TEST_PASSWORD); + + Serve.PathTreeDictionary realms = new Serve.PathTreeDictionary(); + realms.put("/auth", authRealm); + + httpServer.setRealms(realms); + portScout.close(); + + httpServer.runInBackground(); + + rootUrl = InetAddress.getLocalHost().getHostAddress() + ":" + httpPort; + } + + @AfterClass(alwaysRun = true) + public void stopWebServer() throws IOException { + httpServer.stopBackground(); + httpServer.destroyAllServlets(); + } + + public void picksCorrectImpl() throws Exception { + URI httpUrl = new URI("http://jboss.org/rhq"); + URI httpsUrl = new URI("https://jboss.org/rhq"); + URI noSchemeUrl = new URI("stairway/to/heaven"); + URI fileUrl = new URI("file:/over/the/rainbow"); + + UrlReader httpRdr = UrlReader.fromUri(httpUrl, null, null); + UrlReader httpsRdr = UrlReader.fromUri(httpsUrl, null, null); + UrlReader noSchemeRdr = UrlReader.fromUri(noSchemeUrl, null, null); + UrlReader fileRdr = UrlReader.fromUri(fileUrl, null, null); + + assertReader(httpRdr, httpUrl.toURL(), HttpReader.class); + assertReader(httpsRdr, httpsUrl.toURL(), HttpReader.class); + assertReader(noSchemeRdr, new URL("file:stairway/to/heaven"), DiskReader.class); + assertReader(fileRdr, fileUrl.toURL(), DiskReader.class); + } + + public void readsFiles() throws Exception { + UrlReader fileReader = UrlReader.fromUri(getRoot().toURI(), null, null); + + testReaderWithTestFile(fileReader); + } + + public void 
readsHttp() throws Exception { + URI uri = new URI("http://" + rootUrl); + + UrlReader httpReader = UrlReader.fromUri(uri, null, null); + + testReaderWithTestFile(httpReader); + } + + public void authenticatesInHttp() throws Exception { + URI uri = new URI("http://" + rootUrl + "/auth"); + + UrlReader httpReader = UrlReader.fromUri(uri, TEST_USER, TEST_PASSWORD); + + testReaderWithTestFile(httpReader); + } + + private static void assertReader(UrlReader instance, URL expectedUrl, Class<? extends UrlReader> expectedType) { + assertEquals(instance.getClass(), expectedType, "Unexpected reader type"); + assertEquals(instance.getBaseURL(), expectedUrl, "Unexpected baseUrl"); + } + + private void testReaderWithTestFile(UrlReader reader) throws IOException, URISyntaxException { + try { + reader.validate(); + } catch (IOException e) { + Assert.fail("Validation of " + reader.getClass().getSimpleName() + " reader failed", e); + } + + Reader rdr = new InputStreamReader(reader.openStream("test.file")); + try { + String contents = StreamUtil.slurp(rdr); + + assertEquals(contents, "kachny\n", "Unexpected contents of the test file"); + } finally { + rdr.close(); + } + } + + private File getRoot() throws URISyntaxException { + URI testUri = getClass().getResource("/test.file").toURI(); + + File testFile = new File(testUri.getSchemeSpecificPart()); + return testFile.getParentFile(); + } +} diff --git a/modules/enterprise/server/plugins/yum/src/test/resources/test.file b/modules/enterprise/server/plugins/yum/src/test/resources/test.file new file mode 100644 index 0000000..8742087 --- /dev/null +++ b/modules/enterprise/server/plugins/yum/src/test/resources/test.file @@ -0,0 +1 @@ +kachny
commit 1b7f52ffd3943d3b0574a34b7170f8434f81b3df Author: Jirka Kremser jkremser@redhat.com Date: Mon Aug 19 14:38:10 2013 +0200
Fixing/handling errors in UI when storage node has no associated resource id (this can happen when installing everything with "rhqctl install --agent-auto-start false").
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java index d6a91cb..d1ea625 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java @@ -122,15 +122,17 @@ public class StorageNodeAdminView extends EnhancedVLayout implements Bookmarkabl GWTServiceLookup.getStorageService().findResourcesWithAlertDefinitions(new AsyncCallback<Integer[]>() { @Override public void onFailure(Throwable caught) { - Message message = new Message("foobar", - Message.Severity.Warning); + Message message = new Message("Unable to render storage node alert view: " + + caught.getMessage(), Message.Severity.Warning); CoreGUI.goToView(VIEW_ID.getName(), message); }
@Override public void onSuccess(Integer[] result) { if (result == null || result.length == 0) { - onFailure(new Exception("foobaz")); + onFailure(new Exception( + "Unfortunately, there are no associated resources for the available storage nodes. " + + "Check if the agents are running on the machines where the storage nodes are deployed.")); } else { resIds = ArrayUtils.unwrapArray(result); tabset.getTabByName(tabInfo.name.getName()).setPane( diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java index a89bb81..685fb5d 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java @@ -250,7 +250,8 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNodeLoadComposit } int value = from.getUnackAlerts(); record.setAttribute(FIELD_ALERTS.propertyName(), - StorageNodeAdminView.getAlertsString("New Alerts", node.getId(), value)); + node.getResource() != null ? 
StorageNodeAdminView.getAlertsString("New Alerts", node.getId(), value) + : "New Alerts (0)"); String memory = null; if (from.getHeapPercentageUsed() != null && from.getHeapPercentageUsed().getAggregate().getAvg() != null) memory = MeasurementConverterClient.format(from.getHeapPercentageUsed().getAggregate().getAvg(), from diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java index 6fdae0c..4055e6f 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java @@ -23,7 +23,6 @@ import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDat import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_ALERTS; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_CQL_PORT; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_CTIME; -import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_FAILED_OPERATION; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_JMX_PORT; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_MTIME; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_OPERATION_MODE; @@ -67,7 +66,6 @@ import org.rhq.enterprise.gui.coregui.client.util.Log; import org.rhq.enterprise.gui.coregui.client.util.MeasurementUtility; import org.rhq.enterprise.gui.coregui.client.util.StringUtility; import 
org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedHLayout; -import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedUtility; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; import org.rhq.enterprise.gui.coregui.client.util.message.Message;
@@ -130,9 +128,11 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab final StorageNode node = storageNodes.get(0); header.setContents("<div style='text-align: center; font-weight: bold; font-size: medium;'> Storage Node (" + node.getAddress() + ")</div>"); - fetchStorageNodeConfigurationComposite(node); + prepareDetailsSection(node); + fetchStorageNodeConfigurationComposite(node); fetchSparkLineDataForLoadComponent(node); + fetchUnackAlerts(storageNodeId, node.getResource() != null); }
public void onFailure(Throwable caught) { @@ -142,45 +142,76 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab initSectionCount = SECTION_COUNT; } }); - fetchUnackAlerts(storageNodeId); }
private void fetchStorageNodeConfigurationComposite(final StorageNode node) { - GWTServiceLookup.getStorageService().retrieveConfiguration(node, - new AsyncCallback<StorageNodeConfigurationComposite>() { - @Override - public void onFailure(Throwable caught) { - Message message = new Message(MSG.view_configurationHistoryDetails_error_loadFailure(), - Message.Severity.Warning); - initSectionCount = SECTION_COUNT; - } + if (node.getResource() == null) { // no associated resource yet + LayoutSpacer spacer = new LayoutSpacer(); + spacer.setHeight(15); + HTMLFlow info = new HTMLFlow("<h2>There is no configuration available for this node. Is the agent running on the " + + node.getAddress() + "?</h2>"); + SectionStackSection section = new SectionStackSection("Configuration"); + section.setItems(spacer, info); + section.setExpanded(true); + section.setCanCollapse(false); + + configurationSection = section; + initSectionCount++; + } else { + GWTServiceLookup.getStorageService().retrieveConfiguration(node, + new AsyncCallback<StorageNodeConfigurationComposite>() { + @Override + public void onFailure(Throwable caught) { + Message message = new Message(MSG.view_configurationHistoryDetails_error_loadFailure(), + Message.Severity.Warning); + initSectionCount = SECTION_COUNT; + }
- @Override - public void onSuccess(StorageNodeConfigurationComposite result) { - prepareResourceConfigEditor(result); - } - }); + @Override + public void onSuccess(StorageNodeConfigurationComposite result) { + prepareResourceConfigEditor(result); + } + }); + } }
private void fetchSparkLineDataForLoadComponent(final StorageNode storageNode) { + if (storageNode.getResource() == null) { + HTMLFlow info = new HTMLFlow("<i>No load data available.</i>"); + info.setExtraSpace(5); + loadLayout = new EnhancedVLayout(); + loadLayout.setWidth100(); + LayoutSpacer spacer = new LayoutSpacer(); + spacer.setHeight(10); + HTMLFlow loadLabel = new HTMLFlow("Status"); + loadLabel.addStyleName("formTitle"); + loadLabel.setHoverWidth(300); + loadLayout.setMembers(spacer, loadLabel, info); + + if (detailsAndLoadLayout == null) { + detailsAndLoadLayout = new EnhancedHLayout(); + } + initSectionCount++; + } else { + GWTServiceLookup.getStorageService().findStorageNodeLoadDataForLast(storageNode, 8, + MeasurementUtility.UNIT_HOURS, 60, + new AsyncCallback<Map<String, List<MeasurementDataNumericHighLowComposite>>>() { + @Override + public void onFailure(Throwable caught) {
- GWTServiceLookup.getStorageService().findStorageNodeLoadDataForLast(storageNode, 8, MeasurementUtility.UNIT_HOURS, - 60, new AsyncCallback<Map<String, List<MeasurementDataNumericHighLowComposite>>>() { - @Override - public void onFailure(Throwable caught) { - - } + }
- @Override - public void onSuccess(Map<String, List<MeasurementDataNumericHighLowComposite>> result) { - prepareLoadSection(sectionStack, storageNode, result); - } + @Override + public void onSuccess(Map<String, List<MeasurementDataNumericHighLowComposite>> result) { + prepareLoadSection(sectionStack, storageNode, result); + }
- }); + }); + } }
- private void fetchUnackAlerts(final int storageNodeId) { + private void fetchUnackAlerts(final int storageNodeId, final boolean isResourceIdSet) { GWTServiceLookup.getStorageService().findNotAcknowledgedStorageNodeAlertsCounts(Arrays.asList(storageNodeId), new AsyncCallback<List<Integer>>() { @Override @@ -198,7 +229,7 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab } else { unackAlerts = result.get(0); if (alertsItem != null) { - alertsItem.setValue(StorageNodeAdminView.getAlertsString("New Alerts", storageNodeId, unackAlerts)); + alertsItem.setValue(isResourceIdSet ? StorageNodeAdminView.getAlertsString("New Alerts", storageNodeId, unackAlerts) : "New Alerts (0)"); } } } @@ -263,15 +294,16 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab
final StaticTextItem cqlPortItem = new StaticTextItem(FIELD_CQL_PORT.propertyName(), FIELD_CQL_PORT.title()); cqlPortItem.setValue(storageNode.getCqlPort()); - + final StaticTextItem jmxPortItem = new StaticTextItem(FIELD_JMX_PORT.propertyName(), FIELD_JMX_PORT.title()); jmxPortItem.setValue(storageNode.getJmxPort());
-// final StaticTextItem jmxConnectionUrlItem = new StaticTextItem("jmxConnectionUrl", -// MSG.view_adminTopology_storageNode_jmxConnectionUrl()); -// jmxConnectionUrlItem.setValue(storageNode.getJMXConnectionURL()); + // final StaticTextItem jmxConnectionUrlItem = new StaticTextItem("jmxConnectionUrl", + // MSG.view_adminTopology_storageNode_jmxConnectionUrl()); + // jmxConnectionUrlItem.setValue(storageNode.getJMXConnectionURL());
- final StaticTextItem operationModeItem = new StaticTextItem(FIELD_OPERATION_MODE.propertyName(), MSG.view_adminTopology_serverDetail_operationMode()); + final StaticTextItem operationModeItem = new StaticTextItem(FIELD_OPERATION_MODE.propertyName(), + MSG.view_adminTopology_serverDetail_operationMode()); operationModeItem.setValue(storageNode.getOperationMode());
// make clickable link to associated resource @@ -294,35 +326,52 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab StaticTextItem lastUpdateItem = new StaticTextItem(FIELD_MTIME.propertyName(), FIELD_MTIME.title()); lastUpdateItem.setValue(TimestampCellFormatter.format(Long.valueOf(storageNode.getMtime()), TimestampCellFormatter.DATE_TIME_FORMAT_LONG)); - + alertsItem = new StaticTextItem(FIELD_ALERTS.propertyName(), FIELD_ALERTS.title()); - alertsItem.setPrompt("The number in brackets represents the number of unacknowledged alerts for this storage node."); + alertsItem + .setPrompt("The number in brackets represents the number of unacknowledged alerts for this storage node."); if (unackAlerts != -1) { alertsItem.setValue(StorageNodeAdminView.getAlertsString("New Alerts", storageNodeId, unackAlerts)); } - - StaticTextItem message = new StaticTextItem("message", "Note"); - message.setValue(storageNode.getErrorMessage() == null ? "Everything is ok" : storageNode.getErrorMessage()); - + + StaticTextItem messageItem = new StaticTextItem("message", "Note"); + StringBuffer message = new StringBuffer(); + boolean isOk = true; + if (storageNode.getResource() == null) { + message.append("Storage node has no associated resource.<br />"); + isOk = false; + } + if (storageNode.getErrorMessage() != null) { + message.append(storageNode.getErrorMessage()).append("<br />"); + isOk = false; + } + if (isOk) { + message.append("Everything is ok"); + } + messageItem.setValue(message); + StaticTextItem lastOperation = null; - boolean isOperationFailed = storageNode.getFailedOperation() != null && storageNode.getFailedOperation().getResource() != null; + boolean isOperationFailed = storageNode.getFailedOperation() != null + && storageNode.getFailedOperation().getResource() != null; if (isOperationFailed) { ResourceOperationHistory operationHistory = storageNode.getFailedOperation(); - String value = 
LinkManager.getSubsystemResourceOperationHistoryLink(operationHistory.getResource().getId(), operationHistory.getId()); -// String value = "#Resource/" + operationHistory.getResource().getId() + "/Operations/History/" + operationHistory.getId()); + String value = LinkManager.getSubsystemResourceOperationHistoryLink(operationHistory.getResource().getId(), + operationHistory.getId()); + // String value = "#Resource/" + operationHistory.getResource().getId() + "/Operations/History/" + operationHistory.getId()); lastOperation = new StaticTextItem("lastOp", "Operation"); - lastOperation.setValue(LinkManager.getHref(value, operationHistory.getOperationDefinition().getDisplayName())); + lastOperation.setValue(LinkManager.getHref(value, operationHistory.getOperationDefinition() + .getDisplayName())); } - - - + List<FormItem> formItems = new ArrayList<FormItem>(6); - formItems.addAll(Arrays.asList(nameItem, resourceItem,cqlPortItem, jmxPortItem/*, jmxConnectionUrlItem*/)); - if (!CoreGUI.isDebugMode()) formItems.add(operationModeItem); // debug mode fails if this item is added - formItems.addAll(Arrays.asList(installationDateItem, lastUpdateItem, alertsItem, message)); - if (isOperationFailed) formItems.add(lastOperation); - form.setItems(formItems.toArray(new FormItem[]{})); - + formItems.addAll(Arrays.asList(nameItem, resourceItem, cqlPortItem, jmxPortItem/*, jmxConnectionUrlItem*/)); + if (!CoreGUI.isDebugMode()) + formItems.add(operationModeItem); // debug mode fails if this item is added + formItems.addAll(Arrays.asList(installationDateItem, lastUpdateItem, alertsItem, messageItem)); + if (isOperationFailed) + formItems.add(lastOperation); + form.setItems(formItems.toArray(new FormItem[] {})); + detailsLayout = new EnhancedVLayout(); detailsLayout.setWidth(450); detailsLayout.addMember(form); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java 
b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java index efe56e3..84c1586 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java @@ -27,7 +27,7 @@ import java.util.Arrays; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; -import org.rhq.enterprise.gui.coregui.client.util.Log; + import com.google.gwt.user.client.Timer; import com.google.gwt.user.client.rpc.AsyncCallback; import com.smartgwt.client.data.Criteria; @@ -50,6 +50,7 @@ import org.rhq.enterprise.gui.coregui.client.components.table.AuthorizedTableAct import org.rhq.enterprise.gui.coregui.client.components.table.TableActionEnablement; import org.rhq.enterprise.gui.coregui.client.components.table.TableSection; import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; +import org.rhq.enterprise.gui.coregui.client.util.Log; import org.rhq.enterprise.gui.coregui.client.util.StringUtility; import org.rhq.enterprise.gui.coregui.client.util.async.Command; import org.rhq.enterprise.gui.coregui.client.util.async.CountDownLatch; @@ -83,6 +84,7 @@ public class StorageNodeTableView extends TableSection<StorageNodeDatasource> { @Override protected void doOnDraw() { super.doOnDraw(); + // commenting out this call, because it caused UI to freeze // scheduleUnacknowledgedAlertsPollingJob(getListGrid()); }
@@ -178,6 +180,11 @@ public class StorageNodeTableView extends TableSection<StorageNodeDatasource> { ListGrid listGrid = new ListGrid() { @Override protected Canvas getExpansionComponent(final ListGridRecord record) { + if (record.getAttribute(FIELD_RESOURCE_ID.propertyName()) == null) { + // no resource set + return new HTMLFlow("There is no load data available for this node. Is the agent running on the " + + record.getAttributeAsString(FIELD_ADDRESS.propertyName() + "?")); + } int id = record.getAttributeAsInt(FIELD_ID); return new StorageNodeLoadComponent(id, null); } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 04abeb88..55593d3 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -469,10 +469,15 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN long endTime = System.currentTimeMillis(); long beginTime = endTime - (8 * 60 * 60 * 1000); for (StorageNode node : nodes) { - StorageNodeLoadComposite composite = getLoad(subjectManager.getOverlord(), node, beginTime, endTime); - int unackAlerts = findNotAcknowledgedStorageNodeAlerts(subjectManager.getOverlord(), node).size(); - composite.setUnackAlerts(unackAlerts); - result.add(composite); + if (node.getOperationMode() != OperationMode.INSTALLED) { + StorageNodeLoadComposite composite = getLoad(subjectManager.getOverlord(), node, beginTime, endTime); + int unackAlerts = findNotAcknowledgedStorageNodeAlerts(subjectManager.getOverlord(), node).size(); + composite.setUnackAlerts(unackAlerts); + result.add(composite); + } else { // newly installed node + result.add(new StorageNodeLoadComposite(node, beginTime, endTime)); + } + } 
return result; }
commit 3c8110a8abd138ec47eec6cd60d2ad61cad8b56d Author: Stefan Negrea snegrea@redhat.com Date: Sat Aug 17 19:25:18 2013 -0500
[BZ 993513] Baseline entries are no longer calculated and inserted into the SQL database if there is no data in the storage.
The SQL query that returns the schedules that need recomputation has been updated to return only the IDs of the schedules, reducing the amount of data requested from SQL to the minimum.
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/measurement/MeasurementBaseline.java b/modules/core/domain/src/main/java/org/rhq/core/domain/measurement/MeasurementBaseline.java index 5bd0e8b..681914d 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/measurement/MeasurementBaseline.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/measurement/MeasurementBaseline.java @@ -94,6 +94,8 @@ public class MeasurementBaseline implements Serializable { @OneToOne(fetch = FetchType.LAZY, optional = false) private MeasurementSchedule schedule;
+ @Column(name = "SCHEDULE_ID", insertable = false, updatable = false) + private int scheduleId;
public MeasurementBaseline() { computeTime = System.currentTimeMillis(); @@ -176,6 +178,20 @@ public class MeasurementBaseline implements Serializable { }
/** + * @return the scheduleId + */ + public int getScheduleId() { + return scheduleId; + } + + /** + * @param scheduleId the scheduleId to set + */ + public void setScheduleId(int scheduleId) { + this.scheduleId = scheduleId; + } + + /** * If <code>true</code>, it means a user manually entered the baseline values, as opposed to having them * automatically be calculated by examining past measurement data. * diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/measurement/MeasurementBaselineManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/measurement/MeasurementBaselineManagerBean.java index 90ac6e7..ed8db3a 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/measurement/MeasurementBaselineManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/measurement/MeasurementBaselineManagerBean.java @@ -18,10 +18,9 @@ */ package org.rhq.enterprise.server.measurement;
+import java.util.ArrayList; import java.util.Arrays; import java.util.Date; -import java.util.Iterator; -import java.util.LinkedList; import java.util.List; import java.util.Properties;
@@ -48,10 +47,10 @@ import org.rhq.enterprise.server.RHQConstants; import org.rhq.enterprise.server.auth.SubjectManagerLocal; import org.rhq.enterprise.server.authz.AuthorizationManagerLocal; import org.rhq.enterprise.server.authz.PermissionException; -import org.rhq.enterprise.server.storage.StorageClientManagerBean; import org.rhq.enterprise.server.cloud.StatusManagerLocal; import org.rhq.enterprise.server.measurement.instrumentation.MeasurementMonitor; import org.rhq.enterprise.server.resource.ResourceManagerLocal; +import org.rhq.enterprise.server.storage.StorageClientManagerBean; import org.rhq.enterprise.server.system.SystemManagerLocal; import org.rhq.server.metrics.MetricsBaselineCalculator;
@@ -148,8 +147,6 @@ public class MeasurementBaselineManagerBean implements MeasurementBaselineManage + (System.currentTimeMillis() - now) + ")ms");
now = System.currentTimeMillis(); - int totalProcessed = 0; - while (true) { /* * each call is done in a separate xtn of at most 100K inserted rows; this helps to keep the xtn * shorter to avoid timeouts in scenarios where baseline calculations bunch together. the idea was that @@ -189,16 +186,22 @@ public class MeasurementBaselineManagerBean implements MeasurementBaselineManage * In any event, an appropriate chunking solution needs to be found, and that partitioning strategy * needs to replace the limits in the query today. */ - List<MeasurementSchedule> schedulesWithoutBaselines = - measurementBaselineManager.getSchedulesWithoutBaselines(); - measurementBaselineManager.calculateBaselines(schedulesWithoutBaselines, now, amountOfData); - totalProcessed += schedulesWithoutBaselines.size(); - - if (schedulesWithoutBaselines.size() < BASELINE_PROCESSING_LIMIT) { - break; + List<Integer> schedulesWithoutBaselines = measurementBaselineManager.getSchedulesWithoutBaselines(); + + List<Integer> accumulator = new ArrayList<Integer>(); + for (Integer value : schedulesWithoutBaselines) { + accumulator.add(value); + if (accumulator.size() == BASELINE_PROCESSING_LIMIT) { + measurementBaselineManager.calculateBaselines(accumulator, now, amountOfData); + accumulator.clear(); } } - log.info("Calculated and inserted [" + totalProcessed + "] new baselines. (" + if (!accumulator.isEmpty()) { + measurementBaselineManager.calculateBaselines(accumulator, now, amountOfData); + accumulator.clear(); + } + + log.info("Calculated and inserted [" + schedulesWithoutBaselines.size() + "] new baselines. (" + (System.currentTimeMillis() - now) + ")ms");
MeasurementMonitor.getMBean().incrementBaselineCalculationTime(System.currentTimeMillis() - computeTime); @@ -222,18 +225,17 @@ public class MeasurementBaselineManagerBean implements MeasurementBaselineManage
@SuppressWarnings("unchecked") @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) - public List<MeasurementSchedule> getSchedulesWithoutBaselines() { - String sql = - "SELECT s.* FROM rhq_measurement_sched s INNER JOIN rhq_measurement_def d ON s.definition = d.id " + - "LEFT JOIN rhq_measurement_bline b ON s.id = b.schedule_id WHERE b.schedule_id IS NULL AND d.numeric_type = 0"; - Query query = this.entityManager.createNativeQuery(sql, MeasurementSchedule.class); - query.setMaxResults(BASELINE_PROCESSING_LIMIT); + public List<Integer> getSchedulesWithoutBaselines() { + final String sql = + "SELECT s.id FROM rhq_measurement_sched s INNER JOIN rhq_measurement_def d ON s.definition = d.id " + + "LEFT JOIN rhq_measurement_bline b ON s.id = b.schedule_id WHERE s.enabled = true AND b.schedule_id IS NULL AND d.numeric_type = 0"; + Query query = this.entityManager.createNativeQuery(sql);
return query.getResultList(); }
@TransactionAttribute(TransactionAttributeType.NOT_SUPPORTED) - public void calculateBaselines(List<MeasurementSchedule> schedules, long olderThan, long amountOfData) { + public void calculateBaselines(List<Integer> schedules, long olderThan, long amountOfData) { long endTime = olderThan; long startTime = endTime - amountOfData;
@@ -242,30 +244,21 @@ public class MeasurementBaselineManagerBean implements MeasurementBaselineManage long calcStartTime = System.currentTimeMillis(); List<MeasurementBaseline> results = baselineCalculator.calculateBaselines(schedules, startTime, endTime); long calcEndTime = System.currentTimeMillis(); - int count = results.size();
if (log.isDebugEnabled()) { - log.debug("Finished computing " + count + " new baselines in " + (calcEndTime - calcStartTime) + " ms"); + log.debug("Finished computing " + results.size() + " new baselines in " + (calcEndTime - calcStartTime) + + " ms"); }
log.debug("Persisting baselines calculations"); long saveStartTime = System.currentTimeMillis(); - Iterator<MeasurementBaseline> iterator = results.iterator(); - List<MeasurementBaseline> queue = new LinkedList<MeasurementBaseline>(); - while (iterator.hasNext()) { - if (queue.size() == 10) { - measurementBaselineManager.saveNewBaselines(queue); - queue = new LinkedList<MeasurementBaseline>(); - } - queue.add(iterator.next()); - } - if (!queue.isEmpty()) { - measurementBaselineManager.saveNewBaselines(queue); - } + + measurementBaselineManager.saveNewBaselines(results);
long saveEndTime = System.currentTimeMillis(); if (log.isDebugEnabled()) { - log.debug("Finished persisting " + count + " baselines in " + (saveEndTime - saveStartTime) + " ms"); + log.debug("Finished persisting " + results.size() + " baselines in " + (saveEndTime - saveStartTime) + + " ms"); } }
@@ -273,7 +266,10 @@ public class MeasurementBaselineManagerBean implements MeasurementBaselineManage @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) public void saveNewBaselines(List<MeasurementBaseline> baselines) { for (MeasurementBaseline baseline : baselines) { - entityManager.merge(baseline); + MeasurementSchedule schedule = new MeasurementSchedule(); + schedule.setId(baseline.getScheduleId()); + baseline.setSchedule(schedule); + this.entityManager.merge(baseline); } }
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/measurement/MeasurementBaselineManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/measurement/MeasurementBaselineManagerLocal.java index af6145a..35b8099 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/measurement/MeasurementBaselineManagerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/measurement/MeasurementBaselineManagerLocal.java @@ -24,7 +24,6 @@ import javax.ejb.Local;
import org.rhq.core.domain.auth.Subject; import org.rhq.core.domain.measurement.MeasurementBaseline; -import org.rhq.core.domain.measurement.MeasurementSchedule; import org.rhq.core.domain.resource.Resource;
/** @@ -73,7 +72,7 @@ public interface MeasurementBaselineManagerLocal { * will be called repeatedly during baseline calculations to get all of the necessary * schedules. */ - List<MeasurementSchedule> getSchedulesWithoutBaselines(); + List<Integer> getSchedulesWithoutBaselines();
/** * Given a list of schedules, this method calculates and stores baselines using the @@ -87,7 +86,7 @@ public interface MeasurementBaselineManagerLocal { * is treated as a duration. For example, a value of 259200000 * would be treated as 3 days. */ - void calculateBaselines(List<MeasurementSchedule> schedules, long olderThan, long amountOfData); + void calculateBaselines(List<Integer> schedules, long olderThan, long amountOfData);
/** * Persists the newly calculated baselines. @@ -148,7 +147,7 @@ public interface MeasurementBaselineManagerLocal { /** * Return a list of {@link MeasurementBaseline} objects for the {@link Resource} represented by the given id. * - * @param subject the user request to view the baseline history for the given resource + * @param subject the user request to view the baseline history for the given resource * @param resourceId the id of the resource whose baselines are to be returned * * @return a list of baselines for all measurements scheduled on the given resource diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsBaselineCalculator.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsBaselineCalculator.java index 6242df6..ef7d092 100644 --- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsBaselineCalculator.java +++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsBaselineCalculator.java @@ -31,7 +31,6 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory;
import org.rhq.core.domain.measurement.MeasurementBaseline; -import org.rhq.core.domain.measurement.MeasurementSchedule; import org.rhq.server.metrics.domain.AggregateSimpleNumericMetric; import org.rhq.server.metrics.domain.AggregateType;
@@ -48,12 +47,12 @@ public class MetricsBaselineCalculator { this.metricsDAO = metricsDAO; }
- public List<MeasurementBaseline> calculateBaselines(List<MeasurementSchedule> schedules, long startTime, + public List<MeasurementBaseline> calculateBaselines(List<Integer> schedules, long startTime, long endTime) { List<MeasurementBaseline> calculatedBaselines = new ArrayList<MeasurementBaseline>();
MeasurementBaseline measurementBaseline; - for (MeasurementSchedule schedule : schedules) { + for (Integer schedule : schedules) { measurementBaseline = this.calculateBaseline(schedule, startTime, endTime); if (measurementBaseline != null) { calculatedBaselines.add(measurementBaseline); @@ -63,9 +62,9 @@ public class MetricsBaselineCalculator { return calculatedBaselines; }
- private MeasurementBaseline calculateBaseline(MeasurementSchedule schedule, long startTime, long endTime) { - Iterable<AggregateSimpleNumericMetric> metrics = this.metricsDAO.findAggregatedSimpleOneHourMetric( - schedule.getId(), startTime, endTime); + private MeasurementBaseline calculateBaseline(Integer schedule, long startTime, long endTime) { + Iterable<AggregateSimpleNumericMetric> metrics = this.metricsDAO.findAggregatedSimpleOneHourMetric(schedule, + startTime, endTime);
if (metrics != null && metrics.iterator() != null && metrics.iterator().hasNext()) { ArithmeticMeanCalculator mean = new ArithmeticMeanCalculator(); @@ -104,7 +103,7 @@ public class MetricsBaselineCalculator { baseline.setMax(max); baseline.setMin(min); baseline.setMean(mean.getArithmeticMean()); - baseline.setSchedule(schedule); + baseline.setScheduleId(schedule);
if (log.isDebugEnabled()) { log.debug("Calculated baseline: " + baseline.toString()); @@ -113,12 +112,6 @@ public class MetricsBaselineCalculator { return baseline; }
- MeasurementBaseline baseline = new MeasurementBaseline(); - baseline.setMax(Double.NaN); - baseline.setMin(Double.NaN); - baseline.setMean(Double.NaN); - baseline.setSchedule(schedule); - - return baseline; + return null; } } diff --git a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsBaselineCalculatorTest.java b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsBaselineCalculatorTest.java index 2b58222..1137dca 100644 --- a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsBaselineCalculatorTest.java +++ b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsBaselineCalculatorTest.java @@ -27,7 +27,6 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.verifyNoMoreInteractions; import static org.mockito.Mockito.when; -import static org.rhq.test.AssertUtils.assertPropertiesMatch; import static org.testng.Assert.assertEquals;
import java.util.ArrayList; @@ -44,7 +43,6 @@ import org.testng.annotations.ObjectFactory; import org.testng.annotations.Test;
import org.rhq.core.domain.measurement.MeasurementBaseline; -import org.rhq.core.domain.measurement.MeasurementSchedule; import org.rhq.server.metrics.domain.AggregateSimpleNumericMetric; import org.rhq.server.metrics.domain.AggregateType;
@@ -66,7 +64,6 @@ public class MetricsBaselineCalculatorTest {
@Test public void noCalculationTest() throws Exception { - //tell the method story as it happens: mock dependencies and configure //those dependencies to get the method under test to completion. StorageSession mockSession = mock(StorageSession.class); @@ -74,34 +71,24 @@ public class MetricsBaselineCalculatorTest { PowerMockito.whenNew(MetricsDAO.class).withParameterTypes(StorageSession.class, MetricsConfiguration.class) .withArguments(eq(mockSession), eq(metricsConfiguration)).thenReturn(mockMetricsDAO);
- when(mockMetricsDAO.findAggregatedSimpleOneHourMetric(eq(1), eq(0), - eq(1))).thenReturn(new ArrayList<AggregateSimpleNumericMetric>()); + when(mockMetricsDAO.findAggregatedSimpleOneHourMetric(eq(1), eq(0), eq(1))).thenReturn( + new ArrayList<AggregateSimpleNumericMetric>());
- MeasurementSchedule mockSchedule = mock(MeasurementSchedule.class); - when(mockSchedule.getId()).thenReturn(0); + int expectedScheduleId = 2567;
//create object to test and inject required dependencies MetricsBaselineCalculator objectUnderTest = new MetricsBaselineCalculator(new MetricsDAO(mockSession, metricsConfiguration));
//run code under test - List<MeasurementBaseline> result = objectUnderTest.calculateBaselines(Arrays.asList(mockSchedule), 0, 1); + List<MeasurementBaseline> result = objectUnderTest.calculateBaselines(Arrays.asList(expectedScheduleId), 0, 1);
//verify the results (Assert and mock verification) - assertEquals(result.size(), 1, "Expected to get back one baseline"); - MeasurementBaseline expected = new MeasurementBaseline(); - expected.setSchedule(mockSchedule); - expected.setMax(Double.NaN); - expected.setMin(Double.NaN); - expected.setMean(Double.NaN); - - assertPropertiesMatch("", expected, result.get(0), "computeTime"); + assertEquals(result.size(), 0, "No baselines expected");
verify(mockMetricsDAO, times(1)).findAggregatedSimpleOneHourMetric(any(Integer.class), any(Integer.class), any(Integer.class)); verifyNoMoreInteractions(mockMetricsDAO); - - verify(mockSchedule, times(1)).getId(); }
@Test @@ -138,7 +125,7 @@ public class MetricsBaselineCalculatorTest { } }
- int expectedScheduleId= 567; + int expectedScheduleId = 1567; long expectedStartTime = 135; long expectedEndTime = 246; long beforeComputeTime = System.currentTimeMillis(); @@ -154,15 +141,12 @@ public class MetricsBaselineCalculatorTest { mockMetricsDAO.findAggregatedSimpleOneHourMetric(eq(expectedScheduleId), eq(expectedStartTime), eq(expectedEndTime))).thenReturn(randomData);
- MeasurementSchedule mockSchedule = mock(MeasurementSchedule.class); - when(mockSchedule.getId()).thenReturn(expectedScheduleId); - //create object to test and inject required dependencies MetricsBaselineCalculator objectUnderTest = new MetricsBaselineCalculator(new MetricsDAO(mockSession, metricsConfiguration));
//run code under test - List<MeasurementBaseline> result = objectUnderTest.calculateBaselines(Arrays.asList(mockSchedule), + List<MeasurementBaseline> result = objectUnderTest.calculateBaselines(Arrays.asList(expectedScheduleId), expectedStartTime, expectedEndTime);
//verify the results (Assert and mock verification) @@ -172,7 +156,7 @@ public class MetricsBaselineCalculatorTest { Assert.assertEquals(baselineResult.getMean(), average, TEST_PRECISION); Assert.assertEquals(baselineResult.getMax(), expectedMax, TEST_PRECISION); Assert.assertEquals(baselineResult.getMin(), expectedMin, TEST_PRECISION); - Assert.assertEquals(baselineResult.getSchedule(), mockSchedule); + Assert.assertEquals(baselineResult.getScheduleId(), expectedScheduleId); if (baselineResult.getComputeTime().getTime() > System.currentTimeMillis()) { Assert.fail("Back compute time, the computation was forward dated."); } @@ -183,10 +167,6 @@ public class MetricsBaselineCalculatorTest { verify(mockMetricsDAO, times(1)).findAggregatedSimpleOneHourMetric(eq(expectedScheduleId), eq(expectedStartTime), eq(expectedEndTime)); verifyNoMoreInteractions(mockMetricsDAO); - - verify(mockSchedule, times(2)).getId(); - verify(mockSchedule, times(1)).setBaseline(eq(baselineResult)); - verifyNoMoreInteractions(mockSchedule); }
@Test @@ -221,15 +201,12 @@ public class MetricsBaselineCalculatorTest { mockMetricsDAO.findAggregatedSimpleOneHourMetric(eq(expectedScheduleId), eq(expectedStartTime), eq(expectedEndTime))).thenReturn(randomData);
- MeasurementSchedule mockSchedule = mock(MeasurementSchedule.class); - when(mockSchedule.getId()).thenReturn(expectedScheduleId); - //create object to test and inject required dependencies MetricsBaselineCalculator objectUnderTest = new MetricsBaselineCalculator(new MetricsDAO(mockSession, metricsConfiguration));
//run code under test - List<MeasurementBaseline> result = objectUnderTest.calculateBaselines(Arrays.asList(mockSchedule), + List<MeasurementBaseline> result = objectUnderTest.calculateBaselines(Arrays.asList(expectedScheduleId), expectedStartTime, expectedEndTime);
//verify the results (Assert and mock verification) @@ -239,15 +216,10 @@ public class MetricsBaselineCalculatorTest { Assert.assertEquals(baselineResult.getMean(), average, TEST_PRECISION); Assert.assertEquals(baselineResult.getMax(), expectedMinMax); Assert.assertEquals(baselineResult.getMin(), expectedMinMax); - Assert.assertEquals(baselineResult.getSchedule(), mockSchedule); + Assert.assertEquals(baselineResult.getScheduleId(), expectedScheduleId);
verify(mockMetricsDAO, times(1)).findAggregatedSimpleOneHourMetric(eq(expectedScheduleId), eq(expectedStartTime), eq(expectedEndTime)); verifyNoMoreInteractions(mockMetricsDAO); - - verify(mockSchedule, times(2)).getId(); - verify(mockSchedule, times(1)).setBaseline(eq(baselineResult)); - verifyNoMoreInteractions(mockSchedule); } - }
commit aefb12333cad1c8063e55004e2670d95f028a419 Author: John Sanda jsanda@redhat.com Date: Sat Aug 17 18:47:37 2013 -0400
update status property for undeployment operation modes
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java index 4d4596e..e0f278d 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java @@ -254,10 +254,17 @@ public class StorageNode implements Serializable { return Status.DOWN; } } + if (operationMode == OperationMode.DECOMMISSION || operationMode == OperationMode.UNANNOUNCE || + operationMode == OperationMode.REMOVE_MAINTENANCE || operationMode == OperationMode.UNINSTALL) { + if (errorMessage == null && failedOperation == null) { + return Status.LEAVING; + } else { + return Status.DOWN; + } + } if (operationMode == OperationMode.NORMAL) { return Status.NORMAL; } - // else operation mode is DOWN return Status.DOWN; }
commit 82d37b9c211a575b660cfd1624b6ef5aa24e695d Author: John Sanda jsanda@redhat.com Date: Sat Aug 17 15:17:37 2013 -0400
add support for undeploying a node that is in a failed deployment state
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index fcf4526..04abeb88 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -250,9 +250,12 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN reset(); storageNodeOperationsHandler.performAddNodeMaintenance(subject, storageNode); default: - // For any other operation mode, the storage node should already be part of - // the cluster. - // TODO Make sure that the storage node is in fact part of the cluster + // TODO what do we do with/about maintenance mode? + + // We do not want to deploying a node that is in the process of being + // undeployed. It is too hard to make sure we are in an inconsistent state. + // Instead finishe the undeployment and redeploy the storage node. + throw new RuntimeException("Cannot deploy " + storageNode); } }
@@ -264,6 +267,12 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN reset(); storageNodeOperationsHandler.uninstall(subject, storageNode); break; + case ANNOUNCE: + case BOOTSTRAP: + reset(); + storageNodeOperationsHandler.unannounceStorageNode(subject, storageNode); + break; + case ADD_NODE_MAINTENANCE: case NORMAL: case DECOMMISSION: reset(); @@ -281,7 +290,8 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN storageNodeOperationsHandler.uninstall(subject, storageNode); break; default: - + // TODO what do we do with/about maintenance mode + throw new RuntimeException("Cannot undeploy " + storageNode); } }
commit d9dce65588a6e5c69d31fec1c21ebaaaae2ecefd Author: John Sanda jsanda@redhat.com Date: Sat Aug 17 14:36:04 2013 -0400
add a check to see if the node is already decommissioned
If C* has already been decommissioned, i.e., its operation mode is DECOMMISSIONED, then trying the decommission operation again will result in C* throwing an exception which we could interpret as a failed operation. This check makes the resource operation idempotent.
diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java index a82992e..7455f5e 100644 --- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java @@ -273,10 +273,16 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper try { EmsConnection emsConnection = getEmsConnection(); EmsBean storageService = emsConnection.getBean("org.apache.cassandra.db:type=StorageService"); - Class<?>[] emptyParams = new Class<?>[0];
- EmsOperation operation = storageService.getOperation("decommission", emptyParams); - operation.invoke((Object[]) emptyParams); + EmsAttribute operationModeAttr = storageService.getAttribute("OperationMode"); + String operationMode = (String) operationModeAttr.refresh(); + if (operationMode.equals("DECOMMISSIONED")) { + log.info("The storage node at " + getResourceContext().getResourceKey() + " is already decommissioned."); + } else { + Class<?>[] emptyParams = new Class<?>[0]; + EmsOperation operation = storageService.getOperation("decommission", emptyParams); + operation.invoke((Object[]) emptyParams); + } } catch (EmsInvocationException e) { result.setErrorMessage("Decommission operation failed: " + ThrowableUtil.getAllMessages(e)); }
commit 1ecedb01d70d65a2e66ac88fe567c9371b810cc8 Author: John Sanda jsanda@redhat.com Date: Sat Aug 17 14:00:27 2013 -0400
add support for resuming a failed undeployment
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 341cbc7..fcf4526 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -265,9 +265,13 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN storageNodeOperationsHandler.uninstall(subject, storageNode); break; case NORMAL: + case DECOMMISSION: reset(); storageNodeOperationsHandler.decommissionStorageNode(subject, storageNode); break; + case REMOVE_MAINTENANCE: + reset(); + storageNodeOperationsHandler.performRemoveNodeMaintenance(subject, storageNode); case UNANNOUNCE: reset(); storageNodeOperationsHandler.unannounceStorageNode(subject, storageNode); diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java index a30cfa7..f2fc108 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java @@ -78,9 +78,6 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa @EJB private ResourceManagerLocal resourceManager;
- @EJB - private ResourceFactoryManagerLocal resourceFactoryManager; - @Override public void announceStorageNode(Subject subject, StorageNode storageNode) { if (log.isInfoEnabled()) { @@ -150,19 +147,19 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa
storageNode.setOperationMode(StorageNode.OperationMode.UNINSTALL);
- ResourceOperationSchedule schedule = new ResourceOperationSchedule(); - schedule.setResource(storageNode.getResource()); - schedule.setJobTrigger(JobTrigger.createNowTrigger()); - schedule.setSubject(subject); - schedule.setOperationName("uninstall"); - Configuration parameters = new Configuration(); - schedule.setParameters(parameters); - - operationManager.scheduleResourceOperation(subject, schedule); - -// Resource resource = storageNode.getResource(); -// storageNodeOperationsHandler.detachFromResource(storageNode); -// storageNodeOperationsHandler.deleteStorageNodeResource(subject, resource); + if (storageNode.getResource() == null) { + finishUninstall(subject, storageNode); + } else { + ResourceOperationSchedule schedule = new ResourceOperationSchedule(); + schedule.setResource(storageNode.getResource()); + schedule.setJobTrigger(JobTrigger.createNowTrigger()); + schedule.setSubject(subject); + schedule.setOperationName("uninstall"); + Configuration parameters = new Configuration(); + schedule.setParameters(parameters); + + operationManager.scheduleResourceOperation(subject, schedule); + } }
@Override @@ -173,13 +170,6 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa }
@Override - @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) - public void deleteStorageNodeResource(Subject subject, Resource resource) { - log.info("Preparing to delete storage node resource " + resource); - resourceFactoryManager.deleteResource(subject, resource.getId()); - } - - @Override public void decommissionStorageNode(Subject subject, StorageNode storageNode) { log.info("Preparing to decommission " + storageNode);
@@ -272,20 +262,25 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa
if (storageNode.getOperationMode() == StorageNode.OperationMode.DECOMMISSION) { storageNode.setOperationMode(StorageNode.OperationMode.REMOVE_MAINTENANCE); - List<StorageNode> clusterNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, - StorageNode.class).setParameter("operationMode", StorageNode.OperationMode.NORMAL) - .getResultList(); - for (StorageNode node : clusterNodes) { - node.setMaintenancePending(true); - } - boolean runRepair = storageNode.isMaintenancePending(); - performRemoveNodeMaintenance(subjectManager.getOverlord(), clusterNodes.get(0), runRepair, - createPropertyListOfAddresses(SEEDS_LIST, clusterNodes)); + performRemoveNodeMaintenance(subjectManager.getOverlord(), storageNode); } else { log.info("Remove node maintenance has already been run for " + storageNode); } }
+ @Override + public void performRemoveNodeMaintenance(Subject subject, StorageNode storageNode) { + List<StorageNode> clusterNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, + StorageNode.class).setParameter("operationMode", StorageNode.OperationMode.NORMAL) + .getResultList(); + for (StorageNode node : clusterNodes) { + node.setMaintenancePending(true); + } + boolean runRepair = storageNode.isMaintenancePending(); + performRemoveNodeMaintenance(subjectManager.getOverlord(), clusterNodes.get(0), runRepair, + createPropertyListOfAddresses(SEEDS_LIST, clusterNodes)); + } + private void performRemoveNodeMaintenance(Subject subject, StorageNode storageNode, boolean runRepair, PropertyList seedsList) { if (log.isInfoEnabled()) { @@ -583,16 +578,18 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa break; default: // SUCCESS log.info("Successfully uninstalled " + storageNode + " from disk"); - Resource resource = storageNode.getResource(); - - log.info("Remove storage node resource " + resource + " from inventory"); - - storageNodeOperationsHandler.detachFromResource(storageNode); - resourceManager.uninventoryResource(getSubject(operationHistory), resource.getId()); + uninstall(getSubject(operationHistory), storageNode); + } + }
- log.info("Removing storage node entity " + storageNode + " from database"); - entityManager.remove(storageNode); + private void finishUninstall(Subject subject, StorageNode storageNode) { + if (storageNode.getResource() != null) { + log.info("Removing storage node resource " + storageNode.getResource() + " from inventory"); + storageNodeOperationsHandler.detachFromResource(storageNode); + resourceManager.uninventoryResource(subject, storageNode.getResource().getId()); } + log.info("Removing storage node entity " + storageNode + " from database"); + entityManager.remove(storageNode); }
private Subject getSubject(ResourceOperationHistory resourceOperationHistory) { diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java index 7ed2c4d..5d08dd8 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java @@ -8,7 +8,6 @@ import org.rhq.core.domain.auth.Subject; import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.operation.OperationHistory; import org.rhq.core.domain.operation.ResourceOperationHistory; -import org.rhq.core.domain.resource.Resource;
/** * @author John Sanda @@ -46,11 +45,11 @@ public interface StorageNodeOperationsHandlerLocal {
void detachFromResource(StorageNode storageNode);
- void deleteStorageNodeResource(Subject subject, Resource resource); - void decommissionStorageNode(Subject subject, StorageNode storageNode);
void performRemoveNodeMaintenanceIfNecessary(InetAddress storageNodeAddress);
+ void performRemoveNodeMaintenance(Subject subject, StorageNode storageNode); + void logError(StorageNode.OperationMode newStorageNodeOperationMode, String error, Exception e); }
commit 81e62c8880839a62faf4a1efb90bfc7dc9529be6 Author: John Sanda jsanda@redhat.com Date: Sat Aug 17 12:06:03 2013 -0400
initial support for undeploying a storage node
Undeploying a storage node involves,
* removing the node from the cluster * updating existing nodes' cassandra.yaml to no longer reference the node * updating the internode auth conf file for existing nodes * shutting down the node and purging its files from disk * removing the node's resource from inventory * deleting the StorageNode entity
There is still a good bit of work left to do to handle various cases, like going back to 1 node and kicking off the undeployment in all the various states.
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java index 48d5f83..4d4596e 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java @@ -24,6 +24,7 @@ package org.rhq.core.domain.cloud;
import java.io.Serializable;
+import javax.persistence.CascadeType; import javax.persistence.Column; import javax.persistence.Entity; import javax.persistence.EnumType; @@ -146,7 +147,7 @@ public class StorageNode implements Serializable { private Resource resource;
@JoinColumn(name = "RESOURCE_OP_HIST_ID", referencedColumnName = "ID", nullable = true) - @OneToOne(optional = true) + @OneToOne(optional = true, cascade = {CascadeType.REMOVE}) private ResourceOperationHistory failedOperation;
// required for JPA @@ -261,17 +262,22 @@ public class StorageNode implements Serializable { }
public enum OperationMode { - + DECOMMISSION("Remove the storage node from service"), DOWN("This storage node is down"), // INSTALLED("This storage node is newly installed but not yet operational"), // MAINTENANCE("This storage node is in maintenance mode"), // NORMAL("This storage node is running normally"), ANNOUNCE("The storage node is installed but not yet part of the cluster. It is being announced so that it " + "can join the cluster."), + UNANNOUNCE("The storage node has been decommissioned and the cluster is being notified to stop accepting " + + "gossip from its IP address."), BOOTSTRAP("The storage is installed but not yet part of the cluster. It is getting bootstrapped into the " + "cluster"), ADD_NODE_MAINTENANCE("The storage node is running and is preparing to undergo routine maintenance that is " + - "necessary when a new node joins the cluster."); + "necessary when a new node joins the cluster."), + REMOVE_MAINTENANCE("The storage node is no longer part of the cluster. Remaining storage node are " + + "undergoing cluster maintenance due to the topology change."), + UNINSTALL("The storage node is being removed from inventory and its bits on disk are getting purged.");
public final String message;
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 5cce984..341cbc7 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -256,6 +256,31 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN } }
+ @Override + public void undeployStorageNode(Subject subject, StorageNode storageNode) { + storageNode = entityManager.find(StorageNode.class, storageNode.getId()); + switch (storageNode.getOperationMode()) { + case INSTALLED: + reset(); + storageNodeOperationsHandler.uninstall(subject, storageNode); + break; + case NORMAL: + reset(); + storageNodeOperationsHandler.decommissionStorageNode(subject, storageNode); + break; + case UNANNOUNCE: + reset(); + storageNodeOperationsHandler.unannounceStorageNode(subject, storageNode); + break; + case UNINSTALL: + reset(); + storageNodeOperationsHandler.uninstall(subject, storageNode); + break; + default: + + } + } + private void reset() { for (StorageNode storageNode : getStorageNodes()) { storageNode.setErrorMessage(null); diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java index 75a795c..58a06a7 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java @@ -170,4 +170,6 @@ public interface StorageNodeManagerLocal { StorageNode createStorageNode(Resource resource);
void deployStorageNode(Subject subject, StorageNode storageNode); + + void undeployStorageNode(Subject subject, StorageNode storageNode); } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java index 2255299..7ffb2a6 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java @@ -98,4 +98,6 @@ public interface StorageNodeManagerRemote { PageList<Alert> findAllStorageNodeAlerts(Subject subject, StorageNode storageNode);
void deployStorageNode(Subject sbubject, StorageNode storageNode); + + void undeployStorageNode(Subject subject, StorageNode storageNode); } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java index 7db95fb..bd2efbe 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java @@ -41,6 +41,8 @@ public class StorageClusterMonitor implements StorageStateListener { @Override public void onStorageNodeRemoved(InetAddress address) { log.info("Storage node at " + address.getHostAddress() + " has been removed from the cluster"); + StorageNodeOperationsHandlerLocal storageNodeOperationsHandler = LookupUtil.getStorageNodeOperationsHandler(); + storageNodeOperationsHandler.performRemoveNodeMaintenanceIfNecessary(address); }
@Override diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java index 4969c46..a30cfa7 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java @@ -2,7 +2,6 @@ package org.rhq.enterprise.server.storage;
import java.net.InetAddress; import java.util.ArrayList; -import java.util.LinkedList; import java.util.List;
import javax.ejb.Asynchronous; @@ -35,6 +34,8 @@ import org.rhq.enterprise.server.auth.SessionManager; import org.rhq.enterprise.server.auth.SubjectManagerLocal; import org.rhq.enterprise.server.cloud.StorageNodeManagerLocal; import org.rhq.enterprise.server.operation.OperationManagerLocal; +import org.rhq.enterprise.server.resource.ResourceFactoryManagerLocal; +import org.rhq.enterprise.server.resource.ResourceManagerLocal; import org.rhq.server.metrics.StorageSession;
/** @@ -74,6 +75,12 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa @EJB private StorageNodeOperationsHandlerLocal storageNodeOperationsHandler;
+ @EJB + private ResourceManagerLocal resourceManager; + + @EJB + private ResourceFactoryManagerLocal resourceFactoryManager; + @Override public void announceStorageNode(Subject subject, StorageNode storageNode) { if (log.isInfoEnabled()) { @@ -103,7 +110,33 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa schedule.setResource(clusterNode.getResource()); schedule.setJobTrigger(JobTrigger.createNowTrigger()); schedule.setSubject(subject); - schedule.setOperationName("updateKnownNodes"); + schedule.setOperationName("announce"); + Configuration parameters = new Configuration(); + parameters.put(addresses); + schedule.setParameters(parameters); + + operationManager.scheduleResourceOperation(subject, schedule); + } + + @Override + public void unannounceStorageNode(Subject subject, StorageNode storageNode) { + log.info("Unannouncing " + storageNode); + + storageNode.setOperationMode(StorageNode.OperationMode.UNANNOUNCE); + List<StorageNode> clusterNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, + StorageNode.class).setParameter("operationMode", StorageNode.OperationMode.NORMAL).getResultList(); + for (StorageNode clusterNode : clusterNodes) { + clusterNode.setMaintenancePending(true); + } + unannounceStorageNode(subject, clusterNodes.get(0), createPropertyListOfAddresses("addresses", clusterNodes)); + } + + private void unannounceStorageNode(Subject subject, StorageNode clusterNode, PropertyList addresses) { + ResourceOperationSchedule schedule = new ResourceOperationSchedule(); + schedule.setResource(clusterNode.getResource()); + schedule.setJobTrigger(JobTrigger.createNowTrigger()); + schedule.setSubject(subject); + schedule.setOperationName("unannounce"); Configuration parameters = new Configuration(); parameters.put(addresses); schedule.setParameters(parameters); @@ -112,10 +145,69 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa }
@Override + public void uninstall(Subject subject, StorageNode storageNode) { + log.info("Uninstalling " + storageNode); + + storageNode.setOperationMode(StorageNode.OperationMode.UNINSTALL); + + ResourceOperationSchedule schedule = new ResourceOperationSchedule(); + schedule.setResource(storageNode.getResource()); + schedule.setJobTrigger(JobTrigger.createNowTrigger()); + schedule.setSubject(subject); + schedule.setOperationName("uninstall"); + Configuration parameters = new Configuration(); + schedule.setParameters(parameters); + + operationManager.scheduleResourceOperation(subject, schedule); + +// Resource resource = storageNode.getResource(); +// storageNodeOperationsHandler.detachFromResource(storageNode); +// storageNodeOperationsHandler.deleteStorageNodeResource(subject, resource); + } + + @Override + @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) + public void detachFromResource(StorageNode storageNode) { + storageNode.setResource(null); + storageNode.setFailedOperation(null); + } + + @Override + @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) + public void deleteStorageNodeResource(Subject subject, Resource resource) { + log.info("Preparing to delete storage node resource " + resource); + resourceFactoryManager.deleteResource(subject, resource.getId()); + } + + @Override + public void decommissionStorageNode(Subject subject, StorageNode storageNode) { + log.info("Preparing to decommission " + storageNode); + + storageNode.setOperationMode(StorageNode.OperationMode.DECOMMISSION); + List<StorageNode> storageNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, + StorageNode.class).setParameter("operationMode", StorageNode.OperationMode.NORMAL).getResultList(); + storageNodes.add(storageNode); + + boolean runRepair = updateSchemaIfNecessary(storageNodes); + // This is a bit of a hack since the maintenancePending flag is really intended to + // queue up storage nodes during cluster maintenance operations. 
+ storageNode.setMaintenancePending(runRepair); + + ResourceOperationSchedule schedule = new ResourceOperationSchedule(); + schedule.setOperationName("decommission"); + schedule.setResource(storageNode.getResource()); + schedule.setJobTrigger(JobTrigger.createNowTrigger()); + schedule.setSubject(subject); + schedule.setParameters(new Configuration()); + + operationManager.scheduleResourceOperation(subject, schedule); + } + + @Override @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) public void logError(StorageNode.OperationMode newStorageNodeOperationMode, String error, Exception e) { try { - StorageNode newStorageNode = findNewStorgeNode(newStorageNodeOperationMode); + StorageNode newStorageNode = findStorageNodeByMode(newStorageNodeOperationMode); newStorageNode.setErrorMessage(error + " Check the server log for details. Root cause: " + ThrowableUtil.getRootCause(e).getMessage()); } catch (Exception e1) { @@ -157,8 +249,6 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa log.info("Running addNodeMaintenance for storage node " + storageNode); }
- Subject overlord = subjectManager.getOverlord(); - ResourceOperationSchedule schedule = new ResourceOperationSchedule(); schedule.setResource(storageNode.getResource()); schedule.setJobTrigger(JobTrigger.createNowTrigger()); @@ -172,7 +262,50 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa
schedule.setParameters(config);
- operationManager.scheduleResourceOperation(overlord, schedule); + operationManager.scheduleResourceOperation(subject, schedule); + } + + @Override + public void performRemoveNodeMaintenanceIfNecessary(InetAddress storageNodeAddress) { + StorageNode storageNode = entityManager.createNamedQuery(StorageNode.QUERY_FIND_BY_ADDRESS, + StorageNode.class).setParameter("address", storageNodeAddress.getHostAddress()).getSingleResult(); + + if (storageNode.getOperationMode() == StorageNode.OperationMode.DECOMMISSION) { + storageNode.setOperationMode(StorageNode.OperationMode.REMOVE_MAINTENANCE); + List<StorageNode> clusterNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, + StorageNode.class).setParameter("operationMode", StorageNode.OperationMode.NORMAL) + .getResultList(); + for (StorageNode node : clusterNodes) { + node.setMaintenancePending(true); + } + boolean runRepair = storageNode.isMaintenancePending(); + performRemoveNodeMaintenance(subjectManager.getOverlord(), clusterNodes.get(0), runRepair, + createPropertyListOfAddresses(SEEDS_LIST, clusterNodes)); + } else { + log.info("Remove node maintenance has already been run for " + storageNode); + } + } + + private void performRemoveNodeMaintenance(Subject subject, StorageNode storageNode, boolean runRepair, + PropertyList seedsList) { + if (log.isInfoEnabled()) { + log.info("Running remove node maintenance for storage node " + storageNode); + } + + ResourceOperationSchedule schedule = new ResourceOperationSchedule(); + schedule.setResource(storageNode.getResource()); + schedule.setJobTrigger(JobTrigger.createNowTrigger()); + schedule.setSubject(subject); + schedule.setOperationName("removeNodeMaintenance"); + + Configuration config = new Configuration(); + config.put(seedsList); + config.put(new PropertySimple(RUN_REPAIR_PROPERTY, runRepair)); + config.put(new PropertySimple(UPDATE_SEEDS_LIST, Boolean.TRUE)); + + schedule.setParameters(config); + + 
operationManager.scheduleResourceOperation(subject, schedule); }
@Override @@ -187,9 +320,9 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa return; }
- if (resourceOperationHistory.getOperationDefinition().getName().equals("updateKnownNodes")) { + if (resourceOperationHistory.getOperationDefinition().getName().equals("announce")) { try { - storageNodeOperationsHandler.handleUpdateKnownNodes(resourceOperationHistory); + storageNodeOperationsHandler.handleAnnounce(resourceOperationHistory); } catch (Exception e) { String msg = "Aborting storage node deployment due to unexpected error while announcing cluster nodes."; log.error(msg, e); @@ -212,13 +345,44 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa log.error(msg, e); storageNodeOperationsHandler.logError(StorageNode.OperationMode.ADD_NODE_MAINTENANCE, msg, e); } + } else if (operationHistory.getOperationDefinition().getName().equals("decommission")) { + try { + storageNodeOperationsHandler.handleDecommission(resourceOperationHistory); + } catch (Exception e) { + String msg = "Aborting undeployment due to unexpected error while decommissioning storage node."; + log.error(msg, e); + storageNodeOperationsHandler.logError(StorageNode.OperationMode.DECOMMISSION, msg, e); + } + } else if (operationHistory.getOperationDefinition().getName().equals("removeNodeMaintenance")) { + try { + storageNodeOperationsHandler.handleRemoveNodeMaintenance(resourceOperationHistory); + } catch (Exception e) { + String msg = "Aborting undeployment due to unexpected error while performing remove node maintenance."; + log.error(msg, e); + storageNodeOperationsHandler.logError(StorageNode.OperationMode.REMOVE_MAINTENANCE, msg, e); + } + } else if (operationHistory.getOperationDefinition().getName().equals("unannounce")) { + try { + storageNodeOperationsHandler.handleUnannounce(resourceOperationHistory); + } catch (Exception e) { + String msg = "Aborting undeployment due to unexpected error while performing unannouncement."; + log.error(msg, e); + storageNodeOperationsHandler.logError(StorageNode.OperationMode.UNANNOUNCE, msg, e); + } + } else if 
(operationHistory.getOperationDefinition().getName().equals("uninstall")) { + try { + storageNodeOperationsHandler.handleUninstall(resourceOperationHistory); + } catch (Exception e) { + String msg = "Aborting undeployment due to unexpected error while uninstalling."; + log.error(msg, e); + storageNodeOperationsHandler.logError(StorageNode.OperationMode.UNINSTALL, msg, e); + } } - }
@Override @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) - public void handleUpdateKnownNodes(ResourceOperationHistory resourceOperationHistory) { + public void handleAnnounce(ResourceOperationHistory resourceOperationHistory) { StorageNode storageNode = findStorageNode(resourceOperationHistory.getResource()); StorageNode newStorageNode = null; switch (resourceOperationHistory.getStatus()) { @@ -226,11 +390,11 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa // nothing to do here return; case CANCELED: - newStorageNode = findNewStorgeNode(StorageNode.OperationMode.ANNOUNCE); - operationCanceled(storageNode, resourceOperationHistory, newStorageNode); + newStorageNode = findStorageNodeByMode(StorageNode.OperationMode.ANNOUNCE); + deploymentOperationCanceled(storageNode, resourceOperationHistory, newStorageNode); case FAILURE: - newStorageNode = findNewStorgeNode(StorageNode.OperationMode.ANNOUNCE); - operationFailed(storageNode, resourceOperationHistory, newStorageNode); + newStorageNode = findStorageNodeByMode(StorageNode.OperationMode.ANNOUNCE); + deploymentOperationFailed(storageNode, resourceOperationHistory, newStorageNode); return; default: // SUCCESS storageNode.setMaintenancePending(false); @@ -238,7 +402,7 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa PropertyList addresses = parameters.getList("addresses"); StorageNode nextNode = takeFromMaintenanceQueue();
- newStorageNode = findNewStorgeNode(StorageNode.OperationMode.ANNOUNCE); + newStorageNode = findStorageNodeByMode(StorageNode.OperationMode.ANNOUNCE); Subject subject = getSubject(resourceOperationHistory);
if (nextNode == null) { @@ -253,6 +417,41 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa
@Override @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) + public void handleUnannounce(ResourceOperationHistory operationHistory) { + StorageNode storageNode = findStorageNode(operationHistory.getResource()); + StorageNode removedStorageNode = null; + switch (operationHistory.getStatus()) { + case INPROGRESS: + // nothing to do here + break; + case CANCELED: + removedStorageNode = findStorageNodeByMode(StorageNode.OperationMode.UNANNOUNCE); + undeploymentOperationCanceled(storageNode, operationHistory, removedStorageNode); + break; + case FAILURE: + removedStorageNode = findStorageNodeByMode(StorageNode.OperationMode.UNANNOUNCE); + deploymentOperationFailed(storageNode, operationHistory, removedStorageNode); + break; + default: // SUCCESS + storageNode.setMaintenancePending(false); + + removedStorageNode = findStorageNodeByMode(StorageNode.OperationMode.UNANNOUNCE); + StorageNode nextNode = takeFromMaintenanceQueue(); + Subject subject = getSubject(operationHistory); + Configuration params = operationHistory.getParameters(); + PropertyList addresses = params.getList("addresses"); + + if (nextNode == null) { + log.info("Successfully unannounced " + removedStorageNode + " to storage cluster"); + uninstall(getSubject(operationHistory), removedStorageNode); + } else { + unannounceStorageNode(subject, nextNode, addresses.deepCopy(false)); + } + } + } + + @Override + @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) public void handlePrepareForBootstrap(ResourceOperationHistory resourceOperationHistory) { StorageNode newStorageNode = findStorageNode(resourceOperationHistory.getResource()); switch (resourceOperationHistory.getStatus()) { @@ -264,10 +463,10 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa // If the operation is canceled the plugin will get an InterruptedException. 
// The actual bootstrapping may very well complete so we need to add in some // checks to find out if the node is up and part of the cluster. - operationCanceled(newStorageNode, resourceOperationHistory); + deploymentOperationCanceled(newStorageNode, resourceOperationHistory); return; case FAILURE: - operationFailed(newStorageNode, resourceOperationHistory); + deploymentOperationFailed(newStorageNode, resourceOperationHistory); return; default: // SUCCESS // Nothing to do because we wait for the C* driver to notify us that the @@ -285,24 +484,22 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa // nothing to do here return; case CANCELED: - newStorageNode = findNewStorgeNode(StorageNode.OperationMode.ADD_NODE_MAINTENANCE); - operationCanceled(storageNode, resourceOperationHistory, newStorageNode); + newStorageNode = findStorageNodeByMode(StorageNode.OperationMode.ADD_NODE_MAINTENANCE); + deploymentOperationCanceled(storageNode, resourceOperationHistory, newStorageNode); return; case FAILURE: - newStorageNode = findNewStorgeNode(StorageNode.OperationMode.ADD_NODE_MAINTENANCE); - operationFailed(storageNode, resourceOperationHistory, newStorageNode); + newStorageNode = findStorageNodeByMode(StorageNode.OperationMode.ADD_NODE_MAINTENANCE); + deploymentOperationFailed(storageNode, resourceOperationHistory, newStorageNode); return; default: // SUCCESS - if (log.isInfoEnabled()) { - log.info("Finished cluster maintenance for " + storageNode + " for addition of new node"); - } + log.info("Finished running add node maintenance for " + storageNode); storageNode.setMaintenancePending(false); StorageNode nextNode = takeFromMaintenanceQueue();
if (nextNode == null) { - log.info("Finished running cluster maintenance for addition of new node"); + log.info("Finished running add node maintenance on all cluster nodes"); // TODO replace this with an UPDATE statement - newStorageNode = findNewStorgeNode(StorageNode.OperationMode.ADD_NODE_MAINTENANCE); + newStorageNode = findStorageNodeByMode(StorageNode.OperationMode.ADD_NODE_MAINTENANCE); newStorageNode.setOperationMode(StorageNode.OperationMode.NORMAL); } else { Configuration parameters = resourceOperationHistory.getParameters(); @@ -314,56 +511,176 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa } }
+ @Override + @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) + public void handleRemoveNodeMaintenance(ResourceOperationHistory operationHistory) { + StorageNode storageNode = findStorageNode(operationHistory.getResource()); + StorageNode removedStorageNode = null; + switch (operationHistory.getStatus()) { + case INPROGRESS: + // nothing to do here + break; + case CANCELED: + removedStorageNode = findStorageNodeByMode(StorageNode.OperationMode.REMOVE_MAINTENANCE); + undeploymentOperationCanceled(storageNode, operationHistory, removedStorageNode); + break; + case FAILURE: + removedStorageNode = findStorageNodeByMode(StorageNode.OperationMode.REMOVE_MAINTENANCE); + undeploymentOperationFailed(storageNode, operationHistory, removedStorageNode); + break; + default: // SUCCESS + log.info("Finished remove node maintenance for " + storageNode); + storageNode.setMaintenancePending(false); + StorageNode nextNode = takeFromMaintenanceQueue(); + + if (nextNode == null) { + log.info("Finished running remove node maintenance on all cluster nodes"); + // TODO replace this with an UPDATE statement + removedStorageNode = findStorageNodeByMode(StorageNode.OperationMode.REMOVE_MAINTENANCE); + unannounceStorageNode(getSubject(operationHistory), removedStorageNode); + } else { + Configuration parameters = operationHistory.getParameters(); + boolean runRepair = parameters.getSimple(RUN_REPAIR_PROPERTY).getBooleanValue(); + PropertyList seedsList = parameters.getList(SEEDS_LIST).deepCopy(false); + Subject subject = getSubject(operationHistory); + performRemoveNodeMaintenance(subject, nextNode, runRepair, seedsList); + } + } + } + + @Override + @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) + public void handleDecommission(ResourceOperationHistory operationHistory) { + StorageNode storageNode = findStorageNode(operationHistory.getResource()); + switch (operationHistory.getStatus()) { + case INPROGRESS: + // nothing do to here + break; + case CANCELED: + 
undeploymentOperationCanceled(storageNode, operationHistory); + break; + case FAILURE: + undeploymentOperationFailed(storageNode, operationHistory); + break; + default: // SUCCESS + log.info("Successfully decommissioned " + storageNode); + } + } + + @Override + @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) + public void handleUninstall(ResourceOperationHistory operationHistory) { + StorageNode storageNode = findStorageNode(operationHistory.getResource()); + switch (operationHistory.getStatus()) { + case INPROGRESS: + // nothing to do here + break; + case CANCELED: + undeploymentOperationCanceled(storageNode, operationHistory); + break; + case FAILURE: + undeploymentOperationFailed(storageNode, operationHistory); + break; + default: // SUCCESS + log.info("Successfully uninstalled " + storageNode + " from disk"); + Resource resource = storageNode.getResource(); + + log.info("Remove storage node resource " + resource + " from inventory"); + + storageNodeOperationsHandler.detachFromResource(storageNode); + resourceManager.uninventoryResource(getSubject(operationHistory), resource.getId()); + + log.info("Removing storage node entity " + storageNode + " from database"); + entityManager.remove(storageNode); + } + } + private Subject getSubject(ResourceOperationHistory resourceOperationHistory) { Subject subject = subjectManager.getSubjectByName(resourceOperationHistory.getSubjectName()); return SessionManager.getInstance().put(subject); }
- private void operationCanceled(StorageNode storageNode, ResourceOperationHistory operationHistory, + private void deploymentOperationCanceled(StorageNode storageNode, ResourceOperationHistory operationHistory, StorageNode newStorageNode) { - log.error("Deployment has been aborted due to canceled operation [" + + operationCanceled(storageNode, operationHistory, newStorageNode, "Deployment"); + } + + private void undeploymentOperationCanceled(StorageNode storageNode, ResourceOperationHistory operationHistory, + StorageNode removedStorageNode) { + operationCanceled(storageNode, operationHistory, removedStorageNode, "Undeployment"); + } + + private void operationCanceled(StorageNode storageNode, ResourceOperationHistory operationHistory, + StorageNode movingNode, String opType) { + log.error(opType + " has been aborted due to canceled operation [" + operationHistory.getOperationDefinition().getDisplayName() + " on " + storageNode.getResource() + ": " + operationHistory.getErrorMessage());
- newStorageNode.setErrorMessage("Deployment has been aborted due to canceled resource operation on " + + movingNode.setErrorMessage(opType + " has been aborted due to canceled resource operation on " + storageNode.getAddress()); - storageNode.setErrorMessage("Deployment of " + newStorageNode.getAddress() + " has been aborted due " + + storageNode.setErrorMessage(opType + " of " + movingNode.getAddress() + " has been aborted due " + "to cancellation of resource operation [" + operationHistory.getOperationDefinition().getDisplayName() + "]."); storageNode.setFailedOperation(operationHistory); }
- private void operationCanceled(StorageNode newStorageNode, ResourceOperationHistory operationHistory) { - log.error("Deployment has been aborted due to canceled operation [" + - operationHistory.getOperationDefinition().getDisplayName() + " on " + newStorageNode.getResource() + + private void deploymentOperationCanceled(StorageNode newStorageNode, ResourceOperationHistory operationHistory) { + operationCanceled(newStorageNode, operationHistory, "Deployment"); + } + + private void undeploymentOperationCanceled(StorageNode storageNode, ResourceOperationHistory operationHistory) { + operationCanceled(storageNode, operationHistory, "Undeployment"); + } + + private void operationCanceled(StorageNode storageNode, ResourceOperationHistory operationHistory, String opType) { + log.error(opType + " has been aborted due to canceled operation [" + + operationHistory.getOperationDefinition().getDisplayName() + " on " + storageNode.getResource() + ": " + operationHistory.getErrorMessage());
- newStorageNode.setErrorMessage("Deployment has been aborted due to canceled resource operation [" + + storageNode.setErrorMessage(opType + " has been aborted due to canceled resource operation [" + operationHistory.getOperationDefinition().getDisplayName() + "]."); - newStorageNode.setFailedOperation(operationHistory); + storageNode.setFailedOperation(operationHistory); }
- private void operationFailed(StorageNode storageNode, ResourceOperationHistory operationHistory, + private void deploymentOperationFailed(StorageNode storageNode, ResourceOperationHistory operationHistory, StorageNode newStorageNode) { - log.error("Deployment has been aborted due to failed operation [" + - operationHistory.getOperationDefinition().getDisplayName() + "] on " + storageNode.getResource() + - ": " + operationHistory.getErrorMessage()); + operationFailed(storageNode, operationHistory, newStorageNode, "Deployment"); + }
- newStorageNode.setErrorMessage("Deployment has been aborted due to failed resource operation on " + - storageNode.getAddress()); - storageNode.setErrorMessage("Deployment of " + newStorageNode.getAddress() + " has been aborted due " + - "to failed resource operation [" + operationHistory.getOperationDefinition().getDisplayName() + "]."); - storageNode.setFailedOperation(operationHistory); + private void undeploymentOperationFailed(StorageNode storageNode, ResourceOperationHistory operationHistory, + StorageNode removedNode) { + operationFailed(storageNode, operationHistory, removedNode, "Undeployment"); + } + + private void deploymentOperationFailed(StorageNode storageNode, ResourceOperationHistory operationHistory) { + operationFailed(storageNode, operationHistory, "Deployment"); + } + + private void undeploymentOperationFailed(StorageNode storageNode, ResourceOperationHistory operationHistory) { + operationFailed(storageNode, operationHistory, "Undeployment"); }
- private void operationFailed(StorageNode newStorageNode, ResourceOperationHistory operationHistory) { - log.error("Deployment has been aborted due to failed operation [" + - operationHistory.getOperationDefinition().getDisplayName() + "] on " + newStorageNode.getResource() + + private void operationFailed(StorageNode storageNode, ResourceOperationHistory operationHistory, String opType) { + log.error(opType + " has been aborted due to failed operation [" + + operationHistory.getOperationDefinition().getDisplayName() + "] on " + storageNode.getResource() + ": " + operationHistory.getErrorMessage());
- newStorageNode.setErrorMessage("Deployment has been aborted due to failed resource operation [" + + storageNode.setErrorMessage(opType + " has been aborted due to failed resource operation [" + operationHistory.getOperationDefinition().getDisplayName() + "]."); - newStorageNode.setFailedOperation(operationHistory); + storageNode.setFailedOperation(operationHistory); + } + + private void operationFailed(StorageNode storageNode, ResourceOperationHistory operationHistory, + StorageNode movingNode, String opType) { + log.error(opType + " has been aborted due to failed operation [" + + operationHistory.getOperationDefinition().getDisplayName() + "] on " + storageNode.getResource() + + ": " + operationHistory.getErrorMessage()); + + movingNode.setErrorMessage(opType + " has been aborted due to failed resource operation on " + + storageNode.getAddress()); + storageNode.setErrorMessage(opType + " of " + movingNode.getAddress() + " has been aborted due " + + "to failed resource operation [" + operationHistory.getOperationDefinition().getDisplayName() + "]."); + storageNode.setFailedOperation(operationHistory); }
private StorageNode findStorageNode(Resource resource) { @@ -417,7 +734,7 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa return storageNodes.get(0); }
- private StorageNode findNewStorgeNode(StorageNode.OperationMode operationMode) { + private StorageNode findStorageNodeByMode(StorageNode.OperationMode operationMode) { return entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, StorageNode.class) .setParameter("operationMode", operationMode).getSingleResult(); } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java index 83b0ce5..7ed2c4d 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java @@ -8,6 +8,7 @@ import org.rhq.core.domain.auth.Subject; import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.operation.OperationHistory; import org.rhq.core.domain.operation.ResourceOperationHistory; +import org.rhq.core.domain.resource.Resource;
/** * @author John Sanda @@ -17,19 +18,39 @@ public interface StorageNodeOperationsHandlerLocal { @Asynchronous void handleOperationUpdateIfNecessary(OperationHistory operationHistory);
- void handleUpdateKnownNodes(ResourceOperationHistory operationHistory); + void handleAnnounce(ResourceOperationHistory operationHistory); + + void handleUnannounce(ResourceOperationHistory operationHistory);
void handlePrepareForBootstrap(ResourceOperationHistory operationHistory);
void handleAddNodeMaintenance(ResourceOperationHistory operationHistory);
+ void handleRemoveNodeMaintenance(ResourceOperationHistory operationHistory); + + void handleDecommission(ResourceOperationHistory operationHistory); + + void handleUninstall(ResourceOperationHistory operationHistory); + void announceStorageNode(Subject subject, StorageNode storageNode);
+ void unannounceStorageNode(Subject subject, StorageNode storageNode); + void bootstrapStorageNode(Subject subject, StorageNode storageNode);
void performAddNodeMaintenanceIfNecessary(InetAddress storageNodeAddress);
void performAddNodeMaintenance(Subject subject, StorageNode storageNode);
+ void uninstall(Subject subject, StorageNode storageNode); + + void detachFromResource(StorageNode storageNode); + + void deleteStorageNodeResource(Subject subject, Resource resource); + + void decommissionStorageNode(Subject subject, StorageNode storageNode); + + void performRemoveNodeMaintenanceIfNecessary(InetAddress storageNodeAddress); + void logError(StorageNode.OperationMode newStorageNodeOperationMode, String error, Exception e); } diff --git a/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java b/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java index 5692bea..1bf9683 100644 --- a/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java +++ b/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java @@ -67,7 +67,8 @@ public class AlertDefinitionServerPluginComponent implements ServerPluginCompone private static final String FREE_DISK_TO_DATA_SIZE_RATIO_METRIC_NAME = "Calculated.FreeDiskToDataSizeRatio"; private static final String TAKE_SNAPSHOT_OPERATION_NAME = "takeSnapshot"; private static final String[] MAINTENANCE_OPERATIONS = new String[] { "readRepair", "addNodeMaintenance", - "updateKnownNodes", "prepareForBootstrap", "prepareForUpgrade", "updateSeedsList", "updateConfiguration" }; + "removeNodeMaintenance", "announce", "unannounce", "prepareForBootstrap", "prepareForUpgrade", + "updateSeedsList", "updateConfiguration" };
static private final List<InjectedTemplate> injectedTemplates; static private final InjectedTemplate storageNodeHighHeapTemplate;
commit 29776cafb9e8965a35dda988b5803ebe5fc605aa Author: John Sanda jsanda@redhat.com Date: Sat Aug 17 11:57:38 2013 -0400
Implement storage node uninstall as resource operation
The uninstall task involves purging the storage node bits from disk and removing it from inventory. I previously implemented this by implementing the DeleteResourceFacet. This was problematic though because the server side logic for the deploy/undeploy work flows are centered around resource operations.
diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java index 64d672d..a82992e 100644 --- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java @@ -55,7 +55,6 @@ import org.rhq.core.domain.configuration.PropertyMap; import org.rhq.core.domain.configuration.PropertySimple; import org.rhq.core.pluginapi.configuration.ConfigurationFacet; import org.rhq.core.pluginapi.configuration.ConfigurationUpdateReport; -import org.rhq.core.pluginapi.inventory.DeleteResourceFacet; import org.rhq.core.pluginapi.inventory.ProcessScanResult; import org.rhq.core.pluginapi.inventory.ResourceContext; import org.rhq.core.pluginapi.operation.OperationFacet; @@ -71,8 +70,7 @@ import org.rhq.plugins.cassandra.util.KeyspaceService; /** * @author John Sanda */ -public class StorageNodeComponent extends CassandraNodeComponent implements OperationFacet, ConfigurationFacet, - DeleteResourceFacet { +public class StorageNodeComponent extends CassandraNodeComponent implements OperationFacet, ConfigurationFacet {
private Log log = LogFactory.getLog(StorageNodeComponent.class);
@@ -91,29 +89,6 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper configDelegate.updateResourceConfiguration(configurationUpdateReport); }
- @Override - public void deleteResource() throws Exception { - OperationResult shutdownResult = shutdownIfNecessary(); - if (shutdownResult.getErrorMessage() != null) { - throw new Exception("Cannot delete storage node [resourceKey: " + getResourceContext().getResourceKey() + - "]: " + shutdownResult.getErrorMessage()); - } - - log.info("Purging data directories"); - Configuration pluginConfig = getResourceContext().getPluginConfiguration(); - String yamlProp = pluginConfig.getSimpleValue("yamlConfiguration"); - File yamlFile = new File(yamlProp); - ConfigEditor yamlEditor = new ConfigEditor(yamlFile); - yamlEditor.load(); - purgeDataDirs(yamlEditor); - - File basedir = getBasedir(); - log.info("Purging installation directory " + basedir); - purgeDir(basedir); - - log.info("Finished deleting storage node " + getResourceContext().getResourceKey()); - } - private OperationResult shutdownIfNecessary() { log.info("Shutting down " + getResourceContext().getResourceKey());
@@ -159,14 +134,18 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper return readRepair(); } else if (name.equals("updateConfiguration")) { return updateConfiguration(parameters); - } else if (name.equals("updateKnownNodes")) { - return updateKnownNodes(parameters); + } else if (name.equals("announce")) { + return announce(parameters); + } else if (name.equals("unannounce")) { + return unannounce(parameters); } else if (name.equals("prepareForBootstrap")) { return prepareForBootstrap(parameters); } else if (name.equals("shutdown")) { return shutdownStorageNode(); } else if (name.equals("decommission")) { return decommission(); + } else if (name.equals("uninstall")) { + return uninstall(); } else { return super.invokeOperation(name, parameters); } @@ -304,6 +283,41 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper return result; }
+ private OperationResult uninstall() { + OperationResult result = new OperationResult(); + OperationResult shutdownResult = shutdownIfNecessary(); + if (shutdownResult.getErrorMessage() != null) { + result.setErrorMessage("Failed to shut down storage node: " + shutdownResult.getErrorMessage()); + } else { + File basedir = getBasedir(); + if (basedir.exists()) { + log.info("Purging data directories"); + Configuration pluginConfig = getResourceContext().getPluginConfiguration(); + String yamlProp = pluginConfig.getSimpleValue("yamlConfiguration"); + File yamlFile = new File(yamlProp); + ConfigEditor yamlEditor = new ConfigEditor(yamlFile); + yamlEditor.load(); + purgeDataDirs(yamlEditor); + + log.info("Purging installation directory " + basedir); + purgeDir(basedir); + + log.info("Finished deleting storage node " + getResourceContext().getResourceKey()); + } else { + log.info(basedir + " does not exist. Storage node files have already been purged."); + } + } + return result; + } + + private OperationResult announce(Configuration params) { + return updateKnownNodes(params); + } + + private OperationResult unannounce(Configuration params) { + return updateKnownNodes(params); + } + private OperationResult updateKnownNodes(Configuration params) { OperationResult result = new OperationResult();
diff --git a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml index 95c1723..5159b95 100644 --- a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml +++ b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml @@ -12,8 +12,7 @@ name="RHQ Storage Node" discovery="StorageNodeDiscoveryComponent" class="StorageNodeComponent" - description="RHQ Storage Node" - createDeletePolicy="delete-only"> + description="RHQ Storage Node">
<subcategories> <subcategory name="Client Request Metrics" description="Client Request Metrics"/> @@ -67,7 +66,9 @@ </results> </operation>
- <operation name="decommission" description="Take the Cassandra node out of service"/> + <operation name="decommission" description="Take the storage node out of service"/> + + <operation name="uninstall" description="Removes all of the storage node files from disk"/>
<operation name="readRepair" description="Runs read repair on primar range of rhq and system_auth keyspaces"> <results> @@ -119,7 +120,18 @@ </results> </operation>
- <operation name="updateKnownNodes"> + <operation name="announce"> + <parameters> + <c:list-property name="addresses"> + <c:simple-property name="address"/> + </c:list-property> + </parameters> + <results> + <c:simple-property name="details"/> + </results> + </operation> + + <operation name="unannounce"> <parameters> <c:list-property name="addresses"> <c:simple-property name="address"/>
commit 778600f2675ae515b9bf05440b8498ef72d3bbc0 Author: John Sanda jsanda@redhat.com Date: Fri Aug 16 22:00:19 2013 -0400
adding plugin support for decommissioning and uninstalling a storage node
diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java index a24a219..64d672d 100644 --- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java @@ -39,6 +39,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.hyperic.sigar.SigarException; import org.mc4j.ems.connection.EmsConnection; +import org.mc4j.ems.connection.EmsInvocationException; import org.mc4j.ems.connection.bean.EmsBean; import org.mc4j.ems.connection.bean.attribute.EmsAttribute; import org.mc4j.ems.connection.bean.operation.EmsOperation; @@ -54,6 +55,7 @@ import org.rhq.core.domain.configuration.PropertyMap; import org.rhq.core.domain.configuration.PropertySimple; import org.rhq.core.pluginapi.configuration.ConfigurationFacet; import org.rhq.core.pluginapi.configuration.ConfigurationUpdateReport; +import org.rhq.core.pluginapi.inventory.DeleteResourceFacet; import org.rhq.core.pluginapi.inventory.ProcessScanResult; import org.rhq.core.pluginapi.inventory.ResourceContext; import org.rhq.core.pluginapi.operation.OperationFacet; @@ -69,7 +71,8 @@ import org.rhq.plugins.cassandra.util.KeyspaceService; /** * @author John Sanda */ -public class StorageNodeComponent extends CassandraNodeComponent implements OperationFacet, ConfigurationFacet { +public class StorageNodeComponent extends CassandraNodeComponent implements OperationFacet, ConfigurationFacet, + DeleteResourceFacet {
private Log log = LogFactory.getLog(StorageNodeComponent.class);
@@ -88,11 +91,54 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper configDelegate.updateResourceConfiguration(configurationUpdateReport); }
+ @Override + public void deleteResource() throws Exception { + OperationResult shutdownResult = shutdownIfNecessary(); + if (shutdownResult.getErrorMessage() != null) { + throw new Exception("Cannot delete storage node [resourceKey: " + getResourceContext().getResourceKey() + + "]: " + shutdownResult.getErrorMessage()); + } + + log.info("Purging data directories"); + Configuration pluginConfig = getResourceContext().getPluginConfiguration(); + String yamlProp = pluginConfig.getSimpleValue("yamlConfiguration"); + File yamlFile = new File(yamlProp); + ConfigEditor yamlEditor = new ConfigEditor(yamlFile); + yamlEditor.load(); + purgeDataDirs(yamlEditor); + + File basedir = getBasedir(); + log.info("Purging installation directory " + basedir); + purgeDir(basedir); + + log.info("Finished deleting storage node " + getResourceContext().getResourceKey()); + } + + private OperationResult shutdownIfNecessary() { + log.info("Shutting down " + getResourceContext().getResourceKey()); + + ProcessInfo process = getResourceContext().getNativeProcess(); + if (process == null) { + File pidFile = new File(getBinDir(), "cassandra.pid"); + if (pidFile.exists()) { + return shutdownStorageNode(); + } else { + return new OperationResult("Storage node is not running"); + } + } else { + return shutdownStorageNode(); + } + } + private File getBasedir() { Configuration pluginConfig = getResourceContext().getPluginConfiguration(); return new File(pluginConfig.getSimpleValue("baseDir")); }
+ private File getBinDir() { + return new File(getBasedir(), "bin"); + } + private File getConfDir() { return new File(getBasedir(), "conf"); } @@ -105,6 +151,8 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper public OperationResult invokeOperation(String name, Configuration parameters) throws Exception { if (name.equals("addNodeMaintenance")) { return nodeAdded(parameters); + } else if (name.equals("removeNodeMaintenance")) { + return nodeRemoved(parameters); } else if (name.equals("prepareForUpgrade")) { return prepareForUpgrade(parameters); } else if (name.equals("readRepair")) { @@ -117,6 +165,8 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper return prepareForBootstrap(parameters); } else if (name.equals("shutdown")) { return shutdownStorageNode(); + } else if (name.equals("decommission")) { + return decommission(); } else { return super.invokeOperation(name, parameters); } @@ -237,6 +287,23 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper return result; }
+ private OperationResult decommission() { + log.info("Decommissioning " + getResourceContext().getResourceKey()); + + OperationResult result = new OperationResult(); + try { + EmsConnection emsConnection = getEmsConnection(); + EmsBean storageService = emsConnection.getBean("org.apache.cassandra.db:type=StorageService"); + Class<?>[] emptyParams = new Class<?>[0]; + + EmsOperation operation = storageService.getOperation("decommission", emptyParams); + operation.invoke((Object[]) emptyParams); + } catch (EmsInvocationException e) { + result.setErrorMessage("Decommission operation failed: " + ThrowableUtil.getAllMessages(e)); + } + return result; + } + private OperationResult updateKnownNodes(Configuration params) { OperationResult result = new OperationResult();
@@ -295,11 +362,7 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper try { configEditor.load();
- purgeDir(new File(configEditor.getCommitLogDirectory())); - for (String dir : configEditor.getDataFileDirectories()) { - purgeDir(new File(dir)); - } - purgeDir(new File(configEditor.getSavedCachesDirectory())); + purgeDataDirs(configEditor);
log.info("Updating cluster settings");
@@ -357,6 +420,14 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper } }
+ private void purgeDataDirs(ConfigEditor configEditor) { + purgeDir(new File(configEditor.getCommitLogDirectory())); + for (String dir : configEditor.getDataFileDirectories()) { + purgeDir(new File(dir)); + } + purgeDir(new File(configEditor.getSavedCachesDirectory())); + } + private void purgeDir(File dir) { log.info("Purging " + dir); FileUtil.purge(dir, true); @@ -377,6 +448,14 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper }
private OperationResult nodeAdded(Configuration params) { + return performTopologyChangeMaintenance(params); + } + + private OperationResult nodeRemoved(Configuration params) { + return performTopologyChangeMaintenance(params); + } + + private OperationResult performTopologyChangeMaintenance(Configuration params) { boolean runRepair = params.getSimple("runRepair").getBooleanValue(); boolean updateSeedsList = params.getSimple("updateSeedsList").getBooleanValue();
diff --git a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml index cc01c9d..95c1723 100644 --- a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml +++ b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml @@ -12,7 +12,8 @@ name="RHQ Storage Node" discovery="StorageNodeDiscoveryComponent" class="StorageNodeComponent" - description="RHQ Storage Node"> + description="RHQ Storage Node" + createDeletePolicy="delete-only">
<subcategories> <subcategory name="Client Request Metrics" description="Client Request Metrics"/> @@ -66,6 +67,8 @@ </results> </operation>
+ <operation name="decommission" description="Take the Cassandra node out of service"/> + <operation name="readRepair" description="Runs read repair on primar range of rhq and system_auth keyspaces"> <results> <c:list-property name="results"> @@ -97,6 +100,25 @@ </results> </operation>
+ <operation name="removeNodeMaintenance"> + <parameters> + <c:simple-property name="runRepair" type="boolean" default="true"/> + <c:simple-property name="updateSeedsList" type="boolean" default="true"/> + <c:list-property name="seedsList"> + <c:simple-property name="seed" type="string"/> + </c:list-property> + </parameters> + <results> + <c:list-property name="results"> + <c:map-property name="resultsMap"> + <c:simple-property name="task" type="string"/> + <c:simple-property name="succeeded" type="boolean"/> + <c:simple-property name="details" type="string"/> + </c:map-property> + </c:list-property> + </results> + </operation> + <operation name="updateKnownNodes"> <parameters> <c:list-property name="addresses">
commit dafb75cdcf9bf64c664012749ae0460fdb843c34 Author: mtho11 mikecthompson@gmail.com Date: Thu Aug 15 21:56:44 2013 -0700
Fix refresh to keep graph open after auto-refresh.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceDetailView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceDetailView.java index ae51195..9bd1ea6 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceDetailView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceDetailView.java @@ -114,6 +114,8 @@ public class ResourceDetailView extends
private ResourceComposite resourceComposite;
+ private MetricsResourceView metricsResourceView; + //private List<ResourceSelectListener> selectListeners = new ArrayList<ResourceSelectListener>();
private TwoLevelTab summaryTab; @@ -391,7 +393,10 @@ public class ResourceDetailView extends viewFactory = (!visible) ? null : new ViewFactory() { @Override public Canvas createView() { - return new MetricsResourceView(resource); + if(null == metricsResourceView){ + metricsResourceView = new MetricsResourceView(resource); + } + return metricsResourceView; } }; updateSubTab(this.monitoringTab, this.monitorMetrics, visible, visibleToIE8, viewFactory); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java index db9abd4..242efe0 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java @@ -81,6 +81,7 @@ import org.rhq.enterprise.gui.coregui.client.util.preferences.MeasurementUserPre public class MetricsTableView extends Table<MetricsViewDataSource> implements Refreshable {
private final Resource resource; + private boolean rendered = false; private final AbstractD3GraphListView abstractD3GraphListView;
private final MeasurementUserPreferences measurementUserPrefs; @@ -115,19 +116,22 @@ public class MetricsTableView extends Table<MetricsViewDataSource> implements Re ArrayList<ListGridField> fields = getDataSource().getListGridFields(); setListGridFields(fields.toArray(new ListGridField[0]));
- addTableAction(MSG.view_measureTable_getLive(), new ShowLiveDataTableAction(this)); - addExtraWidget(addToDashboardComponent, false); - addToDashboardComponent.disableAddToDashboardButton(); - metricsTableListGrid.addSelectionChangedHandler(new SelectionChangedHandler() { - @Override - public void onSelectionChanged(SelectionEvent selectionEvent) { - if (metricsTableListGrid.getSelectedRecords().length > 0) { - addToDashboardComponent.enableAddToDashboardButton(); - } else { - addToDashboardComponent.disableAddToDashboardButton(); + if(!rendered){ + addTableAction(MSG.view_measureTable_getLive(), new ShowLiveDataTableAction(this)); + addExtraWidget(addToDashboardComponent, false); + addToDashboardComponent.disableAddToDashboardButton(); + metricsTableListGrid.addSelectionChangedHandler(new SelectionChangedHandler() { + @Override + public void onSelectionChanged(SelectionEvent selectionEvent) { + if (metricsTableListGrid.getSelectedRecords().length > 0) { + addToDashboardComponent.enableAddToDashboardButton(); + } else { + addToDashboardComponent.disableAddToDashboardButton(); + } } - } - }); + }); + rendered = true; + } }
private static class ShowLiveDataTableAction implements TableAction {
commit 1e36387756d2d80aa47ad9201e0650b638ec143d Author: Jirka Kremser jkremser@redhat.com Date: Fri Aug 16 15:50:36 2013 +0200
Adding new component for editing the storage cluster configuration (stored in the system settings).
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageClusterSettings.java b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageClusterSettings.java new file mode 100644 index 0000000..661ad18 --- /dev/null +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageClusterSettings.java @@ -0,0 +1,56 @@ +package org.rhq.core.domain.cloud; + +import java.io.Serializable; + +/** + * @author John Sanda + */ +public class StorageClusterSettings implements Serializable { + + private static final long serialVersionUID = 1; + + private int cqlPort; + + private int gossipPort; + + public int getCqlPort() { + return cqlPort; + } + + public void setCqlPort(int cqlPort) { + this.cqlPort = cqlPort; + } + + public int getGossipPort() { + return gossipPort; + } + + public void setGossipPort(int gossipPort) { + this.gossipPort = gossipPort; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + StorageClusterSettings that = (StorageClusterSettings) o; + + if (cqlPort != that.cqlPort) return false; + if (gossipPort != that.gossipPort) return false; + + return true; + } + + @Override + public int hashCode() { + int result = cqlPort; + result = 29 * result + gossipPort; + return result; + } + + @Override + public String toString() { + return "StorageClusterSettings[cqlPort=" + cqlPort + ", gossipPort=" + gossipPort + "]"; + } +} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/ClusterConfigurationEditor.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/ClusterConfigurationEditor.java new file mode 100644 index 0000000..bd11d03 --- /dev/null +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/ClusterConfigurationEditor.java @@ -0,0 +1,238 @@ +/* + * RHQ Management Platform + 
* Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ +package org.rhq.enterprise.gui.coregui.client.admin.storage; + +import java.util.ArrayList; +import java.util.List; + +import com.google.gwt.user.client.rpc.AsyncCallback; +import com.smartgwt.client.types.Alignment; +import com.smartgwt.client.types.Overflow; +import com.smartgwt.client.util.BooleanCallback; +import com.smartgwt.client.util.SC; +import com.smartgwt.client.widgets.events.ClickEvent; +import com.smartgwt.client.widgets.events.ClickHandler; +import com.smartgwt.client.widgets.form.fields.FormItem; +import com.smartgwt.client.widgets.form.fields.StaticTextItem; +import com.smartgwt.client.widgets.form.fields.TextItem; +import com.smartgwt.client.widgets.form.validator.IsIntegerValidator; +import com.smartgwt.client.widgets.form.validator.Validator; +import com.smartgwt.client.widgets.layout.LayoutSpacer; +import com.smartgwt.client.widgets.toolbar.ToolStrip; + +import org.rhq.core.domain.cloud.StorageClusterSettings; +import org.rhq.enterprise.gui.coregui.client.CoreGUI; +import org.rhq.enterprise.gui.coregui.client.RefreshableView; +import org.rhq.enterprise.gui.coregui.client.components.form.EnhancedDynamicForm; +import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; +import 
org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedIButton; +import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedToolStrip; +import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; +import org.rhq.enterprise.gui.coregui.client.util.message.Message; + +/** + * The component for editing the cluster wide configuration + * + * @author Jirka Kremser + */ +public class ClusterConfigurationEditor extends EnhancedVLayout implements RefreshableView { + + private EnhancedDynamicForm form; + private EnhancedIButton saveButton; + private boolean oddRow; + private StorageClusterSettings settings; + + private static String FIELD_CQL_PORT = "cql_port"; + private static String FIELD_GOSSIP_PORT = "gossip_port"; + + public ClusterConfigurationEditor() { + super(); + } + + private void fetchClusterSettings() { + GWTServiceLookup.getStorageService().retrieveClusterSettings( + new AsyncCallback<StorageClusterSettings>() { + @Override + public void onFailure(Throwable caught) { + Message message = new Message(MSG.view_configurationHistoryDetails_error_loadFailure(), + Message.Severity.Warning); + } + + @Override + public void onSuccess(StorageClusterSettings settings) { + ClusterConfigurationEditor.this.settings = settings; + prepareForm(); + } + }); + } + + private void save() { + updateSettings(); + GWTServiceLookup.getStorageService().updateClusterSettings(settings, new AsyncCallback<Void>() { + public void onSuccess(Void result) { + Message msg = new Message("Storage node settings were successfully updated.", Message.Severity.Info); + CoreGUI.getMessageCenter().notify(msg); + } + + public void onFailure(Throwable caught) { + CoreGUI.getErrorHandler().handleError("Unable to update the storage node settings.", caught); + } + }); + } + + private List<FormItem> buildOneFormRowWithValidator(String name, String title, String value, String description, + Validator validator) { + return buildOneFormRow(name, title, value, description, false, 
validator); + } + + private List<FormItem> buildOneFormRow(String name, String title, String value, String description, + boolean unitsDropdown, Validator validator) { + List<FormItem> fields = new ArrayList<FormItem>(); + StaticTextItem nameItem = new StaticTextItem(); + nameItem.setStartRow(true); + nameItem.setValue("<b>" + title + "</b>"); + nameItem.setShowTitle(false); + nameItem.setCellStyle(oddRow ? "OddRow" : "EvenRow"); + fields.add(nameItem); + + FormItem valueItem = null; + valueItem = new TextItem(); + valueItem.setName(name); + valueItem.setValue(value); + valueItem.setWidth(220); + if (validator != null) { + valueItem.setValidators(validator); + } + valueItem.setValidateOnChange(true); + valueItem.setAlign(Alignment.CENTER); + valueItem.setShowTitle(false); + valueItem.setRequired(true); + valueItem.setCellStyle(oddRow ? "OddRow" : "EvenRow"); + fields.add(valueItem); + + StaticTextItem descriptionItem = new StaticTextItem(); + descriptionItem.setValue(description); + descriptionItem.setShowTitle(false); + descriptionItem.setEndRow(true); + descriptionItem.setCellStyle(oddRow ? 
"OddRow" : "EvenRow"); + fields.add(descriptionItem); + + oddRow = !oddRow; + return fields; + } + + private List<FormItem> buildHeaderItems() { + List<FormItem> fields = new ArrayList<FormItem>(); + fields.add(createHeaderTextItem(MSG.view_configEdit_property())); + fields.add(createHeaderTextItem(MSG.common_title_value())); + fields.add(createHeaderTextItem(MSG.common_title_description())); + return fields; + } + + private StaticTextItem createHeaderTextItem(String value) { + StaticTextItem unsetHeader = new StaticTextItem(); + unsetHeader.setValue(value); + unsetHeader.setShowTitle(false); + unsetHeader.setCellStyle("configurationEditorHeaderCell"); + return unsetHeader; + } + + @Override + protected void onDraw() { + super.onDraw(); + refresh(); + } + + private void prepareForm() { + form = new EnhancedDynamicForm(); + form.setHiliteRequiredFields(true); + form.setNumCols(3); + form.setCellPadding(5); + form.setColWidths(190, 220, "*"); + form.setIsGroup(true); + form.setGroupTitle("Cluster Wide Settings"); + form.setBorder("1px solid #AAA"); + oddRow = true; + + List<FormItem> items = buildHeaderItems(); +// IntegerRangeValidator positiveInteger = new IntegerRangeValidator(); +// positiveInteger.setMin(1); +// positiveInteger.setMax(Integer.MAX_VALUE); + IsIntegerValidator validator = new IsIntegerValidator(); + items.addAll(buildOneFormRowWithValidator(FIELD_CQL_PORT, "CQL Port", String.valueOf(settings.getCqlPort()), + "The port on which the Storage Nodes listens for CQL client connections.", validator)); + +// IntegerRangeValidator portValidator = new IntegerRangeValidator(); +// portValidator.setMin(1); +// portValidator.setMax(65535); // (1 << 16) - 1 + validator = new IsIntegerValidator(); + items.addAll(buildOneFormRowWithValidator(FIELD_GOSSIP_PORT, "Gossip Port", String.valueOf(settings.getGossipPort()), + "The port used for internode communication. 
This is a shared, cluster-wide setting.", validator)); + form.setFields(items.toArray(new FormItem[items.size()])); + form.setWidth100(); + form.setOverflow(Overflow.VISIBLE); + setWidth100(); + + LayoutSpacer spacer = new LayoutSpacer(); + spacer.setWidth100(); + + ToolStrip toolStrip = buildToolStrip(); + setMembers(form, spacer, toolStrip); + form.validate(); + markForRedraw(); + } + + @Override + public void refresh() { + fetchClusterSettings(); + } + + private EnhancedToolStrip buildToolStrip() { + saveButton = new EnhancedIButton(MSG.common_button_save()); + saveButton.addClickHandler(new ClickHandler() { + public void onClick(ClickEvent clickEvent) { + if (form.validate()) { + SC.ask( + "Changing the cluster wide configuration will eventually affect all the storage nodes. Do you want to continue?", + new BooleanCallback() { + @Override + public void execute(Boolean value) { + if (value) { + save(); + } + } + }); + } + } + }); + EnhancedToolStrip toolStrip = new EnhancedToolStrip(); + toolStrip.setWidth100(); + toolStrip.setMembersMargin(5); + toolStrip.setLayoutMargin(5); + toolStrip.addMember(saveButton); + + return toolStrip; + } + + private StorageClusterSettings updateSettings() { + settings.setCqlPort(Integer.parseInt(form.getValueAsString(FIELD_CQL_PORT))); + settings.setGossipPort(Integer.parseInt(form.getValueAsString(FIELD_GOSSIP_PORT))); + return settings; + } +} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java index c99cf70c..d6a91cb 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java @@ -18,26 +18,15 @@ */ package 
org.rhq.enterprise.gui.coregui.client.admin.storage;
-import java.util.ArrayList; import java.util.EnumSet;
import com.google.gwt.user.client.Timer; import com.google.gwt.user.client.rpc.AsyncCallback; -import com.smartgwt.client.data.DataSourceField; -import com.smartgwt.client.types.GroupStartOpen; import com.smartgwt.client.widgets.Label; -import com.smartgwt.client.widgets.grid.CellFormatter; -import com.smartgwt.client.widgets.grid.ListGrid; -import com.smartgwt.client.widgets.grid.ListGridField; -import com.smartgwt.client.widgets.grid.ListGridRecord; import com.smartgwt.client.widgets.tab.events.TabSelectedEvent; import com.smartgwt.client.widgets.tab.events.TabSelectedHandler;
-import org.rhq.core.domain.criteria.AlertCriteria; -import org.rhq.core.domain.criteria.ResourceGroupCriteria; import org.rhq.core.domain.resource.ResourceType; -import org.rhq.core.domain.resource.group.composite.ResourceGroupComposite; -import org.rhq.core.domain.util.PageList; import org.rhq.core.domain.util.collection.ArrayUtils; import org.rhq.enterprise.gui.coregui.client.BookmarkableView; import org.rhq.enterprise.gui.coregui.client.CoreGUI; @@ -45,16 +34,10 @@ import org.rhq.enterprise.gui.coregui.client.IconEnum; import org.rhq.enterprise.gui.coregui.client.LinkManager; import org.rhq.enterprise.gui.coregui.client.ViewPath; import org.rhq.enterprise.gui.coregui.client.admin.AdministrationView; -import org.rhq.enterprise.gui.coregui.client.alert.AlertDataSource; -import org.rhq.enterprise.gui.coregui.client.alert.AlertHistoryView; import org.rhq.enterprise.gui.coregui.client.components.tab.NamedTab; import org.rhq.enterprise.gui.coregui.client.components.tab.NamedTabSet; -import org.rhq.enterprise.gui.coregui.client.components.table.Table; -import org.rhq.enterprise.gui.coregui.client.components.view.HasViewName; import org.rhq.enterprise.gui.coregui.client.components.view.ViewName; import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; -import org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.configuration.GroupResourceConfigurationEditView; -import org.rhq.enterprise.gui.coregui.client.inventory.resource.AncestryUtil; import org.rhq.enterprise.gui.coregui.client.inventory.resource.type.ResourceTypeRepository; import org.rhq.enterprise.gui.coregui.client.util.Log; import org.rhq.enterprise.gui.coregui.client.util.StringUtility; @@ -66,7 +49,7 @@ import org.rhq.enterprise.gui.coregui.client.util.message.Message; * * @author Jirka Kremser */ -public class StorageNodeAdminView extends EnhancedVLayout implements/* HasViewName,*/ BookmarkableView { +public class StorageNodeAdminView extends EnhancedVLayout implements BookmarkableView {
public static final ViewName VIEW_ID = new ViewName("StorageNodes", MSG.view_adminTopology_storageNodes(), IconEnum.STORAGE_NODE); @@ -74,7 +57,7 @@ public class StorageNodeAdminView extends EnhancedVLayout implements/* HasViewNa public static final String VIEW_PATH = AdministrationView.VIEW_ID + "/" + AdministrationView.SECTION_TOPOLOGY_VIEW_ID + "/" + VIEW_ID;
- private static final String GROUP_NAME = "RHQ Storage Nodes"; +// private static final String GROUP_NAME = "RHQ Storage Nodes";
private final NamedTabSet tabset; private TabInfo tableTabInfo = new TabInfo(0, new ViewName("Nodes")); @@ -158,31 +141,36 @@ public class StorageNodeAdminView extends EnhancedVLayout implements/* HasViewNa }); } } else if (tabInfo.equals(settingsTabInfo)) { - ResourceGroupCriteria criteria = new ResourceGroupCriteria(); - criteria.addFilterName(GROUP_NAME); - criteria.setStrict(true); - GWTServiceLookup.getResourceGroupService().findResourceGroupCompositesByCriteria(criteria, - new AsyncCallback<PageList<ResourceGroupComposite>>() { - @Override - public void onFailure(Throwable caught) { - Message message = new Message(MSG.view_group_detail_failLoadComp(String.valueOf(GROUP_NAME)), - Message.Severity.Warning); - CoreGUI.goToView(VIEW_ID.getName(), message); - } - - @Override - public void onSuccess(PageList<ResourceGroupComposite> result) { - if (result.isEmpty()) { - onFailure(new Exception("Group with name [" + GROUP_NAME + "] does not exist.")); - } else { - ResourceGroupComposite groupComposite = result.get(0); - loadResourceType(groupComposite.getResourceGroup().getResourceType().getId()); - tabset.getTabByName(tabInfo.name.getName()).setPane( - new GroupResourceConfigurationEditView(groupComposite)); - tabset.selectTab(tabInfo.index); - } - } - }); + ClusterConfigurationEditor editor = new ClusterConfigurationEditor(); + tabset.getTabByName(tabInfo.name.getName()).setPane(editor); + tabset.selectTab(tabInfo.index); + + // we don't group configuration editor anymore +// ResourceGroupCriteria criteria = new ResourceGroupCriteria(); +// criteria.addFilterName(GROUP_NAME); +// criteria.setStrict(true); +// GWTServiceLookup.getResourceGroupService().findResourceGroupCompositesByCriteria(criteria, +// new AsyncCallback<PageList<ResourceGroupComposite>>() { +// @Override +// public void onFailure(Throwable caught) { +// Message message = new Message(MSG.view_group_detail_failLoadComp(String.valueOf(GROUP_NAME)), +// Message.Severity.Warning); +// 
CoreGUI.goToView(VIEW_ID.getName(), message); +// } +// +// @Override +// public void onSuccess(PageList<ResourceGroupComposite> result) { +// if (result.isEmpty()) { +// onFailure(new Exception("Group with name [" + GROUP_NAME + "] does not exist.")); +// } else { +// ResourceGroupComposite groupComposite = result.get(0); +// loadResourceType(groupComposite.getResourceGroup().getResourceType().getId()); +// tabset.getTabByName(tabInfo.name.getName()).setPane( +// new GroupResourceConfigurationEditView(groupComposite)); +// tabset.selectTab(tabInfo.index); +// } +// } +// }); } }
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java index 38829a5..72f17b18 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java @@ -27,6 +27,7 @@ import java.util.Map;
import com.google.gwt.user.client.rpc.RemoteService;
+import org.rhq.core.domain.cloud.StorageClusterSettings; import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.cloud.StorageNodeConfigurationComposite; import org.rhq.core.domain.cloud.StorageNodeLoadComposite; @@ -87,4 +88,8 @@ public interface StorageGWTService extends RemoteService { StorageNodeConfigurationComposite retrieveConfiguration(StorageNode storageNode) throws RuntimeException;
void updateConfiguration(StorageNodeConfigurationComposite storageNodeConfiguration) throws RuntimeException; + + StorageClusterSettings retrieveClusterSettings() throws RuntimeException; + + void updateClusterSettings(StorageClusterSettings clusterSettings) throws RuntimeException; } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java index b7437e3..ae18075 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java @@ -26,6 +26,7 @@ import java.util.ArrayList; import java.util.List; import java.util.Map;
+import org.rhq.core.domain.cloud.StorageClusterSettings; import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.cloud.StorageNodeConfigurationComposite; import org.rhq.core.domain.cloud.StorageNodeLoadComposite; @@ -40,6 +41,7 @@ import org.rhq.enterprise.server.cloud.StorageNodeManagerLocal; import org.rhq.enterprise.server.measurement.util.MeasurementUtils; import org.rhq.enterprise.server.operation.OperationManagerLocal; import org.rhq.enterprise.server.resource.ResourceManagerLocal; +import org.rhq.enterprise.server.storage.StorageClusterSettingsManagerLocal; import org.rhq.enterprise.server.util.LookupUtil;
/** @@ -51,6 +53,8 @@ public class StorageGWTServiceImpl extends AbstractGWTServiceImpl implements Sto
private StorageNodeManagerLocal storageNodeManager = LookupUtil.getStorageNodeManager();
+ private StorageClusterSettingsManagerLocal storageClusterSettingsManager = LookupUtil.getStorageClusterSettingsManagerLocal(); + private OperationManagerLocal operationManager = LookupUtil.getOperationManager();
private ResourceManagerLocal resourceManager = LookupUtil.getResourceManager(); @@ -179,4 +183,23 @@ public class StorageGWTServiceImpl extends AbstractGWTServiceImpl implements Sto throw getExceptionToThrowToClient(t); } } + + @Override + public void updateClusterSettings(StorageClusterSettings clusterSettings) throws RuntimeException { + try { + storageClusterSettingsManager.setClusterSettings(getSessionSubject(), clusterSettings); + } catch (Throwable t) { + throw getExceptionToThrowToClient(t); + } + } + + @Override + public StorageClusterSettings retrieveClusterSettings() throws RuntimeException { + try { + return SerialUtility.prepare(storageClusterSettingsManager.getClusterSettings(getSessionSubject()), + "StorageGWTServiceImpl.retrieveClusterSettings"); + } catch (Throwable t) { + throw getExceptionToThrowToClient(t); + } + } } diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/storage/FakeStorageClusterSettingsManagerBean.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/storage/FakeStorageClusterSettingsManagerBean.java index 3ac61e4..c9738a7 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/storage/FakeStorageClusterSettingsManagerBean.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/storage/FakeStorageClusterSettingsManagerBean.java @@ -3,6 +3,7 @@ package org.rhq.enterprise.server.storage; import javax.ejb.Stateless;
import org.rhq.core.domain.auth.Subject; +import org.rhq.core.domain.cloud.StorageClusterSettings;
/** * @author John Sanda diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 861e3fa..5cce984 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -53,6 +53,7 @@ import org.rhq.core.domain.auth.Subject; import org.rhq.core.domain.authz.Permission; import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.cloud.StorageNode.OperationMode; +import org.rhq.core.domain.cloud.StorageClusterSettings; import org.rhq.core.domain.cloud.StorageNodeConfigurationComposite; import org.rhq.core.domain.cloud.StorageNodeLoadComposite; import org.rhq.core.domain.common.JobTrigger; @@ -84,7 +85,6 @@ import org.rhq.enterprise.server.resource.ResourceManagerLocal; import org.rhq.enterprise.server.resource.ResourceTypeManagerLocal; import org.rhq.enterprise.server.rest.reporting.MeasurementConverter; import org.rhq.enterprise.server.scheduler.SchedulerLocal; -import org.rhq.enterprise.server.storage.StorageClusterSettings; import org.rhq.enterprise.server.storage.StorageClusterSettingsManagerLocal; import org.rhq.enterprise.server.storage.StorageNodeOperationsHandlerLocal; import org.rhq.enterprise.server.util.CriteriaQueryGenerator; diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettings.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettings.java deleted file mode 100644 index 2098acd..0000000 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettings.java +++ /dev/null @@ -1,56 +0,0 @@ -package org.rhq.enterprise.server.storage; - -import java.io.Serializable; - -/** 
- * @author John Sanda - */ -public class StorageClusterSettings implements Serializable { - - private static final long serialVersionUID = 1; - - private int cqlPort; - - private int gossipPort; - - public int getCqlPort() { - return cqlPort; - } - - public void setCqlPort(int cqlPort) { - this.cqlPort = cqlPort; - } - - public int getGossipPort() { - return gossipPort; - } - - public void setGossipPort(int gossipPort) { - this.gossipPort = gossipPort; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - StorageClusterSettings that = (StorageClusterSettings) o; - - if (cqlPort != that.cqlPort) return false; - if (gossipPort != that.gossipPort) return false; - - return true; - } - - @Override - public int hashCode() { - int result = cqlPort; - result = 29 * result + gossipPort; - return result; - } - - @Override - public String toString() { - return "StorageClusterSettings[cqlPort=" + cqlPort + ", gossipPort=" + gossipPort + "]"; - } -} diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerBean.java index 9418bca..64fb310 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerBean.java @@ -6,8 +6,11 @@ import javax.ejb.EJB; import javax.ejb.Stateless;
import org.rhq.core.domain.auth.Subject; +import org.rhq.core.domain.authz.Permission; +import org.rhq.core.domain.cloud.StorageClusterSettings; import org.rhq.core.domain.common.composite.SystemSetting; import org.rhq.core.domain.common.composite.SystemSettings; +import org.rhq.enterprise.server.authz.RequiredPermission; import org.rhq.enterprise.server.system.SystemManagerLocal;
/** @@ -20,6 +23,7 @@ public class StorageClusterSettingsManagerBean implements StorageClusterSettings private SystemManagerLocal systemManager;
@Override + @RequiredPermission(Permission.MANAGE_SETTINGS) public StorageClusterSettings getClusterSettings(Subject subject) { SystemSettings settings = systemManager.getSystemSettings(subject); Map<String, String> settingsMap = settings.toMap(); @@ -43,6 +47,7 @@ public class StorageClusterSettingsManagerBean implements StorageClusterSettings }
@Override + @RequiredPermission(Permission.MANAGE_SETTINGS) public void setClusterSettings(Subject subject, StorageClusterSettings clusterSettings) { SystemSettings settings = new SystemSettings(); settings.put(SystemSetting.STORAGE_CQL_PORT, Integer.toString(clusterSettings.getCqlPort())); diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerLocal.java index cb63bc4..f98cccc 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerLocal.java @@ -3,6 +3,7 @@ package org.rhq.enterprise.server.storage; import javax.ejb.Local;
import org.rhq.core.domain.auth.Subject; +import org.rhq.core.domain.cloud.StorageClusterSettings;
/** * @author John Sanda diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java index 2a79c59..4969c46 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java @@ -17,6 +17,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory;
import org.rhq.core.domain.auth.Subject; +import org.rhq.core.domain.cloud.StorageClusterSettings; import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.common.JobTrigger; import org.rhq.core.domain.configuration.Configuration; diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/LookupUtil.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/LookupUtil.java index 5aded7d..3cde894 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/LookupUtil.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/LookupUtil.java @@ -188,8 +188,10 @@ import org.rhq.enterprise.server.scheduler.SchedulerBean; import org.rhq.enterprise.server.scheduler.SchedulerLocal; import org.rhq.enterprise.server.search.SavedSearchManagerBean; import org.rhq.enterprise.server.search.SavedSearchManagerLocal; -import org.rhq.enterprise.server.storage.StorageNodeOperationsHandlerLocal; +import org.rhq.enterprise.server.storage.StorageClusterSettingsManagerBean; +import org.rhq.enterprise.server.storage.StorageClusterSettingsManagerLocal; import org.rhq.enterprise.server.storage.StorageNodeOperationsHandlerBean; +import org.rhq.enterprise.server.storage.StorageNodeOperationsHandlerLocal; import org.rhq.enterprise.server.subsystem.AlertSubsystemManagerBean; import org.rhq.enterprise.server.subsystem.AlertSubsystemManagerLocal; import org.rhq.enterprise.server.subsystem.ConfigurationSubsystemManagerBean; @@ -492,6 +494,10 @@ public final class LookupUtil { public static StorageNodeOperationsHandlerLocal getStorageNodeOperationsHandler() { return lookupLocal(StorageNodeOperationsHandlerBean.class); } + + public static StorageClusterSettingsManagerLocal getStorageClusterSettingsManagerLocal() { + return lookupLocal(StorageClusterSettingsManagerBean.class); + }
public static ClusterManagerLocal getClusterManager() { return lookupLocal(ClusterManagerBean.class);
commit 7c4c9151c51f41aadc02a8898c370f879680cf8b Author: Heiko W. Rupp hwr@redhat.com Date: Fri Aug 16 10:01:27 2013 +0200
BZ 966294 Add one more default when the user just accepts the default in the UI.
diff --git a/modules/enterprise/server/plugins/alert-snmp/src/main/java/org/rhq/enterprise/server/plugins/alertSnmp/SnmpTrapSender.java b/modules/enterprise/server/plugins/alert-snmp/src/main/java/org/rhq/enterprise/server/plugins/alertSnmp/SnmpTrapSender.java index ea3d985..809c8a1 100644 --- a/modules/enterprise/server/plugins/alert-snmp/src/main/java/org/rhq/enterprise/server/plugins/alertSnmp/SnmpTrapSender.java +++ b/modules/enterprise/server/plugins/alert-snmp/src/main/java/org/rhq/enterprise/server/plugins/alertSnmp/SnmpTrapSender.java @@ -83,6 +83,7 @@ public class SnmpTrapSender implements PDUFactory { public static final int DEFAULT = 0; private static final String UDP_TRANSPORT = "udp"; private static final String TCP_TRANSPORT = "tcp"; + private static final String DEFAULT_RHQ_BINDING = "1.3.6.1.4.1.18016.2.1";
private Log log = LogFactory.getLog(SnmpTrapSender.class);
@@ -494,8 +495,8 @@ public class SnmpTrapSender implements PDUFactory { if (!this.snmpEnabled) { return "SNMP is not enabled."; } - - String variableBindingPrefix = alertParameters.getSimpleValue(SnmpInfo.PARAM_VARIABLE_BINDING_PREFIX, null); + String variableBindingPrefix = alertParameters.getSimpleValue(SnmpInfo.PARAM_VARIABLE_BINDING_PREFIX, + DEFAULT_RHQ_BINDING);
// request id and a timestamp are added below in setSysUpTime..
commit d706398531f65ba7ec1e1f06946fff55d5195b7b Author: John Sanda jsanda@redhat.com Date: Thu Aug 15 23:03:03 2013 -0400
adding status property to StorageNode
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java index 54a0db5..48d5f83 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java @@ -241,6 +241,25 @@ public class StorageNode implements Serializable { this.operationMode = operationMode; }
+ public Status getStatus() { + if (operationMode == OperationMode.INSTALLED) { + return Status.INSTALLED; + } + if (operationMode == OperationMode.ANNOUNCE || operationMode == OperationMode.BOOTSTRAP || + operationMode == OperationMode.ADD_NODE_MAINTENANCE) { + if (errorMessage == null && failedOperation == null) { + return Status.JOINING; + } else { + return Status.DOWN; + } + } + if (operationMode == OperationMode.NORMAL) { + return Status.NORMAL; + } + // else operation mode is DOWN + return Status.DOWN; + } + public enum OperationMode {
DOWN("This storage node is down"), // @@ -265,6 +284,14 @@ public class StorageNode implements Serializable { } }
+ public enum Status { + INSTALLED, + DOWN, + NORMAL, + JOINING, + LEAVING + } + public String getJMXConnectionURL() { // GWT doesn't support String.format() String[] split = JMX_CONNECTION_STRING.split("%s");
commit 6aef940aa47acd03c6a45134b0bbde13ef520478 Author: mtho11 mikecthompson@gmail.com Date: Thu Aug 15 12:30:49 2013 -0700
Fix for when no dashboards exist (a Dashboard doesn't exist until you go to the Dashboards screen) — the Metrics dashboard selection combobox bombs.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/AddToDashboardComponent.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/AddToDashboardComponent.java index f337202..8823dcf 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/AddToDashboardComponent.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/AddToDashboardComponent.java @@ -134,14 +134,16 @@ public class AddToDashboardComponent extends EnhancedToolStrip implements Enhanc }
public void onSuccess(PageList<Dashboard> dashboards) { - for (final Dashboard dashboard : dashboards) { - dashboardMenuMap.put(String.valueOf(dashboard.getId()), - MSG.view_tree_common_contextMenu_addChartToDashboard(dashboard.getName())); - dashboardMap.put(dashboard.getId(), dashboard); + if(dashboards.size() > 0){ + for (final Dashboard dashboard : dashboards) { + dashboardMenuMap.put(String.valueOf(dashboard.getId()), + MSG.view_tree_common_contextMenu_addChartToDashboard(dashboard.getName())); + dashboardMap.put(dashboard.getId(), dashboard); + } + selectedDashboard = dashboards.get(0); + dashboardSelectItem.setValueMap(dashboardMenuMap); + dashboardSelectItem.setValue(selectedDashboard.getId()); } - selectedDashboard = dashboards.get(0); - dashboardSelectItem.setValueMap(dashboardMenuMap); - dashboardSelectItem.setValue(selectedDashboard.getId()); } }); }
commit ee2e6c38b5eb0ae918cb2776b1a2b9c946868844 Merge: 986f4f4 85c1c8a Author: Simeon Pinder spinder@redhat.com Date: Thu Aug 15 18:12:35 2013 -0400
Merge branch 'master' into nightly/rhq.jon
commit ca7af9aaf30b017cd8b00c81031e51dadc3bc48e Author: John Sanda jsanda@redhat.com Date: Thu Aug 15 16:31:49 2013 -0400
updating api checks
diff --git a/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml index 617593a..08e793c 100644 --- a/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml +++ b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml @@ -58,6 +58,13 @@ </difference>
<difference> + <className>org/rhq/enterprise/server/cloud/StorageNodeManagerRemote</className> + <differenceType>7012</differenceType> <!-- method added to an interface --> + <method>void deployStorageNode(org.rhq.core.domain.auth.Subject, org.rhq.core.domain.cloud.StorageNode)</method> + <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> + </difference> + + <difference> <className>org/rhq/enterprise/server/bundle/BundleManagerRemote</className> <differenceType>7012</differenceType> <!-- method added to an interface --> <method>void assignBundlesToBundleGroup(org.rhq.core.domain.auth.Subject, int, int[])</method>
commit 261bf3827c95ca1f5083c27234e50a3bb14d80bd Author: John Sanda jsanda@redhat.com Date: Thu Aug 15 16:27:02 2013 -0400
[BZ 958166] Remove dependency on com.sun class
We no longer are performing the JMX calls to C* from the RHQ server but we still are making JMX calls from the Storage installer. As long as the sun.jdk module exports,
<path name="com/sun/jndi/url/rmi"/>
We do not need to have the dependency on the com.sun classes. And our build updates the sun.jdk module to export that path.
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/pom.xml b/modules/common/cassandra-ccm/cassandra-ccm-core/pom.xml index a24ce10..ad2de77 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-core/pom.xml +++ b/modules/common/cassandra-ccm/cassandra-ccm-core/pom.xml @@ -25,18 +25,6 @@ </dependency>
<dependency> - <groupId>${project.groupId}</groupId> - <artifactId>rhq-cassandra-jmx</artifactId> - <version>${project.version}</version> - <exclusions> - <exclusion> - <groupId>com.sun</groupId> - <artifactId>rt</artifactId> - </exclusion> - </exclusions> - </dependency> - - <dependency> <groupId>org.apache.cassandra</groupId> <artifactId>cassandra-thrift</artifactId> <version>${cassandra.version}</version> diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/ClusterInitService.java b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/ClusterInitService.java index ac2313b..83851c5 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/ClusterInitService.java +++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/ClusterInitService.java @@ -25,7 +25,6 @@
package org.rhq.cassandra;
-import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedList; @@ -33,27 +32,16 @@ import java.util.List; import java.util.Map; import java.util.Queue; import java.util.Set; -import java.util.UUID;
import javax.management.MBeanServerConnection; import javax.management.ObjectName; import javax.management.remote.JMXConnector; import javax.management.remote.JMXConnectorFactory; import javax.management.remote.JMXServiceURL; -import javax.naming.Context; - -import com.datastax.driver.core.BoundStatement; -import com.datastax.driver.core.Cluster; -import com.datastax.driver.core.PreparedStatement; -import com.datastax.driver.core.ResultSet; -import com.datastax.driver.core.Row; -import com.datastax.driver.core.Session; -import com.datastax.driver.core.exceptions.NoHostAvailableException;
import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory;
-import org.rhq.cassandra.installer.RMIContextFactory; import org.rhq.core.domain.cloud.StorageNode;
/** @@ -273,8 +261,6 @@ public final class ClusterInitService { String url = storageNode.getJMXConnectionURL(); JMXServiceURL serviceURL = new JMXServiceURL(url); Map<String, String> env = new HashMap<String, String>(); - // see https://issues.jboss.org/browse/AS7-2138 - env.put(Context.INITIAL_CONTEXT_FACTORY, RMIContextFactory.class.getName()); JMXConnector connector = null;
try { @@ -310,8 +296,6 @@ public final class ClusterInitService { String url = storageNode.getJMXConnectionURL(); JMXServiceURL serviceURL = new JMXServiceURL(url); Map<String, String> env = new HashMap<String, String>(); - // see https://issues.jboss.org/browse/AS7-2138 - env.put(Context.INITIAL_CONTEXT_FACTORY, RMIContextFactory.class.getName()); JMXConnector connector = null;
try { diff --git a/modules/common/cassandra-installer/pom.xml b/modules/common/cassandra-installer/pom.xml index 3a4188d..4492e70 100644 --- a/modules/common/cassandra-installer/pom.xml +++ b/modules/common/cassandra-installer/pom.xml @@ -95,11 +95,6 @@ <version>${project.version}</version> </artifactItem> <artifactItem> - <groupId>${project.groupId}</groupId> - <artifactId>rhq-cassandra-jmx</artifactId> - <version>${project.version}</version> - </artifactItem> - <artifactItem> <groupId>org.yaml</groupId> <artifactId>snakeyaml</artifactId> <version>${cassandra.snakeyaml.version}</version> diff --git a/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java b/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java index 113d66b..3686b6f 100644 --- a/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java +++ b/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java @@ -46,7 +46,6 @@ import javax.management.ObjectName; import javax.management.remote.JMXConnector; import javax.management.remote.JMXConnectorFactory; import javax.management.remote.JMXServiceURL; -import javax.naming.Context;
import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.CommandLineParser; @@ -66,7 +65,6 @@ import org.yaml.snakeyaml.Yaml; import org.rhq.cassandra.Deployer; import org.rhq.cassandra.DeploymentOptions; import org.rhq.cassandra.DeploymentOptionsFactory; -import org.rhq.cassandra.installer.RMIContextFactory; import org.rhq.core.util.PropertiesFileUpdate; import org.rhq.core.util.StringUtil; import org.rhq.core.util.exception.ThrowableUtil; @@ -651,8 +649,6 @@ public class StorageInstaller { }
Map<String, String> env = new HashMap<String, String>(); - env.put(Context.INITIAL_CONTEXT_FACTORY, RMIContextFactory.class.getName()); - for (int i = 0; i < retries; ++i) { try { connector = JMXConnectorFactory.connect(serviceURL, env); diff --git a/modules/common/cassandra-installer/src/main/resources/module/main/module.xml b/modules/common/cassandra-installer/src/main/resources/module/main/module.xml index 049d30d..bf66025 100644 --- a/modules/common/cassandra-installer/src/main/resources/module/main/module.xml +++ b/modules/common/cassandra-installer/src/main/resources/module/main/module.xml @@ -7,7 +7,6 @@ <resource-root path="${project.build.finalName}.jar"/> <resource-root path="rhq-cassandra-ccm-core-${project.version}.jar"/> <resource-root path="rhq-core-util-${project.version}.jar"/> - <resource-root path="rhq-cassandra-jmx-${project.version}.jar"/> <resource-root path="snakeyaml-${cassandra.snakeyaml.version}.jar"/> </resources>
diff --git a/modules/common/cassandra-jmx/pom.xml b/modules/common/cassandra-jmx/pom.xml deleted file mode 100644 index 1653f8c..0000000 --- a/modules/common/cassandra-jmx/pom.xml +++ /dev/null @@ -1,28 +0,0 @@ -<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" - xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd%22%3E - - <modelVersion>4.0.0</modelVersion> - - <parent> - <groupId>org.rhq</groupId> - <artifactId>rhq-common-parent</artifactId> - <version>4.9.0-SNAPSHOT</version> - </parent> - - <artifactId>rhq-cassandra-jmx</artifactId> - <name>RHQ Cassandra JMX Utility</name> - - <properties> - <moduleName>org.rhq.${project.artifactId}</moduleName> - </properties> - - <dependencies> - <dependency> - <groupId>com.sun</groupId> - <artifactId>rt</artifactId> - <version>1.6.0</version> - <scope>system</scope> - <systemPath>${java.home}/lib/rt.jar</systemPath> - </dependency> - </dependencies> -</project> diff --git a/modules/common/cassandra-jmx/src/main/java/org/rhq/cassandra/installer/RMIContextFactory.java b/modules/common/cassandra-jmx/src/main/java/org/rhq/cassandra/installer/RMIContextFactory.java deleted file mode 100644 index 34c456d..0000000 --- a/modules/common/cassandra-jmx/src/main/java/org/rhq/cassandra/installer/RMIContextFactory.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * - * * RHQ Management Platform - * * Copyright (C) 2005-2012 Red Hat, Inc. - * * All rights reserved. - * * - * * This program is free software; you can redistribute it and/or modify - * * it under the terms of the GNU General Public License, version 2, as - * * published by the Free Software Foundation, and/or the GNU Lesser - * * General Public License, version 2.1, also as published by the Free - * * Software Foundation. 
- * * - * * This program is distributed in the hope that it will be useful, - * * but WITHOUT ANY WARRANTY; without even the implied warranty of - * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * * GNU General Public License and the GNU Lesser General Public License - * * for more details. - * * - * * You should have received a copy of the GNU General Public License - * * and the GNU Lesser General Public License along with this program; - * * if not, write to the Free Software Foundation, Inc., - * * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - * - */ - -package org.rhq.cassandra.installer; - -import java.util.Hashtable; - -import javax.naming.Context; -import javax.naming.NamingException; -import javax.naming.spi.InitialContextFactory; - -import com.sun.jndi.url.rmi.rmiURLContext; - -/** - * This class is provided as part of a work around for <a href="https://issues.jboss.org/browse/AS7-2138">AS7-2138</a>. - * - * @author John Sanda - */ -public class RMIContextFactory implements InitialContextFactory { - - /* (non-Javadoc) - * @see javax.naming.spi.InitialContextFactory#getInitialContext(java.util.Hashtable) - */ - @Override - public Context getInitialContext(Hashtable environment) throws NamingException { - return new rmiURLContext(environment); - } - -} diff --git a/modules/common/pom.xml b/modules/common/pom.xml index 7d12500..45ba5c6 100644 --- a/modules/common/pom.xml +++ b/modules/common/pom.xml @@ -32,7 +32,6 @@ <module>jboss-as-dmr-client</module> <module>cassandra-auth</module> <module>cassandra-util</module> - <module>cassandra-jmx</module> <module>cassandra-schema</module> <module>cassandra-ccm</module> <module>cassandra-installer</module>
commit 85c1c8a8c348d72ce3121e71ae17871a3422a891 Author: Heiko W. Rupp hwr@redhat.com Date: Thu Aug 15 21:27:58 2013 +0200
More dbsetup/upgrade fixes related to PropertyOptionSources.
diff --git a/modules/core/dbutils/src/main/scripts/dbsetup/config-schema.xml b/modules/core/dbutils/src/main/scripts/dbsetup/config-schema.xml index 1328915..b484d2b 100644 --- a/modules/core/dbutils/src/main/scripts/dbsetup/config-schema.xml +++ b/modules/core/dbutils/src/main/scripts/dbsetup/config-schema.xml @@ -65,7 +65,7 @@ <column name="LINK_TO_TARGET" type="BOOLEAN" required="false"/> <column name="FILTER" type="VARCHAR2" required="false" size="40"/> <column name="EXPRESSION" type="VARCHAR2" required="true" size="400"/> - <column name="EXPRESSION_SCOPE" type="VARCHAR2" required="true" size="12" default="unlimited"/> + <column name="EXPRESSION_SCOPE" type="VARCHAR2" required="true" size="15" default="UNLIMITED"/> <column name="TARGET_TYPE" type="VARCHAR2" required="true" size="20"/> <column name="PROPERTY_DEF_ID" type="INTEGER" required="false" references="RHQ_CONFIG_PROP_DEF"/> </table> diff --git a/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml b/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml index ddc5de9..949ca4b 100644 --- a/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml +++ b/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml @@ -2180,15 +2180,15 @@ </schemaSpec>
<schemaSpec version="2.136"> - <schema-addColumn table="RHQ_CONFIG_PD_OSRC" column="EXPRESSION_SCOPE" columnType="VARCHAR2" precision="12"/> + <schema-addColumn table="RHQ_CONFIG_PD_OSRC" column="EXPRESSION_SCOPE" columnType="VARCHAR2" precision="15"/> <schema-directSQL> <statement desc="Fill in defaults"> UPDATE RHQ_CONFIG_PD_OSRC - SET EXPRESSION_SCOPE = 'unlimited' + SET EXPRESSION_SCOPE = 'UNLIMITED' WHERE EXPRESSION_SCOPE IS NULL </statement> </schema-directSQL> - <schema-alterColumn table="RHQ_CONFIG_PD_OSRC" column="EXPRESSION_SCOPE" nullable="false" default="unlimited"/> + <schema-alterColumn table="RHQ_CONFIG_PD_OSRC" column="EXPRESSION_SCOPE" nullable="false" default="UNLIMITED"/> </schemaSpec>
<schemaSpec version="2.137">
commit 3c216a95be3b3c042fadaf93aaaf95a7fc387c5a Author: Elias Ross elias_ross@apple.com Date: Thu Jun 13 16:15:09 2013 -0700
BZ 991153 - Agent uses incorrect synchronization for 'UUID to ResourceContainer map'
When copying a 'synchronized' map, you must lock it to copy it. But this was not done. It makes sense, instead, to use a ConcurrentHashMap, especially because the copy was only done for iteration purposes.
Also, use a copy on write array for listeners
diff --git a/modules/core/plugin-container/src/main/java/org/rhq/core/pc/inventory/InventoryManager.java b/modules/core/plugin-container/src/main/java/org/rhq/core/pc/inventory/InventoryManager.java index 7319c97..847a269 100644 --- a/modules/core/plugin-container/src/main/java/org/rhq/core/pc/inventory/InventoryManager.java +++ b/modules/core/plugin-container/src/main/java/org/rhq/core/pc/inventory/InventoryManager.java @@ -36,6 +36,7 @@ import java.util.Set; import java.util.UUID; import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CopyOnWriteArraySet; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; import java.util.concurrent.Future; @@ -196,13 +197,12 @@ public class InventoryManager extends AgentService implements ContainerService, /** * UUID to ResourceContainer map */ - private Map<String, ResourceContainer> resourceContainers = Collections - .synchronizedMap(new HashMap<String, ResourceContainer>(1000)); + private final Map<String, ResourceContainer> resourceContainers = new ConcurrentHashMap<String, ResourceContainer>(100);
/** * Collection of event listeners to inform of changes to the inventory. */ - private Set<InventoryEventListener> inventoryEventListeners = new HashSet<InventoryEventListener>(); + private final Set<InventoryEventListener> inventoryEventListeners = new CopyOnWriteArraySet<InventoryEventListener>();
private PluginManager pluginManager = PluginContainer.getInstance().getPluginManager();
@@ -466,9 +466,7 @@ public class InventoryManager extends AgentService implements ContainerService, @Nullable public ResourceContainer getResourceContainer(CanonicalResourceKey canonicalId) { ResourceContainer resourceContainer = null; - Map<String, ResourceContainer> copy = new HashMap<String, ResourceContainer>(this.resourceContainers); // avoids concurrent mod exception, kinda - - for (Map.Entry<String, ResourceContainer> entry : copy.entrySet()) { + for (Map.Entry<String, ResourceContainer> entry : resourceContainers.entrySet()) { ResourceContainer container = entry.getValue(); Resource resource = container.getResource(); if (resource != null) { @@ -486,14 +484,15 @@ public class InventoryManager extends AgentService implements ContainerService, } } } - - copy.clear(); // help GC return resourceContainer; }
@Nullable public ResourceContainer getResourceContainer(Resource resource) { - return this.resourceContainers.get(resource.getUuid()); + String uuid = resource.getUuid(); + if (uuid == null) + return null; + return this.resourceContainers.get(uuid); }
@Nullable @@ -512,15 +511,13 @@ public class InventoryManager extends AgentService implements ContainerService, return null; }
- List<ResourceContainer> containers = new ArrayList<ResourceContainer>(this.resourceContainers.values()); // avoids concurrent mod exception ResourceContainer retContainer = null; - for (ResourceContainer container : containers) { + for (ResourceContainer container : resourceContainers.values()) { if (resourceId.equals(container.getResource().getId())) { retContainer = container; break; } } - containers.clear(); // help GC return retContainer; }
@@ -619,7 +616,7 @@ public class InventoryManager extends AgentService implements ContainerService, /** * This method implicitly calls {@link #handleReport(AvailabilityReport)} so any report generating entries * *will be sent to the server*. Callers should subsequently *NOT* send the report. - * + * * @param changedOnlyReport * @return The report, for inspection */ @@ -630,7 +627,7 @@ public class InventoryManager extends AgentService implements ContainerService, /** * This method implicitly calls {@link #handleReport(AvailabilityReport)} so any report generating entries * *will be sent to the server*. Callers should subsequently *NOT* send the report. - * + * * @param changedOnlyReport * @param forceChecks * @return The report, for inspection @@ -1193,7 +1190,7 @@ public class InventoryManager extends AgentService implements ContainerService, // If we synced any Resources, one or more Resource components were probably started, request a // full avail report to make sure their availabilities are determined on the next avail run (typically // < 30s away). A full avail report will ensure an initial avail check is performed for a resource. - // + // // Also kick off a service scan to scan those Resources for new child Resources. Kick both tasks off // asynchronously. // @@ -2445,9 +2442,7 @@ public class InventoryManager extends AgentService implements ContainerService, * @param listener instance to notify of change events */ public void addInventoryEventListener(InventoryEventListener listener) { - synchronized (this.inventoryEventListeners) { - this.inventoryEventListeners.add(listener); - } + this.inventoryEventListeners.add(listener); }
/** @@ -2456,9 +2451,7 @@ public class InventoryManager extends AgentService implements ContainerService, * @param listener instance to remove from event notification */ public void removeInventoryEventListener(InventoryEventListener listener) { - synchronized (this.inventoryEventListeners) { - this.inventoryEventListeners.remove(listener); - } + this.inventoryEventListeners.remove(listener); }
/** @@ -2485,9 +2478,7 @@ public class InventoryManager extends AgentService implements ContainerService, * @return all inventory event listeners */ private Set<InventoryEventListener> getInventoryEventListeners() { - synchronized (this.inventoryEventListeners) { - return new HashSet<InventoryEventListener>(this.inventoryEventListeners); - } + return this.inventoryEventListeners; }
/** @@ -2970,7 +2961,7 @@ public class InventoryManager extends AgentService implements ContainerService, }
///// - // Now we need to loop over batches of the resource ID list - asking the server for their resource representations. + // Now we need to loop over batches of the resource ID list - asking the server for their resource representations. // When we get the resources from the server, we put them in our resourceMap, keyed on ID.
Map<Integer, Resource> resourceMap = new HashMap<Integer, Resource>(resourceIdList.size()); @@ -3021,7 +3012,7 @@ public class InventoryManager extends AgentService implements ContainerService, for (Resource r : resourceBatch) { // protect against childResources notNull assumptions downstream if (null == r.getChildResources()) { - r.setChildResources(null); // this will actually initialize to an empty Set + r.setChildResources(null); // this will actually initialize to an empty Set } resourceMap.put(r.getId(), r); }
commit d10c1b99865368a946e57ef79584ff2f018af678 Author: Jirka Kremser jkremser@redhat.com Date: Thu Aug 15 17:34:01 2013 +0200
First version of displaying the failures of maintenance tasks on storage nodes (or the whole storage node cluster).
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java index e085eb1..a89bb81 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java @@ -23,6 +23,8 @@ import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDat import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_CQL_PORT; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_CTIME; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_DISK; +import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_ERROR_MESSAGE; +import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_FAILED_OPERATION; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_ID; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_JMX_PORT; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_MEMORY; @@ -41,6 +43,7 @@ import com.smartgwt.client.data.DataSourceField; import com.smartgwt.client.data.Record; import com.smartgwt.client.data.fields.DataSourceIntegerField; import com.smartgwt.client.data.fields.DataSourceTextField; +import com.smartgwt.client.widgets.grid.CellFormatter; import com.smartgwt.client.widgets.grid.HoverCustomizer; import com.smartgwt.client.widgets.grid.ListGridField; import com.smartgwt.client.widgets.grid.ListGridRecord; @@ -51,13 +54,16 @@ 
import org.rhq.core.domain.cloud.StorageNodeLoadComposite; import org.rhq.core.domain.cloud.StorageNodeLoadComposite.MeasurementAggregateWithUnits; import org.rhq.core.domain.criteria.StorageNodeCriteria; import org.rhq.core.domain.measurement.MeasurementAggregate; +import org.rhq.core.domain.operation.ResourceOperationHistory; import org.rhq.core.domain.util.PageControl; import org.rhq.core.domain.util.PageList; import org.rhq.core.domain.util.PageOrdering; import org.rhq.enterprise.gui.coregui.client.CoreGUI; +import org.rhq.enterprise.gui.coregui.client.LinkManager; import org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.StorageNodeLoadCompositeDatasourceField; import org.rhq.enterprise.gui.coregui.client.components.table.TimestampCellFormatter; import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; +import org.rhq.enterprise.gui.coregui.client.inventory.resource.AncestryUtil; import org.rhq.enterprise.gui.coregui.client.util.Log; import org.rhq.enterprise.gui.coregui.client.util.MeasurementConverterClient; import org.rhq.enterprise.gui.coregui.client.util.RPCDataSource; @@ -137,8 +143,29 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNodeLoadComposit // ListGridField cqlField = FIELD_CQL_PORT.getListGridField("90"); // cqlField.setHidden(true); // fields.add(cqlField); - fields.add(FIELD_OPERATION_MODE.getListGridField("90")); - + + field = FIELD_OPERATION_MODE.getListGridField("90"); + field.setCellFormatter(new CellFormatter() { + public String format(Object value, ListGridRecord listGridRecord, int i, int i1) { + if (listGridRecord.getAttribute(FIELD_ERROR_MESSAGE.propertyName()) != null + || listGridRecord.getAttribute(FIELD_FAILED_OPERATION.propertyName()) != null) { + return "<span style='" + DONT_MISS_ME_COLOR + "'>" + value.toString() + "</span>"; + } else + return value.toString(); + } + }); + field.setShowHover(true); + field.setHoverCustomizer(new HoverCustomizer() { + public String 
hoverHTML(Object value, ListGridRecord record, int rowNum, int colNum) { + if (record.getAttribute(FIELD_ERROR_MESSAGE.propertyName()) != null + || record.getAttribute(FIELD_FAILED_OPERATION.propertyName()) != null) { + return value.toString() + ": Something went wrong. Please double click on the storage node to show the detail page to know more."; + } else + return value.toString(); + } + }); + fields.add(field); + ListGridField createdTimeField = FIELD_CTIME.getListGridField("120"); TimestampCellFormatter.prepareDateField(createdTimeField); fields.add(createdTimeField); @@ -208,6 +235,13 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNodeLoadComposit record.setAttribute(FIELD_JMX_PORT.propertyName(), node.getJmxPort()); record.setAttribute(FIELD_CQL_PORT.propertyName(), node.getCqlPort()); record.setAttribute(FIELD_OPERATION_MODE.propertyName(), node.getOperationMode()); + record.setAttribute(FIELD_ERROR_MESSAGE.propertyName(), node.getErrorMessage()); + if (node.getFailedOperation() != null && node.getFailedOperation().getResource() != null) { + ResourceOperationHistory operationHistory = node.getFailedOperation(); + String value = LinkManager.getSubsystemResourceOperationHistoryLink(operationHistory.getResource().getId(), operationHistory.getId()); +// String value = "#Resource/" + operationHistory.getResource().getId() + "/Operations/History/" + operationHistory.getId()); + record.setAttribute(FIELD_FAILED_OPERATION.propertyName(), value); + } record.setAttribute(FIELD_CTIME.propertyName(), node.getCtime()); record.setAttribute(FIELD_MTIME.propertyName(), node.getMtime()); if (node.getResource() != null) { diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasourceField.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasourceField.java index 19ba5ee..04a1767 100644 --- 
a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasourceField.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasourceField.java @@ -43,6 +43,10 @@ public enum StorageNodeDatasourceField { FIELD_MEMORY("memory", "Memory"),
FIELD_DISK("disk", "Disk"), + + FIELD_ERROR_MESSAGE("errorMessage", "Error"), + + FIELD_FAILED_OPERATION("failedOperation", "Failed Operation"),
FIELD_CTIME("ctime", CoreGUI.getMessages().view_adminTopology_serverDetail_installationDate()),
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java index dc500f0..6fdae0c 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java @@ -23,6 +23,7 @@ import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDat import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_ALERTS; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_CQL_PORT; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_CTIME; +import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_FAILED_OPERATION; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_JMX_PORT; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_MTIME; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_OPERATION_MODE; @@ -49,6 +50,7 @@ import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.cloud.StorageNodeConfigurationComposite; import org.rhq.core.domain.criteria.StorageNodeCriteria; import org.rhq.core.domain.measurement.composite.MeasurementDataNumericHighLowComposite; +import org.rhq.core.domain.operation.ResourceOperationHistory; import org.rhq.core.domain.resource.Resource; import org.rhq.core.domain.resource.composite.ResourceComposite; import org.rhq.core.domain.util.PageList; @@ -299,16 +301,26 @@ public class StorageNodeDetailView extends EnhancedVLayout 
implements Bookmarkab alertsItem.setValue(StorageNodeAdminView.getAlertsString("New Alerts", storageNodeId, unackAlerts)); }
- StaticTextItem memoryStatusItem = new StaticTextItem("memoryStatus", "Memory"); - memoryStatusItem.setValue("No action needed"); + StaticTextItem message = new StaticTextItem("message", "Note"); + message.setValue(storageNode.getErrorMessage() == null ? "Everything is ok" : storageNode.getErrorMessage()); + + StaticTextItem lastOperation = null; + boolean isOperationFailed = storageNode.getFailedOperation() != null && storageNode.getFailedOperation().getResource() != null; + if (isOperationFailed) { + ResourceOperationHistory operationHistory = storageNode.getFailedOperation(); + String value = LinkManager.getSubsystemResourceOperationHistoryLink(operationHistory.getResource().getId(), operationHistory.getId()); +// String value = "#Resource/" + operationHistory.getResource().getId() + "/Operations/History/" + operationHistory.getId()); + lastOperation = new StaticTextItem("lastOp", "Operation"); + lastOperation.setValue(LinkManager.getHref(value, operationHistory.getOperationDefinition().getDisplayName())); + } +
- StaticTextItem diskStatusItem = new StaticTextItem("mdiskStatus", "Disk"); - diskStatusItem.setValue("No action needed");
List<FormItem> formItems = new ArrayList<FormItem>(6); formItems.addAll(Arrays.asList(nameItem, resourceItem,cqlPortItem, jmxPortItem/*, jmxConnectionUrlItem*/)); if (!CoreGUI.isDebugMode()) formItems.add(operationModeItem); // debug mode fails if this item is added - formItems.addAll(Arrays.asList(installationDateItem, lastUpdateItem, alertsItem, memoryStatusItem, diskStatusItem)); + formItems.addAll(Arrays.asList(installationDateItem, lastUpdateItem, alertsItem, message)); + if (isOperationFailed) formItems.add(lastOperation); form.setItems(formItems.toArray(new FormItem[]{}));
detailsLayout = new EnhancedVLayout();
commit 7b3df20a5ebf5a78f90d6b87090098c324334ef4 Author: Lukas Krejci lkrejci@redhat.com Date: Thu Aug 15 13:09:14 2013 +0200
Clean up after the test.
diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/resource/metadata/ResourceMetadataManagerBeanTest.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/resource/metadata/ResourceMetadataManagerBeanTest.java index 46f2bef..1d6b49a 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/resource/metadata/ResourceMetadataManagerBeanTest.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/resource/metadata/ResourceMetadataManagerBeanTest.java @@ -4,6 +4,8 @@ import static java.util.Arrays.asList;
import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Set;
@@ -18,6 +20,7 @@ import org.rhq.core.domain.alert.AlertDampening; import org.rhq.core.domain.alert.AlertDefinition; import org.rhq.core.domain.alert.AlertPriority; import org.rhq.core.domain.alert.BooleanExpression; +import org.rhq.core.domain.auth.Subject; import org.rhq.core.domain.bundle.Bundle; import org.rhq.core.domain.bundle.BundleType; import org.rhq.core.domain.bundle.ResourceTypeBundleConfiguration; @@ -54,6 +57,9 @@ import org.rhq.enterprise.server.util.LookupUtil; public class ResourceMetadataManagerBeanTest extends MetadataBeanTest {
private static final String PLUGIN_NAME = "ResourceMetadataManagerBeanTestPlugin"; + //this is used in afterclass, which might execute with a different instance than the rest of the tests + //therefore we need to make this static. Not sure who causes this, if it is Arq or TestNG itself. + private static Set<Integer> groupIds = Collections.synchronizedSet(new HashSet<Integer>());
@Test(groups = { "plugin.resource.metadata.test", "NewPlugin" }) public void testRemovalOfObsoleteBundleAndDriftConfig() throws Exception { @@ -607,9 +613,16 @@ public class ResourceMetadataManagerBeanTest extends MetadataBeanTest { }
// this needs to be the last test executed in the class, it does cleanup - @Test(priority = 10, alwaysRun = true, dependsOnGroups = { "RemoveTypes" }) + @Test(priority = 1000, alwaysRun = true, dependsOnGroups = { "RemoveTypes" }) public void afterClassWorkTest() throws Exception { afterClassWork(); + + Subject overlord = LookupUtil.getSubjectManager().getOverlord(); + ResourceGroupManagerLocal groupManager = LookupUtil.getResourceGroupManager(); + + for(int id : groupIds) { + groupManager.deleteResourceGroup(overlord, id); + } }
void assertTypesPersisted(String msg, List<String> types, String plugin) { @@ -714,6 +727,7 @@ public class ResourceMetadataManagerBeanTest extends MetadataBeanTest { ResourceGroup resourceGroup = new ResourceGroup(groupName, resourceType); resourceGroup.setRecursive(recursive); ResourceGroup result = resourceGroupMgr.createResourceGroup(subjectMgr.getOverlord(), resourceGroup); + groupIds.add(result.getId()); return result; }
commit 0f0f307b2696819722a16fe84e27e5b51bd66390 Author: Lukas Krejci lkrejci@redhat.com Date: Thu Aug 15 13:08:45 2013 +0200
[BZ 988881] - GUI now refreshes availability every 15s.
Getting the live availability or live value of a metric now triggers alerts.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/ResourceGWTService.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/ResourceGWTService.java index 3248186..eb0e85b 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/ResourceGWTService.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/ResourceGWTService.java @@ -26,6 +26,7 @@ import com.google.gwt.user.client.rpc.RemoteService;
import org.rhq.core.domain.configuration.Configuration; import org.rhq.core.domain.criteria.ResourceCriteria; +import org.rhq.core.domain.measurement.ResourceAvailability; import org.rhq.core.domain.resource.Agent; import org.rhq.core.domain.resource.CreateResourceHistory; import org.rhq.core.domain.resource.DeleteResourceHistory; @@ -69,6 +70,8 @@ public interface ResourceGWTService extends RemoteService {
ResourceAvailabilitySummary getResourceAvailabilitySummary(int resourceId) throws RuntimeException;
+ ResourceAvailability getLiveResourceAvailability(int resourceId) throws RuntimeException; + PageList<Resource> findResourcesByCriteria(ResourceCriteria criteria) throws RuntimeException;
List<Resource> findResourcesByCriteriaBounded(ResourceCriteria criteria, int maxResources, int maxResourcesByType) diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceErrorsView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceErrorsView.java index b214edc..b46e075 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceErrorsView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceErrorsView.java @@ -169,7 +169,7 @@ public class ResourceErrorsView extends Table<ResourceErrorsDataSource> { @Override public void refresh() { super.refresh(); - this.titleBar.refresh(); + this.titleBar.refreshResourceErrors(); }
private void popupDetails(String details) { @@ -188,7 +188,7 @@ public class ResourceErrorsView extends Table<ResourceErrorsDataSource> { winModal.addCloseClickHandler(new CloseClickHandler() { public void onCloseClick(CloseClickEvent event) { winModal.markForDestroy(); - titleBar.refresh(); + titleBar.refreshResourceErrors(); } });
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceTitleBar.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceTitleBar.java index 85dde83..fbbf0a8 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceTitleBar.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceTitleBar.java @@ -23,9 +23,12 @@ import java.util.LinkedHashSet; import java.util.List; import java.util.Set;
+import com.google.gwt.user.client.Timer; import com.google.gwt.user.client.rpc.AsyncCallback; import com.smartgwt.client.types.Alignment; +import com.smartgwt.client.types.Cursor; import com.smartgwt.client.types.VerticalAlignment; +import com.smartgwt.client.widgets.Button; import com.smartgwt.client.widgets.Canvas; import com.smartgwt.client.widgets.HTMLFlow; import com.smartgwt.client.widgets.Img; @@ -33,13 +36,13 @@ import com.smartgwt.client.widgets.Label; import com.smartgwt.client.widgets.Window; import com.smartgwt.client.widgets.events.ClickEvent; import com.smartgwt.client.widgets.events.ClickHandler; -import com.smartgwt.client.widgets.events.DoubleClickEvent; -import com.smartgwt.client.widgets.events.DoubleClickHandler; import com.smartgwt.client.widgets.form.DynamicForm; import com.smartgwt.client.widgets.form.fields.SpacerItem;
import org.rhq.core.domain.auth.Subject; import org.rhq.core.domain.criteria.ResourceCriteria; +import org.rhq.core.domain.measurement.AvailabilityType; +import org.rhq.core.domain.measurement.ResourceAvailability; import org.rhq.core.domain.resource.Resource; import org.rhq.core.domain.resource.ResourceCategory; import org.rhq.core.domain.resource.ResourceError; @@ -53,6 +56,9 @@ import org.rhq.enterprise.gui.coregui.client.components.tagging.TagEditorView; import org.rhq.enterprise.gui.coregui.client.components.tagging.TagsChangedCallback; import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; import org.rhq.enterprise.gui.coregui.client.util.StringUtility; +import org.rhq.enterprise.gui.coregui.client.util.async.Command; +import org.rhq.enterprise.gui.coregui.client.util.async.CountDownLatch; +import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedIButton; import org.rhq.enterprise.gui.coregui.client.util.message.Message; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedHLayout; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; @@ -74,6 +80,8 @@ public class ResourceTitleBar extends EnhancedVLayout { private static final String COLLAPSED_TOOLTIP = MSG.view_portlet_inventory_tooltip_expand(); private static final String EXPANDED_TOOLTIP = MSG.view_portlet_inventory_tooltip_collapse(); private static final String PLUGIN_ERRORS_ICON = "[SKIN]/Dialog/warn.png"; + private static final String LOADING_ICON = "ajax-loader.gif"; + private Img expandCollapseArrow;
private Resource resource; @@ -87,6 +95,14 @@ public class ResourceTitleBar extends EnhancedVLayout { private EnhancedHLayout detailsForm; private OverviewForm detailsFormSummary; private Img pluginErrors; + private Img loading; + + private Timer resourceAvailAndErrorsRefreshTime = new Timer() { + @Override + public void run() { + refreshAvailAndResourceErrors(); + } + };
public ResourceTitleBar() { super(); @@ -116,14 +132,9 @@ public class ResourceTitleBar extends EnhancedVLayout { this.title.setWidth("*");
this.availabilityImage = new Img(ImageManager.getAvailabilityLargeIcon(null), 24, 24); - this.availabilityImage.addDoubleClickHandler(new DoubleClickHandler() { - public void onDoubleClick(DoubleClickEvent event) { - refresh(); - } - });
this.favoriteButton = new Img(NOT_FAV_ICON, 24, 24); - + this.favoriteButton.setCursor(Cursor.POINTER); this.favoriteButton.addClickHandler(new ClickHandler() { public void onClick(ClickEvent clickEvent) { Set<Integer> favorites = toggleFavoriteLocally(); @@ -179,7 +190,6 @@ public class ResourceTitleBar extends EnhancedVLayout {
pluginErrors = new Img(PLUGIN_ERRORS_ICON, 24, 24); pluginErrors.setVisible(false); - refreshPluginErrors(); // this is an async call
//define tool tip pluginErrors.setPrompt(MSG.view_resource_title_component_errors_tooltip()); @@ -227,10 +237,15 @@ public class ResourceTitleBar extends EnhancedVLayout { } });
+ loading = new Img(LOADING_ICON, 16, 16); + loading.setVisible(false); + loading.setValign(VerticalAlignment.CENTER); + //top information top.addMember(expandCollapseArrow); top.addMember(badge); top.addMember(title); + top.addMember(loading); top.addMember(pluginErrors); top.addMember(availabilityImage); top.addMember(favoriteButton); @@ -267,6 +282,17 @@ public class ResourceTitleBar extends EnhancedVLayout { } addMember(details); ResourceTitleBar.this.markForRedraw(); + + resourceAvailAndErrorsRefreshTime.scheduleRepeating(15000); + } + + @Override + protected void onUnload() { + if (resourceAvailAndErrorsRefreshTime != null) { + resourceAvailAndErrorsRefreshTime.cancel(); + } + + super.onUnload(); }
private void loadTags(final TagEditorView tagEditorView) { @@ -344,11 +370,27 @@ public class ResourceTitleBar extends EnhancedVLayout { return favorites; }
- public void refresh() { - refreshPluginErrors(); + public void refreshResourceErrors() { + refreshErrors(null); + } + + public void refreshAvailAndResourceErrors() { + CountDownLatch latch = CountDownLatch.create(2, new Command() { + @Override + public void execute() { + loading.setVisible(false); + markForRedraw(); + } + }); + + loading.setVisible(true); + loading.markForRedraw(); + + refreshAvailability(latch); + refreshErrors(latch); }
- private void refreshPluginErrors() { + private void refreshErrors(final CountDownLatch latch) { GWTServiceLookup.getResourceService().findResourceErrors(resourceComposite.getResource().getId(), new AsyncCallback<List<ResourceError>>() { public void onFailure(Throwable caught) { @@ -356,15 +398,59 @@ public class ResourceTitleBar extends EnhancedVLayout { CoreGUI.getErrorHandler().handleError( MSG.dataSource_resourceErrors_error_fetchFailure(String.valueOf(resourceComposite.getResource() .getId())), caught); + + if (latch != null) { + latch.countDown(); + } else { + markForRedraw(); + } }
public void onSuccess(List<ResourceError> result) { pluginErrors.setVisible(!result.isEmpty()); - markForRedraw(); + + if (latch != null) { + latch.countDown(); + } else { + markForRedraw(); + } } }); }
+ private void refreshAvailability(final CountDownLatch latch) { + final AvailabilityType currentAvail = resource.getCurrentAvailability().getAvailabilityType(); + + GWTServiceLookup.getResourceService().getLiveResourceAvailability(resource.getId(), + new AsyncCallback<ResourceAvailability>() { + + @Override + public void onFailure(Throwable caught) { + availabilityImage.setSrc(ImageManager.getAvailabilityLargeIconFromAvailType(currentAvail)); + CoreGUI.getErrorHandler().handleError("I18N: Failed to refresh the availability", caught); + //MSG.dataSource_resourceErrors_error_fetchFailure(String.valueOf(resourceComposite.getResource() + // .getId())), caught); + if (latch != null) { + latch.countDown(); + } else { + markForRedraw(); + } + } + + @Override + public void onSuccess(ResourceAvailability result) { + availabilityImage.setSrc(ImageManager.getAvailabilityLargeIconFromAvailType(result.getAvailabilityType())); + resource.setCurrentAvailability(result); + availabilityImage.markForRedraw(); + if (latch != null) { + latch.countDown(); + } else { + markForRedraw(); + } + } + }); + } + public class UpdateFavoritesCallback implements AsyncCallback<Subject> { public void onSuccess(Subject subject) { String msg = null; @@ -391,4 +477,4 @@ public class ResourceTitleBar extends EnhancedVLayout { } }
-} \ No newline at end of file +} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/ResourceGWTServiceImpl.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/ResourceGWTServiceImpl.java index dfa1b9f..fbf5d7b 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/ResourceGWTServiceImpl.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/ResourceGWTServiceImpl.java @@ -33,6 +33,7 @@ import org.rhq.core.domain.configuration.Configuration; import org.rhq.core.domain.configuration.definition.ConfigurationDefinition; import org.rhq.core.domain.configuration.definition.ConfigurationTemplate; import org.rhq.core.domain.criteria.ResourceCriteria; +import org.rhq.core.domain.measurement.ResourceAvailability; import org.rhq.core.domain.resource.Agent; import org.rhq.core.domain.resource.CannotConnectToAgentException; import org.rhq.core.domain.resource.CreateResourceHistory; @@ -135,6 +136,15 @@ public class ResourceGWTServiceImpl extends AbstractGWTServiceImpl implements Re } }
+ @Override + public ResourceAvailability getLiveResourceAvailability(int resourceId) throws RuntimeException { + try { + return SerialUtility.prepare(resourceManager.getLiveResourceAvailability(getSessionSubject(), resourceId), "ResourceService.getLiveResourceAvailability"); + } catch (Throwable t) { + throw getExceptionToThrowToClient(t); + } + } + public PageList<Resource> findResourcesByCriteria(ResourceCriteria criteria) throws RuntimeException { try { PageList<Resource> result = resourceManager.findResourcesByCriteria(getSessionSubject(), criteria); diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/alert/test/AlertConditionTest.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/alert/test/AlertConditionTest.java index 3797542..153cc3d 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/alert/test/AlertConditionTest.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/alert/test/AlertConditionTest.java @@ -23,8 +23,6 @@ import java.util.HashSet; import java.util.List; import java.util.Set;
-import javax.persistence.NoResultException; - import org.testng.Assert; import org.testng.annotations.Test;
@@ -38,7 +36,6 @@ import org.rhq.core.domain.alert.AlertDampening.Category; import org.rhq.core.domain.alert.AlertDefinition; import org.rhq.core.domain.alert.AlertPriority; import org.rhq.core.domain.alert.BooleanExpression; -import org.rhq.core.domain.cloud.Server; import org.rhq.core.domain.criteria.AlertCriteria; import org.rhq.core.domain.criteria.ResourceCriteria; import org.rhq.core.domain.discovery.AvailabilityReport; @@ -49,7 +46,6 @@ import org.rhq.core.domain.measurement.MeasurementDefinition; import org.rhq.core.domain.measurement.MeasurementReport; import org.rhq.core.domain.measurement.MeasurementSchedule; import org.rhq.core.domain.measurement.MeasurementScheduleRequest; -import org.rhq.core.domain.resource.Agent; import org.rhq.core.domain.resource.Resource; import org.rhq.core.domain.resource.ResourceType; import org.rhq.core.domain.util.PageControl; @@ -69,12 +65,7 @@ import org.rhq.enterprise.server.util.LookupUtil; public class AlertConditionTest extends UpdatePluginMetadataTestBase { private static final boolean ENABLED = true;
- // this must match the constant found in ServerManagerBean - private static final String RHQ_SERVER_NAME_PROPERTY = "rhq.server.high-availability.name"; - private static final String RHQ_SERVER_NAME_PROPERTY_VALUE = "AlertConditionTestServer"; - private Resource resource; - private Server server;
@Override protected String getSubsystemDirectory() { @@ -82,22 +73,12 @@ public class AlertConditionTest extends UpdatePluginMetadataTestBase { }
@Override - protected void beforeMethod() throws Exception { - super.beforeMethod(); - System.setProperty(RHQ_SERVER_NAME_PROPERTY, RHQ_SERVER_NAME_PROPERTY_VALUE); - } - - @Override protected void afterMethod() throws Exception { if (resource != null) { deleteNewResource(resource); resource = null; }
- deleteServerIdentity(); - - System.setProperty(RHQ_SERVER_NAME_PROPERTY, ""); - super.afterMethod(); }
@@ -661,32 +642,4 @@ public class AlertConditionTest extends UpdatePluginMetadataTestBase { LookupUtil.getAlertConditionCacheManager().reloadAllCaches(); }
- private void createServerIdentity() { - server = new Server(); - server.setName(RHQ_SERVER_NAME_PROPERTY_VALUE); - server.setAddress("localhost"); - server.setPort(7080); - server.setSecurePort(7443); - server.setComputePower(1); - server.setOperationMode(Server.OperationMode.MAINTENANCE); - int serverId = LookupUtil.getServerManager().create(server); - assert serverId > 0 : "could not create our server identity in the DB"; - - // simulate the agent being "connected" to the server - try { - Agent agent = getAgent(); - agent.setServer(server); - LookupUtil.getAgentManager().updateAgent(agent); - } catch (NoResultException nre) { - // no agent to attach - } - } - - private void deleteServerIdentity() throws Exception { - if (server != null) { - cleanupAgent(); // can't remove the server before we purge the agent - LookupUtil.getTopologyManager().deleteServer(LookupUtil.getSubjectManager().getOverlord(), server.getId()); - server = null; - } - } } diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/measurement/MeasurementDataManagerBeanTest.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/measurement/MeasurementDataManagerBeanTest.java index 9f49143..dbd03ae 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/measurement/MeasurementDataManagerBeanTest.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/measurement/MeasurementDataManagerBeanTest.java @@ -34,8 +34,11 @@ import static org.rhq.test.AssertUtils.assertPropertiesMatch; import java.math.BigDecimal; import java.math.MathContext; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; +import java.util.HashSet; import java.util.List; +import java.util.Set;
import javax.ejb.EJB;
@@ -43,12 +46,30 @@ import com.datastax.driver.core.exceptions.NoHostAvailableException;
import org.joda.time.DateTime; import org.joda.time.Hours; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; import org.testng.annotations.Test;
+import org.rhq.core.clientapi.agent.discovery.DiscoveryAgentService; +import org.rhq.core.clientapi.agent.measurement.MeasurementAgentService; +import org.rhq.core.domain.alert.Alert; +import org.rhq.core.domain.alert.AlertCondition; +import org.rhq.core.domain.alert.AlertConditionCategory; +import org.rhq.core.domain.alert.AlertConditionOperator; +import org.rhq.core.domain.alert.AlertDampening; +import org.rhq.core.domain.alert.AlertDefinition; +import org.rhq.core.domain.alert.AlertPriority; +import org.rhq.core.domain.alert.BooleanExpression; import org.rhq.core.domain.auth.Subject; +import org.rhq.core.domain.cloud.Server; import org.rhq.core.domain.common.EntityContext; +import org.rhq.core.domain.criteria.AlertCriteria; import org.rhq.core.domain.criteria.ResourceCriteria; +import org.rhq.core.domain.measurement.Availability; +import org.rhq.core.domain.measurement.AvailabilityType; import org.rhq.core.domain.measurement.MeasurementAggregate; +import org.rhq.core.domain.measurement.MeasurementData; import org.rhq.core.domain.measurement.MeasurementDataNumeric; import org.rhq.core.domain.measurement.MeasurementDefinition; import org.rhq.core.domain.measurement.MeasurementReport; @@ -56,14 +77,18 @@ import org.rhq.core.domain.measurement.MeasurementSchedule; import org.rhq.core.domain.measurement.MeasurementScheduleRequest; import org.rhq.core.domain.measurement.composite.MeasurementDataNumericHighLowComposite; import org.rhq.core.domain.resource.Agent; +import org.rhq.core.domain.resource.InventoryStatus; import org.rhq.core.domain.resource.Resource; import org.rhq.core.domain.resource.ResourceType; +import org.rhq.enterprise.server.alert.AlertDefinitionManagerLocal; import org.rhq.enterprise.server.auth.SubjectManagerLocal; import org.rhq.enterprise.server.drift.DriftServerPluginService; import org.rhq.enterprise.server.resource.ResourceManagerLocal; import org.rhq.enterprise.server.storage.StorageClientManagerBean; import 
org.rhq.enterprise.server.test.AbstractEJB3Test; +import org.rhq.enterprise.server.test.TestServerCommunicationsService; import org.rhq.enterprise.server.test.TransactionCallback; +import org.rhq.enterprise.server.util.LookupUtil; import org.rhq.enterprise.server.util.ResourceTreeHelper; import org.rhq.server.metrics.MetricsDAO; import org.rhq.server.metrics.StorageSession; @@ -77,6 +102,10 @@ import org.rhq.test.AssertUtils; */ public class MeasurementDataManagerBeanTest extends AbstractEJB3Test {
+ // this must match the constant found in ServerManagerBean + private static final String RHQ_SERVER_NAME_PROPERTY = "rhq.server.high-availability.name"; + private static final String RHQ_SERVER_NAME_PROPERTY_VALUE = "TestServer"; + //private final Log log = LogFactory.getLog(MeasurementDataManagerBeanTest.class);
private static final boolean ENABLED = true; @@ -101,6 +130,8 @@ public class MeasurementDataManagerBeanTest extends AbstractEJB3Test {
private ResourceType resourceType;
+ private Server server; + private Agent agent;
private MeasurementDefinition dynamicMeasuremenDef; @@ -109,7 +140,7 @@ public class MeasurementDataManagerBeanTest extends AbstractEJB3Test {
private MeasurementSchedule dynamicSchedule;
- private Subject overlord; + private AlertDefinition alertDefinition;
@EJB private SubjectManagerLocal subjectManager; @@ -125,9 +156,17 @@ public class MeasurementDataManagerBeanTest extends AbstractEJB3Test {
private MetricsDAO metricsDAO;
+ private TestServerCommunicationsService agentServiceContainer; + + private Subject getOverlord() { + return subjectManager.getOverlord(); + } + @Override protected void beforeMethod() throws Exception { - overlord = subjectManager.getOverlord(); + agentServiceContainer = prepareForTestAgents(); + + prepareScheduler();
metricsDAO = storageClientManager.getMetricsDAO();
@@ -137,15 +176,21 @@ public class MeasurementDataManagerBeanTest extends AbstractEJB3Test { prepareCustomServerPluginService(driftServerPluginService); driftServerPluginService.masterConfig.getPluginDirectory().mkdirs();
+ System.setProperty(RHQ_SERVER_NAME_PROPERTY, RHQ_SERVER_NAME_PROPERTY_VALUE); + createInventory(); insertDummyReport(); + + agentServiceContainer.addStartedAgent(agent); }
@Override protected void afterMethod() throws Exception { - purgeDB(); + purgeDB(true);
unprepareServerPluginService(); + unprepareScheduler(); + unprepareForTestAgents(); }
@Test(enabled = ENABLED) @@ -168,7 +213,7 @@ public class MeasurementDataManagerBeanTest extends AbstractEJB3Test { dataManager.mergeMeasurementReport(report); waitForRawInserts();
- List<MeasurementDataNumericHighLowComposite> actualData = findDataForContext(overlord, + List<MeasurementDataNumericHighLowComposite> actualData = findDataForContext(getOverlord(), EntityContext.forResource(resource.getId()), dynamicSchedule, beginTime.getMillis(), endTime.getMillis());
assertEquals("Expected to get back 60 data points.", buckets.getNumDataPoints(), actualData.size()); @@ -213,7 +258,7 @@ public class MeasurementDataManagerBeanTest extends AbstractEJB3Test { dataManager.mergeMeasurementReport(report); waitForRawInserts();
- MeasurementAggregate actual = dataManager.getAggregate(overlord, dynamicSchedule.getId(), + MeasurementAggregate actual = dataManager.getAggregate(getOverlord(), dynamicSchedule.getId(), beginTime.getMillis(), endTime.getMillis());
MeasurementAggregate expected = new MeasurementAggregate(1.1, divide((1.1 + 2.2 + 3.3 + 4.4 + 5.5 + 6.6), 6), @@ -244,7 +289,7 @@ public class MeasurementDataManagerBeanTest extends AbstractEJB3Test {
insert1HourData(data);
- List<MeasurementDataNumericHighLowComposite> actualData = findDataForContext(overlord, + List<MeasurementDataNumericHighLowComposite> actualData = findDataForContext(getOverlord(), EntityContext.forResource(resource.getId()), dynamicSchedule, beginTime.getMillis(), endTime.getMillis());
assertEquals("Expected to get back 60 data points.", buckets.getNumDataPoints(), actualData.size()); @@ -260,8 +305,50 @@ public class MeasurementDataManagerBeanTest extends AbstractEJB3Test { actualData.get(59), 0.0001D); }
+ @Test(enabled = ENABLED) + public void gettingLiveDataTriggersAlerts() throws Exception { + agentServiceContainer.measurementService = Mockito.mock(MeasurementAgentService.class); + + Mockito.when(agentServiceContainer.measurementService.getRealTimeMeasurementValue(Mockito.anyInt(), Mockito.anySetOf(MeasurementScheduleRequest.class))).then( + new Answer<Set<MeasurementData>>() { + @Override + @SuppressWarnings("unchecked") + public Set<MeasurementData> answer(InvocationOnMock invocation) throws Throwable { + Set<MeasurementScheduleRequest> requests = (Set<MeasurementScheduleRequest>) invocation.getArguments()[1]; + + Set<MeasurementData> ret = new HashSet<MeasurementData>(); + for(MeasurementScheduleRequest req : requests) { + ret.add(new MeasurementDataNumeric(System.currentTimeMillis(), req, (double) System.nanoTime())); + } + + return ret; + } + }); + + dataManager.findLiveData(getOverlord(), resource.getId(), new int[] { dynamicMeasuremenDef.getId()}, Long.MAX_VALUE); + // wait for our JMS messages to process and see if we get any alerts + Thread.sleep(3000); + + //need to do this so that we don't have to wait on server's heartbeat to propagate the + //collected value into the alert condition cache + LookupUtil.getAlertConditionCacheManager().reloadAllCaches(); + + //this first metric collection doesn't trigger alerts because there's no "history" to compare against + //let's trigger another metric collection so that we see the alert fire... + dataManager.findLiveData(getOverlord(), resource.getId(), new int[] { dynamicMeasuremenDef.getId()}, Long.MAX_VALUE); + // wait for our JMS messages to process and see if we get any alerts + Thread.sleep(3000); + + //check that the alert fired when the value of the measurement changed. 
+ AlertCriteria aCrit = new AlertCriteria(); + aCrit.addFilterResourceIds(resource.getId()); + + List<Alert> alerts = LookupUtil.getAlertManager().findAlertsByCriteria(getOverlord(), aCrit); + assertEquals("Unexpected number of alerts on the resource.", 1, alerts.size()); + } + private void createInventory() throws Exception { - purgeDB(); + purgeDB(false); executeInTransaction(false, new TransactionCallback() { @Override public void execute() throws Exception { @@ -269,7 +356,18 @@ public class MeasurementDataManagerBeanTest extends AbstractEJB3Test { resourceType = new ResourceType(RESOURCE_TYPE, PLUGIN, SERVER, null); em.persist(resourceType);
+ server = new Server(); + server.setName(RHQ_SERVER_NAME_PROPERTY_VALUE); + server.setAddress("localhost"); + server.setPort(7080); + server.setSecurePort(7443); + server.setComputePower(1); + server.setOperationMode(Server.OperationMode.MAINTENANCE); + int serverId = LookupUtil.getServerManager().create(server); + assert serverId > 0 : "could not create our server identity in the DB"; + agent = new Agent(AGENT_NAME, "localhost", 9999, "", "randomToken"); + agent.setServer(server); em.persist(agent);
dynamicMeasuremenDef = new MeasurementDefinition(resourceType, DYNAMIC_DEF_NAME); @@ -281,6 +379,7 @@ public class MeasurementDataManagerBeanTest extends AbstractEJB3Test {
resource = new Resource(RESOURCE_KEY, RESOURCE_NAME, resourceType); resource.setUuid(RESOURCE_UUID); + resource.setInventoryStatus(InventoryStatus.COMMITTED); resource.setAgent(agent);
em.persist(resource); @@ -292,29 +391,65 @@ public class MeasurementDataManagerBeanTest extends AbstractEJB3Test { em.persist(dynamicSchedule); } }); + + alertDefinition = new AlertDefinition(); + AlertCondition cond = new AlertCondition(alertDefinition, AlertConditionCategory.CHANGE); + cond.setName(DYNAMIC_DEF_NAME); + cond.setMeasurementDefinition(dynamicMeasuremenDef); + alertDefinition.setName("liveDataTestAlert"); + alertDefinition.setResource(resource); + alertDefinition.setPriority(AlertPriority.MEDIUM); + alertDefinition.setRecoveryId(0); + alertDefinition.setAlertDampening(new AlertDampening(AlertDampening.Category.NONE)); + alertDefinition.setConditions(Collections.singleton(cond)); + alertDefinition.setEnabled(true); + alertDefinition.setConditionExpression(BooleanExpression.ALL); + + AlertDefinitionManagerLocal alertDefinitionManager = LookupUtil.getAlertDefinitionManager(); + //needs to be done outside of the above transaction, so that the createAlert... method can "see" the resource. + alertDefinitionManager.createAlertDefinitionInNewTransaction(getOverlord(), alertDefinition, resource.getId(), true); + + //obvious, right? This needs to be done for the alert subsystem to become aware of the new def + LookupUtil.getAlertConditionCacheManager().reloadAllCaches(); }
- private void purgeDB() { + private void purgeDB(final boolean assumeResourceExists) { purgeMetricsTables();
+ ResourceCriteria c = new ResourceCriteria(); + c.addFilterInventoryStatus(null); + c.addFilterResourceKey(RESOURCE_KEY); + c.fetchSchedules(true); + c.fetchAlertDefinitions(true); + + final List<Resource> r = resourceManager.findResourcesByCriteria(subjectManager.getOverlord(), c); + if (assumeResourceExists && !r.isEmpty()) { + assertTrue("Should be only 1 resource", r.size() == 1); + } + + if (!r.isEmpty()) { + Resource doomedResource = r.get(0); + deleteAlertDefinitions(doomedResource.getAlertDefinitions()); + } + executeInTransaction(false, new TransactionCallback() { @Override public void execute() throws Exception { - ResourceCriteria c = new ResourceCriteria(); - c.addFilterInventoryStatus(null); - c.addFilterResourceKey(RESOURCE_KEY); - c.fetchSchedules(true); - List<Resource> r = resourceManager.findResourcesByCriteria(subjectManager.getOverlord(), c); - - // Note that the order of deletes is important due to FK - // constraints. + if (!r.isEmpty()) { - assertTrue("Should be only 1 resource", r.size() == 1); - Resource doomedResource = r.get(0); - deleteMeasurementSchedules(doomedResource); - deleteResource(doomedResource); + //load the resource entity again within this transaction so that we + //have an attached copy of it. + Resource delete = em.find(Resource.class, r.get(0).getId()); + + + // Note that the order of deletes is important due to FK + // constraints. + deleteMeasurementSchedules(delete); + deleteResource(delete); } + deleteAgent(); + deleteServer(); deleteDynamicMeasurementDef(); deleteResourceType(); } @@ -330,6 +465,10 @@ public class MeasurementDataManagerBeanTest extends AbstractEJB3Test { em.createQuery("delete from Agent where name = :name").setParameter("name", AGENT_NAME).executeUpdate(); }
+ private void deleteServer() { + em.createQuery("delete from Server where name = :name").setParameter("name", RHQ_SERVER_NAME_PROPERTY_VALUE).executeUpdate(); + } + private void deleteResourceType() { em.createQuery("delete from ResourceType where name = :name and plugin = :plugin") .setParameter("name", RESOURCE_TYPE).setParameter("plugin", PLUGIN).executeUpdate(); @@ -349,6 +488,26 @@ public class MeasurementDataManagerBeanTest extends AbstractEJB3Test { } }
+ private void deleteAlertDefinitions(Collection<AlertDefinition> defs) { + AlertDefinitionManagerLocal alertDefinitionManager = LookupUtil.getAlertDefinitionManager(); + + int[] ids = new int[defs.size()]; + int i = 0; + for(AlertDefinition def : defs) { + ids[i++] = def.getId(); + + LookupUtil.getAlertManager() + .deleteAlertsByContext(getOverlord(), EntityContext.forResource(def.getResource().getId())); + } + + alertDefinitionManager.removeAlertDefinitions(getOverlord(), ids); + + alertDefinitionManager.purgeUnusedAlertDefinitions(); + for(i = 0; i < ids.length; ++i) { + alertDefinitionManager.purgeInternals(ids[i]); + } + } + private void insertDummyReport() { // we insert the dummy report due to https://bugzilla.redhat.com/show_bug.cgi?id=822240 DateTime now = new DateTime(); diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/resource/metadata/test/UpdatePluginMetadataTestBase.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/resource/metadata/test/UpdatePluginMetadataTestBase.java index ac4ba79..abdc281 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/resource/metadata/test/UpdatePluginMetadataTestBase.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/resource/metadata/test/UpdatePluginMetadataTestBase.java @@ -38,6 +38,7 @@ import org.rhq.core.clientapi.agent.measurement.MeasurementAgentService; import org.rhq.core.clientapi.descriptor.DescriptorPackages; import org.rhq.core.clientapi.descriptor.plugin.PluginDescriptor; import org.rhq.core.domain.auth.Subject; +import org.rhq.core.domain.cloud.Server; import org.rhq.core.domain.criteria.ResourceCriteria; import org.rhq.core.domain.criteria.ResourceTypeCriteria; import org.rhq.core.domain.measurement.MeasurementData; @@ -61,6 +62,10 @@ import org.rhq.enterprise.server.util.LookupUtil;
public class UpdatePluginMetadataTestBase extends AbstractEJB3Test {
+ // this must match the constant found in ServerManagerBean + protected static final String RHQ_SERVER_NAME_PROPERTY = "rhq.server.high-availability.name"; + protected static final String RHQ_SERVER_NAME_PROPERTY_VALUE = "TestServer"; + protected TestServerCommunicationsService agentServiceContainer;
protected static final String PLUGIN_NAME = "UpdatePluginMetadataTestBasePlugin"; // don't change this - our test descriptor .xml files use it as plugin name @@ -70,6 +75,7 @@ public class UpdatePluginMetadataTestBase extends AbstractEJB3Test { protected static PluginManagerLocal pluginMgr; protected static ResourceTypeManagerLocal resourceTypeManager; protected static ResourceManagerLocal resourceManager; + private Server server;
@Override protected void beforeMethod() throws Exception { @@ -86,6 +92,8 @@ public class UpdatePluginMetadataTestBase extends AbstractEJB3Test { pluginMgr = LookupUtil.getPluginManager(); resourceTypeManager = LookupUtil.getResourceTypeManager(); resourceManager = LookupUtil.getResourceManager(); + + System.setProperty(RHQ_SERVER_NAME_PROPERTY, RHQ_SERVER_NAME_PROPERTY_VALUE); }
@Override @@ -96,6 +104,10 @@ public class UpdatePluginMetadataTestBase extends AbstractEJB3Test { unpreparePluginScannerService(); unprepareScheduler(); unprepareForTestAgents(); + + deleteServerIdentity(); + + System.setProperty(RHQ_SERVER_NAME_PROPERTY, ""); }
protected void prepareMockAgentServiceContainer() { @@ -261,6 +273,35 @@ public class UpdatePluginMetadataTestBase extends AbstractEJB3Test { agentServiceContainer.addStartedAgent(agent); }
+ protected void createServerIdentity() { + server = new Server(); + server.setName(RHQ_SERVER_NAME_PROPERTY_VALUE); + server.setAddress("localhost"); + server.setPort(7080); + server.setSecurePort(7443); + server.setComputePower(1); + server.setOperationMode(Server.OperationMode.MAINTENANCE); + int serverId = LookupUtil.getServerManager().create(server); + assert serverId > 0 : "could not create our server identity in the DB"; + + // simulate the agent being "connected" to the server + try { + Agent agent = getAgent(); + agent.setServer(server); + LookupUtil.getAgentManager().updateAgent(agent); + } catch (NoResultException nre) { + // no agent to attach + } + } + + protected void deleteServerIdentity() throws Exception { + if (server != null) { + cleanupAgent(); // can't remove the server before we purge the agent + LookupUtil.getTopologyManager().deleteServer(LookupUtil.getSubjectManager().getOverlord(), server.getId()); + server = null; + } + } + /** * A dummy that needs to be set up before running ResourceManager.deleteResource() */ diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/resource/test/ResourceManagerBeanTest.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/resource/test/ResourceManagerBeanTest.java index 1fbeb3c..660afed 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/resource/test/ResourceManagerBeanTest.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/resource/test/ResourceManagerBeanTest.java @@ -18,17 +18,40 @@ */ package org.rhq.enterprise.server.resource.test;
+import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.Random; import java.util.UUID;
import javax.transaction.NotSupportedException; import javax.transaction.SystemException;
+import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; import org.testng.annotations.Test;
+import org.jboss.remoting.InvokerLocator; + +import org.rhq.core.clientapi.agent.discovery.DiscoveryAgentService; +import org.rhq.core.domain.alert.Alert; +import org.rhq.core.domain.alert.AlertCondition; +import org.rhq.core.domain.alert.AlertConditionCategory; +import org.rhq.core.domain.alert.AlertConditionOperator; +import org.rhq.core.domain.alert.AlertDampening; +import org.rhq.core.domain.alert.AlertDefinition; +import org.rhq.core.domain.alert.AlertPriority; +import org.rhq.core.domain.alert.BooleanExpression; import org.rhq.core.domain.auth.Subject; +import org.rhq.core.domain.criteria.AlertCriteria; +import org.rhq.core.domain.criteria.AlertDefinitionCriteria; +import org.rhq.core.domain.criteria.ResourceCriteria; import org.rhq.core.domain.criteria.ResourceGroupCriteria; +import org.rhq.core.domain.measurement.Availability; +import org.rhq.core.domain.measurement.AvailabilityType; +import org.rhq.core.domain.measurement.MeasurementDefinition; +import org.rhq.core.domain.measurement.ResourceAvailability; import org.rhq.core.domain.resource.Agent; import org.rhq.core.domain.resource.InventoryStatus; import org.rhq.core.domain.resource.Resource; @@ -39,8 +62,13 @@ import org.rhq.core.domain.resource.ResourceType; import org.rhq.core.domain.resource.group.GroupCategory; import org.rhq.core.domain.resource.group.ResourceGroup; import org.rhq.core.domain.util.PageList; +import org.rhq.enterprise.communications.ServiceContainer; +import org.rhq.enterprise.communications.command.server.CommandProcessorMetrics; +import org.rhq.enterprise.server.agentclient.AgentClient; +import org.rhq.enterprise.server.alert.AlertDefinitionManagerLocal; import org.rhq.enterprise.server.auth.SessionManager; import org.rhq.enterprise.server.auth.SessionNotFoundException; +import org.rhq.enterprise.server.core.comm.ServerConfiguration; import org.rhq.enterprise.server.discovery.DiscoveryServerServiceImpl; import 
org.rhq.enterprise.server.operation.OperationDefinitionNotFoundException; import org.rhq.enterprise.server.resource.ResourceManagerLocal; @@ -51,6 +79,7 @@ import org.rhq.enterprise.server.resource.group.ResourceGroupNotFoundException; import org.rhq.enterprise.server.resource.group.definition.exception.GroupDefinitionNotFoundException; import org.rhq.enterprise.server.resource.metadata.test.UpdatePluginMetadataTestBase; import org.rhq.enterprise.server.test.TestServerCommunicationsService; +import org.rhq.enterprise.server.test.TestServerCommunicationsServiceMBean; import org.rhq.enterprise.server.util.LookupUtil;
/** @@ -58,39 +87,25 @@ import org.rhq.enterprise.server.util.LookupUtil; */ @Test public class ResourceManagerBeanTest extends UpdatePluginMetadataTestBase { - private Subject overlord; private Resource newResource; private ResourceGroup newGroup; private ResourceGroupManagerLocal groupManager;
- TestServerCommunicationsService agentServiceContainer; - @Override protected void beforeMethod() throws Exception { super.beforeMethod();
- overlord = LookupUtil.getSubjectManager().getOverlord(); newResource = createNewResourceWithNewType(); groupManager = LookupUtil.getResourceGroupManager(); newGroup = createNewGroup(); }
- @Override - protected void afterMethod() throws Exception { - if (newGroup != null) { - groupManager.deleteResourceGroup(overlord, newGroup.getId()); - } - deleteNewResourceAgentResourceType(newResource); - - super.afterMethod(); - } - public void testResourceErrors() { ResourceError error; List<ResourceError> errors; DiscoveryServerServiceImpl serverService = new DiscoveryServerServiceImpl();
- errors = resourceManager.findResourceErrors(overlord, newResource.getId(), + errors = resourceManager.findResourceErrors(getOverlord(), newResource.getId(), ResourceErrorType.INVALID_PLUGIN_CONFIGURATION); assert errors.size() == 0;
@@ -100,7 +115,7 @@ public class ResourceManagerBeanTest extends UpdatePluginMetadataTestBase { // simulate the agent notifying the server about an error // this will exercise the addResourceError in the SLSB serverService.setResourceError(error); - errors = resourceManager.findResourceErrors(overlord, newResource.getId(), + errors = resourceManager.findResourceErrors(getOverlord(), newResource.getId(), ResourceErrorType.INVALID_PLUGIN_CONFIGURATION); assert errors.size() == 1; error = errors.get(0); @@ -118,7 +133,7 @@ public class ResourceManagerBeanTest extends UpdatePluginMetadataTestBase { error.setSummary("another summary"); error.setDetail("another detail"); serverService.setResourceError(error); - errors = resourceManager.findResourceErrors(overlord, newResource.getId(), + errors = resourceManager.findResourceErrors(getOverlord(), newResource.getId(), ResourceErrorType.INVALID_PLUGIN_CONFIGURATION); assert errors.size() == 1; error = errors.get(0); @@ -128,12 +143,22 @@ public class ResourceManagerBeanTest extends UpdatePluginMetadataTestBase { assert error.getErrorType() == ResourceErrorType.INVALID_PLUGIN_CONFIGURATION; assert error.getTimeOccurred() == 567890;
- resourceManager.deleteResourceError(overlord, error.getId()); - errors = resourceManager.findResourceErrors(overlord, newResource.getId(), + resourceManager.deleteResourceError(getOverlord(), error.getId()); + errors = resourceManager.findResourceErrors(getOverlord(), newResource.getId(), ResourceErrorType.INVALID_PLUGIN_CONFIGURATION); assert errors.size() == 0; }
+ @Override + protected void afterMethod() throws Exception { + if (newGroup != null) { + groupManager.deleteResourceGroup(getOverlord(), newGroup.getId()); + } + deleteNewResourceAgentResourceType(newResource); + + super.afterMethod(); + } + public void testResourceLineage() throws Exception { // given a resource id for the leaf resource in a resource hierarchy int leafResourceId = givenASampleResourceHierarchy(); @@ -162,7 +187,7 @@ public class ResourceManagerBeanTest extends UpdatePluginMetadataTestBase { // Make sure our application exceptions are not wrapped public void bz886850Test() { try { - resourceManager.getResourceById(overlord, 2637426); + resourceManager.getResourceById(getOverlord(), 2637426); fail("Should have thrown a ResourceNotFoundException"); } catch (Throwable t) { if (!(t instanceof ResourceNotFoundException)) { @@ -178,7 +203,7 @@ public class ResourceManagerBeanTest extends UpdatePluginMetadataTestBase { } } try { - LookupUtil.getOperationManager().getOperationDefinition(overlord, 3456347); + LookupUtil.getOperationManager().getOperationDefinition(getOverlord(), 3456347); fail("Should have thrown a OperationDefinitionNotFoundException"); } catch (Throwable t) { if (!(t instanceof OperationDefinitionNotFoundException)) { @@ -186,7 +211,7 @@ public class ResourceManagerBeanTest extends UpdatePluginMetadataTestBase { } } try { - LookupUtil.getResourceTypeManager().getResourceTypeById(overlord, 3456347); + LookupUtil.getResourceTypeManager().getResourceTypeById(getOverlord(), 3456347); fail("Should have thrown a ResourceTypeNotFoundException"); } catch (Throwable t) { if (!(t instanceof ResourceTypeNotFoundException)) { @@ -194,7 +219,7 @@ public class ResourceManagerBeanTest extends UpdatePluginMetadataTestBase { } } try { - LookupUtil.getResourceGroupManager().getResourceGroup(overlord, 3456347); + LookupUtil.getResourceGroupManager().getResourceGroup(getOverlord(), 3456347); fail("Should have thrown a ResourceGroupNotFoundException"); } 
catch (Throwable t) { if (!(t instanceof ResourceGroupNotFoundException)) { @@ -215,7 +240,7 @@ public class ResourceManagerBeanTest extends UpdatePluginMetadataTestBase { ResourceGroupCriteria criteria = new ResourceGroupCriteria(); criteria.addFilterId(newGroup.getId()); criteria.fetchExplicitResources(true); - PageList<ResourceGroup> persistedGroups = groupManager.findResourceGroupsByCriteria(overlord, criteria); + PageList<ResourceGroup> persistedGroups = groupManager.findResourceGroupsByCriteria(getOverlord(), criteria); assertEquals("There should be just one group with id " + newGroup.getId(), 1, persistedGroups.size());
@@ -226,8 +251,8 @@ public class ResourceManagerBeanTest extends UpdatePluginMetadataTestBase { .getExplicitResources().size());
// add resource to group - groupManager.addResourcesToGroup(overlord, newGroup.getId(), new int[] { newResource.getId() }); - persistedGroups = groupManager.findResourceGroupsByCriteria(overlord, criteria); + groupManager.addResourcesToGroup(getOverlord(), newGroup.getId(), new int[] { newResource.getId() }); + persistedGroups = groupManager.findResourceGroupsByCriteria(getOverlord(), criteria); assertEquals("There should be one member in the newly created group.", 1, persistedGroups.get(0).getExplicitResources() .size()); } @@ -237,41 +262,110 @@ public class ResourceManagerBeanTest extends UpdatePluginMetadataTestBase { ResourceGroupCriteria criteria = new ResourceGroupCriteria(); criteria.addFilterId(newGroup.getId()); criteria.fetchExplicitResources(true); - PageList<ResourceGroup> persistedGroups = groupManager.findResourceGroupsByCriteria(overlord, criteria); + PageList<ResourceGroup> persistedGroups = groupManager.findResourceGroupsByCriteria(getOverlord(), criteria); assertEquals("There should be just one group with id " + newGroup.getId(), 1, persistedGroups.size()); assertEquals("An empty group is considered as MIXED.", GroupCategory.MIXED, persistedGroups.get(0) .getGroupCategory());
// add resource to group - groupManager.addResourcesToGroup(overlord, persistedGroups.get(0).getId(), new int[] { newResource.getId() }); - persistedGroups = groupManager.findResourceGroupsByCriteria(overlord, criteria); + groupManager.addResourcesToGroup(getOverlord(), persistedGroups.get(0).getId(), new int[] { newResource.getId() }); + persistedGroups = groupManager.findResourceGroupsByCriteria(getOverlord(), criteria); assertEquals("A group with just one explicit member is considered as COMPATIBLE.", GroupCategory.COMPATIBLE, persistedGroups.get(0).getGroupCategory());
// now uninventorize the only resource - resourceManager.uninventoryResource(overlord, newResource.getId()); - persistedGroups = groupManager.findResourceGroupsByCriteria(overlord, criteria); + resourceManager.uninventoryResource(getOverlord(), newResource.getId()); + persistedGroups = groupManager.findResourceGroupsByCriteria(getOverlord(), criteria); assertEquals("An empty group is considered as MIXED.", GroupCategory.MIXED, persistedGroups.get(0) .getGroupCategory()); }
public void testResourceRemovalFromGroup() { - ResourceGroup persistedGroup = groupManager.getResourceGroup(overlord, newGroup.getId()); + ResourceGroup persistedGroup = groupManager.getResourceGroup(getOverlord(), newGroup.getId()); assertEquals("An empty group is considered as MIXED.", GroupCategory.MIXED, persistedGroup.getGroupCategory());
// add resource to group - groupManager.addResourcesToGroup(overlord, persistedGroup.getId(), new int[] { newResource.getId() }); - persistedGroup = groupManager.getResourceGroup(overlord, newGroup.getId()); + groupManager.addResourcesToGroup(getOverlord(), persistedGroup.getId(), new int[] { newResource.getId() }); + persistedGroup = groupManager.getResourceGroup(getOverlord(), newGroup.getId()); assertEquals("A group with just one explicit member is considered as COMPATIBLE.", GroupCategory.COMPATIBLE, persistedGroup.getGroupCategory());
// now remove the only resource from the group - groupManager.removeResourcesFromGroup(overlord, persistedGroup.getId(), new int[] { newResource.getId() }); - persistedGroup = groupManager.getResourceGroup(overlord, newGroup.getId()); + groupManager.removeResourcesFromGroup(getOverlord(), persistedGroup.getId(), new int[] { newResource.getId() }); + persistedGroup = groupManager.getResourceGroup(getOverlord(), newGroup.getId()); assertEquals("An empty group is considered as MIXED.", GroupCategory.MIXED, persistedGroup.getGroupCategory()); }
+ public void testLiveAvailability() throws Exception { + agentServiceContainer.discoveryService = Mockito.mock(DiscoveryAgentService.class); + + Mockito.when(agentServiceContainer.discoveryService.getCurrentAvailability(Mockito.any(Resource.class))).then( + new Answer<Availability>() { + int count = 0; + + @Override + public Availability answer(InvocationOnMock invocation) throws Throwable { + Resource res = (Resource) invocation.getArguments()[0]; + AvailabilityType avail = count++ == 0 ? AvailabilityType.DOWN : AvailabilityType.UP; + return new Availability(res, avail); + } + }); + + AlertDefinition alertDef = new AlertDefinition(); + + AlertCondition cond = new AlertCondition(alertDef, AlertConditionCategory.AVAILABILITY); + cond.setName(AlertConditionOperator.AVAIL_GOES_UP.name()); + alertDef.setName("liveAvailabilityTestAlert"); + alertDef.setResource(newResource); + alertDef.setPriority(AlertPriority.MEDIUM); + alertDef.setRecoveryId(0); + alertDef.setAlertDampening(new AlertDampening(AlertDampening.Category.NONE)); + alertDef.setConditions(Collections.singleton(cond)); + alertDef.setEnabled(true); + alertDef.setConditionExpression(BooleanExpression.ALL); + + AlertDefinitionManagerLocal alertDefinitionManager = LookupUtil.getAlertDefinitionManager(); + alertDefinitionManager.createAlertDefinitionInNewTransaction(getOverlord(), alertDef, newResource.getId(), true); + + //obvious, right? 
This needs to be done for the alert subsystem to become aware of the new def + LookupUtil.getAlertConditionCacheManager().reloadAllCaches(); + + ResourceCriteria crit = new ResourceCriteria(); + crit.addFilterId(newResource.getId()); + crit.fetchCurrentAvailability(true); + Resource fromDb = resourceManager.findResourcesByCriteria(getOverlord(), crit).get(0); + + assertEquals(AvailabilityType.UNKNOWN, fromDb.getCurrentAvailability().getAvailabilityType()); + + //ask for the live avail - the mock agent response will return "DOWN" the first time + resourceManager.getLiveResourceAvailability(getOverlord(), newResource.getId()); + + //check that the resource changed its avail in the db + fromDb = resourceManager.getResource(getOverlord(), newResource.getId()); + assertEquals(AvailabilityType.DOWN, fromDb.getCurrentAvailability().getAvailabilityType()); + + //ask for the live avail - the mock agent response will return "UP" the second time + resourceManager.getLiveResourceAvailability(getOverlord(), newResource.getId()); + + // wait for our JMS messages to process and see if we get any alerts + Thread.sleep(3000); + + //check that the resource changed its avail in the db + fromDb = resourceManager.getResource(getOverlord(), newResource.getId()); + assertEquals(AvailabilityType.UP, fromDb.getCurrentAvailability().getAvailabilityType()); + + // wait for our JMS messages to process and see if we get any alerts + Thread.sleep(3000); + + //check that the alert fired when going from DOWN to UP + AlertCriteria aCrit = new AlertCriteria(); + aCrit.addFilterResourceIds(newResource.getId()); + + List<Alert> alerts = LookupUtil.getAlertManager().findAlertsByCriteria(getOverlord(), aCrit); + assertEquals("Unexpected number of alerts on the resource.", 1, alerts.size()); + } + private int givenASampleResourceHierarchy() throws NotSupportedException, SystemException { getTransactionManager().begin();
@@ -339,15 +433,14 @@ public class ResourceManagerBeanTest extends UpdatePluginMetadataTestBase {
em.persist(resourceType);
- Agent agent = new Agent("testagent", "testaddress", 16163, "", "testtoken"); - em.persist(agent); - em.flush(); - resource = new Resource("reskey" + System.currentTimeMillis(), "resname", resourceType); + setUpAgent(resource); + resource.setUuid("" + new Random().nextInt()); - resource.setAgent(agent); resource.setInventoryStatus(InventoryStatus.COMMITTED); em.persist(resource); + + createServerIdentity(); } catch (Exception e) { System.out.println("CANNOT PREPARE TEST: " + e); getTransactionManager().rollback(); @@ -361,7 +454,7 @@ public class ResourceManagerBeanTest extends UpdatePluginMetadataTestBase {
private ResourceGroup createNewGroup() { ResourceGroup group = new ResourceGroup("testGroup"); - groupManager.createResourceGroup(overlord, group); + groupManager.createResourceGroup(getOverlord(), group); return group; }
@@ -372,9 +465,9 @@ public class ResourceManagerBeanTest extends UpdatePluginMetadataTestBase { try { Resource res = em.find(Resource.class, resource.getId()); System.out.println("Removing " + res + "..."); - List<Integer> deletedIds = resourceManager.uninventoryResource(overlord, res.getId()); + List<Integer> deletedIds = resourceManager.uninventoryResource(getOverlord(), res.getId()); for (Integer deletedResourceId : deletedIds) { - resourceManager.uninventoryResourceAsyncWork(overlord, deletedResourceId); + resourceManager.uninventoryResourceAsyncWork(getOverlord(), deletedResourceId); } em.flush();
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/measurement/MeasurementDataManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/measurement/MeasurementDataManagerBean.java index 09133ae..3c1232d 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/measurement/MeasurementDataManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/measurement/MeasurementDataManagerBean.java @@ -832,6 +832,11 @@ public class MeasurementDataManagerBean implements MeasurementDataManagerLocal, } }
+ if (result != null && !result.isEmpty()) { + //we just got data from the agent so let's push them through the alerting + pushToAlertSubsystem(result); + } + //[BZ 760139] always return non-null value even when there are errors on the server side. Avoids cryptic // Global UI Exceptions when attempting to serialize null responses. if (null == result) { @@ -881,6 +886,13 @@ public class MeasurementDataManagerBean implements MeasurementDataManagerLocal, } } } + + if (values != null && !values.isEmpty()) { + //we just got data from the agent so let's push them through the alerting + pushToAlertSubsystem(values); + } + + return values; }
@@ -977,4 +989,17 @@ public class MeasurementDataManagerBean implements MeasurementDataManagerLocal, private MeasurementDataManagerUtility getConnectedUtilityInstance() { return MeasurementDataManagerUtility.getInstance(rhqDs); } + + private void pushToAlertSubsystem(Set<MeasurementData> data) { + MeasurementReport fakeReport = new MeasurementReport(); + for(MeasurementData datum : data) { + if (datum instanceof MeasurementDataTrait) { + fakeReport.addData((MeasurementDataTrait) datum); + } else if (datum instanceof MeasurementDataNumeric) { + fakeReport.addData((MeasurementDataNumeric) datum); + } + } + + this.measurementDataManager.mergeMeasurementReport(fakeReport); + } } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/ResourceManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/ResourceManagerBean.java index 0aa09a4..b92548f 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/ResourceManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/ResourceManagerBean.java @@ -128,6 +128,7 @@ import org.rhq.enterprise.server.authz.PermissionException; import org.rhq.enterprise.server.authz.RequiredPermission; import org.rhq.enterprise.server.core.AgentManagerLocal; import org.rhq.enterprise.server.discovery.DiscoveryServerServiceImpl; +import org.rhq.enterprise.server.measurement.AvailabilityManagerLocal; import org.rhq.enterprise.server.measurement.MeasurementScheduleManagerLocal; import org.rhq.enterprise.server.resource.disambiguation.DisambiguationUpdateStrategy; import org.rhq.enterprise.server.resource.disambiguation.Disambiguator; @@ -172,6 +173,8 @@ public class ResourceManagerBean implements ResourceManagerLocal, ResourceManage @EJB //@IgnoreDependency private MeasurementScheduleManagerLocal measurementScheduleManager; + @EJB + private AvailabilityManagerLocal availabilityManager;
public void createResource(Subject user, Resource resource, int parentId) throws ResourceAlreadyExistsException { Resource parent = null; @@ -2436,18 +2439,19 @@ public class ResourceManagerBean implements ResourceManagerLocal, ResourceManage return getResourceById(subject, resourceId); }
+ @Override + @TransactionAttribute(TransactionAttributeType.NEVER) public ResourceAvailability getLiveResourceAvailability(Subject subject, int resourceId) { Resource res = getResourceById(subject, resourceId); ResourceAvailability results = new ResourceAvailability(res, AvailabilityType.UNKNOWN);
try { - Agent agent = res.getAgent(); - if (agent == null) { + // first, quickly see if we can even ping the agent, if not, don't bother trying to get the resource avail + AgentClient client = agentManager.getAgentClient(subject, res.getId()); + if (client == null) { throw new IllegalStateException("No agent is associated with the resource with id [" + resourceId + "]"); }
- // first, quickly see if we can even ping the agent, if not, don't bother trying to get the resource avail - AgentClient client = agentManager.getAgentClient(agent); boolean agentPing = client.ping(5000L); if (agentPing) { // we can't serialize the resource due to the hibernate proxies (agent can't deserialize hibernate objs) @@ -2458,10 +2462,17 @@ public class ResourceManagerBean implements ResourceManagerLocal, ResourceManage Availability avail = client.getDiscoveryAgentService().getCurrentAvailability(bareResource); if (avail != null) { results.setAvailabilityType(avail.getAvailabilityType()); + + AvailabilityReport report = new AvailabilityReport(true, client.getAgent().getName()); + avail.setStartTime(System.currentTimeMillis()); + report.addAvailability(avail); + availabilityManager.mergeAvailabilityReport(report); } - entityManager.flush(); } - } catch (Throwable ignore) { + } catch (Exception e) { + if (log.isInfoEnabled()) { + log.info("Failed to get live availability.", e); + } }
return results;
commit f1cc58b4e1f132a38c2c0a9855fc5778cc87812e Author: Heiko W. Rupp hwr@redhat.com Date: Thu Aug 15 10:41:28 2013 +0200
Get dbupgrade to work again.
diff --git a/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml b/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml index 9bfdb46..ddc5de9 100644 --- a/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml +++ b/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml @@ -1291,7 +1291,7 @@ ALTER TABLE RHQ_OPERATION_SCHEDULE ADD PRIMARY KEY (ID) </statement> </schema-directSQL> - <schema-directSQL ignoreError="true"> + <schema-directSQL ignoreError="true"> <statement desc="Changing primary key of RHQ_OPERATION_SCHEDULE to ID. Step3/3..."> CREATE UNIQUE INDEX RHQ_OPERATION_SCHEDULE_KEY_IDX ON RHQ_OPERATION_SCHEDULE (JOB_NAME, JOB_GROUP) </statement> @@ -1373,7 +1373,7 @@ <schema-directSQL> <statement> INSERT INTO RHQ_PERMISSION (role_id, operation) - SELECT role_id, 15 FROM RHQ_PERMISSION WHERE operation = 1 AND NOT role_id = 2 + SELECT role_id, 15 FROM RHQ_PERMISSION WHERE operation = 1 AND NOT role_id = 2 </statement> </schema-directSQL> </schemaSpec> @@ -1469,11 +1469,11 @@ <schema-addColumn table="RHQ_DRIFT_CONFIG" column="NAME" columnType="VARCHAR2" precision="512" /> <schema-alterColumn table="RHQ_DRIFT_CONFIG" column="NAME" nullable="FALSE" /> <schema-addColumn table="RHQ_DRIFT_CONFIG" column="INTERVAL" columnType="LONG" /> - <schema-alterColumn table="RHQ_DRIFT_CONFIG" column="INTERVAL" nullable="FALSE" /> + <schema-alterColumn table="RHQ_DRIFT_CONFIG" column="INTERVAL" nullable="FALSE" /> <schema-addColumn table="RHQ_DRIFT_CONFIG" column="IS_ENABLED" columnType="BOOLEAN" /> <schema-alterColumn table="RHQ_DRIFT_CONFIG" column="IS_ENABLED" nullable="FALSE" /> <schema-addColumn table="RHQ_DRIFT_CONFIG" column="CONFIG_ID" columnType="INTEGER" /> - <schema-alterColumn table="RHQ_DRIFT_CONFIG" column="CONFIG_ID" nullable="FALSE" /> + <schema-alterColumn table="RHQ_DRIFT_CONFIG" column="CONFIG_ID" nullable="FALSE" /> <schema-directSQL> <statement desc="Creating RHQ_DRIFT_CONFIG foreign key relation to RHQ_CONFIG"> ALTER TABLE 
RHQ_DRIFT_CONFIG @@ -1482,7 +1482,7 @@ REFERENCES RHQ_CONFIG (ID) </statement> </schema-directSQL> - <schema-addColumn table="RHQ_DRIFT_CONFIG" column="RESOURCE_ID" columnType="INTEGER" /> + <schema-addColumn table="RHQ_DRIFT_CONFIG" column="RESOURCE_ID" columnType="INTEGER" /> <schema-directSQL> <statement desc="Creating RHQ_DRIFT_CONFIG foreign key relation to RHQ_RESOURCE"> ALTER TABLE RHQ_DRIFT_CONFIG @@ -1492,7 +1492,7 @@ </statement> </schema-directSQL>
- + <!-- RHQ_DRIFT_CHANGE_SET --> <schema-createSequence name="RHQ_DRIFT_CHANGE_SET_ID_SEQ" initial="10001" /> <schema-directSQL> @@ -1507,7 +1507,7 @@ <schema-addColumn table="RHQ_DRIFT_CHANGE_SET" column="CATEGORY" columnType="VARCHAR2" precision="20" /> <schema-alterColumn table="RHQ_DRIFT_CHANGE_SET" column="CATEGORY" nullable="FALSE" /> <schema-addColumn table="RHQ_DRIFT_CHANGE_SET" column="RESOURCE_ID" columnType="INTEGER" /> - <schema-alterColumn table="RHQ_DRIFT_CHANGE_SET" column="RESOURCE_ID" nullable="FALSE" /> + <schema-alterColumn table="RHQ_DRIFT_CHANGE_SET" column="RESOURCE_ID" nullable="FALSE" /> <schema-directSQL> <statement desc="Creating RHQ_DRIFT_CHANGE_SET foreign key relation to RHQ_RESOURCE"> ALTER TABLE RHQ_DRIFT_CHANGE_SET @@ -1515,9 +1515,9 @@ FOREIGN KEY (RESOURCE_ID) REFERENCES RHQ_RESOURCE (ID) </statement> - </schema-directSQL> + </schema-directSQL> <schema-addColumn table="RHQ_DRIFT_CHANGE_SET" column="DRIFT_CONFIG_ID" columnType="INTEGER" /> - <schema-alterColumn table="RHQ_DRIFT_CHANGE_SET" column="DRIFT_CONFIG_ID" nullable="FALSE" /> + <schema-alterColumn table="RHQ_DRIFT_CHANGE_SET" column="DRIFT_CONFIG_ID" nullable="FALSE" /> <schema-directSQL> <statement desc="Creating RHQ_DRIFT_CHANGE_SET foreign key relation to RHQ_DRIFT_CONFIG"> ALTER TABLE RHQ_DRIFT_CHANGE_SET @@ -1631,10 +1631,10 @@ <!-- RHQ 4.2 RELEASE uses DB Schema 2.114 Note, the 4.1 to 4.2 upgrade is broken because a 4.2 dbsetup actually creates a 2.115 schema. So, for 4.2 dbs to upgrade to 4.3 the schema version must first be corrected and set the schema - version to 2.115. + version to 2.115. -->
- <schemaSpec version="2.115"> + <schemaSpec version="2.115"> <!-- RHQ_DRIFT_DEF_TEMPLATE --> <schema-createSequence name="RHQ_DRIFT_DEF_TEMPLATE_ID_SEQ" initial="10001" /> <schema-directSQL> @@ -1654,7 +1654,7 @@ <schema-addColumn table="RHQ_DRIFT_DEF_TEMPLATE" column="CTIME" columnType="LONG" /> <schema-alterColumn table="RHQ_DRIFT_DEF_TEMPLATE" column="CTIME" nullable="FALSE" /> <schema-addColumn table="RHQ_DRIFT_DEF_TEMPLATE" column="IS_USER_DEFINED" columnType="BOOLEAN" /> - <schema-alterColumn table="RHQ_DRIFT_DEF_TEMPLATE" column="IS_USER_DEFINED" nullable="FALSE" /> + <schema-alterColumn table="RHQ_DRIFT_DEF_TEMPLATE" column="IS_USER_DEFINED" nullable="FALSE" /> <schema-directSQL> <statement desc="Creating RHQ_DRIFT_DEF_TEMPLATE foreign key to RHQ_RESOURCE_TYPE"> ALTER TABLE RHQ_DRIFT_DEF_TEMPLATE @@ -1676,7 +1676,7 @@ </schema-directSQL>
<!-- RHQ_DRIFT_CONFIG => RHQ_DRIFT_DEFINITION --> - <schema-directSQL> + <schema-directSQL> <statement> ALTER TABLE RHQ_DRIFT_CONFIG RENAME TO RHQ_DRIFT_DEFINITION </statement> @@ -1685,17 +1685,17 @@ </statement> <statement targetDBVendor="oracle"> RENAME RHQ_DRIFT_CONFIG_ID_SEQ TO RHQ_DRIFT_DEFINITION_ID_SEQ - </statement> + </statement> </schema-directSQL>
<!-- add new columns --> - <schema-addColumn table="RHQ_DRIFT_DEFINITION" column="DESCRIPTION" columnType="VARCHAR2" precision="512" /> + <schema-addColumn table="RHQ_DRIFT_DEFINITION" column="DESCRIPTION" columnType="VARCHAR2" precision="512" /> <schema-addColumn table="RHQ_DRIFT_DEFINITION" column="DRIFT_HANDLING_MODE" columnType="VARCHAR2" precision="20" /> <schema-addColumn table="RHQ_DRIFT_DEFINITION" column="IS_PINNED" columnType="BOOLEAN"/> <schema-addColumn table="RHQ_DRIFT_DEFINITION" column="IS_ATTACHED" columnType="BOOLEAN"/> <schema-addColumn table="RHQ_DRIFT_DEFINITION" column="DRIFT_DEF_TEMPLATE_ID" columnType="INTEGER"/> - - <!-- initialize new required columns --> + + <!-- initialize new required columns --> <schema-directSQL> <statement targetDBVendor="postgresql" desc="RHQ_DRIFT_DEFINITION: set is_pinned column (false)"> UPDATE RHQ_DRIFT_DEFINITION SET IS_PINNED = FALSE @@ -1712,9 +1712,9 @@ <statement desc="RHQ_DRIFT_DEFINITION: set drift_handling_mode column (normal)"> UPDATE RHQ_DRIFT_DEFINITION SET DRIFT_HANDLING_MODE = 'normal' </statement> - </schema-directSQL> + </schema-directSQL> <schema-alterColumn table="RHQ_DRIFT_DEFINITION" column="IS_PINNED" nullable="FALSE"/> - <schema-alterColumn table="RHQ_DRIFT_DEFINITION" column="IS_ATTACHED" nullable="FALSE"/> + <schema-alterColumn table="RHQ_DRIFT_DEFINITION" column="IS_ATTACHED" nullable="FALSE"/> <schema-alterColumn table="RHQ_DRIFT_DEFINITION" column="DRIFT_HANDLING_MODE" nullable="FALSE" />
<!-- add new FK --> @@ -1740,7 +1740,7 @@ <!-- RHQ_DRIFT_CHANGE_SET --> <!-- Resource column is now nullable --> <schema-alterColumn table="RHQ_DRIFT_CHANGE_SET" column="RESOURCE_ID" nullable="TRUE" /> - + <!-- Add new drift_set column with FK --> <schema-addColumn table="RHQ_DRIFT_CHANGE_SET" column="DRIFT_SET_ID" columnType="INTEGER"/> <schema-directSQL> @@ -1750,9 +1750,9 @@ FOREIGN KEY (DRIFT_SET_ID) REFERENCES RHQ_DRIFT_SET (ID) </statement> - </schema-directSQL> + </schema-directSQL> +
- <!-- Rename column DRIFT_CONFIG_ID to DRIFT_DEFINITION_ID --> <schema-directSQL> <statement desc="Renaming RHQ_DRIFT_CHANGE_SET.DRIFT_CONFIG_ID column to DRIFT_DEFINITION_ID"> @@ -1768,20 +1768,20 @@ <statement desc="RHQ_DRIFT_CHANGE_SET: set drift_handling_mode column (normal)"> UPDATE RHQ_DRIFT_CHANGE_SET SET DRIFT_HANDLING_MODE = 'normal' </statement> - </schema-directSQL> + </schema-directSQL> <schema-alterColumn table="RHQ_DRIFT_CHANGE_SET" column="DRIFT_HANDLING_MODE" nullable="FALSE" />
<!-- RHQ_DRIFT --> - <!-- Add new path_directory column --> + <!-- Add new path_directory column --> <schema-addColumn table="RHQ_DRIFT" column="PATH_DIRECTORY" columnType="VARCHAR2" precision="1024" /> - + <!-- Set the new path_directory column using the current path column values --> - <schema-javaTask className="DriftPathDirectoryDatabaseUpgradeTask" /> + <schema-javaTask className="DriftPathDirectoryDatabaseUpgradeTask" />
<!-- Now it is safe to set the new path_directory column not null --> - <schema-alterColumn table="RHQ_DRIFT" column="PATH_DIRECTORY" nullable="FALSE" /> - + <schema-alterColumn table="RHQ_DRIFT" column="PATH_DIRECTORY" nullable="FALSE" /> + <!-- add new drift_set_id column and FK --> <schema-addColumn table="RHQ_DRIFT" column="DRIFT_SET_ID" columnType="INTEGER"/> <schema-directSQL> @@ -1795,48 +1795,48 @@
<!-- Morph rhq_drift_template_map rows into rhq_drift_def_template rows. The rhq_drift_template_map table is obsolete. --> - <schema-javaTask className="DriftTemplateMapDatabaseUpgradeTask" /> + <schema-javaTask className="DriftTemplateMapDatabaseUpgradeTask" />
</schemaSpec>
<!-- RHQ 4.2 RELEASE uses DB Schema 2.114, which is incorrect, it should have been 2.115. So, for the upgrade to work for an RHQ 4.2 install the schema version must be set to 2.115 prior to the upgrade. - + The 4.2 upgrade should start with 2.116. --> <schemaSpec version="2.116"> - <!-- RHQ_DRIFT_DEFINITION changes --> + <!-- RHQ_DRIFT_DEFINITION changes --> <schema-addColumn table="RHQ_DRIFT_DEFINITION" column="COMPLIANCE_STATUS" columnType="INTEGER"/> <schema-directSQL> <statement desc="RHQ_DRIFT_DEFINITION: set compliance_status column (in compliance)"> UPDATE RHQ_DRIFT_DEFINITION SET COMPLIANCE_STATUS = 0 </statement> </schema-directSQL> - <schema-alterColumn table="RHQ_DRIFT_DEFINITION" column="COMPLIANCE_STATUS" nullable="FALSE"/> - + <schema-alterColumn table="RHQ_DRIFT_DEFINITION" column="COMPLIANCE_STATUS" nullable="FALSE"/> + <schema-alterColumn table="RHQ_DRIFT_DEFINITION" column="NAME" columnType="VARCHAR2" precision="512" /> - - - <!-- RHQ_PRD_VER changes --> + + + <!-- RHQ_PRD_VER changes --> <!-- Increase the max length of a product version from 50 to 100. --> <schema-alterColumn table="RHQ_PRD_VER" column="VERSION" columnType="VARCHAR2" precision="100" /> </schemaSpec>
<!-- JON 3.0 RELEASE uses DB Schema 2.116 --> - + <schemaSpec version="2.117"> <!-- Remove an obsolete drift table that may be hanging around --> <schema-dropTable table="RHQ_DRIFT_TEMPLATE_MAP" ignoreError="true" /> - + <!-- Remove an unwanted null constraint --> - <schema-alterColumn table="RHQ_DRIFT_CHANGE_SET" column="DRIFT_DEFINITION_ID" nullable="TRUE" /> + <schema-alterColumn table="RHQ_DRIFT_CHANGE_SET" column="DRIFT_DEFINITION_ID" nullable="TRUE" /> </schemaSpec>
<schemaSpec version="2.118"> <!-- Correct an earlier upgrade issue where 'All Resources Role' was granted MANAGE_REPOSITORIES. --> <schema-directSQL> <statement> - DELETE FROM RHQ_PERMISSION WHERE role_id = 2 AND operation = 15 + DELETE FROM RHQ_PERMISSION WHERE role_id = 2 AND operation = 15 </statement> </schema-directSQL> </schemaSpec> @@ -1849,42 +1849,42 @@ </statement> </schema-directSQL> </schemaSpec> - + <schemaSpec version="2.120"> - <!-- RHQ_AGENT changes --> + <!-- RHQ_AGENT changes --> <schema-addColumn table="RHQ_AGENT" column="LAST_AVAILABILITY_PING" columnType="LONG"/> <schema-directSQL> <statement desc="RHQ_AGENT: init last_availability_ping to last_availability_report"> - UPDATE RHQ_AGENT SET LAST_AVAILABILITY_PING = LAST_AVAILABILITY_REPORT + UPDATE RHQ_AGENT SET LAST_AVAILABILITY_PING = LAST_AVAILABILITY_REPORT </statement> </schema-directSQL>
<!-- Drop agent quiet time (i.e. suspect agent time) from 15 to 5 minutes --> <schema-directSQL> <statement desc="Changing AGENT_MAX_QUIET_TIME_ALLOWED to 5 minutes from 15 minutes"> - UPDATE RHQ_SYSTEM_CONFIG - SET PROPERTY_VALUE = '300000', DEFAULT_PROPERTY_VALUE = '300000' + UPDATE RHQ_SYSTEM_CONFIG + SET PROPERTY_VALUE = '300000', DEFAULT_PROPERTY_VALUE = '300000' WHERE PROPERTY_KEY = 'AGENT_MAX_QUIET_TIME_ALLOWED' </statement> </schema-directSQL> - + <!-- set null availabilities to UNKNOWN --> <schema-directSQL> <statement desc="Changing NULL RHQ_AVAILABILITY.AVAILABILITY_TYPE to UNKNOWN"> UPDATE RHQ_AVAILABILITY - SET AVAILABILITY_TYPE = 2 + SET AVAILABILITY_TYPE = 2 WHERE AVAILABILITY_TYPE IS NULL </statement> <statement desc="Changing NULL RHQ_RESOURCE_AVAIL.AVAILABILITY_TYPE to UNKNOWN"> UPDATE RHQ_RESOURCE_AVAIL - SET AVAILABILITY_TYPE = 2 + SET AVAILABILITY_TYPE = 2 WHERE AVAILABILITY_TYPE IS NULL </statement> </schema-directSQL>
<schema-alterColumn table="RHQ_AVAILABILITY" column="AVAILABILITY_TYPE" nullable="FALSE"/> - <schema-alterColumn table="RHQ_RESOURCE_AVAIL" column="AVAILABILITY_TYPE" nullable="FALSE"/> - + <schema-alterColumn table="RHQ_RESOURCE_AVAIL" column="AVAILABILITY_TYPE" nullable="FALSE"/> + </schemaSpec>
<schemaSpec version="2.121"> @@ -1892,27 +1892,27 @@ </schemaSpec>
<!-- RHQ 4.4, JON 3.1.0 RELEASE uses DB Schema 2.121 --> - + <schemaSpec version="2.122"> - <schema-directSQL> + <schema-directSQL> <statement desc="Updating DOWN Alert Conditions to new format"> UPDATE RHQ_ALERT_CONDITION SET NAME = 'AVAIL_GOES_DOWN', OPTION_STATUS = NULL - WHERE TYPE = 'AVAILABILITY' + WHERE TYPE = 'AVAILABILITY' AND NAME IS NULL AND OPTION_STATUS = 'DOWN' </statement> <statement desc="Updating UP Alert Conditions to new format"> UPDATE RHQ_ALERT_CONDITION - SET NAME = 'AVAIL_GOES_UP', OPTION_STATUS = NULL + SET NAME = 'AVAIL_GOES_UP', OPTION_STATUS = NULL WHERE TYPE = 'AVAILABILITY' AND NAME IS NULL AND OPTION_STATUS = 'UP' - </statement> - </schema-directSQL> + </statement> + </schema-directSQL> </schemaSpec> - -<!-- JON 3.1.1 RELEASE uses DB Schema 2.122 --> + +<!-- JON 3.1.1 RELEASE uses DB Schema 2.122 -->
<schemaSpec version="2.123">
@@ -1989,8 +1989,8 @@ </schema-directSQL> </schemaSpec>
-<!-- RHQ 4.6.0 uses DB Schema 2.128 --> - +<!-- RHQ 4.6.0 uses DB Schema 2.128 --> + <!-- BZ 881848 --> <schemaSpec version="2.129"> <schema-directSQL> @@ -2080,12 +2080,12 @@
<schemaSpec version="2.135"> <!-- Fine Grained Bundle Permissions --> - + <!-- Add new perms to superuser/all-resources roles --> <schema-directSQL> <statement desc="Inserting MANAGE_BUNDLE_GROUPS permission for all MANAGE_BUNDLE (12) roles"> INSERT INTO rhq_permission (role_id, operation) SELECT role_id, 18 FROM rhq_permission p where p.operation = 12 - </statement> + </statement> <statement desc="Inserting CREATE_BUNDLES permission for all MANAGE_BUNDLE (12) roles"> INSERT INTO rhq_permission (role_id, operation) SELECT role_id, 19 FROM rhq_permission p where p.operation = 12 </statement> @@ -2181,6 +2181,13 @@
<schemaSpec version="2.136"> <schema-addColumn table="RHQ_CONFIG_PD_OSRC" column="EXPRESSION_SCOPE" columnType="VARCHAR2" precision="12"/> + <schema-directSQL> + <statement desc="Fill in defaults"> + UPDATE RHQ_CONFIG_PD_OSRC + SET EXPRESSION_SCOPE = 'unlimited' + WHERE EXPRESSION_SCOPE IS NULL + </statement> + </schema-directSQL> <schema-alterColumn table="RHQ_CONFIG_PD_OSRC" column="EXPRESSION_SCOPE" nullable="false" default="unlimited"/> </schemaSpec>
@@ -2196,10 +2203,10 @@ REFERENCES RHQ_OPERATION_HISTORY (ID) </statement> <statement targetDBVendor="postgresql" desc="Set maintenance_pending flag to false for existing storage nodes"> - UPDATE RHQ_STORAGE_NODE SET IGNORED = false + UPDATE RHQ_STORAGE_NODE SET MAINTENANCE_PENDING = false </statement> <statement targetDBVendor="oracle" desc="Set maintenance_pending flag to false for existing storage nodes"> - UPDATE RHQ_STORAGE_NODE SET IGNORED = 0 + UPDATE RHQ_STORAGE_NODE SET MAINTENANCE_PENDING = 0 </statement> </schema-directSQL> </schemaSpec>
commit 67cc69eefd11f2d64d54df979d1b2f1585665f9b Author: Heiko W. Rupp hwr@redhat.com Date: Tue Aug 13 15:57:01 2013 +0200
BZ 996276 Fix example expressions
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/trigger/JobTriggerEditor.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/trigger/JobTriggerEditor.java index 9678629..3a216a2 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/trigger/JobTriggerEditor.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/trigger/JobTriggerEditor.java @@ -306,8 +306,8 @@ public class JobTriggerEditor extends EnhancedVLayout { + " </tbody>\n" + "\n" + "</table>\n" - + "<p>So cron expressions can be as simple as this: <tt>* * * * ? *</tt><br />\n" - + "or more complex, like this: <tt>0/5 14,18,3-39,52 * ? JAN,MAR,SEP MON-FRI 2002-2010</tt></p>\n" + + "<p>So cron expressions can be as simple as this: <tt>0 * * ? * *</tt> to run every minute on the minute<br />\n" + + "or more complex, like this: <tt>0/5 14,18,3-39,52 * ? JAN,MAR,SEP MON-FRI 2002-2015</tt></p>\n" + "\n" + "<h2><a name="CronTriggersTutorial-Specialcharacters"></a>Special Characters</h2>\n" + "\n"
commit 94bb4f2ea3f458837a3bf55013685e84ec32b321 Author: John Sanda jsanda@redhat.com Date: Thu Aug 15 01:57:08 2013 -0400
removing unused exception class
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageConfigurationException.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageConfigurationException.java deleted file mode 100644 index dc616a8..0000000 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageConfigurationException.java +++ /dev/null @@ -1,23 +0,0 @@ -package org.rhq.enterprise.server.storage; - -/** - * @author John Sanda - */ -public class StorageConfigurationException extends RuntimeException { - - public StorageConfigurationException() { - super(); - } - - public StorageConfigurationException(String message) { - super(message); - } - - public StorageConfigurationException(String message, Throwable cause) { - super(message, cause); - } - - public StorageConfigurationException(Throwable cause) { - super(cause); - } -}
commit 578ad8d0103ee9f2a834a32ee082ce39268a6ddc Author: John Sanda jsanda@redhat.com Date: Thu Aug 15 01:56:04 2013 -0400
add maintenance flag for queueing up storage nodes to be processed
diff --git a/modules/core/dbutils/src/main/scripts/dbsetup/measurement-schema.xml b/modules/core/dbutils/src/main/scripts/dbsetup/measurement-schema.xml index f307ff8..da9ff04 100644 --- a/modules/core/dbutils/src/main/scripts/dbsetup/measurement-schema.xml +++ b/modules/core/dbutils/src/main/scripts/dbsetup/measurement-schema.xml @@ -455,6 +455,7 @@ <column name="CTIME" required="true" type="LONG" /> <column name="MTIME" required="true" type="LONG" /> <column name="ERROR_MSG" required="false" type="LONGVARCHAR"/> + <column name="MAINTENANCE_PENDING" required="true" type="BOOLEAN" default="false"/> <column name="RESOURCE_ID" required="false" type="INTEGER" references="RHQ_RESOURCE(ID)" /> <column name="RESOURCE_OP_HIST_ID" required="false" type="INTEGER" references="RHQ_OPERATION_HISTORY(ID)"/>
diff --git a/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml b/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml index edf0147..9bfdb46 100644 --- a/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml +++ b/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml @@ -2185,6 +2185,7 @@ </schemaSpec>
<schemaSpec version="2.137"> + <schema-addColumn table="RHQ_STORAGE_NODE" column="MAINTENANCE_PENDING" columnType="BOOLEAN"/> <schema-addColumn table="RHQ_STORAGE_NODE" column="ERROR_MSG" columnType="LONGVARCHAR"/> <schema-addColumn table="RHQ_STORAGE_NODE" column="RESOURCE_OP_HIST_ID" columnType="INTEGER"/> <schema-directSQL> @@ -2194,6 +2195,12 @@ FOREIGN KEY (RESOURCE_OP_HIST_ID) REFERENCES RHQ_OPERATION_HISTORY (ID) </statement> + <statement targetDBVendor="postgresql" desc="Set maintenance_pending flag to false for existing storage nodes"> + UPDATE RHQ_STORAGE_NODE SET IGNORED = false + </statement> + <statement targetDBVendor="oracle" desc="Set maintenance_pending flag to false for existing storage nodes"> + UPDATE RHQ_STORAGE_NODE SET IGNORED = 0 + </statement> </schema-directSQL> </schemaSpec>
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java index 3f2a89c..54a0db5 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java @@ -135,6 +135,9 @@ public class StorageNode implements Serializable { @Column(name = "MTIME", nullable = false) private long mtime;
+ @Column(name = "MAINTENANCE_PENDING", nullable = false) + private boolean maintenancePending; + @Column(name = "ERROR_MSG", nullable = true) private String errorMessage;
@@ -198,6 +201,14 @@ public class StorageNode implements Serializable { this.mtime = mtime; }
+ public boolean isMaintenancePending() { + return maintenancePending; + } + + public void setMaintenancePending(boolean maintenancePending) { + this.maintenancePending = maintenancePending; + } + public Resource getResource() { return resource; } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java index 72f5e9d..2a79c59 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java @@ -84,18 +84,19 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa List<StorageNode> allNodes = new ArrayList<StorageNode>(clusterNodes); allNodes.add(storageNode);
- announceStorageNode(subject, storageNode, createPropertyListOfAddresses("addresses", allNodes), - getAddresses(clusterNodes)); + for (StorageNode clusterNode : clusterNodes) { + clusterNode.setMaintenancePending(true); + }
- } + announceStorageNode(subject, storageNode, clusterNodes.get(0), createPropertyListOfAddresses("addresses", + allNodes));
- private void announceStorageNode(Subject subject, StorageNode storageNode, PropertyList addresses, - List<String> remainingNodes) { - String address = remainingNodes.remove(0); - StorageNode clusterNode = findStorageNodeByAddress(address); + }
+ private void announceStorageNode(Subject subject, StorageNode newStorageNode, StorageNode clusterNode, + PropertyList addresses) { if (log.isInfoEnabled()) { - log.info("Announcing " + storageNode + " to cluster node " + clusterNode); + log.info("Announcing " + newStorageNode + " to cluster node " + clusterNode); } ResourceOperationSchedule schedule = new ResourceOperationSchedule(); schedule.setResource(clusterNode.getResource()); @@ -104,7 +105,6 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa schedule.setOperationName("updateKnownNodes"); Configuration parameters = new Configuration(); parameters.put(addresses); - parameters.put(new PropertySimple("remainingNodes", StringUtil.listToString(remainingNodes))); schedule.setParameters(parameters);
operationManager.scheduleResourceOperation(subject, schedule); @@ -141,8 +141,9 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa StorageNode.class).setParameter("operationMode", StorageNode.OperationMode.NORMAL) .getResultList(); for (StorageNode node : clusterNodes) { - node.setOperationMode(StorageNode.OperationMode.ADD_NODE_MAINTENANCE); + node.setMaintenancePending(true); } + storageNode.setMaintenancePending(true); clusterNodes.add(storageNode); boolean runRepair = updateSchemaIfNecessary(clusterNodes); performAddNodeMaintenance(subject, storageNode, runRepair, createPropertyListOfAddresses(SEEDS_LIST, @@ -231,19 +232,20 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa operationFailed(storageNode, resourceOperationHistory, newStorageNode); return; default: // SUCCESS + storageNode.setMaintenancePending(false); Configuration parameters = resourceOperationHistory.getParameters(); PropertyList addresses = parameters.getList("addresses"); - List<String> remainingNodes = getRemainingNodes(resourceOperationHistory); + StorageNode nextNode = takeFromMaintenanceQueue();
newStorageNode = findNewStorgeNode(StorageNode.OperationMode.ANNOUNCE); Subject subject = getSubject(resourceOperationHistory);
- if (remainingNodes.isEmpty()) { + if (nextNode == null) { log.info("Successfully announced new storage node to storage cluster"); newStorageNode.setOperationMode(StorageNode.OperationMode.BOOTSTRAP); prepareNodeForBootstrap(subject, newStorageNode, addresses.deepCopy(false)); } else { - announceStorageNode(subject, newStorageNode, addresses.deepCopy(false), remainingNodes); + announceStorageNode(subject, newStorageNode, nextNode, addresses.deepCopy(false)); } } } @@ -293,11 +295,14 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa if (log.isInfoEnabled()) { log.info("Finished cluster maintenance for " + storageNode + " for addition of new node"); } - storageNode.setOperationMode(StorageNode.OperationMode.NORMAL); - StorageNode nextNode = takeFromQueue(storageNode, StorageNode.OperationMode.ADD_NODE_MAINTENANCE); + storageNode.setMaintenancePending(false); + StorageNode nextNode = takeFromMaintenanceQueue();
if (nextNode == null) { log.info("Finished running cluster maintenance for addition of new node"); + // TODO replace this with an UPDATE statement + newStorageNode = findNewStorgeNode(StorageNode.OperationMode.ADD_NODE_MAINTENANCE); + newStorageNode.setOperationMode(StorageNode.OperationMode.NORMAL); } else { Configuration parameters = resourceOperationHistory.getParameters(); boolean runRepair = parameters.getSimple(RUN_REPAIR_PROPERTY).getBooleanValue(); @@ -399,33 +404,16 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa operationManager.scheduleResourceOperation(subject, schedule); }
- private StorageNode takeFromQueue(StorageNode lastTaken, StorageNode.OperationMode queue) { - List<StorageNode> nodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE_EXCLUDING, - StorageNode.class).setParameter("operationMode", queue).setParameter("storageNode", lastTaken) + private StorageNode takeFromMaintenanceQueue() { + List<StorageNode> storageNodes = entityManager.createQuery("SELECT s FROM StorageNode s WHERE " + + "s.operationMode = :operationMode AND s.maintenancePending = :maintenancePending", StorageNode.class) + .setParameter("operationMode", StorageNode.OperationMode.NORMAL).setParameter("maintenancePending", true) .getResultList();
- if (nodes.isEmpty()) { + if (storageNodes.isEmpty()) { return null; } - return nodes.get(0); - } - - private List<String> getRemainingNodes(ResourceOperationHistory resourceOperationHistory) { - LinkedList<String> addresses = new LinkedList<String>(); - Configuration results = resourceOperationHistory.getResults(); - String remainingNodes = results.getSimpleValue("remainingNodes"); - - if (!StringUtil.isEmpty(remainingNodes)) { - for (String address : remainingNodes.split(",")) { - addresses.add(address); - } - } - return addresses; - } - - private StorageNode findStorageNodeByAddress(String address) { - return entityManager.createNamedQuery(StorageNode.QUERY_FIND_BY_ADDRESS, StorageNode.class) - .setParameter("address", address).getSingleResult(); + return storageNodes.get(0); }
private StorageNode findNewStorgeNode(StorageNode.OperationMode operationMode) { @@ -537,12 +525,4 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa return list; }
- private List<String> getAddresses(List<StorageNode> storageNodes) { - List<String> addresses = new LinkedList<String>(); - for (StorageNode storageNode : storageNodes) { - addresses.add(storageNode.getAddress()); - } - return addresses; - } - } diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java index 7f58037..a24a219 100644 --- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java @@ -257,7 +257,6 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper
Configuration complexResults = result.getComplexResults(); complexResults.put(new PropertySimple("details", "Successfully updated the set of known nodes.")); - complexResults.put(params.get("remainingNodes").deepCopy(false));
return result; } catch (InternodeAuthConfUpdateException e) { diff --git a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml index 4ef10cd..cc01c9d 100644 --- a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml +++ b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml @@ -102,11 +102,9 @@ <c:list-property name="addresses"> <c:simple-property name="address"/> </c:list-property> - <c:simple-property name="remainingNodes"/> </parameters> <results> <c:simple-property name="details"/> - <c:simple-property name="remainingNodes"/> </results> </operation>
commit 6a41f788ada57056207fa5ee36259c9f1f2fb42a Author: John Sanda jsanda@redhat.com Date: Wed Aug 14 23:32:41 2013 -0400
bump up dbupgrade version
diff --git a/modules/core/dbutils/pom.xml b/modules/core/dbutils/pom.xml index 2e8e4b8..1c66dd6 100644 --- a/modules/core/dbutils/pom.xml +++ b/modules/core/dbutils/pom.xml @@ -17,7 +17,7 @@ <description>Database schema setup, upgrade and other utilities</description>
<properties> - <db.schema.version>2.136</db.schema.version> + <db.schema.version>2.137</db.schema.version> <rhq.ds.type-mapping>${rhq.test.ds.type-mapping}</rhq.ds.type-mapping> <rhq.ds.server-name>${rhq.test.ds.server-name}</rhq.ds.server-name> <rhq.ds.db-name>${rhq.test.ds.db-name}</rhq.ds.db-name>
commit 9a9767577237ee0efc6cd902a3fb8861f280d193 Author: John Sanda jsanda@redhat.com Date: Wed Aug 14 23:02:34 2013 -0400
adding more error handling for storage node deployments
In my previous commit I added code to persist resource operation failures that occur during storage node deployment. This commit adds error handling for unexpected server side errors. Errors are logged to the StorageNode entity in a separate transaction to ensure that the error message gets persisted.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeDeploymentException.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeDeploymentException.java deleted file mode 100644 index fca6e96..0000000 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeDeploymentException.java +++ /dev/null @@ -1,22 +0,0 @@ -package org.rhq.enterprise.server.storage; - -/** - * @author John Sanda - */ -public class StorageNodeDeploymentException extends RuntimeException { - - public StorageNodeDeploymentException() { - } - - public StorageNodeDeploymentException(String message) { - super(message); - } - - public StorageNodeDeploymentException(String message, Throwable cause) { - super(message, cause); - } - - public StorageNodeDeploymentException(Throwable cause) { - super(cause); - } -} diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java index aaa54f5..72f5e9d 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java @@ -12,7 +12,6 @@ import javax.ejb.TransactionAttribute; import javax.ejb.TransactionAttributeType; import javax.persistence.EntityManager; import javax.persistence.PersistenceContext; -import javax.persistence.PersistenceException;
import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -23,13 +22,13 @@ import org.rhq.core.domain.common.JobTrigger; import org.rhq.core.domain.configuration.Configuration; import org.rhq.core.domain.configuration.PropertyList; import org.rhq.core.domain.configuration.PropertySimple; -import org.rhq.core.domain.operation.OperationDefinition; import org.rhq.core.domain.operation.OperationHistory; import org.rhq.core.domain.operation.ResourceOperationHistory; import org.rhq.core.domain.operation.bean.ResourceOperationSchedule; import org.rhq.core.domain.resource.Resource; import org.rhq.core.domain.resource.ResourceType; import org.rhq.core.util.StringUtil; +import org.rhq.core.util.exception.ThrowableUtil; import org.rhq.enterprise.server.RHQConstants; import org.rhq.enterprise.server.auth.SessionManager; import org.rhq.enterprise.server.auth.SubjectManagerLocal; @@ -71,8 +70,10 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa @EJB private StorageClientManagerBean storageClientManager;
+ @EJB + private StorageNodeOperationsHandlerLocal storageNodeOperationsHandler; + @Override - @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) public void announceStorageNode(Subject subject, StorageNode storageNode) { if (log.isInfoEnabled()) { log.info("Announcing " + storageNode + " to storage node cluster."); @@ -110,6 +111,18 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa }
@Override + @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) + public void logError(StorageNode.OperationMode newStorageNodeOperationMode, String error, Exception e) { + try { + StorageNode newStorageNode = findNewStorgeNode(newStorageNodeOperationMode); + newStorageNode.setErrorMessage(error + " Check the server log for details. Root cause: " + + ThrowableUtil.getRootCause(e).getMessage()); + } catch (Exception e1) { + log.error("Failed to log error against storage node", e); + } + } + + @Override public void performAddNodeMaintenanceIfNecessary(InetAddress storageNodeAddress) { StorageNode storageNode = entityManager.createNamedQuery(StorageNode.QUERY_FIND_BY_ADDRESS, StorageNode.class).setParameter("address", storageNodeAddress.getHostAddress()).getSingleResult(); @@ -167,24 +180,77 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa return; }
- ResourceOperationHistory resourceOperationHistory = entityManager.find(ResourceOperationHistory.class, - operationHistory.getId()); - if (resourceOperationHistory == null) { + ResourceOperationHistory resourceOperationHistory = (ResourceOperationHistory) operationHistory; + if (!isStorageNodeOperation(resourceOperationHistory)) { return; }
- if (isStorageNodeOperation(resourceOperationHistory.getOperationDefinition())) { - if (resourceOperationHistory.getOperationDefinition().getName().equals("updateKnownNodes")) { - handleUpdateKnownNodes(resourceOperationHistory); - } else if (operationHistory.getOperationDefinition().getName().equals("prepareForBootstrap")) { - handlePrepareForBootstrap(resourceOperationHistory); - } else if (operationHistory.getOperationDefinition().getName().equals("addNodeMaintenance")) { - handleAddNodeMaintenance(resourceOperationHistory); + if (resourceOperationHistory.getOperationDefinition().getName().equals("updateKnownNodes")) { + try { + storageNodeOperationsHandler.handleUpdateKnownNodes(resourceOperationHistory); + } catch (Exception e) { + String msg = "Aborting storage node deployment due to unexpected error while announcing cluster nodes."; + log.error(msg, e); + storageNodeOperationsHandler.logError(StorageNode.OperationMode.ANNOUNCE, msg, e); + } + } else if (operationHistory.getOperationDefinition().getName().equals("prepareForBootstrap")) { + try { + storageNodeOperationsHandler.handlePrepareForBootstrap(resourceOperationHistory); + } catch (Exception e) { + String msg = "Aborting storage node deployment due to unexpected error while bootstrapping new node."; + log.error(msg, e); + storageNodeOperationsHandler.logError(StorageNode.OperationMode.BOOTSTRAP, msg, e); + } + } else if (operationHistory.getOperationDefinition().getName().equals("addNodeMaintenance")) { + try { + storageNodeOperationsHandler.handleAddNodeMaintenance(resourceOperationHistory); + } catch (Exception e) { + String msg = "Aborting storage node deployment due to unexpected error while performing add node " + + "maintenance."; + log.error(msg, e); + storageNodeOperationsHandler.logError(StorageNode.OperationMode.ADD_NODE_MAINTENANCE, msg, e); } } + }
- private void handlePrepareForBootstrap(ResourceOperationHistory resourceOperationHistory) { + @Override + @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) + public void handleUpdateKnownNodes(ResourceOperationHistory resourceOperationHistory) { + StorageNode storageNode = findStorageNode(resourceOperationHistory.getResource()); + StorageNode newStorageNode = null; + switch (resourceOperationHistory.getStatus()) { + case INPROGRESS: + // nothing to do here + return; + case CANCELED: + newStorageNode = findNewStorgeNode(StorageNode.OperationMode.ANNOUNCE); + operationCanceled(storageNode, resourceOperationHistory, newStorageNode); + case FAILURE: + newStorageNode = findNewStorgeNode(StorageNode.OperationMode.ANNOUNCE); + operationFailed(storageNode, resourceOperationHistory, newStorageNode); + return; + default: // SUCCESS + Configuration parameters = resourceOperationHistory.getParameters(); + PropertyList addresses = parameters.getList("addresses"); + List<String> remainingNodes = getRemainingNodes(resourceOperationHistory); + + newStorageNode = findNewStorgeNode(StorageNode.OperationMode.ANNOUNCE); + Subject subject = getSubject(resourceOperationHistory); + + if (remainingNodes.isEmpty()) { + log.info("Successfully announced new storage node to storage cluster"); + newStorageNode.setOperationMode(StorageNode.OperationMode.BOOTSTRAP); + prepareNodeForBootstrap(subject, newStorageNode, addresses.deepCopy(false)); + } else { + announceStorageNode(subject, newStorageNode, addresses.deepCopy(false), remainingNodes); + } + } + } + + @Override + @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) + public void handlePrepareForBootstrap(ResourceOperationHistory resourceOperationHistory) { StorageNode newStorageNode = findStorageNode(resourceOperationHistory.getResource()); switch (resourceOperationHistory.getStatus()) { case INPROGRESS: @@ -195,15 +261,9 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa // If the 
operation is canceled the plugin will get an InterruptedException. // The actual bootstrapping may very well complete so we need to add in some // checks to find out if the node is up and part of the cluster. - - log.error("The operation [prepareForBootstrap] was canceled for " + newStorageNode + - ". Deployment of the new storage node cannot proceed."); operationCanceled(newStorageNode, resourceOperationHistory); return; case FAILURE: - log.error("The operation [preparedForBootstrap] failed for " + newStorageNode + ". The reported " + - "failure is: " + resourceOperationHistory.getErrorMessage()); - log.error("Deployment of the new storage node cannot proceed."); operationFailed(newStorageNode, resourceOperationHistory); return; default: // SUCCESS @@ -212,47 +272,9 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa } }
- private void handleUpdateKnownNodes(ResourceOperationHistory resourceOperationHistory) { - StorageNode storageNode = findStorageNode(resourceOperationHistory.getResource()); - StorageNode newStorageNode = null; - switch (resourceOperationHistory.getStatus()) { - case INPROGRESS: - // nothing to do here - return; - case CANCELED: - log.error("The operation [updateKnownNodes] was canceled for " + storageNode + - ". Deployment of the new storage node cannot proceed."); - newStorageNode = findNewStorgeNode(StorageNode.OperationMode.ANNOUNCE); - operationCanceled(storageNode, resourceOperationHistory, newStorageNode); - case FAILURE: - log.error("The operation [updateKnownNodes] failed for " + storageNode + ". The reported " + - "failure is: " + resourceOperationHistory.getErrorMessage()); - log.error("Deployment of the new storage node cannot proceed."); - newStorageNode = findNewStorgeNode(StorageNode.OperationMode.ANNOUNCE); - operationFailed(storageNode, resourceOperationHistory, newStorageNode); - return; - default: // SUCCESS - if (log.isInfoEnabled()) { - log.info("Finished announcing cluster nodes to " + storageNode); - } - Configuration parameters = resourceOperationHistory.getParameters(); - PropertyList addresses = parameters.getList("addresses"); - List<String> remainingNodes = getRemainingNodes(resourceOperationHistory); - - newStorageNode = findNewStorgeNode(StorageNode.OperationMode.ANNOUNCE); - Subject subject = getSubject(resourceOperationHistory); - - if (remainingNodes.isEmpty()) { - log.info("Successfully announced new storage node to cluster"); - newStorageNode.setOperationMode(StorageNode.OperationMode.BOOTSTRAP); - prepareNodeForBootstrap(subject, newStorageNode, addresses.deepCopy(false)); - } else { - announceStorageNode(subject, newStorageNode, addresses.deepCopy(false), remainingNodes); - } - } - } - - private void handleAddNodeMaintenance(ResourceOperationHistory resourceOperationHistory) { + @Override + 
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) + public void handleAddNodeMaintenance(ResourceOperationHistory resourceOperationHistory) { StorageNode storageNode = findStorageNode(resourceOperationHistory.getResource()); StorageNode newStorageNode = null; switch (resourceOperationHistory.getStatus()) { @@ -260,21 +282,16 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa // nothing to do here return; case CANCELED: - log.error("The operation [addNodeMaintenance] was canceled for " + storageNode + ". This operation " + - "needs to be run on each storage node when a new node is added to the cluster."); newStorageNode = findNewStorgeNode(StorageNode.OperationMode.ADD_NODE_MAINTENANCE); operationCanceled(storageNode, resourceOperationHistory, newStorageNode); return; case FAILURE: - log.error("The operation [addNodeMaintenance] failed for " + storageNode + ". This operation " + - "needs to be run on each storage node when a new node is added to the cluster. The reported " + - "failure is: " + resourceOperationHistory.getErrorMessage()); newStorageNode = findNewStorgeNode(StorageNode.OperationMode.ADD_NODE_MAINTENANCE); operationFailed(storageNode, resourceOperationHistory, newStorageNode); return; default: // SUCCESS if (log.isInfoEnabled()) { - log.info("Finnished cluster maintenance for " + storageNode + " for addition of new node"); + log.info("Finished cluster maintenance for " + storageNode + " for addition of new node"); } storageNode.setOperationMode(StorageNode.OperationMode.NORMAL); StorageNode nextNode = takeFromQueue(storageNode, StorageNode.OperationMode.ADD_NODE_MAINTENANCE); @@ -298,6 +315,10 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa
private void operationCanceled(StorageNode storageNode, ResourceOperationHistory operationHistory, StorageNode newStorageNode) { + log.error("Deployment has been aborted due to canceled operation [" + + operationHistory.getOperationDefinition().getDisplayName() + " on " + storageNode.getResource() + + ": " + operationHistory.getErrorMessage()); + newStorageNode.setErrorMessage("Deployment has been aborted due to canceled resource operation on " + storageNode.getAddress()); storageNode.setErrorMessage("Deployment of " + newStorageNode.getAddress() + " has been aborted due " + @@ -307,6 +328,10 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa }
private void operationCanceled(StorageNode newStorageNode, ResourceOperationHistory operationHistory) { + log.error("Deployment has been aborted due to canceled operation [" + + operationHistory.getOperationDefinition().getDisplayName() + " on " + newStorageNode.getResource() + + ": " + operationHistory.getErrorMessage()); + newStorageNode.setErrorMessage("Deployment has been aborted due to canceled resource operation [" + operationHistory.getOperationDefinition().getDisplayName() + "]."); newStorageNode.setFailedOperation(operationHistory); @@ -314,6 +339,10 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa
private void operationFailed(StorageNode storageNode, ResourceOperationHistory operationHistory, StorageNode newStorageNode) { + log.error("Deployment has been aborted due to failed operation [" + + operationHistory.getOperationDefinition().getDisplayName() + "] on " + storageNode.getResource() + + ": " + operationHistory.getErrorMessage()); + newStorageNode.setErrorMessage("Deployment has been aborted due to failed resource operation on " + storageNode.getAddress()); storageNode.setErrorMessage("Deployment of " + newStorageNode.getAddress() + " has been aborted due " + @@ -322,6 +351,10 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa }
private void operationFailed(StorageNode newStorageNode, ResourceOperationHistory operationHistory) { + log.error("Deployment has been aborted due to failed operation [" + + operationHistory.getOperationDefinition().getDisplayName() + "] on " + newStorageNode.getResource() + + ": " + operationHistory.getErrorMessage()); + newStorageNode.setErrorMessage("Deployment has been aborted due to failed resource operation [" + operationHistory.getOperationDefinition().getDisplayName() + "]."); newStorageNode.setFailedOperation(operationHistory); @@ -391,28 +424,21 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa }
private StorageNode findStorageNodeByAddress(String address) { - try { - return entityManager.createNamedQuery(StorageNode.QUERY_FIND_BY_ADDRESS, StorageNode.class) + return entityManager.createNamedQuery(StorageNode.QUERY_FIND_BY_ADDRESS, StorageNode.class) .setParameter("address", address).getSingleResult(); - - } catch (PersistenceException e) { - throw new StorageNodeDeploymentException("Storage node deployment has failed! Failed to fetch the next " + - "storage node at " + address + " to be updated.", e); - } }
private StorageNode findNewStorgeNode(StorageNode.OperationMode operationMode) { - try { - return entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, StorageNode.class) + return entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, StorageNode.class) .setParameter("operationMode", operationMode).getSingleResult(); - } catch (PersistenceException e) { - throw new StorageNodeDeploymentException("Storage node deployment has failed! Failed to fetch the " + - "storage node to be deployed.", e); - } }
- private boolean isStorageNodeOperation(OperationDefinition operationDefinition) { - ResourceType resourceType = operationDefinition.getResourceType(); + private boolean isStorageNodeOperation(ResourceOperationHistory operationHistory) { + if (operationHistory == null) { + return false; + } + + ResourceType resourceType = operationHistory.getOperationDefinition().getResourceType(); return resourceType.getName().equals(STORAGE_NODE_TYPE_NAME) && resourceType.getPlugin().equals(STORAGE_NODE_PLUGIN_NAME); } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java index 44bb842..83b0ce5 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java @@ -7,6 +7,7 @@ import javax.ejb.Asynchronous; import org.rhq.core.domain.auth.Subject; import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.operation.OperationHistory; +import org.rhq.core.domain.operation.ResourceOperationHistory;
/** * @author John Sanda @@ -16,6 +17,12 @@ public interface StorageNodeOperationsHandlerLocal { @Asynchronous void handleOperationUpdateIfNecessary(OperationHistory operationHistory);
+ void handleUpdateKnownNodes(ResourceOperationHistory operationHistory); + + void handlePrepareForBootstrap(ResourceOperationHistory operationHistory); + + void handleAddNodeMaintenance(ResourceOperationHistory operationHistory); + void announceStorageNode(Subject subject, StorageNode storageNode);
void bootstrapStorageNode(Subject subject, StorageNode storageNode); @@ -23,4 +30,6 @@ public interface StorageNodeOperationsHandlerLocal { void performAddNodeMaintenanceIfNecessary(InetAddress storageNodeAddress);
void performAddNodeMaintenance(Subject subject, StorageNode storageNode); + + void logError(StorageNode.OperationMode newStorageNodeOperationMode, String error, Exception e); }
commit 2089a3788c5bc35de0b81a35c99398c43489ef59 Author: John Sanda jsanda@redhat.com Date: Wed Aug 14 17:36:23 2013 -0400
capture and log deployment failures that result from failed resource operations
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 34f6381..861e3fa 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -182,7 +182,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN if (log.isInfoEnabled()) { log.info("Scheduling cluster maintenance to deploy " + storageNode + " into the storage cluster..."); } - deployStorageNode(subjectManager.getOverlord(), storageNode.getId()); + deployStorageNode(subjectManager.getOverlord(), storageNode); } } catch (UnknownHostException e) { throw new RuntimeException("Could not resolve address [" + address + "]. The resource " + resource + @@ -233,18 +233,21 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN }
@Override - public void deployStorageNode(Subject subject, int storageNodeId) { - StorageNode storageNode = entityManager.find(StorageNode.class, storageNodeId); + public void deployStorageNode(Subject subject, StorageNode storageNode) { + storageNode = entityManager.find(StorageNode.class, storageNode.getId());
switch (storageNode.getOperationMode()) { case INSTALLED: case ANNOUNCE: + reset(); storageNodeOperationsHandler.announceStorageNode(subject, storageNode); break; case BOOTSTRAP: + reset(); storageNodeOperationsHandler.bootstrapStorageNode(subject, storageNode); break; case ADD_NODE_MAINTENANCE: + reset(); storageNodeOperationsHandler.performAddNodeMaintenance(subject, storageNode); default: // For any other operation mode, the storage node should already be part of @@ -253,6 +256,13 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN } }
+ private void reset() { + for (StorageNode storageNode : getStorageNodes()) { + storageNode.setErrorMessage(null); + storageNode.setFailedOperation(null); + } + } + private List<StorageNode> combine(List<StorageNode> storageNodes, StorageNode storageNode) { List<StorageNode> newList = new ArrayList<StorageNode>(storageNodes.size() + 1); newList.addAll(storageNodes); diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java index e5f4f22..75a795c 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java @@ -169,5 +169,5 @@ public interface StorageNodeManagerLocal {
StorageNode createStorageNode(Resource resource);
- void deployStorageNode(Subject subject, int storageNodeId); + void deployStorageNode(Subject subject, StorageNode storageNode); } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java index 75ac02b..2255299 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java @@ -96,4 +96,6 @@ public interface StorageNodeManagerRemote { * @return all storage nodes alerts */ PageList<Alert> findAllStorageNodeAlerts(Subject subject, StorageNode storageNode); + + void deployStorageNode(Subject sbubject, StorageNode storageNode); } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java index f996bf2..aaa54f5 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java @@ -2,10 +2,8 @@ package org.rhq.enterprise.server.storage;
import java.net.InetAddress; import java.util.ArrayList; -import java.util.HashSet; import java.util.LinkedList; import java.util.List; -import java.util.Set;
import javax.ejb.Asynchronous; import javax.ejb.EJB; @@ -23,7 +21,6 @@ import org.rhq.core.domain.auth.Subject; import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.common.JobTrigger; import org.rhq.core.domain.configuration.Configuration; -import org.rhq.core.domain.configuration.Property; import org.rhq.core.domain.configuration.PropertyList; import org.rhq.core.domain.configuration.PropertySimple; import org.rhq.core.domain.operation.OperationDefinition; @@ -194,15 +191,20 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa // nothing to do here return; case CANCELED: + // TODO Verify whether or not the node has been bootstrapped + // If the operation is canceled the plugin will get an InterruptedException. + // The actual bootstrapping may very well complete so we need to add in some + // checks to find out if the node is up and part of the cluster. + log.error("The operation [prepareForBootstrap] was canceled for " + newStorageNode + ". Deployment of the new storage node cannot proceed."); - // TODO update workflow status (the status needs to be accessible in the UI) + operationCanceled(newStorageNode, resourceOperationHistory); return; case FAILURE: log.error("The operation [preparedForBootstrap] failed for " + newStorageNode + ". The reported " + "failure is: " + resourceOperationHistory.getErrorMessage()); log.error("Deployment of the new storage node cannot proceed."); - // TODO update workflow status (the status needs to be accessible in the UI) + operationFailed(newStorageNode, resourceOperationHistory); return; default: // SUCCESS // Nothing to do because we wait for the C* driver to notify us that the @@ -212,6 +214,7 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa
private void handleUpdateKnownNodes(ResourceOperationHistory resourceOperationHistory) { StorageNode storageNode = findStorageNode(resourceOperationHistory.getResource()); + StorageNode newStorageNode = null; switch (resourceOperationHistory.getStatus()) { case INPROGRESS: // nothing to do here @@ -219,13 +222,14 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa case CANCELED: log.error("The operation [updateKnownNodes] was canceled for " + storageNode + ". Deployment of the new storage node cannot proceed."); - // TODO update workflow status (the status needs to be accessible in the UI) - return; + newStorageNode = findNewStorgeNode(StorageNode.OperationMode.ANNOUNCE); + operationCanceled(storageNode, resourceOperationHistory, newStorageNode); case FAILURE: log.error("The operation [updateKnownNodes] failed for " + storageNode + ". The reported " + "failure is: " + resourceOperationHistory.getErrorMessage()); log.error("Deployment of the new storage node cannot proceed."); - // TODO update workflow status (the status needs to be accessible in the UI) + newStorageNode = findNewStorgeNode(StorageNode.OperationMode.ANNOUNCE); + operationFailed(storageNode, resourceOperationHistory, newStorageNode); return; default: // SUCCESS if (log.isInfoEnabled()) { @@ -235,7 +239,7 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa PropertyList addresses = parameters.getList("addresses"); List<String> remainingNodes = getRemainingNodes(resourceOperationHistory);
- StorageNode newStorageNode = findNewStorgeNode(StorageNode.OperationMode.ANNOUNCE); + newStorageNode = findNewStorgeNode(StorageNode.OperationMode.ANNOUNCE); Subject subject = getSubject(resourceOperationHistory);
if (remainingNodes.isEmpty()) { @@ -248,13 +252,9 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa } }
- private Subject getSubject(ResourceOperationHistory resourceOperationHistory) { - Subject subject = subjectManager.getSubjectByName(resourceOperationHistory.getSubjectName()); - return SessionManager.getInstance().put(subject); - } - private void handleAddNodeMaintenance(ResourceOperationHistory resourceOperationHistory) { StorageNode storageNode = findStorageNode(resourceOperationHistory.getResource()); + StorageNode newStorageNode = null; switch (resourceOperationHistory.getStatus()) { case INPROGRESS: // nothing to do here @@ -262,13 +262,15 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa case CANCELED: log.error("The operation [addNodeMaintenance] was canceled for " + storageNode + ". This operation " + "needs to be run on each storage node when a new node is added to the cluster."); - // TODO update workflow status (the status needs to be accessible in the UI) + newStorageNode = findNewStorgeNode(StorageNode.OperationMode.ADD_NODE_MAINTENANCE); + operationCanceled(storageNode, resourceOperationHistory, newStorageNode); return; case FAILURE: log.error("The operation [addNodeMaintenance] failed for " + storageNode + ". This operation " + "needs to be run on each storage node when a new node is added to the cluster. The reported " + "failure is: " + resourceOperationHistory.getErrorMessage()); - // TODO update workflow status (the status needs to be accessible in the UI) + newStorageNode = findNewStorgeNode(StorageNode.OperationMode.ADD_NODE_MAINTENANCE); + operationFailed(storageNode, resourceOperationHistory, newStorageNode); return; default: // SUCCESS if (log.isInfoEnabled()) { @@ -289,6 +291,42 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa } }
+ private Subject getSubject(ResourceOperationHistory resourceOperationHistory) { + Subject subject = subjectManager.getSubjectByName(resourceOperationHistory.getSubjectName()); + return SessionManager.getInstance().put(subject); + } + + private void operationCanceled(StorageNode storageNode, ResourceOperationHistory operationHistory, + StorageNode newStorageNode) { + newStorageNode.setErrorMessage("Deployment has been aborted due to canceled resource operation on " + + storageNode.getAddress()); + storageNode.setErrorMessage("Deployment of " + newStorageNode.getAddress() + " has been aborted due " + + "to cancellation of resource operation [" + operationHistory.getOperationDefinition().getDisplayName() + + "]."); + storageNode.setFailedOperation(operationHistory); + } + + private void operationCanceled(StorageNode newStorageNode, ResourceOperationHistory operationHistory) { + newStorageNode.setErrorMessage("Deployment has been aborted due to canceled resource operation [" + + operationHistory.getOperationDefinition().getDisplayName() + "]."); + newStorageNode.setFailedOperation(operationHistory); + } + + private void operationFailed(StorageNode storageNode, ResourceOperationHistory operationHistory, + StorageNode newStorageNode) { + newStorageNode.setErrorMessage("Deployment has been aborted due to failed resource operation on " + + storageNode.getAddress()); + storageNode.setErrorMessage("Deployment of " + newStorageNode.getAddress() + " has been aborted due " + + "to failed resource operation [" + operationHistory.getOperationDefinition().getDisplayName() + "]."); + storageNode.setFailedOperation(operationHistory); + } + + private void operationFailed(StorageNode newStorageNode, ResourceOperationHistory operationHistory) { + newStorageNode.setErrorMessage("Deployment has been aborted due to failed resource operation [" + + operationHistory.getOperationDefinition().getDisplayName() + "]."); + newStorageNode.setFailedOperation(operationHistory); + } + private 
StorageNode findStorageNode(Resource resource) { for (StorageNode storageNode : storageNodeManager.getStorageNodes()) { if (storageNode.getResource().getId() == resource.getId()) { @@ -298,31 +336,6 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa return null; }
- private StorageNode findStorageNodeToPrepareForBootstrap(PropertyList addressList) { - // It is possible that we could have more that one INSTALLED node. We want to make - // sure we grab the one that was just announced to the cluster. - Set<String> addresses = toSet(addressList); - List<StorageNode> installedNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, - StorageNode.class).setParameter("operationMode", StorageNode.OperationMode.INSTALLED).getResultList(); - - for (StorageNode installedNode : installedNodes) { - if (addresses.contains(installedNode.getAddress())) { - return installedNode; - } - } - // TODO What should we do in the very unlikely event that we do not find the IP address? - throw new IllegalStateException("Failed to find storage node to be bootstrapped."); - } - - private Set<String> toSet(PropertyList propertyList) { - Set<String> set = new HashSet<String>(); - for (Property property : propertyList.getList()) { - PropertySimple simple = (PropertySimple) property; - set.add(simple.getStringValue()); - } - return set; - } - @Override public void bootstrapStorageNode(Subject subject, StorageNode storageNode) { List<StorageNode> clusterNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE,
commit 374e65d9c0712316f5650ffa7d5a069933c6b8b3 Author: John Sanda jsanda@redhat.com Date: Wed Aug 14 16:13:18 2013 -0400
add fields in StorageNode for error reporting during maintenance
Storage node deployment and undeployment consists of a series of different resource operations. The (un)deployment workflow could fail due to one of those resource operations. When that occurs we can provide a direct link in the StorageNode.failedOperation field to the operation history of the failed operation. This direct link will help with providing quick insight into the cause of the failure.
There is also a new errorMessage field in StorageNode. This field will provide summary info about the failure. If the failure is in server side processing and not in a resource operation, then the errorMessage field should be set but not the failedOperation field.
diff --git a/modules/core/dbutils/src/main/scripts/dbsetup/measurement-schema.xml b/modules/core/dbutils/src/main/scripts/dbsetup/measurement-schema.xml index c15aa1f..f307ff8 100644 --- a/modules/core/dbutils/src/main/scripts/dbsetup/measurement-schema.xml +++ b/modules/core/dbutils/src/main/scripts/dbsetup/measurement-schema.xml @@ -454,7 +454,9 @@ <column name="OPERATION_MODE" required="true" size="32" type="VARCHAR2" /> <column name="CTIME" required="true" type="LONG" /> <column name="MTIME" required="true" type="LONG" /> - <column name="RESOURCE_ID" required="false" type="INTEGER" references="RHQ_RESOURCE(ID)" /> + <column name="ERROR_MSG" required="false" type="LONGVARCHAR"/> + <column name="RESOURCE_ID" required="false" type="INTEGER" references="RHQ_RESOURCE(ID)" /> + <column name="RESOURCE_OP_HIST_ID" required="false" type="INTEGER" references="RHQ_OPERATION_HISTORY(ID)"/>
<!-- This index is for constraint, not performance --> <index name="RHQ_STORAGE_NODE_UNIQUE" unique="true"> diff --git a/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml b/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml index 6f42345..edf0147 100644 --- a/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml +++ b/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml @@ -2184,6 +2184,19 @@ <schema-alterColumn table="RHQ_CONFIG_PD_OSRC" column="EXPRESSION_SCOPE" nullable="false" default="unlimited"/> </schemaSpec>
+ <schemaSpec version="2.137"> + <schema-addColumn table="RHQ_STORAGE_NODE" column="ERROR_MSG" columnType="LONGVARCHAR"/> + <schema-addColumn table="RHQ_STORAGE_NODE" column="RESOURCE_OP_HIST_ID" columnType="INTEGER"/> + <schema-directSQL> + <statement desc="Creating RHQ_STORAGE_NODE foreign key to RHQ_OPERATION_HISTORY"> + ALTER TABLE RHQ_STORAGE_NODE + ADD CONSTRAINT RHQ_SN_OP_HIST_ID_FK + FOREIGN KEY (RESOURCE_OP_HIST_ID) + REFERENCES RHQ_OPERATION_HISTORY (ID) + </statement> + </schema-directSQL> + </schemaSpec> + </dbupgrade> </target> </project> diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java index 3e94526..3f2a89c 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java @@ -40,6 +40,7 @@ import javax.persistence.PrePersist; import javax.persistence.SequenceGenerator; import javax.persistence.Table;
+import org.rhq.core.domain.operation.ResourceOperationHistory; import org.rhq.core.domain.resource.Resource;
/** @@ -134,10 +135,17 @@ public class StorageNode implements Serializable { @Column(name = "MTIME", nullable = false) private long mtime;
+ @Column(name = "ERROR_MSG", nullable = true) + private String errorMessage; + @JoinColumn(name = "RESOURCE_ID", referencedColumnName = "ID", nullable = true) @OneToOne(fetch = FetchType.EAGER, optional = true) private Resource resource;
+ @JoinColumn(name = "RESOURCE_OP_HIST_ID", referencedColumnName = "ID", nullable = true) + @OneToOne(optional = true) + private ResourceOperationHistory failedOperation; + // required for JPA public StorageNode() { } @@ -198,6 +206,22 @@ public class StorageNode implements Serializable { this.resource = resource; }
+ public String getErrorMessage() { + return errorMessage; + } + + public void setErrorMessage(String errorMessage) { + this.errorMessage = errorMessage; + } + + public ResourceOperationHistory getFailedOperation() { + return failedOperation; + } + + public void setFailedOperation(ResourceOperationHistory failedOperation) { + this.failedOperation = failedOperation; + } + public OperationMode getOperationMode() { return operationMode; }
commit 2b73f4d7f00252b9f4ea71fce47f54458cf683b2 Author: John Sanda jsanda@redhat.com Date: Wed Aug 14 12:08:13 2013 -0400
refactoring state transitions and adding method for deployment
When a storage node is committed into inventory its operation mode is set to INSTALLED, unless the storage node entity exists in which case the mode is set to NORMAL. After creating the storage node entity, deployment is started. The operation mode changes to ANNOUNCE. The address of the new node is announced to existing cluster nodes. After announcing completes, the operation mode changes to BOOTSTRAP, and the prepareForBootstrap operation is run on the new node. When the new node is reported up as part of the cluster, the operation mode of all cluster nodes is set to ADD_NODE_MAINTENANCE. The addNodeMaintenance operation is then run on each storage node. When that operation completes, the node's operation mode is set back to NORMAL.
The StorageNodeManagerBean.deployStorageNode method looks at the operation mode of the node to determine at what step in the process to start the deployment. The deployStorageNode method is the only method that the UI or remote API will need to invoke to start or resume a deployment.
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java index 6a5cf6a..3e94526 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java @@ -209,11 +209,13 @@ public class StorageNode implements Serializable { public enum OperationMode {
DOWN("This storage node is down"), // - INSTALLED("This storage node is newly installed but not yet operationial"), // + INSTALLED("This storage node is newly installed but not yet operational"), // MAINTENANCE("This storage node is in maintenance mode"), // NORMAL("This storage node is running normally"), - ANNOUNCE("The storage node is running normally and is being updated to have newly deployed storage nodes " + - "announced to it so that those new nodes can join the cluster."), + ANNOUNCE("The storage node is installed but not yet part of the cluster. It is being announced so that it " + + "can join the cluster."), + BOOTSTRAP("The storage is installed but not yet part of the cluster. It is getting bootstrapped into the " + + "cluster"), ADD_NODE_MAINTENANCE("The storage node is running and is preparing to undergo routine maintenance that is " + "necessary when a new node joins the cluster.");
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 11d81b4..34f6381 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -177,21 +177,12 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN storageNode.setOperationMode(OperationMode.NORMAL); initClusterSettingsIfNecessary(pluginConfig); } else { - storageNode = new StorageNode(); - storageNode.setAddress(address); - storageNode.setCqlPort(Integer.parseInt(pluginConfig.getSimpleValue(RHQ_STORAGE_CQL_PORT_PROPERTY))); - storageNode.setJmxPort(Integer.parseInt(pluginConfig.getSimpleValue(RHQ_STORAGE_JMX_PORT_PROPERTY))); - storageNode.setResource(resource); - storageNode.setOperationMode(OperationMode.INSTALLED); - - entityManager.persist(storageNode); + storageNode = createStorageNode(resource);
if (log.isInfoEnabled()) { - log.info(storageNode + " is a new storage node and not part of the storage node cluster."); - log.info("Scheduling maintenance operations to bring " + storageNode + " into the cluster..."); + log.info("Scheduling cluster maintenance to deploy " + storageNode + " into the storage cluster..."); } - - announceNewNode(storageNode); + deployStorageNode(subjectManager.getOverlord(), storageNode.getId()); } } catch (UnknownHostException e) { throw new RuntimeException("Could not resolve address [" + address + "]. The resource " + resource + @@ -224,17 +215,42 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN storageClusterSettingsManager.setClusterSettings(subjectManager.getOverlord(), clusterSettings); }
- private void announceNewNode(StorageNode newStorageNode) { - if (log.isInfoEnabled()) { - log.info("Announcing " + newStorageNode + " to storage node cluster."); - } + @Override + @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) + public StorageNode createStorageNode(Resource resource) { + Configuration pluginConfig = resource.getPluginConfiguration(); + + StorageNode storageNode = new StorageNode(); + storageNode.setAddress(pluginConfig.getSimpleValue(RHQ_STORAGE_ADDRESS_PROPERTY)); + storageNode.setCqlPort(Integer.parseInt(pluginConfig.getSimpleValue(RHQ_STORAGE_CQL_PORT_PROPERTY))); + storageNode.setJmxPort(Integer.parseInt(pluginConfig.getSimpleValue(RHQ_STORAGE_JMX_PORT_PROPERTY))); + storageNode.setResource(resource); + storageNode.setOperationMode(OperationMode.INSTALLED);
- List<StorageNode> clusteredNodes = getClusteredStorageNodes(); - for (StorageNode node : clusteredNodes) { - node.setOperationMode(OperationMode.ANNOUNCE); + entityManager.persist(storageNode); + + return storageNode; + } + + @Override + public void deployStorageNode(Subject subject, int storageNodeId) { + StorageNode storageNode = entityManager.find(StorageNode.class, storageNodeId); + + switch (storageNode.getOperationMode()) { + case INSTALLED: + case ANNOUNCE: + storageNodeOperationsHandler.announceStorageNode(subject, storageNode); + break; + case BOOTSTRAP: + storageNodeOperationsHandler.bootstrapStorageNode(subject, storageNode); + break; + case ADD_NODE_MAINTENANCE: + storageNodeOperationsHandler.performAddNodeMaintenance(subject, storageNode); + default: + // For any other operation mode, the storage node should already be part of + // the cluster. + // TODO Make sure that the storage node is in fact part of the cluster } - PropertyList addresses = createPropertyListOfAddresses("addresses", combine(clusteredNodes, newStorageNode)); - storageNodeOperationsHandler.announceNewStorageNode(newStorageNode, clusteredNodes.get(0), addresses); }
private List<StorageNode> combine(List<StorageNode> storageNodes, StorageNode storageNode) { @@ -254,12 +270,6 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN }
@Override - public boolean isAddNodeMaintenanceInProgress() { - return !entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE) - .setParameter("operationMode", OperationMode.ADD_NODE_MAINTENANCE).getResultList().isEmpty(); - } - - @Override @RequiredPermission(Permission.MANAGE_SETTINGS) public StorageNodeLoadComposite getLoad(Subject subject, StorageNode node, long beginTime, long endTime) { int resourceId = getResourceIdFromStorageNode(node); @@ -418,11 +428,6 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN return result; }
- private List<StorageNode> getClusteredStorageNodes() { - return entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, StorageNode.class) - .setParameter("operationMode", OperationMode.NORMAL).getResultList(); - } - @Override @RequiredPermission(Permission.MANAGE_SETTINGS) public PageList<StorageNode> findStorageNodesByCriteria(Subject subject, StorageNodeCriteria criteria) { @@ -796,4 +801,4 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN return successResultFound; }
-} \ No newline at end of file +} diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java index b5ee7f0..e5f4f22 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java @@ -167,5 +167,7 @@ public interface StorageNodeManagerLocal {
Map<String, List<MeasurementDataNumericHighLowComposite>> findStorageNodeLoadDataForLast(Subject subject, StorageNode node, long beginTime, long endTime, int numPoints);
- boolean isAddNodeMaintenanceInProgress(); + StorageNode createStorageNode(Resource resource); + + void deployStorageNode(Subject subject, int storageNodeId); } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java index 734da35..7db95fb 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java @@ -7,7 +7,6 @@ import com.datastax.driver.core.exceptions.NoHostAvailableException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory;
-import org.rhq.enterprise.server.cloud.StorageNodeManagerLocal; import org.rhq.enterprise.server.util.LookupUtil; import org.rhq.server.metrics.StorageStateListener;
@@ -30,12 +29,8 @@ public class StorageClusterMonitor implements StorageStateListener {
isClusterAvailable = true;
- StorageNodeManagerLocal storageNodeManager = LookupUtil.getStorageNodeManager(); - if (storageNodeManager.isAddNodeMaintenanceInProgress()) { - log.info("Scheduling cluster maintenance..."); - StorageNodeOperationsHandlerLocal storageOperationsHandler = LookupUtil.getStorageNodeOperationsHandler(); - storageOperationsHandler.performAddNodeMaintenance(address); - } + StorageNodeOperationsHandlerLocal storageOperationsHandler = LookupUtil.getStorageNodeOperationsHandler(); + storageOperationsHandler.performAddNodeMaintenanceIfNecessary(address); }
@Override diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeDeploymentException.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeDeploymentException.java new file mode 100644 index 0000000..fca6e96 --- /dev/null +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeDeploymentException.java @@ -0,0 +1,22 @@ +package org.rhq.enterprise.server.storage; + +/** + * @author John Sanda + */ +public class StorageNodeDeploymentException extends RuntimeException { + + public StorageNodeDeploymentException() { + } + + public StorageNodeDeploymentException(String message) { + super(message); + } + + public StorageNodeDeploymentException(String message, Throwable cause) { + super(message, cause); + } + + public StorageNodeDeploymentException(Throwable cause) { + super(cause); + } +} diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java index 1bf3cec..f996bf2 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java @@ -1,15 +1,20 @@ package org.rhq.enterprise.server.storage;
import java.net.InetAddress; +import java.util.ArrayList; import java.util.HashSet; +import java.util.LinkedList; import java.util.List; import java.util.Set;
import javax.ejb.Asynchronous; import javax.ejb.EJB; import javax.ejb.Stateless; +import javax.ejb.TransactionAttribute; +import javax.ejb.TransactionAttributeType; import javax.persistence.EntityManager; import javax.persistence.PersistenceContext; +import javax.persistence.PersistenceException;
import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -29,6 +34,7 @@ import org.rhq.core.domain.resource.Resource; import org.rhq.core.domain.resource.ResourceType; import org.rhq.core.util.StringUtil; import org.rhq.enterprise.server.RHQConstants; +import org.rhq.enterprise.server.auth.SessionManager; import org.rhq.enterprise.server.auth.SubjectManagerLocal; import org.rhq.enterprise.server.cloud.StorageNodeManagerLocal; import org.rhq.enterprise.server.operation.OperationManagerLocal; @@ -69,39 +75,72 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa private StorageClientManagerBean storageClientManager;
@Override - public void announceNewStorageNode(StorageNode newStorageNode, StorageNode clusterNode, PropertyList addresses) { + @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) + public void announceStorageNode(Subject subject, StorageNode storageNode) { if (log.isInfoEnabled()) { - log.info("Announcing new storage node " + newStorageNode + " to cluster node " + clusterNode); + log.info("Announcing " + storageNode + " to storage node cluster."); + } + storageNode.setOperationMode(StorageNode.OperationMode.ANNOUNCE); + List<StorageNode> clusterNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, + StorageNode.class).setParameter("operationMode", StorageNode.OperationMode.NORMAL).getResultList(); + List<StorageNode> allNodes = new ArrayList<StorageNode>(clusterNodes); + allNodes.add(storageNode); + + announceStorageNode(subject, storageNode, createPropertyListOfAddresses("addresses", allNodes), + getAddresses(clusterNodes)); + + } + + private void announceStorageNode(Subject subject, StorageNode storageNode, PropertyList addresses, + List<String> remainingNodes) { + String address = remainingNodes.remove(0); + StorageNode clusterNode = findStorageNodeByAddress(address); + + if (log.isInfoEnabled()) { + log.info("Announcing " + storageNode + " to cluster node " + clusterNode); } - Subject overlord = subjectManager.getOverlord(); ResourceOperationSchedule schedule = new ResourceOperationSchedule(); schedule.setResource(clusterNode.getResource()); schedule.setJobTrigger(JobTrigger.createNowTrigger()); - schedule.setSubject(overlord); + schedule.setSubject(subject); schedule.setOperationName("updateKnownNodes"); Configuration parameters = new Configuration(); parameters.put(addresses); + parameters.put(new PropertySimple("remainingNodes", StringUtil.listToString(remainingNodes))); schedule.setParameters(parameters);
- operationManager.scheduleResourceOperation(overlord, schedule); + operationManager.scheduleResourceOperation(subject, schedule); }
@Override - public void performAddNodeMaintenance(InetAddress storageNodeAddress) { + public void performAddNodeMaintenanceIfNecessary(InetAddress storageNodeAddress) { StorageNode storageNode = entityManager.createNamedQuery(StorageNode.QUERY_FIND_BY_ADDRESS, StorageNode.class).setParameter("address", storageNodeAddress.getHostAddress()).getSingleResult(); - storageNode.setOperationMode(StorageNode.OperationMode.ADD_NODE_MAINTENANCE);
+ if (storageNode.getOperationMode() == StorageNode.OperationMode.BOOTSTRAP) { + performAddNodeMaintenance(subjectManager.getOverlord(), storageNode); + } else { + log.info(storageNode + " has already been bootstrapped. Skipping add node maintenance."); + } + } + + @Override + public void performAddNodeMaintenance(Subject subject, StorageNode storageNode) { + storageNode.setOperationMode(StorageNode.OperationMode.ADD_NODE_MAINTENANCE); List<StorageNode> clusterNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, - StorageNode.class).setParameter("operationMode", StorageNode.OperationMode.ADD_NODE_MAINTENANCE) + StorageNode.class).setParameter("operationMode", StorageNode.OperationMode.NORMAL) .getResultList(); - + for (StorageNode node : clusterNodes) { + node.setOperationMode(StorageNode.OperationMode.ADD_NODE_MAINTENANCE); + } + clusterNodes.add(storageNode); boolean runRepair = updateSchemaIfNecessary(clusterNodes); - - performAddNodeMaintenance(storageNode, runRepair, createPropertyListOfAddresses(SEEDS_LIST, clusterNodes)); + performAddNodeMaintenance(subject, storageNode, runRepair, createPropertyListOfAddresses(SEEDS_LIST, + clusterNodes)); }
- private void performAddNodeMaintenance(StorageNode storageNode, boolean runRepair, PropertyList seedsList) { + private void performAddNodeMaintenance(Subject subject, StorageNode storageNode, boolean runRepair, + PropertyList seedsList) { if (log.isInfoEnabled()) { log.info("Running addNodeMaintenance for storage node " + storageNode); } @@ -111,7 +150,7 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa ResourceOperationSchedule schedule = new ResourceOperationSchedule(); schedule.setResource(storageNode.getResource()); schedule.setJobTrigger(JobTrigger.createNowTrigger()); - schedule.setSubject(overlord); + schedule.setSubject(subject); schedule.setOperationName("addNodeMaintenance");
Configuration config = new Configuration(); @@ -192,22 +231,28 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa if (log.isInfoEnabled()) { log.info("Finished announcing cluster nodes to " + storageNode); } - storageNode.setOperationMode(StorageNode.OperationMode.ADD_NODE_MAINTENANCE); Configuration parameters = resourceOperationHistory.getParameters(); PropertyList addresses = parameters.getList("addresses"); - StorageNode nextNode = takeFromQueue(storageNode, StorageNode.OperationMode.ANNOUNCE); + List<String> remainingNodes = getRemainingNodes(resourceOperationHistory);
- if (nextNode == null) { + StorageNode newStorageNode = findNewStorgeNode(StorageNode.OperationMode.ANNOUNCE); + Subject subject = getSubject(resourceOperationHistory); + + if (remainingNodes.isEmpty()) { log.info("Successfully announced new storage node to cluster"); - StorageNode installedNode = findStorageNodeToPrepareForBootstrap(addresses); - // Pass a copy of addresses to avoid a TransientObjectException - prepareNodeForBootstrap(installedNode, addresses.deepCopy(false)); + newStorageNode.setOperationMode(StorageNode.OperationMode.BOOTSTRAP); + prepareNodeForBootstrap(subject, newStorageNode, addresses.deepCopy(false)); } else { - announceNewStorageNode(storageNode, nextNode, addresses.deepCopy(false)); + announceStorageNode(subject, newStorageNode, addresses.deepCopy(false), remainingNodes); } } }
+ private Subject getSubject(ResourceOperationHistory resourceOperationHistory) { + Subject subject = subjectManager.getSubjectByName(resourceOperationHistory.getSubjectName()); + return SessionManager.getInstance().put(subject); + } + private void handleAddNodeMaintenance(ResourceOperationHistory resourceOperationHistory) { StorageNode storageNode = findStorageNode(resourceOperationHistory.getResource()); switch (resourceOperationHistory.getStatus()) { @@ -238,7 +283,8 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa Configuration parameters = resourceOperationHistory.getParameters(); boolean runRepair = parameters.getSimple(RUN_REPAIR_PROPERTY).getBooleanValue(); PropertyList seedsList = parameters.getList(SEEDS_LIST).deepCopy(false); - performAddNodeMaintenance(nextNode, runRepair, seedsList); + Subject subject = getSubject(resourceOperationHistory); + performAddNodeMaintenance(subject, nextNode, runRepair, seedsList); } } } @@ -277,7 +323,15 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa return set; }
- private void prepareNodeForBootstrap(StorageNode storageNode, PropertyList addresses) { + @Override + public void bootstrapStorageNode(Subject subject, StorageNode storageNode) { + List<StorageNode> clusterNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, + StorageNode.class).setParameter("operationMode", StorageNode.OperationMode.NORMAL).getResultList(); + clusterNodes.add(storageNode); + prepareNodeForBootstrap(subject, storageNode, createPropertyListOfAddresses("addresses", clusterNodes)); + } + + private void prepareNodeForBootstrap(Subject subject, StorageNode storageNode, PropertyList addresses) { if (log.isInfoEnabled()) { log.info("Preparing to bootstrap " + storageNode + " into cluster..."); } @@ -285,11 +339,10 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa ResourceOperationSchedule schedule = new ResourceOperationSchedule(); schedule.setResource(storageNode.getResource()); schedule.setJobTrigger(JobTrigger.createNowTrigger()); - schedule.setSubject(subjectManager.getOverlord()); + schedule.setSubject(subject); schedule.setOperationName("prepareForBootstrap");
- StorageClusterSettings clusterSettings = storageClusterSettingsManager.getClusterSettings( - subjectManager.getOverlord()); + StorageClusterSettings clusterSettings = storageClusterSettingsManager.getClusterSettings(subject); Configuration parameters = new Configuration(); parameters.put(new PropertySimple("cqlPort", clusterSettings.getCqlPort())); parameters.put(new PropertySimple("gossipPort", clusterSettings.getGossipPort())); @@ -297,7 +350,7 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa
schedule.setParameters(parameters);
- operationManager.scheduleResourceOperation(subjectManager.getOverlord(), schedule); + operationManager.scheduleResourceOperation(subject, schedule); }
private StorageNode takeFromQueue(StorageNode lastTaken, StorageNode.OperationMode queue) { @@ -311,6 +364,40 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa return nodes.get(0); }
+ private List<String> getRemainingNodes(ResourceOperationHistory resourceOperationHistory) { + LinkedList<String> addresses = new LinkedList<String>(); + Configuration results = resourceOperationHistory.getResults(); + String remainingNodes = results.getSimpleValue("remainingNodes"); + + if (!StringUtil.isEmpty(remainingNodes)) { + for (String address : remainingNodes.split(",")) { + addresses.add(address); + } + } + return addresses; + } + + private StorageNode findStorageNodeByAddress(String address) { + try { + return entityManager.createNamedQuery(StorageNode.QUERY_FIND_BY_ADDRESS, StorageNode.class) + .setParameter("address", address).getSingleResult(); + + } catch (PersistenceException e) { + throw new StorageNodeDeploymentException("Storage node deployment has failed! Failed to fetch the next " + + "storage node at " + address + " to be updated.", e); + } + } + + private StorageNode findNewStorgeNode(StorageNode.OperationMode operationMode) { + try { + return entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, StorageNode.class) + .setParameter("operationMode", operationMode).getSingleResult(); + } catch (PersistenceException e) { + throw new StorageNodeDeploymentException("Storage node deployment has failed! Failed to fetch the " + + "storage node to be deployed.", e); + } + } + private boolean isStorageNodeOperation(OperationDefinition operationDefinition) { ResourceType resourceType = operationDefinition.getResourceType(); return resourceType.getName().equals(STORAGE_NODE_TYPE_NAME) && @@ -411,4 +498,12 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa return list; }
+ private List<String> getAddresses(List<StorageNode> storageNodes) { + List<String> addresses = new LinkedList<String>(); + for (StorageNode storageNode : storageNodes) { + addresses.add(storageNode.getAddress()); + } + return addresses; + } + } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java index fcdcd3e..44bb842 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java @@ -4,8 +4,8 @@ import java.net.InetAddress;
import javax.ejb.Asynchronous;
+import org.rhq.core.domain.auth.Subject; import org.rhq.core.domain.cloud.StorageNode; -import org.rhq.core.domain.configuration.PropertyList; import org.rhq.core.domain.operation.OperationHistory;
/** @@ -16,7 +16,11 @@ public interface StorageNodeOperationsHandlerLocal { @Asynchronous void handleOperationUpdateIfNecessary(OperationHistory operationHistory);
- void announceNewStorageNode(StorageNode newStorageNode, StorageNode clusterNode, PropertyList addresses); + void announceStorageNode(Subject subject, StorageNode storageNode);
- void performAddNodeMaintenance(InetAddress storageNodeAddress); + void bootstrapStorageNode(Subject subject, StorageNode storageNode); + + void performAddNodeMaintenanceIfNecessary(InetAddress storageNodeAddress); + + void performAddNodeMaintenance(Subject subject, StorageNode storageNode); } diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java index e76cfa0..7f58037 100644 --- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java @@ -255,7 +255,9 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper EmsOperation emsOperation = authBean.getOperation("reloadConfiguration"); emsOperation.invoke();
- result.setSimpleResult("Successfully updated the set of known nodes."); + Configuration complexResults = result.getComplexResults(); + complexResults.put(new PropertySimple("details", "Successfully updated the set of known nodes.")); + complexResults.put(params.get("remainingNodes").deepCopy(false));
return result; } catch (InternodeAuthConfUpdateException e) { diff --git a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml index a2d04d0..4ef10cd 100644 --- a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml +++ b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml @@ -102,7 +102,12 @@ <c:list-property name="addresses"> <c:simple-property name="address"/> </c:list-property> + <c:simple-property name="remainingNodes"/> </parameters> + <results> + <c:simple-property name="details"/> + <c:simple-property name="remainingNodes"/> + </results> </operation>
<operation name="prepareForBootstrap">
commit a895379f5bc41c0d4f2c67dfb391fe8ecc99f7ea Author: Stefan Negrea snegrea@redhat.com Date: Wed Aug 14 17:58:32 2013 -0500
Add alert template for maintenance operation failures for storage nodes.
diff --git a/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java b/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java index f752d1d..5692bea 100644 --- a/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java +++ b/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java @@ -66,11 +66,14 @@ public class AlertDefinitionServerPluginComponent implements ServerPluginCompone private static final String TOTAL_DISK_USED_PERCENTAGE_METRIC_NAME = "Calculated.TotalDiskUsedPercentage"; private static final String FREE_DISK_TO_DATA_SIZE_RATIO_METRIC_NAME = "Calculated.FreeDiskToDataSizeRatio"; private static final String TAKE_SNAPSHOT_OPERATION_NAME = "takeSnapshot"; + private static final String[] MAINTENANCE_OPERATIONS = new String[] { "readRepair", "addNodeMaintenance", + "updateKnownNodes", "prepareForBootstrap", "prepareForUpgrade", "updateSeedsList", "updateConfiguration" };
static private final List<InjectedTemplate> injectedTemplates; static private final InjectedTemplate storageNodeHighHeapTemplate; static private final InjectedTemplate storageNodeHighDiskUsageTemplate; static private final InjectedTemplate storageNodeSnapshotFailureTemplate; + static private final InjectedTemplate storageNodeMaintenanceOperationsFailureTemplate;
static { storageNodeHighHeapTemplate = new InjectedTemplate( @@ -91,10 +94,18 @@ public class AlertDefinitionServerPluginComponent implements ServerPluginCompone "StorageNodeSnapshotFailureTemplate", // "An alert template to notify users when a snapshot operations fails for an RHQ Storage Node. When fired please see documentation for the proper corrective action.");
+ storageNodeMaintenanceOperationsFailureTemplate = new InjectedTemplate( + "RHQStorage", // + "RHQ Storage Node", // + "StorageNodeMaintenanceOperationsFailureTemplate", // + "An alert template to notify users when a maintenance operation fails for an RHQ Storage Node. When fired please see documentation for the proper corrective action."); + injectedTemplates = new ArrayList<InjectedTemplate>(); injectedTemplates.add(storageNodeHighHeapTemplate); injectedTemplates.add(storageNodeHighDiskUsageTemplate); injectedTemplates.add(storageNodeSnapshotFailureTemplate); + injectedTemplates.add(storageNodeMaintenanceOperationsFailureTemplate); + }
private ServerPluginContext context; @@ -239,6 +250,8 @@ public class AlertDefinitionServerPluginComponent implements ServerPluginCompone newAlertDefId = injectStorageNodeHighDiskUsageTemplate(resourceType); } else if (storageNodeSnapshotFailureTemplate.equals(injectedAlertDef)) { newAlertDefId = injectStorageNodeSnapshotFailureTemplate(resourceType); + } else if (storageNodeMaintenanceOperationsFailureTemplate.equals(injectedAlertDef)) { + newAlertDefId = injectStorageNodeMaintenanceOperationsFailureTemplate(resourceType); }
adc.addFilterId(newAlertDefId); @@ -398,6 +411,36 @@ public class AlertDefinitionServerPluginComponent implements ServerPluginCompone return newTemplateId; }
+ private int injectStorageNodeMaintenanceOperationsFailureTemplate(ResourceType resourceType) { + AlertTemplateManagerLocal alertTemplateManager = LookupUtil.getAlertTemplateManager(); + SubjectManagerLocal subjectManager = LookupUtil.getSubjectManager(); + + AlertDefinition newTemplate = new AlertDefinition(); + newTemplate.setName(storageNodeMaintenanceOperationsFailureTemplate.getName()); + newTemplate.setResourceType(resourceType); + newTemplate.setPriority(AlertPriority.MEDIUM); + newTemplate.setConditionExpression(BooleanExpression.ANY); + newTemplate.setDescription(storageNodeMaintenanceOperationsFailureTemplate.getDescription()); + newTemplate.setRecoveryId(0); + newTemplate.setEnabled(true); + + for (String operation : MAINTENANCE_OPERATIONS) { + AlertCondition snapshotFailureCondition = new AlertCondition(); + snapshotFailureCondition.setCategory(AlertConditionCategory.CONTROL); + snapshotFailureCondition.setName(operation); + snapshotFailureCondition.setOption(OperationRequestStatus.FAILURE.name()); + newTemplate.addCondition(snapshotFailureCondition); + } + + AlertDampening dampener = new AlertDampening(AlertDampening.Category.NONE); + newTemplate.setAlertDampening(dampener); + + int newTemplateId = alertTemplateManager.createAlertTemplate(subjectManager.getOverlord(), newTemplate, + resourceType.getId()); + + return newTemplateId; + } + private static class InjectedTemplate { static public final String FIELD_PLUGIN_NAME = "plugin"; static public final String FIELD_RESOURCE_TYPE_NAME = "type";
commit 8a8727caabae944cc749a35a26a6b91abbad5729 Author: Stefan Negrea snegrea@redhat.com Date: Wed Aug 14 15:44:07 2013 -0500
Fixing errors after rebase merge.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index aa6f78f..11d81b4 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -153,7 +153,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN private ResourceManagerLocal resourceManager;
@EJB - private StorageClusterSettingsManagerBean storageClusterSettingsManager; + private StorageClusterSettingsManagerLocal storageClusterSettingsManager;
@EJB private StorageNodeOperationsHandlerLocal storageNodeOperationsHandler; @@ -625,7 +625,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
return configuration; } - + @Override @Asynchronous public void updateConfigurationAsync(Subject subject, StorageNodeConfigurationComposite storageNodeConfiguration) {
commit 61fb79fad813bf75c5274743c4f161d37857b4e5 Author: Stefan Negrea snegrea@redhat.com Date: Wed Aug 14 15:30:37 2013 -0500
More text for the schema compatibility text.
diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/VersionManager.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/VersionManager.java index ec54a0c..fe6ddf9 100644 --- a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/VersionManager.java +++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/VersionManager.java @@ -36,6 +36,7 @@ import org.apache.commons.logging.LogFactory;
import org.rhq.cassandra.schema.exception.InstalledSchemaTooAdvancedException; import org.rhq.cassandra.schema.exception.InstalledSchemaTooOldException; +import org.rhq.cassandra.schema.exception.SchemaNotInstalledException; import org.rhq.core.domain.cloud.StorageNode;
/** @@ -258,6 +259,11 @@ class VersionManager extends AbstractManager { try { initClusterSession();
+ if (!this.schemaExists()) { + log.error("Storage cluster schema not installed. Please re-run the server installer to install the storage cluster schema properly."); + throw new SchemaNotInstalledException(); + } + int installedSchemaVersion = this.getInstalledSchemaVersion();
UpdateFolder folder = new UpdateFolder(Task.Update.getFolder()); @@ -280,7 +286,7 @@ class VersionManager extends AbstractManager { } finally { shutdownClusterConnection();
- log.info("Completed check for storage schema compatibility."); + log.info("Completed storage schema compatibility check."); } } } diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/exception/InstalledSchemaTooAdvancedException.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/exception/InstalledSchemaTooAdvancedException.java index 2f83ef5..bf36b25 100644 --- a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/exception/InstalledSchemaTooAdvancedException.java +++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/exception/InstalledSchemaTooAdvancedException.java @@ -33,6 +33,7 @@ public class InstalledSchemaTooAdvancedException extends Exception {
public InstalledSchemaTooAdvancedException() { super( - "Storage schema is too advanced for the current installation. Schema revisions have been applied beyond the capability of the installation."); + "Storage schema is too advanced for the current installation. Schema revisions have been applied beyond the capability of the installation. " + + "Please install a newer version of the server that is compatibile with the storage schema version."); } } \ No newline at end of file diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/exception/InstalledSchemaTooOldException.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/exception/InstalledSchemaTooOldException.java index 4da863b..db4bfbf 100644 --- a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/exception/InstalledSchemaTooOldException.java +++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/exception/InstalledSchemaTooOldException.java @@ -29,10 +29,12 @@ package org.rhq.cassandra.schema.exception; /** * @author Stefan Negrea */ +@SuppressWarnings("serial") public class InstalledSchemaTooOldException extends Exception {
public InstalledSchemaTooOldException() { super( - "Storage schema needs to be updated. The schema manager contains updates not yet applied to the storage cluster installation."); + "Storage schema is not properly updated. The schema manager contains updates not yet applied to the storage cluster installation." + + "Please re-run the server installer to update the current schema to the latest version."); } } \ No newline at end of file diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/exception/SchemaNotInstalledException.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/exception/SchemaNotInstalledException.java new file mode 100644 index 0000000..63e9013 --- /dev/null +++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/exception/SchemaNotInstalledException.java @@ -0,0 +1,38 @@ +/* + * + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation, and/or the GNU Lesser + * General Public License, version 2.1, also as published by the Free + * Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License and the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * and the GNU Lesser General Public License along with this program; + * if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + */ + +package org.rhq.cassandra.schema.exception; + + +/** + * @author Stefan Negrea + */ +@SuppressWarnings("serial") +public class SchemaNotInstalledException extends Exception { + + public SchemaNotInstalledException() { + super("Storage schema is not properly installed. Please re-run the server installer to install storage schema."); + } +} \ No newline at end of file
commit c50b47c2ba50ddc9640ec6060313e36aaa86b44f Author: Stefan Negrea snegrea@redhat.com Date: Wed Aug 14 14:37:26 2013 -0500
[BZ 991598] Add basica node configuration validation to the composite class. It validates heap settings as well as port settings.
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeConfigurationComposite.java b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeConfigurationComposite.java index d7d7b7d..fccae9b 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeConfigurationComposite.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeConfigurationComposite.java @@ -115,6 +115,54 @@ public class StorageNodeConfigurationComposite implements Serializable { this.heapNewSize = heapNewSize; }
+ public boolean validate() { + //validate heap settings + boolean validHeap = false; + + String heapSize = getHeapSize() == null ? null : (getHeapSize().trim().length() == 0 ? null : getHeapSize() + .trim().toLowerCase()); + String heapNewSize = getHeapNewSize() == null ? null : (getHeapNewSize().trim().length() == 0 ? null + : getHeapNewSize().trim().toLowerCase()); + + if (heapSize == null && heapNewSize == null) { + validHeap = true; + } else if (heapSize != null && heapNewSize != null) { + try { + int heapSizeParsed = 0; + if (heapSize.contains("g")) { + heapSizeParsed = Integer.parseInt(heapSize.replace("g", "")) * 1024; + } else if (heapSize.contains("m")) { + heapSizeParsed = Integer.parseInt(heapSize.toLowerCase().replace("m", "")); + } else { + throw new IllegalArgumentException(); + } + + int heapNewSizeParsed = 0; + if (heapNewSize.contains("g")) { + heapNewSizeParsed = Integer.parseInt(heapNewSize.replace("g", "")) * 1024; + } else if (heapNewSize.contains("m")) { + heapNewSizeParsed = Integer.parseInt(heapNewSize.toLowerCase().replace("m", "")); + } else { + throw new IllegalArgumentException(); + } + + if (heapNewSizeParsed < heapSizeParsed) { + validHeap = true; + } + } catch (Exception e) { + //Nothing to do heap settings are not valid since parsing failed at some point + } + } + + //validate JMX Port + boolean validJMXPort = false; + if (this.getJmxPort() < 65535) { + validJMXPort = true; + } + + return validHeap && validJMXPort; + } + /* (non-Javadoc) * @see java.lang.Object#toString() */ diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index d7085da..aa6f78f 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ 
b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -153,7 +153,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN private ResourceManagerLocal resourceManager;
@EJB - private StorageClusterSettingsManagerLocal storageClusterSettingsManager; + private StorageClusterSettingsManagerBean storageClusterSettingsManager;
@EJB private StorageNodeOperationsHandlerLocal storageNodeOperationsHandler; @@ -575,7 +575,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
@Override public Integer[] findResourcesWithAlertDefinitions(StorageNode storageNode) { - List<StorageNode> initialStorageNodes = null; + List<StorageNode> initialStorageNodes = getStorageNodes(); if (storageNode == null) { initialStorageNodes = getStorageNodes(); } else { @@ -625,7 +625,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
return configuration; } - + @Override @Asynchronous public void updateConfigurationAsync(Subject subject, StorageNodeConfigurationComposite storageNodeConfiguration) {
commit d4de10efdbd458ece2beaf9444c1a61b17f4ef00 Author: Mike Thompson mithomps@redhat.com Date: Wed Aug 14 10:32:20 2013 -0700
Add GraphMarker interface to mark places in the code where graphs are (later) injected by d3 javascript code.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/GraphMarker.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/GraphMarker.java new file mode 100644 index 0000000..da88999 --- /dev/null +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/GraphMarker.java @@ -0,0 +1,32 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ +package org.rhq.enterprise.gui.coregui.client; + +/** + * Defines the first step in the 2 step approach to creating graphs. 
+ * <ol> + * <li>Step 1: Create the graph marker in the page.</li> + * <li>Step 2: Create and Attach a graph to graph marker via HasD3MetricJsniChart.drawJsniChart</li> + * </ol> + * + * @author Mike Thompson + */ +public interface GraphMarker { + String createGraphMarker(); +} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java index 59d4c75..eb20d40 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java @@ -40,7 +40,7 @@ import org.rhq.enterprise.gui.coregui.client.util.MeasurementConverterClient; */ public class AvailabilityOverUnderGraphType implements AvailabilityGraphType {
- private static Messages MSG = CoreGUI.getMessages(); + private static final Messages MSG = CoreGUI.getMessages(); private List<Availability> availabilityList; private List<ResourceGroupAvailability> groupAvailabilityList; private Integer entityId; diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java index 72438fd6..2099bfc 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java @@ -23,6 +23,7 @@ import java.util.List;
import com.smartgwt.client.widgets.HTMLFlow;
+import org.rhq.enterprise.gui.coregui.client.GraphMarker; import org.rhq.enterprise.gui.coregui.client.util.Log;
/** @@ -32,7 +33,7 @@ import org.rhq.enterprise.gui.coregui.client.util.Log; * * @author Mike Thompson */ -public class AvailabilitySummaryPieGraphType { +public class AvailabilitySummaryPieGraphType implements GraphMarker{
public static final int HEIGHT = 75; public static final int WIDTH = 75; @@ -42,7 +43,14 @@ public class AvailabilitySummaryPieGraphType { public AvailabilitySummaryPieGraphType() { }
- public HTMLFlow createGraphMarker() { + public HTMLFlow addGraphMarkerMember(){ + HTMLFlow graphFlow = new HTMLFlow(createGraphMarker()); + graphFlow.setWidth(WIDTH); + graphFlow.setHeight(HEIGHT); + return graphFlow; + + } + public String createGraphMarker() { Log.debug("drawGraph marker in AvailabilitySummaryPieGraph");
StringBuilder divAndSvgDefs = new StringBuilder(); @@ -50,10 +58,7 @@ public class AvailabilitySummaryPieGraphType { divAndSvgDefs.append("<svg xmlns="http://www.w3.org/2000/svg%5C" version="1.1" style="height:" + HEIGHT + "px;" ></svg>"); divAndSvgDefs.append("</div>"); - HTMLFlow graphFlow = new HTMLFlow(divAndSvgDefs.toString()); - graphFlow.setWidth(WIDTH); - graphFlow.setHeight(HEIGHT); - return graphFlow; + return divAndSvgDefs.toString(); }
public void setAvailabilityData(String upLabel, double upPercent, String downLabel, double downPercent, diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/MetricD3Graph.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/MetricD3Graph.java index 4dad9b1..20eb39b 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/MetricD3Graph.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/MetricD3Graph.java @@ -21,6 +21,7 @@ package org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitori import com.google.gwt.user.client.Timer; import com.smartgwt.client.widgets.HTMLFlow;
+import org.rhq.enterprise.gui.coregui.client.GraphMarker; import org.rhq.enterprise.gui.coregui.client.inventory.common.AbstractD3GraphListView; import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.Refreshable; import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.StackedBarMetricGraphImpl; @@ -31,7 +32,7 @@ import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; * A D3 graph implementation for graphing Resource metrics. * Just the graph only. No avail graph no buttons just he graph. */ -public class MetricD3Graph<T extends AbstractD3GraphListView> extends EnhancedVLayout implements Refreshable { +public class MetricD3Graph<T extends AbstractD3GraphListView> extends EnhancedVLayout implements Refreshable,GraphMarker {
protected StackedBarMetricGraphImpl graph; private HTMLFlow graphDiv = null; diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/avail/AvailabilityD3GraphView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/avail/AvailabilityD3GraphView.java index 6e3b590..2c3a4c7 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/avail/AvailabilityD3GraphView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/avail/AvailabilityD3GraphView.java @@ -24,6 +24,7 @@ import com.smartgwt.client.widgets.HTMLFlow;
import org.rhq.core.domain.measurement.Availability; import org.rhq.core.domain.resource.group.composite.ResourceGroupAvailability; +import org.rhq.enterprise.gui.coregui.client.GraphMarker; import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.AvailabilityGraphType; import org.rhq.enterprise.gui.coregui.client.util.Log; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; @@ -34,7 +35,7 @@ import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; * * @author Mike Thompson */ -public class AvailabilityD3GraphView<T extends AvailabilityGraphType> extends EnhancedVLayout { +public class AvailabilityD3GraphView<T extends AvailabilityGraphType> extends EnhancedVLayout implements GraphMarker {
protected T availabilityGraphType;
@@ -49,14 +50,14 @@ public class AvailabilityD3GraphView<T extends AvailabilityGraphType> extends En protected void onDraw() { super.onDraw(); removeMembers(getMembers()); - createGraphMarker(); + addGraphMarkerComponent(); }
@Override public void parentResized() { super.parentResized(); removeMembers(getMembers()); - createGraphMarker(); + addGraphMarkerComponent(); drawJsniChart(); }
@@ -76,7 +77,7 @@ public class AvailabilityD3GraphView<T extends AvailabilityGraphType> extends En * */
- public void createGraphMarker() { + public String createGraphMarker() { Log.debug("drawGraph marker in AvailabilityD3Graph for: " + availabilityGraphType.getChartId());
StringBuilder divAndSvgDefs = new StringBuilder(); @@ -99,11 +100,14 @@ public class AvailabilityD3GraphView<T extends AvailabilityGraphType> extends En divAndSvgDefs.append("</div>"); divAndSvgDefs.append("</div>"); // end availTooltipDiv divAndSvgDefs.append("</div>"); - HTMLFlow graph = new HTMLFlow(divAndSvgDefs.toString()); + return divAndSvgDefs.toString(); + } + + public void addGraphMarkerComponent(){ + HTMLFlow graph = new HTMLFlow(createGraphMarker()); graph.setWidth100(); graph.setHeight(65); addMember(graph); - }
/** diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/ResourceMetricAvailabilityView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/ResourceMetricAvailabilityView.java index 5835f61..a058e4f 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/ResourceMetricAvailabilityView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/ResourceMetricAvailabilityView.java @@ -140,7 +140,7 @@ public class ResourceMetricAvailabilityView extends EnhancedVLayout {
CanvasItem availPieChartItem = new CanvasItem(); availPieChartItem.setTitle(MSG.pie_chart_availability_summary_label()); - availPieChartItem.setCanvas(availabilitySummaryPieGraph.createGraphMarker()); + availPieChartItem.setCanvas(availabilitySummaryPieGraph.addGraphMarkerMember()); availPieChartItem.setRowSpan(3); availPieChartItem.setVAlign(VerticalAlignment.TOP); availPieChartItem.setTitleVAlign(VerticalAlignment.TOP);
commit 986f4f49243702f9e21bc223e651a4a040bfcb13 Merge: 714fb04 0ac6f0c Author: Simeon Pinder spinder@fulliautomatix.conchfritter.com Date: Wed Aug 14 13:32:53 2013 -0400
Merging master into nightly/rhq.jon.
diff --cc modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index e925de2,d7085da..5f961a6 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@@ -625,14 -625,14 +625,20 @@@ public class StorageNodeManagerBean imp
return configuration; } + + @Override + @Asynchronous + public void updateConfigurationAsync(Subject subject, StorageNodeConfigurationComposite storageNodeConfiguration) { + updateConfiguration(subject, storageNodeConfiguration); + }
@Override + @Asynchronous + public void updateConfigurationAsync(Subject subject, StorageNodeConfigurationComposite storageNodeConfiguration) { + updateConfiguration(subject, storageNodeConfiguration); + } + + @Override public boolean updateConfiguration(Subject subject, StorageNodeConfigurationComposite storageNodeConfiguration) { try { StorageNode storageNode = findStorageNodeByAddress(InetAddress.getByName(
commit 0ac6f0c384a9c10c6da88c05fb60de67a6478482 Author: jfclere jfclere@neo2.gva.redhat.com Date: Mon Apr 15 17:07:28 2013 +0200
[BZ 865460] Cannot add a Group to tomcat's UserDatabase
diff --git a/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatUserDatabaseComponent.java b/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatUserDatabaseComponent.java index 0a95069..ead4956 100644 --- a/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatUserDatabaseComponent.java +++ b/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatUserDatabaseComponent.java @@ -51,6 +51,7 @@ public class TomcatUserDatabaseComponent extends MBeanResourceComponent<TomcatSe if (TomcatGroupComponent.RESOURCE_TYPE_NAME.equals(resourceTypeName)) { name = report.getResourceConfiguration().getSimple("groupname").getStringValue(); newRoles = report.getResourceConfiguration().getSimple(TomcatGroupComponent.CONFIG_ROLES); + report.getResourceConfiguration().remove(TomcatGroupComponent.CONFIG_ROLES); objectName = String.format("Users:type=Group,groupname="%s",database=UserDatabase", name); operation = "createGroup"; } else if (TomcatRoleComponent.RESOURCE_TYPE_NAME.equals(resourceTypeName)) {
commit f625bff9b253beb4f4810109b5f4f964b5a73f1e Author: jfclere jfclere@neo2.gva.redhat.com Date: Mon Apr 15 16:28:19 2013 +0200
[BZ 921194] Additional corrections by lfuka.
diff --git a/modules/plugins/tomcat/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/tomcat/src/main/resources/META-INF/rhq-plugin.xml index 653736f..2fd8931 100644 --- a/modules/plugins/tomcat/src/main/resources/META-INF/rhq-plugin.xml +++ b/modules/plugins/tomcat/src/main/resources/META-INF/rhq-plugin.xml @@ -734,7 +734,9 @@ <c:simple-property name="name" type="string" - readOnly="true" /> + readOnly="true" + default="Default Tomcat Connector name" + description="Connector name."/> <c:simple-property name="port" type="string" @@ -748,8 +750,8 @@ <c:simple-property name="connector" type="string" - description="Connector protocol connector. Note: Only available on Tomcat 7." - required="false" + description="Connector protocol connector." + default="Default connector" readOnly="true" /> <c:simple-property name="address"
commit f04c9ef98291ed1cfe44d4c752dba7234f03672b Author: jfclere jfclere@neo2.gva.redhat.com Date: Wed Apr 10 15:26:38 2013 +0200
[BZ 921261] WebModule is reported as DOWN or UNAVAILABLE ... from da0179ab26a0c3e3a50238e9997147864e5a759b
diff --git a/modules/plugins/tomcat/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/tomcat/src/main/resources/META-INF/rhq-plugin.xml index 97f410a..653736f 100644 --- a/modules/plugins/tomcat/src/main/resources/META-INF/rhq-plugin.xml +++ b/modules/plugins/tomcat/src/main/resources/META-INF/rhq-plugin.xml @@ -748,7 +748,8 @@ <c:simple-property name="connector" type="string" - description="Connector protocol connector." + description="Connector protocol connector. Note: Only available on Tomcat 7." + required="false" readOnly="true" /> <c:simple-property name="address"
commit a978c9b911b4198f2a5d06477a11fb4811a1fb46 Author: jfclere jfclere@neo2.gva.redhat.com Date: Wed Apr 10 15:22:52 2013 +0200
[BZ 921261] WebModule is reported as DOWN or UNAVAILABLE ... from 00e594847fe67da46f8976df58b5d2324d6ebb48
diff --git a/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatWarComponent.java b/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatWarComponent.java index 2a39e25..0a51c45 100644 --- a/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatWarComponent.java +++ b/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatWarComponent.java @@ -370,7 +370,23 @@ public class TomcatWarComponent extends MBeanResourceComponent<TomcatVHostCompon mbeanOperation.invoke(paramValues);
if (!WarOperation.DESTROY.equals(operation)) { - String state = (String) this.webModuleMBean.getAttribute("stateName").refresh(); + String state = null; + try { + // check to see if the mbean is truly active + state = (String) this.webModuleMBean.getAttribute("stateName").refresh(); + } catch (Exception e) { + // if not active an exception may be thrown + state = WarMBeanState.STOPPED; + // try "state" for Tomcat 5.5 + try { + int stateInt = (Integer) this.webModuleMBean.getAttribute("state").refresh(); + if (stateInt == 1) { + state = WarMBeanState.STARTED; + } + } catch (Exception ex) { + // Ignore + } + } String expectedState = getExpectedPostExecutionState(operation); if (!state.equals(expectedState)) { throw new Exception("Failed to " + name + " webapp (value of the 'state' attribute of MBean '"
commit 0aec2ecc35daaaef6d845cd72eb53b377a7deeef Author: jfclere jfclere@neo2.gva.redhat.com Date: Wed Apr 10 15:17:22 2013 +0200
[BZ 921194] Connectors are not properly discovered and therefore are unavailable.. from 00e594847fe67da46f8976df58b5d2324d6ebb48.
diff --git a/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatConnectorDiscoveryComponent.java b/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatConnectorDiscoveryComponent.java index 7a65a73..32566d5 100644 --- a/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatConnectorDiscoveryComponent.java +++ b/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatConnectorDiscoveryComponent.java @@ -132,12 +132,14 @@ public class TomcatConnectorDiscoveryComponent extends MBeanResourceDiscoveryCom if (connectorON != null) { EmsBean connectorBean = connection.getBean(connectorON); EmsAttribute executorNameAttrib = connectorBean.getAttribute("executorName"); - Object executorNameValue = executorNameAttrib.getValue(); - if (executorNameValue != null) { - String executorName = executorNameValue.toString(); - if (!executorName.isEmpty() && !executorName.equalsIgnoreCase("Internal")) { - pluginConfiguration.put(new PropertySimple( - TomcatConnectorComponent.PLUGIN_CONFIG_SHARED_EXECUTOR, executorName)); + if (executorNameAttrib != null) { + Object executorNameValue = executorNameAttrib.getValue(); + if (executorNameValue != null) { + String executorName = executorNameValue.toString(); + if (!executorName.isEmpty() && !executorName.equalsIgnoreCase("Internal")) { + pluginConfiguration.put(new PropertySimple( + TomcatConnectorComponent.PLUGIN_CONFIG_SHARED_EXECUTOR, executorName)); + } } } }
commit 07dd8cdca9c98093bd1586bd772b866cac286ea1 Author: jfclere jfclere@neo2.gva.redhat.com Date: Wed Apr 10 15:13:02 2013 +0200
Fix BZ 865460 from 417fbb59817edf64a93d3cca00f2c51926379ab2
Conflicts:
modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatUserDatabaseComponent.java
diff --git a/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatUserDatabaseComponent.java b/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatUserDatabaseComponent.java index ead4956..0a95069 100644 --- a/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatUserDatabaseComponent.java +++ b/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatUserDatabaseComponent.java @@ -51,7 +51,6 @@ public class TomcatUserDatabaseComponent extends MBeanResourceComponent<TomcatSe if (TomcatGroupComponent.RESOURCE_TYPE_NAME.equals(resourceTypeName)) { name = report.getResourceConfiguration().getSimple("groupname").getStringValue(); newRoles = report.getResourceConfiguration().getSimple(TomcatGroupComponent.CONFIG_ROLES); - report.getResourceConfiguration().remove(TomcatGroupComponent.CONFIG_ROLES); objectName = String.format("Users:type=Group,groupname="%s",database=UserDatabase", name); operation = "createGroup"; } else if (TomcatRoleComponent.RESOURCE_TYPE_NAME.equals(resourceTypeName)) {
commit 4db84715b9e8597d600d9811710e734f4b10cc16 Author: jfclere jfclere@neo2.gva.redhat.com Date: Wed Apr 10 14:33:38 2013 +0200
fix for BZ: 707349 from e7d48240474fba87f1a3c4118de4618fd2c8b32d.
Conflicts:
modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatConnectorDiscoveryComponent.java modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatWarComponent.java modules/plugins/tomcat/src/main/resources/META-INF/rhq-plugin.xml
diff --git a/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatConnectorDiscoveryComponent.java b/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatConnectorDiscoveryComponent.java index 32566d5..7a65a73 100644 --- a/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatConnectorDiscoveryComponent.java +++ b/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatConnectorDiscoveryComponent.java @@ -132,14 +132,12 @@ public class TomcatConnectorDiscoveryComponent extends MBeanResourceDiscoveryCom if (connectorON != null) { EmsBean connectorBean = connection.getBean(connectorON); EmsAttribute executorNameAttrib = connectorBean.getAttribute("executorName"); - if (executorNameAttrib != null) { - Object executorNameValue = executorNameAttrib.getValue(); - if (executorNameValue != null) { - String executorName = executorNameValue.toString(); - if (!executorName.isEmpty() && !executorName.equalsIgnoreCase("Internal")) { - pluginConfiguration.put(new PropertySimple( - TomcatConnectorComponent.PLUGIN_CONFIG_SHARED_EXECUTOR, executorName)); - } + Object executorNameValue = executorNameAttrib.getValue(); + if (executorNameValue != null) { + String executorName = executorNameValue.toString(); + if (!executorName.isEmpty() && !executorName.equalsIgnoreCase("Internal")) { + pluginConfiguration.put(new PropertySimple( + TomcatConnectorComponent.PLUGIN_CONFIG_SHARED_EXECUTOR, executorName)); } } } diff --git a/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatWarComponent.java b/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatWarComponent.java index 0a51c45..2a39e25 100644 --- a/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatWarComponent.java +++ b/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatWarComponent.java @@ -370,23 +370,7 @@ public class TomcatWarComponent extends MBeanResourceComponent<TomcatVHostCompon 
mbeanOperation.invoke(paramValues);
if (!WarOperation.DESTROY.equals(operation)) { - String state = null; - try { - // check to see if the mbean is truly active - state = (String) this.webModuleMBean.getAttribute("stateName").refresh(); - } catch (Exception e) { - // if not active an exception may be thrown - state = WarMBeanState.STOPPED; - // try "state" for Tomcat 5.5 - try { - int stateInt = (Integer) this.webModuleMBean.getAttribute("state").refresh(); - if (stateInt == 1) { - state = WarMBeanState.STARTED; - } - } catch (Exception ex) { - // Ignore - } - } + String state = (String) this.webModuleMBean.getAttribute("stateName").refresh(); String expectedState = getExpectedPostExecutionState(operation); if (!state.equals(expectedState)) { throw new Exception("Failed to " + name + " webapp (value of the 'state' attribute of MBean '" diff --git a/modules/plugins/tomcat/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/tomcat/src/main/resources/META-INF/rhq-plugin.xml index 653736f..97f410a 100644 --- a/modules/plugins/tomcat/src/main/resources/META-INF/rhq-plugin.xml +++ b/modules/plugins/tomcat/src/main/resources/META-INF/rhq-plugin.xml @@ -748,8 +748,7 @@ <c:simple-property name="connector" type="string" - description="Connector protocol connector. Note: Only available on Tomcat 7." - required="false" + description="Connector protocol connector." readOnly="true" /> <c:simple-property name="address"
commit 5dbaed3e13f7ab06153aec5999c7eb4f1bc0f0b3 Author: Jirka Kremser jkremser@redhat.com Date: Wed Aug 14 19:19:52 2013 +0200
Alert view for a single storage node and its child resources.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java index cd0ec54..c99cf70c 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java @@ -218,16 +218,18 @@ public class StorageNodeAdminView extends EnhancedVLayout implements/* HasViewNa } }.run(); } - -// @Override -// public ViewName getViewName() { -// return VIEW_ID; -// }
public static String getAlertsString(String prefix, int numOfUnackAlerts) { - return prefix - + (numOfUnackAlerts != 0 ? " <font color='#CC0000;'>(" + numOfUnackAlerts + ")</font>" : " (" + return getAlertsString(prefix, -1, numOfUnackAlerts); + } + + public static String getAlertsString(String prefix, int storageNodeId, int numOfUnackAlerts) { + String detailsUrl = "#" + VIEW_PATH + "/" + storageNodeId + "/Alerts"; + detailsUrl = StringUtility.escapeHtml(detailsUrl); + String value = prefix + + (numOfUnackAlerts != 0 ? " <span style='color: #CC0000;'>(" + numOfUnackAlerts + ")</span>" : " (" + numOfUnackAlerts + ")"); + return storageNodeId == -1 ? value : LinkManager.getHref(detailsUrl, value); }
private static final class TabInfo { @@ -274,7 +276,7 @@ public class StorageNodeAdminView extends EnhancedVLayout implements/* HasViewNa showTab(alertsTabInfo); } else if (backupTabInfo.name.getName().equals(viewId)) { showTab(backupTabInfo); - } else { + } else { //default showTab(tableTabInfo); table.renderView(viewPath); } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAlertHistoryView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAlertHistoryView.java index 0821585..9fa55a6 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAlertHistoryView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAlertHistoryView.java @@ -20,9 +20,11 @@ package org.rhq.enterprise.gui.coregui.client.admin.storage;
import java.util.ArrayList;
+import com.google.gwt.user.client.rpc.AsyncCallback; import com.smartgwt.client.data.Record; import com.smartgwt.client.types.GroupStartOpen; import com.smartgwt.client.types.ImageStyle; +import com.smartgwt.client.widgets.HTMLFlow; import com.smartgwt.client.widgets.Img; import com.smartgwt.client.widgets.grid.CellFormatter; import com.smartgwt.client.widgets.grid.ListGrid; @@ -30,7 +32,10 @@ import com.smartgwt.client.widgets.grid.ListGridField; import com.smartgwt.client.widgets.grid.ListGridRecord; import com.smartgwt.client.widgets.grid.SummaryFunction;
+import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.criteria.AlertCriteria; +import org.rhq.core.domain.criteria.StorageNodeCriteria; +import org.rhq.core.domain.util.PageList; import org.rhq.enterprise.gui.coregui.client.CoreGUI; import org.rhq.enterprise.gui.coregui.client.ImageManager; import org.rhq.enterprise.gui.coregui.client.LinkManager; @@ -38,6 +43,7 @@ import org.rhq.enterprise.gui.coregui.client.alert.AlertDataSource; import org.rhq.enterprise.gui.coregui.client.alert.AlertHistoryView; import org.rhq.enterprise.gui.coregui.client.components.table.AbstractTableAction; import org.rhq.enterprise.gui.coregui.client.components.table.TableActionEnablement; +import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; import org.rhq.enterprise.gui.coregui.client.inventory.resource.AncestryUtil; import org.rhq.enterprise.gui.coregui.client.util.StringUtility;
@@ -47,9 +53,45 @@ import org.rhq.enterprise.gui.coregui.client.util.StringUtility; */ public class StorageNodeAlertHistoryView extends AlertHistoryView { private boolean isGouped = true; + private final HTMLFlow header; + private final int storageNodeId;
public StorageNodeAlertHistoryView(String tableTitle, int[] resourceIds) { + this(tableTitle, resourceIds, null, -1); + } + + public StorageNodeAlertHistoryView(String tableTitle, int[] resourceIds, HTMLFlow header, int storageNodeId) { super(tableTitle, resourceIds); + this.header = header; + this.storageNodeId = storageNodeId; + } + + @Override + protected void onInit() { + super.onInit(); + if (header != null && storageNodeId != -1) { + StorageNodeCriteria criteria = new StorageNodeCriteria(); + criteria.addFilterId(storageNodeId); + GWTServiceLookup.getStorageService().findStorageNodesByCriteria(criteria, + new AsyncCallback<PageList<StorageNode>>() { + public void onSuccess(final PageList<StorageNode> storageNodes) { + if (storageNodes == null || storageNodes.isEmpty() || storageNodes.size() != 1) { + CoreGUI.getErrorHandler().handleError( + MSG.view_adminTopology_message_fetchServerFail(String.valueOf(storageNodeId))); + } + final StorageNode node = storageNodes.get(0); + header + .setContents("<div style='text-align: center; font-weight: bold; font-size: medium;'> Storage Node (" + + node.getAddress() + ")</div>"); + } + + public void onFailure(Throwable caught) { + CoreGUI.getErrorHandler().handleError( + MSG.view_adminTopology_message_fetchServerFail(String.valueOf(storageNodeId)) + " " + + caught.getMessage(), caught); + } + }); + } }
@Override diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java index 7a777b1..e085eb1 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java @@ -215,8 +215,8 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNodeLoadComposit } } int value = from.getUnackAlerts(); - record.setAttribute(FIELD_ALERTS.propertyName(), "New Alerts" - + (value != 0 ? " <span style='color: #CC0000;'>(" + value + ")</span>" : " (" + value + ")")); + record.setAttribute(FIELD_ALERTS.propertyName(), + StorageNodeAdminView.getAlertsString("New Alerts", node.getId(), value)); String memory = null; if (from.getHeapPercentageUsed() != null && from.getHeapPercentageUsed().getAggregate().getAvg() != null) memory = MeasurementConverterClient.format(from.getHeapPercentageUsed().getAggregate().getAvg(), from diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java index a5ae6eb..dc500f0 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java @@ -36,6 +36,7 @@ import com.google.gwt.user.client.Timer; import com.google.gwt.user.client.rpc.AsyncCallback; import com.smartgwt.client.types.Overflow; import com.smartgwt.client.types.VisibilityMode; +import 
com.smartgwt.client.widgets.Canvas; import com.smartgwt.client.widgets.HTMLFlow; import com.smartgwt.client.widgets.form.DynamicForm; import com.smartgwt.client.widgets.form.fields.FormItem; @@ -51,6 +52,7 @@ import org.rhq.core.domain.measurement.composite.MeasurementDataNumericHighLowCo import org.rhq.core.domain.resource.Resource; import org.rhq.core.domain.resource.composite.ResourceComposite; import org.rhq.core.domain.util.PageList; +import org.rhq.core.domain.util.collection.ArrayUtils; import org.rhq.enterprise.gui.coregui.client.BookmarkableView; import org.rhq.enterprise.gui.coregui.client.CoreGUI; import org.rhq.enterprise.gui.coregui.client.LinkManager; @@ -63,6 +65,7 @@ import org.rhq.enterprise.gui.coregui.client.util.Log; import org.rhq.enterprise.gui.coregui.client.util.MeasurementUtility; import org.rhq.enterprise.gui.coregui.client.util.StringUtility; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedHLayout; +import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedUtility; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; import org.rhq.enterprise.gui.coregui.client.util.message.Message;
@@ -85,6 +88,7 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab private SectionStackSection detailsAndLoadSection; private StaticTextItem alertsItem; private HTMLFlow header; + private boolean alerts = false;
private volatile int initSectionCount = 0; private int unackAlerts = -1; @@ -107,6 +111,9 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab @Override protected void onInit() { super.onInit(); + if (alerts) { + return; + } StorageNodeCriteria criteria = new StorageNodeCriteria(); criteria.addFilterId(storageNodeId); criteria.fetchResource(true); @@ -189,7 +196,7 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab } else { unackAlerts = result.get(0); if (alertsItem != null) { - alertsItem.setValue(StorageNodeAdminView.getAlertsString("New Alerts", unackAlerts)); + alertsItem.setValue(StorageNodeAdminView.getAlertsString("New Alerts", storageNodeId, unackAlerts)); } } } @@ -203,6 +210,9 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab @Override protected void onDraw() { super.onDraw(); + if (alerts) { + return; + }
// wait until we have all of the sections before we show them. We don't use InitializableView because, // it seems they are not supported (in the applicable renderView()) at this level. @@ -284,8 +294,9 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab TimestampCellFormatter.DATE_TIME_FORMAT_LONG));
alertsItem = new StaticTextItem(FIELD_ALERTS.propertyName(), FIELD_ALERTS.title()); + alertsItem.setPrompt("The number in brackets represents the number of unacknowledged alerts for this storage node."); if (unackAlerts != -1) { - alertsItem.setValue(StorageNodeAdminView.getAlertsString("New Alerts", unackAlerts)); + alertsItem.setValue(StorageNodeAdminView.getAlertsString("New Alerts", storageNodeId, unackAlerts)); }
StaticTextItem memoryStatusItem = new StaticTextItem("memoryStatus", "Memory"); @@ -341,9 +352,9 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab }
private void prepareResourceConfigEditor(final StorageNodeConfigurationComposite configuration) { - LayoutSpacer spacer = new LayoutSpacer(); - spacer.setHeight(15); - StorageNodeConfigurationEditor editorView = new StorageNodeConfigurationEditor(configuration); + LayoutSpacer spacer = new LayoutSpacer(); + spacer.setHeight(15); + StorageNodeConfigurationEditor editorView = new StorageNodeConfigurationEditor(configuration); SectionStackSection section = new SectionStackSection("Configuration"); section.setItems(spacer, editorView); section.setExpanded(true); @@ -353,10 +364,40 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab initSectionCount++; }
+ private void showAlertsForSingleStorageNode() { + GWTServiceLookup.getStorageService().findResourcesWithAlertDefinitions(new StorageNode(storageNodeId), + new AsyncCallback<Integer[]>() { + @Override + public void onFailure(Throwable caught) { + alerts = false; + Message message = new Message("foobar", Message.Severity.Warning); +// CoreGUI.goToView(VIEW_ID.getName(), message); + } + + @Override + public void onSuccess(Integer[] result) { + if (result == null || result.length == 0) { + onFailure(new Exception("foobaz")); + } else { + removeMember(sectionStack); + sectionStack.destroy(); + int[] resIds = ArrayUtils.unwrapArray(result); + Canvas alertsView = new StorageNodeAlertHistoryView("storageNode_" + storageNodeId + "_Alerts", + resIds, header, storageNodeId); + addMember(alertsView); + } + } + }); + } + @Override public void renderView(ViewPath viewPath) { -// if (viewPath.toString().endsWith("/Config")) { -// } + if (viewPath.toString().endsWith("/Alerts")) { + alerts = true; + showAlertsForSingleStorageNode(); + } else { + alerts = false; + } Log.debug("StorageNodeDetailView: " + viewPath); } } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java index bd415b2..efe56e3 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java @@ -83,7 +83,7 @@ public class StorageNodeTableView extends TableSection<StorageNodeDatasource> { @Override protected void doOnDraw() { super.doOnDraw(); - scheduleUnacknowledgedAlertsPollingJob(getListGrid()); +// scheduleUnacknowledgedAlertsPollingJob(getListGrid()); }
@Override @@ -153,8 +153,9 @@ public class StorageNodeTableView extends TableSection<StorageNodeDatasource> { public void onSuccess(List<Integer> result) { for (int i = 0; i < records.length; i++) { int value = result.get(i); + int storageNodeId = records[i].getAttributeAsInt("id"); records[i].setAttribute(FIELD_ALERTS.propertyName(), - StorageNodeAdminView.getAlertsString("New Alerts", value)); + StorageNodeAdminView.getAlertsString("New Alerts", storageNodeId, value)); listGrid.setData(records); } schedule(15 * 1000); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java index 957bf34..38829a5 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java @@ -76,6 +76,8 @@ public interface StorageGWTService extends RemoteService {
Integer[] findResourcesWithAlertDefinitions() throws RuntimeException;
+ Integer[] findResourcesWithAlertDefinitions(StorageNode storageNode) throws RuntimeException; + int findNotAcknowledgedStorageNodeAlertsCount() throws RuntimeException;
List<Integer> findNotAcknowledgedStorageNodeAlertsCounts(List<Integer> storageNodeIds) throws RuntimeException; diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java index f7f7442..b7437e3 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java @@ -116,6 +116,15 @@ public class StorageGWTServiceImpl extends AbstractGWTServiceImpl implements Sto }
@Override + public Integer[] findResourcesWithAlertDefinitions(StorageNode storageNode) throws RuntimeException { + try { + return storageNodeManager.findResourcesWithAlertDefinitions(storageNode); + } catch (Throwable t) { + throw getExceptionToThrowToClient(t); + } + } + + @Override public int findNotAcknowledgedStorageNodeAlertsCount() throws RuntimeException { try { return storageNodeManager.findNotAcknowledgedStorageNodeAlerts(getSessionSubject()).size(); diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index e925de2..d7085da 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -575,7 +575,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
@Override public Integer[] findResourcesWithAlertDefinitions(StorageNode storageNode) { - List<StorageNode> initialStorageNodes = getStorageNodes(); + List<StorageNode> initialStorageNodes = null; if (storageNode == null) { initialStorageNodes = getStorageNodes(); } else { @@ -625,7 +625,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
return configuration; } - + @Override @Asynchronous public void updateConfigurationAsync(Subject subject, StorageNodeConfigurationComposite storageNodeConfiguration) {
commit 1e826c93f260fb53f8befc5ab9a3b9a74dfbee75 Author: Jay Shaughnessy jshaughn@redhat.com Date: Wed Aug 14 11:20:02 2013 -0400
remove entries for stuff still in a branch
diff --git a/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml index 5182ef1..617593a 100644 --- a/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml +++ b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml @@ -67,7 +67,7 @@ <difference> <className>org/rhq/enterprise/server/bundle/BundleManagerRemote</className> <differenceType>7012</differenceType> <!-- method added to an interface --> - <method>org.rhq.core.domain.bundle.BundleGroup createBundleGroup(org.rhq.core.domain.auth.Subject, org.rhq.core.domain.bundle.BundleGroup)</method> + <method>org.rhq.core.domain.bundle.BundleGroup createBundleGroup(org.rhq.core.domain.auth.Subject, java.lang.String, java.lang.String)</method> <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> </difference>
@@ -162,11 +162,4 @@ <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> </difference>
- <difference> - <className>org/rhq/enterprise/server/bundle/BundleManagerRemote</className> - <differenceType>7012</differenceType> <!-- method added to an interface --> - <method>org.rhq.core.domain.bundle.BundleGroup updateBundleGroup(org.rhq.core.domain.auth.Subject, org.rhq.core.domain.bundle.BundleGroup)</method> - <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> - </difference> - </differences>
commit 07bead42ccd636be67b717e9922f5a4eb134cff8 Author: Thomas Segismont tsegismo@redhat.com Date: Wed Aug 14 11:13:09 2013 +0200
Bug 956329 - Create new Managed Server using EAP 6 plug-in exposes server groups and socket binding groups from other resources
Introduced new expressionScope attribute in RHQ config XSD for option source node (updated config parser, PropertyOptionSource entity and database schema accordingly)
Updated ConfigurationGWTService and ConfigurationManagerBean: now when filling enumeratedValues we look at expressionScope attribute to narrow down search results if expressionScope is 'baseResource'
Updated as7 plugin to use new expressionScope attribute for Managed Server resource configuration attributes
diff --git a/modules/core/client-api/src/main/java/org/rhq/core/clientapi/agent/metadata/ConfigurationMetadataParser.java b/modules/core/client-api/src/main/java/org/rhq/core/clientapi/agent/metadata/ConfigurationMetadataParser.java index 4fa4558e..35ef00a 100644 --- a/modules/core/client-api/src/main/java/org/rhq/core/clientapi/agent/metadata/ConfigurationMetadataParser.java +++ b/modules/core/client-api/src/main/java/org/rhq/core/clientapi/agent/metadata/ConfigurationMetadataParser.java @@ -1,24 +1,20 @@ /* * RHQ Management Platform - * Copyright (C) 2005-2008 Red Hat, Inc. + * Copyright (C) 2005-2013 Red Hat, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation, and/or the GNU Lesser - * General Public License, version 2.1, also as published by the Free - * Software Foundation. + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License and the GNU Lesser General Public License - * for more details. + * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * and the GNU Lesser General Public License along with this program; - * if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ package org.rhq.core.clientapi.agent.metadata;
@@ -40,6 +36,7 @@ import org.rhq.core.clientapi.descriptor.configuration.ConfigurationProperty; import org.rhq.core.clientapi.descriptor.configuration.ConfigurationTemplateDescriptor; import org.rhq.core.clientapi.descriptor.configuration.ConstraintType; import org.rhq.core.clientapi.descriptor.configuration.DynamicProperty; +import org.rhq.core.clientapi.descriptor.configuration.ExpressionScope; import org.rhq.core.clientapi.descriptor.configuration.FloatConstraintType; import org.rhq.core.clientapi.descriptor.configuration.IntegerConstraintType; import org.rhq.core.clientapi.descriptor.configuration.ListProperty; @@ -256,6 +253,11 @@ public class ConfigurationMetadataParser { throw new IllegalArgumentException("Filter expression must be less than 40 chars long"); } optionsSource.setFilter(source.getFilter()); + ExpressionScope expressionScope = source.getExpressionScope(); + if (expressionScope != null) { + optionsSource.setExpressionScope(PropertyOptionsSource.ExpressionScope.fromValue(expressionScope + .value())); + } String expression = source.getExpression(); if (expression == null || expression.isEmpty()) throw new IllegalArgumentException("Expression must not be empty"); diff --git a/modules/core/client-api/src/main/resources/rhq-configuration.xsd b/modules/core/client-api/src/main/resources/rhq-configuration.xsd index 4b706d8..d5e186b 100644 --- a/modules/core/client-api/src/main/resources/rhq-configuration.xsd +++ b/modules/core/client-api/src/main/resources/rhq-configuration.xsd @@ -479,7 +479,11 @@ Expressions are written in the syntax of the search bar.</xs:documentation> </xs:annotation> </xs:attribute> - + <xs:attribute name="expressionScope" use="optional" default="unlimited" type="config:expressionScope"> + <xs:annotation> + <xs:documentation>A scope for expression searches. Default value is 'unlimited'.</xs:documentation> + </xs:annotation> + </xs:attribute> </xs:complexType>
<xs:complexType name="option"> @@ -607,4 +611,16 @@ </xs:restriction> </xs:simpleType>
+ <xs:simpleType name="expressionScope"> + <xs:annotation> + <xs:documentation>A scope for expression searches. Must be one of 'unlimited' or 'baseResource'. + 'unlimited' means that search results may include any resource in inventory. 'baseResource' narrows down to the + top level server or service.</xs:documentation> + </xs:annotation> + <xs:restriction base="xs:string"> + <xs:enumeration value="unlimited"/> + <xs:enumeration value="baseResource"/> + </xs:restriction> + </xs:simpleType> + </xs:schema> diff --git a/modules/core/dbutils/pom.xml b/modules/core/dbutils/pom.xml index a3c82c2..2e8e4b8 100644 --- a/modules/core/dbutils/pom.xml +++ b/modules/core/dbutils/pom.xml @@ -17,7 +17,7 @@ <description>Database schema setup, upgrade and other utilities</description>
<properties> - <db.schema.version>2.135</db.schema.version> + <db.schema.version>2.136</db.schema.version> <rhq.ds.type-mapping>${rhq.test.ds.type-mapping}</rhq.ds.type-mapping> <rhq.ds.server-name>${rhq.test.ds.server-name}</rhq.ds.server-name> <rhq.ds.db-name>${rhq.test.ds.db-name}</rhq.ds.db-name> diff --git a/modules/core/dbutils/src/main/scripts/dbsetup/config-schema.xml b/modules/core/dbutils/src/main/scripts/dbsetup/config-schema.xml index d5d937e..1328915 100644 --- a/modules/core/dbutils/src/main/scripts/dbsetup/config-schema.xml +++ b/modules/core/dbutils/src/main/scripts/dbsetup/config-schema.xml @@ -65,6 +65,7 @@ <column name="LINK_TO_TARGET" type="BOOLEAN" required="false"/> <column name="FILTER" type="VARCHAR2" required="false" size="40"/> <column name="EXPRESSION" type="VARCHAR2" required="true" size="400"/> + <column name="EXPRESSION_SCOPE" type="VARCHAR2" required="true" size="12" default="unlimited"/> <column name="TARGET_TYPE" type="VARCHAR2" required="true" size="20"/> <column name="PROPERTY_DEF_ID" type="INTEGER" required="false" references="RHQ_CONFIG_PROP_DEF"/> </table> diff --git a/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml b/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml index 341f0b2..6f42345 100644 --- a/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml +++ b/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml @@ -1,4 +1,3 @@ - <project name="db-upgrade" default="upgrade" basedir=".">
<!-- @@ -2180,6 +2179,11 @@
</schemaSpec>
+ <schemaSpec version="2.136"> + <schema-addColumn table="RHQ_CONFIG_PD_OSRC" column="EXPRESSION_SCOPE" columnType="VARCHAR2" precision="12"/> + <schema-alterColumn table="RHQ_CONFIG_PD_OSRC" column="EXPRESSION_SCOPE" nullable="false" default="unlimited"/> + </schemaSpec> + </dbupgrade> </target> </project> diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/configuration/definition/PropertyOptionsSource.java b/modules/core/domain/src/main/java/org/rhq/core/domain/configuration/definition/PropertyOptionsSource.java index 70c9a1f..7b34d0b 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/configuration/definition/PropertyOptionsSource.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/configuration/definition/PropertyOptionsSource.java @@ -1,27 +1,25 @@ /* * RHQ Management Platform - * Copyright (C) 2005-2011 Red Hat, Inc. + * Copyright (C) 2005-2013 Red Hat, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation, and/or the GNU Lesser - * General Public License, version 2.1, also as published by the Free - * Software Foundation. + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License and the GNU Lesser General Public License - * for more details. + * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * and the GNU Lesser General Public License along with this program; - * if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ package org.rhq.core.domain.configuration.definition;
+import static org.rhq.core.domain.configuration.definition.PropertyOptionsSource.ExpressionScope.UNLIMITED; + import java.io.Serializable;
import javax.persistence.Column; @@ -54,8 +52,15 @@ public class PropertyOptionsSource implements Serializable{
@Column(name="LINK_TO_TARGET") private boolean linkToTarget; + private String filter; + + @Column(name="EXPRESSION_SCOPE") + @Enumerated(EnumType.STRING) + private ExpressionScope expressionScope; + private String expression; + @Column(name="TARGET_TYPE") @Enumerated(EnumType.STRING) private TargetType targetType; @@ -65,6 +70,7 @@ public class PropertyOptionsSource implements Serializable{ PropertyDefinitionSimple propertyDefinition;
public PropertyOptionsSource() { + expressionScope = UNLIMITED; }
public void setTarget(String target) { @@ -83,6 +89,10 @@ public class PropertyOptionsSource implements Serializable{ this.expression = expression; }
+ public void setExpressionScope(ExpressionScope expressionScope) { + this.expressionScope = expressionScope; + } + public boolean isLinkToTarget() { return linkToTarget; } @@ -95,10 +105,22 @@ public class PropertyOptionsSource implements Serializable{ return expression; }
+ public ExpressionScope getExpressionScope() { + return expressionScope; + } + public TargetType getTargetType() { return targetType; }
+ public int getId() { + return id; + } + + public void setId(int id) { + this.id = id; + } + public enum TargetType { GROUP("group"), PLUGIN("plugin"), @@ -123,15 +145,30 @@ public class PropertyOptionsSource implements Serializable{ } } return valueOf(v); -// throw new IllegalArgumentException(v.toString()); } }
- public int getId() { - return id; - } + public enum ExpressionScope { + UNLIMITED("unlimited"), // + BASE_RESOURCE("baseResource");
- public void setId(int id) { - this.id = id; + private final String value; + + ExpressionScope(String v) { + value = v; + } + + public String value() { + return value; + } + + public static ExpressionScope fromValue(String v) { + for (ExpressionScope f : ExpressionScope.values()) { + if (f.value.equals(v)) { + return f; + } + } + return valueOf(v); + } } } diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/resource/Resource.java b/modules/core/domain/src/main/java/org/rhq/core/domain/resource/Resource.java index c54d987..8fc65a8 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/resource/Resource.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/resource/Resource.java @@ -1,24 +1,20 @@ /* * RHQ Management Platform - * Copyright (C) 2005-2012 Red Hat, Inc. + * Copyright (C) 2005-2013 Red Hat, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation, and/or the GNU Lesser - * General Public License, version 2.1, also as published by the Free - * Software Foundation. + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License and the GNU Lesser General Public License - * for more details. + * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * and the GNU Lesser General Public License along with this program; - * if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ package org.rhq.core.domain.resource;
@@ -1837,7 +1833,7 @@ public class Resource implements Comparable<Resource>, Serializable {
final Resource resource = (Resource) o;
- if ((uuid != null) ? (!uuid.equals(resource.uuid)) : (resource.uuid != null)) { + if ((getUuid() != null) ? (!getUuid().equals(resource.getUuid())) : (resource.getUuid() != null)) { return false; }
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/util/ResourceUtility.java b/modules/core/domain/src/main/java/org/rhq/core/domain/util/ResourceUtility.java index 748a66e..649edd8 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/util/ResourceUtility.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/util/ResourceUtility.java @@ -1,6 +1,6 @@ /* * RHQ Management Platform - * Copyright (C) 2012 Red Hat, Inc. + * Copyright (C) 2005-2013 Red Hat, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify @@ -13,17 +13,21 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ + package org.rhq.core.domain.util;
+import static org.rhq.core.domain.resource.ResourceCategory.PLATFORM; + import java.util.LinkedHashSet; import java.util.Set;
import org.jetbrains.annotations.NotNull;
import org.rhq.core.domain.resource.Resource; +import org.rhq.core.domain.resource.ResourceCategory; import org.rhq.core.domain.resource.ResourceType;
/** @@ -32,7 +36,7 @@ import org.rhq.core.domain.resource.ResourceType; * @since 4.4 * @author Ian Springer */ -public abstract class ResourceUtility { +public class ResourceUtility {
public static Resource getChildResource(Resource parent, ResourceType type, String key) { for (Resource resource : parent.getChildResources()) { @@ -68,7 +72,27 @@ public abstract class ResourceUtility { return acceptedChildResources; }
+ /** + * Returns the base server or service of the specified <code>resource</code>. + * + * @param resource + * @return the base server or service of the specified <code>resource</code>. + * @throws IllegalArgumentException if <code>resource</code> is null + */ + public static Resource getBaseServerOrService(Resource resource) { + if (resource == null) { + throw new IllegalArgumentException("resource is null"); + } + Resource current, parent = resource; + do { + current = parent; + parent = current.getParentResource(); + } while (parent != null && parent.getResourceType().getCategory() != PLATFORM); + return current; + } + private ResourceUtility() { + // Defensive }
} diff --git a/modules/core/domain/src/test/java/org/rhq/core/domain/util/ResourceUtilityTest.java b/modules/core/domain/src/test/java/org/rhq/core/domain/util/ResourceUtilityTest.java new file mode 100644 index 0000000..259166c --- /dev/null +++ b/modules/core/domain/src/test/java/org/rhq/core/domain/util/ResourceUtilityTest.java @@ -0,0 +1,84 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +package org.rhq.core.domain.util; + +import static org.rhq.core.domain.resource.ResourceCategory.PLATFORM; +import static org.rhq.core.domain.resource.ResourceCategory.SERVER; +import static org.rhq.core.domain.resource.ResourceCategory.SERVICE; +import static org.rhq.core.domain.util.ResourceUtility.getBaseServerOrService; +import static org.testng.Assert.assertSame; + +import org.testng.annotations.Test; + +import org.rhq.core.domain.resource.Resource; +import org.rhq.core.domain.resource.ResourceType; + +/** + * @author Thomas Segismont + */ +public class ResourceUtilityTest { + + private static final ResourceType TEST_PLATFORM_TYPE = new ResourceType("test platform", "test plugin", PLATFORM, + null); + private static final ResourceType TEST_TOP_LEVEL_SERVER_TYPE = new ResourceType("test topLevelServer", + "test plugin", SERVER, null); + + 
@Test(expectedExceptions = { IllegalArgumentException.class }) + public void getBaseServerOrServiceShouldThrowIllegalArgumentExceptionOnNull() throws Exception { + getBaseServerOrService(null); + } + + @Test + public void getBaseServerOrServiceOfPlatform() throws Exception { + Resource platform = new Resource(); + platform.setResourceType(TEST_PLATFORM_TYPE); + assertSame(platform, getBaseServerOrService(platform)); + } + + @Test + public void getBaseServerOrServiceOfTopLevelServer() throws Exception { + Resource topLevelServer = new Resource(); + topLevelServer.setResourceType(TEST_TOP_LEVEL_SERVER_TYPE); + assertSame(topLevelServer, getBaseServerOrService(topLevelServer)); + } + + @Test + public void getBaseServerOrServiceOfTopLevelService() throws Exception { + Resource topLevelService = new Resource(); + topLevelService.setResourceType(new ResourceType("test topLevelService", "test plugin", SERVICE, null)); + assertSame(topLevelService, getBaseServerOrService(topLevelService)); + } + + @Test + public void getBaseServerOrServiceOfNestedService() throws Exception { + ResourceType testServiceType = new ResourceType("test service", "test plugin", SERVICE, + TEST_TOP_LEVEL_SERVER_TYPE); + Resource service = new Resource(); + service.setResourceType(testServiceType); + Resource topLevelServer = new Resource(); + topLevelServer.setResourceType(TEST_TOP_LEVEL_SERVER_TYPE); + topLevelServer.addChildResource(service); + Resource platform = new Resource(); + platform.setResourceType(TEST_PLATFORM_TYPE); + platform.addChildResource(topLevelServer); + assertSame(topLevelServer, getBaseServerOrService(service)); + } + +} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/ConfigurationGWTService.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/ConfigurationGWTService.java index 06207cd..ad7ac3b 100644 --- 
a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/ConfigurationGWTService.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/ConfigurationGWTService.java @@ -1,3 +1,22 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + package org.rhq.enterprise.gui.coregui.client.gwt;
import java.util.List; @@ -88,6 +107,6 @@ public interface ConfigurationGWTService extends RemoteService { void deleteGroupResourceConfigurationUpdate(Integer groupId, Integer[] groupResourceConfigUpdateIds) throws RuntimeException;
- ConfigurationDefinition getOptionValuesForConfigDefinition(int resourceId, ConfigurationDefinition definition) - throws RuntimeException; + ConfigurationDefinition getOptionValuesForConfigDefinition(int resourceId, int parentResourceId, + ConfigurationDefinition definition) throws RuntimeException; } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/detail/operation/schedule/AbstractOperationScheduleDetailsView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/detail/operation/schedule/AbstractOperationScheduleDetailsView.java index febc86d..983a8dd 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/detail/operation/schedule/AbstractOperationScheduleDetailsView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/detail/operation/schedule/AbstractOperationScheduleDetailsView.java @@ -1,8 +1,7 @@ /* * RHQ Management Platform - * Copyright 2010-2011, Red Hat Middleware LLC, and individual contributors - * as indicated by the @author tags. See the copyright.txt file in the - * distribution for a full listing of individual contributors. + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -14,8 +13,8 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ package org.rhq.enterprise.gui.coregui.client.inventory.common.detail.operation.schedule;
@@ -463,7 +462,7 @@ public abstract class AbstractOperationScheduleDetailsView extends }
ConfigurationGWTServiceAsync configurationService = GWTServiceLookup.getConfigurationService(); - configurationService.getOptionValuesForConfigDefinition(getResourceId(), parametersDefinition, + configurationService.getOptionValuesForConfigDefinition(getResourceId(), -1, parametersDefinition, new AsyncCallback<ConfigurationDefinition>() {
@Override diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/configuration/ResourceConfigurationEditView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/configuration/ResourceConfigurationEditView.java index 6a51e3d..b358bb3 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/configuration/ResourceConfigurationEditView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/configuration/ResourceConfigurationEditView.java @@ -141,7 +141,7 @@ public class ResourceConfigurationEditView extends EnhancedVLayout implements Pr public void onTypesLoaded(ResourceType type) { ConfigurationGWTServiceAsync configurationService = GWTServiceLookup .getConfigurationService(); - configurationService.getOptionValuesForConfigDefinition(resource.getId(), + configurationService.getOptionValuesForConfigDefinition(resource.getId(), -1, type.getResourceConfigurationDefinition(), new AsyncCallback<ConfigurationDefinition>() { @Override diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/factory/ResourceFactoryConfigurationStep.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/factory/ResourceFactoryConfigurationStep.java index 3aa37e0..a9fe9f3 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/factory/ResourceFactoryConfigurationStep.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/factory/ResourceFactoryConfigurationStep.java @@ -1,6 +1,6 @@ /* * RHQ Management Platform - * Copyright (C) 2005-2010 Red Hat, Inc. + * Copyright (C) 2005-2013 Red Hat, Inc. 
* All rights reserved. * * This program is free software; you can redistribute it and/or modify @@ -13,8 +13,8 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ package org.rhq.enterprise.gui.coregui.client.inventory.resource.factory;
@@ -97,7 +97,7 @@ public class ResourceFactoryConfigurationStep extends AbstractWizardStep impleme this.startingConfig = wizard.getNewResourceStartingConfiguration();
ConfigurationGWTServiceAsync configurationService = GWTServiceLookup.getConfigurationService(); - configurationService.getOptionValuesForConfigDefinition(-1, def, + configurationService.getOptionValuesForConfigDefinition(-1, wizard.getParentResource().getId(), def, new AsyncCallback<ConfigurationDefinition>() { public void onSuccess(ConfigurationDefinition result) { createAndAddConfigurationEditor(result); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/ConfigurationGWTServiceImpl.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/ConfigurationGWTServiceImpl.java index d351f7a..3520f82 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/ConfigurationGWTServiceImpl.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/ConfigurationGWTServiceImpl.java @@ -1,3 +1,22 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + package org.rhq.enterprise.gui.coregui.server.gwt;
import java.util.ArrayList; @@ -352,11 +371,11 @@ public class ConfigurationGWTServiceImpl extends AbstractGWTServiceImpl implemen }
@Override - public ConfigurationDefinition getOptionValuesForConfigDefinition(int resourceId, ConfigurationDefinition definition) - throws RuntimeException { + public ConfigurationDefinition getOptionValuesForConfigDefinition(int resourceId, int parentResourceId, + ConfigurationDefinition definition) throws RuntimeException { try { ConfigurationDefinition def = configurationManager.getOptionsForConfigurationDefinition( - getSessionSubject(), resourceId, definition); + getSessionSubject(), resourceId, parentResourceId, definition); return SerialUtility.prepare(def, "ConfigurationService.getOptionValuesForPropertySimple"); } catch (Throwable t) { throw getExceptionToThrowToClient(t); diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/configuration/ConfigurationManagerBeanTest.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/configuration/ConfigurationManagerBeanTest.java index 700cfc5..eef4c70 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/configuration/ConfigurationManagerBeanTest.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/configuration/ConfigurationManagerBeanTest.java @@ -1,6 +1,6 @@ /* * RHQ Management Platform - * Copyright (C) 2005-2011 Red Hat, Inc. + * Copyright (C) 2005-2013 Red Hat, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify @@ -13,8 +13,8 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ package org.rhq.enterprise.server.configuration;
@@ -776,7 +776,7 @@ public class ConfigurationManagerBeanTest extends AbstractEJB3Test { assert configurationDefinition != null : "Configuration Definition could not be located."; //retrieve the options for ConfigurationDefinition ConfigurationDefinition options = configurationManager.getOptionsForConfigurationDefinition(overlord, - newResource1.getId(), configurationDefinition); + newResource1.getId(), -1, configurationDefinition); assert options != null : "Unable able to retrieve options for resource with id [" + newResource1.getId() + "]."; assert !options.getPropertyDefinitions().entrySet().isEmpty() : "No PropertyDefinitionSimple instances found."; diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/configuration/ConfigurationManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/configuration/ConfigurationManagerBean.java index 8246626..6c078c6 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/configuration/ConfigurationManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/configuration/ConfigurationManagerBean.java @@ -1,6 +1,6 @@ /* * RHQ Management Platform - * Copyright (C) 2005-2011 Red Hat, Inc. + * Copyright (C) 2005-2013 Red Hat, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify @@ -13,8 +13,8 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ package org.rhq.enterprise.server.configuration;
@@ -38,6 +38,9 @@ import javax.persistence.NoResultException; import javax.persistence.PersistenceContext; import javax.persistence.Query;
+import com.google.common.base.Predicate; +import com.google.common.collect.Iterables; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.hibernate.Hibernate; @@ -96,6 +99,7 @@ import org.rhq.core.domain.util.OrderingField; import org.rhq.core.domain.util.PageControl; import org.rhq.core.domain.util.PageList; import org.rhq.core.domain.util.PageOrdering; +import org.rhq.core.domain.util.ResourceUtility; import org.rhq.core.util.MessageDigestGenerator; import org.rhq.core.util.collection.ArrayUtils; import org.rhq.core.util.exception.ThrowableUtil; @@ -2534,26 +2538,37 @@ public class ConfigurationManagerBean implements ConfigurationManagerLocal, Conf
@Override public ConfigurationDefinition getOptionsForConfigurationDefinition(Subject subject, int resourceId, - ConfigurationDefinition def) { + int parentResourceId, ConfigurationDefinition def) { + + Resource resource = null, baseResource = null; + if (resourceId >= 0) { + resource = resourceManager.getResource(subject, resourceId); + } + if (parentResourceId >= 0) { + Resource parentResource = resourceManager.getResource(subject, parentResourceId); + baseResource = ResourceUtility.getBaseServerOrService(parentResource); + } else if (resource != null) { + baseResource = ResourceUtility.getBaseServerOrService(resource); + }
for (Map.Entry<String, PropertyDefinition> entry : def.getPropertyDefinitions().entrySet()) { PropertyDefinition pd = entry.getValue();
if (pd instanceof PropertyDefinitionSimple) { PropertyDefinitionSimple pds = (PropertyDefinitionSimple) pd; - handlePDS(subject, resourceId, pds); + handlePDS(subject, resource, baseResource, pds);
} else if (pd instanceof PropertyDefinitionList) { PropertyDefinitionList pdl = (PropertyDefinitionList) pd; PropertyDefinition memberDef = pdl.getMemberDefinition(); if (memberDef instanceof PropertyDefinitionSimple) { PropertyDefinitionSimple pds = (PropertyDefinitionSimple) memberDef; - handlePDS(subject, resourceId, pds); + handlePDS(subject, resource, baseResource, pds); } else if (memberDef instanceof PropertyDefinitionMap) { PropertyDefinitionMap pdm = (PropertyDefinitionMap) memberDef; for (PropertyDefinition inner : pdm.getOrderedPropertyDefinitions()) { if (inner instanceof PropertyDefinitionSimple) { - handlePDS(subject, resourceId, (PropertyDefinitionSimple) inner); + handlePDS(subject, resource, baseResource, (PropertyDefinitionSimple) inner); } log.debug("3 ____[ " + inner.toString() + " in " + pdl.toString() + " ]____ not yet supported"); } @@ -2565,7 +2580,7 @@ public class ConfigurationManagerBean implements ConfigurationManagerLocal, Conf PropertyDefinitionMap pdm = (PropertyDefinitionMap) pd; for (PropertyDefinition inner : pdm.getOrderedPropertyDefinitions()) { if (inner instanceof PropertyDefinitionSimple) { - handlePDS(subject, resourceId, (PropertyDefinitionSimple) inner); + handlePDS(subject, resource, baseResource, (PropertyDefinitionSimple) inner); } else { log.debug("4 ____[ " + inner.toString() + " in " + pdm.toString() + " ]____ not yet supported"); } @@ -2583,21 +2598,20 @@ public class ConfigurationManagerBean implements ConfigurationManagerLocal, Conf * @param subject Subject of the caller - may limit search results * @param pds the PropertyDefinitionSimple to work on */ - private void handlePDS(final Subject subject, int resourceId, PropertyDefinitionSimple pds) { + private void handlePDS(final Subject subject, Resource resource, Resource baseResource, PropertyDefinitionSimple pds) {
if (pds.getOptionsSource() != null) { // evaluate the source parameters PropertyOptionsSource pos = pds.getOptionsSource(); PropertyOptionsSource.TargetType tt = pos.getTargetType(); String expression = pos.getExpression(); + PropertyOptionsSource.ExpressionScope expressionScope = pos.getExpressionScope(); String filter = pos.getFilter(); Pattern filterPattern = null; if (filter != null) filterPattern = Pattern.compile(filter);
if (tt == PropertyOptionsSource.TargetType.RESOURCE || tt == PropertyOptionsSource.TargetType.CONFIGURATION) { - List<Resource> resources = null; - CriteriaQuery<Resource, ResourceCriteria> resourcesPaged = null; ResourceCriteria criteria = new ResourceCriteria();
//Use CriteriaQuery to automatically chunk/page through criteria query results @@ -2608,6 +2622,7 @@ public class ConfigurationManagerBean implements ConfigurationManagerLocal, Conf } };
+ Iterable<Resource> foundResources = null; if (tt == PropertyOptionsSource.TargetType.CONFIGURATION) { // split out expression part for target=configuration // return if no property specifier is given @@ -2617,10 +2632,15 @@ public class ConfigurationManagerBean implements ConfigurationManagerLocal, Conf
if (!"self".equals(expr)) { criteria.setSearchExpression(expr); - resourcesPaged = new CriteriaQuery<Resource, ResourceCriteria>(criteria, queryExecutor); - } else if (resourceId >= 0) { - resources = new ArrayList<Resource>(); - resources.add(resourceManager.getResourceById(subject, resourceId)); + foundResources = new CriteriaQuery<Resource, ResourceCriteria>(criteria, queryExecutor); + if (expressionScope == PropertyOptionsSource.ExpressionScope.BASE_RESOURCE && baseResource != null) { + foundResources = Iterables.filter(foundResources, new IsInBaseResourcePredicate( + baseResource)); + } + } else if (resource != null) { + ArrayList<Resource> resourceList = new ArrayList<Resource>(); + resourceList.add(resource); + foundResources = resourceList; } else { log.warn("Self reference requested but resource id is not valid." + "Option source expression:" + expression); @@ -2633,18 +2653,16 @@ public class ConfigurationManagerBean implements ConfigurationManagerLocal, Conf } } else { criteria.setSearchExpression(expression); - resourcesPaged = new CriteriaQuery<Resource, ResourceCriteria>(criteria, queryExecutor); - } - - if (resources != null) {//process resources - for (Resource resource : resources) { - processPropertyOptionsSource(pds, tt, expression, filterPattern, resource); - } - } else {// process resourcesPaged(CriteriaQuery parsing) - for (Resource resource : resourcesPaged) { - processPropertyOptionsSource(pds, tt, expression, filterPattern, resource); + foundResources = new CriteriaQuery<Resource, ResourceCriteria>(criteria, queryExecutor); + if (expressionScope == PropertyOptionsSource.ExpressionScope.BASE_RESOURCE && baseResource != null) { + foundResources = Iterables.filter(foundResources, new IsInBaseResourcePredicate( + baseResource)); } } + + for (Resource foundResource : foundResources) { + processPropertyOptionsSource(pds, tt, expression, filterPattern, foundResource); + } } else if (tt == PropertyOptionsSource.TargetType.GROUP) { // spinder 
2-15-13: commenting out this code below as we don't appear to be using any of it. Half done. // // for groups we need to talk to the group manager @@ -2772,4 +2790,19 @@ public class ConfigurationManagerBean implements ConfigurationManagerLocal, Conf
return true; } + + private static final class IsInBaseResourcePredicate implements Predicate<Resource> { + + private Resource baseResource; + + private IsInBaseResourcePredicate(Resource baseResource) { + this.baseResource = baseResource; + } + + @Override + public boolean apply(Resource resource) { + Resource baseServerOrService = ResourceUtility.getBaseServerOrService(resource); + return baseResource.equals(baseServerOrService); + } + } } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/configuration/ConfigurationManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/configuration/ConfigurationManagerLocal.java index 9fc43ce..640b17d 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/configuration/ConfigurationManagerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/configuration/ConfigurationManagerLocal.java @@ -1,6 +1,6 @@ /* * RHQ Management Platform - * Copyright (C) 2005-2008 Red Hat, Inc. + * Copyright (C) 2005-2013 Red Hat, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify @@ -13,8 +13,8 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ package org.rhq.enterprise.server.configuration;
@@ -473,7 +473,7 @@ public interface ConfigurationManagerLocal extends ConfigurationManagerRemote { GroupPluginConfigurationUpdateCriteria criteria);
ConfigurationDefinition getOptionsForConfigurationDefinition(Subject subject, int resourceId, - ConfigurationDefinition def); + int parentResourceId, ConfigurationDefinition def);
/** * Dedicated method for supporting resource upgrade of plugin configuration. Similar to diff --git a/modules/plugins/jboss-as-7/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/jboss-as-7/src/main/resources/META-INF/rhq-plugin.xml index 162c3a1..1fd2e82 100644 --- a/modules/plugins/jboss-as-7/src/main/resources/META-INF/rhq-plugin.xml +++ b/modules/plugins/jboss-as-7/src/main/resources/META-INF/rhq-plugin.xml @@ -1701,13 +1701,13 @@ <resource-configuration> <!-- IF you add properties here, you also need to update org.rhq.modules.plugins.jbossas7.ManagedASComponent.loadResourceConfiguration --> <c:simple-property name="hostname" displayName="Name of the host to put the server on" required="true" readOnly="true"> - <c:option-source target="resource" expression="type=^Host$ plugin=&pluginName;"/> + <c:option-source target="resource" expression="type=^Host$ plugin=&pluginName;" expressionScope="baseResource"/> </c:simple-property> <c:simple-property name="group" readOnly="false" displayName="Server Group" description="Server Group this instance belongs to."> - <c:option-source target="resource" expression="type=ServerGroup"/> + <c:option-source target="resource" expression="type=ServerGroup" expressionScope="baseResource"/> </c:simple-property> - <c:simple-property name="socket-binding-group" readOnly="false" displayName="Socket binding group" description="Socket bindings to use" required="true" default="standard-sockets"> - <c:option-source target="resource" expression="type=SocketBindingGroup"/> + <c:simple-property name="socket-binding-group" readOnly="false" displayName="Socket binding group" description="Socket bindings to use" required="true"> + <c:option-source target="resource" expression="type=SocketBindingGroup" expressionScope="baseResource"/> </c:simple-property> <c:simple-property name="socket-binding-port-offset" readOnly="false" displayName="Port Offset" type="integer" default="0" description="Offset to the base ports"/> <c:simple-property 
name="auto-start" displayName="Autostart" default="false" type="boolean"/>
commit 2a21d7b3a9a97248df2ab6d9d83804663c55972b Author: Thomas Segismont tsegismo@redhat.com Date: Wed Aug 14 16:51:00 2013 +0200
Bug 846362 - [jboss-as5] Shutdown max wait time "shared" between different resources
diff --git a/modules/plugins/jboss-as-5/src/main/java/org/rhq/plugins/jbossas5/ApplicationServerOperationsDelegate.java b/modules/plugins/jboss-as-5/src/main/java/org/rhq/plugins/jbossas5/ApplicationServerOperationsDelegate.java index b96a2cc..2ced390 100644 --- a/modules/plugins/jboss-as-5/src/main/java/org/rhq/plugins/jbossas5/ApplicationServerOperationsDelegate.java +++ b/modules/plugins/jboss-as-5/src/main/java/org/rhq/plugins/jbossas5/ApplicationServerOperationsDelegate.java @@ -1,27 +1,28 @@ /* - * Jopr Management Platform - * Copyright (C) 2005-2009 Red Hat, Inc. + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation, and/or the GNU Lesser - * General Public License, version 2.1, also as published by the Free - * Software Foundation. + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License and the GNU Lesser General Public License - * for more details. + * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * and the GNU Lesser General Public License along with this program; - * if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ package org.rhq.plugins.jbossas5;
+import static java.util.concurrent.TimeUnit.MINUTES; +import static org.rhq.core.util.StringUtil.isBlank; +import static org.rhq.plugins.jbossas5.ApplicationServerPluginConfigurationProperties.START_WAIT_MAX_PROP; +import static org.rhq.plugins.jbossas5.ApplicationServerPluginConfigurationProperties.STOP_WAIT_MAX_PROP; + import java.io.File; import java.util.List; import java.util.Map; @@ -54,7 +55,7 @@ import org.rhq.core.system.SystemInfo; * @author Jay Shaughnessy */ public class ApplicationServerOperationsDelegate { - + private static class ExecutionFailedException extends Exception {
private static final long serialVersionUID = 1L; @@ -76,12 +77,12 @@ public class ApplicationServerOperationsDelegate { super(cause); } } - + /** - * max amount of time to wait for server to show as unavailable after + * default max amount of time to wait for server to show as unavailable after * executing stop - in milliseconds */ - private static long STOP_WAIT_MAX = 1000L * 150; // 2.5 minutes + private static final long DEFAULT_STOP_WAIT_MAX = 1000L * 150; // 2.5 minutes
/** * amount of time to wait between availability checks when performing a stop @@ -95,8 +96,8 @@ public class ApplicationServerOperationsDelegate { */ private static final long STOP_WAIT_FINAL = 1000L * 30; // 30 seconds
- /** max amount of time to wait for start to complete - in milliseconds */ - private static long START_WAIT_MAX = 1000L * 300; // 5 minutes + /** default max amount of time to wait for start to complete - in milliseconds */ + private static final long DEFAULT_START_WAIT_MAX = 1000L * 300; // 5 minutes
/** * amount of time to wait between availability checks when performing a @@ -503,17 +504,9 @@ public class ApplicationServerOperationsDelegate { AvailabilityType avail; //detect whether startWaitMax property has been set. Configuration pluginConfig = serverComponent.getResourceContext().getPluginConfiguration(); - PropertySimple property = pluginConfig - .getSimple(ApplicationServerPluginConfigurationProperties.START_WAIT_MAX_PROP); - //if set and valid, update startWaitMax value - if ((property != null) && (property.getIntegerValue() != null)) { - int newValue = property.getIntegerValue(); - if (newValue >= 1) { - START_WAIT_MAX = 1000L * 60 * newValue; - } - } + long startWaitMax = getMaxWait(pluginConfig.getSimple(START_WAIT_MAX_PROP), DEFAULT_START_WAIT_MAX); while (((avail = this.serverComponent.getAvailability()) == AvailabilityType.DOWN) - && (System.currentTimeMillis() < (start + START_WAIT_MAX))) { + && (System.currentTimeMillis() < (start + startWaitMax))) { try { Thread.sleep(START_WAIT_INTERVAL); } catch (InterruptedException e) { @@ -528,17 +521,9 @@ public class ApplicationServerOperationsDelegate { AvailabilityType avail; //detect whether stopWaitMax property has been set. 
Configuration pluginConfig = serverComponent.getResourceContext().getPluginConfiguration(); - PropertySimple property = pluginConfig - .getSimple(ApplicationServerPluginConfigurationProperties.STOP_WAIT_MAX_PROP); - //if set and valid update stopWaitMax value - if ((property != null) && (property.getIntegerValue() != null)) { - int newValue = property.getIntegerValue(); - if (newValue >= 1) { - STOP_WAIT_MAX = 1000L * 60 * newValue; - } - } + long stopWaitMax = getMaxWait(pluginConfig.getSimple(STOP_WAIT_MAX_PROP), DEFAULT_STOP_WAIT_MAX); while (((avail = this.serverComponent.getAvailability()) == AvailabilityType.UP) - && (System.currentTimeMillis() < (start + STOP_WAIT_MAX))) { + && (System.currentTimeMillis() < (start + stopWaitMax))) { try { Thread.sleep(STOP_WAIT_INTERVAL); } catch (InterruptedException e) { @@ -556,6 +541,22 @@ public class ApplicationServerOperationsDelegate { return avail; }
+ private long getMaxWait(PropertySimple propertySimple, long defaultValueInMillis) { + if (propertySimple == null || isBlank(propertySimple.getStringValue())) { + return defaultValueInMillis; + } + try { + long valueInMinutes = Long.parseLong(propertySimple.getStringValue()); + if (valueInMinutes > 0) { + return MINUTES.toMillis(valueInMinutes); + } else { + return defaultValueInMillis; + } + } catch (NumberFormatException e) { + return defaultValueInMillis; + } + } + /** * Return the absolute path of this JBoss server's start script (e.g. * "C:\opt\jboss-5.1.0.GA\bin\run.sh"). @@ -659,4 +660,3 @@ public class ApplicationServerOperationsDelegate { } } } -
commit 714fb04be78b57d97ce53004290de503b044397d Author: John Sanda jsanda@redhat.com Date: Tue Aug 13 23:56:39 2013 -0400
adding test impl of StorageClusterSettingsManagerLocal
diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/storage/FakeStorageClusterSettingsManagerBean.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/storage/FakeStorageClusterSettingsManagerBean.java new file mode 100644 index 0000000..3ac61e4 --- /dev/null +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/storage/FakeStorageClusterSettingsManagerBean.java @@ -0,0 +1,26 @@ +package org.rhq.enterprise.server.storage; + +import javax.ejb.Stateless; + +import org.rhq.core.domain.auth.Subject; + +/** + * @author John Sanda + */ +@Stateless +public class FakeStorageClusterSettingsManagerBean implements StorageClusterSettingsManagerLocal { + + @Override + public StorageClusterSettings getClusterSettings(Subject subject) { + StorageClusterSettings settings = new StorageClusterSettings(); + settings.setGossipPort(7100); + settings.setCqlPort(9042); + + return settings; + } + + @Override + public void setClusterSettings(Subject subject, StorageClusterSettings clusterSettings) { + //To change body of implemented methods use File | Settings | File Templates. + } +}
commit f488c3459b2d53fefda27cdb340b3530c90d2858 Author: John Sanda jsanda@redhat.com Date: Tue Aug 13 23:06:17 2013 -0400
attempting to fix server itest failures
I am not sure why but DiscoveryBossBeanTest has been failing due to calls to StorageClusterSettingsManagerBean made by StorageNodeManagerBean. I am deploying a test impl for StorageClusterSettingsManager to see if it resolves the issues. There is no need to deploy the production EJB. It is just a thin wrapper around SystemManagerBean, and it can/should be tested separately.
diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java index 44a4646..0f1c6fb 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java @@ -88,6 +88,7 @@ import org.rhq.enterprise.server.plugin.pc.ServerPluginService; import org.rhq.enterprise.server.plugin.pc.ServerPluginServiceMBean; import org.rhq.enterprise.server.scheduler.SchedulerService; import org.rhq.enterprise.server.scheduler.SchedulerServiceMBean; +import org.rhq.enterprise.server.storage.FakeStorageClusterSettingsManagerBean; import org.rhq.enterprise.server.storage.StorageClientManagerBean; import org.rhq.enterprise.server.util.LookupUtil; import org.rhq.test.AssertUtils; @@ -352,6 +353,8 @@ public abstract class AbstractEJB3Test extends Arquillian { .create("/rhq-server.jar/org/rhq/enterprise/server/core/StartupBean$1.class")); testEar.delete(ArchivePaths .create("/rhq-server.jar/org/rhq/enterprise/server/core/ShutdownListener.class")); + testEar.delete(ArchivePaths + .create("/rhq-server.jar/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerBean.class"));
//replace the above startup beans with stripped down versions testEar.add(new ClassAsset(StrippedDownStartupBean.class), ArchivePaths @@ -359,6 +362,8 @@ public abstract class AbstractEJB3Test extends Arquillian { testEar.add(new ClassAsset(StrippedDownStartupBeanPreparation.class), ArchivePaths .create("/rhq-server.jar/org/rhq/enterprise/server/test/" + "StrippedDownStartupBeanPreparation.class")); + testEar.add(new ClassAsset(FakeStorageClusterSettingsManagerBean.class), ArchivePaths + .create("/rhq-server.jar/org/rhq/enterprise/server/storage/FakeStorageClusterSettingsManagerBean.class")); testEar.addAsManifestResource(new ByteArrayAsset("<beans/>".getBytes()), ArchivePaths.create("beans.xml"));
// add the test classes to the deployment diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 9e41692..e925de2 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -36,10 +36,6 @@ import java.util.Map; import java.util.Queue; import java.util.Set;
-import org.rhq.enterprise.server.storage.StorageClusterSettings; -import org.rhq.enterprise.server.storage.StorageClusterSettingsManagerBean; -import org.rhq.enterprise.server.storage.StorageNodeOperationsHandlerLocal; - import javax.ejb.Asynchronous; import javax.ejb.EJB; import javax.ejb.Stateless; @@ -88,6 +84,9 @@ import org.rhq.enterprise.server.resource.ResourceManagerLocal; import org.rhq.enterprise.server.resource.ResourceTypeManagerLocal; import org.rhq.enterprise.server.rest.reporting.MeasurementConverter; import org.rhq.enterprise.server.scheduler.SchedulerLocal; +import org.rhq.enterprise.server.storage.StorageClusterSettings; +import org.rhq.enterprise.server.storage.StorageClusterSettingsManagerLocal; +import org.rhq.enterprise.server.storage.StorageNodeOperationsHandlerLocal; import org.rhq.enterprise.server.util.CriteriaQueryGenerator; import org.rhq.enterprise.server.util.CriteriaQueryRunner; import org.rhq.enterprise.server.util.LookupUtil; @@ -154,7 +153,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN private ResourceManagerLocal resourceManager;
@EJB - private StorageClusterSettingsManagerBean storageClusterSettingsManager; + private StorageClusterSettingsManagerLocal storageClusterSettingsManager;
@EJB private StorageNodeOperationsHandlerLocal storageNodeOperationsHandler; diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerBean.java index 3e10a2f..9418bca 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerBean.java @@ -3,7 +3,7 @@ package org.rhq.enterprise.server.storage; import java.util.Map;
import javax.ejb.EJB; -import javax.ejb.Singleton; +import javax.ejb.Stateless;
import org.rhq.core.domain.auth.Subject; import org.rhq.core.domain.common.composite.SystemSetting; @@ -13,12 +13,13 @@ import org.rhq.enterprise.server.system.SystemManagerLocal; /** * @author John Sanda */ -@Singleton -public class StorageClusterSettingsManagerBean { +@Stateless +public class StorageClusterSettingsManagerBean implements StorageClusterSettingsManagerLocal {
@EJB private SystemManagerLocal systemManager;
+ @Override public StorageClusterSettings getClusterSettings(Subject subject) { SystemSettings settings = systemManager.getSystemSettings(subject); Map<String, String> settingsMap = settings.toMap(); @@ -41,6 +42,7 @@ public class StorageClusterSettingsManagerBean { return clusterSettings; }
+ @Override public void setClusterSettings(Subject subject, StorageClusterSettings clusterSettings) { SystemSettings settings = new SystemSettings(); settings.put(SystemSetting.STORAGE_CQL_PORT, Integer.toString(clusterSettings.getCqlPort())); diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerLocal.java new file mode 100644 index 0000000..cb63bc4 --- /dev/null +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerLocal.java @@ -0,0 +1,15 @@ +package org.rhq.enterprise.server.storage; + +import javax.ejb.Local; + +import org.rhq.core.domain.auth.Subject; + +/** + * @author John Sanda + */ +@Local +public interface StorageClusterSettingsManagerLocal { + StorageClusterSettings getClusterSettings(Subject subject); + + void setClusterSettings(Subject subject, StorageClusterSettings clusterSettings); +} diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java index 3e0eed8..1bf3cec 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java @@ -63,7 +63,7 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa private OperationManagerLocal operationManager;
@EJB - private StorageClusterSettingsManagerBean storageClusterSettingsManager; + private StorageClusterSettingsManagerLocal storageClusterSettingsManager;
@EJB private StorageClientManagerBean storageClientManager;
commit 186dbad2dc78b6161d2bd96635347a7f039a83b7 Author: Mike Thompson mithomps@redhat.com Date: Tue Aug 13 15:32:30 2013 -0700
[BZ 949750] - Chart hovers fail to render in IE. Availability Chart hover fix.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java index 64c355d..59d4c75 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java @@ -250,6 +250,44 @@ public class AvailabilityOverUnderGraphType implements AvailabilityGraphType { .attr("opacity", ".75") .attr("fill", function (d) { return calcBarFill(d); + }).on("mouseover",function (d) { + + var timeFormatter = $wnd.d3.time.format(availChartContext.chartHoverTimeFormat), + dateFormatter = $wnd.d3.time.format(availChartContext.chartHoverDateFormat), + availStart = new Date(+d.availStart), + xPosition = parseFloat($wnd.d3.select(this).attr("x")), + availTooltipDiv = $wnd.d3.select("#availTooltip") + .style("left", xPosition + "px") + .style("top", "0px"); + + availTooltipDiv.select("#availTooltipLabel") + .text(availChartContext.hoverBarAvailabilityLabel); + + availTooltipDiv + .select("#availTooltipType") + .text(d.availTypeMessage); + + availTooltipDiv + .select("#availTooltipStartDate") + .text(dateFormatter(availStart)); + + availTooltipDiv + .select("#availTooltipStartTime") + .text(timeFormatter(availStart)); + + availTooltipDiv + .select("#availTooltipDurationLabel") + .text(availChartContext.hoverBarLabel); + + availTooltipDiv + .select("#availTooltipDuration") + .text(d.availDuration); + + //Show the tooltip + $wnd.d3.select("#availTooltip").classed("hidden", false); + }).on("mouseout", function () { + //Hide the tooltip + $wnd.d3.select("#availTooltip").classed("hidden", true); });
xAxis.tickFormat($wnd.rhqCommon.getD3CustomTimeFormat(availChartContext.chartXaxisTimeFormatHours, availChartContext.chartXaxisTimeFormatHoursMinutes)); @@ -295,42 +333,11 @@ public class AvailabilityOverUnderGraphType implements AvailabilityGraphType { }
- - function createHovers() { - $wnd.jQuery('svg rect.availBars').tipsy({ - gravity: 's', - html: true, - trigger: 'hover', - title: function () { - var d = this.__data__; - return formatHovers(d); - }, - show: function (e, el) { - el.css({ 'z-index': '990000'}) - } - }); - } - - - function formatHovers(d) { - var timeFormatter = $wnd.d3.time.format(availChartContext.chartHoverTimeFormat), - dateFormatter = $wnd.d3.time.format(availChartContext.chartHoverDateFormat), - availStart = new Date(+d.availStart); - - return '<div class="chartHoverEnclosingDiv">' + - '<div class="chartHoverAlignLeft"><span >' + availChartContext.hoverBarAvailabilityLabel + ': </span><span style="width:50px;">' + d.availTypeMessage + '</span></div>' + - '<div class="chartHoverAlignLeft"><span>' + dateFormatter(availStart) + ' ' + timeFormatter(availStart) + '</span></div>' + - '<div class="chartHoverAlignLeft"><span >' + availChartContext.hoverBarLabel + ': </span><span style="width:50px;">' + d.availDuration + '</span></div>' + - '</div>'; - - } - return { // Public API draw: function (availChartContext) { "use strict"; drawBars(availChartContext); - createHovers(); } }; // end public closure
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/avail/AvailabilityD3GraphView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/avail/AvailabilityD3GraphView.java index 3fc0192..6e3b590 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/avail/AvailabilityD3GraphView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/avail/AvailabilityD3GraphView.java @@ -83,7 +83,22 @@ public class AvailabilityD3GraphView<T extends AvailabilityGraphType> extends En divAndSvgDefs.append("<div id="availChart-" + availabilityGraphType.getChartId() + "" ><svg xmlns="http://www.w3.org/2000/svg%5C" version="1.1" style="height:65px;">"); divAndSvgDefs.append(getSvgDefs()); - divAndSvgDefs.append("</svg></div>"); + divAndSvgDefs.append("</svg>"); + divAndSvgDefs.append("<div id="availTooltip" class="hidden" >"); + divAndSvgDefs.append("<div>"); + divAndSvgDefs.append("<span id="availTooltipLabel" class="availTooltipLabel"></span>"); + divAndSvgDefs.append("<span> : </span>"); + divAndSvgDefs.append("<span id="availTooltipType" style="width:40px;font-weight:bold;"></span>"); + divAndSvgDefs.append("<span> - </span>"); + divAndSvgDefs.append("<span id="availTooltipDuration" ></span>"); + divAndSvgDefs.append("<div/>"); + divAndSvgDefs.append("<div>"); + divAndSvgDefs.append("<span id="availTooltipStartDate" ></span>"); + divAndSvgDefs.append("<span> </span>"); + divAndSvgDefs.append("<span id="availTooltipStartTime" ></span>"); + divAndSvgDefs.append("</div>"); + divAndSvgDefs.append("</div>"); // end availTooltipDiv + divAndSvgDefs.append("</div>"); HTMLFlow graph = new HTMLFlow(divAndSvgDefs.toString()); graph.setWidth100(); graph.setHeight(65); diff --git 
a/modules/enterprise/gui/coregui/src/main/webapp/css/charts.css b/modules/enterprise/gui/coregui/src/main/webapp/css/charts.css index 2dbd79b..6a004b4 100644 --- a/modules/enterprise/gui/coregui/src/main/webapp/css/charts.css +++ b/modules/enterprise/gui/coregui/src/main/webapp/css/charts.css @@ -156,3 +156,39 @@ rect.low { font-weight: bold; color: #70c4e2; } + +#availTooltip { + z-index: 990000; + position: absolute; + width: 150px; + height: auto; + padding: 5px; + background-color: #000; + opacity: 0.55; + -webkit-border-radius: 5px; + -moz-border-radius: 5px; + border-radius: 5px; + -webkit-box-shadow: 4px 4px 5px rgba(0, 0, 0, 0.4); + -moz-box-shadow: 4px 4px 5px rgba(0, 0, 0, 0.4); + box-shadow: 4px 4px 5px rgba(0, 0, 0, 0.4); + pointer-events: none; +} + +#availTooltip.hidden { + display: none; +} + +#availTooltip div { + margin: 0; + font-family: Arial, Verdana, sans-serif; + font-size: 9px; + color: #d3d3d6; +} + +.availTooltipLabel { + width: 40px; + font-weight: bold; + color: #d3d3d6; +} + +
commit a62ac169f4ded7612ecbc93bc647248f29eb64d2 Author: Jay Shaughnessy jshaughn@redhat.com Date: Tue Aug 13 15:01:50 2013 -0400
fix some issues, add some new api methods
diff --git a/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml index 40e826a..5182ef1 100644 --- a/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml +++ b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml @@ -67,7 +67,7 @@ <difference> <className>org/rhq/enterprise/server/bundle/BundleManagerRemote</className> <differenceType>7012</differenceType> <!-- method added to an interface --> - <method>org.rhq.core.domain.bundle.BundleGroup createBundleGroup(org.rhq.core.domain.auth.Subject, java.lang.String, java.lang.String)</method> + <method>org.rhq.core.domain.bundle.BundleGroup createBundleGroup(org.rhq.core.domain.auth.Subject, org.rhq.core.domain.bundle.BundleGroup)</method> <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> </difference>
@@ -128,38 +128,45 @@ </difference>
<difference> - <className>org/rhq/enterprise/server/bundle/RoleManagerRemote</className> + <className>org/rhq/enterprise/server/authz/RoleManagerRemote</className> <differenceType>7012</differenceType> <!-- method added to an interface --> <method>void addBundleGroupsToRole(org.rhq.core.domain.auth.Subject, int, int[])</method> <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> </difference>
<difference> - <className>org/rhq/enterprise/server/bundle/RoleManagerRemote</className> + <className>org/rhq/enterprise/server/authz/RoleManagerRemote</className> <differenceType>7012</differenceType> <!-- method added to an interface --> <method>void addRolesToBundleGroup(org.rhq.core.domain.auth.Subject, int, int[])</method> <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> </difference>
<difference> - <className>org/rhq/enterprise/server/bundle/RoleManagerRemote</className> + <className>org/rhq/enterprise/server/authz/RoleManagerRemote</className> <differenceType>7012</differenceType> <!-- method added to an interface --> <method>void removeBundleGroupsFromRole(org.rhq.core.domain.auth.Subject, int, int[])</method> <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> </difference>
<difference> - <className>org/rhq/enterprise/server/bundle/RoleManagerRemote</className> + <className>org/rhq/enterprise/server/authz/RoleManagerRemote</className> <differenceType>7012</differenceType> <!-- method added to an interface --> <method>void removeRolesFromBundleGroup(org.rhq.core.domain.auth.Subject, int, int[])</method> <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> </difference>
<difference> - <className>org/rhq/enterprise/server/bundle/RoleManagerRemote</className> + <className>org/rhq/enterprise/server/authz/RoleManagerRemote</className> <differenceType>7012</differenceType> <!-- method added to an interface --> <method>void setAssignedBundleGroups(org.rhq.core.domain.auth.Subject, int, int[])</method> <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> </difference>
+ <difference> + <className>org/rhq/enterprise/server/bundle/BundleManagerRemote</className> + <differenceType>7012</differenceType> <!-- method added to an interface --> + <method>org.rhq.core.domain.bundle.BundleGroup updateBundleGroup(org.rhq.core.domain.auth.Subject, org.rhq.core.domain.bundle.BundleGroup)</method> + <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> + </difference> + </differences>
commit abe0c590474b8a1efc071e26e63f877d0cc383b2 Author: Simeon Pinder spinder@fulliautomatix.conchfritter.com Date: Mon Aug 12 15:17:15 2013 -0400
[BZ 990576] fix issue with available groups status update speed, guard against client failures and add more logging.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleLdapGroupSelector.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleLdapGroupSelector.java index c4e7145..4cb1275 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleLdapGroupSelector.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleLdapGroupSelector.java @@ -72,7 +72,8 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c public static final String FIELD_DESCRIPTION = "description"; final TextItem searchTextItem = new TextItem(); protected int cursorPosition; - private static int retryAttempt = 0; + private static int retryAttempt = 0;//limit retries on failure + private static int noProgressAttempts = 0;//limit really slow attempt parse times
//override the selector key for ldap group selection. protected String getSelectorKey() { @@ -263,6 +264,7 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c groupQueryStatus.setIcons(failIcon); groupQueryStatus.setDefaultValue(MSG.view_adminRoles_failLdapGroupsSettings()); CoreGUI.getErrorHandler().handleError(MSG.view_adminRoles_failLdapGroupsSettings(), caught); + Log.debug(MSG.view_adminRoles_failLdapGroupsSettings()); }
@Override @@ -308,6 +310,7 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c retryAttempt++; if (retryAttempt > 3) { cancel();//kill thread + Log.debug(MSG.view_adminRoles_failLdapRetry()); retryAttempt = 0; } } @@ -339,6 +342,10 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c pageCount = Integer.valueOf(value); } } + + if (resultCountValue == 0) { + noProgressAttempts++; + } //Update status information String warnTooManyResults = MSG.view_adminRoles_ldapWarnTooManyResults(); String warnQueryTakingLongResults = MSG.view_adminRoles_ldapWarnQueryTakingLongResults(); @@ -378,12 +385,15 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c if (resultCountValue > 20000) {//results throttled adviceItem.setValue(tooManyResults); adviceItem.setTooltip(tooManyResults); + Log.debug(tooManyResults);//log error to client. } else if ((current - start) >= 10 * 1000) {// took longer than 10s adviceItem.setValue(queryTookLongResults); adviceItem.setTooltip(queryTookLongResults); + Log.debug(queryTookLongResults);//log error to client. } else if (pageCount >= 20) {// required more than 20 pages of results adviceItem.setValue(queryTookManyPagesResults); adviceItem.setTooltip(queryTookManyPagesResults); + Log.debug(queryTookManyPagesResults);//log error to client. } else {//simple success. groupQueryStatus.setDefaultValue(success); groupQueryStatus.setIcons(successIcon); @@ -391,8 +401,19 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c adviceItem.setTooltip(none); adviceItem.setDisabled(true); } + noProgressAttempts = 0; //now cancel the timer cancel(); + } else if (noProgressAttempts >= 10) {//availGroups query stuck on server side + //cancel the timer. 
+ cancel(); + String clientSideQuitting = MSG.view_adminRoles_failLdapCancelling();//catch all + adviceItem.setDisabled(false); + groupQueryStatus.setIcons(attentionIcon); + adviceItem.setValue(clientSideQuitting); + adviceItem.setTooltip(clientSideQuitting); + noProgressAttempts = 0; + Log.debug(clientSideQuitting);//log error to client. } availableGroupDetails.markForRedraw(); } diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties index 869a8ce..9fd2b24 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties @@ -512,9 +512,11 @@ view_adminRoles_assignedSubjects = Assigned Subjects view_adminRoles_failCreateRoleWithExistingName = Failed to create role with existing name [{0}]. Please use another name. view_adminRoles_failLdap = Failed to determine if LDAP configured - assuming no LDAP. view_adminRoles_failLdapAvailableGroups = Failed: Unable to retrieve status for latest AvailableGroups() call. +view_adminRoles_failLdapCancelling = Client unable to proceed. Cancelling all future available groups status checks for this thread. Likely 1)LDAP server communication failures or 2)ldap query checking inexplicably failed. view_adminRoles_failLdapGroups = Failed to retrieve available LDAP groups - assuming no LDAP groups. view_adminRoles_failLdapGroupsRole = Failed to load LDAP groups available for role. view_adminRoles_failLdapGroupsSettings = Failed to retrieve system settings details for LDAP groups. +view_adminRoles_failLdapRetry = Retried 3 times. Cancelling future available group requests. view_adminRoles_failRoles = Failed to fetch roles. 
view_adminRoles_globalPerms = Global Permissions view_adminRoles_ldapGroups = LDAP Groups diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties index 4e6532d..dca98cb 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties @@ -531,9 +531,11 @@ view_adminRoles_assignedSubjects = PÅiÅazené subjekty view_adminRoles_failCreateRoleWithExistingName = NepodaÅilo se vytvoÅit roli s existujÃcÃm jménem [{0}]. PouÅŸÃjte prosÃm jiné jméno. view_adminRoles_failLdap = NepodaÅilo se urÄit je-li LDAP nastaven - pÅedpokládám, ÅŸe LDAP nenà pÅÃtomen. ##view_adminRoles_failLdapAvailableGroups = NepodaÅilo se zÃskat informace o LDAP skupinách - pÅedpokládám, ÅŸe LDAP nenà pÅÃtomen. +##view_adminRoles_failLdapCancelling = NepodaÅilo se zÃskat informace o LDAP skupinách - pÅedpokládám, ÅŸe LDAP nenà pÅÃtomen. view_adminRoles_failLdapGroups = NepodaÅilo se zÃskat informace o LDAP skupinách - pÅedpokládám, ÅŸe LDAP nenà pÅÃtomen. view_adminRoles_failLdapGroupsRole = NepodaÅilo se zÃskat informace o LDAP skupinách pro roli. ##view_adminRoles_failLdapGroupsSettings = NepodaÅilo se zÃskat informace o LDAP skupinách - pÅedpokládám, ÅŸe LDAP nenà pÅÃtomen. +##view_adminRoles_failLdapRetry = NepodaÅilo se zÃskat informace o LDAP skupinách - pÅedpokládám, ÅŸe LDAP nenà pÅÃtomen. view_adminRoles_failRoles = NepodaÅilo se zÃskat role. 
view_adminRoles_globalPerms = Globalnà povolenà view_adminRoles_ldapGroups = LDAP skupiny diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties index c50c577..8e76bca 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties @@ -509,9 +509,11 @@ view_adminRoles_assignedSubjects = Zugewisene Benutzer view_adminRoles_failCreateRoleWithExistingName = Konnte die Rolle mit dem existierenden Namen [{0}] nicht anlegen. Bitte wÀhlen Sie einen anderen Namen. view_adminRoles_failLdap = Konnte nicht ermitteln, ob LDAP konfiguriert ist - gehe von keiner LDAP-Konfiguration aus. ##view_adminRoles_failLdapAvailableGroups = Konnte de LDAP-Gruppen nicht laden. Annahme ist, dass es keine gibt. +##view_adminRoles_failLdapCancelling = Konnte de LDAP-Gruppen nicht laden. Annahme ist, dass es keine gibt. view_adminRoles_failLdapGroups = Konnte de LDAP-Gruppen nicht laden. Annahme ist, dass es keine gibt. view_adminRoles_failLdapGroupsRole = Konnte die LDAP-Gruppen fÃŒr die Rolle nicht laden. ##view_adminRoles_failLdapGroupsSettings = Konnte de LDAP-Gruppen nicht laden. Annahme ist, dass es keine gibt. +##view_adminRoles_failLdapRetry = Konnte de LDAP-Gruppen nicht laden. Annahme ist, dass es keine gibt. view_adminRoles_failRoles = Konnte die Rollen nicht laden. 
view_adminRoles_globalPerms = Applikationsweite Rechte view_adminRoles_ldapGroups = LDAP-Gruppen diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties index 54fef87..a8e5795 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties @@ -508,9 +508,11 @@ view_adminRoles_assignedSubjects = å²ãåœãŠãããSubjects ##view_adminRoles_failCreateRoleWithExistingName = Failed to create role with existing name [{0}]. Please use another name. view_adminRoles_failLdap = LDAPãæ§æããããã©ããã®æ±ºå®ã«å€±æããŸãã - LDAPãç¡ãããšãä»®å®ããŸã ##view_adminRoles_failLdapAvailableGroups = å©çšå¯èœãªLDAPã°ã«ãŒããèªã¿åºãã®ã«å€±æããŸãã - LDAPã°ã«ãŒããç¡ãããšãä»®å®ããŸã +##view_adminRoles_failLdapCancelling = å©çšå¯èœãªLDAPã°ã«ãŒããèªã¿åºãã®ã«å€±æããŸãã - LDAPã°ã«ãŒããç¡ãããšãä»®å®ããŸã view_adminRoles_failLdapGroups = å©çšå¯èœãªLDAPã°ã«ãŒããèªã¿åºãã®ã«å€±æããŸãã - LDAPã°ã«ãŒããç¡ãããšãä»®å®ããŸã view_adminRoles_failLdapGroupsRole = ããŒã«ã®ããã«å©çšå¯èœãªLDAPã°ã«ãŒããããŒãããã®ã«å€±æããŸãã ##view_adminRoles_failLdapGroupsSettings = å©çšå¯èœãªLDAPã°ã«ãŒããèªã¿åºãã®ã«å€±æããŸãã - LDAPã°ã«ãŒããç¡ãããšãä»®å®ããŸã +##view_adminRoles_failLdapRetry = å©çšå¯èœãªLDAPã°ã«ãŒããèªã¿åºãã®ã«å€±æããŸãã - LDAPã°ã«ãŒããç¡ãããšãä»®å®ããŸã view_adminRoles_failRoles = ããŒã«ã®åãåºãã«å€±æããŸãã view_adminRoles_globalPerms = ã°ããŒãã«æš©é view_adminRoles_ldapGroups = LDAPã°ã«ãŒã diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties index 4deaf49..872ad93 100644 --- 
a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties @@ -459,9 +459,11 @@ view_adminRoles_assignedSubjects = í ë¹ë Subjects view_adminRoles_failCreateRoleWithExistingName = êž°ì¡Ž ìŽëŠ [{0}]곌 ìí ì ë§ë€ì§ 못íìµëë€. ë€ë¥ž ìŽëŠì ì¬ì©íììì€. view_adminRoles_failLdap = LDAPìŽ ì€ì ëìëì§ ì¬ë¶ì ê²°ì ì ì€íšíìµëë€ - LDAPìŽ ìë ê²ì ê°ì í©ëë€. ##view_adminRoles_failLdapAvailableGroups = ì¬ì©ê°ë¥í LDAP 귞룹ì ìœëë° ì€íšíìµëë€ - LDAP ê·žë£¹ìŽ ìë ê²ì ê°ì í©ëë€. +##view_adminRoles_failLdapCancelling = ì¬ì©ê°ë¥í LDAP 귞룹ì ìœëë° ì€íšíìµëë€ - LDAP ê·žë£¹ìŽ ìë ê²ì ê°ì í©ëë€. view_adminRoles_failLdapGroups = ì¬ì©ê°ë¥í LDAP 귞룹ì ìœëë° ì€íšíìµëë€ - LDAP ê·žë£¹ìŽ ìë ê²ì ê°ì í©ëë€. view_adminRoles_failLdapGroupsRole = ìí ì ìíŽ ì¬ì©ê°ë¥í LDAP 귞룹ì ë¡ëíëë° ì€íšíìµëë€. ##view_adminRoles_failLdapGroupsSettings = ì¬ì©ê°ë¥í LDAP 귞룹ì ìœëë° ì€íšíìµëë€ - LDAP ê·žë£¹ìŽ ìë ê²ì ê°ì í©ëë€. +##view_adminRoles_failLdapRetry = ì¬ì©ê°ë¥í LDAP 귞룹ì ìœëë° ì€íšíìµëë€ - LDAP ê·žë£¹ìŽ ìë ê²ì ê°ì í©ëë€. view_adminRoles_failRoles = ìí ì ê±°ì ì€íšíìµëë€. view_adminRoles_globalPerms = êžë¡ë² ê¶í view_adminRoles_ldapGroups = LDAP 귞룹 diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties index 35d2aec..9a4e437 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties @@ -514,9 +514,11 @@ view_adminRoles_assignedSubjects = Perfis Associados ##view_adminRoles_failCreateRoleWithExistingName = Failed to create role with existing name [{0}]. Please Use another name. 
view_adminRoles_failLdap = Falha ao determinar se o LDAP foi configurado - assumindo como LDAP n\u00E3o configurado. ##view_adminRoles_failLdapAvailableGroups = Falha ao recuperar os grupos dispon\u00EDveis no LDAP - assumindo como LDAP sem grupos. +##view_adminRoles_failLdapCancelling = Falha ao recuperar os grupos dispon\u00EDveis no LDAP - assumindo como LDAP sem grupos. view_adminRoles_failLdapGroups = Falha ao recuperar os grupos dispon\u00EDveis no LDAP - assumindo como LDAP sem grupos. view_adminRoles_failLdapGroupsRole = Falha ao carregar grupos do LDAP dispon\u00EDveis para o perfil. ##view_adminRoles_failLdapGroupsSettings = Falha ao recuperar os grupos dispon\u00EDveis no LDAP - assumindo como LDAP sem grupos. +##view_adminRoles_failLdapRetry = Falha ao recuperar os grupos dispon\u00EDveis no LDAP - assumindo como LDAP sem grupos. view_adminRoles_failRoles = Falha ao recuperar perfis. view_adminRoles_globalPerms = Permiss\u00F5es Globais view_adminRoles_ldapGroups = Grupos LDAP diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties index 3035f91..d978d76 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties @@ -2595,14 +2595,15 @@ view_adminRoles_assignedSubjects = ÐазМаÑеММÑе ÑÑбÑекÑÑ ##view_adminRoles_failCreateRoleWithExistingName = Failed to create role with existing name [{0}]. Please use another name. view_adminRoles_failLdap = Ðе ÑЎалПÑÑ ÐŸÐ¿ÑеЎелОÑÑ, МаÑÑÑПеМ лО LDAP - пÑеЎпПлПжОÑелÑМП LDAP ПÑÑÑÑÑÑвÑеÑ. ##view_adminRoles_failLdapAvailableGroups = Ðе ÑЎалПÑÑ Ð¿ÐŸÐ»ÑÑОÑÑ LDAP-гÑÑппÑ. 
ÐÑеЎпПлПжОÑелÑМП LDAP гÑÑÐ¿Ð¿Ñ ÐŸÑÑÑÑÑÑвÑÑÑ +##view_adminRoles_failLdapCancelling = Ðе ÑЎалПÑÑ Ð¿ÐŸÐ»ÑÑОÑÑ LDAP-гÑÑппÑ. ÐÑеЎпПлПжОÑелÑМП LDAP гÑÑÐ¿Ð¿Ñ ÐŸÑÑÑÑÑÑвÑÑÑ view_adminRoles_failLdapGroups = Ðе ÑЎалПÑÑ Ð¿ÐŸÐ»ÑÑОÑÑ LDAP-гÑÑппÑ. ÐÑеЎпПлПжОÑелÑМП LDAP гÑÑÐ¿Ð¿Ñ ÐŸÑÑÑÑÑÑвÑÑÑ view_adminRoles_failLdapGroupsRole = Ðе ÑЎалПÑÑ Ð·Ð°Ð³ÑÑзОÑÑ LDAP гÑÑппÑ, ЎПÑÑÑпМÑе ÐŽÐ»Ñ ÑПлО. ##view_adminRoles_failLdapGroupsSettings = Ðе ÑЎалПÑÑ Ð¿ÐŸÐ»ÑÑОÑÑ LDAP-гÑÑппÑ. ÐÑеЎпПлПжОÑелÑМП LDAP гÑÑÐ¿Ð¿Ñ ÐŸÑÑÑÑÑÑвÑÑÑ +##view_adminRoles_failLdapRetry = Ðе ÑЎалПÑÑ Ð¿ÐŸÐ»ÑÑОÑÑ LDAP-гÑÑппÑ. ÐÑеЎпПлПжОÑелÑМП LDAP гÑÑÐ¿Ð¿Ñ ÐŸÑÑÑÑÑÑвÑÑÑ view_adminRoles_failRoles = Ðе ÑЎалПÑÑ Ð¿ÐŸÐ»ÑÑОÑÑ ÑПлО. view_adminRoles_globalPerms = ÐлПбалÑÐœÑе пПлМПЌПÑÐžÑ view_adminRoles_ldapGroups = LDAP-гÑÑÐ¿Ð¿Ñ view_adminRoles_ldapGroupsReadOnly = ЎаММÑе LDAP гÑÑпп ЎПÑÑÑÐ¿ÐœÑ ÑПлÑкП ÐŽÐ»Ñ ÑÑÐµÐœÐžÑ -##view_adminRoles_noLdap = The LDAP security integration is not configured. To configure LDAP, go to <a {0}>{1}</a>. ##view_adminRoles_ldapGroupsSettingsReadOnly = ЎаММÑе LDAP гÑÑпп ЎПÑÑÑÐ¿ÐœÑ ÑПлÑкП ÐŽÐ»Ñ ÑÑÐµÐœÐžÑ ##view_adminRoles_ldapQueryPageSize = ÐПлМПЌПÑÐžÑ ÑеÑÑÑÑа ##view_adminRoles_ldapTooManyResults = ЎаММÑе LDAP гÑÑпп ЎПÑÑÑÐ¿ÐœÑ ÑПлÑкП ÐŽÐ»Ñ ÑÑÐµÐœÐžÑ @@ -2611,6 +2612,7 @@ view_adminRoles_ldapGroupsReadOnly = ЎаММÑе LDAP гÑÑпп ЎПÑÑÑпМ ##view_adminRoles_ldapWarnParsingManyPagesResults = ЎаММÑе LDAP гÑÑпп ЎПÑÑÑÐ¿ÐœÑ ÑПлÑкП ÐŽÐ»Ñ ÑÑÐµÐœÐžÑ ##view_adminRoles_ldapWarnQueryTakingLongResults = ЎаММÑе LDAP гÑÑпп ЎПÑÑÑÐ¿ÐœÑ ÑПлÑкП ÐŽÐ»Ñ ÑÑÐµÐœÐžÑ ##view_adminRoles_ldapWarnTooManyResults = ЎаММÑе LDAP гÑÑпп ЎПÑÑÑÐ¿ÐœÑ ÑПлÑкП ÐŽÐ»Ñ ÑÑÐµÐœÐžÑ +##view_adminRoles_noLdap = The LDAP security integration is not configured. To configure LDAP, go to <a {0}>{1}</a>. view_adminRoles_permissions_autoselecting_configureRead_implied = ÐвÑПЌаÑОÑеÑкО ПÑклÑÑеМП CONFIGURE_WRITE пПлМПЌПÑОе, пПÑкПлÑÐºÑ ÐŸÑÑÑÑÑÑвÑÐµÑ CONFIGURE_READ... 
view_adminRoles_permissions_autoselecting_configureWrite_implied = ÐвÑПЌаÑОÑеÑкО вÑбÑаМП CONFIGURE_READ пПлМПЌПÑОе, пПÑкПлÑÐºÑ CONFIGURE_WRITE пПЎÑазÑЌеваеÑ, ÑÑП ... ##view_adminRoles_permissions_autoselecting_manageBundleGroups_implied = Autoselected View Bundles, which is granted by Manage Bundle Groups... diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties index 615d3a0..b4f0369 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties @@ -502,9 +502,11 @@ view_adminRoles_assignedSubjects = \u5206\u914d\u7684\u4e3b\u9898 ##view_adminRoles_failCreateRoleWithExistingName = Failed to create role with existing name [{0}]. Please Use another name. view_adminRoles_failLdap = \u65e0\u6cd5\u786e\u5b9aLDAP - \u5047\u5b9a\u6ca1\u6709LDAP. ##view_adminRoles_failLdapAvailableGroups = \u67e5\u8be2\u6709\u6548LDAP\u7ec4\u5931\u8d25 - \u5047\u5b9a\u65e0DAP\u7ec4. +##view_adminRoles_failLdapCancelling = \u67e5\u8be2\u6709\u6548LDAP\u7ec4\u5931\u8d25 - \u5047\u5b9a\u65e0DAP\u7ec4. view_adminRoles_failLdapGroups = \u67e5\u8be2\u6709\u6548LDAP\u7ec4\u5931\u8d25 - \u5047\u5b9a\u65e0DAP\u7ec4. view_adminRoles_failLdapGroupsRole = \u52a0\u8f7dLDAP\u7ec4\u89d2\u8272\u5931\u8d25. ##view_adminRoles_failLdapGroupsSettings = \u67e5\u8be2\u6709\u6548LDAP\u7ec4\u5931\u8d25 - \u5047\u5b9a\u65e0DAP\u7ec4. +##view_adminRoles_failLdapRetry = \u67e5\u8be2\u6709\u6548LDAP\u7ec4\u5931\u8d25 - \u5047\u5b9a\u65e0DAP\u7ec4. view_adminRoles_failRoles = \u53d6\u5f97\u89d2\u8272\u5931\u8d25. 
view_adminRoles_globalPerms = \u5168\u5c40\u6388\u6743 view_adminRoles_ldapGroups = LDAP\u7ec4 diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java index dc53a3c..5c8031d 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java @@ -604,7 +604,13 @@ SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.name(), "" entry.put("name", name); entry.put("description", description); groupDetailsMap.add(entry); - resultCount++; + + resultCount++;//monitor the number of groups returned during this query. + groupQueryResultCount = resultCount;//update result count + if (groupQueryPageCount == 0) { + groupQueryPageCount++; + } + groupQueryCurrentTime = System.currentTimeMillis(); } }
commit 73ebb3821a8ee7a8e1735e660043b0cb95f5cfdf Author: Simeon Pinder spinder@fulliautomatix.conchfritter.com Date: Fri Aug 9 16:46:29 2013 -0400
[BZ 990576] add I18N for new fields.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleLdapGroupSelector.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleLdapGroupSelector.java index 8d53b82..c4e7145 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleLdapGroupSelector.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleLdapGroupSelector.java @@ -97,16 +97,13 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c int groupPanelWidth = 375; int groupPanelHeight = 150;
- // final TextItem search = new TextItem("search", - // MSG.common_title_search()); - // Structure the display area into two separate display regions // Available Groups region final DynamicForm availableGroupDetails = new DynamicForm(); { availableGroupDetails.setWidth(groupPanelWidth); availableGroupDetails.setHeight(groupPanelHeight); - availableGroupDetails.setGroupTitle("Available Groups Results"); + availableGroupDetails.setGroupTitle(MSG.common_title_ldapGroupsAvailable()); availableGroupDetails.setIsGroup(true); availableGroupDetails.setWrapItemTitles(false); //add itemChanged handler to listen for changes to SearchItem @@ -129,21 +126,21 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c } }); } - final TextItem resultCountItem = new TextItem("resultCount", "Groups Found"); + final TextItem resultCountItem = new TextItem("resultCount", MSG.common_title_groupsFound()); { resultCountItem.setCanEdit(false); resultCountItem.setWidth("100%"); } - final TextItem pageCountItem = new TextItem("pageCount", "Query Pages Parsed"); + final TextItem pageCountItem = new TextItem("pageCount", MSG.common_title_queryPagesParsed()); { pageCountItem.setCanEdit(false); pageCountItem.setWidth("100%"); } - final TextAreaItem adviceItem = new TextAreaItem("advice", "Suggest"); + final TextAreaItem adviceItem = new TextAreaItem("advice", MSG.common_title_suggest()); { adviceItem.setWidth("100%"); adviceItem.setHeight(20); - String feedback = "(None)"; + String feedback = MSG.common_val_none(); adviceItem.setValue(feedback); adviceItem.setTooltip(feedback); adviceItem.setDisabled(true); @@ -162,13 +159,12 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c } }); } - // final TextItem search = new TextItem("search", - // MSG.common_title_search()); + //Customize Search component { - searchTextItem.setName("search"); - searchTextItem.setTitle("Filter[results below]"); + 
searchTextItem.setName(MSG.common_title_search()); + searchTextItem.setTitle(MSG.common_title_filterResultsBelow()); searchTextItem.setWidth("100%"); - searchTextItem.setTooltip("Start typing here to only show groups containing the typed characters."); + searchTextItem.setTooltip(MSG.common_msg_typeToFilterResults()); } final FormItemIcon loadingIcon = new FormItemIcon(); final FormItemIcon successIcon = new FormItemIcon(); @@ -186,8 +182,8 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c final StaticTextItem groupQueryStatus = new StaticTextItem(); { groupQueryStatus.setName("groupQueryStatus"); - groupQueryStatus.setTitle("Query Progress"); - groupQueryStatus.setDefaultValue("Loading..."); + groupQueryStatus.setTitle(MSG.common_title_queryProgress()); + groupQueryStatus.setDefaultValue(MSG.common_msg_loading()); groupQueryStatus.setIcons(loadingIcon); } availableGroupDetails.setItems(resultCountItem, pageCountItem, groupQueryStatus, adviceItem, searchTextItem); @@ -197,21 +193,22 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c { ldapGroupSettings.setWidth(groupPanelWidth); ldapGroupSettings.setHeight(groupPanelHeight); - ldapGroupSettings.setGroupTitle("[Read Only] Ldap Group Settings. 
Edit in 'System Settings'"); + ldapGroupSettings.setGroupTitle(MSG.view_adminRoles_ldapGroupsSettingsReadOnly()); ldapGroupSettings.setIsGroup(true); ldapGroupSettings.setWrapItemTitles(false); } - final TextItem groupSearch = new TextItem("groupSearch", "Search Filter"); + final TextItem groupSearch = new TextItem("groupSearch", MSG.view_admin_systemSettings_LDAPFilter_name()); { groupSearch.setCanEdit(false); groupSearch.setWidth("100%"); } - final TextItem groupMember = new TextItem("groupMember", "Member Filter"); + final TextItem groupMember = new TextItem("groupMember", MSG.view_admin_systemSettings_LDAPGroupMember_name()); { groupMember.setCanEdit(false); groupMember.setWidth("100%"); } - final CheckboxItem groupQueryPagingItem = new CheckboxItem("groupQueryEnable", "Query Paging Enabled"); + final CheckboxItem groupQueryPagingItem = new CheckboxItem("groupQueryEnable", + MSG.view_admin_systemSettings_LDAPGroupUsePaging_name()); { groupQueryPagingItem.setCanEdit(false); groupQueryPagingItem.setValue(false); @@ -221,12 +218,14 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c //You have to set this attribute groupQueryPagingItem.setAttribute("labelAsTitle", true); } - final TextItem groupQueryPagingCountItem = new TextItem("groupQueryCount", "Query Page Size"); + final TextItem groupQueryPagingCountItem = new TextItem("groupQueryCount", + MSG.view_adminRoles_ldapQueryPageSize()); { groupQueryPagingCountItem.setCanEdit(false); groupQueryPagingCountItem.setWidth("100%"); } - final CheckboxItem groupUsePosixGroupsItem = new CheckboxItem("groupUsePosixGroups", "Use Posix Enabled"); + final CheckboxItem groupUsePosixGroupsItem = new CheckboxItem("groupUsePosixGroups", + MSG.view_admin_systemSettings_LDAPGroupUsePosixGroup_name()); { groupUsePosixGroupsItem.setCanEdit(false); groupUsePosixGroupsItem.setValue(false); @@ -262,9 +261,8 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c @Override public 
void onFailure(Throwable caught) { groupQueryStatus.setIcons(failIcon); - groupQueryStatus.setDefaultValue("Fail: Unable to retrieve system settings."); - //TODO: update this message - CoreGUI.getErrorHandler().handleError(MSG.view_adminRoles_failLdap(), caught); + groupQueryStatus.setDefaultValue(MSG.view_adminRoles_failLdapGroupsSettings()); + CoreGUI.getErrorHandler().handleError(MSG.view_adminRoles_failLdapGroupsSettings(), caught); }
@Override @@ -291,10 +289,10 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c //launch operations to populate/refresh LDAP Group Query contents. final Timer availableGroupsTimer = new Timer() { public void run() { - final String attention = "Attention"; - final String success = "Success"; - final String none = "(None)"; - final String failed = "Failed"; + final String attention = MSG.common_status_attention(); + final String success = MSG.common_status_success(); + final String none = MSG.common_val_none(); + final String failed = MSG.common_status_failed(); //make request to RHQ about state of latest LDAP GWT request GWTServiceLookup.getLdapService().findAvailableGroupsStatus( new AsyncCallback<Set<Map<String, String>>>() { @@ -302,11 +300,11 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c public void onFailure(Throwable caught) { groupQueryStatus.setIcons(failIcon); groupQueryStatus.setDefaultValue(failed); - String adviceValue = "Failed: Unable to retrieve status for latest AvailableGroups() call."; + String adviceValue = MSG.view_adminRoles_failLdapAvailableGroups(); adviceItem.setValue(adviceValue); adviceItem.setTooltip(adviceValue); - //TODO: update this message - CoreGUI.getErrorHandler().handleError(MSG.view_adminRoles_failLdap(), caught); + CoreGUI.getErrorHandler() + .handleError(MSG.view_adminRoles_failLdapAvailableGroups(), caught); retryAttempt++; if (retryAttempt > 3) { cancel();//kill thread @@ -342,9 +340,10 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c } } //Update status information - String warnTooManyResults = " A lot of results are being returned. Modify your 'Group Search Filter' to return fewer results."; - String warnQueryTakingLongResults = " Query taking a while to complete. Modify your 'Group Search Filter' to return fewer results."; - String warnParsingManyPagesResults = " Query requires a lot of pages. 
Modify your 'Group Search Page Size' to return more results per request."; + String warnTooManyResults = MSG.view_adminRoles_ldapWarnTooManyResults(); + String warnQueryTakingLongResults = MSG.view_adminRoles_ldapWarnQueryTakingLongResults(); + String warnParsingManyPagesResults = MSG.view_adminRoles_ldapWarnParsingManyPagesResults(); + boolean resultCountWarning = false; boolean pageCountWarning = false; boolean timePassingWarning = false; @@ -368,12 +367,11 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c //act on status details to add extra perf suggestions. Kill threads older than 30 mins long parseTime = System.currentTimeMillis() - ldapGroupSelectorRequestId; if ((queryCompleted) || (parseTime) > 30 * 60 * 1000) { - String tooManyResults = "Too many results to show all. Modify your 'Group Search Filter' to return fewer than 20000 results."; - String queryTookLongResults = " Query took " + parseTime - + " ms to complete. Modify your 'Group Search Filter' to return fewer results."; - String queryTookManyPagesResults = " Query required " - + pageCount - + " pages to complete. 
Modify 'Group Search Page Size' to return more results per request."; + String tooManyResults = MSG.view_adminRoles_ldapTooManyResults(); + String queryTookLongResults = MSG.view_adminRoles_ldapTookLongResults(parseTime + ""); + String queryTookManyPagesResults = MSG + .view_adminRoles_ldapTookManyPagesResults(pageCount + ""); + adviceItem.setDisabled(false); groupQueryStatus.setIcons(attentionIcon); groupQueryStatus.setDefaultValue(attention); diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties index 67ec087..869a8ce 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties @@ -136,11 +136,13 @@ common_msg_noItemsToShow = No items to show common_msg_notYetImplemented = Not Yet Implemented common_msg_see_more = see more... common_msg_step_x_of_y = Step {0} of {1} +common_msg_typeToFilterResults = Start typing here to only show results containing the typed characters. common_severity_debug = Debug common_severity_error = Error common_severity_fatal = Fatal common_severity_info = Info common_severity_warn = Warn +common_status_attention = Attention common_status_canceled = Canceled common_status_deferred = Deferred common_status_failed = Failed @@ -192,11 +194,13 @@ common_title_edit = Edit? common_title_edit_mode = Edit Mode common_title_enabled = Enabled? 
common_title_end = End +common_title_filterResultsBelow = Filter[results below] common_title_generalProp = General Properties common_title_group = Group common_title_group_def_total = Group Definition Total common_title_group_member_health = Group Member Health common_title_groups = Groups +common_title_groupsFound = Groups Found common_title_help = Help common_title_host = Host common_title_id = ID @@ -210,6 +214,7 @@ common_title_kind = Kind common_title_lastUpdated = Last Updated common_title_lastUpdatedBy = Last Updated By common_title_ldapGroups = LDAP Groups +common_title_ldapGroupsAvailable = Available Groups Results common_title_mashup = Mashup common_title_members_reporting = Members Reporting common_title_message = Message @@ -234,6 +239,8 @@ common_title_platform_total = Platform Total common_title_plugin = Plugin common_title_port = Port common_title_providers = Providers +common_title_queryPagesParsed = Query Pages Parsed +common_title_queryProgress = Query Progress common_title_recent_alerts = Recent Alerts common_title_recent_bundle_deployments = Recent Bundle Deployments common_title_recent_configuration_updates = Recent Configuration Updates @@ -273,6 +280,7 @@ common_title_sort_order_tooltip = Sets sort order for results. common_title_start = Start common_title_status = Status common_title_stop = Stop +common_title_suggest = Suggest common_title_summary = Summary common_title_tag_cloud = Tag Cloud common_title_the = The @@ -503,12 +511,22 @@ view_adminRoles_assignedGroups = Assigned Resource Groups view_adminRoles_assignedSubjects = Assigned Subjects view_adminRoles_failCreateRoleWithExistingName = Failed to create role with existing name [{0}]. Please use another name. view_adminRoles_failLdap = Failed to determine if LDAP configured - assuming no LDAP. +view_adminRoles_failLdapAvailableGroups = Failed: Unable to retrieve status for latest AvailableGroups() call. 
view_adminRoles_failLdapGroups = Failed to retrieve available LDAP groups - assuming no LDAP groups. view_adminRoles_failLdapGroupsRole = Failed to load LDAP groups available for role. +view_adminRoles_failLdapGroupsSettings = Failed to retrieve system settings details for LDAP groups. view_adminRoles_failRoles = Failed to fetch roles. view_adminRoles_globalPerms = Global Permissions view_adminRoles_ldapGroups = LDAP Groups view_adminRoles_ldapGroupsReadOnly = LDAP group data is read only +view_adminRoles_ldapGroupsSettingsReadOnly = [Read Only] Ldap Group Settings. Edit in 'System Settings' +view_adminRoles_ldapQueryPageSize = Query Page Size +view_adminRoles_ldapTooManyResults = Too many results to show all. Modify your 'Group Search Filter' to return fewer than 20000 results. +view_adminRoles_ldapTookLongResults = Query took {0} ms to complete. Modify your 'Group Search Filter' to return fewer results. +view_adminRoles_ldapTookManyPagesResults = Query required {0} pages to complete. Modify 'Group Search Page Size' to return more results per request. +view_adminRoles_ldapWarnParsingManyPagesResults = Query requires a lot of pages. Modify your 'Group Search Page Size' to return more results per request. +view_adminRoles_ldapWarnQueryTakingLongResults = \ Query taking a while to complete. Modify your 'Group Search Filter' to return fewer results. +view_adminRoles_ldapWarnTooManyResults = \ A lot of results are being returned. Modify your 'Group Search Filter' to return fewer results. view_adminRoles_noLdap = The LDAP security integration is not configured. To configure LDAP, go to <a {0}>{1}</a>. view_adminRoles_permissions_autoselecting_configureRead_implied = Autodeselected CONFIGURE_WRITE permission, since lack of CONFIGURE_READ implies lack of it... view_adminRoles_permissions_autoselecting_configureWrite_implied = Autoselected CONFIGURE_READ permission, since CONFIGURE_WRITE implies it... 
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties index 2c97c84..4e6532d 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties @@ -159,11 +159,13 @@ common_msg_noItemsToShow = Ŝádné poloÅŸky k zobrazenà common_msg_notYetImplemented = JeÅ¡tÄ nebylo implementováno common_msg_see_more = vÃce... common_msg_step_x_of_y = Krok {0} z {1} +##common_msg_typeToFilterResults = JeÅ¡tÄ nebylo implementováno common_severity_debug = LadÄnà common_severity_error = Chyba common_severity_fatal = Fatálnà common_severity_info = InformaÄnà common_severity_warn = Varovánà +##common_status_attention = ÃspÄch common_status_canceled = ZruÅ¡eno common_status_deferred = OdloÅŸeno common_status_failed = Selhalo @@ -215,11 +217,13 @@ common_title_edit_mode = EditaÄnà mód common_title_enabled = Povoleno? 
common_title_end = Konec common_title_favorites = OblÃbené +##common_title_filterResultsBelow = OblÃbené common_title_generalProp = Obecné vlastnosti common_title_group = Skupina common_title_group_def_total = Definic skupin celkem common_title_group_member_health = Zdravà Älena skupiny common_title_groups = Skupiny +##common_title_groupsFound = Skupiny common_title_help = NápovÄda common_title_host = Host common_title_icon = Ikona @@ -234,6 +238,7 @@ common_title_kind = Druh common_title_lastUpdated = Naposledy modifikováno common_title_lastUpdatedBy = Naposledy modifikoval common_title_ldapGroups = LDAP skupiny +##common_title_ldapGroupsAvailable = LDAP skupiny common_title_mashup = Mashup common_title_members_reporting = Reportovánà Älenů common_title_message = Zpráva @@ -258,6 +263,8 @@ common_title_platform_total = Celkem platforem common_title_plugin = ZásuvnÃœ modul common_title_port = Port common_title_providers = Poskytovatelé +##common_title_queryPagesParsed = LDAP skupiny +##common_title_queryProgress = LDAP skupiny common_title_recent_alerts = Nedávné vÃœstrahy common_title_recent_bundle_deployments = Nedávno nasazené balÃky common_title_recent_configuration_updates = Nedávno zmÄnÄné konfigurace @@ -297,6 +304,7 @@ common_title_sort_order_tooltip = Nastavà Åazenà pro vÃœsledky. common_title_start = Start common_title_status = Stav common_title_stop = Stop +##common_title_suggest = Shrnutà common_title_summary = Shrnutà common_title_tag_cloud = Tag cloud common_title_timestamp = Datum/Äas @@ -522,12 +530,22 @@ view_adminRoles_assignedGroups = PÅiÅazené skupiny zdrojů view_adminRoles_assignedSubjects = PÅiÅazené subjekty view_adminRoles_failCreateRoleWithExistingName = NepodaÅilo se vytvoÅit roli s existujÃcÃm jménem [{0}]. PouÅŸÃjte prosÃm jiné jméno. view_adminRoles_failLdap = NepodaÅilo se urÄit je-li LDAP nastaven - pÅedpokládám, ÅŸe LDAP nenà pÅÃtomen. 
+##view_adminRoles_failLdapAvailableGroups = NepodaÅilo se zÃskat informace o LDAP skupinách - pÅedpokládám, ÅŸe LDAP nenà pÅÃtomen. view_adminRoles_failLdapGroups = NepodaÅilo se zÃskat informace o LDAP skupinách - pÅedpokládám, ÅŸe LDAP nenà pÅÃtomen. view_adminRoles_failLdapGroupsRole = NepodaÅilo se zÃskat informace o LDAP skupinách pro roli. +##view_adminRoles_failLdapGroupsSettings = NepodaÅilo se zÃskat informace o LDAP skupinách - pÅedpokládám, ÅŸe LDAP nenà pÅÃtomen. view_adminRoles_failRoles = NepodaÅilo se zÃskat role. view_adminRoles_globalPerms = Globalnà povolenà view_adminRoles_ldapGroups = LDAP skupiny view_adminRoles_ldapGroupsReadOnly = data LDAP skupiny jsou jen pro Ätenà +##view_adminRoles_ldapGroupsSettingsReadOnly = data LDAP skupiny jsou jen pro Ätenà +##view_adminRoles_ldapQueryPageSize = OprávnÄnà zdroje +##view_adminRoles_ldapTooManyResults = data LDAP skupiny jsou jen pro Ätenà +##view_adminRoles_ldapTookLongResults = data LDAP skupiny jsou jen pro Ätenà +##view_adminRoles_ldapTookManyPagesResults = data LDAP skupiny jsou jen pro Ätenà +##view_adminRoles_ldapWarnParsingManyPagesResults = data LDAP skupiny jsou jen pro Ätenà +##view_adminRoles_ldapWarnQueryTakingLongResults = data LDAP skupiny jsou jen pro Ätenà +##view_adminRoles_ldapWarnTooManyResults = data LDAP skupiny jsou jen pro Ätenà view_adminRoles_noLdap = Integrace LDAP nenà nakonfigurována. K nastavenà ÅÃzenà bezpeÄnosti pÅes LDAP prosÃm navÅ¡tivte <a {0}>{1}</a>. view_adminRoles_permissions_autoselecting_configureRead_implied = Automaticky odoznaÄeno CONFIGURE_WRITE povolenÃ, protoÅŸe absence CONFIGURE_READ to implikuje... view_adminRoles_permissions_autoselecting_configureWrite_implied = Automaticky oznaÄeno CONFIGURE_READ povolenÃ, protoÅŸe CONFIGURE_WRITE jej implikuje... 
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties index d4f053b..c50c577 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties @@ -139,11 +139,13 @@ common_msg_noItemsToShow = Keine EintrÀge vorhanden common_msg_notYetImplemented = Noch nicht implementiert common_msg_see_more = mehr ... common_msg_step_x_of_y = Schritt {0} von {1} +##common_msg_typeToFilterResults = Noch nicht implementiert common_severity_debug = Debug common_severity_error = Fehler common_severity_fatal = Fatal common_severity_info = Information common_severity_warn = Warnung +##common_status_attention = Erfolg common_status_canceled = Abgebrochen common_status_deferred = Verzögert common_status_failed = Fehlgeschlagen @@ -199,6 +201,7 @@ common_title_group = Gruppe common_title_group_def_total = Anzahl Gruppen-Definitionen common_title_group_member_health = Zustand der Gruppenmitglieder common_title_groups = Gruppen +##common_title_groupsFound = Gruppen common_title_help = Hilfe common_title_host = Rechner common_title_id = ID @@ -211,6 +214,7 @@ common_title_kind = Art common_title_lastUpdated = Zuletzt aktualisiert common_title_lastUpdatedBy = Zuletzt aktualisiert durch common_title_ldapGroups = LDAP-Gruppen +##common_title_ldapGroupsAvailable = LDAP-Gruppen ##common_title_mashup = Mashup common_title_members_reporting = Anzahl berichtender Mitglieder common_title_message = Nachricht @@ -235,6 +239,8 @@ common_title_platform_total = Anzahl Platformen common_title_plugin = Plugin common_title_port = Port common_title_providers = Anbieter +##common_title_queryPagesParsed = LDAP-Gruppen +##common_title_queryProgress = 
LDAP-Gruppen common_title_recent_alerts = KÃŒrzlich ausgelöste Alarme common_title_recent_bundle_deployments = KÃŒrzliche Bundle-Deployments common_title_recent_configuration_updates = Frische Konfigurationsaktualisierungen @@ -274,6 +280,7 @@ common_title_sort_order_tooltip = Legt die Sortierreihenfolge fÃŒr Ergebnisse fe common_title_start = Start common_title_status = Status common_title_stop = Stop +##common_title_suggest = Zusammenfassung common_title_summary = Zusammenfassung common_title_tag_cloud = Tag-Wolke common_title_the = Die @@ -501,12 +508,22 @@ view_adminRoles_assignedGroups = Zugewiesene Ressourcen-Gruppen view_adminRoles_assignedSubjects = Zugewisene Benutzer view_adminRoles_failCreateRoleWithExistingName = Konnte die Rolle mit dem existierenden Namen [{0}] nicht anlegen. Bitte wÀhlen Sie einen anderen Namen. view_adminRoles_failLdap = Konnte nicht ermitteln, ob LDAP konfiguriert ist - gehe von keiner LDAP-Konfiguration aus. +##view_adminRoles_failLdapAvailableGroups = Konnte de LDAP-Gruppen nicht laden. Annahme ist, dass es keine gibt. view_adminRoles_failLdapGroups = Konnte de LDAP-Gruppen nicht laden. Annahme ist, dass es keine gibt. view_adminRoles_failLdapGroupsRole = Konnte die LDAP-Gruppen fÃŒr die Rolle nicht laden. +##view_adminRoles_failLdapGroupsSettings = Konnte de LDAP-Gruppen nicht laden. Annahme ist, dass es keine gibt. view_adminRoles_failRoles = Konnte die Rollen nicht laden. 
view_adminRoles_globalPerms = Applikationsweite Rechte view_adminRoles_ldapGroups = LDAP-Gruppen view_adminRoles_ldapGroupsReadOnly = LDAP Gruppendaten können nur gelesen werden +##view_adminRoles_ldapGroupsSettingsReadOnly = LDAP Gruppendaten können nur gelesen werden +##view_adminRoles_ldapQueryPageSize = Rechte auf Ressourcen +##view_adminRoles_ldapTooManyResults = LDAP Gruppendaten können nur gelesen werden +##view_adminRoles_ldapTookLongResults = LDAP Gruppendaten können nur gelesen werden +##view_adminRoles_ldapTookManyPagesResults = LDAP Gruppendaten können nur gelesen werden +##view_adminRoles_ldapWarnParsingManyPagesResults = LDAP Gruppendaten können nur gelesen werden +##view_adminRoles_ldapWarnQueryTakingLongResults = LDAP Gruppendaten können nur gelesen werden +##view_adminRoles_ldapWarnTooManyResults = LDAP Gruppendaten können nur gelesen werden view_adminRoles_noLdap = Die LDAP-Integration ist nicht konfiguriert. Um LDAP zu konfigurieren, wechseln sie zu <a {0}>{1}</a>. ##view_adminRoles_permissions_autoselecting_configureRead_implied = Autodeselected CONFIGURE_WRITE permission, since lack of CONFIGURE_READ implies lack of it... ##view_adminRoles_permissions_autoselecting_configureWrite_implied = Autoselected CONFIGURE_READ permission, since CONFIGURE_WRITE implies it... diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties index c7c1f0a..54fef87 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties @@ -138,11 +138,13 @@ common_msg_noItemsToShow = 衚瀺ããé ç®ã¯ãããŸãã common_msg_notYetImplemented = å®è£ ãããŠããŸãã common_msg_see_more = 詳现... 
common_msg_step_x_of_y = ã¹ããã {1} äž {0} çªã +##common_msg_typeToFilterResults = å®è£ ãããŠããŸãã common_severity_debug = ãããã° common_severity_error = ãšã©ãŒ common_severity_fatal = èŽåœç common_severity_info = æ å ± common_severity_warn = èŠå +##common_status_attention = æå common_status_canceled = ãã£ã³ã»ã« common_status_deferred = 延æ common_status_failed = 倱æ @@ -199,6 +201,7 @@ common_title_group = ã°ã«ãŒã common_title_group_def_total = ã°ã«ãŒãå®çŸ©ã®åèš common_title_group_member_health = ã°ã«ãŒãã¡ã³ããŒã®ç¶æ common_title_groups = ã°ã«ãŒã +##common_title_groupsFound = ã°ã«ãŒã common_title_help = ãã«ã common_title_host = ãã¹ã common_title_id = ID @@ -212,6 +215,7 @@ common_title_kind = çš®é¡ common_title_lastUpdated = æçµæŽæ° common_title_lastUpdatedBy = æçµæŽæ°ïŒã«ããïŒ common_title_ldapGroups = LDAPã°ã«ãŒã +##common_title_ldapGroupsAvailable = LDAPã°ã«ãŒã common_title_mashup = ããã·ã¥ã¢ãã common_title_members_reporting = ã¡ã³ããŒã®ãªããŒã common_title_message = ã¡ãã»ãŒãž @@ -236,6 +240,8 @@ common_title_platform_total = ãã©ãããã©ãŒã ã®åèš common_title_plugin = ãã©ã°ã€ã³ common_title_port = ããŒã common_title_providers = ãããã€ã +##common_title_queryPagesParsed = LDAPã°ã«ãŒã +##common_title_queryProgress = LDAPã°ã«ãŒã common_title_recent_alerts = æè¿ã®ã¢ã©ãŒã common_title_recent_bundle_deployments = æè¿ã®ãã³ãã«ã®ããã〠common_title_recent_configuration_updates = æè¿ã®æ§æã®æŽæ° @@ -275,6 +281,7 @@ common_title_sort_order_tooltip = çµæã®ãœãŒãé ãèšå® common_title_start = éå§ common_title_status = ã¹ããŒã¿ã¹ common_title_stop = åæ¢ +##common_title_suggest = ãµã㪠common_title_summary = ãµã㪠common_title_tag_cloud = ã¿ã°ã¯ã©ãŠã common_title_timestamp = æ¥ä»/æé @@ -500,12 +507,22 @@ view_adminRoles_assignedGroups = å²ãåœãŠããããªãœãŒã¹ã°ã«ãŒã view_adminRoles_assignedSubjects = å²ãåœãŠãããSubjects ##view_adminRoles_failCreateRoleWithExistingName = Failed to create role with existing name [{0}]. Please use another name. 
view_adminRoles_failLdap = LDAPãæ§æããããã©ããã®æ±ºå®ã«å€±æããŸãã - LDAPãç¡ãããšãä»®å®ããŸã +##view_adminRoles_failLdapAvailableGroups = å©çšå¯èœãªLDAPã°ã«ãŒããèªã¿åºãã®ã«å€±æããŸãã - LDAPã°ã«ãŒããç¡ãããšãä»®å®ããŸã view_adminRoles_failLdapGroups = å©çšå¯èœãªLDAPã°ã«ãŒããèªã¿åºãã®ã«å€±æããŸãã - LDAPã°ã«ãŒããç¡ãããšãä»®å®ããŸã view_adminRoles_failLdapGroupsRole = ããŒã«ã®ããã«å©çšå¯èœãªLDAPã°ã«ãŒããããŒãããã®ã«å€±æããŸãã +##view_adminRoles_failLdapGroupsSettings = å©çšå¯èœãªLDAPã°ã«ãŒããèªã¿åºãã®ã«å€±æããŸãã - LDAPã°ã«ãŒããç¡ãããšãä»®å®ããŸã view_adminRoles_failRoles = ããŒã«ã®åãåºãã«å€±æããŸãã view_adminRoles_globalPerms = ã°ããŒãã«æš©é view_adminRoles_ldapGroups = LDAPã°ã«ãŒã view_adminRoles_ldapGroupsReadOnly = LDAPã°ã«ãŒãããŒã¿ã¯èªã¿åºãå°çšã§ã +##view_adminRoles_ldapGroupsSettingsReadOnly = LDAPã°ã«ãŒãããŒã¿ã¯èªã¿åºãå°çšã§ã +##view_adminRoles_ldapQueryPageSize = ãªãœãŒã¹æš©é +##view_adminRoles_ldapTooManyResults = LDAPã°ã«ãŒãããŒã¿ã¯èªã¿åºãå°çšã§ã +##view_adminRoles_ldapTookLongResults = LDAPã°ã«ãŒãããŒã¿ã¯èªã¿åºãå°çšã§ã +##view_adminRoles_ldapTookManyPagesResults = LDAPã°ã«ãŒãããŒã¿ã¯èªã¿åºãå°çšã§ã +##view_adminRoles_ldapWarnParsingManyPagesResults = LDAPã°ã«ãŒãããŒã¿ã¯èªã¿åºãå°çšã§ã +##view_adminRoles_ldapWarnQueryTakingLongResults = LDAPã°ã«ãŒãããŒã¿ã¯èªã¿åºãå°çšã§ã +##view_adminRoles_ldapWarnTooManyResults = LDAPã°ã«ãŒãããŒã¿ã¯èªã¿åºãå°çšã§ã view_adminRoles_noLdap = LDAPã»ãã¥ãªãã£ã®çµ±åã¯æ§æãããŠããŸãããLDAPãæ§æããã«ã¯ã <a {0}>{1}</a>ã«è¡ã£ãŠãã ããã view_adminRoles_permissions_autoselecting_configureRead_implied = CONFIGURE_WRITEæš©éãèªåçã«éžæãããŸããããªããªããCONFIGURE_READãç¡ãããšããããæ瀺ããŠããããã§ãã view_adminRoles_permissions_autoselecting_configureWrite_implied = CONFIGURE_READæš©éãèªåçã«éžæãããŸããããªããªããCONFIGURE_WRITEããããæ瀺ããŠããããã§ãã diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties index 0213540..4deaf49 100644 --- 
a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties @@ -132,10 +132,12 @@ common_msg_noItemsToShow = íìí íëª©ìŽ ììµëë€. common_msg_notYetImplemented = 구íëì§ ìììµëë€. common_msg_see_more = ììží볎Ʞ... common_msg_step_x_of_y = ëšê³ {0} ì€ {1} ë²ì§ž +##common_msg_typeToFilterResults = 구íëì§ ìììµëë€. common_severity_debug = ëë²ê·ž common_severity_error = ìë¬ common_severity_info = ì 볎 common_severity_warn = ê²œê³ +##common_status_attention = ì±ê³µ common_status_canceled = ì·šì common_status_deferred = ì°êž° common_status_failed = ì€íš @@ -187,6 +189,7 @@ common_title_group = 귞룹 common_title_group_def_total = ì 첎 귞룹 ì ì common_title_group_member_health = ê·žë£¹ë©€ë² ìí common_title_groups = 귞룹 +##common_title_groupsFound = 귞룹 common_title_help = ëìë§ common_title_host = ížì€íž common_title_id = ID @@ -197,6 +200,7 @@ common_title_inventory = ìžë²€í 늬 common_title_inventoryStatus = ìžë²€í 늬 ìí common_title_inventorySummary = ìžë²€í 늬 ììœ common_title_ldapGroups = LDAP 귞룹 +##common_title_ldapGroupsAvailable = LDAP 귞룹 common_title_members_reporting = ë©€ë² ëŠ¬í¬íž common_title_message = ë©ìì§ common_title_metric = ìž¡ì í목 @@ -219,6 +223,8 @@ common_title_platform_total = ì 첎 íë«íŒ common_title_plugin = íë¬ê·žìž common_title_port = í¬íž common_title_providers = ê³µêžì +##common_title_queryPagesParsed = LDAP 귞룹 +##common_title_queryProgress = LDAP 귞룹 common_title_recent_alerts = ìµê·Œ ì늌 common_title_recent_bundle_deployments = ìµê·Œ ë²ë€ ë°°í¬ common_title_recent_drifts = ìµê·Œ ë늬ííž @@ -254,6 +260,7 @@ common_title_sort_order_tooltip = 결곌ì ì ë ¬ ìì륌 ì€ì common_title_start = ìì common_title_status = ìí common_title_stop = ì ì§ +##common_title_suggest = ììœ common_title_summary = ììœ common_title_tag_cloud = íê·ž íŽëŒì°ë common_title_timestamp = ë ì§/ìê° @@ -451,12 +458,15 @@ 
view_adminRoles_assignedGroups = í ë¹ë ìì 귞룹 view_adminRoles_assignedSubjects = í ë¹ë Subjects view_adminRoles_failCreateRoleWithExistingName = êž°ì¡Ž ìŽëŠ [{0}]곌 ìí ì ë§ë€ì§ 못íìµëë€. ë€ë¥ž ìŽëŠì ì¬ì©íììì€. view_adminRoles_failLdap = LDAPìŽ ì€ì ëìëì§ ì¬ë¶ì ê²°ì ì ì€íšíìµëë€ - LDAPìŽ ìë ê²ì ê°ì í©ëë€. +##view_adminRoles_failLdapAvailableGroups = ì¬ì©ê°ë¥í LDAP 귞룹ì ìœëë° ì€íšíìµëë€ - LDAP ê·žë£¹ìŽ ìë ê²ì ê°ì í©ëë€. view_adminRoles_failLdapGroups = ì¬ì©ê°ë¥í LDAP 귞룹ì ìœëë° ì€íšíìµëë€ - LDAP ê·žë£¹ìŽ ìë ê²ì ê°ì í©ëë€. view_adminRoles_failLdapGroupsRole = ìí ì ìíŽ ì¬ì©ê°ë¥í LDAP 귞룹ì ë¡ëíëë° ì€íšíìµëë€. +##view_adminRoles_failLdapGroupsSettings = ì¬ì©ê°ë¥í LDAP 귞룹ì ìœëë° ì€íšíìµëë€ - LDAP ê·žë£¹ìŽ ìë ê²ì ê°ì í©ëë€. view_adminRoles_failRoles = ìí ì ê±°ì ì€íšíìµëë€. view_adminRoles_globalPerms = êžë¡ë² ê¶í view_adminRoles_ldapGroups = LDAP 귞룹 ##view_adminRoles_ldapGroupsReadOnly = LDAP group data is read only +##view_adminRoles_ldapQueryPageSize = 늬ìì€ ê¶í view_adminRoles_noLdap = LDAP 볎ì íµí©ìŽ ì€ì ëì§ ìììµëë€. LDAPì 구ì±íë €ë©Ž <a {0}>{1}</a>ë¡ ê°ììì. ##view_adminRoles_permissions_autoselecting_configureRead_implied = Autodeselected CONFIGURE_WRITE permission, since lack of CONFIGURE_READ implies lack of it... ##view_adminRoles_permissions_autoselecting_configureWrite_implied = Autoselected CONFIGURE_READ permission, since CONFIGURE_WRITE implies it... 
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties index 952b01f..35d2aec 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties @@ -135,11 +135,13 @@ common_msg_noItemsToShow = N\u00E3o existem itens para apresentar common_msg_notYetImplemented = N\u00E3o implementado ainda common_msg_see_more = Ver detalhes... common_msg_step_x_of_y = Passo {0} de {1} +##common_msg_typeToFilterResults = N\u00E3o implementado ainda ##common_severity_debug = Debug common_severity_error = Erro ##common_severity_fatal = Fatal ##common_severity_info = Info ##common_severity_warn = Warn +##common_status_attention = Sucesso common_status_canceled = Cancelado common_status_deferred = Deferido common_status_failed = Falha @@ -191,11 +193,13 @@ common_title_edit_mode = Modo Edi\u00E7\u00E3o common_title_enabled = Habilitado? 
common_title_end = Fim common_title_favorites = Favoritos +##common_title_filterResultsBelow = Favoritos common_title_generalProp = Propriedades Gerais common_title_group = Grupo common_title_group_def_total = Total de Defini\u00E7\u00E3o de Grupos common_title_group_member_health = Sa\u00FAde do Membro do Grupo common_title_groups = Grupos +##common_title_groupsFound = Grupos common_title_help = Ajuda common_title_host = Host common_title_icon = \u00EDcone @@ -209,6 +213,7 @@ common_title_inventorySummary = Sum\u00E1rio do Invent\u00E1rio common_title_lastUpdated = \u00DAltima Atualiza\u00E7\u00E3o common_title_lastUpdatedBy = \u00DAltima Atualiza\u00E7\u00E3o feita por common_title_ldapGroups = Grupos LDAP +##common_title_ldapGroupsAvailable = Grupos LDAP common_title_mashup = Mashup common_title_members_reporting = Relat\u00F3rio de Membros common_title_message = Mensagem @@ -233,6 +238,8 @@ common_title_platform_total = Total de Plataformas ##common_title_plugin = Plugin common_title_port = Porta common_title_providers = Provedores +##common_title_queryPagesParsed = Grupos LDAP +##common_title_queryProgress = Grupos LDAP common_title_recent_alerts = Alertas Recentes common_title_recent_bundle_deployments = Deployments de Bundle recente common_title_recent_configuration_updates = Configura\u00E7\u00F5es Alteradas Recentemente @@ -272,6 +279,7 @@ common_title_sort_order_tooltip = Define a ordem dos resultados. common_title_start = Iniciar common_title_status = Status common_title_stop = Parar +##common_title_suggest = Sum\u00E1rio common_title_summary = Sum\u00E1rio common_title_tag_cloud = Nuvem de Tags common_title_the = O @@ -505,12 +513,22 @@ view_adminRoles_assignedGroups = Grupos de Recursos Associados view_adminRoles_assignedSubjects = Perfis Associados ##view_adminRoles_failCreateRoleWithExistingName = Failed to create role with existing name [{0}]. Please Use another name. 
view_adminRoles_failLdap = Falha ao determinar se o LDAP foi configurado - assumindo como LDAP n\u00E3o configurado. +##view_adminRoles_failLdapAvailableGroups = Falha ao recuperar os grupos dispon\u00EDveis no LDAP - assumindo como LDAP sem grupos. view_adminRoles_failLdapGroups = Falha ao recuperar os grupos dispon\u00EDveis no LDAP - assumindo como LDAP sem grupos. view_adminRoles_failLdapGroupsRole = Falha ao carregar grupos do LDAP dispon\u00EDveis para o perfil. +##view_adminRoles_failLdapGroupsSettings = Falha ao recuperar os grupos dispon\u00EDveis no LDAP - assumindo como LDAP sem grupos. view_adminRoles_failRoles = Falha ao recuperar perfis. view_adminRoles_globalPerms = Permiss\u00F5es Globais view_adminRoles_ldapGroups = Grupos LDAP view_adminRoles_ldapGroupsReadOnly = Informa\u00E7\u00F5es do grupo LDAP com permiss\u00F5es apenas de leitura +##view_adminRoles_ldapGroupsSettingsReadOnly = Informa\u00E7\u00F5es do grupo LDAP com permiss\u00F5es apenas de leitura +##view_adminRoles_ldapQueryPageSize = Permiss\u00F5es do Recurso +##view_adminRoles_ldapTooManyResults = Informa\u00E7\u00F5es do grupo LDAP com permiss\u00F5es apenas de leitura +##view_adminRoles_ldapTookLongResults = Informa\u00E7\u00F5es do grupo LDAP com permiss\u00F5es apenas de leitura +##view_adminRoles_ldapTookManyPagesResults = Informa\u00E7\u00F5es do grupo LDAP com permiss\u00F5es apenas de leitura +##view_adminRoles_ldapWarnParsingManyPagesResults = Informa\u00E7\u00F5es do grupo LDAP com permiss\u00F5es apenas de leitura +##view_adminRoles_ldapWarnQueryTakingLongResults = Informa\u00E7\u00F5es do grupo LDAP com permiss\u00F5es apenas de leitura +##view_adminRoles_ldapWarnTooManyResults = Informa\u00E7\u00F5es do grupo LDAP com permiss\u00F5es apenas de leitura view_adminRoles_noLdap = A integra\u00E7\u00E3o com o LDAP ainda n\u00E3o foi configurada. Para configurar o LDAP acesse <a {0}>{1}</a>. 
view_adminRoles_permissions_autoselecting_configureRead_implied = Permiss\u00E3o CONFIGURE_WRITE desmarcada automaticamente devida a aus\u00EAncia da permiss\u00E3o CONFIGURE_READ... view_adminRoles_permissions_autoselecting_configureWrite_implied = Permiss\u00E3o CONFIGURE_READ marcada automaticamente devida a marca\u00E7\u00E3o de CONFIGURE_WRITE... diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties index 4fbef12..3035f91 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties @@ -2252,11 +2252,13 @@ common_msg_noItemsToShow = ÐÑÑÑÑÑÑвÑÑÑ ÑлеЌеМÑÑ ÐŽÐ»Ñ ÐŸÑП common_msg_notYetImplemented = ÐÑе Ме ÑеалОзПваМП common_msg_see_more = пПЎÑПбМее... 
common_msg_step_x_of_y = Каг {0} Оз {1} +##common_msg_typeToFilterResults = ÐÑе Ме ÑеалОзПваМП common_severity_debug = ÐÑлаЎка common_severity_error = ÐÑОбка common_severity_fatal = ЀаÑалÑÐœÑй common_severity_info = ÐÐœÑПÑЌаÑÐžÑ common_severity_warn = ÐÑеЎÑпÑежЎеМОе +##common_status_attention = УÑпеÑÐœÑй common_status_canceled = ÐÑЌеМÑÐœ common_status_deferred = ÐÑлПжеММÑй common_status_failed = ÐезÑÑпеÑМП @@ -2312,6 +2314,7 @@ common_title_group = ÐÑÑппа common_title_group_def_total = ÐпÑеЎелеМОе кПлОÑеÑÑва гÑÑпп common_title_group_member_health = СПÑÑПÑМОе ÑлеМа гÑÑÐ¿Ð¿Ñ common_title_groups = ÐÑÑÐ¿Ð¿Ñ +##common_title_groupsFound = ÐÑÑÐ¿Ð¿Ñ common_title_help = ÐПЌПÑÑ common_title_host = Host common_title_id = ID @@ -2323,6 +2326,7 @@ common_title_inventorySummary = ÐÑПг ОМвеМÑаÑОзаÑОО common_title_lastUpdated = ÐПÑлеЎМее ПбМПвлеМОе common_title_lastUpdatedBy = ÐПÑлеЎМее ПбМПвлеМОе ÐŸÑ common_title_ldapGroups = LDAP-гÑÑÐ¿Ð¿Ñ +##common_title_ldapGroupsAvailable = LDAP-гÑÑÐ¿Ð¿Ñ common_title_mashup = Mashup common_title_members_reporting = ÐПлОÑеÑÑвП пПлÑзПваÑелей ПÑÑеÑМПÑÑО common_title_message = СППбÑеМОе @@ -2346,6 +2350,8 @@ common_title_platform_total = ÐПлОÑеÑÑвП плаÑÑПÑÐŒ common_title_plugin = ÐлагОМ common_title_port = ÐПÑÑ common_title_providers = ÐÑПвайЎеÑÑ +##common_title_queryPagesParsed = LDAP-гÑÑÐ¿Ð¿Ñ +##common_title_queryProgress = LDAP-гÑÑÐ¿Ð¿Ñ common_title_recent_alerts = ÐПÑлеЎМОе пÑеЎÑпÑÐµÐ¶ÐŽÐµÐœÐžÑ common_title_recent_bundle_deployments = ÐПÑлеЎМОе ÑазвеÑÑÑÐ²Ð°ÐœÐžÑ Ð¿Ð°ÐºÐµÑа common_title_recent_configuration_updates = ÐПÑлеЎМОе ÐŸÐ±ÐœÐŸÐ²Ð»ÐµÐœÐžÑ ÐºÐŸÐœÑОгÑÑаÑОО @@ -2382,6 +2388,7 @@ common_title_sort_order_tooltip = УÑÑаМПвОÑÑ Ð¿ÐŸÑÑЎПк ÑПÑÑО common_title_start = СÑаÑÑ common_title_status = СÑаÑÑÑ common_title_stop = СÑПп +##common_title_suggest = СвПЎка common_title_summary = СвПЎка common_title_tag_cloud = ÐблакП ÑегПв common_title_timestamp = ÐаÑа/ÐÑеЌÑcommon_title_total=ÐÑПг @@ -2587,13 +2594,23 @@ view_adminRoles_assignedGroups = 
ÐазМаÑеММÑе гÑÑÐ¿Ð¿Ñ ÑеÑÑÑ view_adminRoles_assignedSubjects = ÐазМаÑеММÑе ÑÑбÑекÑÑ ##view_adminRoles_failCreateRoleWithExistingName = Failed to create role with existing name [{0}]. Please use another name. view_adminRoles_failLdap = Ðе ÑЎалПÑÑ ÐŸÐ¿ÑеЎелОÑÑ, МаÑÑÑПеМ лО LDAP - пÑеЎпПлПжОÑелÑМП LDAP ПÑÑÑÑÑÑвÑеÑ. +##view_adminRoles_failLdapAvailableGroups = Ðе ÑЎалПÑÑ Ð¿ÐŸÐ»ÑÑОÑÑ LDAP-гÑÑппÑ. ÐÑеЎпПлПжОÑелÑМП LDAP гÑÑÐ¿Ð¿Ñ ÐŸÑÑÑÑÑÑвÑÑÑ view_adminRoles_failLdapGroups = Ðе ÑЎалПÑÑ Ð¿ÐŸÐ»ÑÑОÑÑ LDAP-гÑÑппÑ. ÐÑеЎпПлПжОÑелÑМП LDAP гÑÑÐ¿Ð¿Ñ ÐŸÑÑÑÑÑÑвÑÑÑ view_adminRoles_failLdapGroupsRole = Ðе ÑЎалПÑÑ Ð·Ð°Ð³ÑÑзОÑÑ LDAP гÑÑппÑ, ЎПÑÑÑпМÑе ÐŽÐ»Ñ ÑПлО. +##view_adminRoles_failLdapGroupsSettings = Ðе ÑЎалПÑÑ Ð¿ÐŸÐ»ÑÑОÑÑ LDAP-гÑÑппÑ. ÐÑеЎпПлПжОÑелÑМП LDAP гÑÑÐ¿Ð¿Ñ ÐŸÑÑÑÑÑÑвÑÑÑ view_adminRoles_failRoles = Ðе ÑЎалПÑÑ Ð¿ÐŸÐ»ÑÑОÑÑ ÑПлО. view_adminRoles_globalPerms = ÐлПбалÑÐœÑе пПлМПЌПÑÐžÑ view_adminRoles_ldapGroups = LDAP-гÑÑÐ¿Ð¿Ñ view_adminRoles_ldapGroupsReadOnly = ЎаММÑе LDAP гÑÑпп ЎПÑÑÑÐ¿ÐœÑ ÑПлÑкП ÐŽÐ»Ñ ÑÑÐµÐœÐžÑ ##view_adminRoles_noLdap = The LDAP security integration is not configured. To configure LDAP, go to <a {0}>{1}</a>. 
+##view_adminRoles_ldapGroupsSettingsReadOnly = ЎаММÑе LDAP гÑÑпп ЎПÑÑÑÐ¿ÐœÑ ÑПлÑкП ÐŽÐ»Ñ ÑÑÐµÐœÐžÑ +##view_adminRoles_ldapQueryPageSize = ÐПлМПЌПÑÐžÑ ÑеÑÑÑÑа +##view_adminRoles_ldapTooManyResults = ЎаММÑе LDAP гÑÑпп ЎПÑÑÑÐ¿ÐœÑ ÑПлÑкП ÐŽÐ»Ñ ÑÑÐµÐœÐžÑ +##view_adminRoles_ldapTookLongResults = ЎаММÑе LDAP гÑÑпп ЎПÑÑÑÐ¿ÐœÑ ÑПлÑкП ÐŽÐ»Ñ ÑÑÐµÐœÐžÑ +##view_adminRoles_ldapTookManyPagesResults = ЎаММÑе LDAP гÑÑпп ЎПÑÑÑÐ¿ÐœÑ ÑПлÑкП ÐŽÐ»Ñ ÑÑÐµÐœÐžÑ +##view_adminRoles_ldapWarnParsingManyPagesResults = ЎаММÑе LDAP гÑÑпп ЎПÑÑÑÐ¿ÐœÑ ÑПлÑкП ÐŽÐ»Ñ ÑÑÐµÐœÐžÑ +##view_adminRoles_ldapWarnQueryTakingLongResults = ЎаММÑе LDAP гÑÑпп ЎПÑÑÑÐ¿ÐœÑ ÑПлÑкП ÐŽÐ»Ñ ÑÑÐµÐœÐžÑ +##view_adminRoles_ldapWarnTooManyResults = ЎаММÑе LDAP гÑÑпп ЎПÑÑÑÐ¿ÐœÑ ÑПлÑкП ÐŽÐ»Ñ ÑÑÐµÐœÐžÑ view_adminRoles_permissions_autoselecting_configureRead_implied = ÐвÑПЌаÑОÑеÑкО ПÑклÑÑеМП CONFIGURE_WRITE пПлМПЌПÑОе, пПÑкПлÑÐºÑ ÐŸÑÑÑÑÑÑвÑÐµÑ CONFIGURE_READ... view_adminRoles_permissions_autoselecting_configureWrite_implied = ÐвÑПЌаÑОÑеÑкО вÑбÑаМП CONFIGURE_READ пПлМПЌПÑОе, пПÑкПлÑÐºÑ CONFIGURE_WRITE пПЎÑазÑЌеваеÑ, ÑÑП ... ##view_adminRoles_permissions_autoselecting_manageBundleGroups_implied = Autoselected View Bundles, which is granted by Manage Bundle Groups... diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties index 24ea681..615d3a0 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties @@ -135,11 +135,13 @@ common_msg_noItemsToShow = \u6ca1\u6709\u6761\u76ee\u663e\u793a common_msg_notYetImplemented = \u5c1a\u672a\u5b9e\u73b0 common_msg_see_more = \u67e5\u770b\u66f4\u591a... 
common_msg_step_x_of_y = Step {0} of {1} +##common_msg_typeToFilterResults = \u5c1a\u672a\u5b9e\u73b0 common_severity_debug = \u8c03\u8bd5 common_severity_error = Error common_severity_fatal = \u4e25\u91cd common_severity_info = \u4fe1\u606f common_severity_warn = \u8b66\u544a +##common_status_attention = \u6210\u529f common_status_canceled = \u53d6\u6d88 common_status_deferred = \u5ef6\u671f common_status_failed = \u5931\u8d25 @@ -195,6 +197,7 @@ common_title_group = \u7ec4 common_title_group_def_total = \u5b9a\u4e49\u7ec4\u603b\u6570 common_title_group_member_health = \u7ec4\u5458\u5065\u5eb7\u5ea6 common_title_groups = \u7ec4 +##common_title_groupsFound = \u7ec4 common_title_help = \u5e2e\u52a9 common_title_host = \u4e3b\u673a common_title_id = ID @@ -206,6 +209,7 @@ common_title_inventorySummary = \u6e05\u5355\u6c47\u603b common_title_lastUpdated = \u6700\u8fd1\u66f4\u65b0 common_title_lastUpdatedBy = \u6700\u8fd1\u66f4\u65b0\u8005 common_title_ldapGroups = LDAP\u7ec4 +##common_title_ldapGroupsAvailable = LDAP\u7ec4 common_title_mashup = Mashup common_title_members_reporting = \u7ec4\u62a5\u8868 common_title_message = \u6d88\u606f @@ -230,6 +234,8 @@ common_title_platform_total = \u6240\u6709\u5e73\u53f0 common_title_plugin = \u63d2\u4ef6 common_title_port = \u7aef\u53e3 common_title_providers = \u63d0\u4f9b\u8005 +##common_title_queryPagesParsed = LDAP\u7ec4 +##common_title_queryProgress = LDAP\u7ec4 common_title_recent_alerts = \u6700\u8fd1\u544a\u8b66 common_title_recent_bundle_deployments = \u6700\u8fd1\u53d1\u5e03Bundles common_title_recent_configuration_updates = \u6700\u8fd1\u914d\u7f6e\u66f4\u65b0 @@ -269,6 +275,7 @@ common_title_sort_order_tooltip = \u8bbe\u7f6e\u6392\u5e8f\u65b9\u5f0f. 
common_title_start = \u5f00\u59cb common_title_status = \u72b6\u6001 common_title_stop = \u505c\u6b62 +##common_title_suggest = \u7edf\u8ba1 common_title_summary = \u7edf\u8ba1 common_title_tag_cloud = \u6807\u7b7e\u4e91 common_title_the = The @@ -494,12 +501,22 @@ view_adminRoles_assignedGroups = \u5206\u914d\u7684\u8d44\u6e90\u7ec4 view_adminRoles_assignedSubjects = \u5206\u914d\u7684\u4e3b\u9898 ##view_adminRoles_failCreateRoleWithExistingName = Failed to create role with existing name [{0}]. Please Use another name. view_adminRoles_failLdap = \u65e0\u6cd5\u786e\u5b9aLDAP - \u5047\u5b9a\u6ca1\u6709LDAP. +##view_adminRoles_failLdapAvailableGroups = \u67e5\u8be2\u6709\u6548LDAP\u7ec4\u5931\u8d25 - \u5047\u5b9a\u65e0DAP\u7ec4. view_adminRoles_failLdapGroups = \u67e5\u8be2\u6709\u6548LDAP\u7ec4\u5931\u8d25 - \u5047\u5b9a\u65e0DAP\u7ec4. view_adminRoles_failLdapGroupsRole = \u52a0\u8f7dLDAP\u7ec4\u89d2\u8272\u5931\u8d25. +##view_adminRoles_failLdapGroupsSettings = \u67e5\u8be2\u6709\u6548LDAP\u7ec4\u5931\u8d25 - \u5047\u5b9a\u65e0DAP\u7ec4. view_adminRoles_failRoles = \u53d6\u5f97\u89d2\u8272\u5931\u8d25. 
view_adminRoles_globalPerms = \u5168\u5c40\u6388\u6743 view_adminRoles_ldapGroups = LDAP\u7ec4 view_adminRoles_ldapGroupsReadOnly = LDAP\u7ec4\u6570\u636e\u4e3a\u53ea\u8bfb +##view_adminRoles_ldapGroupsSettingsReadOnly = LDAP\u7ec4\u6570\u636e\u4e3a\u53ea\u8bfb +##view_adminRoles_ldapQueryPageSize = \u8d44\u6e90\u6388\u6743 +##view_adminRoles_ldapTooManyResults = LDAP\u7ec4\u6570\u636e\u4e3a\u53ea\u8bfb +##view_adminRoles_ldapTookLongResults = LDAP\u7ec4\u6570\u636e\u4e3a\u53ea\u8bfb +##view_adminRoles_ldapTookManyPagesResults = LDAP\u7ec4\u6570\u636e\u4e3a\u53ea\u8bfb +##view_adminRoles_ldapWarnParsingManyPagesResults = LDAP\u7ec4\u6570\u636e\u4e3a\u53ea\u8bfb +##view_adminRoles_ldapWarnQueryTakingLongResults = LDAP\u7ec4\u6570\u636e\u4e3a\u53ea\u8bfb +##view_adminRoles_ldapWarnTooManyResults = LDAP\u7ec4\u6570\u636e\u4e3a\u53ea\u8bfb view_adminRoles_noLdap = \u6ca1\u6709\u96c6\u6210LDAP\u5b89\u5168, \u5230<a {0}>{1}</a>. ##view_adminRoles_permissions_autoselecting_configureRead_implied = Autodeselected CONFIGURE_WRITE permission, since lack of CONFIGURE_READ implies lack of it... ##view_adminRoles_permissions_autoselecting_configureWrite_implied = Autoselected CONFIGURE_READ permission, since CONFIGURE_WRITE implies it...
commit 43883b8c436129218ac4c3c917a4883177f13d0c Author: Simeon Pinder spinder@fulliautomatix.conchfritter.com Date: Fri Aug 9 10:22:37 2013 -0400
[BZ 990576] - insert hard ldap group limit with parsing to guard against excessive ui perf issues. - fix ldap search filter issue - add Suggest/Advice component for more feedback for ldap integration. - disable edit on Suggest/Advice component - clean up selector feedback ui threads to guard against refresh. - refresh cached ldap results after 30 mins. - modify AbstractSelector to better support overriding the availableFilterForm
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleLdapGroupSelector.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleLdapGroupSelector.java index b17961d..8d53b82 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleLdapGroupSelector.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleLdapGroupSelector.java @@ -36,11 +36,18 @@ import com.smartgwt.client.data.Record; import com.smartgwt.client.data.fields.DataSourceTextField; import com.smartgwt.client.types.TitleOrientation; import com.smartgwt.client.widgets.form.DynamicForm; +import com.smartgwt.client.widgets.form.events.ItemChangedEvent; +import com.smartgwt.client.widgets.form.events.ItemChangedHandler; import com.smartgwt.client.widgets.form.fields.CheckboxItem; import com.smartgwt.client.widgets.form.fields.FormItemIcon; import com.smartgwt.client.widgets.form.fields.SpacerItem; import com.smartgwt.client.widgets.form.fields.StaticTextItem; +import com.smartgwt.client.widgets.form.fields.TextAreaItem; import com.smartgwt.client.widgets.form.fields.TextItem; +import com.smartgwt.client.widgets.form.fields.events.ChangeEvent; +import com.smartgwt.client.widgets.form.fields.events.ChangeHandler; +import com.smartgwt.client.widgets.form.fields.events.ChangedEvent; +import com.smartgwt.client.widgets.form.fields.events.ChangedHandler; import com.smartgwt.client.widgets.grid.ListGridRecord; import com.smartgwt.client.widgets.layout.HLayout;
@@ -63,7 +70,9 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c public static final String FIELD_ID = "id"; public static final String FIELD_NAME = "name"; public static final String FIELD_DESCRIPTION = "description"; - private static boolean queryCompleted = false; + final TextItem searchTextItem = new TextItem(); + protected int cursorPosition; + private static int retryAttempt = 0;
//override the selector key for ldap group selection. protected String getSelectorKey() { @@ -86,7 +95,7 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c availableFilterForm.setNumCols(2); } int groupPanelWidth = 375; - int groupPanelHeight = 140; + int groupPanelHeight = 150;
// final TextItem search = new TextItem("search", // MSG.common_title_search()); @@ -100,6 +109,25 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c availableGroupDetails.setGroupTitle("Available Groups Results"); availableGroupDetails.setIsGroup(true); availableGroupDetails.setWrapItemTitles(false); + //add itemChanged handler to listen for changes to SearchItem + availableGroupDetails.addItemChangedHandler(new ItemChangedHandler() { + public void onItemChanged(ItemChangedEvent itemChangedEvent) { + + latestCriteria = getLatestCriteria(null); + + Timer timer = new Timer() { + @Override + public void run() { + if (latestCriteria != null) { + Criteria criteria = latestCriteria; + latestCriteria = null; + populateAvailableGrid(criteria); + } + } + }; + timer.schedule(500); + } + }); } final TextItem resultCountItem = new TextItem("resultCount", "Groups Found"); { @@ -111,23 +139,49 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c pageCountItem.setCanEdit(false); pageCountItem.setWidth("100%"); } + final TextAreaItem adviceItem = new TextAreaItem("advice", "Suggest"); + { + adviceItem.setWidth("100%"); + adviceItem.setHeight(20); + String feedback = "(None)"; + adviceItem.setValue(feedback); + adviceItem.setTooltip(feedback); + adviceItem.setDisabled(true); + adviceItem.addChangeHandler(new ChangeHandler() { + @Override + public void onChange(ChangeEvent event) { + event.cancel(); + cursorPosition = adviceItem.getSelectionRange()[0]; + } + }); + adviceItem.addChangedHandler(new ChangedHandler() { + + @Override + public void onChanged(ChangedEvent event) { + adviceItem.setSelectionRange(cursorPosition, cursorPosition); + } + }); + } // final TextItem search = new TextItem("search", // MSG.common_title_search()); - final TextItem search = new TextItem("search", "Search[within results]"); { - search.setWidth("100%"); - search.setTooltip("Start typing here to show groups containing the typed 
characters."); + searchTextItem.setName("search"); + searchTextItem.setTitle("Filter[results below]"); + searchTextItem.setWidth("100%"); + searchTextItem.setTooltip("Start typing here to only show groups containing the typed characters."); } final FormItemIcon loadingIcon = new FormItemIcon(); final FormItemIcon successIcon = new FormItemIcon(); final FormItemIcon failIcon = new FormItemIcon(); + final FormItemIcon attentionIcon = new FormItemIcon(); String successIconPath = "[SKIN]/actions/ok.png"; String failedIconPath = "[SKIN]/actions/exclamation.png"; String loadingIconPath = "[SKIN]/loading.gif"; - //icon.setSrc("[SKIN]/actions/help.png"); + String attentionIconPath = "[SKIN]/Dialog/warn.png"; loadingIcon.setSrc(loadingIconPath); successIcon.setSrc(successIconPath); failIcon.setSrc(failedIconPath); + attentionIcon.setSrc(attentionIconPath);
final StaticTextItem groupQueryStatus = new StaticTextItem(); { @@ -136,7 +190,7 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c groupQueryStatus.setDefaultValue("Loading..."); groupQueryStatus.setIcons(loadingIcon); } - availableGroupDetails.setItems(resultCountItem, pageCountItem, groupQueryStatus, new SpacerItem(), search); + availableGroupDetails.setItems(resultCountItem, pageCountItem, groupQueryStatus, adviceItem, searchTextItem);
// Ldap Group Settings region final DynamicForm ldapGroupSettings = new DynamicForm(); @@ -182,8 +236,8 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c //You have to set this attribute groupUsePosixGroupsItem.setAttribute("labelAsTitle", true); } - ldapGroupSettings - .setItems(groupSearch, groupMember, groupQueryPagingItem, groupQueryPagingCountItem, groupUsePosixGroupsItem); + ldapGroupSettings.setItems(groupSearch, groupMember, groupQueryPagingItem, groupQueryPagingCountItem, + groupUsePosixGroupsItem);
// orient both panels next to each other HLayout panel = new HLayout(); @@ -196,6 +250,8 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c } availableFilterForm.addChild(panel);
+ final long ldapGroupSelectorRequestId = System.currentTimeMillis(); + //launch operations to populate/refresh LDAP Group Query contents. final Timer ldapPropertiesTimer = new Timer() { public void run() { @@ -230,75 +286,119 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c } } }; - ldapPropertiesTimer.scheduleRepeating(2000); // repeat interval in milliseconds, e.g. 30000 = 30seconds + ldapPropertiesTimer.schedule(2000); // repeat interval in milliseconds, e.g. 30000 = 30seconds
//launch operations to populate/refresh LDAP Group Query contents. final Timer availableGroupsTimer = new Timer() { public void run() { - if (!queryCompleted) { - //make request to RHQ about state of latest LDAP GWT request - GWTServiceLookup.getLdapService().findAvailableGroupsStatus( - new AsyncCallback<Set<Map<String, String>>>() { - @Override - public void onFailure(Throwable caught) { - groupQueryStatus.setIcons(failIcon); - groupQueryStatus - .setDefaultValue("Fail: Unable to retrieve status for latest AvailableGroups() call."); - //TODO: update this message - CoreGUI.getErrorHandler().handleError(MSG.view_adminRoles_failLdap(), caught); + final String attention = "Attention"; + final String success = "Success"; + final String none = "(None)"; + final String failed = "Failed"; + //make request to RHQ about state of latest LDAP GWT request + GWTServiceLookup.getLdapService().findAvailableGroupsStatus( + new AsyncCallback<Set<Map<String, String>>>() { + @Override + public void onFailure(Throwable caught) { + groupQueryStatus.setIcons(failIcon); + groupQueryStatus.setDefaultValue(failed); + String adviceValue = "Failed: Unable to retrieve status for latest AvailableGroups() call."; + adviceItem.setValue(adviceValue); + adviceItem.setTooltip(adviceValue); + //TODO: update this message + CoreGUI.getErrorHandler().handleError(MSG.view_adminRoles_failLdap(), caught); + retryAttempt++; + if (retryAttempt > 3) { + cancel();//kill thread + retryAttempt = 0; } + }
- @Override - public void onSuccess(Set<Map<String, String>> results) { - // Log.debug("@@@@@@@ findAvailableGroupsStatus: SUCCESS:" + System.currentTimeMillis() - // + ":count:" - // + results.size()); - long start = -1, end = -1; - int pageCount = 0; - int resultCountValue = 0; - for (Map<String, String> map : results) { - String key = map.keySet().toArray()[0] + ""; - if (key.equals("query.results.parsed")) { - String value = map.get(key); - resultCountItem.setValue(value); - resultCountValue = Integer.valueOf(value); - } else if (key.equals("query.complete")) { - String value = map.get(key); - queryCompleted = Boolean.valueOf(value); - } else if (key.equals("query.start.time")) { - String value = map.get(key); - start = Long.valueOf(value); - } else if (key.equals("query.end.time")) { - String value = map.get(key); - end = Long.valueOf(value); - } else if (key.equals("query.page.count")) { - String value = map.get(key); - pageCountItem.setValue(value); - pageCount = Integer.valueOf(value); - } + @Override + public void onSuccess(Set<Map<String, String>> results) { + long start = -1, current = -1; + int pageCount = 0; + int resultCountValue = 0; + boolean queryCompleted = false; + for (Map<String, String> map : results) { + String key = map.keySet().toArray()[0] + ""; + if (key.equals("query.results.parsed")) { + String value = map.get(key); + resultCountItem.setValue(value); + resultCountValue = Integer.valueOf(value); + } else if (key.equals("query.complete")) { + String value = map.get(key); + queryCompleted = Boolean.valueOf(value); + } else if (key.equals("query.start.time")) { + String value = map.get(key); + start = Long.valueOf(value); + } else if (key.equals("query.current.time")) { + String value = map.get(key); + current = Long.valueOf(value); + } else if (key.equals("query.page.count")) { + String value = map.get(key); + pageCountItem.setValue(value); + pageCount = Integer.valueOf(value); } - //act on status details to add extra perf suggestions - if 
(queryCompleted) { + } + //Update status information + String warnTooManyResults = " A lot of results are being returned. Modify your 'Group Search Filter' to return fewer results."; + String warnQueryTakingLongResults = " Query taking a while to complete. Modify your 'Group Search Filter' to return fewer results."; + String warnParsingManyPagesResults = " Query requires a lot of pages. Modify your 'Group Search Page Size' to return more results per request."; + boolean resultCountWarning = false; + boolean pageCountWarning = false; + boolean timePassingWarning = false; + if ((resultCountWarning = (resultCountValue > 5000)) + || (pageCountWarning = (pageCount > 5)) + || (timePassingWarning = (current - start) > 5 * 1000)) { + adviceItem.setDisabled(false); + groupQueryStatus.setIcons(attentionIcon); + if (resultCountWarning) { + adviceItem.setValue(warnTooManyResults); + adviceItem.setTooltip(warnTooManyResults); + } else if (pageCountWarning) { + adviceItem.setValue(warnParsingManyPagesResults); + adviceItem.setTooltip(warnParsingManyPagesResults); + } else if (timePassingWarning) { + adviceItem.setValue(warnQueryTakingLongResults); + adviceItem.setTooltip(warnQueryTakingLongResults); + } + } + + //act on status details to add extra perf suggestions. Kill threads older than 30 mins + long parseTime = System.currentTimeMillis() - ldapGroupSelectorRequestId; + if ((queryCompleted) || (parseTime) > 30 * 60 * 1000) { + String tooManyResults = "Too many results to show all. Modify your 'Group Search Filter' to return fewer than 20000 results."; + String queryTookLongResults = " Query took " + parseTime + + " ms to complete. Modify your 'Group Search Filter' to return fewer results."; + String queryTookManyPagesResults = " Query required " + + pageCount + + " pages to complete. 
Modify 'Group Search Page Size' to return more results per request."; + adviceItem.setDisabled(false); + groupQueryStatus.setIcons(attentionIcon); + groupQueryStatus.setDefaultValue(attention); + if (resultCountValue > 20000) {//results throttled + adviceItem.setValue(tooManyResults); + adviceItem.setTooltip(tooManyResults); + } else if ((current - start) >= 10 * 1000) {// took longer than 10s + adviceItem.setValue(queryTookLongResults); + adviceItem.setTooltip(queryTookLongResults); + } else if (pageCount >= 20) {// required more than 20 pages of results + adviceItem.setValue(queryTookManyPagesResults); + adviceItem.setTooltip(queryTookManyPagesResults); + } else {//simple success. + groupQueryStatus.setDefaultValue(success); groupQueryStatus.setIcons(successIcon); - String success = "Success"; - String tooManyResults = success + ": Too many results."; - String queryTookLongResults = success + ": Query took long to complete."; - String queryTookManyPagesResults = success + ": Query required a lot of paging."; - //TODO: add in extra information about results. - if (resultCountValue > 20000) {//results throttled - groupQueryStatus.setDefaultValue(tooManyResults); - } else if ((end - start) >= 10 * 1000) {// took longer than 10s - groupQueryStatus.setDefaultValue(queryTookLongResults); - } else if (pageCount >= 20) {// took longer than 10s - groupQueryStatus.setDefaultValue(queryTookManyPagesResults); - } + adviceItem.setValue(none); + adviceItem.setTooltip(none); + adviceItem.setDisabled(true); } - availableGroupDetails.markForRedraw(); //now cancel the timer cancel(); } - }); - } + availableGroupDetails.markForRedraw(); + } + }); } }; availableGroupsTimer.scheduleRepeating(3000); // repeat interval in milliseconds, e.g. 
30000 = 30seconds @@ -315,7 +415,9 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c */ @Override protected Criteria getLatestCriteria(DynamicForm availableFilterForm) { - String search = (String) availableFilterForm.getValue("search"); + //String search = (String) availableFilterForm.getValue("search"); + //non-trivial recursive form items possible. Retrieve from correct form item. + String search = searchTextItem.getValueAsString(); Criteria criteria = new Criteria(); if (null != search) { criteria.addCriteria("name", search); @@ -330,9 +432,10 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c
public static class LdapGroupsDataSource extends RPCDataSource<LdapGroup, org.rhq.core.domain.criteria.Criteria> {
- //cache ldap group data from external server + //cache ldap group data from external server for 30 mins then stale. private Set<Map<String, String>> cachedLdapGroupsAvailable; private Map<String, Map<String, String>> cachedNameKeyedMap; + private long cachedLdapGroupsLast = -1;
public LdapGroupsDataSource() { DataSourceTextField nameField = new DataSourceTextField(FIELD_NAME, FIELD_NAME); @@ -368,8 +471,9 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c @Override protected void executeFetch(final DSRequest request, final DSResponse response, final org.rhq.core.domain.criteria.Criteria unused) { - //if not null then go through to initialize - if (cachedLdapGroupsAvailable == null) { + //if not null or stale then go through to initialize|reset + if ((cachedLdapGroupsAvailable == null) + || ((System.currentTimeMillis() - cachedLdapGroupsLast) > 30 * 60 * 1000)) { fetchLdapGroupsFromServerAsync(request, response); } else {//use cached data and return correct response //process cachedLdapGroupsAvailable based on criteria @@ -422,6 +526,7 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c public void onSuccess(Set<Map<String, String>> locatedGroupMaps) { Log.debug("Successfully located " + locatedGroupMaps.size() + " available LDAP groups."); cachedLdapGroupsAvailable = locatedGroupMaps; + cachedLdapGroupsLast = System.currentTimeMillis(); //all groups displayed initially PageList<LdapGroup> ldapGroups = convertToPageList(locatedGroupMaps); sendSuccessResponse(request, response, ldapGroups); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/selector/AbstractSelector.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/selector/AbstractSelector.java index 12c5eb0..6f09142 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/selector/AbstractSelector.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/selector/AbstractSelector.java @@ -65,9 +65,9 @@ import com.smartgwt.client.widgets.layout.VStack;
import org.rhq.enterprise.gui.coregui.client.ImageManager; import org.rhq.enterprise.gui.coregui.client.util.RPCDataSource; +import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedUtility; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVStack; -import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedUtility;
/** * @author Greg Hinkle @@ -344,7 +344,7 @@ public abstract class AbstractSelector<T, C extends org.rhq.core.domain.criteria return availableSectionStack; }
- private void populateAvailableGrid(Criteria criteria) { + protected void populateAvailableGrid(Criteria criteria) { // TODO until http://code.google.com/p/smartgwt/issues/detail?id=490 is fixed always go to the server for data this.datasource.invalidateCache(); DSRequest requestProperties = new DSRequest(); diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java index 67abb77..dc53a3c 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java @@ -96,14 +96,15 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { private static boolean groupQueryComplete = false; private static int groupQueryResultCount = 0; private static long groupQueryStartTime = -1; - private static long groupQueryEndTime = -1; + private static long groupQueryCurrentTime = -1; private static int groupQueryPageCount = 0; + private static final int LDAP_GROUP_QUERY_LIMIT = 20000;//start to see a lot of ui responsiveness issues beyond this.
private void resetGroupQueryDetails() { groupQueryComplete = false; groupQueryResultCount = 0; groupQueryStartTime = -1; - groupQueryEndTime = -1; + groupQueryCurrentTime = -1; groupQueryPageCount = 0; } public Set<Map<String, String>> findAvailableGroups() { @@ -138,8 +139,8 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { availableGroupsQueryStatus.add(buildStatusEntry("query.results.parsed", String.valueOf(groupQueryResultCount))); //query.start.time => timestamp availableGroupsQueryStatus.add(buildStatusEntry("query.start.time", String.valueOf(groupQueryStartTime))); - //query.end.time => timestamp|-1 - availableGroupsQueryStatus.add(buildStatusEntry("query.end.time", String.valueOf(groupQueryEndTime))); + //query.current.time => timestamp|-1 + availableGroupsQueryStatus.add(buildStatusEntry("query.current.time", String.valueOf(groupQueryCurrentTime))); //query.page.count => 0...N availableGroupsQueryStatus.add(buildStatusEntry("query.page.count", String.valueOf(groupQueryPageCount)));
@@ -470,8 +471,11 @@ SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.name(), "" int passedInPageSize = -1; try { passedInPageSize = Integer.valueOf(groupPageSize.trim()); - if (passedInPageSize > 0) { + if ((passedInPageSize > 0) && (passedInPageSize <= LDAP_GROUP_QUERY_LIMIT)) { defaultPageSize = passedInPageSize; + } else {//keep defaults and log actual value being used. + log.debug("LDAP Group Page Size passed '" + groupPageSize + + "' was ignored. Defaulting to 1000."); } } catch (NumberFormatException nfe) { //log issue and do nothing. Go with the default. @@ -494,6 +498,7 @@ SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.name(), ""
//update queryResultCount groupQueryResultCount = groupDetailsMap.size(); + groupQueryCurrentTime = System.currentTimeMillis();
// continually parsing pages of results until we're done. // only if they're enabled in the UI. @@ -511,7 +516,7 @@ SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.name(), "" } } //continually parsing pages of results until we're done. - while (cookie != null) { + while ((groupQueryResultCount <= LDAP_GROUP_QUERY_LIMIT) && (cookie != null)) { //ensure the next requests contains the session/cookie details ctx.setRequestControls(new Control[] { new PagedResultsControl(defaultPageSize, cookie, Control.CRITICAL) }); @@ -520,19 +525,21 @@ SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.name(), "" //update Query state after each page groupQueryResultCount = groupDetailsMap.size(); groupQueryPageCount++; + groupQueryCurrentTime = System.currentTimeMillis();
//empty out cookie cookie = null; - //test for further iterations - controls = ctx.getResponseControls(); - if (controls != null) { - for (Control control : controls) { - if (control instanceof PagedResultsResponseControl) { - PagedResultsResponseControl pagedResult = (PagedResultsResponseControl) control; - cookie = pagedResult.getCookie(); + //insert group query throttle. + //test for further iterations + controls = ctx.getResponseControls(); + if (controls != null) { + for (Control control : controls) { + if (control instanceof PagedResultsResponseControl) { + PagedResultsResponseControl pagedResult = (PagedResultsResponseControl) control; + cookie = pagedResult.getCookie(); + } } } - } } } } @@ -553,7 +560,7 @@ SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.name(), "" throw new LdapCommunicationException(iex); } //update end of query information - groupQueryEndTime = System.currentTimeMillis(); + groupQueryCurrentTime = System.currentTimeMillis(); groupQueryComplete = true; return groupDetailsMap; } @@ -575,7 +582,9 @@ SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.name(), "" //execute search based on controls and context passed in. NamingEnumeration<SearchResult> answer = ctx.search(baseDNs[x], filter, searchControls); boolean ldapApiEnumerationBugEncountered = false; - while ((!ldapApiEnumerationBugEncountered) && answer.hasMoreElements()) {//BZ:582471- ldap api bug change + int resultCount = 0; + while ((resultCount <= LDAP_GROUP_QUERY_LIMIT) && (groupDetailsMap.size() <= LDAP_GROUP_QUERY_LIMIT) + && (!ldapApiEnumerationBugEncountered) && answer.hasMoreElements()) {//BZ:582471- ldap api bug change // We use the first match SearchResult si = null; try { @@ -595,6 +604,7 @@ SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.name(), "" entry.put("name", name); entry.put("description", description); groupDetailsMap.add(entry); + resultCount++; } }
commit 78aca9df109b7f36cbe7e679da4ef1647d124ce6 Author: Simeon Pinder spinder@fulliautomatix.conchfritter.com Date: Tue Aug 6 14:36:55 2013 -0400
[BZ 990576] Add messaging and updates around ldap query performance for adminstrator.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleLdapGroupSelector.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleLdapGroupSelector.java index 4f763ff..b17961d 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleLdapGroupSelector.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleLdapGroupSelector.java @@ -27,17 +27,25 @@ import java.util.HashSet; import java.util.Map; import java.util.Set;
+import com.google.gwt.user.client.Timer; import com.google.gwt.user.client.rpc.AsyncCallback; import com.smartgwt.client.data.Criteria; import com.smartgwt.client.data.DSRequest; import com.smartgwt.client.data.DSResponse; import com.smartgwt.client.data.Record; import com.smartgwt.client.data.fields.DataSourceTextField; +import com.smartgwt.client.types.TitleOrientation; import com.smartgwt.client.widgets.form.DynamicForm; +import com.smartgwt.client.widgets.form.fields.CheckboxItem; +import com.smartgwt.client.widgets.form.fields.FormItemIcon; import com.smartgwt.client.widgets.form.fields.SpacerItem; +import com.smartgwt.client.widgets.form.fields.StaticTextItem; import com.smartgwt.client.widgets.form.fields.TextItem; import com.smartgwt.client.widgets.grid.ListGridRecord; +import com.smartgwt.client.widgets.layout.HLayout;
+import org.rhq.core.domain.common.composite.SystemSetting; +import org.rhq.core.domain.common.composite.SystemSettings; import org.rhq.core.domain.resource.group.LdapGroup; import org.rhq.core.domain.util.PageControl; import org.rhq.core.domain.util.PageList; @@ -55,6 +63,7 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c public static final String FIELD_ID = "id"; public static final String FIELD_NAME = "name"; public static final String FIELD_DESCRIPTION = "description"; + private static boolean queryCompleted = false;
//override the selector key for ldap group selection. protected String getSelectorKey() { @@ -72,11 +81,227 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c @Override protected DynamicForm getAvailableFilterForm() { DynamicForm availableFilterForm = new DynamicForm(); - availableFilterForm.setWidth100(); - availableFilterForm.setNumCols(2); - - final TextItem search = new TextItem("search", MSG.common_title_search()); - availableFilterForm.setItems(search, new SpacerItem()); + { + availableFilterForm.setWidth100(); + availableFilterForm.setNumCols(2); + } + int groupPanelWidth = 375; + int groupPanelHeight = 140; + + // final TextItem search = new TextItem("search", + // MSG.common_title_search()); + + // Structure the display area into two separate display regions + // Available Groups region + final DynamicForm availableGroupDetails = new DynamicForm(); + { + availableGroupDetails.setWidth(groupPanelWidth); + availableGroupDetails.setHeight(groupPanelHeight); + availableGroupDetails.setGroupTitle("Available Groups Results"); + availableGroupDetails.setIsGroup(true); + availableGroupDetails.setWrapItemTitles(false); + } + final TextItem resultCountItem = new TextItem("resultCount", "Groups Found"); + { + resultCountItem.setCanEdit(false); + resultCountItem.setWidth("100%"); + } + final TextItem pageCountItem = new TextItem("pageCount", "Query Pages Parsed"); + { + pageCountItem.setCanEdit(false); + pageCountItem.setWidth("100%"); + } + // final TextItem search = new TextItem("search", + // MSG.common_title_search()); + final TextItem search = new TextItem("search", "Search[within results]"); + { + search.setWidth("100%"); + search.setTooltip("Start typing here to show groups containing the typed characters."); + } + final FormItemIcon loadingIcon = new FormItemIcon(); + final FormItemIcon successIcon = new FormItemIcon(); + final FormItemIcon failIcon = new FormItemIcon(); + String successIconPath = "[SKIN]/actions/ok.png"; + 
String failedIconPath = "[SKIN]/actions/exclamation.png"; + String loadingIconPath = "[SKIN]/loading.gif"; + //icon.setSrc("[SKIN]/actions/help.png"); + loadingIcon.setSrc(loadingIconPath); + successIcon.setSrc(successIconPath); + failIcon.setSrc(failedIconPath); + + final StaticTextItem groupQueryStatus = new StaticTextItem(); + { + groupQueryStatus.setName("groupQueryStatus"); + groupQueryStatus.setTitle("Query Progress"); + groupQueryStatus.setDefaultValue("Loading..."); + groupQueryStatus.setIcons(loadingIcon); + } + availableGroupDetails.setItems(resultCountItem, pageCountItem, groupQueryStatus, new SpacerItem(), search); + + // Ldap Group Settings region + final DynamicForm ldapGroupSettings = new DynamicForm(); + { + ldapGroupSettings.setWidth(groupPanelWidth); + ldapGroupSettings.setHeight(groupPanelHeight); + ldapGroupSettings.setGroupTitle("[Read Only] Ldap Group Settings. Edit in 'System Settings'"); + ldapGroupSettings.setIsGroup(true); + ldapGroupSettings.setWrapItemTitles(false); + } + final TextItem groupSearch = new TextItem("groupSearch", "Search Filter"); + { + groupSearch.setCanEdit(false); + groupSearch.setWidth("100%"); + } + final TextItem groupMember = new TextItem("groupMember", "Member Filter"); + { + groupMember.setCanEdit(false); + groupMember.setWidth("100%"); + } + final CheckboxItem groupQueryPagingItem = new CheckboxItem("groupQueryEnable", "Query Paging Enabled"); + { + groupQueryPagingItem.setCanEdit(false); + groupQueryPagingItem.setValue(false); + groupQueryPagingItem.setShowLabel(false); + groupQueryPagingItem.setShowTitle(true); + groupQueryPagingItem.setTitleOrientation(TitleOrientation.LEFT); + //You have to set this attribute + groupQueryPagingItem.setAttribute("labelAsTitle", true); + } + final TextItem groupQueryPagingCountItem = new TextItem("groupQueryCount", "Query Page Size"); + { + groupQueryPagingCountItem.setCanEdit(false); + groupQueryPagingCountItem.setWidth("100%"); + } + final CheckboxItem groupUsePosixGroupsItem 
= new CheckboxItem("groupUsePosixGroups", "Use Posix Enabled"); + { + groupUsePosixGroupsItem.setCanEdit(false); + groupUsePosixGroupsItem.setValue(false); + groupUsePosixGroupsItem.setShowLabel(false); + groupUsePosixGroupsItem.setShowTitle(true); + groupUsePosixGroupsItem.setTitleOrientation(TitleOrientation.LEFT); + //You have to set this attribute + groupUsePosixGroupsItem.setAttribute("labelAsTitle", true); + } + ldapGroupSettings + .setItems(groupSearch, groupMember, groupQueryPagingItem, groupQueryPagingCountItem, groupUsePosixGroupsItem); + + // orient both panels next to each other + HLayout panel = new HLayout(); + { + panel.addMember(availableGroupDetails); + DynamicForm spacerWrapper = new DynamicForm(); + spacerWrapper.setItems(new SpacerItem()); + panel.addMember(spacerWrapper); + panel.addMember(ldapGroupSettings); + } + availableFilterForm.addChild(panel); + + //launch operations to populate/refresh LDAP Group Query contents. + final Timer ldapPropertiesTimer = new Timer() { + public void run() { + //if system properties not set, launch request/update + String ldapGroupQuery = groupSearch.getValueAsString(); + if ((ldapGroupQuery == null) || (ldapGroupQuery.trim().isEmpty())) { + GWTServiceLookup.getSystemService().getSystemSettings(new AsyncCallback<SystemSettings>() { + @Override + public void onFailure(Throwable caught) { + groupQueryStatus.setIcons(failIcon); + groupQueryStatus.setDefaultValue("Fail: Unable to retrieve system settings."); + //TODO: update this message + CoreGUI.getErrorHandler().handleError(MSG.view_adminRoles_failLdap(), caught); + } + + @Override + public void onSuccess(SystemSettings settings) { + //retrieve relevant information once and update ui + String ldapGroupFilter = settings.get(SystemSetting.LDAP_GROUP_FILTER); + String ldapGroupMember = settings.get(SystemSetting.LDAP_GROUP_MEMBER); + String ldapGroupPagingEnabled = settings.get(SystemSetting.LDAP_GROUP_PAGING); + String ldapGroupPagingValue = 
settings.get(SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE); + String ldapGroupIsPosix = settings.get(SystemSetting.LDAP_GROUP_USE_POSIX); + groupSearch.setValue(ldapGroupFilter); + groupMember.setValue(ldapGroupMember); + groupQueryPagingItem.setValue(Boolean.valueOf(ldapGroupPagingEnabled)); + groupQueryPagingCountItem.setValue(ldapGroupPagingValue); + groupUsePosixGroupsItem.setValue(Boolean.valueOf(ldapGroupIsPosix)); + ldapGroupSettings.markForRedraw(); + } + }); + } + } + }; + ldapPropertiesTimer.scheduleRepeating(2000); // repeat interval in milliseconds, e.g. 30000 = 30seconds + + //launch operations to populate/refresh LDAP Group Query contents. + final Timer availableGroupsTimer = new Timer() { + public void run() { + if (!queryCompleted) { + //make request to RHQ about state of latest LDAP GWT request + GWTServiceLookup.getLdapService().findAvailableGroupsStatus( + new AsyncCallback<Set<Map<String, String>>>() { + @Override + public void onFailure(Throwable caught) { + groupQueryStatus.setIcons(failIcon); + groupQueryStatus + .setDefaultValue("Fail: Unable to retrieve status for latest AvailableGroups() call."); + //TODO: update this message + CoreGUI.getErrorHandler().handleError(MSG.view_adminRoles_failLdap(), caught); + } + + @Override + public void onSuccess(Set<Map<String, String>> results) { + // Log.debug("@@@@@@@ findAvailableGroupsStatus: SUCCESS:" + System.currentTimeMillis() + // + ":count:" + // + results.size()); + long start = -1, end = -1; + int pageCount = 0; + int resultCountValue = 0; + for (Map<String, String> map : results) { + String key = map.keySet().toArray()[0] + ""; + if (key.equals("query.results.parsed")) { + String value = map.get(key); + resultCountItem.setValue(value); + resultCountValue = Integer.valueOf(value); + } else if (key.equals("query.complete")) { + String value = map.get(key); + queryCompleted = Boolean.valueOf(value); + } else if (key.equals("query.start.time")) { + String value = map.get(key); + start = 
Long.valueOf(value); + } else if (key.equals("query.end.time")) { + String value = map.get(key); + end = Long.valueOf(value); + } else if (key.equals("query.page.count")) { + String value = map.get(key); + pageCountItem.setValue(value); + pageCount = Integer.valueOf(value); + } + } + //act on status details to add extra perf suggestions + if (queryCompleted) { + groupQueryStatus.setIcons(successIcon); + String success = "Success"; + String tooManyResults = success + ": Too many results."; + String queryTookLongResults = success + ": Query took long to complete."; + String queryTookManyPagesResults = success + ": Query required a lot of paging."; + //TODO: add in extra information about results. + if (resultCountValue > 20000) {//results throttled + groupQueryStatus.setDefaultValue(tooManyResults); + } else if ((end - start) >= 10 * 1000) {// took longer than 10s + groupQueryStatus.setDefaultValue(queryTookLongResults); + } else if (pageCount >= 20) {// took longer than 10s + groupQueryStatus.setDefaultValue(queryTookManyPagesResults); + } + } + availableGroupDetails.markForRedraw(); + //now cancel the timer + cancel(); + } + }); + } + } + }; + availableGroupsTimer.scheduleRepeating(3000); // repeat interval in milliseconds, e.g. 30000 = 30seconds
return availableFilterForm; } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/LdapGWTService.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/LdapGWTService.java index 8356773..521556b 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/LdapGWTService.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/LdapGWTService.java @@ -42,6 +42,11 @@ public interface LdapGWTService extends RemoteService { Set<Map<String, String>> findAvailableGroups() throws RuntimeException;
/** + * @return Map with status of last LDAP groups query available + */ + Set<Map<String, String>> findAvailableGroupsStatus() throws RuntimeException; + + /** * @return Map with LDAP details for user passed. */ Map<String, String> getLdapDetailsFor(String user) throws RuntimeException; diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/LdapGWTServiceImpl.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/LdapGWTServiceImpl.java index 3255b8f..28d9323 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/LdapGWTServiceImpl.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/LdapGWTServiceImpl.java @@ -77,6 +77,28 @@ public class LdapGWTServiceImpl extends AbstractGWTServiceImpl implements LdapGW } }
+ @Override + public Set<Map<String, String>> findAvailableGroupsStatus() throws RuntimeException { + try { + //add permissions check + Set<Permission> globalPermissions = authorizationManager.getExplicitGlobalPermissions(getSessionSubject()); + Boolean accessGranted = globalPermissions.contains(Permission.MANAGE_SECURITY); + + Set<Map<String, String>> results = null; + if (accessGranted) { + results = ldapManager.findAvailableGroupsStatus(); + } else { + String message = "User '" + getSessionSubject().getName() + + "' does not have sufficient permissions to query the status of available LDAP groups request."; + log.debug(message); + throw new PermissionException(message); + } + return SerialUtility.prepare(results, "findAvailableGroups"); + } catch (Throwable t) { + throw getExceptionToThrowToClient(t); + } + } + public void setLdapGroupsForRole(int roleId, List<String> groupIds) throws RuntimeException { try { //add permissions check diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java index 8a0e321..67abb77 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java @@ -93,9 +93,24 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { @EJB private SystemManagerLocal systemManager;
+ private static boolean groupQueryComplete = false; + private static int groupQueryResultCount = 0; + private static long groupQueryStartTime = -1; + private static long groupQueryEndTime = -1; + private static int groupQueryPageCount = 0; + + private void resetGroupQueryDetails() { + groupQueryComplete = false; + groupQueryResultCount = 0; + groupQueryStartTime = -1; + groupQueryEndTime = -1; + groupQueryPageCount = 0; + } public Set<Map<String, String>> findAvailableGroups() { //load current system properties Properties systemConfig = populateProperties(systemManager.getSystemSettings(subjectManager.getOverlord())); + //reset group query details + resetGroupQueryDetails();
//retrieve the filters. String groupFilter = (String) systemConfig.get(SystemSetting.LDAP_GROUP_FILTER.name()); @@ -114,6 +129,29 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { return emptyAvailableGroups; }
+ public Set<Map<String, String>> findAvailableGroupsStatus() { + Set<Map<String, String>> availableGroupsQueryStatus = new HashSet<Map<String, String>>(); + + //query.complete => true|false + availableGroupsQueryStatus.add(buildStatusEntry("query.complete", String.valueOf(groupQueryComplete))); + //query.results.parsed => 0...N + availableGroupsQueryStatus.add(buildStatusEntry("query.results.parsed", String.valueOf(groupQueryResultCount))); + //query.start.time => timestamp + availableGroupsQueryStatus.add(buildStatusEntry("query.start.time", String.valueOf(groupQueryStartTime))); + //query.end.time => timestamp|-1 + availableGroupsQueryStatus.add(buildStatusEntry("query.end.time", String.valueOf(groupQueryEndTime))); + //query.page.count => 0...N + availableGroupsQueryStatus.add(buildStatusEntry("query.page.count", String.valueOf(groupQueryPageCount))); + + return availableGroupsQueryStatus; + } + + private Map<String, String> buildStatusEntry(String key, String value) { + HashMap<String, String> status = new HashMap<String, String>(); + status.put(key, value); + return status; + } + public Set<String> findAvailableGroupsFor(String userName) { Properties options = populateProperties(systemManager.getSystemSettings(subjectManager.getOverlord())); String groupFilter = options.getProperty(SystemSetting.LDAP_GROUP_FILTER.name(), ""); @@ -449,8 +487,14 @@ SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.name(), "" String[] baseDNs = baseDN.split(BASEDN_DELIMITER);
for (int x = 0; x < baseDNs.length; x++) { + //update query start time + groupQueryStartTime = System.currentTimeMillis(); + executeGroupSearch(filter, groupDetailsMap, ctx, searchControls, baseDNs, x);
+ //update queryResultCount + groupQueryResultCount = groupDetailsMap.size(); + // continually parsing pages of results until we're done. // only if they're enabled in the UI. if (useQueryPaging) { @@ -472,6 +516,11 @@ SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.name(), "" ctx.setRequestControls(new Control[] { new PagedResultsControl(defaultPageSize, cookie, Control.CRITICAL) }); executeGroupSearch(filter, groupDetailsMap, ctx, searchControls, baseDNs, x); + + //update Query state after each page + groupQueryResultCount = groupDetailsMap.size(); + groupQueryPageCount++; + //empty out cookie cookie = null; //test for further iterations @@ -484,25 +533,6 @@ SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.name(), "" } } } - //continually parsing pages of results until we're done. - while (cookie != null) { - //ensure the next requests contains the session/cookie details - ctx.setRequestControls(new Control[] { new PagedResultsControl(defaultPageSize, cookie, - Control.CRITICAL) }); - executeGroupSearch(filter, groupDetailsMap, ctx, searchControls, baseDNs, x); - //empty out cookie - cookie = null; - //test for further iterations - controls = ctx.getResponseControls(); - if (controls != null) { - for (Control control : controls) { - if (control instanceof PagedResultsResponseControl) { - PagedResultsResponseControl pagedResult = (PagedResultsResponseControl) control; - cookie = pagedResult.getCookie(); - } - } - } - } } } } @@ -522,6 +552,9 @@ SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.name(), "" log.error("Unexpected LDAP communciation error:" + iex.getMessage(), iex); throw new LdapCommunicationException(iex); } + //update end of query information + groupQueryEndTime = System.currentTimeMillis(); + groupQueryComplete = true; return groupDetailsMap; }
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerLocal.java index 14945ce..fe3c392 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerLocal.java @@ -51,6 +51,8 @@ public interface LdapGroupManagerLocal {
Set<Map<String, String>> findAvailableGroups();
+ Set<Map<String, String>> findAvailableGroupsStatus(); + Set<String> findAvailableGroupsFor(String userName);
Map<String, String> findLdapUserDetails(String userName);
commit ca1f06a7084f21dd34160738fbeb0661e0eac106 Author: Stefan Negrea snegrea@redhat.com Date: Tue Aug 13 15:21:28 2013 -0500
Update the schema manager to check for schema version at startup. The server should not start if there is schema disagreement.
Also throw an exception during update if the installed schema is too advanced for existing installation.
diff --git a/.classpath b/.classpath index cad0bdb..d590a22 100644 --- a/.classpath +++ b/.classpath @@ -380,7 +380,7 @@ <classpathentry exported="true" kind="var" path="M2_REPO/org/jboss/byteman/byteman-submit/1.5.2/byteman-submit-1.5.2.jar" sourcepath="M2_REPO/org/jboss/byteman/byteman-submit/1.5.2/byteman-submit-1.5.2-sources.jar"/> <classpathentry exported="true" kind="var" path="M2_REPO/org/jboss/byteman/byteman-bmunit/1.5.2/byteman-bmunit-1.5.2.jar" sourcepath="M2_REPO/org/jboss/byteman/byteman-bmunit/1.5.2/byteman-bmunit-1.5.2-sources.jar"/> <classpathentry kind="var" path="M2_REPO/org/apache/cassandra/cassandra-all/1.2.4/cassandra-all-1.2.4.jar"/> - <classpathentry kind="var" path="M2_REPO/com/datastax/cassandra/cassandra-driver-core/1.0.0-rhq-1.2.4/cassandra-driver-core-1.0.0-rhq-1.2.4.jar"/> + <classpathentry kind="var" path="M2_REPO/com/datastax/cassandra/cassandra-driver-core/1.0.2-rhq-1.2.4/cassandra-driver-core-1.0.2-rhq-1.2.4.jar"/> <classpathentry kind="var" path="M2_REPO/org/apache/thrift/libthrift/0.7.0/libthrift-0.7.0.jar"/> <classpathentry kind="var" path="M2_REPO/commons-cli/commons-cli/1.2/commons-cli-1.2.jar"/> <classpathentry kind="var" path="M2_REPO/com/google/guava/guava/12.0/guava-12.0.jar"/> @@ -392,5 +392,6 @@ <classpathentry exported="true" kind="var" path="JDK_HOME/jre/lib/rt.jar"/> <classpathentry kind="var" path="M2_REPO/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar"/> <classpathentry kind="var" path="M2_REPO/org/apache/maven/plugin-tools/maven-plugin-annotations/3.2/maven-plugin-annotations-3.2.jar"/> + <classpathentry kind="var" path="M2_REPO/org/jboss/jboss-vfs/3.1.0.Final/jboss-vfs-3.1.0.Final.jar"/> <classpathentry kind="output" path="eclipse-classes"/> </classpath> diff --git a/modules/common/cassandra-schema/pom.xml b/modules/common/cassandra-schema/pom.xml index 024800d..b06464e 100644 --- a/modules/common/cassandra-schema/pom.xml +++ b/modules/common/cassandra-schema/pom.xml @@ -47,6 +47,13 
@@ <artifactId>cassandra-driver-core</artifactId> <version>${cassandra.driver.version}</version> </dependency> + + <dependency> + <groupId>org.jboss</groupId> + <artifactId>jboss-vfs</artifactId> + <version>${jboss-vfs.version}</version> + </dependency> + </dependencies>
<build> diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/AbstractManager.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/AbstractManager.java index baf7c23..7b8c520 100644 --- a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/AbstractManager.java +++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/AbstractManager.java @@ -92,7 +92,7 @@ abstract class AbstractManager { }
/** - * Init the Cassandra cluster session with the username and password provided + * Init the storage cluster session with the username and password provided * at creation. */ protected void initClusterSession() { @@ -100,7 +100,7 @@ abstract class AbstractManager { }
/** - * Init the Cassandra cluster session with provided username and password. + * Init the storage cluster session with provided username and password. * * @param username * @param password @@ -125,7 +125,7 @@ abstract class AbstractManager { }
/** - * Shutdown the Cassandra cluster connection. + * Shutdown the storage cluster connection. */ protected void shutdownClusterConnection() { log.info("Shutting down existing cluster connections"); @@ -135,7 +135,7 @@ abstract class AbstractManager { }
/** - * Get cluster size. + * Get storage cluster size. * * @return cluster size */ @@ -158,7 +158,7 @@ abstract class AbstractManager { }
/** - * Runs a CQL query to check the existence of the RHQ user + * Runs a CQL query to check the existence of the RHQ user on the storage cluster. * * @return true if the RHQ user exists, false otherwise */ @@ -173,7 +173,7 @@ abstract class AbstractManager { }
/** - * Run a CQL query to check the existence of the RHQ schema + * Run a CQL query to check the existence of the RHQ schema. * * @return true if the RHQ schema exists, false otherwise */ @@ -192,11 +192,11 @@ abstract class AbstractManager { }
/** - * Run a CQL query to retrieve the current RHQ schema version + * Run a CQL query to retrieve the installed storage schema version. * * @return current RHQ schema version */ - protected int getSchemaVersion() { + protected int getInstalledSchemaVersion() { int maxVersion = 0; try { ResultSet resultSet = executeManagementQuery(Query.VERSION); diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java index 8d28bfa..1a82779 100644 --- a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java +++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java @@ -80,7 +80,7 @@ public class SchemaManager { }
/** - * Install and update the RHQ schema. + * Install and update the storage cluster schema. * * @throws Exception */ @@ -90,7 +90,18 @@ public class SchemaManager { }
/** - * Drop RHQ schema and revert the database to pre-RHQ state. + * Check the existing storage cluster schema version to ensure it is compatible with the + * current installation. + * + * @throws Exception + */ + public void checkCompatibility() throws Exception { + VersionManager version = new VersionManager(username, password, nodes); + version.checkCompatibility(); + } + + /** + * Drop storage cluster schema and revert the storage cluster to pre-RHQ state. * * @throws Exception */ diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/UpdateFolder.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/UpdateFolder.java index 84cb515..31266ca 100644 --- a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/UpdateFolder.java +++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/UpdateFolder.java @@ -30,6 +30,7 @@ import java.io.InputStream; import java.io.InputStreamReader; import java.net.JarURLConnection; import java.net.URL; +import java.net.URLConnection; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; @@ -41,6 +42,8 @@ import java.util.jar.JarFile; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory;
+import org.jboss.vfs.VirtualFile; + /** * @author Stefan Negrea */ @@ -83,6 +86,19 @@ class UpdateFolder { }
/** + * The version represented by the latest/highest xml update file. + * + * @return the version + */ + public int getLatestVersion() { + if (this.updateFiles != null && this.updateFiles.size() > 0) { + return this.updateFiles.get(this.updateFiles.size() - 1).extractVersion(); + } + + return 0; + } + + /** * Loads the initial set of update files based on the input folder. * * @return list of update files @@ -115,9 +131,13 @@ class UpdateFolder { } } } else if (resourceFolderURL.getProtocol().equals("vfs")) { - // TODO need to add support for VFS if going to use inside EAP - throw new RuntimeException("The URL protocol [" + resourceFolderURL.getProtocol() + "] is not " + - "supported"); + URLConnection conn = resourceFolderURL.openConnection(); + VirtualFile virtualFolder = (VirtualFile)conn.getContent(); + for (VirtualFile virtualChild : virtualFolder.getChildren()) { + if (!virtualChild.isDirectory()) { + files.add(new UpdateFile(virtualChild.getPathNameRelativeTo(virtualFolder.getParent()))); + } + } } else { // In the event we get another protocol that we do not recognize, throw an // exception instead of failing silently. diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/VersionManager.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/VersionManager.java index 794e991..ec54a0c 100644 --- a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/VersionManager.java +++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/VersionManager.java @@ -34,6 +34,8 @@ import com.datastax.driver.core.exceptions.AuthenticationException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory;
+import org.rhq.cassandra.schema.exception.InstalledSchemaTooAdvancedException; +import org.rhq.cassandra.schema.exception.InstalledSchemaTooOldException; import org.rhq.core.domain.cloud.StorageNode;
/** @@ -73,14 +75,14 @@ class VersionManager extends AbstractManager { * @throws Exception */ public void install() throws Exception { - log.info("Preparing to install schema"); + log.info("Preparing to install storage schema");
boolean clusterSessionInitialized = false; try { initClusterSession(); clusterSessionInitialized = true; } catch (AuthenticationException e) { - log.debug("Authentication exception. Will now attempt to create the schema."); + log.debug("Authentication exception. Will now attempt to create the storage schema."); log.debug(e); } finally { shutdownClusterConnection(); @@ -119,7 +121,7 @@ class VersionManager extends AbstractManager { if (!schemaExists()) { execute(updateFolder.getUpdateFiles().get(0), properties); } else { - log.info("RHQ schema already exists."); + log.info("Storage schema already exists."); } } catch (Exception ex) { log.error(ex); @@ -147,28 +149,44 @@ class VersionManager extends AbstractManager { initClusterSession();
if (!schemaExists()) { - log.error("Schema not installed."); - throw new RuntimeException("Schema not installed propertly, cannot apply schema updates."); + log.error("Storage schema not installed."); + throw new RuntimeException("Storage schema not installed propertly, cannot apply schema updates."); }
UpdateFolder updateFolder = new UpdateFolder(Task.Update.getFolder());
- int currentSchemaVersion = getSchemaVersion(); - log.info("Current schema version is " + currentSchemaVersion); - updateFolder.removeAppliedUpdates(currentSchemaVersion); + int installedSchemaVersion = getInstalledSchemaVersion(); + log.info("Installed storage schema version is " + installedSchemaVersion);
- if (updateFolder.getUpdateFiles().size() == 0) { - log.info("RHQ schema is current! No updates applied."); + int requiredSchemaVersion = updateFolder.getLatestVersion(); + log.info("Required storage schema version is " + requiredSchemaVersion); + + if (requiredSchemaVersion == installedSchemaVersion) { + log.info("Storage schema version is current ( " + installedSchemaVersion + " ). No updates applied."); + } else if (requiredSchemaVersion < installedSchemaVersion) { + log.error("Installed storage cluster schema version: " + installedSchemaVersion + + ". Required schema version: " + requiredSchemaVersion + + ". Storage cluster schema has been updated beyond the capability of the existing server installation."); + throw new InstalledSchemaTooAdvancedException(); } else { - for (UpdateFile updateFile : updateFolder.getUpdateFiles()) { - execute(updateFile); + log.info("Storage schema requires updates. Updating from version " + installedSchemaVersion + + " to version " + requiredSchemaVersion + "."); + + updateFolder.removeAppliedUpdates(installedSchemaVersion); + + if (updateFolder.getUpdateFiles().size() == 0) { + log.info("Storage schema is current! No updates applied."); + } else { + for (UpdateFile updateFile : updateFolder.getUpdateFiles()) { + execute(updateFile);
- Properties versionProperties = new Properties(); - versionProperties.put("version", updateFile.extractVersion() + ""); - versionProperties.put("time", System.currentTimeMillis() + ""); - executeManagementQuery(Query.INSERT_SCHEMA_VERSION, versionProperties); + Properties versionProperties = new Properties(); + versionProperties.put("version", updateFile.extractVersion() + ""); + versionProperties.put("time", System.currentTimeMillis() + ""); + executeManagementQuery(Query.INSERT_SCHEMA_VERSION, versionProperties);
- log.info("RHQ schema update " + updateFile +" applied."); + log.info("Storage schema update " + updateFile + " applied."); + } } } } finally { @@ -185,7 +203,7 @@ class VersionManager extends AbstractManager { * @throws Exception */ public void drop() throws Exception { - log.info("Preparing to drop RHQ schema"); + log.info("Preparing to drop storage schema.");
UpdateFolder updateFolder = new UpdateFolder(Task.Drop.getFolder()); Properties properties = new Properties(System.getProperties()); @@ -209,22 +227,60 @@ class VersionManager extends AbstractManager { if (schemaExists()) { //2. Drop RHQ schema execute(updateFolder.getUpdateFiles().get(1), properties); - log.info("RHQ schema dropped."); + log.info("Storage schema dropped."); } else { - log.info("RHQ schema does not exist. Drop operation not required."); + log.info("Storage schema does not exist. Drop operation not required."); }
if (userExists()) { //3. Drop RHQ user execute(updateFolder.getUpdateFiles().get(2), properties); - log.info("RHQ admin user dropped."); + log.info("RHQ admin user dropped from storage cluster."); } else { - log.info("RHQ admin user does not exist. Drop operation not required."); + log.info("RHQ admin user does not exist on the storage cluster. Drop operation not required."); + } + } catch (Exception e) { + throw new RuntimeException(e); + } finally { + shutdownClusterConnection(); + } + } + + /** + * Check storage cluster schema version compatibility. + * If the version installed on the storage cluster is too advanced or too old compared + * to the version available in the current schema manager an error will thrown. + * + * @throws Exception schema compatibility exception + */ + public void checkCompatibility() throws Exception { + log.info("Preparing to check storage schema compatibility."); + try { + initClusterSession(); + + int installedSchemaVersion = this.getInstalledSchemaVersion(); + + UpdateFolder folder = new UpdateFolder(Task.Update.getFolder()); + int requiredSchemaVersion = folder.getLatestVersion(); + + if (installedSchemaVersion < requiredSchemaVersion) { + log.error("Storage cluster schema version:" + installedSchemaVersion + ". Required schema version: " + + requiredSchemaVersion + ". Please update storage cluster schema version."); + throw new InstalledSchemaTooOldException(); + } + + if (installedSchemaVersion > requiredSchemaVersion) { + log.error("Storage cluster schema version:" + installedSchemaVersion + ". Required schema version: " + + requiredSchemaVersion + + ". 
Storage cluster has been updated beyond the capability of the current server installation."); + throw new InstalledSchemaTooAdvancedException(); } } catch (Exception e) { throw new RuntimeException(e); } finally { shutdownClusterConnection(); + + log.info("Completed check for storage schema compatibility."); } } } diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/exception/InstalledSchemaTooAdvancedException.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/exception/InstalledSchemaTooAdvancedException.java new file mode 100644 index 0000000..2f83ef5 --- /dev/null +++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/exception/InstalledSchemaTooAdvancedException.java @@ -0,0 +1,38 @@ +/* + * + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation, and/or the GNU Lesser + * General Public License, version 2.1, also as published by the Free + * Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License and the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * and the GNU Lesser General Public License along with this program; + * if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + */ + +package org.rhq.cassandra.schema.exception; + + +/** + * @author Stefan Negrea + */ +public class InstalledSchemaTooAdvancedException extends Exception { + + public InstalledSchemaTooAdvancedException() { + super( + "Storage schema is too advanced for the current installation. Schema revisions have been applied beyond the capability of the installation."); + } +} \ No newline at end of file diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/exception/InstalledSchemaTooOldException.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/exception/InstalledSchemaTooOldException.java new file mode 100644 index 0000000..4da863b --- /dev/null +++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/exception/InstalledSchemaTooOldException.java @@ -0,0 +1,38 @@ +/* + * + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation, and/or the GNU Lesser + * General Public License, version 2.1, also as published by the Free + * Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License and the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * and the GNU Lesser General Public License along with this program; + * if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + */ + +package org.rhq.cassandra.schema.exception; + + +/** + * @author Stefan Negrea + */ +public class InstalledSchemaTooOldException extends Exception { + + public InstalledSchemaTooOldException() { + super( + "Storage schema needs to be updated. The schema manager contains updates not yet applied to the storage cluster installation."); + } +} \ No newline at end of file diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/listener/CoreGuiServletContextListener.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/listener/CoreGuiServletContextListener.java index 8260327..67e4389 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/listener/CoreGuiServletContextListener.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/listener/CoreGuiServletContextListener.java @@ -54,7 +54,11 @@ public class CoreGuiServletContextListener implements ServletContextListener { scheduledExecutorService.schedule(new Runnable() { @Override public void run() { - startupBean.init(); + try { + startupBean.init(); + } catch (Exception e) { + shutdownListener.handleNotification(); + } } }, 10, SECONDS); } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java index 6828b12..dbd599a 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java @@ -40,6 +40,7 @@ import com.datastax.driver.core.Session; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory;
+import org.rhq.cassandra.schema.SchemaManager; import org.rhq.cassandra.util.ClusterBuilder; import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.util.StringUtil; @@ -86,7 +87,6 @@ public class StorageClientManagerBean { String username = getRequiredStorageProperty(USERNAME_PROP); String password = getRequiredStorageProperty(PASSWORD_PROP);
- metricsConfiguration = new MetricsConfiguration(); List<StorageNode> storageNodes = storageNodeManager.getStorageNodes(); if (storageNodes.isEmpty()) { throw new IllegalStateException( @@ -94,12 +94,17 @@ public class StorageClientManagerBean { + "result of running dbsetup or deleting rows from rhq_storage_node table. Please re-install the " + "storage node to fix this issue."); } + + checkSchemaCompability(username, password, storageNodes); + + Session wrappedSession = createSession(username, password, storageNodeManager.getStorageNodes()); session = new StorageSession(wrappedSession);
storageClusterMonitor = new StorageClusterMonitor(); session.addStorageStateListener(storageClusterMonitor);
+ metricsConfiguration = new MetricsConfiguration(); metricsDAO = new MetricsDAO(session, metricsConfiguration);
initMetricsServer(); @@ -108,6 +113,22 @@ public class StorageClientManagerBean { log.info("Storage client subsystem is now initialized"); }
+ /** + * Checks storage node schema compatibility. + * + * @param username username + * @param password password + * @param storageNodes storage nodes + */ + private void checkSchemaCompability(String username, String password, List<StorageNode> storageNodes) { + SchemaManager schemaManager = new SchemaManager(username, password, storageNodes); + try { + schemaManager.checkCompatibility(); + } catch (Exception e) { + throw new RuntimeException(e.getMessage(), e); + } + } + public synchronized void shutdown() { if (!initialized) { log.info("Storage client subsystem is already shut down. Skipping shutdown steps."); diff --git a/pom.xml b/pom.xml index 46ed3be..e637a37 100644 --- a/pom.xml +++ b/pom.xml @@ -108,6 +108,8 @@ <jboss-modules.version>1.1.1.GA</jboss-modules.version> <jboss-dmr.version>1.1.1.Final</jboss-dmr.version> <jboss-msc.version>1.0.2.GA</jboss-msc.version> + <jboss-vfs.version>3.1.0.Final</jboss-vfs.version> +
<!-- Not Provided - some of these are needed by the agent --> <jboss-annotations.version>4.2.3.GA</jboss-annotations.version>
commit 3f6eb3395c56fa45a7c0b8748e6b6f7ed9e4465c Author: Jirka Kremser jkremser@redhat.com Date: Tue Aug 13 20:43:51 2013 +0200
Improvements to cluster-wide alert UI: improved grouping, headers of groups, link to definition.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAlertHistoryView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAlertHistoryView.java index 9c26b5c..0821585 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAlertHistoryView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAlertHistoryView.java @@ -23,7 +23,6 @@ import java.util.ArrayList; import com.smartgwt.client.data.Record; import com.smartgwt.client.types.GroupStartOpen; import com.smartgwt.client.types.ImageStyle; -import com.smartgwt.client.types.SummaryFunctionType; import com.smartgwt.client.widgets.Img; import com.smartgwt.client.widgets.grid.CellFormatter; import com.smartgwt.client.widgets.grid.ListGrid; @@ -37,6 +36,8 @@ import org.rhq.enterprise.gui.coregui.client.ImageManager; import org.rhq.enterprise.gui.coregui.client.LinkManager; import org.rhq.enterprise.gui.coregui.client.alert.AlertDataSource; import org.rhq.enterprise.gui.coregui.client.alert.AlertHistoryView; +import org.rhq.enterprise.gui.coregui.client.components.table.AbstractTableAction; +import org.rhq.enterprise.gui.coregui.client.components.table.TableActionEnablement; import org.rhq.enterprise.gui.coregui.client.inventory.resource.AncestryUtil; import org.rhq.enterprise.gui.coregui.client.util.StringUtility;
@@ -45,6 +46,7 @@ import org.rhq.enterprise.gui.coregui.client.util.StringUtility; * */ public class StorageNodeAlertHistoryView extends AlertHistoryView { + private boolean isGouped = true;
public StorageNodeAlertHistoryView(String tableTitle, int[] resourceIds) { super(tableTitle, resourceIds); @@ -64,8 +66,32 @@ public class StorageNodeAlertHistoryView extends AlertHistoryView { continue; } if (AlertCriteria.SORT_FIELD_CTIME.equals(field.getName())) { field.setWidth(240); + field.setShowGridSummary(true); + field.setShowGroupSummary(true); + field.setSummaryFunction(new SummaryFunction() { + public Object getSummaryValue(Record[] records, ListGridField field) { + if (records != null && records.length > 0 && records[0] != null) { + Integer resourceId = records[0].getAttributeAsInt(AncestryUtil.RESOURCE_ID); + Integer defId = records[0].getAttributeAsInt("definitionId"); + String url = LinkManager.getSubsystemAlertDefinitionLink(resourceId, defId); + return LinkManager.getHref(url, "Link to Definition"); + } else return ""; + } + }); + field.setCellFormatter(new CellFormatter() { + public String format(Object o, ListGridRecord listGridRecord, int i, int i1) { + if (listGridRecord.getAttribute("groupValue") != null) { + return (String) o; + } + Integer resourceId = listGridRecord.getAttributeAsInt(AncestryUtil.RESOURCE_ID); + Integer defId = listGridRecord.getAttributeAsInt("definitionId"); + String url = LinkManager.getSubsystemAlertDefinitionLink(resourceId, defId); + return LinkManager.getHref(url, o.toString()); + } + }); + field.setWidth(240); } else if ("conditionValue".equals(field.getName())) { - field.setWidth(90); + field.setWidth(140); } else if ("acknowledgingSubject".equals(field.getName())) { field.setSummaryFunction(new SummaryFunction() { public Object getSummaryValue(Record[] records, ListGridField field) { @@ -74,8 +100,8 @@ public class StorageNodeAlertHistoryView extends AlertHistoryView { if (record.getAttribute("acknowledgingSubject") != null) { count++; } - } - return count + " Unacked"; + } + return "(" + count + " / " + records.length + ")"; } }); field.setCellFormatter(new CellFormatter() { @@ -100,20 +126,12 @@ public class 
StorageNodeAlertHistoryView extends AlertHistoryView { newFields.add(1, field); continue; } else if ("name".equals(field.getName())) { - field.setShowGridSummary(true); - field.setShowGroupSummary(true); - field.setSummaryFunction(SummaryFunctionType.COUNT); field.setCellFormatter(new CellFormatter() { public String format(Object o, ListGridRecord listGridRecord, int i, int i1) { - if (listGridRecord.getAttribute("groupValue") != null) { - return (String) o; - } - Integer resourceId = listGridRecord.getAttributeAsInt(AncestryUtil.RESOURCE_ID); - Integer defId = listGridRecord.getAttributeAsInt("definitionId"); - String url = LinkManager.getSubsystemAlertDefinitionLink(resourceId, defId); - return LinkManager.getHref(url, o.toString()); + return o.toString(); } }); + field.setHidden(true); } newFields.add(field); } @@ -142,6 +160,9 @@ public class StorageNodeAlertHistoryView extends AlertHistoryView { if (value == null) { return ""; } + if (record.getAttribute("groupValue") != null) { + return value.toString(); + } String detailsUrl = getDetailUrlFromRecord(record); String formattedValue = StringUtility.escapeHtml(value.toString()); return LinkManager.getHref(detailsUrl, formattedValue); @@ -169,5 +190,20 @@ public class StorageNodeAlertHistoryView extends AlertHistoryView { throw new IllegalArgumentException(msg); } } - + + @Override + protected void configureTable() { + super.configureTable(); + addTableAction("(Un)Group Alerts", new AbstractTableAction(TableActionEnablement.ALWAYS) { + public void executeAction(ListGridRecord[] selection, Object actionValue) { + if (isGouped) { + getListGrid().ungroup(); + } else { + getListGrid().groupBy("name"); + } + isGouped = !isGouped; + refreshTableInfo(); + } + }); + } }
commit 7e1e68f2db20cd0389db474701b29e4a5c264003 Author: Jirka Kremser jkremser@redhat.com Date: Tue Aug 13 18:43:59 2013 +0200
Storage node configuration: added check for Max Heap Size (-Xmx) > Heap New Size (-Xmn).
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeConfigurationEditor.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeConfigurationEditor.java index f1c0003..7bcf2cd 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeConfigurationEditor.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeConfigurationEditor.java @@ -19,8 +19,10 @@ package org.rhq.enterprise.gui.coregui.client.admin.storage;
import java.util.ArrayList; +import java.util.HashMap; import java.util.LinkedHashSet; import java.util.List; +import java.util.Map; import java.util.Set;
import com.google.gwt.user.client.rpc.AsyncCallback; @@ -194,7 +196,7 @@ public class StorageNodeConfigurationEditor extends EnhancedVLayout implements R FIELD_HEAP_MAX, "Max Heap Size", configuration.getHeapSize(), - "The maximum heap size. This value will be used with the -Xmx JVM option. The value should be an integer with a suffix of M or G to indicate megabytes or gigabytes.")); + "The maximum heap size. This value will be used with the -Xmx JVM option. If you are going to increase/decrease this value, then you should also increase/decrease the new generation proportionally. The value should be an integer with a suffix of M or G to indicate megabytes or gigabytes.")); items .addAll(buildOneFormRowWithCombobox( FIELD_HEAP_NEW, @@ -233,6 +235,13 @@ public class StorageNodeConfigurationEditor extends EnhancedVLayout implements R saveButton.addClickHandler(new ClickHandler() { public void onClick(ClickEvent clickEvent) { if (form.validate()) { + if (!checkNewHeapLowerThanMaxHeap()) { + Map<String, String> errors = new HashMap<String, String>(2); + errors.put(FIELD_HEAP_MAX, "Should be higher than Heap New Size."); + errors.put(FIELD_HEAP_NEW, "Should be lower than Max Heap Size."); + form.setErrors(errors, true); + return; + } SC.ask( "Changing the storage node configuration requires restart of storage node. Do you want to continue?", new BooleanCallback() { @@ -263,6 +272,30 @@ public class StorageNodeConfigurationEditor extends EnhancedVLayout implements R return configuration; }
+ private boolean checkNewHeapLowerThanMaxHeap() { + // let's be paranoid + Object maxHeapObject = form.getField(FIELD_HEAP_MAX).getValue(); + Object newHeapObject = form.getField(FIELD_HEAP_NEW).getValue(); + + String maxHeapString = maxHeapObject != null ? maxHeapObject.toString().trim() : ""; + String newHeapString = newHeapObject != null ? newHeapObject.toString().trim() : ""; + + if (maxHeapString.isEmpty() || newHeapString.isEmpty()) { + return false; + } + + int maxHeap = Integer.parseInt(maxHeapString.substring(0, maxHeapString.length() - 2)); + int newHeap = Integer.parseInt(newHeapString.substring(0, newHeapString.length() - 2)); + + boolean isMaxHeapInMegs = maxHeapString.toLowerCase().indexOf("m") != -1; + boolean isNewHeapInMegs = newHeapString.toLowerCase().indexOf("m") != -1; + + maxHeap = isMaxHeapInMegs ? maxHeap : maxHeap * 1024; + newHeap = isNewHeapInMegs ? newHeap : newHeap * 1024; + + return newHeap < maxHeap; + } + private String getJVMMemoryString(String raw) { if (raw == null || raw.trim().isEmpty()) { throw new IllegalArgumentException("input string is null or empty");
commit 150dcce51cbbb601a09a57e655a9dc4f6ad43628 Author: Jirka Kremser jkremser@redhat.com Date: Tue Aug 13 12:57:44 2013 +0200
api checks: adding the return type of intentionally changed method for clirr to make it work
diff --git a/modules/core/domain/intentional-api-changes-since-4.8.0.xml b/modules/core/domain/intentional-api-changes-since-4.8.0.xml index b14d38e..6a22ea5 100644 --- a/modules/core/domain/intentional-api-changes-since-4.8.0.xml +++ b/modules/core/domain/intentional-api-changes-since-4.8.0.xml @@ -11,7 +11,7 @@ <difference> <className>org/rhq/core/domain/cloud/Server</className> <differenceType>7004</differenceType><!-- num argments changed --> - <method>clearStatus()</method> + <method>void clearStatus()</method> <justification> This class is not exposed remotely. The previous method signature was doing a blind clear of the status. The new method will clear only the specified status.
commit f0ffafaa95ca2c0685b2bb4ce93c2bbe11d58983 Author: Jirka Kremser jkremser@redhat.com Date: Mon Aug 12 21:40:05 2013 +0200
New component for storage node alerts (derived from AlertHistoryView); Predefined alert templates should have the description fields correctly filled.; calling the update configuration method in an async way.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java index 5538db5..cd0ec54 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java @@ -18,14 +18,22 @@ */ package org.rhq.enterprise.gui.coregui.client.admin.storage;
+import java.util.ArrayList; import java.util.EnumSet;
import com.google.gwt.user.client.Timer; import com.google.gwt.user.client.rpc.AsyncCallback; +import com.smartgwt.client.data.DataSourceField; +import com.smartgwt.client.types.GroupStartOpen; import com.smartgwt.client.widgets.Label; +import com.smartgwt.client.widgets.grid.CellFormatter; +import com.smartgwt.client.widgets.grid.ListGrid; +import com.smartgwt.client.widgets.grid.ListGridField; +import com.smartgwt.client.widgets.grid.ListGridRecord; import com.smartgwt.client.widgets.tab.events.TabSelectedEvent; import com.smartgwt.client.widgets.tab.events.TabSelectedHandler;
+import org.rhq.core.domain.criteria.AlertCriteria; import org.rhq.core.domain.criteria.ResourceGroupCriteria; import org.rhq.core.domain.resource.ResourceType; import org.rhq.core.domain.resource.group.composite.ResourceGroupComposite; @@ -34,17 +42,22 @@ import org.rhq.core.domain.util.collection.ArrayUtils; import org.rhq.enterprise.gui.coregui.client.BookmarkableView; import org.rhq.enterprise.gui.coregui.client.CoreGUI; import org.rhq.enterprise.gui.coregui.client.IconEnum; +import org.rhq.enterprise.gui.coregui.client.LinkManager; import org.rhq.enterprise.gui.coregui.client.ViewPath; import org.rhq.enterprise.gui.coregui.client.admin.AdministrationView; +import org.rhq.enterprise.gui.coregui.client.alert.AlertDataSource; import org.rhq.enterprise.gui.coregui.client.alert.AlertHistoryView; import org.rhq.enterprise.gui.coregui.client.components.tab.NamedTab; import org.rhq.enterprise.gui.coregui.client.components.tab.NamedTabSet; +import org.rhq.enterprise.gui.coregui.client.components.table.Table; import org.rhq.enterprise.gui.coregui.client.components.view.HasViewName; import org.rhq.enterprise.gui.coregui.client.components.view.ViewName; import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; import org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.configuration.GroupResourceConfigurationEditView; +import org.rhq.enterprise.gui.coregui.client.inventory.resource.AncestryUtil; import org.rhq.enterprise.gui.coregui.client.inventory.resource.type.ResourceTypeRepository; import org.rhq.enterprise.gui.coregui.client.util.Log; +import org.rhq.enterprise.gui.coregui.client.util.StringUtility; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; import org.rhq.enterprise.gui.coregui.client.util.message.Message;
@@ -120,7 +133,8 @@ public class StorageNodeAdminView extends EnhancedVLayout implements/* HasViewNa tabset.getTabByName(tabInfo.name.getName()).setPane(new Label("in progress..")); } else if (tabInfo.equals(alertsTabInfo)) { if (resIds != null) { - tabset.getTabByName(tabInfo.name.getName()).setPane(new AlertHistoryView("storageNodesAlerts", resIds)); + tabset.getTabByName(tabInfo.name.getName()).setPane( + new StorageNodeAlertHistoryView("storageNodesAlerts", resIds)); } else { GWTServiceLookup.getStorageService().findResourcesWithAlertDefinitions(new AsyncCallback<Integer[]>() { @Override @@ -137,7 +151,7 @@ public class StorageNodeAdminView extends EnhancedVLayout implements/* HasViewNa } else { resIds = ArrayUtils.unwrapArray(result); tabset.getTabByName(tabInfo.name.getName()).setPane( - new AlertHistoryView("storageNodesAlerts", resIds)); + new StorageNodeAlertHistoryView("storageNodesAlerts", resIds)); tabset.selectTab(tabInfo.index); } } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAlertHistoryView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAlertHistoryView.java new file mode 100644 index 0000000..9c26b5c --- /dev/null +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAlertHistoryView.java @@ -0,0 +1,173 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ +package org.rhq.enterprise.gui.coregui.client.admin.storage; + +import java.util.ArrayList; + +import com.smartgwt.client.data.Record; +import com.smartgwt.client.types.GroupStartOpen; +import com.smartgwt.client.types.ImageStyle; +import com.smartgwt.client.types.SummaryFunctionType; +import com.smartgwt.client.widgets.Img; +import com.smartgwt.client.widgets.grid.CellFormatter; +import com.smartgwt.client.widgets.grid.ListGrid; +import com.smartgwt.client.widgets.grid.ListGridField; +import com.smartgwt.client.widgets.grid.ListGridRecord; +import com.smartgwt.client.widgets.grid.SummaryFunction; + +import org.rhq.core.domain.criteria.AlertCriteria; +import org.rhq.enterprise.gui.coregui.client.CoreGUI; +import org.rhq.enterprise.gui.coregui.client.ImageManager; +import org.rhq.enterprise.gui.coregui.client.LinkManager; +import org.rhq.enterprise.gui.coregui.client.alert.AlertDataSource; +import org.rhq.enterprise.gui.coregui.client.alert.AlertHistoryView; +import org.rhq.enterprise.gui.coregui.client.inventory.resource.AncestryUtil; +import org.rhq.enterprise.gui.coregui.client.util.StringUtility; + +/** + * @author Jirka Kremser + * + */ +public class StorageNodeAlertHistoryView extends AlertHistoryView { + + public StorageNodeAlertHistoryView(String tableTitle, int[] resourceIds) { + super(tableTitle, resourceIds); + } + + @Override + public AlertDataSource getDataSource() { + return new AlertDataSource() { + @Override + public ArrayList<ListGridField> getListGridFields() { + ArrayList<ListGridField> fields = super.getListGridFields(); + ArrayList<ListGridField> newFields = new ArrayList<ListGridField>(fields.size()); + for (ListGridField field : fields) { + if ("priority".equals(field.getName()) + || 
AncestryUtil.RESOURCE_NAME.equals(field.getName()) + || AncestryUtil.RESOURCE_ANCESTRY.equals(field.getName())) { + continue; + } if (AlertCriteria.SORT_FIELD_CTIME.equals(field.getName())) { + field.setWidth(240); + } else if ("conditionValue".equals(field.getName())) { + field.setWidth(90); + } else if ("acknowledgingSubject".equals(field.getName())) { + field.setSummaryFunction(new SummaryFunction() { + public Object getSummaryValue(Record[] records, ListGridField field) { + int count = 0; + for (Record record : records) { + if (record.getAttribute("acknowledgingSubject") != null) { + count++; + } + } + return count + " Unacked"; + } + }); + field.setCellFormatter(new CellFormatter() { + public String format(Object o, ListGridRecord listGridRecord, int i, int i1) { + if (listGridRecord.getAttribute("groupValue") != null) { + return (String) o; + } + String ackSubject = listGridRecord.getAttribute("acknowledgingSubject"); + if (ackSubject == null) { + return " "; + } else { + Img checkedImg = new Img(ImageManager.getAlertStatusCheckedIcon(), 80, 16); + checkedImg.setImageType(ImageStyle.CENTER); + return checkedImg.getInnerHTML(); + } + } + }); + + field.setShowGridSummary(false); + field.setShowGroupSummary(true); + field.setWidth(90); + newFields.add(1, field); + continue; + } else if ("name".equals(field.getName())) { + field.setShowGridSummary(true); + field.setShowGroupSummary(true); + field.setSummaryFunction(SummaryFunctionType.COUNT); + field.setCellFormatter(new CellFormatter() { + public String format(Object o, ListGridRecord listGridRecord, int i, int i1) { + if (listGridRecord.getAttribute("groupValue") != null) { + return (String) o; + } + Integer resourceId = listGridRecord.getAttributeAsInt(AncestryUtil.RESOURCE_ID); + Integer defId = listGridRecord.getAttributeAsInt("definitionId"); + String url = LinkManager.getSubsystemAlertDefinitionLink(resourceId, defId); + return LinkManager.getHref(url, o.toString()); + } + }); + } + newFields.add(field); + 
} + ListGridField descriptionField = new ListGridField("description", MSG.common_title_description()); + descriptionField.setCanSortClientOnly(true); + newFields.add(descriptionField); + return newFields; + } + }; + } + + @Override + protected void configureListGrid(ListGrid grid) { + ListGrid listGrid = super.getListGrid(); + listGrid.setGroupStartOpen(GroupStartOpen.ALL); + listGrid.setShowGroupSummary(true); + listGrid.setShowGroupSummaryInHeader(true); + + listGrid.setGroupByField("name"); + } + + @Override + protected CellFormatter getDetailsLinkColumnCellFormatter() { + return new CellFormatter() { + public String format(Object value, ListGridRecord record, int i, int i1) { + if (value == null) { + return ""; + } + String detailsUrl = getDetailUrlFromRecord(record); + String formattedValue = StringUtility.escapeHtml(value.toString()); + return LinkManager.getHref(detailsUrl, formattedValue); + } + }; + } + + @Override + public void showDetails(ListGridRecord record) { + CoreGUI.goToView(getDetailUrlFromRecord(record)); + } + + private String getDetailUrlFromRecord(ListGridRecord record) { + if (record == null) { + throw new IllegalArgumentException("'record' parameter is null."); + } + Integer recordId = getId(record); + Integer resourceId = record.getAttributeAsInt(AncestryUtil.RESOURCE_ID); + if (recordId != null && recordId.intValue() > 0 && resourceId != null && resourceId > 0) { + return "#Resource/" + resourceId + "/Alerts/History/" + convertIDToCurrentViewPath(recordId); + } else { + String msg = MSG.view_tableSection_error_badId(this.getClass().toString(), (recordId == null) ? 
"null" + : recordId.toString()); + CoreGUI.getErrorHandler().handleError(msg); + throw new IllegalArgumentException(msg); + } + } + +} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeConfigurationEditor.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeConfigurationEditor.java index 3c4923b..f1c0003 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeConfigurationEditor.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeConfigurationEditor.java @@ -74,15 +74,12 @@ public class StorageNodeConfigurationEditor extends EnhancedVLayout implements R
private void save() { updateConfiguration(); - GWTServiceLookup.getStorageService().updateConfiguration(configuration, new AsyncCallback<Boolean>() { - public void onSuccess(Boolean result) { - if (result) { - Message msg = new Message("Storage node settings were successfully updated.", Message.Severity.Info); - CoreGUI.getMessageCenter().notify(msg); - } else { - onFailure(new Exception("Operation failed.")); - } + GWTServiceLookup.getStorageService().updateConfiguration(configuration, new AsyncCallback<Void>() { + public void onSuccess(Void result) { + Message msg = new Message("Storage node settings were successfully updated.", Message.Severity.Info); + CoreGUI.getMessageCenter().notify(msg); } + public void onFailure(Throwable caught) { CoreGUI.getErrorHandler().handleError("Unable to update the storage node settings.", caught); } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java index 6e6df57..957bf34 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java @@ -84,5 +84,5 @@ public interface StorageGWTService extends RemoteService {
StorageNodeConfigurationComposite retrieveConfiguration(StorageNode storageNode) throws RuntimeException;
- boolean updateConfiguration(StorageNodeConfigurationComposite storageNodeConfiguration) throws RuntimeException; + void updateConfiguration(StorageNodeConfigurationComposite storageNodeConfiguration) throws RuntimeException; } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java index 4a01427..f7f7442 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java @@ -163,9 +163,9 @@ public class StorageGWTServiceImpl extends AbstractGWTServiceImpl implements Sto }
@Override - public boolean updateConfiguration(StorageNodeConfigurationComposite storageNodeConfiguration) throws RuntimeException { + public void updateConfiguration(StorageNodeConfigurationComposite storageNodeConfiguration) throws RuntimeException { try { - return storageNodeManager.updateConfiguration(getSessionSubject(), storageNodeConfiguration); + storageNodeManager.updateConfigurationAsync(getSessionSubject(), storageNodeConfiguration); } catch (Throwable t) { throw getExceptionToThrowToClient(t); } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 053b57e..9e41692 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -36,6 +36,11 @@ import java.util.Map; import java.util.Queue; import java.util.Set;
+import org.rhq.enterprise.server.storage.StorageClusterSettings; +import org.rhq.enterprise.server.storage.StorageClusterSettingsManagerBean; +import org.rhq.enterprise.server.storage.StorageNodeOperationsHandlerLocal; + +import javax.ejb.Asynchronous; import javax.ejb.EJB; import javax.ejb.Stateless; import javax.ejb.TransactionAttribute; @@ -83,9 +88,6 @@ import org.rhq.enterprise.server.resource.ResourceManagerLocal; import org.rhq.enterprise.server.resource.ResourceTypeManagerLocal; import org.rhq.enterprise.server.rest.reporting.MeasurementConverter; import org.rhq.enterprise.server.scheduler.SchedulerLocal; -import org.rhq.enterprise.server.storage.StorageClusterSettings; -import org.rhq.enterprise.server.storage.StorageClusterSettingsManagerBean; -import org.rhq.enterprise.server.storage.StorageNodeOperationsHandlerLocal; import org.rhq.enterprise.server.util.CriteriaQueryGenerator; import org.rhq.enterprise.server.util.CriteriaQueryRunner; import org.rhq.enterprise.server.util.LookupUtil; @@ -624,6 +626,12 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
return configuration; } + + @Override + @Asynchronous + public void updateConfigurationAsync(Subject subject, StorageNodeConfigurationComposite storageNodeConfiguration) { + updateConfiguration(subject, storageNodeConfiguration); + }
@Override public boolean updateConfiguration(Subject subject, StorageNodeConfigurationComposite storageNodeConfiguration) { diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java index 0c1b0ab..b5ee7f0 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java @@ -65,6 +65,8 @@ public interface StorageNodeManagerLocal { StorageNodeConfigurationComposite retrieveConfiguration(Subject subject, StorageNode storageNode);
boolean updateConfiguration(Subject subject, StorageNodeConfigurationComposite storageNodeConfiguration); + + void updateConfigurationAsync(Subject subject, StorageNodeConfigurationComposite storageNodeConfiguration);
/** * Fetches the list of StorageNode entities based on provided criteria. diff --git a/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java b/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java index 840477c..f752d1d 100644 --- a/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java +++ b/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java @@ -258,6 +258,7 @@ public class AlertDefinitionServerPluginComponent implements ServerPluginCompone newTemplate.setResourceType(resourceType); newTemplate.setPriority(AlertPriority.MEDIUM); newTemplate.setConditionExpression(BooleanExpression.ANY); + newTemplate.setDescription(storageNodeHighHeapTemplate.getDescription()); newTemplate.setRecoveryId(0); newTemplate.setEnabled(true);
@@ -308,6 +309,7 @@ public class AlertDefinitionServerPluginComponent implements ServerPluginCompone newTemplate.setResourceType(resourceType); newTemplate.setPriority(AlertPriority.MEDIUM); newTemplate.setConditionExpression(BooleanExpression.ANY); + newTemplate.setDescription(storageNodeHighDiskUsageTemplate.getDescription()); newTemplate.setRecoveryId(0); newTemplate.setEnabled(true);
@@ -377,6 +379,7 @@ public class AlertDefinitionServerPluginComponent implements ServerPluginCompone newTemplate.setResourceType(resourceType); newTemplate.setPriority(AlertPriority.MEDIUM); newTemplate.setConditionExpression(BooleanExpression.ANY); + newTemplate.setDescription(storageNodeSnapshotFailureTemplate.getDescription()); newTemplate.setRecoveryId(0); newTemplate.setEnabled(true);
commit 5981e7a330d6f6460f3453a94de98bd44011c57e Author: Stefan Negrea snegrea@redhat.com Date: Mon Aug 12 12:17:17 2013 -0500
Slight change to the comment text.
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/Server.java b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/Server.java index 7fa82f7..671db6b 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/Server.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/Server.java @@ -88,9 +88,9 @@ import org.rhq.core.domain.resource.Agent; + " AND ( s.name <> :thisServerName OR :thisServerName IS NULL ) "), // @NamedQuery(name = Server.QUERY_UPDATE_STATUS_BY_NAME, query = "" // + " UPDATE Server s " // - + " SET s.status = 3 " //change this to the only value possible before adding MANUAL_MAINTENANCE_MODE - //this status should never be set to negative numbers since they are values allowed - //by the bitmask. + + " SET s.status = 3 " //Change this to the only value possible before adding MANUAL_MAINTENANCE_MODE. + //This status should never be set to negative since negative values are not + //allowed by the bitmask. + " WHERE s.status = 0 ") }) @SequenceGenerator(allocationSize = org.rhq.core.domain.util.Constants.ALLOCATION_SIZE, name = "RHQ_SERVER_ID_SEQ", sequenceName = "RHQ_SERVER_ID_SEQ") @Table(name = "RHQ_SERVER")
commit 4fea183cb635454d922de5d0af9fc1b05003ab5b Author: Stefan Negrea snegrea@redhat.com Date: Mon Aug 12 12:09:47 2013 -0500
One more attempt at the justification for the Server interface change.
diff --git a/modules/core/domain/intentional-api-changes-since-4.8.0.xml b/modules/core/domain/intentional-api-changes-since-4.8.0.xml index 6630d2c..b14d38e 100644 --- a/modules/core/domain/intentional-api-changes-since-4.8.0.xml +++ b/modules/core/domain/intentional-api-changes-since-4.8.0.xml @@ -11,7 +11,7 @@ <difference> <className>org/rhq/core/domain/cloud/Server</className> <differenceType>7004</differenceType><!-- num argments changed --> - <method>clearStatus(*)</method> + <method>clearStatus()</method> <justification> This class is not exposed remotely. The previous method signature was doing a blind clear of the status. The new method will clear only the specified status.
commit 75b1eb6f6296498bec59cc0245de9ee509130f90 Author: Stefan Negrea snegrea@redhat.com Date: Mon Aug 12 12:09:10 2013 -0500
Update code related to initial value of status field. Also, reserved the first five bits of the bitmask for debug purposes.
Because the field was initialized to -1 it was wrongly computing flags during startup resulting in a false manual maintenance mode.
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/Server.java b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/Server.java index caeabcf..7fa82f7 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/Server.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/Server.java @@ -49,7 +49,7 @@ import org.rhq.core.domain.resource.Agent;
/** * An RHQ server node in the cluster - * + * * @author Joseph Marques */ @Entity(name = "Server") @@ -88,7 +88,9 @@ import org.rhq.core.domain.resource.Agent; + " AND ( s.name <> :thisServerName OR :thisServerName IS NULL ) "), // @NamedQuery(name = Server.QUERY_UPDATE_STATUS_BY_NAME, query = "" // + " UPDATE Server s " // - + " SET s.status = -1 " // negative numbers so that bitmask strat does not conflict with this one + + " SET s.status = 3 " //change this to the only value possible before adding MANUAL_MAINTENANCE_MODE + //this status should never be set to negative numbers since they are values allowed + //by the bitmask. + " WHERE s.status = 0 ") }) @SequenceGenerator(allocationSize = org.rhq.core.domain.util.Constants.ALLOCATION_SIZE, name = "RHQ_SERVER_ID_SEQ", sequenceName = "RHQ_SERVER_ID_SEQ") @Table(name = "RHQ_SERVER") @@ -276,7 +278,7 @@ public class Server implements Serializable { /** * Returns 0 if this server is current. Otherwise, returns a mask of {@link Server.Status} * elements corresponding to the updates that have occurred that are related to this server. - * + * * @return 0 if this server is current. Otherwise, returns a mask of {@link Server.Status} * elements corresponding to the updates that have occurred that are related to this server. */ @@ -320,11 +322,17 @@ public class Server implements Serializable { return Status.getMessages(status); }
+ //Please read BZ 535484 for initial design: https://bugzilla.redhat.com/show_bug.cgi?id=535484 + //Prior to MANUAL_MAINTENANCE_MODE only used for debug purposes, design now changed to + //persist statuses between server restarts in production code public enum Status {
+ //Debug only flags (first five bits are reserved for debug flags) RESOURCE_HIERARCHY_UPDATED(1, "The resource hierarchy has been updated"), // ALERT_DEFINITION(2, "Some alert definition with a global condition category was updated"), - MANUAL_MAINTENANCE_MODE(4,"Manual Maintenance mode setup by the user either via UI or properties file."); + + //Production flags + MANUAL_MAINTENANCE_MODE(32, "Manual Maintenance mode setup by the user either via UI or properties file.");
public final int mask; public final String message;
commit c0f35d4258501941288d602ed3b160eb3892b7b7 Author: Jay Shaughnessy jshaughn@redhat.com Date: Mon Aug 12 09:53:23 2013 -0400
Fix dbupgrade issue in storagenode address task. Needs to properly support non-postgres dbs.
diff --git a/modules/core/dbutils/src/main/java/org/rhq/core/db/upgrade/StorageNodeAddressUpgradeTask.java b/modules/core/dbutils/src/main/java/org/rhq/core/db/upgrade/StorageNodeAddressUpgradeTask.java index 9fc90ed..0bd8277 100644 --- a/modules/core/dbutils/src/main/java/org/rhq/core/db/upgrade/StorageNodeAddressUpgradeTask.java +++ b/modules/core/dbutils/src/main/java/org/rhq/core/db/upgrade/StorageNodeAddressUpgradeTask.java @@ -6,12 +6,12 @@ import java.sql.Connection; import java.sql.SQLException; import java.util.List;
+import mazz.i18n.Logger; + import org.rhq.core.db.DatabaseType; import org.rhq.core.db.DbUtilsI18NFactory; import org.rhq.core.db.DbUtilsI18NResourceKeys;
-import mazz.i18n.Logger; - /** * Updates the address field of storage node entities to ensure we are storing IP addresses and not hostnames. We want * to store the IP address since that is what Cassandra uses for inter-node communication. JMX operations that return @@ -34,7 +34,7 @@ public class StorageNodeAddressUpgradeTask implements DatabaseUpgradeTask { String storageNodeAddress = null; try { for (Object[] row : results) { - id = (Integer) row[0]; + id = databaseType.getInteger(row[0]); storageNodeAddress = (String) row[1]; InetAddress address = InetAddress.getByName(storageNodeAddress); if (!storageNodeAddress.equals(address.getHostAddress())) {
commit 8bf5b65d945774aedfdceaf01320239a289d9047 Author: Jay Shaughnessy jshaughn@redhat.com Date: Fri Aug 9 15:46:30 2013 -0400
update sample bundles with new required 'compliance' attribute
diff --git a/etc/samples/provisioning/sample-cli/resources/sample-bundle-v1.zip b/etc/samples/provisioning/sample-cli/resources/sample-bundle-v1.zip index c044d9a..b574072 100644 Binary files a/etc/samples/provisioning/sample-cli/resources/sample-bundle-v1.zip and b/etc/samples/provisioning/sample-cli/resources/sample-bundle-v1.zip differ diff --git a/etc/samples/provisioning/sample-cli/resources/sample-bundle-v2.zip b/etc/samples/provisioning/sample-cli/resources/sample-bundle-v2.zip index f8b3392..ad42eb7 100644 Binary files a/etc/samples/provisioning/sample-cli/resources/sample-bundle-v2.zip and b/etc/samples/provisioning/sample-cli/resources/sample-bundle-v2.zip differ
commit 21b30fe6630fb0ffc3d8e6b40c5de1945322692b Author: Stefan Negrea snegrea@redhat.com Date: Fri Aug 9 18:18:25 2013 -0500
Thanks Eclipse for clipping an import on save :(
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 2f91dba..053b57e 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -85,6 +85,7 @@ import org.rhq.enterprise.server.rest.reporting.MeasurementConverter; import org.rhq.enterprise.server.scheduler.SchedulerLocal; import org.rhq.enterprise.server.storage.StorageClusterSettings; import org.rhq.enterprise.server.storage.StorageClusterSettingsManagerBean; +import org.rhq.enterprise.server.storage.StorageNodeOperationsHandlerLocal; import org.rhq.enterprise.server.util.CriteriaQueryGenerator; import org.rhq.enterprise.server.util.CriteriaQueryRunner; import org.rhq.enterprise.server.util.LookupUtil;
commit 754a390442418daa47ab4e61505ef7e2dae42574 Author: Stefan Negrea snegrea@redhat.com Date: Fri Aug 9 16:47:12 2013 -0500
Simplify the configuration update method by allowing the plugin to restart the storage node if necessary. This is possible now because the JMX port is no longer required for checking the cluster availability.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index d21be2f..2f91dba 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -85,7 +85,6 @@ import org.rhq.enterprise.server.rest.reporting.MeasurementConverter; import org.rhq.enterprise.server.scheduler.SchedulerLocal; import org.rhq.enterprise.server.storage.StorageClusterSettings; import org.rhq.enterprise.server.storage.StorageClusterSettingsManagerBean; -import org.rhq.enterprise.server.storage.StorageNodeOperationsHandlerLocal; import org.rhq.enterprise.server.util.CriteriaQueryGenerator; import org.rhq.enterprise.server.util.CriteriaQueryRunner; import org.rhq.enterprise.server.util.LookupUtil; @@ -643,7 +642,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN if (storageNodeConfiguration.getThreadStackSize() != null) { parameters.setSimpleValue("threadStackSize", storageNodeConfiguration.getThreadStackSize() + ""); } - parameters.setSimpleValue("restartIfRequired", "false"); + parameters.setSimpleValue("restartIfRequired", "true");
Resource storageNodeResource = storageNode.getResource();
@@ -656,13 +655,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN storageNode.setJmxPort(storageNodeConfiguration.getJmxPort()); entityManager.merge(storageNode);
- //3. Restart the storage node - result = runOperationAndWaitForResult(subject, storageNodeResource, RESTART_OPERATION, - new Configuration()); - - //4. Update the plugin configuration to talk with the new server - //Up to this point communication with the storage node should not have been affected by the intermediate - //changes + //3. Update the plugin configuration to talk with the new server Configuration storageNodePluginConfig = configurationManager.getPluginConfiguration(subject, storageNodeResource.getId());
commit 54cb3bda6f905bbfef2e04ede8019d99fda06922 Author: Stefan Negrea snegrea@redhat.com Date: Fri Aug 9 16:39:30 2013 -0500
[BZ 995424] + [BZ 991598] + Code Review changes
Updates to the server operation mode design based on code review and recent BZs. The most important change is initializing the storage cluster connection ahead of initializing the server. This will eliminate placing the server in maintenance mode because the storage cluster connection is not yet initialized.
diff --git a/modules/core/domain/intentional-api-changes-since-4.8.0.xml b/modules/core/domain/intentional-api-changes-since-4.8.0.xml index a8a77b4..6630d2c 100644 --- a/modules/core/domain/intentional-api-changes-since-4.8.0.xml +++ b/modules/core/domain/intentional-api-changes-since-4.8.0.xml @@ -8,4 +8,13 @@ Changing StorageNode.QUERY_FIND_BY_ADDRESS. The constants for native queries shouldn't be part of the public API. </justification> </difference> + <difference> + <className>org/rhq/core/domain/cloud/Server</className> + <differenceType>7004</differenceType><!-- num argments changed --> + <method>clearStatus(*)</method> + <justification> + This class is not exposed remotely. The previous method signature was doing a blind clear of the status. + The new method will clear only the specified status. + </justification> + </difference> </differences> diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/Server.java b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/Server.java index c43a234..caeabcf 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/Server.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/Server.java @@ -241,19 +241,19 @@ public class Server implements Serializable { NORMAL("This server is a Normal Member of the HA server cloud", true);
public final String message; - private final boolean configurable; + private final boolean readOnly;
private OperationMode(String message, boolean configurable) { this.message = message; - this.configurable = configurable; + this.readOnly = configurable; }
public String getMessage() { return message; }
- public boolean isConfigurable() { - return configurable; + public boolean isReadOnly() { + return readOnly; } }
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/TopologyManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/TopologyManagerBean.java index 4db1697..c4225de 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/TopologyManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/TopologyManagerBean.java @@ -241,7 +241,7 @@ public class TopologyManagerBean implements TopologyManagerLocal { if (mode == null) { throw new IllegalArgumentException("Mode cannot be null."); } - if (!mode.isConfigurable()) { + if (!mode.isReadOnly()) { throw new IllegalArgumentException("Cannot directly set a mode that is not configurable. Mode " + mode.name() + " is not configurable."); } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/instance/ServerManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/instance/ServerManagerBean.java index 44bd557..ddbd0ba 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/instance/ServerManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/instance/ServerManagerBean.java @@ -270,7 +270,7 @@ public class ServerManagerBean implements ServerManagerLocal { lastEstablishedServerMode = serverMode; server.setOperationMode(lastEstablishedServerMode); server.setMtime(System.currentTimeMillis()); - } catch (Exception e) { + } catch (Throwable e) { log.error("Unable to change HA Server Mode from " + lastEstablishedServerMode + " to " + serverMode + ": " + e); } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/StartupBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/StartupBean.java index b0187e5..d99bb7b 100644 --- 
a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/StartupBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/StartupBean.java @@ -185,6 +185,10 @@ public class StartupBean implements StartupLocal { log.error("Could not load ResourceFacets cache.", t); }
+ //Server depends on the storage cluster availability. Since the storage client init just + //establishes connectivity with the storage cluster, then run it before the server init. + initStorageClient(); + // Before starting determine the operating mode of this server and // take any necessary initialization action. Must happen before comm startup since listeners // may be added. @@ -203,7 +207,6 @@ public class StartupBean implements StartupLocal { startPluginDeployer(); // make sure this is initialized before starting the server plugin container startServerPluginContainer(); // before comm in case an agent wants to talk to it upgradeRhqUserSecurityDomainIfNeeded(); - initStorageClient(); startServerCommunicationServices(); startScheduler(); scheduleJobs(); @@ -436,7 +439,6 @@ public class StartupBean implements StartupLocal { */ private void initStorageClient() { storageClientManager.init(); - serverManager.establishCurrentServerMode(); }
/** @@ -659,7 +661,7 @@ public class StartupBean implements StartupLocal { log.error("Cannot create storage cluster read repair job", e); } } - + /** * This seeds the agent clients cache with clients for all known agents. These clients will be started so they can * immediately begin to send any persisted guaranteed messages that might already exist. This method must be called diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/StorageSession.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/StorageSession.java index c8f44d9..3f7af3f 100644 --- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/StorageSession.java +++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/StorageSession.java @@ -29,14 +29,6 @@ public class StorageSession implements Host.StateListener {
public void addStorageStateListener(StorageStateListener listener) { listeners.add(listener); - - for (Host host : wrappedSession.getCluster().getMetadata().getAllHosts()) { - if(host.getMonitor().isUp()){ - listener.onStorageNodeUp(host.getAddress()); - } else { - listener.onStorageNodeUp(host.getAddress()); - } - } }
public ResultSet execute(String query) {
commit 31422e907aac429ffcdc966801d7cdecc4bca882 Author: Mike Thompson mithomps@redhat.com Date: Fri Aug 9 14:00:58 2013 -0700
Fix graph labels for chrome & IE.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java index 3e573dd..64c355d 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java @@ -260,8 +260,6 @@ public class AvailabilityOverUnderGraphType implements AvailabilityGraphType { .attr("fill", "#b0b0b0") .attr("stroke-width", "0.5") .attr("transform", "translate(0," + height + ")") - .attr("letter-spacing", "3") - .style("text-anchor", "end") .call(xAxis);
svg.append("text") @@ -296,6 +294,8 @@ public class AvailabilityOverUnderGraphType implements AvailabilityGraphType {
}
+ + function createHovers() { $wnd.jQuery('svg rect.availBars').tipsy({ gravity: 's', diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/StackedBarMetricGraphImpl.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/StackedBarMetricGraphImpl.java index 67552d3..0c95f44 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/StackedBarMetricGraphImpl.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/StackedBarMetricGraphImpl.java @@ -469,8 +469,6 @@ public class StackedBarMetricGraphImpl extends AbstractMetricGraph { svg.append("g") .attr("class", "x axis") .attr("transform", "translate(0," + height + ")") - .attr("letter-spacing", "3") - .style("text-anchor", "end") .call(xAxis);
@@ -481,7 +479,6 @@ public class StackedBarMetricGraphImpl extends AbstractMetricGraph { .append("text") .attr("transform", "rotate(-90),translate( -60,0)") .attr("y", -30) - .attr("letter-spacing", "3") .style("text-anchor", "end") .text(chartContext.yAxisUnits === "NONE" ? "" : chartContext.yAxisUnits);
commit 125bc23aba00ebc793f51d146268c9a90f43cda8 Author: John Sanda jsanda@redhat.com Date: Tue Aug 13 23:56:39 2013 -0400
adding test impl of StorageClusterSettingsManagerLocal
diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/storage/FakeStorageClusterSettingsManagerBean.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/storage/FakeStorageClusterSettingsManagerBean.java new file mode 100644 index 0000000..3ac61e4 --- /dev/null +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/storage/FakeStorageClusterSettingsManagerBean.java @@ -0,0 +1,26 @@ +package org.rhq.enterprise.server.storage; + +import javax.ejb.Stateless; + +import org.rhq.core.domain.auth.Subject; + +/** + * @author John Sanda + */ +@Stateless +public class FakeStorageClusterSettingsManagerBean implements StorageClusterSettingsManagerLocal { + + @Override + public StorageClusterSettings getClusterSettings(Subject subject) { + StorageClusterSettings settings = new StorageClusterSettings(); + settings.setGossipPort(7100); + settings.setCqlPort(9042); + + return settings; + } + + @Override + public void setClusterSettings(Subject subject, StorageClusterSettings clusterSettings) { + //To change body of implemented methods use File | Settings | File Templates. + } +}
commit 2da51f406a79a5ff6ba1251755d610364df1f119 Author: John Sanda jsanda@redhat.com Date: Tue Aug 13 23:06:17 2013 -0400
attempting to fix server itest failures
I am not sure why but DiscoveryBossBeanTest has been failing due to calls to StorageClusterSettingsManagerBean made by StorageNodeManagerBean. I am deploying a test impl for StorageClusterSettingsManager to see if it resolves the issues. There is no need to deploy the production EJB. It is just a thin wrapper around SystemManagerBean, and it can/should be tested separately.
diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java index 44a4646..0f1c6fb 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java @@ -88,6 +88,7 @@ import org.rhq.enterprise.server.plugin.pc.ServerPluginService; import org.rhq.enterprise.server.plugin.pc.ServerPluginServiceMBean; import org.rhq.enterprise.server.scheduler.SchedulerService; import org.rhq.enterprise.server.scheduler.SchedulerServiceMBean; +import org.rhq.enterprise.server.storage.FakeStorageClusterSettingsManagerBean; import org.rhq.enterprise.server.storage.StorageClientManagerBean; import org.rhq.enterprise.server.util.LookupUtil; import org.rhq.test.AssertUtils; @@ -352,6 +353,8 @@ public abstract class AbstractEJB3Test extends Arquillian { .create("/rhq-server.jar/org/rhq/enterprise/server/core/StartupBean$1.class")); testEar.delete(ArchivePaths .create("/rhq-server.jar/org/rhq/enterprise/server/core/ShutdownListener.class")); + testEar.delete(ArchivePaths + .create("/rhq-server.jar/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerBean.class"));
//replace the above startup beans with stripped down versions testEar.add(new ClassAsset(StrippedDownStartupBean.class), ArchivePaths @@ -359,6 +362,8 @@ public abstract class AbstractEJB3Test extends Arquillian { testEar.add(new ClassAsset(StrippedDownStartupBeanPreparation.class), ArchivePaths .create("/rhq-server.jar/org/rhq/enterprise/server/test/" + "StrippedDownStartupBeanPreparation.class")); + testEar.add(new ClassAsset(FakeStorageClusterSettingsManagerBean.class), ArchivePaths + .create("/rhq-server.jar/org/rhq/enterprise/server/storage/FakeStorageClusterSettingsManagerBean.class")); testEar.addAsManifestResource(new ByteArrayAsset("<beans/>".getBytes()), ArchivePaths.create("beans.xml"));
// add the test classes to the deployment diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 9e41692..e925de2 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -36,10 +36,6 @@ import java.util.Map; import java.util.Queue; import java.util.Set;
-import org.rhq.enterprise.server.storage.StorageClusterSettings; -import org.rhq.enterprise.server.storage.StorageClusterSettingsManagerBean; -import org.rhq.enterprise.server.storage.StorageNodeOperationsHandlerLocal; - import javax.ejb.Asynchronous; import javax.ejb.EJB; import javax.ejb.Stateless; @@ -88,6 +84,9 @@ import org.rhq.enterprise.server.resource.ResourceManagerLocal; import org.rhq.enterprise.server.resource.ResourceTypeManagerLocal; import org.rhq.enterprise.server.rest.reporting.MeasurementConverter; import org.rhq.enterprise.server.scheduler.SchedulerLocal; +import org.rhq.enterprise.server.storage.StorageClusterSettings; +import org.rhq.enterprise.server.storage.StorageClusterSettingsManagerLocal; +import org.rhq.enterprise.server.storage.StorageNodeOperationsHandlerLocal; import org.rhq.enterprise.server.util.CriteriaQueryGenerator; import org.rhq.enterprise.server.util.CriteriaQueryRunner; import org.rhq.enterprise.server.util.LookupUtil; @@ -154,7 +153,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN private ResourceManagerLocal resourceManager;
@EJB - private StorageClusterSettingsManagerBean storageClusterSettingsManager; + private StorageClusterSettingsManagerLocal storageClusterSettingsManager;
@EJB private StorageNodeOperationsHandlerLocal storageNodeOperationsHandler; diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerBean.java index 3e10a2f..9418bca 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerBean.java @@ -3,7 +3,7 @@ package org.rhq.enterprise.server.storage; import java.util.Map;
import javax.ejb.EJB; -import javax.ejb.Singleton; +import javax.ejb.Stateless;
import org.rhq.core.domain.auth.Subject; import org.rhq.core.domain.common.composite.SystemSetting; @@ -13,12 +13,13 @@ import org.rhq.enterprise.server.system.SystemManagerLocal; /** * @author John Sanda */ -@Singleton -public class StorageClusterSettingsManagerBean { +@Stateless +public class StorageClusterSettingsManagerBean implements StorageClusterSettingsManagerLocal {
@EJB private SystemManagerLocal systemManager;
+ @Override public StorageClusterSettings getClusterSettings(Subject subject) { SystemSettings settings = systemManager.getSystemSettings(subject); Map<String, String> settingsMap = settings.toMap(); @@ -41,6 +42,7 @@ public class StorageClusterSettingsManagerBean { return clusterSettings; }
+ @Override public void setClusterSettings(Subject subject, StorageClusterSettings clusterSettings) { SystemSettings settings = new SystemSettings(); settings.put(SystemSetting.STORAGE_CQL_PORT, Integer.toString(clusterSettings.getCqlPort())); diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerLocal.java new file mode 100644 index 0000000..cb63bc4 --- /dev/null +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerLocal.java @@ -0,0 +1,15 @@ +package org.rhq.enterprise.server.storage; + +import javax.ejb.Local; + +import org.rhq.core.domain.auth.Subject; + +/** + * @author John Sanda + */ +@Local +public interface StorageClusterSettingsManagerLocal { + StorageClusterSettings getClusterSettings(Subject subject); + + void setClusterSettings(Subject subject, StorageClusterSettings clusterSettings); +} diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java index 3e0eed8..1bf3cec 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java @@ -63,7 +63,7 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa private OperationManagerLocal operationManager;
@EJB - private StorageClusterSettingsManagerBean storageClusterSettingsManager; + private StorageClusterSettingsManagerLocal storageClusterSettingsManager;
@EJB private StorageClientManagerBean storageClientManager;
commit 8724984f76bf0f7f4738306045de36feba4ea2f3 Merge: 3d34ad8 fd55bc6 Author: Mike Thompson <mithomps@redhat.com> Date: Tue Aug 13 15:40:11 2013 -0700
Merge branch 'mtho11/ie-chart-tooltips'
commit fd55bc69e89433a25d37752e6cfb946fc6a4d35d Author: Mike Thompson <mithomps@redhat.com> Date: Tue Aug 13 15:32:30 2013 -0700
[BZ 949750] - Chart hovers fail to render in IE. Availability Chart hover fix.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java index 64c355d..59d4c75 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java @@ -250,6 +250,44 @@ public class AvailabilityOverUnderGraphType implements AvailabilityGraphType { .attr("opacity", ".75") .attr("fill", function (d) { return calcBarFill(d); + }).on("mouseover",function (d) { + + var timeFormatter = $wnd.d3.time.format(availChartContext.chartHoverTimeFormat), + dateFormatter = $wnd.d3.time.format(availChartContext.chartHoverDateFormat), + availStart = new Date(+d.availStart), + xPosition = parseFloat($wnd.d3.select(this).attr("x")), + availTooltipDiv = $wnd.d3.select("#availTooltip") + .style("left", xPosition + "px") + .style("top", "0px"); + + availTooltipDiv.select("#availTooltipLabel") + .text(availChartContext.hoverBarAvailabilityLabel); + + availTooltipDiv + .select("#availTooltipType") + .text(d.availTypeMessage); + + availTooltipDiv + .select("#availTooltipStartDate") + .text(dateFormatter(availStart)); + + availTooltipDiv + .select("#availTooltipStartTime") + .text(timeFormatter(availStart)); + + availTooltipDiv + .select("#availTooltipDurationLabel") + .text(availChartContext.hoverBarLabel); + + availTooltipDiv + .select("#availTooltipDuration") + .text(d.availDuration); + + //Show the tooltip + $wnd.d3.select("#availTooltip").classed("hidden", false); + }).on("mouseout", function () { + //Hide the tooltip + $wnd.d3.select("#availTooltip").classed("hidden", true); });
xAxis.tickFormat($wnd.rhqCommon.getD3CustomTimeFormat(availChartContext.chartXaxisTimeFormatHours, availChartContext.chartXaxisTimeFormatHoursMinutes)); @@ -295,42 +333,11 @@ public class AvailabilityOverUnderGraphType implements AvailabilityGraphType { }
- - function createHovers() { - $wnd.jQuery('svg rect.availBars').tipsy({ - gravity: 's', - html: true, - trigger: 'hover', - title: function () { - var d = this.__data__; - return formatHovers(d); - }, - show: function (e, el) { - el.css({ 'z-index': '990000'}) - } - }); - } - - - function formatHovers(d) { - var timeFormatter = $wnd.d3.time.format(availChartContext.chartHoverTimeFormat), - dateFormatter = $wnd.d3.time.format(availChartContext.chartHoverDateFormat), - availStart = new Date(+d.availStart); - - return '<div class="chartHoverEnclosingDiv">' + - '<div class="chartHoverAlignLeft"><span >' + availChartContext.hoverBarAvailabilityLabel + ': </span><span style="width:50px;">' + d.availTypeMessage + '</span></div>' + - '<div class="chartHoverAlignLeft"><span>' + dateFormatter(availStart) + ' ' + timeFormatter(availStart) + '</span></div>' + - '<div class="chartHoverAlignLeft"><span >' + availChartContext.hoverBarLabel + ': </span><span style="width:50px;">' + d.availDuration + '</span></div>' + - '</div>'; - - } - return { // Public API draw: function (availChartContext) { "use strict"; drawBars(availChartContext); - createHovers(); } }; // end public closure
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/avail/AvailabilityD3GraphView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/avail/AvailabilityD3GraphView.java index 3fc0192..6e3b590 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/avail/AvailabilityD3GraphView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/avail/AvailabilityD3GraphView.java @@ -83,7 +83,22 @@ public class AvailabilityD3GraphView<T extends AvailabilityGraphType> extends En divAndSvgDefs.append("<div id=\"availChart-" + availabilityGraphType.getChartId() + "\" ><svg xmlns=\"http://www.w3.org/2000/svg\" version=\"1.1\" style=\"height:65px;\">"); divAndSvgDefs.append(getSvgDefs()); - divAndSvgDefs.append("</svg></div>"); + divAndSvgDefs.append("</svg>"); + divAndSvgDefs.append("<div id=\"availTooltip\" class=\"hidden\" >"); + divAndSvgDefs.append("<div>"); + divAndSvgDefs.append("<span id=\"availTooltipLabel\" class=\"availTooltipLabel\"></span>"); + divAndSvgDefs.append("<span> : </span>"); + divAndSvgDefs.append("<span id=\"availTooltipType\" style=\"width:40px;font-weight:bold;\"></span>"); + divAndSvgDefs.append("<span> - </span>"); + divAndSvgDefs.append("<span id=\"availTooltipDuration\" ></span>"); + divAndSvgDefs.append("<div/>"); + divAndSvgDefs.append("<div>"); + divAndSvgDefs.append("<span id=\"availTooltipStartDate\" ></span>"); + divAndSvgDefs.append("<span> </span>"); + divAndSvgDefs.append("<span id=\"availTooltipStartTime\" ></span>"); + divAndSvgDefs.append("</div>"); + divAndSvgDefs.append("</div>"); // end availTooltipDiv + divAndSvgDefs.append("</div>"); HTMLFlow graph = new HTMLFlow(divAndSvgDefs.toString()); graph.setWidth100(); graph.setHeight(65); diff --git 
a/modules/enterprise/gui/coregui/src/main/webapp/css/charts.css b/modules/enterprise/gui/coregui/src/main/webapp/css/charts.css index 2dbd79b..6a004b4 100644 --- a/modules/enterprise/gui/coregui/src/main/webapp/css/charts.css +++ b/modules/enterprise/gui/coregui/src/main/webapp/css/charts.css @@ -156,3 +156,39 @@ rect.low { font-weight: bold; color: #70c4e2; } + +#availTooltip { + z-index: 990000; + position: absolute; + width: 150px; + height: auto; + padding: 5px; + background-color: #000; + opacity: 0.55; + -webkit-border-radius: 5px; + -moz-border-radius: 5px; + border-radius: 5px; + -webkit-box-shadow: 4px 4px 5px rgba(0, 0, 0, 0.4); + -moz-box-shadow: 4px 4px 5px rgba(0, 0, 0, 0.4); + box-shadow: 4px 4px 5px rgba(0, 0, 0, 0.4); + pointer-events: none; +} + +#availTooltip.hidden { + display: none; +} + +#availTooltip div { + margin: 0; + font-family: Arial, Verdana, sans-serif; + font-size: 9px; + color: #d3d3d6; +} + +.availTooltipLabel { + width: 40px; + font-weight: bold; + color: #d3d3d6; +} + +
commit 3d34ad80b5ca4d1adb0ec5828753761043904dad Author: Jay Shaughnessy <jshaughn@redhat.com> Date: Tue Aug 13 15:01:50 2013 -0400
fix some issues, add some new api methods
diff --git a/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml index 40e826a..5182ef1 100644 --- a/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml +++ b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml @@ -67,7 +67,7 @@ <difference> <className>org/rhq/enterprise/server/bundle/BundleManagerRemote</className> <differenceType>7012</differenceType> <!-- method added to an interface --> - <method>org.rhq.core.domain.bundle.BundleGroup createBundleGroup(org.rhq.core.domain.auth.Subject, java.lang.String, java.lang.String)</method> + <method>org.rhq.core.domain.bundle.BundleGroup createBundleGroup(org.rhq.core.domain.auth.Subject, org.rhq.core.domain.bundle.BundleGroup)</method> <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> </difference>
@@ -128,38 +128,45 @@ </difference>
<difference> - <className>org/rhq/enterprise/server/bundle/RoleManagerRemote</className> + <className>org/rhq/enterprise/server/authz/RoleManagerRemote</className> <differenceType>7012</differenceType> <!-- method added to an interface --> <method>void addBundleGroupsToRole(org.rhq.core.domain.auth.Subject, int, int[])</method> <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> </difference>
<difference> - <className>org/rhq/enterprise/server/bundle/RoleManagerRemote</className> + <className>org/rhq/enterprise/server/authz/RoleManagerRemote</className> <differenceType>7012</differenceType> <!-- method added to an interface --> <method>void addRolesToBundleGroup(org.rhq.core.domain.auth.Subject, int, int[])</method> <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> </difference>
<difference> - <className>org/rhq/enterprise/server/bundle/RoleManagerRemote</className> + <className>org/rhq/enterprise/server/authz/RoleManagerRemote</className> <differenceType>7012</differenceType> <!-- method added to an interface --> <method>void removeBundleGroupsFromRole(org.rhq.core.domain.auth.Subject, int, int[])</method> <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> </difference>
<difference> - <className>org/rhq/enterprise/server/bundle/RoleManagerRemote</className> + <className>org/rhq/enterprise/server/authz/RoleManagerRemote</className> <differenceType>7012</differenceType> <!-- method added to an interface --> <method>void removeRolesFromBundleGroup(org.rhq.core.domain.auth.Subject, int, int[])</method> <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> </difference>
<difference> - <className>org/rhq/enterprise/server/bundle/RoleManagerRemote</className> + <className>org/rhq/enterprise/server/authz/RoleManagerRemote</className> <differenceType>7012</differenceType> <!-- method added to an interface --> <method>void setAssignedBundleGroups(org.rhq.core.domain.auth.Subject, int, int[])</method> <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> </difference>
+ <difference> + <className>org/rhq/enterprise/server/bundle/BundleManagerRemote</className> + <differenceType>7012</differenceType> <!-- method added to an interface --> + <method>org.rhq.core.domain.bundle.BundleGroup updateBundleGroup(org.rhq.core.domain.auth.Subject, org.rhq.core.domain.bundle.BundleGroup)</method> + <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> + </difference> + </differences>
commit 2c874481d5b6f91ca916380633b11f1a45000510 Author: Simeon Pinder <spinder@fulliautomatix.conchfritter.com> Date: Mon Aug 12 15:17:15 2013 -0400
[BZ 990576] fix issue with available groups status update speed, guard against client failures and add more logging.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleLdapGroupSelector.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleLdapGroupSelector.java index c4e7145..4cb1275 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleLdapGroupSelector.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleLdapGroupSelector.java @@ -72,7 +72,8 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c public static final String FIELD_DESCRIPTION = "description"; final TextItem searchTextItem = new TextItem(); protected int cursorPosition; - private static int retryAttempt = 0; + private static int retryAttempt = 0;//limit retries on failure + private static int noProgressAttempts = 0;//limit really slow attempt parse times
//override the selector key for ldap group selection. protected String getSelectorKey() { @@ -263,6 +264,7 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c groupQueryStatus.setIcons(failIcon); groupQueryStatus.setDefaultValue(MSG.view_adminRoles_failLdapGroupsSettings()); CoreGUI.getErrorHandler().handleError(MSG.view_adminRoles_failLdapGroupsSettings(), caught); + Log.debug(MSG.view_adminRoles_failLdapGroupsSettings()); }
@Override @@ -308,6 +310,7 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c retryAttempt++; if (retryAttempt > 3) { cancel();//kill thread + Log.debug(MSG.view_adminRoles_failLdapRetry()); retryAttempt = 0; } } @@ -339,6 +342,10 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c pageCount = Integer.valueOf(value); } } + + if (resultCountValue == 0) { + noProgressAttempts++; + } //Update status information String warnTooManyResults = MSG.view_adminRoles_ldapWarnTooManyResults(); String warnQueryTakingLongResults = MSG.view_adminRoles_ldapWarnQueryTakingLongResults(); @@ -378,12 +385,15 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c if (resultCountValue > 20000) {//results throttled adviceItem.setValue(tooManyResults); adviceItem.setTooltip(tooManyResults); + Log.debug(tooManyResults);//log error to client. } else if ((current - start) >= 10 * 1000) {// took longer than 10s adviceItem.setValue(queryTookLongResults); adviceItem.setTooltip(queryTookLongResults); + Log.debug(queryTookLongResults);//log error to client. } else if (pageCount >= 20) {// required more than 20 pages of results adviceItem.setValue(queryTookManyPagesResults); adviceItem.setTooltip(queryTookManyPagesResults); + Log.debug(queryTookManyPagesResults);//log error to client. } else {//simple success. groupQueryStatus.setDefaultValue(success); groupQueryStatus.setIcons(successIcon); @@ -391,8 +401,19 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c adviceItem.setTooltip(none); adviceItem.setDisabled(true); } + noProgressAttempts = 0; //now cancel the timer cancel(); + } else if (noProgressAttempts >= 10) {//availGroups query stuck on server side + //cancel the timer. 
+ cancel(); + String clientSideQuitting = MSG.view_adminRoles_failLdapCancelling();//catch all + adviceItem.setDisabled(false); + groupQueryStatus.setIcons(attentionIcon); + adviceItem.setValue(clientSideQuitting); + adviceItem.setTooltip(clientSideQuitting); + noProgressAttempts = 0; + Log.debug(clientSideQuitting);//log error to client. } availableGroupDetails.markForRedraw(); } diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties index 869a8ce..9fd2b24 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties @@ -512,9 +512,11 @@ view_adminRoles_assignedSubjects = Assigned Subjects view_adminRoles_failCreateRoleWithExistingName = Failed to create role with existing name [{0}]. Please use another name. view_adminRoles_failLdap = Failed to determine if LDAP configured - assuming no LDAP. view_adminRoles_failLdapAvailableGroups = Failed: Unable to retrieve status for latest AvailableGroups() call. +view_adminRoles_failLdapCancelling = Client unable to proceed. Cancelling all future available groups status checks for this thread. Likely 1)LDAP server communication failures or 2)ldap query checking inexplicably failed. view_adminRoles_failLdapGroups = Failed to retrieve available LDAP groups - assuming no LDAP groups. view_adminRoles_failLdapGroupsRole = Failed to load LDAP groups available for role. view_adminRoles_failLdapGroupsSettings = Failed to retrieve system settings details for LDAP groups. +view_adminRoles_failLdapRetry = Retried 3 times. Cancelling future available group requests. view_adminRoles_failRoles = Failed to fetch roles. 
view_adminRoles_globalPerms = Global Permissions view_adminRoles_ldapGroups = LDAP Groups diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties index 4e6532d..dca98cb 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties @@ -531,9 +531,11 @@ view_adminRoles_assignedSubjects = PÅiÅazené subjekty view_adminRoles_failCreateRoleWithExistingName = NepodaÅilo se vytvoÅit roli s existujÃcÃm jménem [{0}]. PouÅŸÃjte prosÃm jiné jméno. view_adminRoles_failLdap = NepodaÅilo se urÄit je-li LDAP nastaven - pÅedpokládám, ÅŸe LDAP nenà pÅÃtomen. ##view_adminRoles_failLdapAvailableGroups = NepodaÅilo se zÃskat informace o LDAP skupinách - pÅedpokládám, ÅŸe LDAP nenà pÅÃtomen. +##view_adminRoles_failLdapCancelling = NepodaÅilo se zÃskat informace o LDAP skupinách - pÅedpokládám, ÅŸe LDAP nenà pÅÃtomen. view_adminRoles_failLdapGroups = NepodaÅilo se zÃskat informace o LDAP skupinách - pÅedpokládám, ÅŸe LDAP nenà pÅÃtomen. view_adminRoles_failLdapGroupsRole = NepodaÅilo se zÃskat informace o LDAP skupinách pro roli. ##view_adminRoles_failLdapGroupsSettings = NepodaÅilo se zÃskat informace o LDAP skupinách - pÅedpokládám, ÅŸe LDAP nenà pÅÃtomen. +##view_adminRoles_failLdapRetry = NepodaÅilo se zÃskat informace o LDAP skupinách - pÅedpokládám, ÅŸe LDAP nenà pÅÃtomen. view_adminRoles_failRoles = NepodaÅilo se zÃskat role. 
view_adminRoles_globalPerms = Globalnà povolenà view_adminRoles_ldapGroups = LDAP skupiny diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties index c50c577..8e76bca 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties @@ -509,9 +509,11 @@ view_adminRoles_assignedSubjects = Zugewisene Benutzer view_adminRoles_failCreateRoleWithExistingName = Konnte die Rolle mit dem existierenden Namen [{0}] nicht anlegen. Bitte wÀhlen Sie einen anderen Namen. view_adminRoles_failLdap = Konnte nicht ermitteln, ob LDAP konfiguriert ist - gehe von keiner LDAP-Konfiguration aus. ##view_adminRoles_failLdapAvailableGroups = Konnte de LDAP-Gruppen nicht laden. Annahme ist, dass es keine gibt. +##view_adminRoles_failLdapCancelling = Konnte de LDAP-Gruppen nicht laden. Annahme ist, dass es keine gibt. view_adminRoles_failLdapGroups = Konnte de LDAP-Gruppen nicht laden. Annahme ist, dass es keine gibt. view_adminRoles_failLdapGroupsRole = Konnte die LDAP-Gruppen fÃŒr die Rolle nicht laden. ##view_adminRoles_failLdapGroupsSettings = Konnte de LDAP-Gruppen nicht laden. Annahme ist, dass es keine gibt. +##view_adminRoles_failLdapRetry = Konnte de LDAP-Gruppen nicht laden. Annahme ist, dass es keine gibt. view_adminRoles_failRoles = Konnte die Rollen nicht laden. 
view_adminRoles_globalPerms = Applikationsweite Rechte view_adminRoles_ldapGroups = LDAP-Gruppen diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties index 54fef87..a8e5795 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties @@ -508,9 +508,11 @@ view_adminRoles_assignedSubjects = å²ãåœãŠãããSubjects ##view_adminRoles_failCreateRoleWithExistingName = Failed to create role with existing name [{0}]. Please use another name. view_adminRoles_failLdap = LDAPãæ§æããããã©ããã®æ±ºå®ã«å€±æããŸãã - LDAPãç¡ãããšãä»®å®ããŸã ##view_adminRoles_failLdapAvailableGroups = å©çšå¯èœãªLDAPã°ã«ãŒããèªã¿åºãã®ã«å€±æããŸãã - LDAPã°ã«ãŒããç¡ãããšãä»®å®ããŸã +##view_adminRoles_failLdapCancelling = å©çšå¯èœãªLDAPã°ã«ãŒããèªã¿åºãã®ã«å€±æããŸãã - LDAPã°ã«ãŒããç¡ãããšãä»®å®ããŸã view_adminRoles_failLdapGroups = å©çšå¯èœãªLDAPã°ã«ãŒããèªã¿åºãã®ã«å€±æããŸãã - LDAPã°ã«ãŒããç¡ãããšãä»®å®ããŸã view_adminRoles_failLdapGroupsRole = ããŒã«ã®ããã«å©çšå¯èœãªLDAPã°ã«ãŒããããŒãããã®ã«å€±æããŸãã ##view_adminRoles_failLdapGroupsSettings = å©çšå¯èœãªLDAPã°ã«ãŒããèªã¿åºãã®ã«å€±æããŸãã - LDAPã°ã«ãŒããç¡ãããšãä»®å®ããŸã +##view_adminRoles_failLdapRetry = å©çšå¯èœãªLDAPã°ã«ãŒããèªã¿åºãã®ã«å€±æããŸãã - LDAPã°ã«ãŒããç¡ãããšãä»®å®ããŸã view_adminRoles_failRoles = ããŒã«ã®åãåºãã«å€±æããŸãã view_adminRoles_globalPerms = ã°ããŒãã«æš©é view_adminRoles_ldapGroups = LDAPã°ã«ãŒã diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties index 4deaf49..872ad93 100644 --- 
a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties @@ -459,9 +459,11 @@ view_adminRoles_assignedSubjects = í ë¹ë Subjects view_adminRoles_failCreateRoleWithExistingName = êž°ì¡Ž ìŽëŠ [{0}]곌 ìí ì ë§ë€ì§ 못íìµëë€. ë€ë¥ž ìŽëŠì ì¬ì©íììì€. view_adminRoles_failLdap = LDAPìŽ ì€ì ëìëì§ ì¬ë¶ì ê²°ì ì ì€íšíìµëë€ - LDAPìŽ ìë ê²ì ê°ì í©ëë€. ##view_adminRoles_failLdapAvailableGroups = ì¬ì©ê°ë¥í LDAP 귞룹ì ìœëë° ì€íšíìµëë€ - LDAP ê·žë£¹ìŽ ìë ê²ì ê°ì í©ëë€. +##view_adminRoles_failLdapCancelling = ì¬ì©ê°ë¥í LDAP 귞룹ì ìœëë° ì€íšíìµëë€ - LDAP ê·žë£¹ìŽ ìë ê²ì ê°ì í©ëë€. view_adminRoles_failLdapGroups = ì¬ì©ê°ë¥í LDAP 귞룹ì ìœëë° ì€íšíìµëë€ - LDAP ê·žë£¹ìŽ ìë ê²ì ê°ì í©ëë€. view_adminRoles_failLdapGroupsRole = ìí ì ìíŽ ì¬ì©ê°ë¥í LDAP 귞룹ì ë¡ëíëë° ì€íšíìµëë€. ##view_adminRoles_failLdapGroupsSettings = ì¬ì©ê°ë¥í LDAP 귞룹ì ìœëë° ì€íšíìµëë€ - LDAP ê·žë£¹ìŽ ìë ê²ì ê°ì í©ëë€. +##view_adminRoles_failLdapRetry = ì¬ì©ê°ë¥í LDAP 귞룹ì ìœëë° ì€íšíìµëë€ - LDAP ê·žë£¹ìŽ ìë ê²ì ê°ì í©ëë€. view_adminRoles_failRoles = ìí ì ê±°ì ì€íšíìµëë€. view_adminRoles_globalPerms = êžë¡ë² ê¶í view_adminRoles_ldapGroups = LDAP 귞룹 diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties index 35d2aec..9a4e437 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties @@ -514,9 +514,11 @@ view_adminRoles_assignedSubjects = Perfis Associados ##view_adminRoles_failCreateRoleWithExistingName = Failed to create role with existing name [{0}]. Please Use another name. 
view_adminRoles_failLdap = Falha ao determinar se o LDAP foi configurado - assumindo como LDAP n\u00E3o configurado. ##view_adminRoles_failLdapAvailableGroups = Falha ao recuperar os grupos dispon\u00EDveis no LDAP - assumindo como LDAP sem grupos. +##view_adminRoles_failLdapCancelling = Falha ao recuperar os grupos dispon\u00EDveis no LDAP - assumindo como LDAP sem grupos. view_adminRoles_failLdapGroups = Falha ao recuperar os grupos dispon\u00EDveis no LDAP - assumindo como LDAP sem grupos. view_adminRoles_failLdapGroupsRole = Falha ao carregar grupos do LDAP dispon\u00EDveis para o perfil. ##view_adminRoles_failLdapGroupsSettings = Falha ao recuperar os grupos dispon\u00EDveis no LDAP - assumindo como LDAP sem grupos. +##view_adminRoles_failLdapRetry = Falha ao recuperar os grupos dispon\u00EDveis no LDAP - assumindo como LDAP sem grupos. view_adminRoles_failRoles = Falha ao recuperar perfis. view_adminRoles_globalPerms = Permiss\u00F5es Globais view_adminRoles_ldapGroups = Grupos LDAP diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties index 3035f91..d978d76 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties @@ -2595,14 +2595,15 @@ view_adminRoles_assignedSubjects = ÐазМаÑеММÑе ÑÑбÑекÑÑ ##view_adminRoles_failCreateRoleWithExistingName = Failed to create role with existing name [{0}]. Please use another name. view_adminRoles_failLdap = Ðе ÑЎалПÑÑ ÐŸÐ¿ÑеЎелОÑÑ, МаÑÑÑПеМ лО LDAP - пÑеЎпПлПжОÑелÑМП LDAP ПÑÑÑÑÑÑвÑеÑ. ##view_adminRoles_failLdapAvailableGroups = Ðе ÑЎалПÑÑ Ð¿ÐŸÐ»ÑÑОÑÑ LDAP-гÑÑппÑ. 
ÐÑеЎпПлПжОÑелÑМП LDAP гÑÑÐ¿Ð¿Ñ ÐŸÑÑÑÑÑÑвÑÑÑ +##view_adminRoles_failLdapCancelling = Ðе ÑЎалПÑÑ Ð¿ÐŸÐ»ÑÑОÑÑ LDAP-гÑÑппÑ. ÐÑеЎпПлПжОÑелÑМП LDAP гÑÑÐ¿Ð¿Ñ ÐŸÑÑÑÑÑÑвÑÑÑ view_adminRoles_failLdapGroups = Ðе ÑЎалПÑÑ Ð¿ÐŸÐ»ÑÑОÑÑ LDAP-гÑÑппÑ. ÐÑеЎпПлПжОÑелÑМП LDAP гÑÑÐ¿Ð¿Ñ ÐŸÑÑÑÑÑÑвÑÑÑ view_adminRoles_failLdapGroupsRole = Ðе ÑЎалПÑÑ Ð·Ð°Ð³ÑÑзОÑÑ LDAP гÑÑппÑ, ЎПÑÑÑпМÑе ÐŽÐ»Ñ ÑПлО. ##view_adminRoles_failLdapGroupsSettings = Ðе ÑЎалПÑÑ Ð¿ÐŸÐ»ÑÑОÑÑ LDAP-гÑÑппÑ. ÐÑеЎпПлПжОÑелÑМП LDAP гÑÑÐ¿Ð¿Ñ ÐŸÑÑÑÑÑÑвÑÑÑ +##view_adminRoles_failLdapRetry = Ðе ÑЎалПÑÑ Ð¿ÐŸÐ»ÑÑОÑÑ LDAP-гÑÑппÑ. ÐÑеЎпПлПжОÑелÑМП LDAP гÑÑÐ¿Ð¿Ñ ÐŸÑÑÑÑÑÑвÑÑÑ view_adminRoles_failRoles = Ðе ÑЎалПÑÑ Ð¿ÐŸÐ»ÑÑОÑÑ ÑПлО. view_adminRoles_globalPerms = ÐлПбалÑÐœÑе пПлМПЌПÑÐžÑ view_adminRoles_ldapGroups = LDAP-гÑÑÐ¿Ð¿Ñ view_adminRoles_ldapGroupsReadOnly = ЎаММÑе LDAP гÑÑпп ЎПÑÑÑÐ¿ÐœÑ ÑПлÑкП ÐŽÐ»Ñ ÑÑÐµÐœÐžÑ -##view_adminRoles_noLdap = The LDAP security integration is not configured. To configure LDAP, go to <a {0}>{1}</a>. ##view_adminRoles_ldapGroupsSettingsReadOnly = ЎаММÑе LDAP гÑÑпп ЎПÑÑÑÐ¿ÐœÑ ÑПлÑкП ÐŽÐ»Ñ ÑÑÐµÐœÐžÑ ##view_adminRoles_ldapQueryPageSize = ÐПлМПЌПÑÐžÑ ÑеÑÑÑÑа ##view_adminRoles_ldapTooManyResults = ЎаММÑе LDAP гÑÑпп ЎПÑÑÑÐ¿ÐœÑ ÑПлÑкП ÐŽÐ»Ñ ÑÑÐµÐœÐžÑ @@ -2611,6 +2612,7 @@ view_adminRoles_ldapGroupsReadOnly = ЎаММÑе LDAP гÑÑпп ЎПÑÑÑпМ ##view_adminRoles_ldapWarnParsingManyPagesResults = ЎаММÑе LDAP гÑÑпп ЎПÑÑÑÐ¿ÐœÑ ÑПлÑкП ÐŽÐ»Ñ ÑÑÐµÐœÐžÑ ##view_adminRoles_ldapWarnQueryTakingLongResults = ЎаММÑе LDAP гÑÑпп ЎПÑÑÑÐ¿ÐœÑ ÑПлÑкП ÐŽÐ»Ñ ÑÑÐµÐœÐžÑ ##view_adminRoles_ldapWarnTooManyResults = ЎаММÑе LDAP гÑÑпп ЎПÑÑÑÐ¿ÐœÑ ÑПлÑкП ÐŽÐ»Ñ ÑÑÐµÐœÐžÑ +##view_adminRoles_noLdap = The LDAP security integration is not configured. To configure LDAP, go to <a {0}>{1}</a>. view_adminRoles_permissions_autoselecting_configureRead_implied = ÐвÑПЌаÑОÑеÑкО ПÑклÑÑеМП CONFIGURE_WRITE пПлМПЌПÑОе, пПÑкПлÑÐºÑ ÐŸÑÑÑÑÑÑвÑÐµÑ CONFIGURE_READ... 
view_adminRoles_permissions_autoselecting_configureWrite_implied = ÐвÑПЌаÑОÑеÑкО вÑбÑаМП CONFIGURE_READ пПлМПЌПÑОе, пПÑкПлÑÐºÑ CONFIGURE_WRITE пПЎÑазÑЌеваеÑ, ÑÑП ... ##view_adminRoles_permissions_autoselecting_manageBundleGroups_implied = Autoselected View Bundles, which is granted by Manage Bundle Groups... diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties index 615d3a0..b4f0369 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties @@ -502,9 +502,11 @@ view_adminRoles_assignedSubjects = \u5206\u914d\u7684\u4e3b\u9898 ##view_adminRoles_failCreateRoleWithExistingName = Failed to create role with existing name [{0}]. Please Use another name. view_adminRoles_failLdap = \u65e0\u6cd5\u786e\u5b9aLDAP - \u5047\u5b9a\u6ca1\u6709LDAP. ##view_adminRoles_failLdapAvailableGroups = \u67e5\u8be2\u6709\u6548LDAP\u7ec4\u5931\u8d25 - \u5047\u5b9a\u65e0DAP\u7ec4. +##view_adminRoles_failLdapCancelling = \u67e5\u8be2\u6709\u6548LDAP\u7ec4\u5931\u8d25 - \u5047\u5b9a\u65e0DAP\u7ec4. view_adminRoles_failLdapGroups = \u67e5\u8be2\u6709\u6548LDAP\u7ec4\u5931\u8d25 - \u5047\u5b9a\u65e0DAP\u7ec4. view_adminRoles_failLdapGroupsRole = \u52a0\u8f7dLDAP\u7ec4\u89d2\u8272\u5931\u8d25. ##view_adminRoles_failLdapGroupsSettings = \u67e5\u8be2\u6709\u6548LDAP\u7ec4\u5931\u8d25 - \u5047\u5b9a\u65e0DAP\u7ec4. +##view_adminRoles_failLdapRetry = \u67e5\u8be2\u6709\u6548LDAP\u7ec4\u5931\u8d25 - \u5047\u5b9a\u65e0DAP\u7ec4. view_adminRoles_failRoles = \u53d6\u5f97\u89d2\u8272\u5931\u8d25. 
view_adminRoles_globalPerms = \u5168\u5c40\u6388\u6743 view_adminRoles_ldapGroups = LDAP\u7ec4 diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java index dc53a3c..5c8031d 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java @@ -604,7 +604,13 @@ SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.name(), "" entry.put("name", name); entry.put("description", description); groupDetailsMap.add(entry); - resultCount++; + + resultCount++;//monitor the number of groups returned during this query. + groupQueryResultCount = resultCount;//update result count + if (groupQueryPageCount == 0) { + groupQueryPageCount++; + } + groupQueryCurrentTime = System.currentTimeMillis(); } }
commit 6d7689daeff6504e5c3d4178cc0480525a3e7d7a Author: Simeon Pinder spinder@fulliautomatix.conchfritter.com Date: Fri Aug 9 16:46:29 2013 -0400
[BZ 990576] add I18N for new fields.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleLdapGroupSelector.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleLdapGroupSelector.java index 8d53b82..c4e7145 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleLdapGroupSelector.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleLdapGroupSelector.java @@ -97,16 +97,13 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c int groupPanelWidth = 375; int groupPanelHeight = 150;
- // final TextItem search = new TextItem("search", - // MSG.common_title_search()); - // Structure the display area into two separate display regions // Available Groups region final DynamicForm availableGroupDetails = new DynamicForm(); { availableGroupDetails.setWidth(groupPanelWidth); availableGroupDetails.setHeight(groupPanelHeight); - availableGroupDetails.setGroupTitle("Available Groups Results"); + availableGroupDetails.setGroupTitle(MSG.common_title_ldapGroupsAvailable()); availableGroupDetails.setIsGroup(true); availableGroupDetails.setWrapItemTitles(false); //add itemChanged handler to listen for changes to SearchItem @@ -129,21 +126,21 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c } }); } - final TextItem resultCountItem = new TextItem("resultCount", "Groups Found"); + final TextItem resultCountItem = new TextItem("resultCount", MSG.common_title_groupsFound()); { resultCountItem.setCanEdit(false); resultCountItem.setWidth("100%"); } - final TextItem pageCountItem = new TextItem("pageCount", "Query Pages Parsed"); + final TextItem pageCountItem = new TextItem("pageCount", MSG.common_title_queryPagesParsed()); { pageCountItem.setCanEdit(false); pageCountItem.setWidth("100%"); } - final TextAreaItem adviceItem = new TextAreaItem("advice", "Suggest"); + final TextAreaItem adviceItem = new TextAreaItem("advice", MSG.common_title_suggest()); { adviceItem.setWidth("100%"); adviceItem.setHeight(20); - String feedback = "(None)"; + String feedback = MSG.common_val_none(); adviceItem.setValue(feedback); adviceItem.setTooltip(feedback); adviceItem.setDisabled(true); @@ -162,13 +159,12 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c } }); } - // final TextItem search = new TextItem("search", - // MSG.common_title_search()); + //Customize Search component { - searchTextItem.setName("search"); - searchTextItem.setTitle("Filter[results below]"); + 
searchTextItem.setName(MSG.common_title_search()); + searchTextItem.setTitle(MSG.common_title_filterResultsBelow()); searchTextItem.setWidth("100%"); - searchTextItem.setTooltip("Start typing here to only show groups containing the typed characters."); + searchTextItem.setTooltip(MSG.common_msg_typeToFilterResults()); } final FormItemIcon loadingIcon = new FormItemIcon(); final FormItemIcon successIcon = new FormItemIcon(); @@ -186,8 +182,8 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c final StaticTextItem groupQueryStatus = new StaticTextItem(); { groupQueryStatus.setName("groupQueryStatus"); - groupQueryStatus.setTitle("Query Progress"); - groupQueryStatus.setDefaultValue("Loading..."); + groupQueryStatus.setTitle(MSG.common_title_queryProgress()); + groupQueryStatus.setDefaultValue(MSG.common_msg_loading()); groupQueryStatus.setIcons(loadingIcon); } availableGroupDetails.setItems(resultCountItem, pageCountItem, groupQueryStatus, adviceItem, searchTextItem); @@ -197,21 +193,22 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c { ldapGroupSettings.setWidth(groupPanelWidth); ldapGroupSettings.setHeight(groupPanelHeight); - ldapGroupSettings.setGroupTitle("[Read Only] Ldap Group Settings. 
Edit in 'System Settings'"); + ldapGroupSettings.setGroupTitle(MSG.view_adminRoles_ldapGroupsSettingsReadOnly()); ldapGroupSettings.setIsGroup(true); ldapGroupSettings.setWrapItemTitles(false); } - final TextItem groupSearch = new TextItem("groupSearch", "Search Filter"); + final TextItem groupSearch = new TextItem("groupSearch", MSG.view_admin_systemSettings_LDAPFilter_name()); { groupSearch.setCanEdit(false); groupSearch.setWidth("100%"); } - final TextItem groupMember = new TextItem("groupMember", "Member Filter"); + final TextItem groupMember = new TextItem("groupMember", MSG.view_admin_systemSettings_LDAPGroupMember_name()); { groupMember.setCanEdit(false); groupMember.setWidth("100%"); } - final CheckboxItem groupQueryPagingItem = new CheckboxItem("groupQueryEnable", "Query Paging Enabled"); + final CheckboxItem groupQueryPagingItem = new CheckboxItem("groupQueryEnable", + MSG.view_admin_systemSettings_LDAPGroupUsePaging_name()); { groupQueryPagingItem.setCanEdit(false); groupQueryPagingItem.setValue(false); @@ -221,12 +218,14 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c //You have to set this attribute groupQueryPagingItem.setAttribute("labelAsTitle", true); } - final TextItem groupQueryPagingCountItem = new TextItem("groupQueryCount", "Query Page Size"); + final TextItem groupQueryPagingCountItem = new TextItem("groupQueryCount", + MSG.view_adminRoles_ldapQueryPageSize()); { groupQueryPagingCountItem.setCanEdit(false); groupQueryPagingCountItem.setWidth("100%"); } - final CheckboxItem groupUsePosixGroupsItem = new CheckboxItem("groupUsePosixGroups", "Use Posix Enabled"); + final CheckboxItem groupUsePosixGroupsItem = new CheckboxItem("groupUsePosixGroups", + MSG.view_admin_systemSettings_LDAPGroupUsePosixGroup_name()); { groupUsePosixGroupsItem.setCanEdit(false); groupUsePosixGroupsItem.setValue(false); @@ -262,9 +261,8 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c @Override public 
void onFailure(Throwable caught) { groupQueryStatus.setIcons(failIcon); - groupQueryStatus.setDefaultValue("Fail: Unable to retrieve system settings."); - //TODO: update this message - CoreGUI.getErrorHandler().handleError(MSG.view_adminRoles_failLdap(), caught); + groupQueryStatus.setDefaultValue(MSG.view_adminRoles_failLdapGroupsSettings()); + CoreGUI.getErrorHandler().handleError(MSG.view_adminRoles_failLdapGroupsSettings(), caught); }
@Override @@ -291,10 +289,10 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c //launch operations to populate/refresh LDAP Group Query contents. final Timer availableGroupsTimer = new Timer() { public void run() { - final String attention = "Attention"; - final String success = "Success"; - final String none = "(None)"; - final String failed = "Failed"; + final String attention = MSG.common_status_attention(); + final String success = MSG.common_status_success(); + final String none = MSG.common_val_none(); + final String failed = MSG.common_status_failed(); //make request to RHQ about state of latest LDAP GWT request GWTServiceLookup.getLdapService().findAvailableGroupsStatus( new AsyncCallback<Set<Map<String, String>>>() { @@ -302,11 +300,11 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c public void onFailure(Throwable caught) { groupQueryStatus.setIcons(failIcon); groupQueryStatus.setDefaultValue(failed); - String adviceValue = "Failed: Unable to retrieve status for latest AvailableGroups() call."; + String adviceValue = MSG.view_adminRoles_failLdapAvailableGroups(); adviceItem.setValue(adviceValue); adviceItem.setTooltip(adviceValue); - //TODO: update this message - CoreGUI.getErrorHandler().handleError(MSG.view_adminRoles_failLdap(), caught); + CoreGUI.getErrorHandler() + .handleError(MSG.view_adminRoles_failLdapAvailableGroups(), caught); retryAttempt++; if (retryAttempt > 3) { cancel();//kill thread @@ -342,9 +340,10 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c } } //Update status information - String warnTooManyResults = " A lot of results are being returned. Modify your 'Group Search Filter' to return fewer results."; - String warnQueryTakingLongResults = " Query taking a while to complete. Modify your 'Group Search Filter' to return fewer results."; - String warnParsingManyPagesResults = " Query requires a lot of pages. 
Modify your 'Group Search Page Size' to return more results per request."; + String warnTooManyResults = MSG.view_adminRoles_ldapWarnTooManyResults(); + String warnQueryTakingLongResults = MSG.view_adminRoles_ldapWarnQueryTakingLongResults(); + String warnParsingManyPagesResults = MSG.view_adminRoles_ldapWarnParsingManyPagesResults(); + boolean resultCountWarning = false; boolean pageCountWarning = false; boolean timePassingWarning = false; @@ -368,12 +367,11 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c //act on status details to add extra perf suggestions. Kill threads older than 30 mins long parseTime = System.currentTimeMillis() - ldapGroupSelectorRequestId; if ((queryCompleted) || (parseTime) > 30 * 60 * 1000) { - String tooManyResults = "Too many results to show all. Modify your 'Group Search Filter' to return fewer than 20000 results."; - String queryTookLongResults = " Query took " + parseTime - + " ms to complete. Modify your 'Group Search Filter' to return fewer results."; - String queryTookManyPagesResults = " Query required " - + pageCount - + " pages to complete. 
Modify 'Group Search Page Size' to return more results per request."; + String tooManyResults = MSG.view_adminRoles_ldapTooManyResults(); + String queryTookLongResults = MSG.view_adminRoles_ldapTookLongResults(parseTime + ""); + String queryTookManyPagesResults = MSG + .view_adminRoles_ldapTookManyPagesResults(pageCount + ""); + adviceItem.setDisabled(false); groupQueryStatus.setIcons(attentionIcon); groupQueryStatus.setDefaultValue(attention); diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties index 67ec087..869a8ce 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties @@ -136,11 +136,13 @@ common_msg_noItemsToShow = No items to show common_msg_notYetImplemented = Not Yet Implemented common_msg_see_more = see more... common_msg_step_x_of_y = Step {0} of {1} +common_msg_typeToFilterResults = Start typing here to only show results containing the typed characters. common_severity_debug = Debug common_severity_error = Error common_severity_fatal = Fatal common_severity_info = Info common_severity_warn = Warn +common_status_attention = Attention common_status_canceled = Canceled common_status_deferred = Deferred common_status_failed = Failed @@ -192,11 +194,13 @@ common_title_edit = Edit? common_title_edit_mode = Edit Mode common_title_enabled = Enabled? 
common_title_end = End +common_title_filterResultsBelow = Filter[results below] common_title_generalProp = General Properties common_title_group = Group common_title_group_def_total = Group Definition Total common_title_group_member_health = Group Member Health common_title_groups = Groups +common_title_groupsFound = Groups Found common_title_help = Help common_title_host = Host common_title_id = ID @@ -210,6 +214,7 @@ common_title_kind = Kind common_title_lastUpdated = Last Updated common_title_lastUpdatedBy = Last Updated By common_title_ldapGroups = LDAP Groups +common_title_ldapGroupsAvailable = Available Groups Results common_title_mashup = Mashup common_title_members_reporting = Members Reporting common_title_message = Message @@ -234,6 +239,8 @@ common_title_platform_total = Platform Total common_title_plugin = Plugin common_title_port = Port common_title_providers = Providers +common_title_queryPagesParsed = Query Pages Parsed +common_title_queryProgress = Query Progress common_title_recent_alerts = Recent Alerts common_title_recent_bundle_deployments = Recent Bundle Deployments common_title_recent_configuration_updates = Recent Configuration Updates @@ -273,6 +280,7 @@ common_title_sort_order_tooltip = Sets sort order for results. common_title_start = Start common_title_status = Status common_title_stop = Stop +common_title_suggest = Suggest common_title_summary = Summary common_title_tag_cloud = Tag Cloud common_title_the = The @@ -503,12 +511,22 @@ view_adminRoles_assignedGroups = Assigned Resource Groups view_adminRoles_assignedSubjects = Assigned Subjects view_adminRoles_failCreateRoleWithExistingName = Failed to create role with existing name [{0}]. Please use another name. view_adminRoles_failLdap = Failed to determine if LDAP configured - assuming no LDAP. +view_adminRoles_failLdapAvailableGroups = Failed: Unable to retrieve status for latest AvailableGroups() call. 
view_adminRoles_failLdapGroups = Failed to retrieve available LDAP groups - assuming no LDAP groups. view_adminRoles_failLdapGroupsRole = Failed to load LDAP groups available for role. +view_adminRoles_failLdapGroupsSettings = Failed to retrieve system settings details for LDAP groups. view_adminRoles_failRoles = Failed to fetch roles. view_adminRoles_globalPerms = Global Permissions view_adminRoles_ldapGroups = LDAP Groups view_adminRoles_ldapGroupsReadOnly = LDAP group data is read only +view_adminRoles_ldapGroupsSettingsReadOnly = [Read Only] Ldap Group Settings. Edit in 'System Settings' +view_adminRoles_ldapQueryPageSize = Query Page Size +view_adminRoles_ldapTooManyResults = Too many results to show all. Modify your 'Group Search Filter' to return fewer than 20000 results. +view_adminRoles_ldapTookLongResults = Query took {0} ms to complete. Modify your 'Group Search Filter' to return fewer results. +view_adminRoles_ldapTookManyPagesResults = Query required {0} pages to complete. Modify 'Group Search Page Size' to return more results per request. +view_adminRoles_ldapWarnParsingManyPagesResults = Query requires a lot of pages. Modify your 'Group Search Page Size' to return more results per request. +view_adminRoles_ldapWarnQueryTakingLongResults = \ Query taking a while to complete. Modify your 'Group Search Filter' to return fewer results. +view_adminRoles_ldapWarnTooManyResults = \ A lot of results are being returned. Modify your 'Group Search Filter' to return fewer results. view_adminRoles_noLdap = The LDAP security integration is not configured. To configure LDAP, go to <a {0}>{1}</a>. view_adminRoles_permissions_autoselecting_configureRead_implied = Autodeselected CONFIGURE_WRITE permission, since lack of CONFIGURE_READ implies lack of it... view_adminRoles_permissions_autoselecting_configureWrite_implied = Autoselected CONFIGURE_READ permission, since CONFIGURE_WRITE implies it... 
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties index 2c97c84..4e6532d 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties @@ -159,11 +159,13 @@ common_msg_noItemsToShow = Ŝádné poloÅŸky k zobrazenà common_msg_notYetImplemented = JeÅ¡tÄ nebylo implementováno common_msg_see_more = vÃce... common_msg_step_x_of_y = Krok {0} z {1} +##common_msg_typeToFilterResults = JeÅ¡tÄ nebylo implementováno common_severity_debug = LadÄnà common_severity_error = Chyba common_severity_fatal = Fatálnà common_severity_info = InformaÄnà common_severity_warn = Varovánà +##common_status_attention = ÃspÄch common_status_canceled = ZruÅ¡eno common_status_deferred = OdloÅŸeno common_status_failed = Selhalo @@ -215,11 +217,13 @@ common_title_edit_mode = EditaÄnà mód common_title_enabled = Povoleno? 
common_title_end = Konec common_title_favorites = OblÃbené +##common_title_filterResultsBelow = OblÃbené common_title_generalProp = Obecné vlastnosti common_title_group = Skupina common_title_group_def_total = Definic skupin celkem common_title_group_member_health = Zdravà Älena skupiny common_title_groups = Skupiny +##common_title_groupsFound = Skupiny common_title_help = NápovÄda common_title_host = Host common_title_icon = Ikona @@ -234,6 +238,7 @@ common_title_kind = Druh common_title_lastUpdated = Naposledy modifikováno common_title_lastUpdatedBy = Naposledy modifikoval common_title_ldapGroups = LDAP skupiny +##common_title_ldapGroupsAvailable = LDAP skupiny common_title_mashup = Mashup common_title_members_reporting = Reportovánà Älenů common_title_message = Zpráva @@ -258,6 +263,8 @@ common_title_platform_total = Celkem platforem common_title_plugin = ZásuvnÃœ modul common_title_port = Port common_title_providers = Poskytovatelé +##common_title_queryPagesParsed = LDAP skupiny +##common_title_queryProgress = LDAP skupiny common_title_recent_alerts = Nedávné vÃœstrahy common_title_recent_bundle_deployments = Nedávno nasazené balÃky common_title_recent_configuration_updates = Nedávno zmÄnÄné konfigurace @@ -297,6 +304,7 @@ common_title_sort_order_tooltip = Nastavà Åazenà pro vÃœsledky. common_title_start = Start common_title_status = Stav common_title_stop = Stop +##common_title_suggest = Shrnutà common_title_summary = Shrnutà common_title_tag_cloud = Tag cloud common_title_timestamp = Datum/Äas @@ -522,12 +530,22 @@ view_adminRoles_assignedGroups = PÅiÅazené skupiny zdrojů view_adminRoles_assignedSubjects = PÅiÅazené subjekty view_adminRoles_failCreateRoleWithExistingName = NepodaÅilo se vytvoÅit roli s existujÃcÃm jménem [{0}]. PouÅŸÃjte prosÃm jiné jméno. view_adminRoles_failLdap = NepodaÅilo se urÄit je-li LDAP nastaven - pÅedpokládám, ÅŸe LDAP nenà pÅÃtomen. 
+##view_adminRoles_failLdapAvailableGroups = NepodaÅilo se zÃskat informace o LDAP skupinách - pÅedpokládám, ÅŸe LDAP nenà pÅÃtomen. view_adminRoles_failLdapGroups = NepodaÅilo se zÃskat informace o LDAP skupinách - pÅedpokládám, ÅŸe LDAP nenà pÅÃtomen. view_adminRoles_failLdapGroupsRole = NepodaÅilo se zÃskat informace o LDAP skupinách pro roli. +##view_adminRoles_failLdapGroupsSettings = NepodaÅilo se zÃskat informace o LDAP skupinách - pÅedpokládám, ÅŸe LDAP nenà pÅÃtomen. view_adminRoles_failRoles = NepodaÅilo se zÃskat role. view_adminRoles_globalPerms = Globalnà povolenà view_adminRoles_ldapGroups = LDAP skupiny view_adminRoles_ldapGroupsReadOnly = data LDAP skupiny jsou jen pro Ätenà +##view_adminRoles_ldapGroupsSettingsReadOnly = data LDAP skupiny jsou jen pro Ätenà +##view_adminRoles_ldapQueryPageSize = OprávnÄnà zdroje +##view_adminRoles_ldapTooManyResults = data LDAP skupiny jsou jen pro Ätenà +##view_adminRoles_ldapTookLongResults = data LDAP skupiny jsou jen pro Ätenà +##view_adminRoles_ldapTookManyPagesResults = data LDAP skupiny jsou jen pro Ätenà +##view_adminRoles_ldapWarnParsingManyPagesResults = data LDAP skupiny jsou jen pro Ätenà +##view_adminRoles_ldapWarnQueryTakingLongResults = data LDAP skupiny jsou jen pro Ätenà +##view_adminRoles_ldapWarnTooManyResults = data LDAP skupiny jsou jen pro Ätenà view_adminRoles_noLdap = Integrace LDAP nenà nakonfigurována. K nastavenà ÅÃzenà bezpeÄnosti pÅes LDAP prosÃm navÅ¡tivte <a {0}>{1}</a>. view_adminRoles_permissions_autoselecting_configureRead_implied = Automaticky odoznaÄeno CONFIGURE_WRITE povolenÃ, protoÅŸe absence CONFIGURE_READ to implikuje... view_adminRoles_permissions_autoselecting_configureWrite_implied = Automaticky oznaÄeno CONFIGURE_READ povolenÃ, protoÅŸe CONFIGURE_WRITE jej implikuje... 
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties index d4f053b..c50c577 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties @@ -139,11 +139,13 @@ common_msg_noItemsToShow = Keine EintrÀge vorhanden common_msg_notYetImplemented = Noch nicht implementiert common_msg_see_more = mehr ... common_msg_step_x_of_y = Schritt {0} von {1} +##common_msg_typeToFilterResults = Noch nicht implementiert common_severity_debug = Debug common_severity_error = Fehler common_severity_fatal = Fatal common_severity_info = Information common_severity_warn = Warnung +##common_status_attention = Erfolg common_status_canceled = Abgebrochen common_status_deferred = Verzögert common_status_failed = Fehlgeschlagen @@ -199,6 +201,7 @@ common_title_group = Gruppe common_title_group_def_total = Anzahl Gruppen-Definitionen common_title_group_member_health = Zustand der Gruppenmitglieder common_title_groups = Gruppen +##common_title_groupsFound = Gruppen common_title_help = Hilfe common_title_host = Rechner common_title_id = ID @@ -211,6 +214,7 @@ common_title_kind = Art common_title_lastUpdated = Zuletzt aktualisiert common_title_lastUpdatedBy = Zuletzt aktualisiert durch common_title_ldapGroups = LDAP-Gruppen +##common_title_ldapGroupsAvailable = LDAP-Gruppen ##common_title_mashup = Mashup common_title_members_reporting = Anzahl berichtender Mitglieder common_title_message = Nachricht @@ -235,6 +239,8 @@ common_title_platform_total = Anzahl Platformen common_title_plugin = Plugin common_title_port = Port common_title_providers = Anbieter +##common_title_queryPagesParsed = LDAP-Gruppen +##common_title_queryProgress = 
LDAP-Gruppen common_title_recent_alerts = KÃŒrzlich ausgelöste Alarme common_title_recent_bundle_deployments = KÃŒrzliche Bundle-Deployments common_title_recent_configuration_updates = Frische Konfigurationsaktualisierungen @@ -274,6 +280,7 @@ common_title_sort_order_tooltip = Legt die Sortierreihenfolge fÃŒr Ergebnisse fe common_title_start = Start common_title_status = Status common_title_stop = Stop +##common_title_suggest = Zusammenfassung common_title_summary = Zusammenfassung common_title_tag_cloud = Tag-Wolke common_title_the = Die @@ -501,12 +508,22 @@ view_adminRoles_assignedGroups = Zugewiesene Ressourcen-Gruppen view_adminRoles_assignedSubjects = Zugewisene Benutzer view_adminRoles_failCreateRoleWithExistingName = Konnte die Rolle mit dem existierenden Namen [{0}] nicht anlegen. Bitte wÀhlen Sie einen anderen Namen. view_adminRoles_failLdap = Konnte nicht ermitteln, ob LDAP konfiguriert ist - gehe von keiner LDAP-Konfiguration aus. +##view_adminRoles_failLdapAvailableGroups = Konnte de LDAP-Gruppen nicht laden. Annahme ist, dass es keine gibt. view_adminRoles_failLdapGroups = Konnte de LDAP-Gruppen nicht laden. Annahme ist, dass es keine gibt. view_adminRoles_failLdapGroupsRole = Konnte die LDAP-Gruppen fÃŒr die Rolle nicht laden. +##view_adminRoles_failLdapGroupsSettings = Konnte de LDAP-Gruppen nicht laden. Annahme ist, dass es keine gibt. view_adminRoles_failRoles = Konnte die Rollen nicht laden. 
view_adminRoles_globalPerms = Applikationsweite Rechte view_adminRoles_ldapGroups = LDAP-Gruppen view_adminRoles_ldapGroupsReadOnly = LDAP Gruppendaten können nur gelesen werden +##view_adminRoles_ldapGroupsSettingsReadOnly = LDAP Gruppendaten können nur gelesen werden +##view_adminRoles_ldapQueryPageSize = Rechte auf Ressourcen +##view_adminRoles_ldapTooManyResults = LDAP Gruppendaten können nur gelesen werden +##view_adminRoles_ldapTookLongResults = LDAP Gruppendaten können nur gelesen werden +##view_adminRoles_ldapTookManyPagesResults = LDAP Gruppendaten können nur gelesen werden +##view_adminRoles_ldapWarnParsingManyPagesResults = LDAP Gruppendaten können nur gelesen werden +##view_adminRoles_ldapWarnQueryTakingLongResults = LDAP Gruppendaten können nur gelesen werden +##view_adminRoles_ldapWarnTooManyResults = LDAP Gruppendaten können nur gelesen werden view_adminRoles_noLdap = Die LDAP-Integration ist nicht konfiguriert. Um LDAP zu konfigurieren, wechseln sie zu <a {0}>{1}</a>. ##view_adminRoles_permissions_autoselecting_configureRead_implied = Autodeselected CONFIGURE_WRITE permission, since lack of CONFIGURE_READ implies lack of it... ##view_adminRoles_permissions_autoselecting_configureWrite_implied = Autoselected CONFIGURE_READ permission, since CONFIGURE_WRITE implies it... diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties index c7c1f0a..54fef87 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties @@ -138,11 +138,13 @@ common_msg_noItemsToShow = 衚瀺ããé ç®ã¯ãããŸãã common_msg_notYetImplemented = å®è£ ãããŠããŸãã common_msg_see_more = 詳现... 
common_msg_step_x_of_y = ã¹ããã {1} äž {0} çªã +##common_msg_typeToFilterResults = å®è£ ãããŠããŸãã common_severity_debug = ãããã° common_severity_error = ãšã©ãŒ common_severity_fatal = èŽåœç common_severity_info = æ å ± common_severity_warn = èŠå +##common_status_attention = æå common_status_canceled = ãã£ã³ã»ã« common_status_deferred = 延æ common_status_failed = 倱æ @@ -199,6 +201,7 @@ common_title_group = ã°ã«ãŒã common_title_group_def_total = ã°ã«ãŒãå®çŸ©ã®åèš common_title_group_member_health = ã°ã«ãŒãã¡ã³ããŒã®ç¶æ common_title_groups = ã°ã«ãŒã +##common_title_groupsFound = ã°ã«ãŒã common_title_help = ãã«ã common_title_host = ãã¹ã common_title_id = ID @@ -212,6 +215,7 @@ common_title_kind = çš®é¡ common_title_lastUpdated = æçµæŽæ° common_title_lastUpdatedBy = æçµæŽæ°ïŒã«ããïŒ common_title_ldapGroups = LDAPã°ã«ãŒã +##common_title_ldapGroupsAvailable = LDAPã°ã«ãŒã common_title_mashup = ããã·ã¥ã¢ãã common_title_members_reporting = ã¡ã³ããŒã®ãªããŒã common_title_message = ã¡ãã»ãŒãž @@ -236,6 +240,8 @@ common_title_platform_total = ãã©ãããã©ãŒã ã®åèš common_title_plugin = ãã©ã°ã€ã³ common_title_port = ããŒã common_title_providers = ãããã€ã +##common_title_queryPagesParsed = LDAPã°ã«ãŒã +##common_title_queryProgress = LDAPã°ã«ãŒã common_title_recent_alerts = æè¿ã®ã¢ã©ãŒã common_title_recent_bundle_deployments = æè¿ã®ãã³ãã«ã®ããã〠common_title_recent_configuration_updates = æè¿ã®æ§æã®æŽæ° @@ -275,6 +281,7 @@ common_title_sort_order_tooltip = çµæã®ãœãŒãé ãèšå® common_title_start = éå§ common_title_status = ã¹ããŒã¿ã¹ common_title_stop = åæ¢ +##common_title_suggest = ãµã㪠common_title_summary = ãµã㪠common_title_tag_cloud = ã¿ã°ã¯ã©ãŠã common_title_timestamp = æ¥ä»/æé @@ -500,12 +507,22 @@ view_adminRoles_assignedGroups = å²ãåœãŠããããªãœãŒã¹ã°ã«ãŒã view_adminRoles_assignedSubjects = å²ãåœãŠãããSubjects ##view_adminRoles_failCreateRoleWithExistingName = Failed to create role with existing name [{0}]. Please use another name. 
view_adminRoles_failLdap = LDAPãæ§æããããã©ããã®æ±ºå®ã«å€±æããŸãã - LDAPãç¡ãããšãä»®å®ããŸã +##view_adminRoles_failLdapAvailableGroups = å©çšå¯èœãªLDAPã°ã«ãŒããèªã¿åºãã®ã«å€±æããŸãã - LDAPã°ã«ãŒããç¡ãããšãä»®å®ããŸã view_adminRoles_failLdapGroups = å©çšå¯èœãªLDAPã°ã«ãŒããèªã¿åºãã®ã«å€±æããŸãã - LDAPã°ã«ãŒããç¡ãããšãä»®å®ããŸã view_adminRoles_failLdapGroupsRole = ããŒã«ã®ããã«å©çšå¯èœãªLDAPã°ã«ãŒããããŒãããã®ã«å€±æããŸãã +##view_adminRoles_failLdapGroupsSettings = å©çšå¯èœãªLDAPã°ã«ãŒããèªã¿åºãã®ã«å€±æããŸãã - LDAPã°ã«ãŒããç¡ãããšãä»®å®ããŸã view_adminRoles_failRoles = ããŒã«ã®åãåºãã«å€±æããŸãã view_adminRoles_globalPerms = ã°ããŒãã«æš©é view_adminRoles_ldapGroups = LDAPã°ã«ãŒã view_adminRoles_ldapGroupsReadOnly = LDAPã°ã«ãŒãããŒã¿ã¯èªã¿åºãå°çšã§ã +##view_adminRoles_ldapGroupsSettingsReadOnly = LDAPã°ã«ãŒãããŒã¿ã¯èªã¿åºãå°çšã§ã +##view_adminRoles_ldapQueryPageSize = ãªãœãŒã¹æš©é +##view_adminRoles_ldapTooManyResults = LDAPã°ã«ãŒãããŒã¿ã¯èªã¿åºãå°çšã§ã +##view_adminRoles_ldapTookLongResults = LDAPã°ã«ãŒãããŒã¿ã¯èªã¿åºãå°çšã§ã +##view_adminRoles_ldapTookManyPagesResults = LDAPã°ã«ãŒãããŒã¿ã¯èªã¿åºãå°çšã§ã +##view_adminRoles_ldapWarnParsingManyPagesResults = LDAPã°ã«ãŒãããŒã¿ã¯èªã¿åºãå°çšã§ã +##view_adminRoles_ldapWarnQueryTakingLongResults = LDAPã°ã«ãŒãããŒã¿ã¯èªã¿åºãå°çšã§ã +##view_adminRoles_ldapWarnTooManyResults = LDAPã°ã«ãŒãããŒã¿ã¯èªã¿åºãå°çšã§ã view_adminRoles_noLdap = LDAPã»ãã¥ãªãã£ã®çµ±åã¯æ§æãããŠããŸãããLDAPãæ§æããã«ã¯ã <a {0}>{1}</a>ã«è¡ã£ãŠãã ããã view_adminRoles_permissions_autoselecting_configureRead_implied = CONFIGURE_WRITEæš©éãèªåçã«éžæãããŸããããªããªããCONFIGURE_READãç¡ãããšããããæ瀺ããŠããããã§ãã view_adminRoles_permissions_autoselecting_configureWrite_implied = CONFIGURE_READæš©éãèªåçã«éžæãããŸããããªããªããCONFIGURE_WRITEããããæ瀺ããŠããããã§ãã diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties index 0213540..4deaf49 100644 --- 
a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties @@ -132,10 +132,12 @@ common_msg_noItemsToShow = íìí íëª©ìŽ ììµëë€. common_msg_notYetImplemented = 구íëì§ ìììµëë€. common_msg_see_more = ììží볎Ʞ... common_msg_step_x_of_y = ëšê³ {0} ì€ {1} ë²ì§ž +##common_msg_typeToFilterResults = 구íëì§ ìììµëë€. common_severity_debug = ëë²ê·ž common_severity_error = ìë¬ common_severity_info = ì 볎 common_severity_warn = ê²œê³ +##common_status_attention = ì±ê³µ common_status_canceled = ì·šì common_status_deferred = ì°êž° common_status_failed = ì€íš @@ -187,6 +189,7 @@ common_title_group = 귞룹 common_title_group_def_total = ì 첎 귞룹 ì ì common_title_group_member_health = ê·žë£¹ë©€ë² ìí common_title_groups = 귞룹 +##common_title_groupsFound = 귞룹 common_title_help = ëìë§ common_title_host = ížì€íž common_title_id = ID @@ -197,6 +200,7 @@ common_title_inventory = ìžë²€í 늬 common_title_inventoryStatus = ìžë²€í 늬 ìí common_title_inventorySummary = ìžë²€í 늬 ììœ common_title_ldapGroups = LDAP 귞룹 +##common_title_ldapGroupsAvailable = LDAP 귞룹 common_title_members_reporting = ë©€ë² ëŠ¬í¬íž common_title_message = ë©ìì§ common_title_metric = ìž¡ì í목 @@ -219,6 +223,8 @@ common_title_platform_total = ì 첎 íë«íŒ common_title_plugin = íë¬ê·žìž common_title_port = í¬íž common_title_providers = ê³µêžì +##common_title_queryPagesParsed = LDAP 귞룹 +##common_title_queryProgress = LDAP 귞룹 common_title_recent_alerts = ìµê·Œ ì늌 common_title_recent_bundle_deployments = ìµê·Œ ë²ë€ ë°°í¬ common_title_recent_drifts = ìµê·Œ ë늬ííž @@ -254,6 +260,7 @@ common_title_sort_order_tooltip = 결곌ì ì ë ¬ ìì륌 ì€ì common_title_start = ìì common_title_status = ìí common_title_stop = ì ì§ +##common_title_suggest = ììœ common_title_summary = ììœ common_title_tag_cloud = íê·ž íŽëŒì°ë common_title_timestamp = ë ì§/ìê° @@ -451,12 +458,15 @@ 
view_adminRoles_assignedGroups = í ë¹ë ìì 귞룹 view_adminRoles_assignedSubjects = í ë¹ë Subjects view_adminRoles_failCreateRoleWithExistingName = êž°ì¡Ž ìŽëŠ [{0}]곌 ìí ì ë§ë€ì§ 못íìµëë€. ë€ë¥ž ìŽëŠì ì¬ì©íììì€. view_adminRoles_failLdap = LDAPìŽ ì€ì ëìëì§ ì¬ë¶ì ê²°ì ì ì€íšíìµëë€ - LDAPìŽ ìë ê²ì ê°ì í©ëë€. +##view_adminRoles_failLdapAvailableGroups = ì¬ì©ê°ë¥í LDAP 귞룹ì ìœëë° ì€íšíìµëë€ - LDAP ê·žë£¹ìŽ ìë ê²ì ê°ì í©ëë€. view_adminRoles_failLdapGroups = ì¬ì©ê°ë¥í LDAP 귞룹ì ìœëë° ì€íšíìµëë€ - LDAP ê·žë£¹ìŽ ìë ê²ì ê°ì í©ëë€. view_adminRoles_failLdapGroupsRole = ìí ì ìíŽ ì¬ì©ê°ë¥í LDAP 귞룹ì ë¡ëíëë° ì€íšíìµëë€. +##view_adminRoles_failLdapGroupsSettings = ì¬ì©ê°ë¥í LDAP 귞룹ì ìœëë° ì€íšíìµëë€ - LDAP ê·žë£¹ìŽ ìë ê²ì ê°ì í©ëë€. view_adminRoles_failRoles = ìí ì ê±°ì ì€íšíìµëë€. view_adminRoles_globalPerms = êžë¡ë² ê¶í view_adminRoles_ldapGroups = LDAP 귞룹 ##view_adminRoles_ldapGroupsReadOnly = LDAP group data is read only +##view_adminRoles_ldapQueryPageSize = 늬ìì€ ê¶í view_adminRoles_noLdap = LDAP 볎ì íµí©ìŽ ì€ì ëì§ ìììµëë€. LDAPì 구ì±íë €ë©Ž <a {0}>{1}</a>ë¡ ê°ììì. ##view_adminRoles_permissions_autoselecting_configureRead_implied = Autodeselected CONFIGURE_WRITE permission, since lack of CONFIGURE_READ implies lack of it... ##view_adminRoles_permissions_autoselecting_configureWrite_implied = Autoselected CONFIGURE_READ permission, since CONFIGURE_WRITE implies it... 
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties index 952b01f..35d2aec 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties @@ -135,11 +135,13 @@ common_msg_noItemsToShow = N\u00E3o existem itens para apresentar common_msg_notYetImplemented = N\u00E3o implementado ainda common_msg_see_more = Ver detalhes... common_msg_step_x_of_y = Passo {0} de {1} +##common_msg_typeToFilterResults = N\u00E3o implementado ainda ##common_severity_debug = Debug common_severity_error = Erro ##common_severity_fatal = Fatal ##common_severity_info = Info ##common_severity_warn = Warn +##common_status_attention = Sucesso common_status_canceled = Cancelado common_status_deferred = Deferido common_status_failed = Falha @@ -191,11 +193,13 @@ common_title_edit_mode = Modo Edi\u00E7\u00E3o common_title_enabled = Habilitado? 
common_title_end = Fim common_title_favorites = Favoritos +##common_title_filterResultsBelow = Favoritos common_title_generalProp = Propriedades Gerais common_title_group = Grupo common_title_group_def_total = Total de Defini\u00E7\u00E3o de Grupos common_title_group_member_health = Sa\u00FAde do Membro do Grupo common_title_groups = Grupos +##common_title_groupsFound = Grupos common_title_help = Ajuda common_title_host = Host common_title_icon = \u00EDcone @@ -209,6 +213,7 @@ common_title_inventorySummary = Sum\u00E1rio do Invent\u00E1rio common_title_lastUpdated = \u00DAltima Atualiza\u00E7\u00E3o common_title_lastUpdatedBy = \u00DAltima Atualiza\u00E7\u00E3o feita por common_title_ldapGroups = Grupos LDAP +##common_title_ldapGroupsAvailable = Grupos LDAP common_title_mashup = Mashup common_title_members_reporting = Relat\u00F3rio de Membros common_title_message = Mensagem @@ -233,6 +238,8 @@ common_title_platform_total = Total de Plataformas ##common_title_plugin = Plugin common_title_port = Porta common_title_providers = Provedores +##common_title_queryPagesParsed = Grupos LDAP +##common_title_queryProgress = Grupos LDAP common_title_recent_alerts = Alertas Recentes common_title_recent_bundle_deployments = Deployments de Bundle recente common_title_recent_configuration_updates = Configura\u00E7\u00F5es Alteradas Recentemente @@ -272,6 +279,7 @@ common_title_sort_order_tooltip = Define a ordem dos resultados. common_title_start = Iniciar common_title_status = Status common_title_stop = Parar +##common_title_suggest = Sum\u00E1rio common_title_summary = Sum\u00E1rio common_title_tag_cloud = Nuvem de Tags common_title_the = O @@ -505,12 +513,22 @@ view_adminRoles_assignedGroups = Grupos de Recursos Associados view_adminRoles_assignedSubjects = Perfis Associados ##view_adminRoles_failCreateRoleWithExistingName = Failed to create role with existing name [{0}]. Please Use another name. 
view_adminRoles_failLdap = Falha ao determinar se o LDAP foi configurado - assumindo como LDAP n\u00E3o configurado. +##view_adminRoles_failLdapAvailableGroups = Falha ao recuperar os grupos dispon\u00EDveis no LDAP - assumindo como LDAP sem grupos. view_adminRoles_failLdapGroups = Falha ao recuperar os grupos dispon\u00EDveis no LDAP - assumindo como LDAP sem grupos. view_adminRoles_failLdapGroupsRole = Falha ao carregar grupos do LDAP dispon\u00EDveis para o perfil. +##view_adminRoles_failLdapGroupsSettings = Falha ao recuperar os grupos dispon\u00EDveis no LDAP - assumindo como LDAP sem grupos. view_adminRoles_failRoles = Falha ao recuperar perfis. view_adminRoles_globalPerms = Permiss\u00F5es Globais view_adminRoles_ldapGroups = Grupos LDAP view_adminRoles_ldapGroupsReadOnly = Informa\u00E7\u00F5es do grupo LDAP com permiss\u00F5es apenas de leitura +##view_adminRoles_ldapGroupsSettingsReadOnly = Informa\u00E7\u00F5es do grupo LDAP com permiss\u00F5es apenas de leitura +##view_adminRoles_ldapQueryPageSize = Permiss\u00F5es do Recurso +##view_adminRoles_ldapTooManyResults = Informa\u00E7\u00F5es do grupo LDAP com permiss\u00F5es apenas de leitura +##view_adminRoles_ldapTookLongResults = Informa\u00E7\u00F5es do grupo LDAP com permiss\u00F5es apenas de leitura +##view_adminRoles_ldapTookManyPagesResults = Informa\u00E7\u00F5es do grupo LDAP com permiss\u00F5es apenas de leitura +##view_adminRoles_ldapWarnParsingManyPagesResults = Informa\u00E7\u00F5es do grupo LDAP com permiss\u00F5es apenas de leitura +##view_adminRoles_ldapWarnQueryTakingLongResults = Informa\u00E7\u00F5es do grupo LDAP com permiss\u00F5es apenas de leitura +##view_adminRoles_ldapWarnTooManyResults = Informa\u00E7\u00F5es do grupo LDAP com permiss\u00F5es apenas de leitura view_adminRoles_noLdap = A integra\u00E7\u00E3o com o LDAP ainda n\u00E3o foi configurada. Para configurar o LDAP acesse <a {0}>{1}</a>. 
view_adminRoles_permissions_autoselecting_configureRead_implied = Permiss\u00E3o CONFIGURE_WRITE desmarcada automaticamente devida a aus\u00EAncia da permiss\u00E3o CONFIGURE_READ... view_adminRoles_permissions_autoselecting_configureWrite_implied = Permiss\u00E3o CONFIGURE_READ marcada automaticamente devida a marca\u00E7\u00E3o de CONFIGURE_WRITE... diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties index 4fbef12..3035f91 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties @@ -2252,11 +2252,13 @@ common_msg_noItemsToShow = ÐÑÑÑÑÑÑвÑÑÑ ÑлеЌеМÑÑ ÐŽÐ»Ñ ÐŸÑП common_msg_notYetImplemented = ÐÑе Ме ÑеалОзПваМП common_msg_see_more = пПЎÑПбМее... 
common_msg_step_x_of_y = Каг {0} Оз {1} +##common_msg_typeToFilterResults = ÐÑе Ме ÑеалОзПваМП common_severity_debug = ÐÑлаЎка common_severity_error = ÐÑОбка common_severity_fatal = ЀаÑалÑÐœÑй common_severity_info = ÐÐœÑПÑЌаÑÐžÑ common_severity_warn = ÐÑеЎÑпÑежЎеМОе +##common_status_attention = УÑпеÑÐœÑй common_status_canceled = ÐÑЌеМÑÐœ common_status_deferred = ÐÑлПжеММÑй common_status_failed = ÐезÑÑпеÑМП @@ -2312,6 +2314,7 @@ common_title_group = ÐÑÑппа common_title_group_def_total = ÐпÑеЎелеМОе кПлОÑеÑÑва гÑÑпп common_title_group_member_health = СПÑÑПÑМОе ÑлеМа гÑÑÐ¿Ð¿Ñ common_title_groups = ÐÑÑÐ¿Ð¿Ñ +##common_title_groupsFound = ÐÑÑÐ¿Ð¿Ñ common_title_help = ÐПЌПÑÑ common_title_host = Host common_title_id = ID @@ -2323,6 +2326,7 @@ common_title_inventorySummary = ÐÑПг ОМвеМÑаÑОзаÑОО common_title_lastUpdated = ÐПÑлеЎМее ПбМПвлеМОе common_title_lastUpdatedBy = ÐПÑлеЎМее ПбМПвлеМОе ÐŸÑ common_title_ldapGroups = LDAP-гÑÑÐ¿Ð¿Ñ +##common_title_ldapGroupsAvailable = LDAP-гÑÑÐ¿Ð¿Ñ common_title_mashup = Mashup common_title_members_reporting = ÐПлОÑеÑÑвП пПлÑзПваÑелей ПÑÑеÑМПÑÑО common_title_message = СППбÑеМОе @@ -2346,6 +2350,8 @@ common_title_platform_total = ÐПлОÑеÑÑвП плаÑÑПÑÐŒ common_title_plugin = ÐлагОМ common_title_port = ÐПÑÑ common_title_providers = ÐÑПвайЎеÑÑ +##common_title_queryPagesParsed = LDAP-гÑÑÐ¿Ð¿Ñ +##common_title_queryProgress = LDAP-гÑÑÐ¿Ð¿Ñ common_title_recent_alerts = ÐПÑлеЎМОе пÑеЎÑпÑÐµÐ¶ÐŽÐµÐœÐžÑ common_title_recent_bundle_deployments = ÐПÑлеЎМОе ÑазвеÑÑÑÐ²Ð°ÐœÐžÑ Ð¿Ð°ÐºÐµÑа common_title_recent_configuration_updates = ÐПÑлеЎМОе ÐŸÐ±ÐœÐŸÐ²Ð»ÐµÐœÐžÑ ÐºÐŸÐœÑОгÑÑаÑОО @@ -2382,6 +2388,7 @@ common_title_sort_order_tooltip = УÑÑаМПвОÑÑ Ð¿ÐŸÑÑЎПк ÑПÑÑО common_title_start = СÑаÑÑ common_title_status = СÑаÑÑÑ common_title_stop = СÑПп +##common_title_suggest = СвПЎка common_title_summary = СвПЎка common_title_tag_cloud = ÐблакП ÑегПв common_title_timestamp = ÐаÑа/ÐÑеЌÑcommon_title_total=ÐÑПг @@ -2587,13 +2594,23 @@ view_adminRoles_assignedGroups = 
ÐазМаÑеММÑе гÑÑÐ¿Ð¿Ñ ÑеÑÑÑ view_adminRoles_assignedSubjects = ÐазМаÑеММÑе ÑÑбÑекÑÑ ##view_adminRoles_failCreateRoleWithExistingName = Failed to create role with existing name [{0}]. Please use another name. view_adminRoles_failLdap = Ðе ÑЎалПÑÑ ÐŸÐ¿ÑеЎелОÑÑ, МаÑÑÑПеМ лО LDAP - пÑеЎпПлПжОÑелÑМП LDAP ПÑÑÑÑÑÑвÑеÑ. +##view_adminRoles_failLdapAvailableGroups = Ðе ÑЎалПÑÑ Ð¿ÐŸÐ»ÑÑОÑÑ LDAP-гÑÑппÑ. ÐÑеЎпПлПжОÑелÑМП LDAP гÑÑÐ¿Ð¿Ñ ÐŸÑÑÑÑÑÑвÑÑÑ view_adminRoles_failLdapGroups = Ðе ÑЎалПÑÑ Ð¿ÐŸÐ»ÑÑОÑÑ LDAP-гÑÑппÑ. ÐÑеЎпПлПжОÑелÑМП LDAP гÑÑÐ¿Ð¿Ñ ÐŸÑÑÑÑÑÑвÑÑÑ view_adminRoles_failLdapGroupsRole = Ðе ÑЎалПÑÑ Ð·Ð°Ð³ÑÑзОÑÑ LDAP гÑÑппÑ, ЎПÑÑÑпМÑе ÐŽÐ»Ñ ÑПлО. +##view_adminRoles_failLdapGroupsSettings = Ðе ÑЎалПÑÑ Ð¿ÐŸÐ»ÑÑОÑÑ LDAP-гÑÑппÑ. ÐÑеЎпПлПжОÑелÑМП LDAP гÑÑÐ¿Ð¿Ñ ÐŸÑÑÑÑÑÑвÑÑÑ view_adminRoles_failRoles = Ðе ÑЎалПÑÑ Ð¿ÐŸÐ»ÑÑОÑÑ ÑПлО. view_adminRoles_globalPerms = ÐлПбалÑÐœÑе пПлМПЌПÑÐžÑ view_adminRoles_ldapGroups = LDAP-гÑÑÐ¿Ð¿Ñ view_adminRoles_ldapGroupsReadOnly = ЎаММÑе LDAP гÑÑпп ЎПÑÑÑÐ¿ÐœÑ ÑПлÑкП ÐŽÐ»Ñ ÑÑÐµÐœÐžÑ ##view_adminRoles_noLdap = The LDAP security integration is not configured. To configure LDAP, go to <a {0}>{1}</a>. 
+##view_adminRoles_ldapGroupsSettingsReadOnly = ЎаММÑе LDAP гÑÑпп ЎПÑÑÑÐ¿ÐœÑ ÑПлÑкП ÐŽÐ»Ñ ÑÑÐµÐœÐžÑ +##view_adminRoles_ldapQueryPageSize = ÐПлМПЌПÑÐžÑ ÑеÑÑÑÑа +##view_adminRoles_ldapTooManyResults = ЎаММÑе LDAP гÑÑпп ЎПÑÑÑÐ¿ÐœÑ ÑПлÑкП ÐŽÐ»Ñ ÑÑÐµÐœÐžÑ +##view_adminRoles_ldapTookLongResults = ЎаММÑе LDAP гÑÑпп ЎПÑÑÑÐ¿ÐœÑ ÑПлÑкП ÐŽÐ»Ñ ÑÑÐµÐœÐžÑ +##view_adminRoles_ldapTookManyPagesResults = ЎаММÑе LDAP гÑÑпп ЎПÑÑÑÐ¿ÐœÑ ÑПлÑкП ÐŽÐ»Ñ ÑÑÐµÐœÐžÑ +##view_adminRoles_ldapWarnParsingManyPagesResults = ЎаММÑе LDAP гÑÑпп ЎПÑÑÑÐ¿ÐœÑ ÑПлÑкП ÐŽÐ»Ñ ÑÑÐµÐœÐžÑ +##view_adminRoles_ldapWarnQueryTakingLongResults = ЎаММÑе LDAP гÑÑпп ЎПÑÑÑÐ¿ÐœÑ ÑПлÑкП ÐŽÐ»Ñ ÑÑÐµÐœÐžÑ +##view_adminRoles_ldapWarnTooManyResults = ЎаММÑе LDAP гÑÑпп ЎПÑÑÑÐ¿ÐœÑ ÑПлÑкП ÐŽÐ»Ñ ÑÑÐµÐœÐžÑ view_adminRoles_permissions_autoselecting_configureRead_implied = ÐвÑПЌаÑОÑеÑкО ПÑклÑÑеМП CONFIGURE_WRITE пПлМПЌПÑОе, пПÑкПлÑÐºÑ ÐŸÑÑÑÑÑÑвÑÐµÑ CONFIGURE_READ... view_adminRoles_permissions_autoselecting_configureWrite_implied = ÐвÑПЌаÑОÑеÑкО вÑбÑаМП CONFIGURE_READ пПлМПЌПÑОе, пПÑкПлÑÐºÑ CONFIGURE_WRITE пПЎÑазÑЌеваеÑ, ÑÑП ... ##view_adminRoles_permissions_autoselecting_manageBundleGroups_implied = Autoselected View Bundles, which is granted by Manage Bundle Groups... diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties index 24ea681..615d3a0 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties @@ -135,11 +135,13 @@ common_msg_noItemsToShow = \u6ca1\u6709\u6761\u76ee\u663e\u793a common_msg_notYetImplemented = \u5c1a\u672a\u5b9e\u73b0 common_msg_see_more = \u67e5\u770b\u66f4\u591a... 
common_msg_step_x_of_y = Step {0} of {1} +##common_msg_typeToFilterResults = \u5c1a\u672a\u5b9e\u73b0 common_severity_debug = \u8c03\u8bd5 common_severity_error = Error common_severity_fatal = \u4e25\u91cd common_severity_info = \u4fe1\u606f common_severity_warn = \u8b66\u544a +##common_status_attention = \u6210\u529f common_status_canceled = \u53d6\u6d88 common_status_deferred = \u5ef6\u671f common_status_failed = \u5931\u8d25 @@ -195,6 +197,7 @@ common_title_group = \u7ec4 common_title_group_def_total = \u5b9a\u4e49\u7ec4\u603b\u6570 common_title_group_member_health = \u7ec4\u5458\u5065\u5eb7\u5ea6 common_title_groups = \u7ec4 +##common_title_groupsFound = \u7ec4 common_title_help = \u5e2e\u52a9 common_title_host = \u4e3b\u673a common_title_id = ID @@ -206,6 +209,7 @@ common_title_inventorySummary = \u6e05\u5355\u6c47\u603b common_title_lastUpdated = \u6700\u8fd1\u66f4\u65b0 common_title_lastUpdatedBy = \u6700\u8fd1\u66f4\u65b0\u8005 common_title_ldapGroups = LDAP\u7ec4 +##common_title_ldapGroupsAvailable = LDAP\u7ec4 common_title_mashup = Mashup common_title_members_reporting = \u7ec4\u62a5\u8868 common_title_message = \u6d88\u606f @@ -230,6 +234,8 @@ common_title_platform_total = \u6240\u6709\u5e73\u53f0 common_title_plugin = \u63d2\u4ef6 common_title_port = \u7aef\u53e3 common_title_providers = \u63d0\u4f9b\u8005 +##common_title_queryPagesParsed = LDAP\u7ec4 +##common_title_queryProgress = LDAP\u7ec4 common_title_recent_alerts = \u6700\u8fd1\u544a\u8b66 common_title_recent_bundle_deployments = \u6700\u8fd1\u53d1\u5e03Bundles common_title_recent_configuration_updates = \u6700\u8fd1\u914d\u7f6e\u66f4\u65b0 @@ -269,6 +275,7 @@ common_title_sort_order_tooltip = \u8bbe\u7f6e\u6392\u5e8f\u65b9\u5f0f. 
common_title_start = \u5f00\u59cb common_title_status = \u72b6\u6001 common_title_stop = \u505c\u6b62 +##common_title_suggest = \u7edf\u8ba1 common_title_summary = \u7edf\u8ba1 common_title_tag_cloud = \u6807\u7b7e\u4e91 common_title_the = The @@ -494,12 +501,22 @@ view_adminRoles_assignedGroups = \u5206\u914d\u7684\u8d44\u6e90\u7ec4 view_adminRoles_assignedSubjects = \u5206\u914d\u7684\u4e3b\u9898 ##view_adminRoles_failCreateRoleWithExistingName = Failed to create role with existing name [{0}]. Please Use another name. view_adminRoles_failLdap = \u65e0\u6cd5\u786e\u5b9aLDAP - \u5047\u5b9a\u6ca1\u6709LDAP. +##view_adminRoles_failLdapAvailableGroups = \u67e5\u8be2\u6709\u6548LDAP\u7ec4\u5931\u8d25 - \u5047\u5b9a\u65e0DAP\u7ec4. view_adminRoles_failLdapGroups = \u67e5\u8be2\u6709\u6548LDAP\u7ec4\u5931\u8d25 - \u5047\u5b9a\u65e0DAP\u7ec4. view_adminRoles_failLdapGroupsRole = \u52a0\u8f7dLDAP\u7ec4\u89d2\u8272\u5931\u8d25. +##view_adminRoles_failLdapGroupsSettings = \u67e5\u8be2\u6709\u6548LDAP\u7ec4\u5931\u8d25 - \u5047\u5b9a\u65e0DAP\u7ec4. view_adminRoles_failRoles = \u53d6\u5f97\u89d2\u8272\u5931\u8d25. 
view_adminRoles_globalPerms = \u5168\u5c40\u6388\u6743 view_adminRoles_ldapGroups = LDAP\u7ec4 view_adminRoles_ldapGroupsReadOnly = LDAP\u7ec4\u6570\u636e\u4e3a\u53ea\u8bfb +##view_adminRoles_ldapGroupsSettingsReadOnly = LDAP\u7ec4\u6570\u636e\u4e3a\u53ea\u8bfb +##view_adminRoles_ldapQueryPageSize = \u8d44\u6e90\u6388\u6743 +##view_adminRoles_ldapTooManyResults = LDAP\u7ec4\u6570\u636e\u4e3a\u53ea\u8bfb +##view_adminRoles_ldapTookLongResults = LDAP\u7ec4\u6570\u636e\u4e3a\u53ea\u8bfb +##view_adminRoles_ldapTookManyPagesResults = LDAP\u7ec4\u6570\u636e\u4e3a\u53ea\u8bfb +##view_adminRoles_ldapWarnParsingManyPagesResults = LDAP\u7ec4\u6570\u636e\u4e3a\u53ea\u8bfb +##view_adminRoles_ldapWarnQueryTakingLongResults = LDAP\u7ec4\u6570\u636e\u4e3a\u53ea\u8bfb +##view_adminRoles_ldapWarnTooManyResults = LDAP\u7ec4\u6570\u636e\u4e3a\u53ea\u8bfb view_adminRoles_noLdap = \u6ca1\u6709\u96c6\u6210LDAP\u5b89\u5168, \u5230<a {0}>{1}</a>. ##view_adminRoles_permissions_autoselecting_configureRead_implied = Autodeselected CONFIGURE_WRITE permission, since lack of CONFIGURE_READ implies lack of it... ##view_adminRoles_permissions_autoselecting_configureWrite_implied = Autoselected CONFIGURE_READ permission, since CONFIGURE_WRITE implies it...
commit 993910de8445769b88413362a87257e0f0b3eea4 Author: Simeon Pinder spinder@fulliautomatix.conchfritter.com Date: Fri Aug 9 10:22:37 2013 -0400
[BZ 990576] - insert hard ldap group limit with parsing to guard against excessive ui perf issues. - fix ldap search filter issue - add Suggest/Advice component for more feedback for ldap integration. - disable edit on Suggest/Advice component - clean up selector feedback ui threads to guard against refresh. - refresh cached ldap results after 30 mins. - modify AbstractSelector to better support overriding the availableFilterForm
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleLdapGroupSelector.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleLdapGroupSelector.java index b17961d..8d53b82 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleLdapGroupSelector.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleLdapGroupSelector.java @@ -36,11 +36,18 @@ import com.smartgwt.client.data.Record; import com.smartgwt.client.data.fields.DataSourceTextField; import com.smartgwt.client.types.TitleOrientation; import com.smartgwt.client.widgets.form.DynamicForm; +import com.smartgwt.client.widgets.form.events.ItemChangedEvent; +import com.smartgwt.client.widgets.form.events.ItemChangedHandler; import com.smartgwt.client.widgets.form.fields.CheckboxItem; import com.smartgwt.client.widgets.form.fields.FormItemIcon; import com.smartgwt.client.widgets.form.fields.SpacerItem; import com.smartgwt.client.widgets.form.fields.StaticTextItem; +import com.smartgwt.client.widgets.form.fields.TextAreaItem; import com.smartgwt.client.widgets.form.fields.TextItem; +import com.smartgwt.client.widgets.form.fields.events.ChangeEvent; +import com.smartgwt.client.widgets.form.fields.events.ChangeHandler; +import com.smartgwt.client.widgets.form.fields.events.ChangedEvent; +import com.smartgwt.client.widgets.form.fields.events.ChangedHandler; import com.smartgwt.client.widgets.grid.ListGridRecord; import com.smartgwt.client.widgets.layout.HLayout;
@@ -63,7 +70,9 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c public static final String FIELD_ID = "id"; public static final String FIELD_NAME = "name"; public static final String FIELD_DESCRIPTION = "description"; - private static boolean queryCompleted = false; + final TextItem searchTextItem = new TextItem(); + protected int cursorPosition; + private static int retryAttempt = 0;
//override the selector key for ldap group selection. protected String getSelectorKey() { @@ -86,7 +95,7 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c availableFilterForm.setNumCols(2); } int groupPanelWidth = 375; - int groupPanelHeight = 140; + int groupPanelHeight = 150;
// final TextItem search = new TextItem("search", // MSG.common_title_search()); @@ -100,6 +109,25 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c availableGroupDetails.setGroupTitle("Available Groups Results"); availableGroupDetails.setIsGroup(true); availableGroupDetails.setWrapItemTitles(false); + //add itemChanged handler to listen for changes to SearchItem + availableGroupDetails.addItemChangedHandler(new ItemChangedHandler() { + public void onItemChanged(ItemChangedEvent itemChangedEvent) { + + latestCriteria = getLatestCriteria(null); + + Timer timer = new Timer() { + @Override + public void run() { + if (latestCriteria != null) { + Criteria criteria = latestCriteria; + latestCriteria = null; + populateAvailableGrid(criteria); + } + } + }; + timer.schedule(500); + } + }); } final TextItem resultCountItem = new TextItem("resultCount", "Groups Found"); { @@ -111,23 +139,49 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c pageCountItem.setCanEdit(false); pageCountItem.setWidth("100%"); } + final TextAreaItem adviceItem = new TextAreaItem("advice", "Suggest"); + { + adviceItem.setWidth("100%"); + adviceItem.setHeight(20); + String feedback = "(None)"; + adviceItem.setValue(feedback); + adviceItem.setTooltip(feedback); + adviceItem.setDisabled(true); + adviceItem.addChangeHandler(new ChangeHandler() { + @Override + public void onChange(ChangeEvent event) { + event.cancel(); + cursorPosition = adviceItem.getSelectionRange()[0]; + } + }); + adviceItem.addChangedHandler(new ChangedHandler() { + + @Override + public void onChanged(ChangedEvent event) { + adviceItem.setSelectionRange(cursorPosition, cursorPosition); + } + }); + } // final TextItem search = new TextItem("search", // MSG.common_title_search()); - final TextItem search = new TextItem("search", "Search[within results]"); { - search.setWidth("100%"); - search.setTooltip("Start typing here to show groups containing the typed 
characters."); + searchTextItem.setName("search"); + searchTextItem.setTitle("Filter[results below]"); + searchTextItem.setWidth("100%"); + searchTextItem.setTooltip("Start typing here to only show groups containing the typed characters."); } final FormItemIcon loadingIcon = new FormItemIcon(); final FormItemIcon successIcon = new FormItemIcon(); final FormItemIcon failIcon = new FormItemIcon(); + final FormItemIcon attentionIcon = new FormItemIcon(); String successIconPath = "[SKIN]/actions/ok.png"; String failedIconPath = "[SKIN]/actions/exclamation.png"; String loadingIconPath = "[SKIN]/loading.gif"; - //icon.setSrc("[SKIN]/actions/help.png"); + String attentionIconPath = "[SKIN]/Dialog/warn.png"; loadingIcon.setSrc(loadingIconPath); successIcon.setSrc(successIconPath); failIcon.setSrc(failedIconPath); + attentionIcon.setSrc(attentionIconPath);
final StaticTextItem groupQueryStatus = new StaticTextItem(); { @@ -136,7 +190,7 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c groupQueryStatus.setDefaultValue("Loading..."); groupQueryStatus.setIcons(loadingIcon); } - availableGroupDetails.setItems(resultCountItem, pageCountItem, groupQueryStatus, new SpacerItem(), search); + availableGroupDetails.setItems(resultCountItem, pageCountItem, groupQueryStatus, adviceItem, searchTextItem);
// Ldap Group Settings region final DynamicForm ldapGroupSettings = new DynamicForm(); @@ -182,8 +236,8 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c //You have to set this attribute groupUsePosixGroupsItem.setAttribute("labelAsTitle", true); } - ldapGroupSettings - .setItems(groupSearch, groupMember, groupQueryPagingItem, groupQueryPagingCountItem, groupUsePosixGroupsItem); + ldapGroupSettings.setItems(groupSearch, groupMember, groupQueryPagingItem, groupQueryPagingCountItem, + groupUsePosixGroupsItem);
// orient both panels next to each other HLayout panel = new HLayout(); @@ -196,6 +250,8 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c } availableFilterForm.addChild(panel);
+ final long ldapGroupSelectorRequestId = System.currentTimeMillis(); + //launch operations to populate/refresh LDAP Group Query contents. final Timer ldapPropertiesTimer = new Timer() { public void run() { @@ -230,75 +286,119 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c } } }; - ldapPropertiesTimer.scheduleRepeating(2000); // repeat interval in milliseconds, e.g. 30000 = 30seconds + ldapPropertiesTimer.schedule(2000); // repeat interval in milliseconds, e.g. 30000 = 30seconds
//launch operations to populate/refresh LDAP Group Query contents. final Timer availableGroupsTimer = new Timer() { public void run() { - if (!queryCompleted) { - //make request to RHQ about state of latest LDAP GWT request - GWTServiceLookup.getLdapService().findAvailableGroupsStatus( - new AsyncCallback<Set<Map<String, String>>>() { - @Override - public void onFailure(Throwable caught) { - groupQueryStatus.setIcons(failIcon); - groupQueryStatus - .setDefaultValue("Fail: Unable to retrieve status for latest AvailableGroups() call."); - //TODO: update this message - CoreGUI.getErrorHandler().handleError(MSG.view_adminRoles_failLdap(), caught); + final String attention = "Attention"; + final String success = "Success"; + final String none = "(None)"; + final String failed = "Failed"; + //make request to RHQ about state of latest LDAP GWT request + GWTServiceLookup.getLdapService().findAvailableGroupsStatus( + new AsyncCallback<Set<Map<String, String>>>() { + @Override + public void onFailure(Throwable caught) { + groupQueryStatus.setIcons(failIcon); + groupQueryStatus.setDefaultValue(failed); + String adviceValue = "Failed: Unable to retrieve status for latest AvailableGroups() call."; + adviceItem.setValue(adviceValue); + adviceItem.setTooltip(adviceValue); + //TODO: update this message + CoreGUI.getErrorHandler().handleError(MSG.view_adminRoles_failLdap(), caught); + retryAttempt++; + if (retryAttempt > 3) { + cancel();//kill thread + retryAttempt = 0; } + }
- @Override - public void onSuccess(Set<Map<String, String>> results) { - // Log.debug("@@@@@@@ findAvailableGroupsStatus: SUCCESS:" + System.currentTimeMillis() - // + ":count:" - // + results.size()); - long start = -1, end = -1; - int pageCount = 0; - int resultCountValue = 0; - for (Map<String, String> map : results) { - String key = map.keySet().toArray()[0] + ""; - if (key.equals("query.results.parsed")) { - String value = map.get(key); - resultCountItem.setValue(value); - resultCountValue = Integer.valueOf(value); - } else if (key.equals("query.complete")) { - String value = map.get(key); - queryCompleted = Boolean.valueOf(value); - } else if (key.equals("query.start.time")) { - String value = map.get(key); - start = Long.valueOf(value); - } else if (key.equals("query.end.time")) { - String value = map.get(key); - end = Long.valueOf(value); - } else if (key.equals("query.page.count")) { - String value = map.get(key); - pageCountItem.setValue(value); - pageCount = Integer.valueOf(value); - } + @Override + public void onSuccess(Set<Map<String, String>> results) { + long start = -1, current = -1; + int pageCount = 0; + int resultCountValue = 0; + boolean queryCompleted = false; + for (Map<String, String> map : results) { + String key = map.keySet().toArray()[0] + ""; + if (key.equals("query.results.parsed")) { + String value = map.get(key); + resultCountItem.setValue(value); + resultCountValue = Integer.valueOf(value); + } else if (key.equals("query.complete")) { + String value = map.get(key); + queryCompleted = Boolean.valueOf(value); + } else if (key.equals("query.start.time")) { + String value = map.get(key); + start = Long.valueOf(value); + } else if (key.equals("query.current.time")) { + String value = map.get(key); + current = Long.valueOf(value); + } else if (key.equals("query.page.count")) { + String value = map.get(key); + pageCountItem.setValue(value); + pageCount = Integer.valueOf(value); } - //act on status details to add extra perf suggestions - if 
(queryCompleted) { + } + //Update status information + String warnTooManyResults = " A lot of results are being returned. Modify your 'Group Search Filter' to return fewer results."; + String warnQueryTakingLongResults = " Query taking a while to complete. Modify your 'Group Search Filter' to return fewer results."; + String warnParsingManyPagesResults = " Query requires a lot of pages. Modify your 'Group Search Page Size' to return more results per request."; + boolean resultCountWarning = false; + boolean pageCountWarning = false; + boolean timePassingWarning = false; + if ((resultCountWarning = (resultCountValue > 5000)) + || (pageCountWarning = (pageCount > 5)) + || (timePassingWarning = (current - start) > 5 * 1000)) { + adviceItem.setDisabled(false); + groupQueryStatus.setIcons(attentionIcon); + if (resultCountWarning) { + adviceItem.setValue(warnTooManyResults); + adviceItem.setTooltip(warnTooManyResults); + } else if (pageCountWarning) { + adviceItem.setValue(warnParsingManyPagesResults); + adviceItem.setTooltip(warnParsingManyPagesResults); + } else if (timePassingWarning) { + adviceItem.setValue(warnQueryTakingLongResults); + adviceItem.setTooltip(warnQueryTakingLongResults); + } + } + + //act on status details to add extra perf suggestions. Kill threads older than 30 mins + long parseTime = System.currentTimeMillis() - ldapGroupSelectorRequestId; + if ((queryCompleted) || (parseTime) > 30 * 60 * 1000) { + String tooManyResults = "Too many results to show all. Modify your 'Group Search Filter' to return fewer than 20000 results."; + String queryTookLongResults = " Query took " + parseTime + + " ms to complete. Modify your 'Group Search Filter' to return fewer results."; + String queryTookManyPagesResults = " Query required " + + pageCount + + " pages to complete. 
Modify 'Group Search Page Size' to return more results per request."; + adviceItem.setDisabled(false); + groupQueryStatus.setIcons(attentionIcon); + groupQueryStatus.setDefaultValue(attention); + if (resultCountValue > 20000) {//results throttled + adviceItem.setValue(tooManyResults); + adviceItem.setTooltip(tooManyResults); + } else if ((current - start) >= 10 * 1000) {// took longer than 10s + adviceItem.setValue(queryTookLongResults); + adviceItem.setTooltip(queryTookLongResults); + } else if (pageCount >= 20) {// required more than 20 pages of results + adviceItem.setValue(queryTookManyPagesResults); + adviceItem.setTooltip(queryTookManyPagesResults); + } else {//simple success. + groupQueryStatus.setDefaultValue(success); groupQueryStatus.setIcons(successIcon); - String success = "Success"; - String tooManyResults = success + ": Too many results."; - String queryTookLongResults = success + ": Query took long to complete."; - String queryTookManyPagesResults = success + ": Query required a lot of paging."; - //TODO: add in extra information about results. - if (resultCountValue > 20000) {//results throttled - groupQueryStatus.setDefaultValue(tooManyResults); - } else if ((end - start) >= 10 * 1000) {// took longer than 10s - groupQueryStatus.setDefaultValue(queryTookLongResults); - } else if (pageCount >= 20) {// took longer than 10s - groupQueryStatus.setDefaultValue(queryTookManyPagesResults); - } + adviceItem.setValue(none); + adviceItem.setTooltip(none); + adviceItem.setDisabled(true); } - availableGroupDetails.markForRedraw(); //now cancel the timer cancel(); } - }); - } + availableGroupDetails.markForRedraw(); + } + }); } }; availableGroupsTimer.scheduleRepeating(3000); // repeat interval in milliseconds, e.g. 
30000 = 30seconds @@ -315,7 +415,9 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c */ @Override protected Criteria getLatestCriteria(DynamicForm availableFilterForm) { - String search = (String) availableFilterForm.getValue("search"); + //String search = (String) availableFilterForm.getValue("search"); + //non-trivial recursive form items possible. Retrieve from correct form item. + String search = searchTextItem.getValueAsString(); Criteria criteria = new Criteria(); if (null != search) { criteria.addCriteria("name", search); @@ -330,9 +432,10 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c
public static class LdapGroupsDataSource extends RPCDataSource<LdapGroup, org.rhq.core.domain.criteria.Criteria> {
- //cache ldap group data from external server + //cache ldap group data from external server for 30 mins then stale. private Set<Map<String, String>> cachedLdapGroupsAvailable; private Map<String, Map<String, String>> cachedNameKeyedMap; + private long cachedLdapGroupsLast = -1;
public LdapGroupsDataSource() { DataSourceTextField nameField = new DataSourceTextField(FIELD_NAME, FIELD_NAME); @@ -368,8 +471,9 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c @Override protected void executeFetch(final DSRequest request, final DSResponse response, final org.rhq.core.domain.criteria.Criteria unused) { - //if not null then go through to initialize - if (cachedLdapGroupsAvailable == null) { + //if not null or stale then go through to initialize|reset + if ((cachedLdapGroupsAvailable == null) + || ((System.currentTimeMillis() - cachedLdapGroupsLast) > 30 * 60 * 1000)) { fetchLdapGroupsFromServerAsync(request, response); } else {//use cached data and return correct response //process cachedLdapGroupsAvailable based on criteria @@ -422,6 +526,7 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c public void onSuccess(Set<Map<String, String>> locatedGroupMaps) { Log.debug("Successfully located " + locatedGroupMaps.size() + " available LDAP groups."); cachedLdapGroupsAvailable = locatedGroupMaps; + cachedLdapGroupsLast = System.currentTimeMillis(); //all groups displayed initially PageList<LdapGroup> ldapGroups = convertToPageList(locatedGroupMaps); sendSuccessResponse(request, response, ldapGroups); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/selector/AbstractSelector.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/selector/AbstractSelector.java index 12c5eb0..6f09142 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/selector/AbstractSelector.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/selector/AbstractSelector.java @@ -65,9 +65,9 @@ import com.smartgwt.client.widgets.layout.VStack;
import org.rhq.enterprise.gui.coregui.client.ImageManager; import org.rhq.enterprise.gui.coregui.client.util.RPCDataSource; +import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedUtility; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVStack; -import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedUtility;
/** * @author Greg Hinkle @@ -344,7 +344,7 @@ public abstract class AbstractSelector<T, C extends org.rhq.core.domain.criteria return availableSectionStack; }
- private void populateAvailableGrid(Criteria criteria) { + protected void populateAvailableGrid(Criteria criteria) { // TODO until http://code.google.com/p/smartgwt/issues/detail?id=490 is fixed always go to the server for data this.datasource.invalidateCache(); DSRequest requestProperties = new DSRequest(); diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java index 67abb77..dc53a3c 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java @@ -96,14 +96,15 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { private static boolean groupQueryComplete = false; private static int groupQueryResultCount = 0; private static long groupQueryStartTime = -1; - private static long groupQueryEndTime = -1; + private static long groupQueryCurrentTime = -1; private static int groupQueryPageCount = 0; + private static final int LDAP_GROUP_QUERY_LIMIT = 20000;//start to see a lot of ui responsiveness issues beyond this.
private void resetGroupQueryDetails() { groupQueryComplete = false; groupQueryResultCount = 0; groupQueryStartTime = -1; - groupQueryEndTime = -1; + groupQueryCurrentTime = -1; groupQueryPageCount = 0; } public Set<Map<String, String>> findAvailableGroups() { @@ -138,8 +139,8 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { availableGroupsQueryStatus.add(buildStatusEntry("query.results.parsed", String.valueOf(groupQueryResultCount))); //query.start.time => timestamp availableGroupsQueryStatus.add(buildStatusEntry("query.start.time", String.valueOf(groupQueryStartTime))); - //query.end.time => timestamp|-1 - availableGroupsQueryStatus.add(buildStatusEntry("query.end.time", String.valueOf(groupQueryEndTime))); + //query.current.time => timestamp|-1 + availableGroupsQueryStatus.add(buildStatusEntry("query.current.time", String.valueOf(groupQueryCurrentTime))); //query.page.count => 0...N availableGroupsQueryStatus.add(buildStatusEntry("query.page.count", String.valueOf(groupQueryPageCount)));
@@ -470,8 +471,11 @@ SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.name(), "" int passedInPageSize = -1; try { passedInPageSize = Integer.valueOf(groupPageSize.trim()); - if (passedInPageSize > 0) { + if ((passedInPageSize > 0) && (passedInPageSize <= LDAP_GROUP_QUERY_LIMIT)) { defaultPageSize = passedInPageSize; + } else {//keep defaults and log actual value being used. + log.debug("LDAP Group Page Size passed '" + groupPageSize + + "' was ignored. Defaulting to 1000."); } } catch (NumberFormatException nfe) { //log issue and do nothing. Go with the default. @@ -494,6 +498,7 @@ SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.name(), ""
//update queryResultCount groupQueryResultCount = groupDetailsMap.size(); + groupQueryCurrentTime = System.currentTimeMillis();
// continually parsing pages of results until we're done. // only if they're enabled in the UI. @@ -511,7 +516,7 @@ SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.name(), "" } } //continually parsing pages of results until we're done. - while (cookie != null) { + while ((groupQueryResultCount <= LDAP_GROUP_QUERY_LIMIT) && (cookie != null)) { //ensure the next requests contains the session/cookie details ctx.setRequestControls(new Control[] { new PagedResultsControl(defaultPageSize, cookie, Control.CRITICAL) }); @@ -520,19 +525,21 @@ SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.name(), "" //update Query state after each page groupQueryResultCount = groupDetailsMap.size(); groupQueryPageCount++; + groupQueryCurrentTime = System.currentTimeMillis();
//empty out cookie cookie = null; - //test for further iterations - controls = ctx.getResponseControls(); - if (controls != null) { - for (Control control : controls) { - if (control instanceof PagedResultsResponseControl) { - PagedResultsResponseControl pagedResult = (PagedResultsResponseControl) control; - cookie = pagedResult.getCookie(); + //insert group query throttle. + //test for further iterations + controls = ctx.getResponseControls(); + if (controls != null) { + for (Control control : controls) { + if (control instanceof PagedResultsResponseControl) { + PagedResultsResponseControl pagedResult = (PagedResultsResponseControl) control; + cookie = pagedResult.getCookie(); + } } } - } } } } @@ -553,7 +560,7 @@ SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.name(), "" throw new LdapCommunicationException(iex); } //update end of query information - groupQueryEndTime = System.currentTimeMillis(); + groupQueryCurrentTime = System.currentTimeMillis(); groupQueryComplete = true; return groupDetailsMap; } @@ -575,7 +582,9 @@ SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.name(), "" //execute search based on controls and context passed in. NamingEnumeration<SearchResult> answer = ctx.search(baseDNs[x], filter, searchControls); boolean ldapApiEnumerationBugEncountered = false; - while ((!ldapApiEnumerationBugEncountered) && answer.hasMoreElements()) {//BZ:582471- ldap api bug change + int resultCount = 0; + while ((resultCount <= LDAP_GROUP_QUERY_LIMIT) && (groupDetailsMap.size() <= LDAP_GROUP_QUERY_LIMIT) + && (!ldapApiEnumerationBugEncountered) && answer.hasMoreElements()) {//BZ:582471- ldap api bug change // We use the first match SearchResult si = null; try { @@ -595,6 +604,7 @@ SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.name(), "" entry.put("name", name); entry.put("description", description); groupDetailsMap.add(entry); + resultCount++; } }
commit cccb252d70b88922088e1f5e614ec9deb34f1e45 Author: Simeon Pinder spinder@fulliautomatix.conchfritter.com Date: Tue Aug 6 14:36:55 2013 -0400
[BZ 990576] Add messaging and updates around ldap query performance for administrator.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleLdapGroupSelector.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleLdapGroupSelector.java index 4f763ff..b17961d 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleLdapGroupSelector.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleLdapGroupSelector.java @@ -27,17 +27,25 @@ import java.util.HashSet; import java.util.Map; import java.util.Set;
+import com.google.gwt.user.client.Timer; import com.google.gwt.user.client.rpc.AsyncCallback; import com.smartgwt.client.data.Criteria; import com.smartgwt.client.data.DSRequest; import com.smartgwt.client.data.DSResponse; import com.smartgwt.client.data.Record; import com.smartgwt.client.data.fields.DataSourceTextField; +import com.smartgwt.client.types.TitleOrientation; import com.smartgwt.client.widgets.form.DynamicForm; +import com.smartgwt.client.widgets.form.fields.CheckboxItem; +import com.smartgwt.client.widgets.form.fields.FormItemIcon; import com.smartgwt.client.widgets.form.fields.SpacerItem; +import com.smartgwt.client.widgets.form.fields.StaticTextItem; import com.smartgwt.client.widgets.form.fields.TextItem; import com.smartgwt.client.widgets.grid.ListGridRecord; +import com.smartgwt.client.widgets.layout.HLayout;
+import org.rhq.core.domain.common.composite.SystemSetting; +import org.rhq.core.domain.common.composite.SystemSettings; import org.rhq.core.domain.resource.group.LdapGroup; import org.rhq.core.domain.util.PageControl; import org.rhq.core.domain.util.PageList; @@ -55,6 +63,7 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c public static final String FIELD_ID = "id"; public static final String FIELD_NAME = "name"; public static final String FIELD_DESCRIPTION = "description"; + private static boolean queryCompleted = false;
//override the selector key for ldap group selection. protected String getSelectorKey() { @@ -72,11 +81,227 @@ public class RoleLdapGroupSelector extends AbstractSelector<LdapGroup, org.rhq.c @Override protected DynamicForm getAvailableFilterForm() { DynamicForm availableFilterForm = new DynamicForm(); - availableFilterForm.setWidth100(); - availableFilterForm.setNumCols(2); - - final TextItem search = new TextItem("search", MSG.common_title_search()); - availableFilterForm.setItems(search, new SpacerItem()); + { + availableFilterForm.setWidth100(); + availableFilterForm.setNumCols(2); + } + int groupPanelWidth = 375; + int groupPanelHeight = 140; + + // final TextItem search = new TextItem("search", + // MSG.common_title_search()); + + // Structure the display area into two separate display regions + // Available Groups region + final DynamicForm availableGroupDetails = new DynamicForm(); + { + availableGroupDetails.setWidth(groupPanelWidth); + availableGroupDetails.setHeight(groupPanelHeight); + availableGroupDetails.setGroupTitle("Available Groups Results"); + availableGroupDetails.setIsGroup(true); + availableGroupDetails.setWrapItemTitles(false); + } + final TextItem resultCountItem = new TextItem("resultCount", "Groups Found"); + { + resultCountItem.setCanEdit(false); + resultCountItem.setWidth("100%"); + } + final TextItem pageCountItem = new TextItem("pageCount", "Query Pages Parsed"); + { + pageCountItem.setCanEdit(false); + pageCountItem.setWidth("100%"); + } + // final TextItem search = new TextItem("search", + // MSG.common_title_search()); + final TextItem search = new TextItem("search", "Search[within results]"); + { + search.setWidth("100%"); + search.setTooltip("Start typing here to show groups containing the typed characters."); + } + final FormItemIcon loadingIcon = new FormItemIcon(); + final FormItemIcon successIcon = new FormItemIcon(); + final FormItemIcon failIcon = new FormItemIcon(); + String successIconPath = "[SKIN]/actions/ok.png"; + 
String failedIconPath = "[SKIN]/actions/exclamation.png"; + String loadingIconPath = "[SKIN]/loading.gif"; + //icon.setSrc("[SKIN]/actions/help.png"); + loadingIcon.setSrc(loadingIconPath); + successIcon.setSrc(successIconPath); + failIcon.setSrc(failedIconPath); + + final StaticTextItem groupQueryStatus = new StaticTextItem(); + { + groupQueryStatus.setName("groupQueryStatus"); + groupQueryStatus.setTitle("Query Progress"); + groupQueryStatus.setDefaultValue("Loading..."); + groupQueryStatus.setIcons(loadingIcon); + } + availableGroupDetails.setItems(resultCountItem, pageCountItem, groupQueryStatus, new SpacerItem(), search); + + // Ldap Group Settings region + final DynamicForm ldapGroupSettings = new DynamicForm(); + { + ldapGroupSettings.setWidth(groupPanelWidth); + ldapGroupSettings.setHeight(groupPanelHeight); + ldapGroupSettings.setGroupTitle("[Read Only] Ldap Group Settings. Edit in 'System Settings'"); + ldapGroupSettings.setIsGroup(true); + ldapGroupSettings.setWrapItemTitles(false); + } + final TextItem groupSearch = new TextItem("groupSearch", "Search Filter"); + { + groupSearch.setCanEdit(false); + groupSearch.setWidth("100%"); + } + final TextItem groupMember = new TextItem("groupMember", "Member Filter"); + { + groupMember.setCanEdit(false); + groupMember.setWidth("100%"); + } + final CheckboxItem groupQueryPagingItem = new CheckboxItem("groupQueryEnable", "Query Paging Enabled"); + { + groupQueryPagingItem.setCanEdit(false); + groupQueryPagingItem.setValue(false); + groupQueryPagingItem.setShowLabel(false); + groupQueryPagingItem.setShowTitle(true); + groupQueryPagingItem.setTitleOrientation(TitleOrientation.LEFT); + //You have to set this attribute + groupQueryPagingItem.setAttribute("labelAsTitle", true); + } + final TextItem groupQueryPagingCountItem = new TextItem("groupQueryCount", "Query Page Size"); + { + groupQueryPagingCountItem.setCanEdit(false); + groupQueryPagingCountItem.setWidth("100%"); + } + final CheckboxItem groupUsePosixGroupsItem 
= new CheckboxItem("groupUsePosixGroups", "Use Posix Enabled"); + { + groupUsePosixGroupsItem.setCanEdit(false); + groupUsePosixGroupsItem.setValue(false); + groupUsePosixGroupsItem.setShowLabel(false); + groupUsePosixGroupsItem.setShowTitle(true); + groupUsePosixGroupsItem.setTitleOrientation(TitleOrientation.LEFT); + //You have to set this attribute + groupUsePosixGroupsItem.setAttribute("labelAsTitle", true); + } + ldapGroupSettings + .setItems(groupSearch, groupMember, groupQueryPagingItem, groupQueryPagingCountItem, groupUsePosixGroupsItem); + + // orient both panels next to each other + HLayout panel = new HLayout(); + { + panel.addMember(availableGroupDetails); + DynamicForm spacerWrapper = new DynamicForm(); + spacerWrapper.setItems(new SpacerItem()); + panel.addMember(spacerWrapper); + panel.addMember(ldapGroupSettings); + } + availableFilterForm.addChild(panel); + + //launch operations to populate/refresh LDAP Group Query contents. + final Timer ldapPropertiesTimer = new Timer() { + public void run() { + //if system properties not set, launch request/update + String ldapGroupQuery = groupSearch.getValueAsString(); + if ((ldapGroupQuery == null) || (ldapGroupQuery.trim().isEmpty())) { + GWTServiceLookup.getSystemService().getSystemSettings(new AsyncCallback<SystemSettings>() { + @Override + public void onFailure(Throwable caught) { + groupQueryStatus.setIcons(failIcon); + groupQueryStatus.setDefaultValue("Fail: Unable to retrieve system settings."); + //TODO: update this message + CoreGUI.getErrorHandler().handleError(MSG.view_adminRoles_failLdap(), caught); + } + + @Override + public void onSuccess(SystemSettings settings) { + //retrieve relevant information once and update ui + String ldapGroupFilter = settings.get(SystemSetting.LDAP_GROUP_FILTER); + String ldapGroupMember = settings.get(SystemSetting.LDAP_GROUP_MEMBER); + String ldapGroupPagingEnabled = settings.get(SystemSetting.LDAP_GROUP_PAGING); + String ldapGroupPagingValue = 
settings.get(SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE); + String ldapGroupIsPosix = settings.get(SystemSetting.LDAP_GROUP_USE_POSIX); + groupSearch.setValue(ldapGroupFilter); + groupMember.setValue(ldapGroupMember); + groupQueryPagingItem.setValue(Boolean.valueOf(ldapGroupPagingEnabled)); + groupQueryPagingCountItem.setValue(ldapGroupPagingValue); + groupUsePosixGroupsItem.setValue(Boolean.valueOf(ldapGroupIsPosix)); + ldapGroupSettings.markForRedraw(); + } + }); + } + } + }; + ldapPropertiesTimer.scheduleRepeating(2000); // repeat interval in milliseconds, e.g. 30000 = 30seconds + + //launch operations to populate/refresh LDAP Group Query contents. + final Timer availableGroupsTimer = new Timer() { + public void run() { + if (!queryCompleted) { + //make request to RHQ about state of latest LDAP GWT request + GWTServiceLookup.getLdapService().findAvailableGroupsStatus( + new AsyncCallback<Set<Map<String, String>>>() { + @Override + public void onFailure(Throwable caught) { + groupQueryStatus.setIcons(failIcon); + groupQueryStatus + .setDefaultValue("Fail: Unable to retrieve status for latest AvailableGroups() call."); + //TODO: update this message + CoreGUI.getErrorHandler().handleError(MSG.view_adminRoles_failLdap(), caught); + } + + @Override + public void onSuccess(Set<Map<String, String>> results) { + // Log.debug("@@@@@@@ findAvailableGroupsStatus: SUCCESS:" + System.currentTimeMillis() + // + ":count:" + // + results.size()); + long start = -1, end = -1; + int pageCount = 0; + int resultCountValue = 0; + for (Map<String, String> map : results) { + String key = map.keySet().toArray()[0] + ""; + if (key.equals("query.results.parsed")) { + String value = map.get(key); + resultCountItem.setValue(value); + resultCountValue = Integer.valueOf(value); + } else if (key.equals("query.complete")) { + String value = map.get(key); + queryCompleted = Boolean.valueOf(value); + } else if (key.equals("query.start.time")) { + String value = map.get(key); + start = 
Long.valueOf(value); + } else if (key.equals("query.end.time")) { + String value = map.get(key); + end = Long.valueOf(value); + } else if (key.equals("query.page.count")) { + String value = map.get(key); + pageCountItem.setValue(value); + pageCount = Integer.valueOf(value); + } + } + //act on status details to add extra perf suggestions + if (queryCompleted) { + groupQueryStatus.setIcons(successIcon); + String success = "Success"; + String tooManyResults = success + ": Too many results."; + String queryTookLongResults = success + ": Query took long to complete."; + String queryTookManyPagesResults = success + ": Query required a lot of paging."; + //TODO: add in extra information about results. + if (resultCountValue > 20000) {//results throttled + groupQueryStatus.setDefaultValue(tooManyResults); + } else if ((end - start) >= 10 * 1000) {// took longer than 10s + groupQueryStatus.setDefaultValue(queryTookLongResults); + } else if (pageCount >= 20) {// took longer than 10s + groupQueryStatus.setDefaultValue(queryTookManyPagesResults); + } + } + availableGroupDetails.markForRedraw(); + //now cancel the timer + cancel(); + } + }); + } + } + }; + availableGroupsTimer.scheduleRepeating(3000); // repeat interval in milliseconds, e.g. 30000 = 30seconds
return availableFilterForm; } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/LdapGWTService.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/LdapGWTService.java index 8356773..521556b 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/LdapGWTService.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/LdapGWTService.java @@ -42,6 +42,11 @@ public interface LdapGWTService extends RemoteService { Set<Map<String, String>> findAvailableGroups() throws RuntimeException;
/** + * @return Map with status of last LDAP groups query available + */ + Set<Map<String, String>> findAvailableGroupsStatus() throws RuntimeException; + + /** * @return Map with LDAP details for user passed. */ Map<String, String> getLdapDetailsFor(String user) throws RuntimeException; diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/LdapGWTServiceImpl.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/LdapGWTServiceImpl.java index 3255b8f..28d9323 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/LdapGWTServiceImpl.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/LdapGWTServiceImpl.java @@ -77,6 +77,28 @@ public class LdapGWTServiceImpl extends AbstractGWTServiceImpl implements LdapGW } }
+ @Override + public Set<Map<String, String>> findAvailableGroupsStatus() throws RuntimeException { + try { + //add permissions check + Set<Permission> globalPermissions = authorizationManager.getExplicitGlobalPermissions(getSessionSubject()); + Boolean accessGranted = globalPermissions.contains(Permission.MANAGE_SECURITY); + + Set<Map<String, String>> results = null; + if (accessGranted) { + results = ldapManager.findAvailableGroupsStatus(); + } else { + String message = "User '" + getSessionSubject().getName() + + "' does not have sufficient permissions to query the status of available LDAP groups request."; + log.debug(message); + throw new PermissionException(message); + } + return SerialUtility.prepare(results, "findAvailableGroups"); + } catch (Throwable t) { + throw getExceptionToThrowToClient(t); + } + } + public void setLdapGroupsForRole(int roleId, List<String> groupIds) throws RuntimeException { try { //add permissions check diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java index 8a0e321..67abb77 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java @@ -93,9 +93,24 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { @EJB private SystemManagerLocal systemManager;
+ private static boolean groupQueryComplete = false; + private static int groupQueryResultCount = 0; + private static long groupQueryStartTime = -1; + private static long groupQueryEndTime = -1; + private static int groupQueryPageCount = 0; + + private void resetGroupQueryDetails() { + groupQueryComplete = false; + groupQueryResultCount = 0; + groupQueryStartTime = -1; + groupQueryEndTime = -1; + groupQueryPageCount = 0; + } public Set<Map<String, String>> findAvailableGroups() { //load current system properties Properties systemConfig = populateProperties(systemManager.getSystemSettings(subjectManager.getOverlord())); + //reset group query details + resetGroupQueryDetails();
//retrieve the filters. String groupFilter = (String) systemConfig.get(SystemSetting.LDAP_GROUP_FILTER.name()); @@ -114,6 +129,29 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { return emptyAvailableGroups; }
+ public Set<Map<String, String>> findAvailableGroupsStatus() { + Set<Map<String, String>> availableGroupsQueryStatus = new HashSet<Map<String, String>>(); + + //query.complete => true|false + availableGroupsQueryStatus.add(buildStatusEntry("query.complete", String.valueOf(groupQueryComplete))); + //query.results.parsed => 0...N + availableGroupsQueryStatus.add(buildStatusEntry("query.results.parsed", String.valueOf(groupQueryResultCount))); + //query.start.time => timestamp + availableGroupsQueryStatus.add(buildStatusEntry("query.start.time", String.valueOf(groupQueryStartTime))); + //query.end.time => timestamp|-1 + availableGroupsQueryStatus.add(buildStatusEntry("query.end.time", String.valueOf(groupQueryEndTime))); + //query.page.count => 0...N + availableGroupsQueryStatus.add(buildStatusEntry("query.page.count", String.valueOf(groupQueryPageCount))); + + return availableGroupsQueryStatus; + } + + private Map<String, String> buildStatusEntry(String key, String value) { + HashMap<String, String> status = new HashMap<String, String>(); + status.put(key, value); + return status; + } + public Set<String> findAvailableGroupsFor(String userName) { Properties options = populateProperties(systemManager.getSystemSettings(subjectManager.getOverlord())); String groupFilter = options.getProperty(SystemSetting.LDAP_GROUP_FILTER.name(), ""); @@ -449,8 +487,14 @@ SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.name(), "" String[] baseDNs = baseDN.split(BASEDN_DELIMITER);
for (int x = 0; x < baseDNs.length; x++) { + //update query start time + groupQueryStartTime = System.currentTimeMillis(); + executeGroupSearch(filter, groupDetailsMap, ctx, searchControls, baseDNs, x);
+ //update queryResultCount + groupQueryResultCount = groupDetailsMap.size(); + // continually parsing pages of results until we're done. // only if they're enabled in the UI. if (useQueryPaging) { @@ -472,6 +516,11 @@ SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.name(), "" ctx.setRequestControls(new Control[] { new PagedResultsControl(defaultPageSize, cookie, Control.CRITICAL) }); executeGroupSearch(filter, groupDetailsMap, ctx, searchControls, baseDNs, x); + + //update Query state after each page + groupQueryResultCount = groupDetailsMap.size(); + groupQueryPageCount++; + //empty out cookie cookie = null; //test for further iterations @@ -484,25 +533,6 @@ SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.name(), "" } } } - //continually parsing pages of results until we're done. - while (cookie != null) { - //ensure the next requests contains the session/cookie details - ctx.setRequestControls(new Control[] { new PagedResultsControl(defaultPageSize, cookie, - Control.CRITICAL) }); - executeGroupSearch(filter, groupDetailsMap, ctx, searchControls, baseDNs, x); - //empty out cookie - cookie = null; - //test for further iterations - controls = ctx.getResponseControls(); - if (controls != null) { - for (Control control : controls) { - if (control instanceof PagedResultsResponseControl) { - PagedResultsResponseControl pagedResult = (PagedResultsResponseControl) control; - cookie = pagedResult.getCookie(); - } - } - } - } } } } @@ -522,6 +552,9 @@ SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.name(), "" log.error("Unexpected LDAP communciation error:" + iex.getMessage(), iex); throw new LdapCommunicationException(iex); } + //update end of query information + groupQueryEndTime = System.currentTimeMillis(); + groupQueryComplete = true; return groupDetailsMap; }
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerLocal.java index 14945ce..fe3c392 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerLocal.java @@ -51,6 +51,8 @@ public interface LdapGroupManagerLocal {
Set<Map<String, String>> findAvailableGroups();
+ Set<Map<String, String>> findAvailableGroupsStatus(); + Set<String> findAvailableGroupsFor(String userName);
Map<String, String> findLdapUserDetails(String userName);
commit 52bdb069fe9ac0c313f8269b8e264e09cdb0a4a4 Author: Stefan Negrea snegrea@redhat.com Date: Tue Aug 13 15:21:28 2013 -0500
Update the schema manager to check for schema version at startup. The server should not start if there is schema disagreement.
Also throw an exception during update if the installed schema is too advanced for the existing installation.
diff --git a/.classpath b/.classpath index cad0bdb..d590a22 100644 --- a/.classpath +++ b/.classpath @@ -380,7 +380,7 @@ <classpathentry exported="true" kind="var" path="M2_REPO/org/jboss/byteman/byteman-submit/1.5.2/byteman-submit-1.5.2.jar" sourcepath="M2_REPO/org/jboss/byteman/byteman-submit/1.5.2/byteman-submit-1.5.2-sources.jar"/> <classpathentry exported="true" kind="var" path="M2_REPO/org/jboss/byteman/byteman-bmunit/1.5.2/byteman-bmunit-1.5.2.jar" sourcepath="M2_REPO/org/jboss/byteman/byteman-bmunit/1.5.2/byteman-bmunit-1.5.2-sources.jar"/> <classpathentry kind="var" path="M2_REPO/org/apache/cassandra/cassandra-all/1.2.4/cassandra-all-1.2.4.jar"/> - <classpathentry kind="var" path="M2_REPO/com/datastax/cassandra/cassandra-driver-core/1.0.0-rhq-1.2.4/cassandra-driver-core-1.0.0-rhq-1.2.4.jar"/> + <classpathentry kind="var" path="M2_REPO/com/datastax/cassandra/cassandra-driver-core/1.0.2-rhq-1.2.4/cassandra-driver-core-1.0.2-rhq-1.2.4.jar"/> <classpathentry kind="var" path="M2_REPO/org/apache/thrift/libthrift/0.7.0/libthrift-0.7.0.jar"/> <classpathentry kind="var" path="M2_REPO/commons-cli/commons-cli/1.2/commons-cli-1.2.jar"/> <classpathentry kind="var" path="M2_REPO/com/google/guava/guava/12.0/guava-12.0.jar"/> @@ -392,5 +392,6 @@ <classpathentry exported="true" kind="var" path="JDK_HOME/jre/lib/rt.jar"/> <classpathentry kind="var" path="M2_REPO/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar"/> <classpathentry kind="var" path="M2_REPO/org/apache/maven/plugin-tools/maven-plugin-annotations/3.2/maven-plugin-annotations-3.2.jar"/> + <classpathentry kind="var" path="M2_REPO/org/jboss/jboss-vfs/3.1.0.Final/jboss-vfs-3.1.0.Final.jar"/> <classpathentry kind="output" path="eclipse-classes"/> </classpath> diff --git a/modules/common/cassandra-schema/pom.xml b/modules/common/cassandra-schema/pom.xml index 8e3b44a..6fb2915 100644 --- a/modules/common/cassandra-schema/pom.xml +++ b/modules/common/cassandra-schema/pom.xml @@ -47,6 +47,13 
@@ <artifactId>cassandra-driver-core</artifactId> <version>${cassandra.driver.version}</version> </dependency> + + <dependency> + <groupId>org.jboss</groupId> + <artifactId>jboss-vfs</artifactId> + <version>${jboss-vfs.version}</version> + </dependency> + </dependencies>
<build> diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/AbstractManager.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/AbstractManager.java index baf7c23..7b8c520 100644 --- a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/AbstractManager.java +++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/AbstractManager.java @@ -92,7 +92,7 @@ abstract class AbstractManager { }
/** - * Init the Cassandra cluster session with the username and password provided + * Init the storage cluster session with the username and password provided * at creation. */ protected void initClusterSession() { @@ -100,7 +100,7 @@ abstract class AbstractManager { }
/** - * Init the Cassandra cluster session with provided username and password. + * Init the storage cluster session with provided username and password. * * @param username * @param password @@ -125,7 +125,7 @@ abstract class AbstractManager { }
/** - * Shutdown the Cassandra cluster connection. + * Shutdown the storage cluster connection. */ protected void shutdownClusterConnection() { log.info("Shutting down existing cluster connections"); @@ -135,7 +135,7 @@ abstract class AbstractManager { }
/** - * Get cluster size. + * Get storage cluster size. * * @return cluster size */ @@ -158,7 +158,7 @@ abstract class AbstractManager { }
/** - * Runs a CQL query to check the existence of the RHQ user + * Runs a CQL query to check the existence of the RHQ user on the storage cluster. * * @return true if the RHQ user exists, false otherwise */ @@ -173,7 +173,7 @@ abstract class AbstractManager { }
/** - * Run a CQL query to check the existence of the RHQ schema + * Run a CQL query to check the existence of the RHQ schema. * * @return true if the RHQ schema exists, false otherwise */ @@ -192,11 +192,11 @@ abstract class AbstractManager { }
/** - * Run a CQL query to retrieve the current RHQ schema version + * Run a CQL query to retrieve the installed storage schema version. * * @return current RHQ schema version */ - protected int getSchemaVersion() { + protected int getInstalledSchemaVersion() { int maxVersion = 0; try { ResultSet resultSet = executeManagementQuery(Query.VERSION); diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java index 8d28bfa..1a82779 100644 --- a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java +++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java @@ -80,7 +80,7 @@ public class SchemaManager { }
/** - * Install and update the RHQ schema. + * Install and update the storage cluster schema. * * @throws Exception */ @@ -90,7 +90,18 @@ public class SchemaManager { }
/** - * Drop RHQ schema and revert the database to pre-RHQ state. + * Check the existing storage cluster schema version to ensure it is compatible with the + * current installation. + * + * @throws Exception + */ + public void checkCompatibility() throws Exception { + VersionManager version = new VersionManager(username, password, nodes); + version.checkCompatibility(); + } + + /** + * Drop storage cluster schema and revert the storage cluster to pre-RHQ state. * * @throws Exception */ diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/UpdateFolder.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/UpdateFolder.java index 84cb515..31266ca 100644 --- a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/UpdateFolder.java +++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/UpdateFolder.java @@ -30,6 +30,7 @@ import java.io.InputStream; import java.io.InputStreamReader; import java.net.JarURLConnection; import java.net.URL; +import java.net.URLConnection; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; @@ -41,6 +42,8 @@ import java.util.jar.JarFile; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory;
+import org.jboss.vfs.VirtualFile; + /** * @author Stefan Negrea */ @@ -83,6 +86,19 @@ class UpdateFolder { }
/** + * The version represented by the latest/highest xml update file. + * + * @return the version + */ + public int getLatestVersion() { + if (this.updateFiles != null && this.updateFiles.size() > 0) { + return this.updateFiles.get(this.updateFiles.size() - 1).extractVersion(); + } + + return 0; + } + + /** * Loads the initial set of update files based on the input folder. * * @return list of update files @@ -115,9 +131,13 @@ class UpdateFolder { } } } else if (resourceFolderURL.getProtocol().equals("vfs")) { - // TODO need to add support for VFS if going to use inside EAP - throw new RuntimeException("The URL protocol [" + resourceFolderURL.getProtocol() + "] is not " + - "supported"); + URLConnection conn = resourceFolderURL.openConnection(); + VirtualFile virtualFolder = (VirtualFile)conn.getContent(); + for (VirtualFile virtualChild : virtualFolder.getChildren()) { + if (!virtualChild.isDirectory()) { + files.add(new UpdateFile(virtualChild.getPathNameRelativeTo(virtualFolder.getParent()))); + } + } } else { // In the event we get another protocol that we do not recognize, throw an // exception instead of failing silently. diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/VersionManager.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/VersionManager.java index 794e991..ec54a0c 100644 --- a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/VersionManager.java +++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/VersionManager.java @@ -34,6 +34,8 @@ import com.datastax.driver.core.exceptions.AuthenticationException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory;
+import org.rhq.cassandra.schema.exception.InstalledSchemaTooAdvancedException; +import org.rhq.cassandra.schema.exception.InstalledSchemaTooOldException; import org.rhq.core.domain.cloud.StorageNode;
/** @@ -73,14 +75,14 @@ class VersionManager extends AbstractManager { * @throws Exception */ public void install() throws Exception { - log.info("Preparing to install schema"); + log.info("Preparing to install storage schema");
boolean clusterSessionInitialized = false; try { initClusterSession(); clusterSessionInitialized = true; } catch (AuthenticationException e) { - log.debug("Authentication exception. Will now attempt to create the schema."); + log.debug("Authentication exception. Will now attempt to create the storage schema."); log.debug(e); } finally { shutdownClusterConnection(); @@ -119,7 +121,7 @@ class VersionManager extends AbstractManager { if (!schemaExists()) { execute(updateFolder.getUpdateFiles().get(0), properties); } else { - log.info("RHQ schema already exists."); + log.info("Storage schema already exists."); } } catch (Exception ex) { log.error(ex); @@ -147,28 +149,44 @@ class VersionManager extends AbstractManager { initClusterSession();
if (!schemaExists()) { - log.error("Schema not installed."); - throw new RuntimeException("Schema not installed propertly, cannot apply schema updates."); + log.error("Storage schema not installed."); + throw new RuntimeException("Storage schema not installed propertly, cannot apply schema updates."); }
UpdateFolder updateFolder = new UpdateFolder(Task.Update.getFolder());
- int currentSchemaVersion = getSchemaVersion(); - log.info("Current schema version is " + currentSchemaVersion); - updateFolder.removeAppliedUpdates(currentSchemaVersion); + int installedSchemaVersion = getInstalledSchemaVersion(); + log.info("Installed storage schema version is " + installedSchemaVersion);
- if (updateFolder.getUpdateFiles().size() == 0) { - log.info("RHQ schema is current! No updates applied."); + int requiredSchemaVersion = updateFolder.getLatestVersion(); + log.info("Required storage schema version is " + requiredSchemaVersion); + + if (requiredSchemaVersion == installedSchemaVersion) { + log.info("Storage schema version is current ( " + installedSchemaVersion + " ). No updates applied."); + } else if (requiredSchemaVersion < installedSchemaVersion) { + log.error("Installed storage cluster schema version: " + installedSchemaVersion + + ". Required schema version: " + requiredSchemaVersion + + ". Storage cluster schema has been updated beyond the capability of the existing server installation."); + throw new InstalledSchemaTooAdvancedException(); } else { - for (UpdateFile updateFile : updateFolder.getUpdateFiles()) { - execute(updateFile); + log.info("Storage schema requires updates. Updating from version " + installedSchemaVersion + + " to version " + requiredSchemaVersion + "."); + + updateFolder.removeAppliedUpdates(installedSchemaVersion); + + if (updateFolder.getUpdateFiles().size() == 0) { + log.info("Storage schema is current! No updates applied."); + } else { + for (UpdateFile updateFile : updateFolder.getUpdateFiles()) { + execute(updateFile);
- Properties versionProperties = new Properties(); - versionProperties.put("version", updateFile.extractVersion() + ""); - versionProperties.put("time", System.currentTimeMillis() + ""); - executeManagementQuery(Query.INSERT_SCHEMA_VERSION, versionProperties); + Properties versionProperties = new Properties(); + versionProperties.put("version", updateFile.extractVersion() + ""); + versionProperties.put("time", System.currentTimeMillis() + ""); + executeManagementQuery(Query.INSERT_SCHEMA_VERSION, versionProperties);
- log.info("RHQ schema update " + updateFile +" applied."); + log.info("Storage schema update " + updateFile + " applied."); + } } } } finally { @@ -185,7 +203,7 @@ class VersionManager extends AbstractManager { * @throws Exception */ public void drop() throws Exception { - log.info("Preparing to drop RHQ schema"); + log.info("Preparing to drop storage schema.");
UpdateFolder updateFolder = new UpdateFolder(Task.Drop.getFolder()); Properties properties = new Properties(System.getProperties()); @@ -209,22 +227,60 @@ class VersionManager extends AbstractManager { if (schemaExists()) { //2. Drop RHQ schema execute(updateFolder.getUpdateFiles().get(1), properties); - log.info("RHQ schema dropped."); + log.info("Storage schema dropped."); } else { - log.info("RHQ schema does not exist. Drop operation not required."); + log.info("Storage schema does not exist. Drop operation not required."); }
if (userExists()) { //3. Drop RHQ user execute(updateFolder.getUpdateFiles().get(2), properties); - log.info("RHQ admin user dropped."); + log.info("RHQ admin user dropped from storage cluster."); } else { - log.info("RHQ admin user does not exist. Drop operation not required."); + log.info("RHQ admin user does not exist on the storage cluster. Drop operation not required."); + } + } catch (Exception e) { + throw new RuntimeException(e); + } finally { + shutdownClusterConnection(); + } + } + + /** + * Check storage cluster schema version compatibility. + * If the version installed on the storage cluster is too advanced or too old compared + * to the version available in the current schema manager an error will thrown. + * + * @throws Exception schema compatibility exception + */ + public void checkCompatibility() throws Exception { + log.info("Preparing to check storage schema compatibility."); + try { + initClusterSession(); + + int installedSchemaVersion = this.getInstalledSchemaVersion(); + + UpdateFolder folder = new UpdateFolder(Task.Update.getFolder()); + int requiredSchemaVersion = folder.getLatestVersion(); + + if (installedSchemaVersion < requiredSchemaVersion) { + log.error("Storage cluster schema version:" + installedSchemaVersion + ". Required schema version: " + + requiredSchemaVersion + ". Please update storage cluster schema version."); + throw new InstalledSchemaTooOldException(); + } + + if (installedSchemaVersion > requiredSchemaVersion) { + log.error("Storage cluster schema version:" + installedSchemaVersion + ". Required schema version: " + + requiredSchemaVersion + + ". 
Storage cluster has been updated beyond the capability of the current server installation."); + throw new InstalledSchemaTooAdvancedException(); } } catch (Exception e) { throw new RuntimeException(e); } finally { shutdownClusterConnection(); + + log.info("Completed check for storage schema compatibility."); } } } diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/exception/InstalledSchemaTooAdvancedException.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/exception/InstalledSchemaTooAdvancedException.java new file mode 100644 index 0000000..2f83ef5 --- /dev/null +++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/exception/InstalledSchemaTooAdvancedException.java @@ -0,0 +1,38 @@ +/* + * + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation, and/or the GNU Lesser + * General Public License, version 2.1, also as published by the Free + * Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License and the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * and the GNU Lesser General Public License along with this program; + * if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + */ + +package org.rhq.cassandra.schema.exception; + + +/** + * @author Stefan Negrea + */ +public class InstalledSchemaTooAdvancedException extends Exception { + + public InstalledSchemaTooAdvancedException() { + super( + "Storage schema is too advanced for the current installation. Schema revisions have been applied beyond the capability of the installation."); + } +} \ No newline at end of file diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/exception/InstalledSchemaTooOldException.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/exception/InstalledSchemaTooOldException.java new file mode 100644 index 0000000..4da863b --- /dev/null +++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/exception/InstalledSchemaTooOldException.java @@ -0,0 +1,38 @@ +/* + * + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation, and/or the GNU Lesser + * General Public License, version 2.1, also as published by the Free + * Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License and the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * and the GNU Lesser General Public License along with this program; + * if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + */ + +package org.rhq.cassandra.schema.exception; + + +/** + * @author Stefan Negrea + */ +public class InstalledSchemaTooOldException extends Exception { + + public InstalledSchemaTooOldException() { + super( + "Storage schema needs to be updated. The schema manager contains updates not yet applied to the storage cluster installation."); + } +} \ No newline at end of file diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/listener/CoreGuiServletContextListener.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/listener/CoreGuiServletContextListener.java index 8260327..67e4389 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/listener/CoreGuiServletContextListener.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/listener/CoreGuiServletContextListener.java @@ -54,7 +54,11 @@ public class CoreGuiServletContextListener implements ServletContextListener { scheduledExecutorService.schedule(new Runnable() { @Override public void run() { - startupBean.init(); + try { + startupBean.init(); + } catch (Exception e) { + shutdownListener.handleNotification(); + } } }, 10, SECONDS); } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java index 6828b12..dbd599a 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java @@ -40,6 +40,7 @@ import com.datastax.driver.core.Session; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory;
+import org.rhq.cassandra.schema.SchemaManager; import org.rhq.cassandra.util.ClusterBuilder; import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.util.StringUtil; @@ -86,7 +87,6 @@ public class StorageClientManagerBean { String username = getRequiredStorageProperty(USERNAME_PROP); String password = getRequiredStorageProperty(PASSWORD_PROP);
- metricsConfiguration = new MetricsConfiguration(); List<StorageNode> storageNodes = storageNodeManager.getStorageNodes(); if (storageNodes.isEmpty()) { throw new IllegalStateException( @@ -94,12 +94,17 @@ public class StorageClientManagerBean { + "result of running dbsetup or deleting rows from rhq_storage_node table. Please re-install the " + "storage node to fix this issue."); } + + checkSchemaCompatibility(username, password, storageNodes); + + Session wrappedSession = createSession(username, password, storageNodeManager.getStorageNodes()); session = new StorageSession(wrappedSession);
storageClusterMonitor = new StorageClusterMonitor(); session.addStorageStateListener(storageClusterMonitor);
+ metricsConfiguration = new MetricsConfiguration(); metricsDAO = new MetricsDAO(session, metricsConfiguration);
initMetricsServer(); @@ -108,6 +113,22 @@ public class StorageClientManagerBean { log.info("Storage client subsystem is now initialized"); }
+ /** + * Checks storage node schema compatibility. + * + * @param username username + * @param password password + * @param storageNodes storage nodes + */ + private void checkSchemaCompatibility(String username, String password, List<StorageNode> storageNodes) { + SchemaManager schemaManager = new SchemaManager(username, password, storageNodes); + try { + schemaManager.checkCompatibility(); + } catch (Exception e) { + throw new RuntimeException(e.getMessage(), e); + } + } + public synchronized void shutdown() { if (!initialized) { log.info("Storage client subsystem is already shut down. Skipping shutdown steps."); diff --git a/pom.xml b/pom.xml index 3f2adbd..eb71cb3 100644 --- a/pom.xml +++ b/pom.xml @@ -108,6 +108,8 @@ <jboss-modules.version>1.1.1.GA</jboss-modules.version> <jboss-dmr.version>1.1.1.Final</jboss-dmr.version> <jboss-msc.version>1.0.2.GA</jboss-msc.version> + <jboss-vfs.version>3.1.0.Final</jboss-vfs.version> +
<!-- Not Provided - some of these are needed by the agent --> <jboss-annotations.version>4.2.3.GA</jboss-annotations.version>
commit 5a60d2669e90e87022fcad9f628e987f1aaf5385 Author: Jirka Kremser jkremser@redhat.com Date: Tue Aug 13 20:43:51 2013 +0200
Improvements to cluster-wide alert UI: improved grouping, headers of groups, link to definition.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAlertHistoryView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAlertHistoryView.java index 9c26b5c..0821585 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAlertHistoryView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAlertHistoryView.java @@ -23,7 +23,6 @@ import java.util.ArrayList; import com.smartgwt.client.data.Record; import com.smartgwt.client.types.GroupStartOpen; import com.smartgwt.client.types.ImageStyle; -import com.smartgwt.client.types.SummaryFunctionType; import com.smartgwt.client.widgets.Img; import com.smartgwt.client.widgets.grid.CellFormatter; import com.smartgwt.client.widgets.grid.ListGrid; @@ -37,6 +36,8 @@ import org.rhq.enterprise.gui.coregui.client.ImageManager; import org.rhq.enterprise.gui.coregui.client.LinkManager; import org.rhq.enterprise.gui.coregui.client.alert.AlertDataSource; import org.rhq.enterprise.gui.coregui.client.alert.AlertHistoryView; +import org.rhq.enterprise.gui.coregui.client.components.table.AbstractTableAction; +import org.rhq.enterprise.gui.coregui.client.components.table.TableActionEnablement; import org.rhq.enterprise.gui.coregui.client.inventory.resource.AncestryUtil; import org.rhq.enterprise.gui.coregui.client.util.StringUtility;
@@ -45,6 +46,7 @@ import org.rhq.enterprise.gui.coregui.client.util.StringUtility; * */ public class StorageNodeAlertHistoryView extends AlertHistoryView { + private boolean isGouped = true;
public StorageNodeAlertHistoryView(String tableTitle, int[] resourceIds) { super(tableTitle, resourceIds); @@ -64,8 +66,32 @@ public class StorageNodeAlertHistoryView extends AlertHistoryView { continue; } if (AlertCriteria.SORT_FIELD_CTIME.equals(field.getName())) { field.setWidth(240); + field.setShowGridSummary(true); + field.setShowGroupSummary(true); + field.setSummaryFunction(new SummaryFunction() { + public Object getSummaryValue(Record[] records, ListGridField field) { + if (records != null && records.length > 0 && records[0] != null) { + Integer resourceId = records[0].getAttributeAsInt(AncestryUtil.RESOURCE_ID); + Integer defId = records[0].getAttributeAsInt("definitionId"); + String url = LinkManager.getSubsystemAlertDefinitionLink(resourceId, defId); + return LinkManager.getHref(url, "Link to Definition"); + } else return ""; + } + }); + field.setCellFormatter(new CellFormatter() { + public String format(Object o, ListGridRecord listGridRecord, int i, int i1) { + if (listGridRecord.getAttribute("groupValue") != null) { + return (String) o; + } + Integer resourceId = listGridRecord.getAttributeAsInt(AncestryUtil.RESOURCE_ID); + Integer defId = listGridRecord.getAttributeAsInt("definitionId"); + String url = LinkManager.getSubsystemAlertDefinitionLink(resourceId, defId); + return LinkManager.getHref(url, o.toString()); + } + }); + field.setWidth(240); } else if ("conditionValue".equals(field.getName())) { - field.setWidth(90); + field.setWidth(140); } else if ("acknowledgingSubject".equals(field.getName())) { field.setSummaryFunction(new SummaryFunction() { public Object getSummaryValue(Record[] records, ListGridField field) { @@ -74,8 +100,8 @@ public class StorageNodeAlertHistoryView extends AlertHistoryView { if (record.getAttribute("acknowledgingSubject") != null) { count++; } - } - return count + " Unacked"; + } + return "(" + count + " / " + records.length + ")"; } }); field.setCellFormatter(new CellFormatter() { @@ -100,20 +126,12 @@ public class 
StorageNodeAlertHistoryView extends AlertHistoryView { newFields.add(1, field); continue; } else if ("name".equals(field.getName())) { - field.setShowGridSummary(true); - field.setShowGroupSummary(true); - field.setSummaryFunction(SummaryFunctionType.COUNT); field.setCellFormatter(new CellFormatter() { public String format(Object o, ListGridRecord listGridRecord, int i, int i1) { - if (listGridRecord.getAttribute("groupValue") != null) { - return (String) o; - } - Integer resourceId = listGridRecord.getAttributeAsInt(AncestryUtil.RESOURCE_ID); - Integer defId = listGridRecord.getAttributeAsInt("definitionId"); - String url = LinkManager.getSubsystemAlertDefinitionLink(resourceId, defId); - return LinkManager.getHref(url, o.toString()); + return o.toString(); } }); + field.setHidden(true); } newFields.add(field); } @@ -142,6 +160,9 @@ public class StorageNodeAlertHistoryView extends AlertHistoryView { if (value == null) { return ""; } + if (record.getAttribute("groupValue") != null) { + return value.toString(); + } String detailsUrl = getDetailUrlFromRecord(record); String formattedValue = StringUtility.escapeHtml(value.toString()); return LinkManager.getHref(detailsUrl, formattedValue); @@ -169,5 +190,20 @@ public class StorageNodeAlertHistoryView extends AlertHistoryView { throw new IllegalArgumentException(msg); } } - + + @Override + protected void configureTable() { + super.configureTable(); + addTableAction("(Un)Group Alerts", new AbstractTableAction(TableActionEnablement.ALWAYS) { + public void executeAction(ListGridRecord[] selection, Object actionValue) { + if (isGouped) { + getListGrid().ungroup(); + } else { + getListGrid().groupBy("name"); + } + isGouped = !isGouped; + refreshTableInfo(); + } + }); + } }
commit eb914a6d455656d5f24a7dfae93f7eb39201926c Author: Jirka Kremser jkremser@redhat.com Date: Tue Aug 13 18:43:59 2013 +0200
Storage node configuration: added check for Max Heap Size (-Xmx) > Heap New Size (-Xmn).
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeConfigurationEditor.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeConfigurationEditor.java index f1c0003..7bcf2cd 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeConfigurationEditor.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeConfigurationEditor.java @@ -19,8 +19,10 @@ package org.rhq.enterprise.gui.coregui.client.admin.storage;
import java.util.ArrayList; +import java.util.HashMap; import java.util.LinkedHashSet; import java.util.List; +import java.util.Map; import java.util.Set;
import com.google.gwt.user.client.rpc.AsyncCallback; @@ -194,7 +196,7 @@ public class StorageNodeConfigurationEditor extends EnhancedVLayout implements R FIELD_HEAP_MAX, "Max Heap Size", configuration.getHeapSize(), - "The maximum heap size. This value will be used with the -Xmx JVM option. The value should be an integer with a suffix of M or G to indicate megabytes or gigabytes.")); + "The maximum heap size. This value will be used with the -Xmx JVM option. If you are going to increase/decrease this value, then you should also increase/decrease the new generation proportionally. The value should be an integer with a suffix of M or G to indicate megabytes or gigabytes.")); items .addAll(buildOneFormRowWithCombobox( FIELD_HEAP_NEW, @@ -233,6 +235,13 @@ public class StorageNodeConfigurationEditor extends EnhancedVLayout implements R saveButton.addClickHandler(new ClickHandler() { public void onClick(ClickEvent clickEvent) { if (form.validate()) { + if (!checkNewHeapLowerThanMaxHeap()) { + Map<String, String> errors = new HashMap<String, String>(2); + errors.put(FIELD_HEAP_MAX, "Should be higher than Heap New Size."); + errors.put(FIELD_HEAP_NEW, "Should be lower than Max Heap Size."); + form.setErrors(errors, true); + return; + } SC.ask( "Changing the storage node configuration requires restart of storage node. Do you want to continue?", new BooleanCallback() { @@ -263,6 +272,30 @@ public class StorageNodeConfigurationEditor extends EnhancedVLayout implements R return configuration; }
+ private boolean checkNewHeapLowerThanMaxHeap() { + // let's be paranoid + Object maxHeapObject = form.getField(FIELD_HEAP_MAX).getValue(); + Object newHeapObject = form.getField(FIELD_HEAP_NEW).getValue(); + + String maxHeapString = maxHeapObject != null ? maxHeapObject.toString().trim() : ""; + String newHeapString = newHeapObject != null ? newHeapObject.toString().trim() : ""; + + if (maxHeapString.isEmpty() || newHeapString.isEmpty()) { + return false; + } + + int maxHeap = Integer.parseInt(maxHeapString.substring(0, maxHeapString.length() - 2)); + int newHeap = Integer.parseInt(newHeapString.substring(0, newHeapString.length() - 2)); + + boolean isMaxHeapInMegs = maxHeapString.toLowerCase().indexOf("m") != -1; + boolean isNewHeapInMegs = newHeapString.toLowerCase().indexOf("m") != -1; + + maxHeap = isMaxHeapInMegs ? maxHeap : maxHeap * 1024; + newHeap = isNewHeapInMegs ? newHeap : newHeap * 1024; + + return newHeap < maxHeap; + } + private String getJVMMemoryString(String raw) { if (raw == null || raw.trim().isEmpty()) { throw new IllegalArgumentException("input string is null or empty");
commit eb6d49756238bef5d2435927a04cbe44dea2805b Author: Jirka Kremser jkremser@redhat.com Date: Tue Aug 13 12:57:44 2013 +0200
api checks: adding the return type of intentionally changed method for clirr to make it work
diff --git a/modules/core/domain/intentional-api-changes-since-4.8.0.xml b/modules/core/domain/intentional-api-changes-since-4.8.0.xml index b14d38e..6a22ea5 100644 --- a/modules/core/domain/intentional-api-changes-since-4.8.0.xml +++ b/modules/core/domain/intentional-api-changes-since-4.8.0.xml @@ -11,7 +11,7 @@ <difference> <className>org/rhq/core/domain/cloud/Server</className> <differenceType>7004</differenceType><!-- num argments changed --> - <method>clearStatus()</method> + <method>void clearStatus()</method> <justification> This class is not exposed remotely. The previous method signature was doing a blind clear of the status. The new method will clear only the specified status.
commit b3fd4fe564007c32ffaa33b42f7123a704c6964b Author: Simeon Pinder spinder@fulliautomatix.conchfritter.com Date: Mon Aug 12 23:22:49 2013 -0400
Fix hard-coded parent pom version for cassandra-auth.
diff --git a/modules/common/cassandra-auth/pom.xml b/modules/common/cassandra-auth/pom.xml index c53c752..5195e6e 100644 --- a/modules/common/cassandra-auth/pom.xml +++ b/modules/common/cassandra-auth/pom.xml @@ -6,7 +6,7 @@ <parent> <groupId>org.rhq</groupId> <artifactId>rhq-common-parent</artifactId> - <version>4.9.0-SNAPSHOT</version> + <version>4.7.0.JON</version> </parent>
<artifactId>rhq-cassandra-auth</artifactId>
commit c9283bd57a3bd4efc410f3a54b948c382cbc4626 Author: Jirka Kremser jkremser@redhat.com Date: Mon Aug 12 21:40:05 2013 +0200
New component for storage node alerts (derived from AlertHistoryView); Predefined alert templates should have the description fields correctly filled; calling the update configuration method in an async way.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java index 5538db5..cd0ec54 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java @@ -18,14 +18,22 @@ */ package org.rhq.enterprise.gui.coregui.client.admin.storage;
+import java.util.ArrayList; import java.util.EnumSet;
import com.google.gwt.user.client.Timer; import com.google.gwt.user.client.rpc.AsyncCallback; +import com.smartgwt.client.data.DataSourceField; +import com.smartgwt.client.types.GroupStartOpen; import com.smartgwt.client.widgets.Label; +import com.smartgwt.client.widgets.grid.CellFormatter; +import com.smartgwt.client.widgets.grid.ListGrid; +import com.smartgwt.client.widgets.grid.ListGridField; +import com.smartgwt.client.widgets.grid.ListGridRecord; import com.smartgwt.client.widgets.tab.events.TabSelectedEvent; import com.smartgwt.client.widgets.tab.events.TabSelectedHandler;
+import org.rhq.core.domain.criteria.AlertCriteria; import org.rhq.core.domain.criteria.ResourceGroupCriteria; import org.rhq.core.domain.resource.ResourceType; import org.rhq.core.domain.resource.group.composite.ResourceGroupComposite; @@ -34,17 +42,22 @@ import org.rhq.core.domain.util.collection.ArrayUtils; import org.rhq.enterprise.gui.coregui.client.BookmarkableView; import org.rhq.enterprise.gui.coregui.client.CoreGUI; import org.rhq.enterprise.gui.coregui.client.IconEnum; +import org.rhq.enterprise.gui.coregui.client.LinkManager; import org.rhq.enterprise.gui.coregui.client.ViewPath; import org.rhq.enterprise.gui.coregui.client.admin.AdministrationView; +import org.rhq.enterprise.gui.coregui.client.alert.AlertDataSource; import org.rhq.enterprise.gui.coregui.client.alert.AlertHistoryView; import org.rhq.enterprise.gui.coregui.client.components.tab.NamedTab; import org.rhq.enterprise.gui.coregui.client.components.tab.NamedTabSet; +import org.rhq.enterprise.gui.coregui.client.components.table.Table; import org.rhq.enterprise.gui.coregui.client.components.view.HasViewName; import org.rhq.enterprise.gui.coregui.client.components.view.ViewName; import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; import org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.configuration.GroupResourceConfigurationEditView; +import org.rhq.enterprise.gui.coregui.client.inventory.resource.AncestryUtil; import org.rhq.enterprise.gui.coregui.client.inventory.resource.type.ResourceTypeRepository; import org.rhq.enterprise.gui.coregui.client.util.Log; +import org.rhq.enterprise.gui.coregui.client.util.StringUtility; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; import org.rhq.enterprise.gui.coregui.client.util.message.Message;
@@ -120,7 +133,8 @@ public class StorageNodeAdminView extends EnhancedVLayout implements/* HasViewNa tabset.getTabByName(tabInfo.name.getName()).setPane(new Label("in progress..")); } else if (tabInfo.equals(alertsTabInfo)) { if (resIds != null) { - tabset.getTabByName(tabInfo.name.getName()).setPane(new AlertHistoryView("storageNodesAlerts", resIds)); + tabset.getTabByName(tabInfo.name.getName()).setPane( + new StorageNodeAlertHistoryView("storageNodesAlerts", resIds)); } else { GWTServiceLookup.getStorageService().findResourcesWithAlertDefinitions(new AsyncCallback<Integer[]>() { @Override @@ -137,7 +151,7 @@ public class StorageNodeAdminView extends EnhancedVLayout implements/* HasViewNa } else { resIds = ArrayUtils.unwrapArray(result); tabset.getTabByName(tabInfo.name.getName()).setPane( - new AlertHistoryView("storageNodesAlerts", resIds)); + new StorageNodeAlertHistoryView("storageNodesAlerts", resIds)); tabset.selectTab(tabInfo.index); } } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAlertHistoryView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAlertHistoryView.java new file mode 100644 index 0000000..9c26b5c --- /dev/null +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAlertHistoryView.java @@ -0,0 +1,173 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ +package org.rhq.enterprise.gui.coregui.client.admin.storage; + +import java.util.ArrayList; + +import com.smartgwt.client.data.Record; +import com.smartgwt.client.types.GroupStartOpen; +import com.smartgwt.client.types.ImageStyle; +import com.smartgwt.client.types.SummaryFunctionType; +import com.smartgwt.client.widgets.Img; +import com.smartgwt.client.widgets.grid.CellFormatter; +import com.smartgwt.client.widgets.grid.ListGrid; +import com.smartgwt.client.widgets.grid.ListGridField; +import com.smartgwt.client.widgets.grid.ListGridRecord; +import com.smartgwt.client.widgets.grid.SummaryFunction; + +import org.rhq.core.domain.criteria.AlertCriteria; +import org.rhq.enterprise.gui.coregui.client.CoreGUI; +import org.rhq.enterprise.gui.coregui.client.ImageManager; +import org.rhq.enterprise.gui.coregui.client.LinkManager; +import org.rhq.enterprise.gui.coregui.client.alert.AlertDataSource; +import org.rhq.enterprise.gui.coregui.client.alert.AlertHistoryView; +import org.rhq.enterprise.gui.coregui.client.inventory.resource.AncestryUtil; +import org.rhq.enterprise.gui.coregui.client.util.StringUtility; + +/** + * @author Jirka Kremser + * + */ +public class StorageNodeAlertHistoryView extends AlertHistoryView { + + public StorageNodeAlertHistoryView(String tableTitle, int[] resourceIds) { + super(tableTitle, resourceIds); + } + + @Override + public AlertDataSource getDataSource() { + return new AlertDataSource() { + @Override + public ArrayList<ListGridField> getListGridFields() { + ArrayList<ListGridField> fields = super.getListGridFields(); + ArrayList<ListGridField> newFields = new ArrayList<ListGridField>(fields.size()); + for (ListGridField field : fields) { + if ("priority".equals(field.getName()) + || 
AncestryUtil.RESOURCE_NAME.equals(field.getName()) + || AncestryUtil.RESOURCE_ANCESTRY.equals(field.getName())) { + continue; + } if (AlertCriteria.SORT_FIELD_CTIME.equals(field.getName())) { + field.setWidth(240); + } else if ("conditionValue".equals(field.getName())) { + field.setWidth(90); + } else if ("acknowledgingSubject".equals(field.getName())) { + field.setSummaryFunction(new SummaryFunction() { + public Object getSummaryValue(Record[] records, ListGridField field) { + int count = 0; + for (Record record : records) { + if (record.getAttribute("acknowledgingSubject") != null) { + count++; + } + } + return count + " Unacked"; + } + }); + field.setCellFormatter(new CellFormatter() { + public String format(Object o, ListGridRecord listGridRecord, int i, int i1) { + if (listGridRecord.getAttribute("groupValue") != null) { + return (String) o; + } + String ackSubject = listGridRecord.getAttribute("acknowledgingSubject"); + if (ackSubject == null) { + return " "; + } else { + Img checkedImg = new Img(ImageManager.getAlertStatusCheckedIcon(), 80, 16); + checkedImg.setImageType(ImageStyle.CENTER); + return checkedImg.getInnerHTML(); + } + } + }); + + field.setShowGridSummary(false); + field.setShowGroupSummary(true); + field.setWidth(90); + newFields.add(1, field); + continue; + } else if ("name".equals(field.getName())) { + field.setShowGridSummary(true); + field.setShowGroupSummary(true); + field.setSummaryFunction(SummaryFunctionType.COUNT); + field.setCellFormatter(new CellFormatter() { + public String format(Object o, ListGridRecord listGridRecord, int i, int i1) { + if (listGridRecord.getAttribute("groupValue") != null) { + return (String) o; + } + Integer resourceId = listGridRecord.getAttributeAsInt(AncestryUtil.RESOURCE_ID); + Integer defId = listGridRecord.getAttributeAsInt("definitionId"); + String url = LinkManager.getSubsystemAlertDefinitionLink(resourceId, defId); + return LinkManager.getHref(url, o.toString()); + } + }); + } + newFields.add(field); + 
} + ListGridField descriptionField = new ListGridField("description", MSG.common_title_description()); + descriptionField.setCanSortClientOnly(true); + newFields.add(descriptionField); + return newFields; + } + }; + } + + @Override + protected void configureListGrid(ListGrid grid) { + ListGrid listGrid = super.getListGrid(); + listGrid.setGroupStartOpen(GroupStartOpen.ALL); + listGrid.setShowGroupSummary(true); + listGrid.setShowGroupSummaryInHeader(true); + + listGrid.setGroupByField("name"); + } + + @Override + protected CellFormatter getDetailsLinkColumnCellFormatter() { + return new CellFormatter() { + public String format(Object value, ListGridRecord record, int i, int i1) { + if (value == null) { + return ""; + } + String detailsUrl = getDetailUrlFromRecord(record); + String formattedValue = StringUtility.escapeHtml(value.toString()); + return LinkManager.getHref(detailsUrl, formattedValue); + } + }; + } + + @Override + public void showDetails(ListGridRecord record) { + CoreGUI.goToView(getDetailUrlFromRecord(record)); + } + + private String getDetailUrlFromRecord(ListGridRecord record) { + if (record == null) { + throw new IllegalArgumentException("'record' parameter is null."); + } + Integer recordId = getId(record); + Integer resourceId = record.getAttributeAsInt(AncestryUtil.RESOURCE_ID); + if (recordId != null && recordId.intValue() > 0 && resourceId != null && resourceId > 0) { + return "#Resource/" + resourceId + "/Alerts/History/" + convertIDToCurrentViewPath(recordId); + } else { + String msg = MSG.view_tableSection_error_badId(this.getClass().toString(), (recordId == null) ? 
"null" + : recordId.toString()); + CoreGUI.getErrorHandler().handleError(msg); + throw new IllegalArgumentException(msg); + } + } + +} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeConfigurationEditor.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeConfigurationEditor.java index 3c4923b..f1c0003 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeConfigurationEditor.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeConfigurationEditor.java @@ -74,15 +74,12 @@ public class StorageNodeConfigurationEditor extends EnhancedVLayout implements R
private void save() { updateConfiguration(); - GWTServiceLookup.getStorageService().updateConfiguration(configuration, new AsyncCallback<Boolean>() { - public void onSuccess(Boolean result) { - if (result) { - Message msg = new Message("Storage node settings were successfully updated.", Message.Severity.Info); - CoreGUI.getMessageCenter().notify(msg); - } else { - onFailure(new Exception("Operation failed.")); - } + GWTServiceLookup.getStorageService().updateConfiguration(configuration, new AsyncCallback<Void>() { + public void onSuccess(Void result) { + Message msg = new Message("Storage node settings were successfully updated.", Message.Severity.Info); + CoreGUI.getMessageCenter().notify(msg); } + public void onFailure(Throwable caught) { CoreGUI.getErrorHandler().handleError("Unable to update the storage node settings.", caught); } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java index 6e6df57..957bf34 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java @@ -84,5 +84,5 @@ public interface StorageGWTService extends RemoteService {
StorageNodeConfigurationComposite retrieveConfiguration(StorageNode storageNode) throws RuntimeException;
- boolean updateConfiguration(StorageNodeConfigurationComposite storageNodeConfiguration) throws RuntimeException; + void updateConfiguration(StorageNodeConfigurationComposite storageNodeConfiguration) throws RuntimeException; } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java index 4a01427..f7f7442 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java @@ -163,9 +163,9 @@ public class StorageGWTServiceImpl extends AbstractGWTServiceImpl implements Sto }
@Override - public boolean updateConfiguration(StorageNodeConfigurationComposite storageNodeConfiguration) throws RuntimeException { + public void updateConfiguration(StorageNodeConfigurationComposite storageNodeConfiguration) throws RuntimeException { try { - return storageNodeManager.updateConfiguration(getSessionSubject(), storageNodeConfiguration); + storageNodeManager.updateConfigurationAsync(getSessionSubject(), storageNodeConfiguration); } catch (Throwable t) { throw getExceptionToThrowToClient(t); } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 053b57e..9e41692 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -36,6 +36,11 @@ import java.util.Map; import java.util.Queue; import java.util.Set;
+import org.rhq.enterprise.server.storage.StorageClusterSettings; +import org.rhq.enterprise.server.storage.StorageClusterSettingsManagerBean; +import org.rhq.enterprise.server.storage.StorageNodeOperationsHandlerLocal; + +import javax.ejb.Asynchronous; import javax.ejb.EJB; import javax.ejb.Stateless; import javax.ejb.TransactionAttribute; @@ -83,9 +88,6 @@ import org.rhq.enterprise.server.resource.ResourceManagerLocal; import org.rhq.enterprise.server.resource.ResourceTypeManagerLocal; import org.rhq.enterprise.server.rest.reporting.MeasurementConverter; import org.rhq.enterprise.server.scheduler.SchedulerLocal; -import org.rhq.enterprise.server.storage.StorageClusterSettings; -import org.rhq.enterprise.server.storage.StorageClusterSettingsManagerBean; -import org.rhq.enterprise.server.storage.StorageNodeOperationsHandlerLocal; import org.rhq.enterprise.server.util.CriteriaQueryGenerator; import org.rhq.enterprise.server.util.CriteriaQueryRunner; import org.rhq.enterprise.server.util.LookupUtil; @@ -624,6 +626,12 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
return configuration; } + + @Override + @Asynchronous + public void updateConfigurationAsync(Subject subject, StorageNodeConfigurationComposite storageNodeConfiguration) { + updateConfiguration(subject, storageNodeConfiguration); + }
@Override public boolean updateConfiguration(Subject subject, StorageNodeConfigurationComposite storageNodeConfiguration) { diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java index 0c1b0ab..b5ee7f0 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java @@ -65,6 +65,8 @@ public interface StorageNodeManagerLocal { StorageNodeConfigurationComposite retrieveConfiguration(Subject subject, StorageNode storageNode);
boolean updateConfiguration(Subject subject, StorageNodeConfigurationComposite storageNodeConfiguration); + + void updateConfigurationAsync(Subject subject, StorageNodeConfigurationComposite storageNodeConfiguration);
/** * Fetches the list of StorageNode entities based on provided criteria. diff --git a/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java b/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java index 840477c..f752d1d 100644 --- a/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java +++ b/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java @@ -258,6 +258,7 @@ public class AlertDefinitionServerPluginComponent implements ServerPluginCompone newTemplate.setResourceType(resourceType); newTemplate.setPriority(AlertPriority.MEDIUM); newTemplate.setConditionExpression(BooleanExpression.ANY); + newTemplate.setDescription(storageNodeHighHeapTemplate.getDescription()); newTemplate.setRecoveryId(0); newTemplate.setEnabled(true);
@@ -308,6 +309,7 @@ public class AlertDefinitionServerPluginComponent implements ServerPluginCompone newTemplate.setResourceType(resourceType); newTemplate.setPriority(AlertPriority.MEDIUM); newTemplate.setConditionExpression(BooleanExpression.ANY); + newTemplate.setDescription(storageNodeHighDiskUsageTemplate.getDescription()); newTemplate.setRecoveryId(0); newTemplate.setEnabled(true);
@@ -377,6 +379,7 @@ public class AlertDefinitionServerPluginComponent implements ServerPluginCompone newTemplate.setResourceType(resourceType); newTemplate.setPriority(AlertPriority.MEDIUM); newTemplate.setConditionExpression(BooleanExpression.ANY); + newTemplate.setDescription(storageNodeSnapshotFailureTemplate.getDescription()); newTemplate.setRecoveryId(0); newTemplate.setEnabled(true);
commit 9eec2324f12a536387d285b91587392b16a3bef8 Merge: 5e6b489 6757c52 Author: Simeon Pinder spinder@fulliautomatix.conchfritter.com Date: Mon Aug 12 15:21:20 2013 -0400
Merge branch 'master' into nightly/rhq.jon
Conflicts: modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBeanTest.java modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
diff --cc modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBeanTest.java index d44d637,78c0cff..9009eb9 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBeanTest.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBeanTest.java @@@ -25,14 -25,12 +25,7 @@@
package org.rhq.enterprise.server.cloud;
--import static org.rhq.enterprise.server.cloud.StorageNodeManagerBean.STORAGE_NODE_GROUP_NAME; - import static org.rhq.enterprise.server.cloud.StorageNodeManagerBean.STORAGE_NODE_PLUGIN_NAME; - import static org.rhq.enterprise.server.cloud.StorageNodeManagerBean.STORAGE_NODE_RESOURCE_TYPE_NAME; -- --import java.util.ArrayList; --import java.util.Arrays; import java.util.HashSet; --import java.util.List; import java.util.Set; import java.util.UUID;
@@@ -40,19 -38,19 +33,16 @@@ import javax.ejb.EJB import javax.persistence.Query; import javax.transaction.Transaction;
--import org.testng.Assert; import org.testng.annotations.Test;
import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.configuration.definition.ConfigurationDefinition; import org.rhq.core.domain.configuration.definition.PropertyDefinitionSimple; import org.rhq.core.domain.configuration.definition.PropertySimpleType; --import org.rhq.core.domain.criteria.ResourceGroupCriteria; import org.rhq.core.domain.criteria.StorageNodeCriteria; import org.rhq.core.domain.resource.Resource; import org.rhq.core.domain.resource.ResourceCategory; import org.rhq.core.domain.resource.ResourceType; --import org.rhq.core.domain.resource.group.ResourceGroup; import org.rhq.core.domain.util.PageList; import org.rhq.core.domain.util.PageOrdering; import org.rhq.enterprise.server.auth.SubjectManagerLocal;
commit bd60c17abb96278e4288bcbc97a6ab518e5738e6 Author: Stefan Negrea snegrea@redhat.com Date: Mon Aug 12 12:17:17 2013 -0500
Slight change to the comment text.
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/Server.java b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/Server.java index 7fa82f7..671db6b 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/Server.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/Server.java @@ -88,9 +88,9 @@ import org.rhq.core.domain.resource.Agent; + " AND ( s.name <> :thisServerName OR :thisServerName IS NULL ) "), // @NamedQuery(name = Server.QUERY_UPDATE_STATUS_BY_NAME, query = "" // + " UPDATE Server s " // - + " SET s.status = 3 " //change this to the only value possible before adding MANUAL_MAINTENANCE_MODE - //this status should never be set to negative numbers since they are values allowed - //by the bitmask. + + " SET s.status = 3 " //Change this to the only value possible before adding MANUAL_MAINTENANCE_MODE. + //This status should never be set to negative since negative values are not + //allowed by the bitmask. + " WHERE s.status = 0 ") }) @SequenceGenerator(allocationSize = org.rhq.core.domain.util.Constants.ALLOCATION_SIZE, name = "RHQ_SERVER_ID_SEQ", sequenceName = "RHQ_SERVER_ID_SEQ") @Table(name = "RHQ_SERVER")
commit d1414a0826353671c22044d8438181963e518217 Author: Stefan Negrea snegrea@redhat.com Date: Mon Aug 12 12:09:47 2013 -0500
One more attempt at the justification for the Server interface change.
diff --git a/modules/core/domain/intentional-api-changes-since-4.8.0.xml b/modules/core/domain/intentional-api-changes-since-4.8.0.xml index 6630d2c..b14d38e 100644 --- a/modules/core/domain/intentional-api-changes-since-4.8.0.xml +++ b/modules/core/domain/intentional-api-changes-since-4.8.0.xml @@ -11,7 +11,7 @@ <difference> <className>org/rhq/core/domain/cloud/Server</className> <differenceType>7004</differenceType><!-- num argments changed --> - <method>clearStatus(*)</method> + <method>clearStatus()</method> <justification> This class is not exposed remotely. The previous method signature was doing a blind clear of the status. The new method will clear only the specified status.
commit b00b9503c9367919969a7920d2a2091a9c7268d1 Author: Stefan Negrea snegrea@redhat.com Date: Mon Aug 12 12:09:10 2013 -0500
Update code related to initial value of status field. Also, reserved the first five bits of the bitmask for debug purposes.
Because the field was initialized to -1 it was wrongly computing flags during startup resulting in a false manual maintenance mode.
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/Server.java b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/Server.java index caeabcf..7fa82f7 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/Server.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/Server.java @@ -49,7 +49,7 @@ import org.rhq.core.domain.resource.Agent;
/** * An RHQ server node in the cluster - * + * * @author Joseph Marques */ @Entity(name = "Server") @@ -88,7 +88,9 @@ import org.rhq.core.domain.resource.Agent; + " AND ( s.name <> :thisServerName OR :thisServerName IS NULL ) "), // @NamedQuery(name = Server.QUERY_UPDATE_STATUS_BY_NAME, query = "" // + " UPDATE Server s " // - + " SET s.status = -1 " // negative numbers so that bitmask strat does not conflict with this one + + " SET s.status = 3 " //change this to the only value possible before adding MANUAL_MAINTENANCE_MODE + //this status should never be set to negative numbers since they are values allowed + //by the bitmask. + " WHERE s.status = 0 ") }) @SequenceGenerator(allocationSize = org.rhq.core.domain.util.Constants.ALLOCATION_SIZE, name = "RHQ_SERVER_ID_SEQ", sequenceName = "RHQ_SERVER_ID_SEQ") @Table(name = "RHQ_SERVER") @@ -276,7 +278,7 @@ public class Server implements Serializable { /** * Returns 0 if this server is current. Otherwise, returns a mask of {@link Server.Status} * elements corresponding to the updates that have occurred that are related to this server. - * + * * @return 0 if this server is current. Otherwise, returns a mask of {@link Server.Status} * elements corresponding to the updates that have occurred that are related to this server. */ @@ -320,11 +322,17 @@ public class Server implements Serializable { return Status.getMessages(status); }
+ //Please read BZ 535484 for initial design: https://bugzilla.redhat.com/show_bug.cgi?id=535484 + //Prior to MANUAL_MAINTENANCE_MODE only used for debug purposes, design now changed to + //persist statuses between server restarts in production code public enum Status {
+ //Debug only flags (first five bits are reserved for debug flags) RESOURCE_HIERARCHY_UPDATED(1, "The resource hierarchy has been updated"), // ALERT_DEFINITION(2, "Some alert definition with a global condition category was updated"), - MANUAL_MAINTENANCE_MODE(4,"Manual Maintenance mode setup by the user either via UI or properties file."); + + //Production flags + MANUAL_MAINTENANCE_MODE(32, "Manual Maintenance mode setup by the user either via UI or properties file.");
public final int mask; public final String message;
commit a33fcb1f904a1652636e45e7895584b399d32971 Author: Jay Shaughnessy jshaughn@redhat.com Date: Mon Aug 12 09:53:23 2013 -0400
Fix dbupgrade issue in storagenode address task. Needs to properly support non-postgres dbs.
diff --git a/modules/core/dbutils/src/main/java/org/rhq/core/db/upgrade/StorageNodeAddressUpgradeTask.java b/modules/core/dbutils/src/main/java/org/rhq/core/db/upgrade/StorageNodeAddressUpgradeTask.java index 9fc90ed..0bd8277 100644 --- a/modules/core/dbutils/src/main/java/org/rhq/core/db/upgrade/StorageNodeAddressUpgradeTask.java +++ b/modules/core/dbutils/src/main/java/org/rhq/core/db/upgrade/StorageNodeAddressUpgradeTask.java @@ -6,12 +6,12 @@ import java.sql.Connection; import java.sql.SQLException; import java.util.List;
+import mazz.i18n.Logger; + import org.rhq.core.db.DatabaseType; import org.rhq.core.db.DbUtilsI18NFactory; import org.rhq.core.db.DbUtilsI18NResourceKeys;
-import mazz.i18n.Logger; - /** * Updates the address field of storage node entities to ensure we are storing IP addresses and not hostnames. We want * to store the IP address since that is what Cassandra uses for inter-node communication. JMX operations that return @@ -34,7 +34,7 @@ public class StorageNodeAddressUpgradeTask implements DatabaseUpgradeTask { String storageNodeAddress = null; try { for (Object[] row : results) { - id = (Integer) row[0]; + id = databaseType.getInteger(row[0]); storageNodeAddress = (String) row[1]; InetAddress address = InetAddress.getByName(storageNodeAddress); if (!storageNodeAddress.equals(address.getHostAddress())) {
commit ac4563e9037db0fcc9531cf981d50288ba6490bd Author: Jay Shaughnessy jshaughn@redhat.com Date: Fri Aug 9 15:46:30 2013 -0400
update sample bundles with new required 'compliance' attribute
diff --git a/etc/samples/provisioning/sample-cli/resources/sample-bundle-v1.zip b/etc/samples/provisioning/sample-cli/resources/sample-bundle-v1.zip index c044d9a..b574072 100644 Binary files a/etc/samples/provisioning/sample-cli/resources/sample-bundle-v1.zip and b/etc/samples/provisioning/sample-cli/resources/sample-bundle-v1.zip differ diff --git a/etc/samples/provisioning/sample-cli/resources/sample-bundle-v2.zip b/etc/samples/provisioning/sample-cli/resources/sample-bundle-v2.zip index f8b3392..ad42eb7 100644 Binary files a/etc/samples/provisioning/sample-cli/resources/sample-bundle-v2.zip and b/etc/samples/provisioning/sample-cli/resources/sample-bundle-v2.zip differ
commit 34e1c7b697a0b345ae4a7b2385e1bc4aff10e19f Author: Stefan Negrea snegrea@redhat.com Date: Fri Aug 9 18:18:25 2013 -0500
Thanks Eclipse for clipping an import on save :(
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 2f91dba..053b57e 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -85,6 +85,7 @@ import org.rhq.enterprise.server.rest.reporting.MeasurementConverter; import org.rhq.enterprise.server.scheduler.SchedulerLocal; import org.rhq.enterprise.server.storage.StorageClusterSettings; import org.rhq.enterprise.server.storage.StorageClusterSettingsManagerBean; +import org.rhq.enterprise.server.storage.StorageNodeOperationsHandlerLocal; import org.rhq.enterprise.server.util.CriteriaQueryGenerator; import org.rhq.enterprise.server.util.CriteriaQueryRunner; import org.rhq.enterprise.server.util.LookupUtil;
commit 112d5f3a218755dcc68fbb6ff9455560276b69af Author: Stefan Negrea snegrea@redhat.com Date: Fri Aug 9 16:47:12 2013 -0500
Simplify the configuration update method by allowing the plugin to restart the storage node if necessary. This is possible now because the JMX port is no longer required for checking the cluster availability.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index d21be2f..2f91dba 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -85,7 +85,6 @@ import org.rhq.enterprise.server.rest.reporting.MeasurementConverter; import org.rhq.enterprise.server.scheduler.SchedulerLocal; import org.rhq.enterprise.server.storage.StorageClusterSettings; import org.rhq.enterprise.server.storage.StorageClusterSettingsManagerBean; -import org.rhq.enterprise.server.storage.StorageNodeOperationsHandlerLocal; import org.rhq.enterprise.server.util.CriteriaQueryGenerator; import org.rhq.enterprise.server.util.CriteriaQueryRunner; import org.rhq.enterprise.server.util.LookupUtil; @@ -643,7 +642,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN if (storageNodeConfiguration.getThreadStackSize() != null) { parameters.setSimpleValue("threadStackSize", storageNodeConfiguration.getThreadStackSize() + ""); } - parameters.setSimpleValue("restartIfRequired", "false"); + parameters.setSimpleValue("restartIfRequired", "true");
Resource storageNodeResource = storageNode.getResource();
@@ -656,13 +655,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN storageNode.setJmxPort(storageNodeConfiguration.getJmxPort()); entityManager.merge(storageNode);
- //3. Restart the storage node - result = runOperationAndWaitForResult(subject, storageNodeResource, RESTART_OPERATION, - new Configuration()); - - //4. Update the plugin configuration to talk with the new server - //Up to this point communication with the storage node should not have been affected by the intermediate - //changes + //3. Update the plugin configuration to talk with the new server Configuration storageNodePluginConfig = configurationManager.getPluginConfiguration(subject, storageNodeResource.getId());
commit d1aa8e0eaf0cc2ac72b09599ddc406f9928cbdd9 Author: Stefan Negrea snegrea@redhat.com Date: Fri Aug 9 16:39:30 2013 -0500
[BZ 995424] + [BZ 991598] + Code Review changes
Updates to the server operation mode design based on code review and recent BZs. The most important change is initalizing the storage cluster connection ahead of initializing the server. This will elliminate place the server in maintenance mode because the storage cluster connection is not yet initialized.
diff --git a/modules/core/domain/intentional-api-changes-since-4.8.0.xml b/modules/core/domain/intentional-api-changes-since-4.8.0.xml index a8a77b4..6630d2c 100644 --- a/modules/core/domain/intentional-api-changes-since-4.8.0.xml +++ b/modules/core/domain/intentional-api-changes-since-4.8.0.xml @@ -8,4 +8,13 @@ Changing StorageNode.QUERY_FIND_BY_ADDRESS. The constants for native queries shouldn't be part of the public API. </justification> </difference> + <difference> + <className>org/rhq/core/domain/cloud/Server</className> + <differenceType>7004</differenceType><!-- num argments changed --> + <method>clearStatus(*)</method> + <justification> + This class is not exposed remotely. The previous method signature was doing a blind clear of the status. + The new method will clear only the specified status. + </justification> + </difference> </differences> diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/Server.java b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/Server.java index c43a234..caeabcf 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/Server.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/Server.java @@ -241,19 +241,19 @@ public class Server implements Serializable { NORMAL("This server is a Normal Member of the HA server cloud", true);
public final String message; - private final boolean configurable; + private final boolean readOnly;
private OperationMode(String message, boolean configurable) { this.message = message; - this.configurable = configurable; + this.readOnly = configurable; }
public String getMessage() { return message; }
- public boolean isConfigurable() { - return configurable; + public boolean isReadOnly() { + return readOnly; } }
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/TopologyManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/TopologyManagerBean.java index 4db1697..c4225de 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/TopologyManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/TopologyManagerBean.java @@ -241,7 +241,7 @@ public class TopologyManagerBean implements TopologyManagerLocal { if (mode == null) { throw new IllegalArgumentException("Mode cannot be null."); } - if (!mode.isConfigurable()) { + if (!mode.isReadOnly()) { throw new IllegalArgumentException("Cannot directly set a mode that is not configurable. Mode " + mode.name() + " is not configurable."); } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/instance/ServerManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/instance/ServerManagerBean.java index 44bd557..ddbd0ba 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/instance/ServerManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/instance/ServerManagerBean.java @@ -270,7 +270,7 @@ public class ServerManagerBean implements ServerManagerLocal { lastEstablishedServerMode = serverMode; server.setOperationMode(lastEstablishedServerMode); server.setMtime(System.currentTimeMillis()); - } catch (Exception e) { + } catch (Throwable e) { log.error("Unable to change HA Server Mode from " + lastEstablishedServerMode + " to " + serverMode + ": " + e); } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/StartupBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/StartupBean.java index b0187e5..d99bb7b 100644 --- 
a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/StartupBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/StartupBean.java @@ -185,6 +185,10 @@ public class StartupBean implements StartupLocal { log.error("Could not load ResourceFacets cache.", t); }
+ //Server depends on the storage cluster availability. Since the storage client init just + //establishes connectivity with the storage cluster, then run it before the server init. + initStorageClient(); + // Before starting determine the operating mode of this server and // take any necessary initialization action. Must happen before comm startup since listeners // may be added. @@ -203,7 +207,6 @@ public class StartupBean implements StartupLocal { startPluginDeployer(); // make sure this is initialized before starting the server plugin container startServerPluginContainer(); // before comm in case an agent wants to talk to it upgradeRhqUserSecurityDomainIfNeeded(); - initStorageClient(); startServerCommunicationServices(); startScheduler(); scheduleJobs(); @@ -436,7 +439,6 @@ public class StartupBean implements StartupLocal { */ private void initStorageClient() { storageClientManager.init(); - serverManager.establishCurrentServerMode(); }
/** @@ -659,7 +661,7 @@ public class StartupBean implements StartupLocal { log.error("Cannot create storage cluster read repair job", e); } } - + /** * This seeds the agent clients cache with clients for all known agents. These clients will be started so they can * immediately begin to send any persisted guaranteed messages that might already exist. This method must be called diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/StorageSession.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/StorageSession.java index c8f44d9..3f7af3f 100644 --- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/StorageSession.java +++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/StorageSession.java @@ -29,14 +29,6 @@ public class StorageSession implements Host.StateListener {
public void addStorageStateListener(StorageStateListener listener) { listeners.add(listener); - - for (Host host : wrappedSession.getCluster().getMetadata().getAllHosts()) { - if(host.getMonitor().isUp()){ - listener.onStorageNodeUp(host.getAddress()); - } else { - listener.onStorageNodeUp(host.getAddress()); - } - } }
public ResultSet execute(String query) {
commit b6e84bf4ea7ed48672c1309c571634560f5b38af Author: Mike Thompson mithomps@redhat.com Date: Fri Aug 9 14:00:58 2013 -0700
Fix graph labels for chrome & IE.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java index 3e573dd..64c355d 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java @@ -260,8 +260,6 @@ public class AvailabilityOverUnderGraphType implements AvailabilityGraphType { .attr("fill", "#b0b0b0") .attr("stroke-width", "0.5") .attr("transform", "translate(0," + height + ")") - .attr("letter-spacing", "3") - .style("text-anchor", "end") .call(xAxis);
svg.append("text") @@ -296,6 +294,8 @@ public class AvailabilityOverUnderGraphType implements AvailabilityGraphType {
}
+ + function createHovers() { $wnd.jQuery('svg rect.availBars').tipsy({ gravity: 's', diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/StackedBarMetricGraphImpl.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/StackedBarMetricGraphImpl.java index 67552d3..0c95f44 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/StackedBarMetricGraphImpl.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/StackedBarMetricGraphImpl.java @@ -469,8 +469,6 @@ public class StackedBarMetricGraphImpl extends AbstractMetricGraph { svg.append("g") .attr("class", "x axis") .attr("transform", "translate(0," + height + ")") - .attr("letter-spacing", "3") - .style("text-anchor", "end") .call(xAxis);
@@ -481,7 +479,6 @@ public class StackedBarMetricGraphImpl extends AbstractMetricGraph { .append("text") .attr("transform", "rotate(-90),translate( -60,0)") .attr("y", -30) - .attr("letter-spacing", "3") .style("text-anchor", "end") .text(chartContext.yAxisUnits === "NONE" ? "" : chartContext.yAxisUnits);
commit 6757c52a42d6a91b1735544eaba18f5134820778 Author: John Sanda jsanda@redhat.com Date: Fri Aug 9 12:33:00 2013 -0400
MetricsServer no longer requires timestamp for initialization
diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java index d13fddb..44a4646 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java @@ -40,7 +40,6 @@ import java.util.regex.Pattern; import javax.ejb.EJB; import javax.management.MBeanServer; import javax.management.MalformedObjectNameException; -import javax.management.ObjectInstance; import javax.management.ObjectName; import javax.naming.Context; import javax.naming.InitialContext; @@ -527,7 +526,7 @@ public abstract class AbstractEJB3Test extends Arquillian { } } } - storageClientManager.init(System.currentTimeMillis() - 100000); + storageClientManager.init(); beforeMethod(); beforeMethod(method);
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/StartupBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/StartupBean.java index 3c19896..b0187e5 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/StartupBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/StartupBean.java @@ -435,7 +435,7 @@ public class StartupBean implements StartupLocal { * Initalizes the storage client subsystem which is needed for reading/writing metric data. */ private void initStorageClient() { - storageClientManager.init(serverManager.getServer().getCtime()); + storageClientManager.init(); serverManager.establishCurrentServerMode(); }
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java index a4e0b24..6828b12 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java @@ -73,7 +73,7 @@ public class StorageClientManagerBean { private boolean initialized; private StorageClusterMonitor storageClusterMonitor;
- public synchronized void init(long ctime) { + public synchronized void init() { if (initialized) { if (log.isDebugEnabled()) { log.debug("Storage client subsystem is already initialized. Skipping initialization."); @@ -102,7 +102,7 @@ public class StorageClientManagerBean {
metricsDAO = new MetricsDAO(session, metricsConfiguration);
- initMetricsServer(ctime); + initMetricsServer();
initialized = true; log.info("Storage client subsystem is now initialized"); @@ -175,7 +175,7 @@ public class StorageClientManagerBean { return cluster.connect(RHQ_KEYSPACE); }
- private void initMetricsServer(long serverInstallTime) { + private void initMetricsServer() { if (log.isDebugEnabled()) { log.debug("Initializing " + MetricsServer.class.getName()); } @@ -186,7 +186,7 @@ public class StorageClientManagerBean { DateTimeService dateTimeService = new DateTimeService(); dateTimeService.setConfiguration(metricsConfiguration); metricsServer.setDateTimeService(dateTimeService); - metricsServer.init(serverInstallTime); + metricsServer.init(); }
private String getRequiredStorageProperty(String property) {
commit 8c41a29b55cb0edcd136a5a1814c3577130e0ce0 Author: John Sanda jsanda@redhat.com Date: Fri Aug 9 12:26:07 2013 -0400
refactor how we determine the most recently stored raw data
I was previously using the server creation time to determine how far back to query the metrics_index table to find the most recent raw data. That logic breaks in an HA environment with multiple servers.
This change simplifies things a bit. We only need to check up until the duration of the raw retention period which is 7 days. Anything older than that will be purged.
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsServer.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsServer.java index ca17bf6..f69ddc1 100644 --- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsServer.java +++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsServer.java @@ -69,8 +69,6 @@ public class MetricsServer {
private Semaphore semaphore = new Semaphore(100);
- private boolean shutdown = false; - private boolean pastAggregationMissed;
private Long mostRecentRawDataPriorToStartup; @@ -87,16 +85,24 @@ public class MetricsServer { this.dateTimeService = dateTimeService; }
- public void init(long serverInstallTime) { - determineMostRecentRawDataSinceLastShutdown(serverInstallTime); + public void init() { + determineMostRecentRawDataSinceLastShutdown(); }
- private void determineMostRecentRawDataSinceLastShutdown(long serverInstallTime) { + /** + * In normal operating mode we compute aggregates from the last hour. If the server has + * been down, we need to determine the most recently stored raw data so we know the + * starting hour for which to compute aggregates. We only need to check up to the raw + * retention period though since anything older than that will automatically get + * purged. + */ + private void determineMostRecentRawDataSinceLastShutdown() { DateTime previousHour = currentHour().minusHours(1); + DateTime oldestRawTime = previousHour.minus(configuration.getRawRetention());
ResultSet resultSet = dao.setFindTimeSliceForIndex(MetricsTable.ONE_HOUR, previousHour.getMillis()); Row row = resultSet.one(); - while (row == null && previousHour.getMillis() >= serverInstallTime) { + while (row == null && previousHour.compareTo(oldestRawTime) > 0) { previousHour = previousHour.minusHours(1); resultSet = dao.setFindTimeSliceForIndex(MetricsTable.ONE_HOUR, previousHour.getMillis()); row = resultSet.one(); @@ -129,7 +135,6 @@ public class MetricsServer { }
public void shutdown() { - shutdown = true; }
public RawNumericMetric findLatestValueForResource(int scheduleId) { diff --git a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsServerTest.java b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsServerTest.java index 879f40c..2c140f6 100644 --- a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsServerTest.java +++ b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsServerTest.java @@ -355,7 +355,7 @@ public class MetricsServerTest extends CassandraIntegrationTest { // 2) re-initialize the metrics server // 3) insert some more raw data metricsServer.setCurrentHour(hour15); - metricsServer.init(hour0().plusHours(2).getMillis()); + metricsServer.init();
rawData = new HashSet<MeasurementDataNumeric>(); rawData.add(new MeasurementDataNumeric(hour14.plusMinutes(20).getMillis(), scheduleId, 3.0)); @@ -420,7 +420,7 @@ public class MetricsServerTest extends CassandraIntegrationTest { // 2) re-initialize the metrics server // 3) insert some more raw data metricsServer.setCurrentHour(hour9); - metricsServer.init(hour0().minusDays(1).plusHours(4).getMillis()); + metricsServer.init();
rawData = new HashSet<MeasurementDataNumeric>(); rawData.add(new MeasurementDataNumeric(hour8.plusMinutes(20).getMillis(), scheduleId, 8.0));
commit 6045c3edf38561edb7af37d2443a8bf77ced1a82 Author: Jay Shaughnessy jshaughn@redhat.com Date: Fri Aug 9 09:30:03 2013 -0400
Add intentional api changes for fine-grained bundle permissions work
diff --git a/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml index 44886b4..40e826a 100644 --- a/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml +++ b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml @@ -57,4 +57,109 @@ <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> </difference>
+ <difference> + <className>org/rhq/enterprise/server/bundle/BundleManagerRemote</className> + <differenceType>7012</differenceType> <!-- method added to an interface --> + <method>void assignBundlesToBundleGroup(org.rhq.core.domain.auth.Subject, int, int[])</method> + <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> + </difference> + + <difference> + <className>org/rhq/enterprise/server/bundle/BundleManagerRemote</className> + <differenceType>7012</differenceType> <!-- method added to an interface --> + <method>org.rhq.core.domain.bundle.BundleGroup createBundleGroup(org.rhq.core.domain.auth.Subject, java.lang.String, java.lang.String)</method> + <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> + </difference> + + <difference> + <className>org/rhq/enterprise/server/bundle/BundleManagerRemote</className> + <differenceType>7012</differenceType> <!-- method added to an interface --> + <method>org.rhq.core.domain.bundle.BundleVersion createInitialBundleVersionViaByteArray(org.rhq.core.domain.auth.Subject, int, byte[])</method> + <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> + </difference> + + <difference> + <className>org/rhq/enterprise/server/bundle/BundleManagerRemote</className> + <differenceType>7012</differenceType> <!-- method added to an interface --> + <method>org.rhq.core.domain.bundle.BundleVersion createInitialBundleVersionViaFile(org.rhq.core.domain.auth.Subject, int, java.io.File)</method> + <justification>Adding a method to a remote API interface is safe. 
This is newly implemented functionality.</justification> + </difference> + + <difference> + <className>org/rhq/enterprise/server/bundle/BundleManagerRemote</className> + <differenceType>7012</differenceType> <!-- method added to an interface --> + <method>org.rhq.core.domain.bundle.BundleVersion createInitialBundleVersionViaRecipe(org.rhq.core.domain.auth.Subject, int, java.lang.String)</method> + <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> + </difference> + + <difference> + <className>org/rhq/enterprise/server/bundle/BundleManagerRemote</className> + <differenceType>7012</differenceType> <!-- method added to an interface --> + <method>org.rhq.core.domain.bundle.BundleVersion createInitialBundleVersionViaURL(org.rhq.core.domain.auth.Subject, int, java.lang.String)</method> + <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> + </difference> + + <difference> + <className>org/rhq/enterprise/server/bundle/BundleManagerRemote</className> + <differenceType>7012</differenceType> <!-- method added to an interface --> + <method>org.rhq.core.domain.bundle.BundleVersion createInitialBundleVersionViaURL(org.rhq.core.domain.auth.Subject, int, java.lang.String, java.lang.String, java.lang.String)</method> + <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> + </difference> + + <difference> + <className>org/rhq/enterprise/server/bundle/BundleManagerRemote</className> + <differenceType>7012</differenceType> <!-- method added to an interface --> + <method>void deleteBundleGroups(org.rhq.core.domain.auth.Subject, int[])</method> + <justification>Adding a method to a remote API interface is safe. 
This is newly implemented functionality.</justification> + </difference> + + <difference> + <className>org/rhq/enterprise/server/bundle/BundleManagerRemote</className> + <differenceType>7012</differenceType> <!-- method added to an interface --> + <method>org.rhq.core.domain.util.PageList findBundleGroupsByCriteria(org.rhq.core.domain.auth.Subject, org.rhq.core.domain.criteria.BundleGroupCriteria)</method> + <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> + </difference> + + <difference> + <className>org/rhq/enterprise/server/bundle/BundleManagerRemote</className> + <differenceType>7012</differenceType> <!-- method added to an interface --> + <method>void unassignBundlesFromBundleGroup(org.rhq.core.domain.auth.Subject, int, int[])</method> + <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> + </difference> + + <difference> + <className>org/rhq/enterprise/server/bundle/RoleManagerRemote</className> + <differenceType>7012</differenceType> <!-- method added to an interface --> + <method>void addBundleGroupsToRole(org.rhq.core.domain.auth.Subject, int, int[])</method> + <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> + </difference> + + <difference> + <className>org/rhq/enterprise/server/bundle/RoleManagerRemote</className> + <differenceType>7012</differenceType> <!-- method added to an interface --> + <method>void addRolesToBundleGroup(org.rhq.core.domain.auth.Subject, int, int[])</method> + <justification>Adding a method to a remote API interface is safe. 
This is newly implemented functionality.</justification> + </difference> + + <difference> + <className>org/rhq/enterprise/server/bundle/RoleManagerRemote</className> + <differenceType>7012</differenceType> <!-- method added to an interface --> + <method>void removeBundleGroupsFromRole(org.rhq.core.domain.auth.Subject, int, int[])</method> + <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> + </difference> + + <difference> + <className>org/rhq/enterprise/server/bundle/RoleManagerRemote</className> + <differenceType>7012</differenceType> <!-- method added to an interface --> + <method>void removeRolesFromBundleGroup(org.rhq.core.domain.auth.Subject, int, int[])</method> + <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> + </difference> + + <difference> + <className>org/rhq/enterprise/server/bundle/RoleManagerRemote</className> + <differenceType>7012</differenceType> <!-- method added to an interface --> + <method>void setAssignedBundleGroups(org.rhq.core.domain.auth.Subject, int, int[])</method> + <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> + </difference> + </differences>
commit 08e9d3b0328921ffeed978a59577b1be35a68dc8 Author: John Sanda jsanda@redhat.com Date: Fri Aug 9 10:11:50 2013 -0400
fixing test failures
diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/discovery/DiscoveryBossBeanTest.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/discovery/DiscoveryBossBeanTest.java index c4df956..607966b 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/discovery/DiscoveryBossBeanTest.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/discovery/DiscoveryBossBeanTest.java @@ -56,6 +56,7 @@ import org.xml.sax.InputSource;
import org.rhq.core.clientapi.agent.discovery.DiscoveryAgentService; import org.rhq.core.clientapi.server.discovery.InventoryReport; +import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.configuration.Configuration; import org.rhq.core.domain.criteria.ResourceCriteria; import org.rhq.core.domain.discovery.MergeInventoryReportResults; @@ -475,6 +476,12 @@ public class DiscoveryBossBeanTest extends AbstractEJB3Test { storageNode.setUuid(String.valueOf(new Random().nextInt())); storagePlatform.addChildResource(storageNode);
+ storageNode.setPluginConfiguration(Configuration.builder() + .addSimple("nativeTransportPort", 9142) + .addSimple("storagePort", 7100) + .addSimple("host", "localhost") + .build()); + inventoryReport.addAddedRoot(storagePlatform);
// Merge this inventory report @@ -548,6 +555,11 @@ public class DiscoveryBossBeanTest extends AbstractEJB3Test { doomed = q.getResultList(); for (Object removeMe : doomed) { Resource res = em.getReference(Resource.class, ((Resource) removeMe).getId()); + StorageNode storageNode = findStorageNode(res); + if (storageNode != null) { + storageNode.setResource(null); + } + System.out.println("Deleting resource " + res); ResourceTreeHelper.deleteResource(em, res); } em.flush(); @@ -573,6 +585,15 @@ public class DiscoveryBossBeanTest extends AbstractEJB3Test { } }
+ private StorageNode findStorageNode(Resource resource) { + List<StorageNode> storageNodes = em.createQuery("SELECT s FROM StorageNode s where s.resource = :resource", + StorageNode.class).setParameter("resource", resource).getResultList(); + if (storageNodes.isEmpty()) { + return null; + } + return storageNodes.get(0); + } + void setDbType(IDatabaseConnection connection) throws Exception { DatabaseConfig dbConfig = connection.getConfig(); String name = connection.getConnection().getMetaData().getDatabaseProductName().toLowerCase(); diff --git a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java index aafa481..63517e1 100644 --- a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java +++ b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java @@ -261,7 +261,7 @@ public class StorageNodeComponentITest { @Test(dependsOnMethods = "restartStorageNode") public void prepareForBootstrap() throws Exception { Configuration params = Configuration.builder().addSimple("cqlPort", 9242).addSimple("gossipPort", 7200) - .openList("storageNodeIPAddresses", "storageNodeIPAddresse").addSimples("127.0.0.1", "127.0.0.2") + .openList("addresses", "address").addSimples("127.0.0.1", "127.0.0.2") .closeList().build();
OperationManager operationManager = PluginContainer.getInstance().getOperationManager();
commit 8a54d3319629efb0103d8531c6ec46614da2498a Author: Jirka Kremser jkremser@redhat.com Date: Fri Aug 9 15:54:57 2013 +0200
StorageNodeConfigurationEditor now updates the configuration for particular storage nodes.
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeConfigurationComposite.java b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeConfigurationComposite.java index 32d8ab3..d7d7b7d 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeConfigurationComposite.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeConfigurationComposite.java @@ -120,7 +120,8 @@ public class StorageNodeConfigurationComposite implements Serializable { */ public String toString() { StringBuilder builder = new StringBuilder(); - builder.append("storageNode.addresss=").append(storageNode.getAddress()).append(", "); + builder.append("storageNode.addresss=").append(storageNode == null ? "unknown" : storageNode.getAddress()) + .append(", "); builder.append("jmxPort=").append(jmxPort).append(","); builder.append("heapSize=").append(heapSize).append(", "); builder.append("heapNewSize=").append(heapSize).append(", "); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeConfigurationEditor.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeConfigurationEditor.java index 15bb412..3c4923b 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeConfigurationEditor.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeConfigurationEditor.java @@ -23,51 +23,83 @@ import java.util.LinkedHashSet; import java.util.List; import java.util.Set;
+import com.google.gwt.user.client.rpc.AsyncCallback; import com.smartgwt.client.types.Alignment; import com.smartgwt.client.types.Overflow; +import com.smartgwt.client.util.BooleanCallback; +import com.smartgwt.client.util.SC; import com.smartgwt.client.widgets.events.ClickEvent; import com.smartgwt.client.widgets.events.ClickHandler; import com.smartgwt.client.widgets.form.fields.FormItem; import com.smartgwt.client.widgets.form.fields.StaticTextItem; import com.smartgwt.client.widgets.form.fields.TextItem; +import com.smartgwt.client.widgets.form.validator.IsIntegerValidator; +import com.smartgwt.client.widgets.form.validator.Validator; import com.smartgwt.client.widgets.layout.LayoutSpacer; +import com.smartgwt.client.widgets.toolbar.ToolStrip;
import org.rhq.core.domain.cloud.StorageNodeConfigurationComposite; import org.rhq.core.domain.measurement.MeasurementUnits; +import org.rhq.enterprise.gui.coregui.client.CoreGUI; import org.rhq.enterprise.gui.coregui.client.RefreshableView; -import org.rhq.enterprise.gui.coregui.client.components.configuration.PropertyValueChangeEvent; -import org.rhq.enterprise.gui.coregui.client.components.configuration.PropertyValueChangeListener; import org.rhq.enterprise.gui.coregui.client.components.form.EnhancedDynamicForm; import org.rhq.enterprise.gui.coregui.client.components.form.ValueWithUnitsItem; +import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedIButton; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedToolStrip; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; +import org.rhq.enterprise.gui.coregui.client.util.message.Message;
/** * The component for editing the storage node configuration * * @author Jirka Kremser */ -public class StorageNodeConfigurationEditor extends EnhancedVLayout implements PropertyValueChangeListener, - RefreshableView { +public class StorageNodeConfigurationEditor extends EnhancedVLayout implements RefreshableView {
private EnhancedDynamicForm form; - private EnhancedToolStrip toolStrip; + private EnhancedIButton saveButton; private boolean oddRow; private final StorageNodeConfigurationComposite configuration; + + private static String FIELD_HEAP_MAX = "heap_max"; + private static String FIELD_HEAP_NEW = "heap_new"; + private static String FIELD_THREAD_STACK_SIZE = "thread_stack_size"; + private static String FIELD_JMX_PORT = "jmx_port";
public StorageNodeConfigurationEditor(final StorageNodeConfigurationComposite configuration) { super(); - this.configuration = configuration; - + this.configuration = configuration; }
private void save() { - + updateConfiguration(); + GWTServiceLookup.getStorageService().updateConfiguration(configuration, new AsyncCallback<Boolean>() { + public void onSuccess(Boolean result) { + if (result) { + Message msg = new Message("Storage node settings were successfully updated.", Message.Severity.Info); + CoreGUI.getMessageCenter().notify(msg); + } else { + onFailure(new Exception("Operation failed.")); + } + } + public void onFailure(Throwable caught) { + CoreGUI.getErrorHandler().handleError("Unable to update the storage node settings.", caught); + } + }); }
+ private List<FormItem> buildOneFormRowWithCombobox(String name, String title, String value, String description) { + return buildOneFormRow(name, title, value, description, true, null); + } + + private List<FormItem> buildOneFormRowWithValidator(String name, String title, String value, String description, + Validator validator) { + return buildOneFormRow(name, title, value, description, false, validator); + } + private List<FormItem> buildOneFormRow(String name, String title, String value, String description, - boolean unitsDropdown) { + boolean unitsDropdown, Validator validator) { List<FormItem> fields = new ArrayList<FormItem>(); StaticTextItem nameItem = new StaticTextItem(); nameItem.setStartRow(true); @@ -78,13 +110,17 @@ public class StorageNodeConfigurationEditor extends EnhancedVLayout implements P
FormItem valueItem = null; if (unitsDropdown) { - valueItem = buildJMXMemoryItem(name, value); + valueItem = buildJVMMemoryItem(name, value); } else { valueItem = new TextItem(); valueItem.setName(name); valueItem.setValue(value); valueItem.setWidth(220); + if (validator != null) { + valueItem.setValidators(validator); + } } + valueItem.setValidateOnChange(true); valueItem.setAlign(Alignment.CENTER); valueItem.setShowTitle(false); valueItem.setRequired(true); @@ -102,7 +138,7 @@ public class StorageNodeConfigurationEditor extends EnhancedVLayout implements P return fields; }
- private FormItem buildJMXMemoryItem(String name, String value) { + private FormItem buildJVMMemoryItem(String name, String value) { Set<MeasurementUnits> supportedUnits = new LinkedHashSet<MeasurementUnits>(); supportedUnits.add(MeasurementUnits.MEGABYTES); supportedUnits.add(MeasurementUnits.GIGABYTES); @@ -157,49 +193,83 @@ public class StorageNodeConfigurationEditor extends EnhancedVLayout implements P
List<FormItem> items = buildHeaderItems(); items - .addAll(buildOneFormRow( - "foo2", + .addAll(buildOneFormRowWithCombobox( + FIELD_HEAP_MAX, "Max Heap Size", configuration.getHeapSize(), - "The maximum heap size. This value will be used with the -Xmx JVM option. The value should be an integer with a suffix of M or G to indicate megabytes or gigabytes.", - true)); + "The maximum heap size. This value will be used with the -Xmx JVM option. The value should be an integer with a suffix of M or G to indicate megabytes or gigabytes.")); items - .addAll(buildOneFormRow( - "foo", + .addAll(buildOneFormRowWithCombobox( + FIELD_HEAP_NEW, "Heap New Size", configuration.getHeapNewSize(), - "The size of the new generation portion of the heap. This value will be used with the -Xmn JVM option. The value should be an integer with a suffix of M or G to indicate megabytes or gigabytes.", - true)); - - items.addAll(buildOneFormRow("foo3", "Thread Stack Size", configuration.getThreadStackSize(), - "asdfsdfffa df sdbla", false)); - items.addAll(buildOneFormRow("foo4", "JMX Port", String.valueOf(configuration.getJmxPort()), - "sdfla ffa blsdfa", false)); + "The size of the new generation portion of the heap. This value will be used with the -Xmn JVM option. The value should be an integer with a suffix of M or G to indicate megabytes or gigabytes.")); +// IntegerRangeValidator positiveInteger = new IntegerRangeValidator(); +// positiveInteger.setMin(1); +// positiveInteger.setMax(Integer.MAX_VALUE); + IsIntegerValidator validator = new IsIntegerValidator(); + items.addAll(buildOneFormRowWithValidator(FIELD_THREAD_STACK_SIZE, "Thread Stack Size", configuration.getThreadStackSize(), + "The thread stack size. This memory is allocated to each thread off heap. 
The value should be an integer that will be interpreted in kilobytes.", validator)); + +// IntegerRangeValidator portValidator = new IntegerRangeValidator(); +// portValidator.setMin(1); +// portValidator.setMax(65535); // (1 << 16) - 1 + validator = new IsIntegerValidator(); + items.addAll(buildOneFormRowWithValidator(FIELD_JMX_PORT, "JMX Port", String.valueOf(configuration.getJmxPort()), + "The JMX port for the RHQ Storage Node", validator)); form.setFields(items.toArray(new FormItem[items.size()])); + form.setWidth100(); + form.setOverflow(Overflow.VISIBLE); + setWidth100(); + + LayoutSpacer spacer = new LayoutSpacer(); + spacer.setWidth100(); + + ToolStrip toolStrip = buildToolStrip(); + setMembers(form, spacer, toolStrip); form.validate(); - - EnhancedIButton saveButton = new EnhancedIButton(MSG.common_button_save()); + markForRedraw(); + } + + private EnhancedToolStrip buildToolStrip() { + saveButton = new EnhancedIButton(MSG.common_button_save()); saveButton.addClickHandler(new ClickHandler() { public void onClick(ClickEvent clickEvent) { - save(); + if (form.validate()) { + SC.ask( + "Changing the storage node configuration requires restart of storage node. Do you want to continue?", + new BooleanCallback() { + @Override + public void execute(Boolean value) { + if (value) { + save(); + } + } + }); + } } }); - toolStrip = new EnhancedToolStrip(); + EnhancedToolStrip toolStrip = new EnhancedToolStrip(); toolStrip.setWidth100(); toolStrip.setMembersMargin(5); toolStrip.setLayoutMargin(5); toolStrip.addMember(saveButton); - form.setWidth100(); - form.setOverflow(Overflow.VISIBLE); - setWidth100(); - LayoutSpacer spacer = new LayoutSpacer(); - spacer.setWidth100(); - setMembers(form, spacer, toolStrip); - markForRedraw(); - }
- @Override - public void propertyValueChanged(PropertyValueChangeEvent event) { - + return toolStrip; + } + + private StorageNodeConfigurationComposite updateConfiguration() { + configuration.setHeapSize(getJVMMemoryString(form.getField(FIELD_HEAP_MAX).getValue().toString())); + configuration.setHeapNewSize(getJVMMemoryString(form.getField(FIELD_HEAP_NEW).getValue().toString())); + configuration.setThreadStackSize(form.getValueAsString(FIELD_THREAD_STACK_SIZE)); + configuration.setJmxPort(Integer.parseInt(form.getValueAsString(FIELD_JMX_PORT))); + return configuration; + } + + private String getJVMMemoryString(String raw) { + if (raw == null || raw.trim().isEmpty()) { + throw new IllegalArgumentException("input string is null or empty"); + } + return raw.trim().substring(0, raw.trim().length() - 1); } } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java index e395b2b..a5ae6eb 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java @@ -29,10 +29,8 @@ import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDat
import java.util.ArrayList; import java.util.Arrays; -import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Map.Entry;
import com.google.gwt.user.client.Timer; import com.google.gwt.user.client.rpc.AsyncCallback; @@ -48,10 +46,6 @@ import com.smartgwt.client.widgets.layout.SectionStackSection;
import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.cloud.StorageNodeConfigurationComposite; -import org.rhq.core.domain.configuration.definition.ConfigurationDefinition; -import org.rhq.core.domain.configuration.definition.PropertyDefinition; -import org.rhq.core.domain.configuration.definition.PropertyGroupDefinition; -import org.rhq.core.domain.criteria.ResourceCriteria; import org.rhq.core.domain.criteria.StorageNodeCriteria; import org.rhq.core.domain.measurement.composite.MeasurementDataNumericHighLowComposite; import org.rhq.core.domain.resource.Resource; @@ -64,10 +58,6 @@ import org.rhq.enterprise.gui.coregui.client.ViewPath; import org.rhq.enterprise.gui.coregui.client.components.table.TimestampCellFormatter; import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; import org.rhq.enterprise.gui.coregui.client.inventory.InventoryView; -import org.rhq.enterprise.gui.coregui.client.inventory.common.detail.summary.AbstractActivityView.ChartViewWindow; -import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.configuration.ConfigurationFilter; -import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.configuration.ResourceConfigurationEditView; -import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.D3GraphListView; import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.operation.history.ResourceOperationHistoryListView; import org.rhq.enterprise.gui.coregui.client.util.Log; import org.rhq.enterprise.gui.coregui.client.util.MeasurementUtility; @@ -94,10 +84,7 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab private SectionStackSection operationSection; private SectionStackSection detailsAndLoadSection; private StaticTextItem alertsItem; - private int expandedSection = -1; private HTMLFlow header; - private ChartViewWindow window; - private D3GraphListView graphView;
private volatile int initSectionCount = 0; private int unackAlerts = -1; @@ -114,14 +101,8 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab sectionStack.setVisibilityMode(VisibilityMode.MULTIPLE); sectionStack.setWidth100(); sectionStack.setHeight100(); -// sectionStack.setMargin(5); -// sectionStack.setOverflow(Overflow.VISIBLE); + sectionStack.setCanResizeSections(false); } - -// public StorageNodeDetailView(int storageNodeId, int expandedSection) { -// this(storageNodeId); -// this.expandedSection = expandedSection; -// }
@Override protected void onInit() { @@ -140,17 +121,9 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab final StorageNode node = storageNodes.get(0); header.setContents("<div style='text-align: center; font-weight: bold; font-size: medium;'> Storage Node (" + node.getAddress() + ")</div>"); - Resource res = node.getResource(); - if (res != null) { -// fetchResourceComposite(res.getId()); - } else { - // skip this if the resource id is not there - initSectionCount++; - } fetchStorageNodeConfigurationComposite(node); prepareDetailsSection(node); fetchSparkLineDataForLoadComponent(node); - }
public void onFailure(Throwable caught) { @@ -181,32 +154,6 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab }); }
-// private void fetchResourceComposite(final int resourceId) { -// ResourceCriteria resourceCriteria = new ResourceCriteria(); -// resourceCriteria.addFilterId(resourceId); -// GWTServiceLookup.getResourceService().findResourceCompositesByCriteria(resourceCriteria, -// new AsyncCallback<PageList<ResourceComposite>>() { -// @Override -// public void onFailure(Throwable caught) { -// Message message = new Message(MSG.view_inventory_resource_loadFailed(String.valueOf(resourceId)), -// Message.Severity.Warning); -// CoreGUI.goToView(InventoryView.VIEW_ID.getName(), message); -// initSectionCount = SECTION_COUNT; -// } -// -// @Override -// public void onSuccess(PageList<ResourceComposite> result) { -// if (result.isEmpty()) { -// onFailure(new Exception("Resource with id [" + resourceId + "] does not exist.")); -// } else { -// final ResourceComposite resourceComposite = result.get(0); -//// prepareOperationHistory(resourceComposite); -// prepareResourceConfigEditor(resourceComposite); -// } -// } -// }); -// } - private void fetchSparkLineDataForLoadComponent(final StorageNode storageNode) {
GWTServiceLookup.getStorageService().findStorageNodeLoadDataForLast(storageNode, 8, MeasurementUtility.UNIT_HOURS, @@ -277,12 +224,6 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab if (null != configurationSection) { sectionStack.addSection(configurationSection); } -// if (expandedSection != -1) { -// for (int i = 1; i < SECTION_COUNT; i++) { -// sectionStack.collapseSection(i); -// } -// sectionStack.expandSection(expandedSection); -// } addMember(sectionStack); markForRedraw();
@@ -399,42 +340,12 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab initSectionCount++; }
-// private void prepareResourceConfigEditor(ResourceComposite resourceComposite) { private void prepareResourceConfigEditor(final StorageNodeConfigurationComposite configuration) { - + LayoutSpacer spacer = new LayoutSpacer(); + spacer.setHeight(15); StorageNodeConfigurationEditor editorView = new StorageNodeConfigurationEditor(configuration); - -// ResourceConfigurationEditView editorView = new ResourceConfigurationEditView(resourceComposite); -// ConfigurationFilter filter = new ConfigurationFilter() { -// @Override -// public ConfigurationDefinition filter(ConfigurationDefinition definition) { -// Map<String, PropertyDefinition> filteredConfigurationDefinition = new HashMap<String, PropertyDefinition>(); -// PropertyGroupDefinition groupDef = null; -// for (Entry<String, PropertyDefinition> propertyDefinitionEntry : definition.getPropertyDefinitions().entrySet()) { -// PropertyDefinition propertyDefinition = propertyDefinitionEntry.getValue(); -// if (propertyDefinition.getPropertyGroupDefinition() != null) { -// if (groupDef == null) { -// groupDef = propertyDefinition.getPropertyGroupDefinition(); -//// groupDef.setName("Storage Node Settings"); -// } -// propertyDefinition.setPropertyGroupDefinition(groupDef); -// } -// if (!"heapDumpOnOOMError".equals(propertyDefinition.getName()) -// && !"heapDumpDir".equals(propertyDefinition.getName()) -// && !"minHeapSize".equals(propertyDefinition.getName()) -// && !"gossipPort".equals(propertyDefinition.getName()) -// && !"cqlPort".equals(propertyDefinition.getName())) { -// filteredConfigurationDefinition.put(propertyDefinitionEntry.getKey(), -// propertyDefinitionEntry.getValue()); -// } -// } -// definition.setPropertyDefinitions(filteredConfigurationDefinition); -// return definition; -// } -// }; -// editorView.setFilter(filter); SectionStackSection section = new SectionStackSection("Configuration"); - section.setItems(editorView); + section.setItems(spacer, editorView); section.setExpanded(true); 
section.setCanCollapse(false);
@@ -444,16 +355,8 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab
@Override public void renderView(ViewPath viewPath) { - if (viewPath.toString().endsWith("/Config")) { -// for (int i = 1; i < SECTION_COUNT; i++) { -// sectionStack.collapseSection(i); -// } - expandedSection = 2; -// sectionStack.expandSection(expandedSection); -// detailsSection.setExpanded(false); -// loadSection.setExpanded(false); -// historySection.setExpanded(true); - } +// if (viewPath.toString().endsWith("/Config")) { +// } Log.debug("StorageNodeDetailView: " + viewPath); } } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java index 328545a..bd415b2 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java @@ -189,7 +189,7 @@ public class StorageNodeTableView extends TableSection<StorageNodeDatasource> {
@Override public Canvas getDetailsView(Integer id) { - HTMLFlow header = new HTMLFlow("id = " + id); + HTMLFlow header = new HTMLFlow(""); setHeader(header); return new StorageNodeDetailView(id, header); } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/form/ValueWithUnitsItem.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/form/ValueWithUnitsItem.java index a49600e..8719bec 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/form/ValueWithUnitsItem.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/form/ValueWithUnitsItem.java @@ -59,18 +59,18 @@ public class ValueWithUnitsItem extends CanvasItem {
if (supportedUnits != null && !supportedUnits.isEmpty()) { this.supportedUnits = supportedUnits; - if (null == this.valueUnit) { - this.valueUnit = supportedUnits.iterator().next(); + if (null == valueUnit) { + valueUnit = supportedUnits.iterator().next(); } }
- this.form = new EnhancedDynamicForm(false, false); - this.form.setNumCols(2); - this.form.setColWidths("126", "60"); + form = new EnhancedDynamicForm(false, false); + form.setNumCols(2); + form.setColWidths("126", "60");
final IntegerItem valueItem = new IntegerItem(FIELD_VALUE, title); valueItem.setShowTitle(getShowTitle()); - valueItem.setValue(getValue()); + valueItem.setValue(super.getValue()); IntegerRangeValidator integerRangeValidator = new IntegerRangeValidator(); integerRangeValidator.setMin(1); integerRangeValidator.setMax(Integer.MAX_VALUE); @@ -82,7 +82,7 @@ public class ValueWithUnitsItem extends CanvasItem {
LinkedHashMap<String, String> valueMap = new LinkedHashMap<String, String>(); for (MeasurementUnits unit : supportedUnits) { - valueMap.put(unit.name().toLowerCase(), unit.toString()); + valueMap.put(unit.toString(), unit.toString()); } unitsItem.setValueMap(valueMap); unitsItem.setDefaultToFirstOption(true); @@ -91,7 +91,7 @@ public class ValueWithUnitsItem extends CanvasItem { valueItem.setWidth(126); unitsItem.setWidth(60);
- setCanvas(this.form); + setCanvas(form); }
@Override @@ -109,24 +109,29 @@ public class ValueWithUnitsItem extends CanvasItem { throw new IllegalArgumentException(MSG.widget_durationItem_unitTypeNotSupported(unitType.name())); } if (value != null) { - this.form.setValue(FIELD_VALUE, value); + form.setValue(FIELD_VALUE, value); } else { - this.form.setValue(FIELD_VALUE, (String) null); + form.setValue(FIELD_VALUE, (String) null); } - this.form.setValue(FIELD_UNITS, this.valueUnit.name().toLowerCase()); + form.setValue(FIELD_UNITS, valueUnit.toString());
setValue(value); } + + @Override + public Object getValue() { + return form.getValue(FIELD_VALUE).toString() + form.getValue(FIELD_UNITS).toString(); + }
@Override public Boolean validate() { - return this.form.validate(); + return form.validate(); }
public void setContextualHelp(String contextualHelp) { if (contextualHelp != null) { FormItem item; - item = this.form.getItem(FIELD_UNITS); + item = form.getItem(FIELD_UNITS); FormUtility.addContextualHelp(item, contextualHelp); } } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java index abe759c..6e6df57 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java @@ -83,4 +83,6 @@ public interface StorageGWTService extends RemoteService { Map<String, List<MeasurementDataNumericHighLowComposite>> findStorageNodeLoadDataForLast(StorageNode node, int lastN, int unit, int numPoints) throws RuntimeException;
StorageNodeConfigurationComposite retrieveConfiguration(StorageNode storageNode) throws RuntimeException; + + boolean updateConfiguration(StorageNodeConfigurationComposite storageNodeConfiguration) throws RuntimeException; } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java index 28ea78e..4a01427 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java @@ -26,7 +26,6 @@ import java.util.ArrayList; import java.util.List; import java.util.Map;
-import org.rhq.core.domain.auth.Subject; import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.cloud.StorageNodeConfigurationComposite; import org.rhq.core.domain.cloud.StorageNodeLoadComposite; @@ -162,4 +161,13 @@ public class StorageGWTServiceImpl extends AbstractGWTServiceImpl implements Sto throw getExceptionToThrowToClient(t); } } + + @Override + public boolean updateConfiguration(StorageNodeConfigurationComposite storageNodeConfiguration) throws RuntimeException { + try { + return storageNodeManager.updateConfiguration(getSessionSubject(), storageNodeConfiguration); + } catch (Throwable t) { + throw getExceptionToThrowToClient(t); + } + } }
commit 4f1415af3c1918823a2859955feb47a530c6fff9 Author: Thomas Segismont tsegismo@redhat.com Date: Fri Aug 9 10:13:40 2013 +0200
Removed testFailureIgnore=false from Cassandra and Storage Node plugins because this is not the standard for RHQ builds
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/pom.xml b/modules/common/cassandra-ccm/cassandra-ccm-core/pom.xml index c12c567..a24ce10 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-core/pom.xml +++ b/modules/common/cassandra-ccm/cassandra-ccm-core/pom.xml @@ -220,9 +220,6 @@ <goals> <goal>verify</goal> </goals> - <configuration> - <testFailureIgnore>false</testFailureIgnore> - </configuration> </execution> </executions> </plugin> diff --git a/modules/plugins/rhq-storage/pom.xml b/modules/plugins/rhq-storage/pom.xml index ab97902..308af28 100644 --- a/modules/plugins/rhq-storage/pom.xml +++ b/modules/plugins/rhq-storage/pom.xml @@ -148,9 +148,6 @@ <goals> <goal>verify</goal> </goals> - <configuration> - <testFailureIgnore>false</testFailureIgnore> - </configuration> </execution> </executions> </plugin>
commit 9632b967b863ecb1320b5906bec572a92e2ca86b Merge: 62749ec a6354aa Author: Mike Thompson mithomps@redhat.com Date: Thu Aug 8 19:45:40 2013 -0700
Merge branch 'mtho11/pre4.9'
commit a6354aa825a06776090cb99a6875d9db1e1ce2dd Author: Mike Thompson mithomps@redhat.com Date: Thu Aug 8 19:44:56 2013 -0700
Refactor AvailabilitySummaryPieGraphType to module pattern to hide js vars from global scope.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java index 678724a..72438fd6 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java @@ -23,21 +23,19 @@ import java.util.List;
import com.smartgwt.client.widgets.HTMLFlow;
-import org.rhq.core.domain.measurement.MeasurementUnits; import org.rhq.enterprise.gui.coregui.client.util.Log; -import org.rhq.enterprise.gui.coregui.client.util.MeasurementConverterClient;
/** * Contains the javascript chart definition for an implementation of the d3 availability chart. This implementation is - * just a line that changes color based on availability type: up=green, down=red, orange=disabled, unknown=grey, - * empty=grey, warn=yellow. This version of the availability graph shows continuous intervals. + * just a line that changes color based on availability type: up=green, down=red, unknown=grey, warn=yellow. + * This version of the availability graph shows continuous intervals. * * @author Mike Thompson */ public class AvailabilitySummaryPieGraphType {
- public static final int HEIGHT = 100; - public static final int WIDTH = 100; + public static final int HEIGHT = 75; + public static final int WIDTH = 75;
private List<AvailabilitySummary> availabilitySummaries;
@@ -73,9 +71,7 @@ public class AvailabilitySummaryPieGraphType { // loop through the avail intervals for (AvailabilitySummary availabilitySummary : availabilitySummaries) { sb.append("{ "label":"" + availabilitySummary.getLabel() + "", "); - sb.append(" "value": "" - + MeasurementConverterClient.format(availabilitySummary.getValue(), MeasurementUnits.PERCENTAGE, - true) + "" },"); + sb.append(" "value": "" + availabilitySummary.getValue() * 100 + "" },"); } sb.setLength(sb.length() - 1); } @@ -90,48 +86,74 @@ public class AvailabilitySummaryPieGraphType { */ public native void drawJsniChart() /*-{ console.log("Draw Availability Summary Pie Chart"); + var global = this;
- var global = this, - w = @org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilitySummaryPieGraphType::WIDTH, + var availPieGraph = (function () { + "use strict"; + + var w = @org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilitySummaryPieGraphType::WIDTH, h = @org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilitySummaryPieGraphType::HEIGHT, - r = h / 2, - color = $wnd.d3.scale.category10(), - data = global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilitySummaryPieGraphType::getAvailabilitySummaryJson()(), - vis = $wnd.d3.select("#availSummaryChart svg") - .append("g") - .data(data) - .attr("width", w) - .attr("height", h) - .attr("transform", "translate(" + r + "," + r + ")"), - arc = $wnd.d3.svg.arc() - .outerRadius(r), - pie = $wnd.d3.layout.pie(), - arcs = vis.selectAll("g.slice") - .data(pie) - .enter() - .append("g") - .attr("class", "slice"); - - arcs.append("path") - .attr("fill", function (d, i) { - return color(i); - }) - .attr("d", arc); - - arcs.append("text") - .attr("transform", function (d) { - d.innerRadius = 0; - d.outerRadius = r; - return "translate(" + arc.centroid(d) + ")"; - }) - .attr("text-anchor", "middle") - .style("font-size", "9px") - .style("font-family", "Arial, Verdana, sans-serif;") - .attr("fill", "#000") - .text(function (d, i) { - return data[i].value; - }); - console.log("done with avail summary pie graph"); + outerRadius = w / 2, + innerRadius = 0, + data = $wnd.jQuery.parseJSON(global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilitySummaryPieGraphType::getAvailabilitySummaryJson()()); + + function drawPieGraph() { + + var arc = $wnd.d3.svg.arc() + .innerRadius(innerRadius) + .outerRadius(outerRadius), + + pie = $wnd.d3.layout.pie() + .value(function (d) { + return d.value; + }), + + colorScale = $wnd.d3.scale.ordinal().range(["#8cbe89", "#c5888b", "#d8d8d8"]), + + svg = 
$wnd.d3.select("#availSummaryChart svg") + .append("g") + .attr("width", w) + .attr("height", h), + + arcs = svg.selectAll("g.arc") + .data(pie(data)) + .enter() + .append("g") + .attr("class", "arc") + .attr("transform", "translate(" + outerRadius + "," + outerRadius + ")"); + + arcs.append("path") + .attr("fill", function (d, i) { + return colorScale(i); + }) + .attr("d", arc).append("title").text(function (d) { + return d.label; + }); + + arcs.append("text") + .attr("transform", function (d) { + return "translate(" + arc.centroid(d) + ")"; + }) + .attr("text-anchor", "middle") + .attr("fill", "#FFF") + .style("font-size", "12px") + .style("font-family", "Arial, Verdana, sans-serif;") + .text(function (d) { + return d.value; + }); + } + + return { + drawGraph: function () { + return drawPieGraph(); + } + + }; + + })(); + + availPieGraph.drawGraph(); + console.log("done with avail summary pie graph drawing");
}-*/;
commit 62749ec27b869c81cc0f01d736e68e5decc079ea Author: John Sanda jsanda@redhat.com Date: Thu Aug 8 22:36:36 2013 -0400
removing the storage node resource group
The resource group was primarily used for executing cluster wide operations. Trying to maintain the resource group presents more challenges than benefits, and StorageNodeOperationsHandlerBean has supplanted the group for executing cluster wide operations.
diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/StrippedDownStartupBean.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/StrippedDownStartupBean.java index 58273d8..e10896d 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/StrippedDownStartupBean.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/StrippedDownStartupBean.java @@ -64,20 +64,10 @@ public class StrippedDownStartupBean {
/** * <p> - * Purges the storage node resource group, test server, and any storage nodes created during server initialization - * from a prior test run. - * </p> - * <p> - * Note that the storage node group deletion simply removes the entity from the rhq_resource_group table. At this - * point in the deployment, {@link ResourceGroupManagerLocal#deleteResourceGroup(org.rhq.core.domain.auth.Subject, int)} - * cannot be used; therefore, any test that added storage node resources to the group should take care of removing - * them as well. + * Purges the test server and any storage nodes created during server initialization from a prior test run. * </p> */ public void purgeTestServerAndStorageNodes() { - entityManager.createQuery("DELETE FROM " + ResourceGroup.class.getName() + " WHERE name = :storageNodeGroup") - .setParameter("storageNodeGroup", STORAGE_NODE_GROUP_NAME) - .executeUpdate(); entityManager.createQuery("DELETE FROM " + StorageNode.class.getName()).executeUpdate(); entityManager.createQuery("DELETE FROM " + Server.class.getName() + " WHERE name = :serverName") .setParameter("serverName", TestConstants.RHQ_TEST_SERVER_NAME) diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/StrippedDownStartupBeanPreparation.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/StrippedDownStartupBeanPreparation.java index 5f67888..cf9d84c 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/StrippedDownStartupBeanPreparation.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/StrippedDownStartupBeanPreparation.java @@ -104,7 +104,7 @@ public class StrippedDownStartupBeanPreparation { for (String seedInfo : seedsInfo) { StorageNode storageNode = new StorageNode(); storageNode.parseNodeInformation(seedInfo); - storageNode.setOperationMode(StorageNode.OperationMode.INSTALLED); + 
storageNode.setOperationMode(StorageNode.OperationMode.NORMAL); entityManager.persist(storageNode); } } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index c530530..d21be2f 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -47,7 +47,6 @@ import javax.persistence.TypedQuery; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory;
-import org.rhq.cassandra.schema.SchemaManager; import org.rhq.core.domain.alert.Alert; import org.rhq.core.domain.auth.Subject; import org.rhq.core.domain.authz.Permission; @@ -60,7 +59,6 @@ import org.rhq.core.domain.configuration.Configuration; import org.rhq.core.domain.configuration.PropertyList; import org.rhq.core.domain.configuration.PropertySimple; import org.rhq.core.domain.criteria.AlertCriteria; -import org.rhq.core.domain.criteria.ResourceGroupCriteria; import org.rhq.core.domain.criteria.ResourceOperationHistoryCriteria; import org.rhq.core.domain.criteria.StorageNodeCriteria; import org.rhq.core.domain.measurement.MeasurementAggregate; @@ -68,15 +66,11 @@ import org.rhq.core.domain.measurement.MeasurementUnits; import org.rhq.core.domain.measurement.composite.MeasurementDataNumericHighLowComposite; import org.rhq.core.domain.operation.OperationRequestStatus; import org.rhq.core.domain.operation.ResourceOperationHistory; -import org.rhq.core.domain.operation.bean.GroupOperationSchedule; import org.rhq.core.domain.operation.bean.ResourceOperationSchedule; import org.rhq.core.domain.resource.Resource; -import org.rhq.core.domain.resource.ResourceType; -import org.rhq.core.domain.resource.group.ResourceGroup; import org.rhq.core.domain.util.PageControl; import org.rhq.core.domain.util.PageList; import org.rhq.core.domain.util.PageOrdering; -import org.rhq.core.util.StringUtil; import org.rhq.enterprise.server.RHQConstants; import org.rhq.enterprise.server.alert.AlertManagerLocal; import org.rhq.enterprise.server.auth.SubjectManagerLocal; @@ -87,7 +81,6 @@ import org.rhq.enterprise.server.measurement.MeasurementDataManagerLocal; import org.rhq.enterprise.server.operation.OperationManagerLocal; import org.rhq.enterprise.server.resource.ResourceManagerLocal; import org.rhq.enterprise.server.resource.ResourceTypeManagerLocal; -import org.rhq.enterprise.server.resource.group.ResourceGroupManagerLocal; import 
org.rhq.enterprise.server.rest.reporting.MeasurementConverter; import org.rhq.enterprise.server.scheduler.SchedulerLocal; import org.rhq.enterprise.server.storage.StorageClusterSettings; @@ -106,14 +99,6 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
private final Log log = LogFactory.getLog(StorageNodeManagerBean.class);
- private static final String USERNAME_PROPERTY = "rhq.cassandra.username"; - private static final String PASSWORD_PROPERTY = "rhq.cassandra.password"; - private final static String MAINTENANCE_OPERATION = "addNodeMaintenance"; - private final static String MAINTENANCE_OPERATION_NOTE = "Topology change maintenance."; - private final static String RUN_REPAIR_PROPERTY = "runRepair"; - private final static String UPDATE_SEEDS_LIST = "updateSeedsList"; - private final static String SEEDS_LIST = "seedsList"; - private static final String RHQ_STORAGE_CQL_PORT_PROPERTY = "nativeTransportPort"; private static final String RHQ_STORAGE_GOSSIP_PORT_PROPERTY = "storagePort"; private static final String RHQ_STORAGE_JMX_PORT_PROPERTY = "jmxPort"; @@ -152,9 +137,6 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN private SubjectManagerLocal subjectManager;
@EJB - private ResourceGroupManagerLocal resourceGroupManager; - - @EJB private OperationManagerLocal operationManager;
@EJB @@ -193,7 +175,6 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN storageNode.setResource(resource); storageNode.setOperationMode(OperationMode.NORMAL); initClusterSettingsIfNecessary(pluginConfig); - addStorageNodeToGroup(resource); } else { storageNode = new StorageNode(); storageNode.setAddress(address); @@ -278,83 +259,6 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN }
@Override - public void createStorageNodeGroup() { - log.info("Creating resource group [" + STORAGE_NODE_GROUP_NAME + "]"); - - ResourceGroup group = new ResourceGroup(STORAGE_NODE_GROUP_NAME); - - ResourceType type = resourceTypeManager.getResourceTypeByNameAndPlugin(STORAGE_NODE_RESOURCE_TYPE_NAME, - STORAGE_NODE_PLUGIN_NAME); - group.setResourceType(type); - group.setRecursive(false); - - resourceGroupManager.createResourceGroup(subjectManager.getOverlord(), group); - - addExistingStorageNodesToGroup(); - } - - private void addExistingStorageNodesToGroup() { - log.info("Adding existing storage nodes to resource group [" + STORAGE_NODE_GROUP_NAME + "]"); - - for (StorageNode node : getStorageNodes()) { - if (node.getResource() != null) { - addStorageNodeToGroup(node.getResource()); - } - } - } - - private void addStorageNodeToGroup(Resource resource) { - if (log.isInfoEnabled()) { - log.info("Adding " + resource + " to resource group [" + STORAGE_NODE_GROUP_NAME + "]"); - } - - ResourceGroup group = getStorageNodeGroup(); - resourceGroupManager.addResourcesToGroup(subjectManager.getOverlord(), group.getId(), - new int[]{resource.getId()}); - } - - @Override - public boolean storageNodeGroupExists() { - Subject overlord = subjectManager.getOverlord(); - - ResourceGroupCriteria criteria = new ResourceGroupCriteria(); - criteria.addFilterResourceTypeName(STORAGE_NODE_RESOURCE_TYPE_NAME); - criteria.addFilterPluginName(STORAGE_NODE_PLUGIN_NAME); - criteria.addFilterName(STORAGE_NODE_GROUP_NAME); - - List<ResourceGroup> groups = resourceGroupManager.findResourceGroupsByCriteria(overlord, criteria); - - return !groups.isEmpty(); - } - - @Override - public void addToStorageNodeGroup(StorageNode storageNode) { - storageNode.setOperationMode(OperationMode.NORMAL); - entityManager.merge(storageNode); - addStorageNodeToGroup(storageNode.getResource()); - } - - @Override - public ResourceGroup getStorageNodeGroup() { - Subject overlord = subjectManager.getOverlord(); - - 
ResourceGroupCriteria criteria = new ResourceGroupCriteria(); - criteria.addFilterResourceTypeName(STORAGE_NODE_RESOURCE_TYPE_NAME); - criteria.addFilterPluginName(STORAGE_NODE_PLUGIN_NAME); - criteria.addFilterName(STORAGE_NODE_GROUP_NAME); - criteria.fetchExplicitResources(true); - - List<ResourceGroup> groups = resourceGroupManager.findResourceGroupsByCriteria(overlord, criteria); - - if (groups.isEmpty()) { - throw new IllegalStateException("Resource group [" + STORAGE_NODE_GROUP_NAME + "] does not exist. This " + - "group must exist in order for the server to manage storage nodes. Restart the server for the group " + - "to be recreated."); - } - return groups.get(0); - } - - @Override @RequiredPermission(Permission.MANAGE_SETTINGS) public StorageNodeLoadComposite getLoad(Subject subject, StorageNode node, long beginTime, long endTime) { int resourceId = getResourceIdFromStorageNode(node); @@ -585,25 +489,27 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
@Override public void runReadRepair() { - ResourceGroup storageNodeGroup = getStorageNodeGroup(); - - if (storageNodeGroup.getExplicitResources().size() < 2) { - log.info("Skipping read repair since this is a single-node cluster"); - return; - } - - log.info("Scheduling read repair maintenance for storage cluster"); - - GroupOperationSchedule schedule = new GroupOperationSchedule(); - schedule.setGroup(storageNodeGroup); - schedule.setHaltOnFailure(false); - schedule.setExecutionOrder(new ArrayList<Resource>(storageNodeGroup.getExplicitResources())); - schedule.setJobTrigger(JobTrigger.createNowTrigger()); - schedule.setSubject(subjectManager.getOverlord()); - schedule.setOperationName("readRepair"); - schedule.setDescription("Run scheduled read repair on storage node"); - - operationManager.scheduleGroupOperation(subjectManager.getOverlord(), schedule); + // TODO Re-implement using work flow similar to how we deploy new nodes + +// ResourceGroup storageNodeGroup = getStorageNodeGroup(); +// +// if (storageNodeGroup.getExplicitResources().size() < 2) { +// log.info("Skipping read repair since this is a single-node cluster"); +// return; +// } +// +// log.info("Scheduling read repair maintenance for storage cluster"); +// +// GroupOperationSchedule schedule = new GroupOperationSchedule(); +// schedule.setGroup(storageNodeGroup); +// schedule.setHaltOnFailure(false); +// schedule.setExecutionOrder(new ArrayList<Resource>(storageNodeGroup.getExplicitResources())); +// schedule.setJobTrigger(JobTrigger.createNowTrigger()); +// schedule.setSubject(subjectManager.getOverlord()); +// schedule.setOperationName("readRepair"); +// schedule.setDescription("Run scheduled read repair on storage node"); +// +// operationManager.scheduleGroupOperation(subjectManager.getOverlord(), schedule); }
@Override @@ -889,127 +795,4 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN return successResultFound; }
- @Override - public void prepareNewNodesForBootstrap() { - List<StorageNode> newStorageNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE) - .setParameter("operationMode", OperationMode.INSTALLED).getResultList(); - if (newStorageNodes.isEmpty()) { - throw new RuntimeException("Failed to find storage node to bootstrap into cluster."); - } - // Right now, without some user input, we can only reliably bootstrap one node at a - // time. To support bootstrapping multiple nodes concurrently, a mechanism will have - // to be put in place for the user to declare in advance the nodes that are coming - // online. Then we can wait until all declared nodes have been committed into - // inventory and announced to the cluster - StorageNode storageNode = newStorageNodes.get(0); - - if (log.isInfoEnabled()) { - log.info("Preparing to bootstrap " + storageNode + " into cluster..."); - } - - ResourceOperationSchedule schedule = new ResourceOperationSchedule(); - schedule.setResource(storageNode.getResource()); - schedule.setJobTrigger(JobTrigger.createNowTrigger()); - schedule.setSubject(subjectManager.getOverlord()); - schedule.setOperationName("prepareForBootstrap"); - - StorageClusterSettings clusterSettings = storageClusterSettingsManager.getClusterSettings(subjectManager - .getOverlord()); - Configuration parameters = new Configuration(); - parameters.put(new PropertySimple("cqlPort", clusterSettings.getCqlPort())); - parameters.put(new PropertySimple("gossipPort", clusterSettings.getGossipPort())); - parameters.put(createPropertyListOfAddresses("storageNodeIPAddresses", getClusteredStorageNodes())); - - schedule.setParameters(parameters); - - operationManager.scheduleResourceOperation(subjectManager.getOverlord(), schedule); - } - - @Override - public void runAddNodeMaintenance() { - log.info("Preparing to schedule addNodeMaintenance on the storage cluster..."); - - List<StorageNode> storageNodes = 
entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, - StorageNode.class).setParameter("operationMode", OperationMode.NORMAL).getResultList(); - - // The previous cluster size will be the current size - 1 since we currently only - // support deploying one node at a time. - int previousClusterSize = storageNodes.size() - 1; - boolean isReadRepairNeeded; - - if (previousClusterSize >= 4) { - // At 4 nodes we increase the RF to 3. We are not increasing the RF beyond - // that for additional nodes; so, there is no need to run repair if we are - // expanding from a 4 node cluster since the RF remains the same. - isReadRepairNeeded = false; - } else if (previousClusterSize == 1) { - // The RF will increase since we are going from a single to a multi-node - // cluster; therefore, we want to run repair. - isReadRepairNeeded = true; - } else if (previousClusterSize == 2) { - if (storageNodes.size() > 3) { - // If we go from 2 to > 3 nodes we will increase the RF to 3; therefore - // we want to run repair. - isReadRepairNeeded = true; - } else { - // If we go from 2 to 3 nodes, we keep the RF at 2 so there is no need - // to run repair. - isReadRepairNeeded = false; - } - } else if (previousClusterSize == 3) { - // We are increasing the cluster size > 3 which means the RF will be - // updated to 3; therefore, we want to run repair. - isReadRepairNeeded = true; - } else { - // If we cluster size of zero, then something is really screwed up. It - // should always be > 0. 
- isReadRepairNeeded = storageNodes.size() > 1; - } - - if (isReadRepairNeeded) { - updateTopology(storageNodes); - } - - ResourceGroup storageNodeGroup = getStorageNodeGroup(); - - GroupOperationSchedule schedule = new GroupOperationSchedule(); - schedule.setGroup(storageNodeGroup); - schedule.setHaltOnFailure(false); - schedule.setExecutionOrder(new ArrayList<Resource>(storageNodeGroup.getExplicitResources())); - schedule.setJobTrigger(JobTrigger.createNowTrigger()); - schedule.setSubject(subjectManager.getOverlord()); - schedule.setOperationName(MAINTENANCE_OPERATION); - schedule.setDescription(MAINTENANCE_OPERATION_NOTE); - - Configuration config = new Configuration(); - config.put(createPropertyListOfAddresses(SEEDS_LIST, storageNodes)); - config.put(new PropertySimple(RUN_REPAIR_PROPERTY, isReadRepairNeeded)); - config.put(new PropertySimple(UPDATE_SEEDS_LIST, Boolean.TRUE)); - - schedule.setParameters(config); - - operationManager.scheduleGroupOperation(subjectManager.getOverlord(), schedule); - } - - private void updateTopology(List<StorageNode> storageNodes) { - String username = getRequiredStorageProperty(USERNAME_PROPERTY); - String password = getRequiredStorageProperty(PASSWORD_PROPERTY); - SchemaManager schemaManager = new SchemaManager(username, password, storageNodes); - try{ - schemaManager.updateTopology(); - } catch (Exception e) { - log.error("An error occurred while applying schema topology changes", e); - } - } - - private String getRequiredStorageProperty(String property) { - String value = System.getProperty(property); - if (StringUtil.isEmpty(property)) { - throw new IllegalStateException("The system property [" + property + "] is not set. The RHQ " - + "server will not be able connect to the RHQ storage node(s). 
This property should be defined " - + "in rhq-server.properties."); - } - return value; - } - } \ No newline at end of file diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java index 6f98de3..0c1b0ab 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java @@ -30,10 +30,9 @@ import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.cloud.StorageNodeConfigurationComposite; import org.rhq.core.domain.cloud.StorageNodeLoadComposite; import org.rhq.core.domain.criteria.StorageNodeCriteria; -import org.rhq.core.domain.operation.bean.ResourceOperationSchedule; import org.rhq.core.domain.measurement.composite.MeasurementDataNumericHighLowComposite; +import org.rhq.core.domain.operation.bean.ResourceOperationSchedule; import org.rhq.core.domain.resource.Resource; -import org.rhq.core.domain.resource.group.ResourceGroup; import org.rhq.core.domain.util.PageList;
@Local @@ -162,39 +161,8 @@ public interface StorageNodeManagerLocal { */ void runReadRepair();
- /** - * Creates the storage node resource group which will be named {@link #STORAGE_NODE_GROUP_NAME}. This method should - * only be called at start up by {@link org.rhq.enterprise.server.storage.StorageClientManagerBean StorageClientManagerBean}. - * Storage node entities created during installation will be added to the group. - */ - void createStorageNodeGroup(); - - /** - * Checks whether or not the storage node resource group exists. This method is very similar to - * {@link #getStorageNodeGroup()} but may be called prior to the group being created. - * - * @return true if the storage node resource group exists, false otherwise. - */ - boolean storageNodeGroupExists(); - - void addToStorageNodeGroup(StorageNode storageNode); - - /** - * This method assumes the storage node resource group already exists; as such, it should only be called from places - * in the code that are after the point(s) where the group has been created. - * - * @return The storage node resource group. - * @throws IllegalStateException if the group is not found or does not exist. - */ - ResourceGroup getStorageNodeGroup(); - - void scheduleOperationInNewTransaction(Subject subject, ResourceOperationSchedule schedule);
- void prepareNewNodesForBootstrap(); - - void runAddNodeMaintenance(); - Map<String, List<MeasurementDataNumericHighLowComposite>> findStorageNodeLoadDataForLast(Subject subject, StorageNode node, long beginTime, long endTime, int numPoints);
boolean isAddNodeMaintenanceInProgress(); diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java index ce3a6bd..a4e0b24 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java @@ -83,11 +83,6 @@ public class StorageClientManagerBean {
log.info("Initializing storage client subsystem");
- boolean isNewServerInstall = !storageNodeManager.storageNodeGroupExists(); - if (isNewServerInstall) { - storageNodeManager.createStorageNodeGroup(); - } - String username = getRequiredStorageProperty(USERNAME_PROP); String password = getRequiredStorageProperty(PASSWORD_PROP);
@@ -107,7 +102,7 @@ public class StorageClientManagerBean {
metricsDAO = new MetricsDAO(session, metricsConfiguration);
- initMetricsServer(isNewServerInstall, ctime); + initMetricsServer(ctime);
initialized = true; log.info("Storage client subsystem is now initialized"); @@ -141,7 +136,7 @@ public class StorageClientManagerBean { }
public MetricsConfiguration getMetricsConfiguration() { - return this.metricsConfiguration; + return metricsConfiguration; }
public boolean isClusterAvailable() { @@ -180,7 +175,7 @@ public class StorageClientManagerBean { return cluster.connect(RHQ_KEYSPACE); }
- private void initMetricsServer(boolean isNewInstall, long serverInstallTime) { + private void initMetricsServer(long serverInstallTime) { if (log.isDebugEnabled()) { log.debug("Initializing " + MetricsServer.class.getName()); } @@ -191,7 +186,7 @@ public class StorageClientManagerBean { DateTimeService dateTimeService = new DateTimeService(); dateTimeService.setConfiguration(metricsConfiguration); metricsServer.setDateTimeService(dateTimeService); - metricsServer.init(isNewInstall, serverInstallTime); + metricsServer.init(serverInstallTime); }
private String getRequiredStorageProperty(String property) { diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsServer.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsServer.java index 9756006..ca17bf6 100644 --- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsServer.java +++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsServer.java @@ -87,10 +87,8 @@ public class MetricsServer { this.dateTimeService = dateTimeService; }
- public void init(boolean isNewServerInstall, long serverInstallTime) { - if (!isNewServerInstall) { - determineMostRecentRawDataSinceLastShutdown(serverInstallTime); - } + public void init(long serverInstallTime) { + determineMostRecentRawDataSinceLastShutdown(serverInstallTime); }
private void determineMostRecentRawDataSinceLastShutdown(long serverInstallTime) { diff --git a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsServerTest.java b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsServerTest.java index b4de4e0..879f40c 100644 --- a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsServerTest.java +++ b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsServerTest.java @@ -355,7 +355,7 @@ public class MetricsServerTest extends CassandraIntegrationTest { // 2) re-initialize the metrics server // 3) insert some more raw data metricsServer.setCurrentHour(hour15); - metricsServer.init(false, hour0().plusHours(2).getMillis()); + metricsServer.init(hour0().plusHours(2).getMillis());
rawData = new HashSet<MeasurementDataNumeric>(); rawData.add(new MeasurementDataNumeric(hour14.plusMinutes(20).getMillis(), scheduleId, 3.0)); @@ -420,7 +420,7 @@ public class MetricsServerTest extends CassandraIntegrationTest { // 2) re-initialize the metrics server // 3) insert some more raw data metricsServer.setCurrentHour(hour9); - metricsServer.init(false, hour0().minusDays(1).plusHours(4).getMillis()); + metricsServer.init(hour0().minusDays(1).plusHours(4).getMillis());
rawData = new HashSet<MeasurementDataNumeric>(); rawData.add(new MeasurementDataNumeric(hour8.plusMinutes(20).getMillis(), scheduleId, 8.0));
commit 011d33b712b0c805341c693fbb71640301c438e7 Author: John Sanda jsanda@redhat.com Date: Thu Aug 8 22:07:16 2013 -0400
StorageNodeMaintenanceJob is no longer used
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/StartupBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/StartupBean.java index 646a9fd..3c19896 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/StartupBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/StartupBean.java @@ -88,7 +88,6 @@ import org.rhq.enterprise.server.scheduler.jobs.PurgePluginsJob; import org.rhq.enterprise.server.scheduler.jobs.PurgeResourceTypesJob; import org.rhq.enterprise.server.scheduler.jobs.SavedSearchResultCountRecalculationJob; import org.rhq.enterprise.server.scheduler.jobs.StorageClusterReadRepairJob; -import org.rhq.enterprise.server.scheduler.jobs.StorageNodeMaintenanceJob; import org.rhq.enterprise.server.storage.StorageClientManagerBean; import org.rhq.enterprise.server.system.SystemManagerLocal; import org.rhq.enterprise.server.util.LookupUtil; @@ -436,13 +435,6 @@ public class StartupBean implements StartupLocal { * Initalizes the storage client subsystem which is needed for reading/writing metric data. */ private void initStorageClient() { - try { - //add the cluster maintenance job to the list of available jobs. 
- schedulerBean.scheduleTriggeredJob(StorageNodeMaintenanceJob.class, false, null); - } catch (Exception e) { - log.error("Cannot create storage node maintenance job.", e); - } - storageClientManager.init(serverManager.getServer().getCtime()); serverManager.establishCurrentServerMode(); } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/scheduler/jobs/StorageNodeMaintenanceJob.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/scheduler/jobs/StorageNodeMaintenanceJob.java deleted file mode 100644 index aa55cb4..0000000 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/scheduler/jobs/StorageNodeMaintenanceJob.java +++ /dev/null @@ -1,299 +0,0 @@ -/* - * RHQ Management Platform - * Copyright (C) 2005-2008 Red Hat, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- */ -package org.rhq.enterprise.server.scheduler.jobs; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.quartz.JobDataMap; -import org.quartz.JobExecutionContext; -import org.quartz.JobExecutionException; - -import org.rhq.cassandra.schema.SchemaManager; -import org.rhq.core.domain.cloud.StorageNode; -import org.rhq.core.domain.common.JobTrigger; -import org.rhq.core.domain.configuration.Configuration; -import org.rhq.core.domain.configuration.Property; -import org.rhq.core.domain.configuration.PropertyList; -import org.rhq.core.domain.configuration.PropertyMap; -import org.rhq.core.domain.configuration.PropertySimple; -import org.rhq.core.domain.criteria.ResourceCriteria; -import org.rhq.core.domain.operation.bean.GroupOperationSchedule; -import org.rhq.core.domain.resource.Resource; -import org.rhq.core.domain.resource.group.ResourceGroup; -import org.rhq.core.util.StringUtil; -import org.rhq.enterprise.server.auth.SubjectManagerLocal; -import org.rhq.enterprise.server.cloud.StorageNodeManagerLocal; -import org.rhq.enterprise.server.operation.OperationManagerLocal; -import org.rhq.enterprise.server.util.LookupUtil; - -/** - * Quartz scheduler job that runs cluster wide maintenance. This should be - * invoked if and only if a topology change was detected in the storage cluster. 
- * - * - * @author Stefan Negrea - */ -public class StorageNodeMaintenanceJob extends AbstractStatefulJob { - - private final Log log = LogFactory.getLog(StorageNodeMaintenanceJob.class); - - public static final String JOB_DATA_PROPERTY_CLUSTER_SIZE = "clusterSize"; - - public static final String JOB_DATA_PROPERTY_TOPOLOGY_CHANGED = "topologyChanged"; - - private final static int MAX_ITERATIONS = 5; - private final static int TIMEOUT = 10000; - private final static String STORAGE_SERVICE = "Storage Service"; - private final static String LOAD_MAP_PROPERTY = "LoadMap"; - private final static String ENDPOINT_PROPERTY = "endpoint"; - private final static String MAINTENANCE_OPERATION = "addNodeMaintenance"; - private final static String MAINTENANCE_OPERATION_NOTE = "Topology change maintenance."; - private final static String RUN_REPAIR_PROPERTY = "runRepair"; - private final static String UPDATE_SEEDS_LIST = "updateSeedsList"; - private final static String SEEDS_LIST = "seedsList"; - private static final String USERNAME_PROP = "rhq.cassandra.username"; - private static final String PASSWORD_PROP = "rhq.cassandra.password"; - - @Override - public void executeJobCode(JobExecutionContext context) throws JobExecutionException { - JobDataMap jobDataMap = context.getMergedJobDataMap(); - int clusterSize = Integer.parseInt(jobDataMap.getString(JOB_DATA_PROPERTY_CLUSTER_SIZE)); - - //1. Wait for resouces to be linked to node storage nodes - waitForResouceLinks(); - - //2. Drop any storage nodes not linked to resources from the list of available nodes - // (if storage nodes are not linked to resources that means they are not yet managed) - List<StorageNode> storageNodes = getOnlyResourceLinkedStorageNodes(); - - //3. Wait for the all storage nodes to be part of the same cluster - storageNodes = waitForClustering(storageNodes); - - boolean isReadRepairNeeded; - - if (clusterSize >= 4) { - // At 4 nodes we increase the RF to 3. 
We are not increasing the RF beyond - // that for additional nodes; so, there is no need to run repair if we are - // expanding from a 4 node cluster since the RF remains the same. - isReadRepairNeeded = false; - } else if (clusterSize == 1) { - // The RF will increase since we are going from a single to a multi-node - // cluster; therefore, we want to run repair. - isReadRepairNeeded = true; - } else if (clusterSize == 2) { - if (storageNodes.size() > 3) { - // If we go from 2 to > 3 nodes we will increase the RF to 3; therefore - // we want to run repair. - isReadRepairNeeded = true; - } else { - // If we go from 2 to 3 nodes, we keep the RF at 2 so there is no need - // to run repair. - isReadRepairNeeded = false; - } - } else if (clusterSize == 3) { - // We are increasing the cluster size > 3 which means the RF will be - // updated to 3; therefore, we want to run repair. - isReadRepairNeeded = true; - } else { - // If we cluster size of zero, then something is really screwed up. It - // should always be > 0. - log.error("The job data property [" + JOB_DATA_PROPERTY_CLUSTER_SIZE + "] should always be greater " + - "than zero. This may be a bug in the code that scheduled this job."); - isReadRepairNeeded = storageNodes.size() > 1; - } - - if (isReadRepairNeeded) { - updateTopology(storageNodes); - } - - //5. 
run maintenance on each node - List<String> seedList = new ArrayList<String>(); - for (StorageNode storageNode : storageNodes) { - seedList.add(storageNode.getAddress()); - } - - runNodeMaintenance(seedList, isReadRepairNeeded); - } - - private void updateTopology(List<StorageNode> storageNodes) throws JobExecutionException { - String username = getRequiredStorageProperty(USERNAME_PROP); - String password = getRequiredStorageProperty(PASSWORD_PROP); - SchemaManager schemaManager = new SchemaManager(username, password, storageNodes); - try{ - schemaManager.updateTopology(); - } catch (Exception e) { - log.error("An error occurred while applying schema topology changes", e); - } - } - - private List<StorageNode> waitForClustering(List<StorageNode> storageNodes) { - List<String> existingEndpoints = new ArrayList<String>(); - for (StorageNode storageNode : storageNodes) { - existingEndpoints.add(storageNode.getAddress()); - } - Collections.sort(existingEndpoints); - - int iteration = 0; - boolean allStorageNodesPartOfCluster = false; - while (iteration < MAX_ITERATIONS) { - for (StorageNode storageNode : storageNodes) { - Resource resource = storageNode.getResource(); - List<String> endpoints = new ArrayList<String>(); - - try { - ResourceCriteria c = new ResourceCriteria(); - c.addFilterParentResourceId(resource.getId()); - List<Resource> childResources = LookupUtil.getResourceManager().findResourcesByCriteria( - LookupUtil.getSubjectManager().getOverlord(), c); - - for (Resource childResource : childResources) { - if (STORAGE_SERVICE.equals(childResource.getName())) { - try { - PropertyList propertyList = LookupUtil - .getConfigurationManager() - .getLiveResourceConfiguration(LookupUtil.getSubjectManager().getOverlord(), - childResource.getId(), true).getList(LOAD_MAP_PROPERTY); - - List<Property> actualList = propertyList.getList(); - for (Property property : actualList) { - PropertyMap map = (PropertyMap) property; - 
endpoints.add(map.getSimpleValue(ENDPOINT_PROPERTY, null)); - } - } catch (Exception e) { - log.error("Error fetching live configuration for resource " + resource.getId()); - } - - break; - } - } - } catch (Exception e) { - log.error("An exception occurred while waiting for nodes to cluster", e); - } - - Collections.sort(endpoints); - - if (existingEndpoints.equals(endpoints)) { - allStorageNodesPartOfCluster = true; - break; - } - } - - if (allStorageNodesPartOfCluster == true) { - break; - } else { - try { - Thread.sleep(TIMEOUT); - } catch (InterruptedException e) { - log.error(e); - } - } - - iteration++; - } - - return storageNodes; - } - - private void runNodeMaintenance(List<String> seedList, boolean runRepair) { - OperationManagerLocal operationManager = LookupUtil.getOperationManager(); - StorageNodeManagerLocal storageNodeManager = LookupUtil.getStorageNodeManager(); - SubjectManagerLocal subjectManager = LookupUtil.getSubjectManager(); - - ResourceGroup storageNodeGroup = storageNodeManager.getStorageNodeGroup(); - - GroupOperationSchedule schedule = new GroupOperationSchedule(); - schedule.setGroup(storageNodeGroup); - schedule.setHaltOnFailure(false); - schedule.setExecutionOrder(new ArrayList<Resource>(storageNodeGroup.getExplicitResources())); - schedule.setJobTrigger(JobTrigger.createNowTrigger()); - schedule.setSubject(subjectManager.getOverlord()); - schedule.setOperationName(MAINTENANCE_OPERATION); - schedule.setDescription(MAINTENANCE_OPERATION_NOTE); - - List<Property> properties = new ArrayList<Property>(); - properties.add(new PropertySimple(RUN_REPAIR_PROPERTY, runRepair)); - properties.add(new PropertySimple(UPDATE_SEEDS_LIST, Boolean.TRUE)); - - PropertyList seedListProperty = new PropertyList(SEEDS_LIST); - for (String seed : seedList) { - seedListProperty.add(new PropertySimple("seed", seed)); - } - properties.add(seedListProperty); - - Configuration config = new Configuration(); - config.setProperties(properties); - - 
schedule.setParameters(config); - - operationManager.scheduleGroupOperation(subjectManager.getOverlord(), schedule); - } - - private List<StorageNode> getOnlyResourceLinkedStorageNodes() { - StorageNodeManagerLocal storageNodeManager = LookupUtil.getStorageNodeManager(); - List<StorageNode> resourceLinkedstorageNodes = new ArrayList<StorageNode>(); - for(StorageNode storageNode : storageNodeManager.getStorageNodes()){ - if (storageNode.getResource() != null) { - resourceLinkedstorageNodes.add(storageNode); - } - } - - return resourceLinkedstorageNodes; - } - - private void waitForResouceLinks() { - StorageNodeManagerLocal storageNodeManager = LookupUtil.getStorageNodeManager(); - - boolean allResourcesLinked = true; - int iteration = 0; - while (iteration < MAX_ITERATIONS) { - allResourcesLinked = true; - List<StorageNode> t = storageNodeManager.getStorageNodes(); - - for (StorageNode storageNode : t) { - if (storageNode.getResource() == null) { - allResourcesLinked = false; - } - } - if (allResourcesLinked) { - break; - } else { - try { - Thread.sleep(TIMEOUT); - } catch (InterruptedException e) { - log.error(e); - } - } - iteration++; - } - } - - private String getRequiredStorageProperty(String property) throws JobExecutionException { - String value = System.getProperty(property); - if (StringUtil.isEmpty(property)) { - throw new JobExecutionException("The system property [" + property + "] is not set. The RHQ " - + "server will not be able connect to the RHQ storage node(s). This property should be defined " - + "in rhq-server.properties."); - } - return value; - } -}
commit 08fb6ebcef259688a5887b86db8757225764bbdb Author: John Sanda jsanda@redhat.com Date: Thu Aug 8 19:53:16 2013 -0400
initial support for storage node deployment workflow without using the resource group
In addition to removing the dependency on the storage node resource group, this commit introduces new storage node operation modes to track the state of the work flow which will make it possible to resume or undo the process in the event of a failure.
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java index a1010b5..6a5cf6a 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java @@ -58,6 +58,8 @@ import org.rhq.core.domain.resource.Resource; + " WHERE s.address = :address"), @NamedQuery(name = StorageNode.QUERY_FIND_ALL_BY_MODE, query = "SELECT s FROM StorageNode s WHERE s.operationMode = :operationMode"), + @NamedQuery(name = StorageNode.QUERY_FIND_ALL_BY_MODE_EXCLUDING, query = + "SELECT s FROM StorageNode s WHERE s.operationMode = :operationMode AND s <> :storageNode"), @NamedQuery(name = StorageNode.QUERY_FIND_ALL_NOT_INSTALLED, query = "SELECT s FROM StorageNode s WHERE NOT s.operationMode = 'INSTALLED'"), @NamedQuery(name = StorageNode.QUERY_FIND_ALL_NORMAL, query = "SELECT s FROM StorageNode s WHERE s.operationMode = 'NORMAL'"), @NamedQuery(name = StorageNode.QUERY_DELETE_BY_ID, query = "" // @@ -82,8 +84,9 @@ import org.rhq.core.domain.resource.Resource; @NamedQuery(name = StorageNode.QUERY_UPDATE_REMOVE_LINKED_RESOURCES, query = "" // + " UPDATE StorageNode s " // + " SET s.resource = NULL " // - + " WHERE s.resource.id in (:resourceIds)") // - + + " WHERE s.resource.id in (:resourceIds)"), + @NamedQuery(name = StorageNode.QUERY_UPDATE_OPERATION_MODE, query = + "UPDATE StorageNode s SET s.operationMode = :newOperationMode WHERE s.operationMode = :oldOperationMode") }) @SequenceGenerator(allocationSize = org.rhq.core.domain.util.Constants.ALLOCATION_SIZE, name = "RHQ_STORAGE_NODE_ID_SEQ", sequenceName = "RHQ_STORAGE_NODE_ID_SEQ") @Table(name = "RHQ_STORAGE_NODE") @@ -94,12 +97,14 @@ public class StorageNode implements Serializable { public static final String QUERY_FIND_ALL = "StorageNode.findAll"; public static final String QUERY_FIND_BY_ADDRESS = 
"StorageNode.findByAddress"; public static final String QUERY_FIND_ALL_BY_MODE = "StorageNode.findAllByMode"; + public static final String QUERY_FIND_ALL_BY_MODE_EXCLUDING = "StorageNode.findAllByModeExcluding"; public static final String QUERY_FIND_ALL_NOT_INSTALLED = "StorageNode.findAllCloudMembers"; public static final String QUERY_DELETE_BY_ID = "StorageNode.deleteById"; public static final String QUERY_FIND_ALL_NORMAL = "StorageNode.findAllNormalCloudMembers"; public static final String QUERY_FIND_SCHEDULE_IDS_BY_PARENT_RESOURCE_ID_AND_MEASUREMENT_DEFINITION_NAMES = "StorageNode.findScheduleIdsByParentResourceIdAndMeasurementDefinitionNames"; public static final String QUERY_FIND_SCHEDULE_IDS_BY_GRANDPARENT_RESOURCE_ID_AND_MEASUREMENT_DEFINITION_NAMES = "StorageNode.findScheduleIdsByGrandparentResourceIdAndMeasurementDefinitionNames"; public static final String QUERY_UPDATE_REMOVE_LINKED_RESOURCES = "StorageNode.updateRemoveLinkedResources"; + public static final String QUERY_UPDATE_OPERATION_MODE = "StorageNode.updateOperationMode";
private static final String JMX_CONNECTION_STRING = "service:jmx:rmi:///jndi/rmi://%s:%s/jmxrmi";
@@ -206,7 +211,11 @@ public class StorageNode implements Serializable { DOWN("This storage node is down"), // INSTALLED("This storage node is newly installed but not yet operationial"), // MAINTENANCE("This storage node is in maintenance mode"), // - NORMAL("This storage node is running normally"); + NORMAL("This storage node is running normally"), + ANNOUNCE("The storage node is running normally and is being updated to have newly deployed storage nodes " + + "announced to it so that those new nodes can join the cluster."), + ADD_NODE_MAINTENANCE("The storage node is running and is preparing to undergo routine maintenance that is " + + "necessary when a new node joins the cluster.");
public final String message;
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index ba94bb7..c530530 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -92,6 +92,7 @@ import org.rhq.enterprise.server.rest.reporting.MeasurementConverter; import org.rhq.enterprise.server.scheduler.SchedulerLocal; import org.rhq.enterprise.server.storage.StorageClusterSettings; import org.rhq.enterprise.server.storage.StorageClusterSettingsManagerBean; +import org.rhq.enterprise.server.storage.StorageNodeOperationsHandlerLocal; import org.rhq.enterprise.server.util.CriteriaQueryGenerator; import org.rhq.enterprise.server.util.CriteriaQueryRunner; import org.rhq.enterprise.server.util.LookupUtil; @@ -171,6 +172,9 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN @EJB private StorageClusterSettingsManagerBean storageClusterSettingsManager;
+ @EJB + private StorageNodeOperationsHandlerLocal storageNodeOperationsHandler; + @Override public void linkResource(Resource resource) { Configuration pluginConfig = resource.getPluginConfiguration(); @@ -243,21 +247,12 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN log.info("Announcing " + newStorageNode + " to storage node cluster."); }
- ResourceGroup storageNodeGroup = getStorageNodeGroup(); - - GroupOperationSchedule schedule = new GroupOperationSchedule(); - schedule.setGroup(storageNodeGroup); - schedule.setHaltOnFailure(false); - schedule.setExecutionOrder(new ArrayList<Resource>(storageNodeGroup.getExplicitResources())); - schedule.setJobTrigger(JobTrigger.createNowTrigger()); - schedule.setSubject(subjectManager.getOverlord()); - schedule.setOperationName("updateKnownNodes"); - - Configuration parameters = new Configuration(); - parameters.put(createPropertyListOfAddresses("ipAddresses", combine(getClusteredStorageNodes(), newStorageNode))); - schedule.setParameters(parameters); - - operationManager.scheduleGroupOperation(subjectManager.getOverlord(), schedule); + List<StorageNode> clusteredNodes = getClusteredStorageNodes(); + for (StorageNode node : clusteredNodes) { + node.setOperationMode(OperationMode.ANNOUNCE); + } + PropertyList addresses = createPropertyListOfAddresses("addresses", combine(clusteredNodes, newStorageNode)); + storageNodeOperationsHandler.announceNewStorageNode(newStorageNode, clusteredNodes.get(0), addresses); }
private List<StorageNode> combine(List<StorageNode> storageNodes, StorageNode storageNode) { @@ -277,6 +272,12 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN }
@Override + public boolean isAddNodeMaintenanceInProgress() { + return !entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE) + .setParameter("operationMode", OperationMode.ADD_NODE_MAINTENANCE).getResultList().isEmpty(); + } + + @Override public void createStorageNodeGroup() { log.info("Creating resource group [" + STORAGE_NODE_GROUP_NAME + "]");
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java index 0c86152..6f98de3 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java @@ -196,4 +196,6 @@ public interface StorageNodeManagerLocal { void runAddNodeMaintenance();
Map<String, List<MeasurementDataNumericHighLowComposite>> findStorageNodeLoadDataForLast(Subject subject, StorageNode node, long beginTime, long endTime, int numPoints); + + boolean isAddNodeMaintenanceInProgress(); } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/operation/OperationManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/operation/OperationManagerBean.java index 5d3b1ae..f5ceae6 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/operation/OperationManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/operation/OperationManagerBean.java @@ -96,7 +96,7 @@ import org.rhq.enterprise.server.resource.ResourceNotFoundException; import org.rhq.enterprise.server.resource.group.ResourceGroupManagerLocal; import org.rhq.enterprise.server.resource.group.ResourceGroupNotFoundException; import org.rhq.enterprise.server.scheduler.SchedulerLocal; -import org.rhq.enterprise.server.storage.StorageNodeOperationsHandler; +import org.rhq.enterprise.server.storage.StorageNodeOperationsHandlerLocal; import org.rhq.enterprise.server.util.CriteriaQueryGenerator; import org.rhq.enterprise.server.util.CriteriaQueryRunner;
@@ -125,7 +125,7 @@ public class OperationManagerBean implements OperationManagerLocal, OperationMan private SubjectManagerLocal subjectManager;
@EJB - private StorageNodeOperationsHandler storageNodeOperationsHandler; + private StorageNodeOperationsHandlerLocal storageNodeOperationsHandler;
@SuppressWarnings("unchecked") public List<IntegerOptionItem> getResourceNameOptionItems(int groupId) { @@ -1727,7 +1727,6 @@ public class OperationManagerBean implements OperationManagerLocal, OperationMan if (!stillInProgress) { groupHistory.setErrorMessage((groupErrorMessage == null) ? null : groupErrorMessage.toString()); groupHistory.setStatus(groupStatus); - storageNodeOperationsHandler.handleGroupOperationUpdateIfNecessary(groupHistory); notifyAlertConditionCacheManager("checkForCompletedGroupOperation", groupHistory); } } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java index 190bfe0..734da35 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java @@ -1,13 +1,14 @@ package org.rhq.enterprise.server.storage;
import java.net.InetAddress; -import java.util.concurrent.atomic.AtomicBoolean;
import com.datastax.driver.core.exceptions.NoHostAvailableException;
import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory;
+import org.rhq.enterprise.server.cloud.StorageNodeManagerLocal; +import org.rhq.enterprise.server.util.LookupUtil; import org.rhq.server.metrics.StorageStateListener;
/** @@ -17,29 +18,24 @@ public class StorageClusterMonitor implements StorageStateListener {
private Log log = LogFactory.getLog(StorageClusterMonitor.class);
- private AtomicBoolean isClusterAvailable = new AtomicBoolean(false); + private boolean isClusterAvailable = true;
public boolean isClusterAvailable() { - return isClusterAvailable.get(); + return isClusterAvailable; }
@Override public void onStorageNodeUp(InetAddress address) { log.info("Storage node at " + address.getHostAddress() + " is up"); - isClusterAvailable.set(true); - - //TODO: Add these back at a later time - /*StorageNodeManagerLocal storageNodeManager = LookupUtil.getStorageNodeManager(); - StorageNode newClusterNode = storageNodeManager.findStorageNodeByAddress(address); - - if (newClusterNode == null) { - log.error("Did not find storage node with address [" + address.getHostAddress() + "]. This should not " + - "happen."); - } else { - log.info("Adding " + newClusterNode + " to storage cluster and scheduling cluster maintenance..."); - storageNodeManager.addToStorageNodeGroup(newClusterNode); - storageNodeManager.runAddNodeMaintenance(); - }*/ + + isClusterAvailable = true; + + StorageNodeManagerLocal storageNodeManager = LookupUtil.getStorageNodeManager(); + if (storageNodeManager.isAddNodeMaintenanceInProgress()) { + log.info("Scheduling cluster maintenance..."); + StorageNodeOperationsHandlerLocal storageOperationsHandler = LookupUtil.getStorageNodeOperationsHandler(); + storageOperationsHandler.performAddNodeMaintenance(address); + } }
@Override @@ -54,6 +50,6 @@ public class StorageClusterMonitor implements StorageStateListener {
@Override public void onStorageClusterDown(NoHostAvailableException e) { - isClusterAvailable.set(false); + isClusterAvailable = false; } } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerBean.java index e6b1fb7..3e10a2f 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerBean.java @@ -24,16 +24,18 @@ public class StorageClusterSettingsManagerBean { Map<String, String> settingsMap = settings.toMap(); StorageClusterSettings clusterSettings = new StorageClusterSettings();
- if (!settingsMap.containsKey(SystemSetting.STORAGE_CQL_PORT)) { + if (!settingsMap.containsKey(SystemSetting.STORAGE_CQL_PORT.getInternalName())) { return null; } else { - clusterSettings.setCqlPort(Integer.parseInt(settingsMap.get(SystemSetting.STORAGE_CQL_PORT))); + clusterSettings.setCqlPort(Integer.parseInt(settingsMap.get( + SystemSetting.STORAGE_CQL_PORT.getInternalName()))); }
- if (!settingsMap.containsKey(SystemSetting.STORAGE_GOSSIP_PORT)) { + if (!settingsMap.containsKey(SystemSetting.STORAGE_GOSSIP_PORT.getInternalName())) { return null; } else { - clusterSettings.setGossipPort(Integer.parseInt(settingsMap.get(SystemSetting.STORAGE_GOSSIP_PORT))); + clusterSettings.setGossipPort(Integer.parseInt(settingsMap.get( + SystemSetting.STORAGE_GOSSIP_PORT.getInternalName()))); }
return clusterSettings; diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandler.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandler.java deleted file mode 100644 index 96e8de8..0000000 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandler.java +++ /dev/null @@ -1,17 +0,0 @@ -package org.rhq.enterprise.server.storage; - -import javax.ejb.Asynchronous; - -import org.rhq.core.domain.operation.GroupOperationHistory; -import org.rhq.core.domain.operation.OperationHistory; - -/** - * @author John Sanda - */ -public interface StorageNodeOperationsHandler { - @Asynchronous - void handleOperationUpdateIfNecessary(OperationHistory operationHistory); - - @Asynchronous - void handleGroupOperationUpdateIfNecessary(GroupOperationHistory operationHistory); -} diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java index 6da5cca..3e0eed8 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java @@ -1,5 +1,10 @@ package org.rhq.enterprise.server.storage;
+import java.net.InetAddress; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + import javax.ejb.Asynchronous; import javax.ejb.EJB; import javax.ejb.Stateless; @@ -9,47 +14,233 @@ import javax.persistence.PersistenceContext; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory;
+import org.rhq.core.domain.auth.Subject; import org.rhq.core.domain.cloud.StorageNode; -import org.rhq.core.domain.operation.GroupOperationHistory; +import org.rhq.core.domain.common.JobTrigger; +import org.rhq.core.domain.configuration.Configuration; +import org.rhq.core.domain.configuration.Property; +import org.rhq.core.domain.configuration.PropertyList; +import org.rhq.core.domain.configuration.PropertySimple; import org.rhq.core.domain.operation.OperationDefinition; import org.rhq.core.domain.operation.OperationHistory; -import org.rhq.core.domain.operation.OperationRequestStatus; +import org.rhq.core.domain.operation.ResourceOperationHistory; +import org.rhq.core.domain.operation.bean.ResourceOperationSchedule; import org.rhq.core.domain.resource.Resource; import org.rhq.core.domain.resource.ResourceType; +import org.rhq.core.util.StringUtil; import org.rhq.enterprise.server.RHQConstants; +import org.rhq.enterprise.server.auth.SubjectManagerLocal; import org.rhq.enterprise.server.cloud.StorageNodeManagerLocal; +import org.rhq.enterprise.server.operation.OperationManagerLocal; +import org.rhq.server.metrics.StorageSession;
/** * @author John Sanda */ @Stateless -public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHandler { +public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHandlerLocal {
private final Log log = LogFactory.getLog(StorageNodeOperationsHandlerBean.class);
private static final String STORAGE_NODE_TYPE_NAME = "RHQ Storage Node"; private static final String STORAGE_NODE_PLUGIN_NAME = "RHQStorage"; + private static final String USERNAME_PROPERTY = "rhq.cassandra.username"; + private static final String PASSWORD_PROPERTY = "rhq.cassandra.password"; + private final static String RUN_REPAIR_PROPERTY = "runRepair"; + private final static String UPDATE_SEEDS_LIST = "updateSeedsList"; + private final static String SEEDS_LIST = "seedsList";
@PersistenceContext(unitName = RHQConstants.PERSISTENCE_UNIT_NAME) private EntityManager entityManager;
@EJB + private SubjectManagerLocal subjectManager; + + @EJB private StorageNodeManagerLocal storageNodeManager;
+ @EJB + private OperationManagerLocal operationManager; + + @EJB + private StorageClusterSettingsManagerBean storageClusterSettingsManager; + + @EJB + private StorageClientManagerBean storageClientManager; + + @Override + public void announceNewStorageNode(StorageNode newStorageNode, StorageNode clusterNode, PropertyList addresses) { + if (log.isInfoEnabled()) { + log.info("Announcing new storage node " + newStorageNode + " to cluster node " + clusterNode); + } + Subject overlord = subjectManager.getOverlord(); + ResourceOperationSchedule schedule = new ResourceOperationSchedule(); + schedule.setResource(clusterNode.getResource()); + schedule.setJobTrigger(JobTrigger.createNowTrigger()); + schedule.setSubject(overlord); + schedule.setOperationName("updateKnownNodes"); + Configuration parameters = new Configuration(); + parameters.put(addresses); + schedule.setParameters(parameters); + + operationManager.scheduleResourceOperation(overlord, schedule); + } + + @Override + public void performAddNodeMaintenance(InetAddress storageNodeAddress) { + StorageNode storageNode = entityManager.createNamedQuery(StorageNode.QUERY_FIND_BY_ADDRESS, + StorageNode.class).setParameter("address", storageNodeAddress.getHostAddress()).getSingleResult(); + storageNode.setOperationMode(StorageNode.OperationMode.ADD_NODE_MAINTENANCE); + + List<StorageNode> clusterNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, + StorageNode.class).setParameter("operationMode", StorageNode.OperationMode.ADD_NODE_MAINTENANCE) + .getResultList(); + + boolean runRepair = updateSchemaIfNecessary(clusterNodes); + + performAddNodeMaintenance(storageNode, runRepair, createPropertyListOfAddresses(SEEDS_LIST, clusterNodes)); + } + + private void performAddNodeMaintenance(StorageNode storageNode, boolean runRepair, PropertyList seedsList) { + if (log.isInfoEnabled()) { + log.info("Running addNodeMaintenance for storage node " + storageNode); + } + + Subject overlord = 
subjectManager.getOverlord(); + + ResourceOperationSchedule schedule = new ResourceOperationSchedule(); + schedule.setResource(storageNode.getResource()); + schedule.setJobTrigger(JobTrigger.createNowTrigger()); + schedule.setSubject(overlord); + schedule.setOperationName("addNodeMaintenance"); + + Configuration config = new Configuration(); + config.put(seedsList); + config.put(new PropertySimple(RUN_REPAIR_PROPERTY, runRepair)); + config.put(new PropertySimple(UPDATE_SEEDS_LIST, Boolean.TRUE)); + + schedule.setParameters(config); + + operationManager.scheduleResourceOperation(overlord, schedule); + } + @Override @Asynchronous public void handleOperationUpdateIfNecessary(OperationHistory operationHistory) { -// if (isStorageNodeOperation(operationHistory.getOperationDefinition())) { -// if (operationHistory.getOperationDefinition().getName().equals("prepareForBootstrap")) { -// ResourceOperationHistory resourceOperationHistory = entityManager.find(ResourceOperationHistory.class, -// operationHistory.getId()); -// if (resourceOperationHistory.getStatus() == OperationRequestStatus.SUCCESS) { -// -// } -// StorageNode storageNode = findStorageNode(resourceOperationHistory.getResource()); -// storageNode.setOperationMode(StorageNode.OperationMode.NORMAL); -// } -// } + if (!(operationHistory instanceof ResourceOperationHistory)) { + return; + } + + ResourceOperationHistory resourceOperationHistory = entityManager.find(ResourceOperationHistory.class, + operationHistory.getId()); + if (resourceOperationHistory == null) { + return; + } + + if (isStorageNodeOperation(resourceOperationHistory.getOperationDefinition())) { + if (resourceOperationHistory.getOperationDefinition().getName().equals("updateKnownNodes")) { + handleUpdateKnownNodes(resourceOperationHistory); + } else if (operationHistory.getOperationDefinition().getName().equals("prepareForBootstrap")) { + handlePrepareForBootstrap(resourceOperationHistory); + } else if 
(operationHistory.getOperationDefinition().getName().equals("addNodeMaintenance")) { + handleAddNodeMaintenance(resourceOperationHistory); + } + } + } + + private void handlePrepareForBootstrap(ResourceOperationHistory resourceOperationHistory) { + StorageNode newStorageNode = findStorageNode(resourceOperationHistory.getResource()); + switch (resourceOperationHistory.getStatus()) { + case INPROGRESS: + // nothing to do here + return; + case CANCELED: + log.error("The operation [prepareForBootstrap] was canceled for " + newStorageNode + + ". Deployment of the new storage node cannot proceed."); + // TODO update workflow status (the status needs to be accessible in the UI) + return; + case FAILURE: + log.error("The operation [preparedForBootstrap] failed for " + newStorageNode + ". The reported " + + "failure is: " + resourceOperationHistory.getErrorMessage()); + log.error("Deployment of the new storage node cannot proceed."); + // TODO update workflow status (the status needs to be accessible in the UI) + return; + default: // SUCCESS + // Nothing to do because we wait for the C* driver to notify us that the + // storage node has joined the cluster before we proceed with the work flow. + } + } + + private void handleUpdateKnownNodes(ResourceOperationHistory resourceOperationHistory) { + StorageNode storageNode = findStorageNode(resourceOperationHistory.getResource()); + switch (resourceOperationHistory.getStatus()) { + case INPROGRESS: + // nothing to do here + return; + case CANCELED: + log.error("The operation [updateKnownNodes] was canceled for " + storageNode + + ". Deployment of the new storage node cannot proceed."); + // TODO update workflow status (the status needs to be accessible in the UI) + return; + case FAILURE: + log.error("The operation [updateKnownNodes] failed for " + storageNode + ". 
The reported " + + "failure is: " + resourceOperationHistory.getErrorMessage()); + log.error("Deployment of the new storage node cannot proceed."); + // TODO update workflow status (the status needs to be accessible in the UI) + return; + default: // SUCCESS + if (log.isInfoEnabled()) { + log.info("Finished announcing cluster nodes to " + storageNode); + } + storageNode.setOperationMode(StorageNode.OperationMode.ADD_NODE_MAINTENANCE); + Configuration parameters = resourceOperationHistory.getParameters(); + PropertyList addresses = parameters.getList("addresses"); + StorageNode nextNode = takeFromQueue(storageNode, StorageNode.OperationMode.ANNOUNCE); + + if (nextNode == null) { + log.info("Successfully announced new storage node to cluster"); + StorageNode installedNode = findStorageNodeToPrepareForBootstrap(addresses); + // Pass a copy of addresses to avoid a TransientObjectException + prepareNodeForBootstrap(installedNode, addresses.deepCopy(false)); + } else { + announceNewStorageNode(storageNode, nextNode, addresses.deepCopy(false)); + } + } + } + + private void handleAddNodeMaintenance(ResourceOperationHistory resourceOperationHistory) { + StorageNode storageNode = findStorageNode(resourceOperationHistory.getResource()); + switch (resourceOperationHistory.getStatus()) { + case INPROGRESS: + // nothing to do here + return; + case CANCELED: + log.error("The operation [addNodeMaintenance] was canceled for " + storageNode + ". This operation " + + "needs to be run on each storage node when a new node is added to the cluster."); + // TODO update workflow status (the status needs to be accessible in the UI) + return; + case FAILURE: + log.error("The operation [addNodeMaintenance] failed for " + storageNode + ". This operation " + + "needs to be run on each storage node when a new node is added to the cluster. 
The reported " + + "failure is: " + resourceOperationHistory.getErrorMessage()); + // TODO update workflow status (the status needs to be accessible in the UI) + return; + default: // SUCCESS + if (log.isInfoEnabled()) { + log.info("Finnished cluster maintenance for " + storageNode + " for addition of new node"); + } + storageNode.setOperationMode(StorageNode.OperationMode.NORMAL); + StorageNode nextNode = takeFromQueue(storageNode, StorageNode.OperationMode.ADD_NODE_MAINTENANCE); + + if (nextNode == null) { + log.info("Finished running cluster maintenance for addition of new node"); + } else { + Configuration parameters = resourceOperationHistory.getParameters(); + boolean runRepair = parameters.getSimple(RUN_REPAIR_PROPERTY).getBooleanValue(); + PropertyList seedsList = parameters.getList(SEEDS_LIST).deepCopy(false); + performAddNodeMaintenance(nextNode, runRepair, seedsList); + } + } }
private StorageNode findStorageNode(Resource resource) { @@ -61,24 +252,63 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa return null; }
- @Override - @Asynchronous - public void handleGroupOperationUpdateIfNecessary(GroupOperationHistory groupOperationHistory) { - if (isStorageNodeOperation(groupOperationHistory.getOperationDefinition())) { - if (groupOperationHistory.getOperationDefinition().getName().equals("updateKnownNodes")) { - if (groupOperationHistory.getStatus() == OperationRequestStatus.SUCCESS) { - log.info("New storage has been successfully announced to the storage node cluster."); - storageNodeManager.prepareNewNodesForBootstrap(); - } else if (groupOperationHistory.getStatus() == OperationRequestStatus.FAILURE) { - log.warn("Failed to announce new storage node to the cluster. It cannot join the cluster until " + - "it has been announced to existing cluster nodes."); - } else if (groupOperationHistory.getStatus() == OperationRequestStatus.CANCELED) { - log.warn("New storage node has not been announced to the cluster. The group operation " + - groupOperationHistory.getOperationDefinition().getName() + " has been canceled. The new node " + - "cannot join the cluster until it has been announced to existing cluster nodes."); - } + private StorageNode findStorageNodeToPrepareForBootstrap(PropertyList addressList) { + // It is possible that we could have more that one INSTALLED node. We want to make + // sure we grab the one that was just announced to the cluster. + Set<String> addresses = toSet(addressList); + List<StorageNode> installedNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, + StorageNode.class).setParameter("operationMode", StorageNode.OperationMode.INSTALLED).getResultList(); + + for (StorageNode installedNode : installedNodes) { + if (addresses.contains(installedNode.getAddress())) { + return installedNode; } } + // TODO What should we do in the very unlikely event that we do not find the IP address? 
+ throw new IllegalStateException("Failed to find storage node to be bootstrapped."); + } + + private Set<String> toSet(PropertyList propertyList) { + Set<String> set = new HashSet<String>(); + for (Property property : propertyList.getList()) { + PropertySimple simple = (PropertySimple) property; + set.add(simple.getStringValue()); + } + return set; + } + + private void prepareNodeForBootstrap(StorageNode storageNode, PropertyList addresses) { + if (log.isInfoEnabled()) { + log.info("Preparing to bootstrap " + storageNode + " into cluster..."); + } + + ResourceOperationSchedule schedule = new ResourceOperationSchedule(); + schedule.setResource(storageNode.getResource()); + schedule.setJobTrigger(JobTrigger.createNowTrigger()); + schedule.setSubject(subjectManager.getOverlord()); + schedule.setOperationName("prepareForBootstrap"); + + StorageClusterSettings clusterSettings = storageClusterSettingsManager.getClusterSettings( + subjectManager.getOverlord()); + Configuration parameters = new Configuration(); + parameters.put(new PropertySimple("cqlPort", clusterSettings.getCqlPort())); + parameters.put(new PropertySimple("gossipPort", clusterSettings.getGossipPort())); + parameters.put(addresses); + + schedule.setParameters(parameters); + + operationManager.scheduleResourceOperation(subjectManager.getOverlord(), schedule); + } + + private StorageNode takeFromQueue(StorageNode lastTaken, StorageNode.OperationMode queue) { + List<StorageNode> nodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE_EXCLUDING, + StorageNode.class).setParameter("operationMode", queue).setParameter("storageNode", lastTaken) + .getResultList(); + + if (nodes.isEmpty()) { + return null; + } + return nodes.get(0); }
private boolean isStorageNodeOperation(OperationDefinition operationDefinition) { @@ -87,4 +317,98 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa resourceType.getPlugin().equals(STORAGE_NODE_PLUGIN_NAME); }
+ private boolean updateSchemaIfNecessary(List<StorageNode> storageNodes) { + // The previous cluster size will be the current size - 1 since we currently only + // support deploying one node at a time. + int previousClusterSize = storageNodes.size() - 1; + boolean isRepairNeeded; + int replicationFactor = 1; + + if (previousClusterSize >= 4) { + // At 4 nodes we increase the RF to 3. We are not increasing the RF beyond + // that for additional nodes; so, there is no need to run repair if we are + // expanding from a 4 node cluster since the RF remains the same. + isRepairNeeded = false; + } else if (previousClusterSize == 1) { + // The RF will increase since we are going from a single to a multi-node + // cluster; therefore, we want to run repair. + isRepairNeeded = true; + replicationFactor = 2; + } else if (previousClusterSize == 2) { + if (storageNodes.size() > 3) { + // If we go from 2 to > 3 nodes we will increase the RF to 3; therefore + // we want to run repair. + isRepairNeeded = true; + replicationFactor = 3; + } else { + // If we go from 2 to 3 nodes, we keep the RF at 2 so there is no need + // to run repair. + isRepairNeeded = false; + } + } else if (previousClusterSize == 3) { + // We are increasing the cluster size > 3 which means the RF will be + // updated to 3; therefore, we want to run repair. + isRepairNeeded = true; + replicationFactor = 3; + } else { + // If we cluster size of zero, then something is really screwed up. It + // should always be > 0. 
+ throw new RuntimeException("The previous cluster size should never be zero at this point"); + } + + if (isRepairNeeded) { +// String username = getRequiredStorageProperty(USERNAME_PROPERTY); +// String password = getRequiredStorageProperty(PASSWORD_PROPERTY); +// SchemaManager schemaManager = new SchemaManager(username, password, storageNodes); +// try{ +// schemaManager.updateTopology(); +// } catch (Exception e) { +// log.error("An error occurred while applying schema topology changes", e); +// } + + updateReplicationFactor(replicationFactor); + if (previousClusterSize == 1) { + updateGCGraceSeconds(691200); // 8 days + } + } + + return isRepairNeeded; + } + + private void updateReplicationFactor(int replicationFactor) { + StorageSession session = storageClientManager.getSession(); + session.execute("ALTER KEYSPACE rhq WITH replication = {'class': 'SimpleStrategy', 'replication_factor': " + + replicationFactor + "}"); + session.execute("ALTER KEYSPACE system_auth WITH replication = {'class': 'SimpleStrategy', " + + "'replication_factor': " + replicationFactor + "}"); + } + + private void updateGCGraceSeconds(int seconds) { + StorageSession session = storageClientManager.getSession(); + session.execute("ALTER COLUMNFAMILY rhq.metrics_index WITH gc_grace_seconds = " + seconds); + session.execute("ALTER COLUMNFAMILY rhq.raw_metrics WITH gc_grace_seconds = " + seconds); + session.execute("ALTER COLUMNFAMILY rhq.one_hour_metrics WITH gc_grace_seconds = " + seconds); + session.execute("ALTER COLUMNFAMILY rhq.six_hour_metrics WITH gc_grace_seconds = " + seconds); + session.execute("ALTER COLUMNFAMILY rhq.twenty_four_hour_metrics WITH gc_grace_seconds = " + seconds); + session.execute("ALTER COLUMNFAMILY rhq.schema_version WITH gc_grace_seconds = " + seconds); + } + + private String getRequiredStorageProperty(String property) { + String value = System.getProperty(property); + if (StringUtil.isEmpty(property)) { + throw new IllegalStateException("The system property [" 
+ property + "] is not set. The RHQ " + + "server will not be able connect to the RHQ storage node(s). This property should be defined " + + "in rhq-server.properties."); + } + return value; + } + + private PropertyList createPropertyListOfAddresses(String propertyName, List<StorageNode> nodes) { + PropertyList list = new PropertyList(propertyName); + for (StorageNode storageNode : nodes) { + list.add(new PropertySimple("address", storageNode.getAddress())); + } + return list; + } + } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java new file mode 100644 index 0000000..fcdcd3e --- /dev/null +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java @@ -0,0 +1,22 @@ +package org.rhq.enterprise.server.storage; + +import java.net.InetAddress; + +import javax.ejb.Asynchronous; + +import org.rhq.core.domain.cloud.StorageNode; +import org.rhq.core.domain.configuration.PropertyList; +import org.rhq.core.domain.operation.OperationHistory; + +/** + * @author John Sanda + */ +public interface StorageNodeOperationsHandlerLocal { + + @Asynchronous + void handleOperationUpdateIfNecessary(OperationHistory operationHistory); + + void announceNewStorageNode(StorageNode newStorageNode, StorageNode clusterNode, PropertyList addresses); + + void performAddNodeMaintenance(InetAddress storageNodeAddress); +} diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/LookupUtil.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/LookupUtil.java index d597363..5aded7d 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/LookupUtil.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/LookupUtil.java @@ 
-188,6 +188,8 @@ import org.rhq.enterprise.server.scheduler.SchedulerBean; import org.rhq.enterprise.server.scheduler.SchedulerLocal; import org.rhq.enterprise.server.search.SavedSearchManagerBean; import org.rhq.enterprise.server.search.SavedSearchManagerLocal; +import org.rhq.enterprise.server.storage.StorageNodeOperationsHandlerLocal; +import org.rhq.enterprise.server.storage.StorageNodeOperationsHandlerBean; import org.rhq.enterprise.server.subsystem.AlertSubsystemManagerBean; import org.rhq.enterprise.server.subsystem.AlertSubsystemManagerLocal; import org.rhq.enterprise.server.subsystem.ConfigurationSubsystemManagerBean; @@ -487,6 +489,10 @@ public final class LookupUtil { return lookupLocal(StorageNodeManagerBean.class); }
+ public static StorageNodeOperationsHandlerLocal getStorageNodeOperationsHandler() { + return lookupLocal(StorageNodeOperationsHandlerBean.class); + } + public static ClusterManagerLocal getClusterManager() { return lookupLocal(ClusterManagerBean.class); } diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java index 2cb9501..e76cfa0 100644 --- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java @@ -240,7 +240,7 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper private OperationResult updateKnownNodes(Configuration params) { OperationResult result = new OperationResult();
- PropertyList propertyList = params.getList("ipAddresses"); + PropertyList propertyList = params.getList("addresses"); Set<String> ipAddresses = new HashSet<String>();
for (Property property : propertyList.getList()) { @@ -305,7 +305,7 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper String address = pluginConfig.getSimpleValue("host"); int cqlPort = Integer.parseInt(params.getSimpleValue("cqlPort")); int gossipPort = Integer.parseInt(params.getSimpleValue("gossipPort")); - List<String> addresses = getAddresses(params.getList("storageNodeIPAddresses")); + List<String> addresses = getAddresses(params.getList("addresses"));
// Make sure this node's address is not in the list; otherwise, it // won't bootstrap properly. diff --git a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml index 8df8ace..a2d04d0 100644 --- a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml +++ b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml @@ -99,8 +99,8 @@
<operation name="updateKnownNodes"> <parameters> - <c:list-property name="ipAddresses"> - <c:simple-property name="ipAddress"/> + <c:list-property name="addresses"> + <c:simple-property name="address"/> </c:list-property> </parameters> </operation> @@ -109,8 +109,8 @@ <parameters> <c:simple-property name="cqlPort" type="integer" displayName="CQL Port"/> <c:simple-property name="gossipPort" type="integer"/> - <c:list-property name="storageNodeIPAddresses" displayName="Storage Node IP Addresses"> - <c:simple-property name="storageNodeIPAddress"/> + <c:list-property name="addresses" displayName="Storage Node IP Addresses"> + <c:simple-property name="address"/> </c:list-property> </parameters> </operation>
commit 6fb63e91182132eb26701136ed56f2c1d077a076 Author: John Sanda jsanda@redhat.com Date: Thu Aug 8 19:41:15 2013 -0400
chema manager is not usable inside the running server
The schema manager classes loads XML files from the classpath. It expects the URL protocol to be either file or jar. When running inside the server, the protocol is VFS. I put a TODO in the code to update it and added an else block to throw an exception for any other protocols that are not supported. Previously UpdateFolder was basically failing silently and late which made debugging difficult.
diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/UpdateFolder.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/UpdateFolder.java index 152a757..84cb515 100644 --- a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/UpdateFolder.java +++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/UpdateFolder.java @@ -114,6 +114,15 @@ class UpdateFolder { files.add(new UpdateFile(entry)); } } + } else if (resourceFolderURL.getProtocol().equals("vfs")) { + // TODO need to add support for VFS if going to use inside EAP + throw new RuntimeException("The URL protocol [" + resourceFolderURL.getProtocol() + "] is not " + + "supported"); + } else { + // In the event we get another protocol that we do not recognize, throw an + // exception instead of failing silently. + throw new RuntimeException("The URL protocol [" + resourceFolderURL.getProtocol() + "] is not " + + "supported"); }
Collections.sort(files, new Comparator<UpdateFile>() {
commit 90eb9a6955114a062f36f78b19e3564d479c4fa8 Merge: fdd7821 1ad43fd Author: Mike Thompson mithomps@redhat.com Date: Thu Aug 8 15:15:07 2013 -0700
Merge branch 'mtho11/pre4.9'
commit fdd78210fe1675588c8e57bf7580f056a7cd581b Author: Mike Thompson mithomps@redhat.com Date: Thu Aug 8 10:47:22 2013 -0700
IE UI fixes
diff --git a/modules/enterprise/gui/coregui/src/main/webapp/CoreGUI.html b/modules/enterprise/gui/coregui/src/main/webapp/CoreGUI.html index 96e50de..32c5789 100644 --- a/modules/enterprise/gui/coregui/src/main/webapp/CoreGUI.html +++ b/modules/enterprise/gui/coregui/src/main/webapp/CoreGUI.html @@ -27,6 +27,29 @@ }
</script> + <script type="text/javascript"> + (function() { + var method; + var noop = function () {}; + var methods = [ + 'assert', 'clear', 'count', 'debug', 'dir', 'dirxml', 'error', + 'exception', 'group', 'groupCollapsed', 'groupEnd', 'info', 'log', + 'markTimeline', 'profile', 'profileEnd', 'table', 'time', 'timeEnd', + 'timeStamp', 'trace', 'warn' + ]; + var length = methods.length; + var console = (window.console = window.console || {}); + + while (length--) { + method = methods[length]; + + // Only stub undefined methods. + if (!console[method]) { + console[method] = noop; + } + } + }()); + </script>
<title>RHQ</title> <link rel="icon" type="image/png" href="/images/favicon.png" /> diff --git a/modules/enterprise/gui/coregui/src/main/webapp/js/jquery-1.9.1.js b/modules/enterprise/gui/coregui/src/main/webapp/js/jquery-1.9.1.js deleted file mode 100644 index bc8f36a..0000000 --- a/modules/enterprise/gui/coregui/src/main/webapp/js/jquery-1.9.1.js +++ /dev/null @@ -1,9597 +0,0 @@ -/*! - * jQuery JavaScript Library v1.9.1 - * http://jquery.com/ - * - * Includes Sizzle.js - * http://sizzlejs.com/ - * - * Copyright 2005, 2012 jQuery Foundation, Inc. and other contributors - * Released under the MIT license - * http://jquery.org/license - * - * Date: 2013-2-4 - */ -(function( window, undefined ) { - -// Can't do this because several apps including ASP.NET trace -// the stack via arguments.caller.callee and Firefox dies if -// you try to trace through "use strict" call chains. (#13335) -// Support: Firefox 18+ -//"use strict"; - var - // The deferred used on DOM ready - readyList, - - // A central reference to the root jQuery(document) - rootjQuery, - - // Support: IE<9 - // For `typeof node.method` instead of `node.method !== undefined` - core_strundefined = typeof undefined, - - // Use the correct document accordingly with window argument (sandbox) - document = window.document, - location = window.location, - - // Map over jQuery in case of overwrite - _jQuery = window.jQuery, - - // Map over the $ in case of overwrite - _$ = window.$, - - // [[Class]] -> type pairs - class2type = {}, - - // List of deleted data cache ids, so we can reuse them - core_deletedIds = [], - - core_version = "1.9.1", - - // Save a reference to some core methods - core_concat = core_deletedIds.concat, - core_push = core_deletedIds.push, - core_slice = core_deletedIds.slice, - core_indexOf = core_deletedIds.indexOf, - core_toString = class2type.toString, - core_hasOwn = class2type.hasOwnProperty, - core_trim = core_version.trim, - - // Define a local copy of jQuery - jQuery = function( 
selector, context ) { - // The jQuery object is actually just the init constructor 'enhanced' - return new jQuery.fn.init( selector, context, rootjQuery ); - }, - - // Used for matching numbers - core_pnum = /[+-]?(?:\d*.|)\d+(?:[eE][+-]?\d+|)/.source, - - // Used for splitting on whitespace - core_rnotwhite = /\S+/g, - - // Make sure we trim BOM and NBSP (here's looking at you, Safari 5.0 and IE) - rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g, - - // A simple way to check for HTML strings - // Prioritize #id over <tag> to avoid XSS via location.hash (#9521) - // Strict HTML recognition (#11290: must start with <) - rquickExpr = /^(?:(<[\w\W]+>)[^>]*|#([\w-]*))$/, - - // Match a standalone tag - rsingleTag = /^<(\w+)\s*/?>(?:</\1>|)$/, - - // JSON RegExp - rvalidchars = /^[],:{}\s]*$/, - rvalidbraces = /(?:^|:|,)(?:\s*[)+/g, - rvalidescape = /\(?:["\/bfnrt]|u[\da-fA-F]{4})/g, - rvalidtokens = /"[^"\\r\n]*"|true|false|null|-?(?:\d+.|)\d+(?:[eE][+-]?\d+|)/g, - - // Matches dashed string for camelizing - rmsPrefix = /^-ms-/, - rdashAlpha = /-([\da-z])/gi, - - // Used by jQuery.camelCase as callback to replace() - fcamelCase = function( all, letter ) { - return letter.toUpperCase(); - }, - - // The ready event handler - completed = function( event ) { - - // readyState === "complete" is good enough for us to call the dom ready in oldIE - if ( document.addEventListener || event.type === "load" || document.readyState === "complete" ) { - detach(); - jQuery.ready(); - } - }, - // Clean-up method for dom ready events - detach = function() { - if ( document.addEventListener ) { - document.removeEventListener( "DOMContentLoaded", completed, false ); - window.removeEventListener( "load", completed, false ); - - } else { - document.detachEvent( "onreadystatechange", completed ); - window.detachEvent( "onload", completed ); - } - }; - - jQuery.fn = jQuery.prototype = { - // The current version of jQuery being used - jquery: core_version, - - constructor: jQuery, - init: 
function( selector, context, rootjQuery ) { - var match, elem; - - // HANDLE: $(""), $(null), $(undefined), $(false) - if ( !selector ) { - return this; - } - - // Handle HTML strings - if ( typeof selector === "string" ) { - if ( selector.charAt(0) === "<" && selector.charAt( selector.length - 1 ) === ">" && selector.length >= 3 ) { - // Assume that strings that start and end with <> are HTML and skip the regex check - match = [ null, selector, null ]; - - } else { - match = rquickExpr.exec( selector ); - } - - // Match html or make sure no context is specified for #id - if ( match && (match[1] || !context) ) { - - // HANDLE: $(html) -> $(array) - if ( match[1] ) { - context = context instanceof jQuery ? context[0] : context; - - // scripts is true for back-compat - jQuery.merge( this, jQuery.parseHTML( - match[1], - context && context.nodeType ? context.ownerDocument || context : document, - true - ) ); - - // HANDLE: $(html, props) - if ( rsingleTag.test( match[1] ) && jQuery.isPlainObject( context ) ) { - for ( match in context ) { - // Properties of context are called as methods if possible - if ( jQuery.isFunction( this[ match ] ) ) { - this[ match ]( context[ match ] ); - - // ...and otherwise set as attributes - } else { - this.attr( match, context[ match ] ); - } - } - } - - return this; - - // HANDLE: $(#id) - } else { - elem = document.getElementById( match[2] ); - - // Check parentNode to catch when Blackberry 4.6 returns - // nodes that are no longer in the document #6963 - if ( elem && elem.parentNode ) { - // Handle the case where IE and Opera return items - // by name instead of ID - if ( elem.id !== match[2] ) { - return rootjQuery.find( selector ); - } - - // Otherwise, we inject the element directly into the jQuery object - this.length = 1; - this[0] = elem; - } - - this.context = document; - this.selector = selector; - return this; - } - - // HANDLE: $(expr, $(...)) - } else if ( !context || context.jquery ) { - return ( context || rootjQuery 
).find( selector ); - - // HANDLE: $(expr, context) - // (which is just equivalent to: $(context).find(expr) - } else { - return this.constructor( context ).find( selector ); - } - - // HANDLE: $(DOMElement) - } else if ( selector.nodeType ) { - this.context = this[0] = selector; - this.length = 1; - return this; - - // HANDLE: $(function) - // Shortcut for document ready - } else if ( jQuery.isFunction( selector ) ) { - return rootjQuery.ready( selector ); - } - - if ( selector.selector !== undefined ) { - this.selector = selector.selector; - this.context = selector.context; - } - - return jQuery.makeArray( selector, this ); - }, - - // Start with an empty selector - selector: "", - - // The default length of a jQuery object is 0 - length: 0, - - // The number of elements contained in the matched element set - size: function() { - return this.length; - }, - - toArray: function() { - return core_slice.call( this ); - }, - - // Get the Nth element in the matched element set OR - // Get the whole matched element set as a clean array - get: function( num ) { - return num == null ? - - // Return a 'clean' array - this.toArray() : - - // Return just the object - ( num < 0 ? this[ this.length + num ] : this[ num ] ); - }, - - // Take an array of elements and push it onto the stack - // (returning the new matched element set) - pushStack: function( elems ) { - - // Build a new jQuery matched element set - var ret = jQuery.merge( this.constructor(), elems ); - - // Add the old object onto the stack (as a reference) - ret.prevObject = this; - ret.context = this.context; - - // Return the newly-formed element set - return ret; - }, - - // Execute a callback for every element in the matched set. - // (You can seed the arguments with an array of args, but this is - // only used internally.) 
- each: function( callback, args ) { - return jQuery.each( this, callback, args ); - }, - - ready: function( fn ) { - // Add the callback - jQuery.ready.promise().done( fn ); - - return this; - }, - - slice: function() { - return this.pushStack( core_slice.apply( this, arguments ) ); - }, - - first: function() { - return this.eq( 0 ); - }, - - last: function() { - return this.eq( -1 ); - }, - - eq: function( i ) { - var len = this.length, - j = +i + ( i < 0 ? len : 0 ); - return this.pushStack( j >= 0 && j < len ? [ this[j] ] : [] ); - }, - - map: function( callback ) { - return this.pushStack( jQuery.map(this, function( elem, i ) { - return callback.call( elem, i, elem ); - })); - }, - - end: function() { - return this.prevObject || this.constructor(null); - }, - - // For internal use only. - // Behaves like an Array's method, not like a jQuery method. - push: core_push, - sort: [].sort, - splice: [].splice - }; - -// Give the init function the jQuery prototype for later instantiation - jQuery.fn.init.prototype = jQuery.fn; - - jQuery.extend = jQuery.fn.extend = function() { - var src, copyIsArray, copy, name, options, clone, - target = arguments[0] || {}, - i = 1, - length = arguments.length, - deep = false; - - // Handle a deep copy situation - if ( typeof target === "boolean" ) { - deep = target; - target = arguments[1] || {}; - // skip the boolean and the target - i = 2; - } - - // Handle case when target is a string or something (possible in deep copy) - if ( typeof target !== "object" && !jQuery.isFunction(target) ) { - target = {}; - } - - // extend jQuery itself if only one argument is passed - if ( length === i ) { - target = this; - --i; - } - - for ( ; i < length; i++ ) { - // Only deal with non-null/undefined values - if ( (options = arguments[ i ]) != null ) { - // Extend the base object - for ( name in options ) { - src = target[ name ]; - copy = options[ name ]; - - // Prevent never-ending loop - if ( target === copy ) { - continue; - } - - // 
Recurse if we're merging plain objects or arrays - if ( deep && copy && ( jQuery.isPlainObject(copy) || (copyIsArray = jQuery.isArray(copy)) ) ) { - if ( copyIsArray ) { - copyIsArray = false; - clone = src && jQuery.isArray(src) ? src : []; - - } else { - clone = src && jQuery.isPlainObject(src) ? src : {}; - } - - // Never move original objects, clone them - target[ name ] = jQuery.extend( deep, clone, copy ); - - // Don't bring in undefined values - } else if ( copy !== undefined ) { - target[ name ] = copy; - } - } - } - } - - // Return the modified object - return target; - }; - - jQuery.extend({ - noConflict: function( deep ) { - if ( window.$ === jQuery ) { - window.$ = _$; - } - - if ( deep && window.jQuery === jQuery ) { - window.jQuery = _jQuery; - } - - return jQuery; - }, - - // Is the DOM ready to be used? Set to true once it occurs. - isReady: false, - - // A counter to track how many items to wait for before - // the ready event fires. See #6781 - readyWait: 1, - - // Hold (or release) the ready event - holdReady: function( hold ) { - if ( hold ) { - jQuery.readyWait++; - } else { - jQuery.ready( true ); - } - }, - - // Handle when the DOM is ready - ready: function( wait ) { - - // Abort if there are pending holds or we're already ready - if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { - return; - } - - // Make sure body exists, at least, in case IE gets a little overzealous (ticket #5443). - if ( !document.body ) { - return setTimeout( jQuery.ready ); - } - - // Remember that the DOM is ready - jQuery.isReady = true; - - // If a normal DOM Ready event fired, decrement, and wait if need be - if ( wait !== true && --jQuery.readyWait > 0 ) { - return; - } - - // If there are functions bound, to execute - readyList.resolveWith( document, [ jQuery ] ); - - // Trigger any bound ready events - if ( jQuery.fn.trigger ) { - jQuery( document ).trigger("ready").off("ready"); - } - }, - - // See test/unit/core.js for details concerning isFunction. 
- // Since version 1.3, DOM methods and functions like alert - // aren't supported. They return false on IE (#2968). - isFunction: function( obj ) { - return jQuery.type(obj) === "function"; - }, - - isArray: Array.isArray || function( obj ) { - return jQuery.type(obj) === "array"; - }, - - isWindow: function( obj ) { - return obj != null && obj == obj.window; - }, - - isNumeric: function( obj ) { - return !isNaN( parseFloat(obj) ) && isFinite( obj ); - }, - - type: function( obj ) { - if ( obj == null ) { - return String( obj ); - } - return typeof obj === "object" || typeof obj === "function" ? - class2type[ core_toString.call(obj) ] || "object" : - typeof obj; - }, - - isPlainObject: function( obj ) { - // Must be an Object. - // Because of IE, we also have to check the presence of the constructor property. - // Make sure that DOM nodes and window objects don't pass through, as well - if ( !obj || jQuery.type(obj) !== "object" || obj.nodeType || jQuery.isWindow( obj ) ) { - return false; - } - - try { - // Not own constructor property must be Object - if ( obj.constructor && - !core_hasOwn.call(obj, "constructor") && - !core_hasOwn.call(obj.constructor.prototype, "isPrototypeOf") ) { - return false; - } - } catch ( e ) { - // IE8,9 Will throw exceptions on certain host objects #9897 - return false; - } - - // Own properties are enumerated firstly, so to speed up, - // if last one is own, then all properties are own. 
- - var key; - for ( key in obj ) {} - - return key === undefined || core_hasOwn.call( obj, key ); - }, - - isEmptyObject: function( obj ) { - var name; - for ( name in obj ) { - return false; - } - return true; - }, - - error: function( msg ) { - throw new Error( msg ); - }, - - // data: string of html - // context (optional): If specified, the fragment will be created in this context, defaults to document - // keepScripts (optional): If true, will include scripts passed in the html string - parseHTML: function( data, context, keepScripts ) { - if ( !data || typeof data !== "string" ) { - return null; - } - if ( typeof context === "boolean" ) { - keepScripts = context; - context = false; - } - context = context || document; - - var parsed = rsingleTag.exec( data ), - scripts = !keepScripts && []; - - // Single tag - if ( parsed ) { - return [ context.createElement( parsed[1] ) ]; - } - - parsed = jQuery.buildFragment( [ data ], context, scripts ); - if ( scripts ) { - jQuery( scripts ).remove(); - } - return jQuery.merge( [], parsed.childNodes ); - }, - - parseJSON: function( data ) { - // Attempt to parse using the native JSON parser first - if ( window.JSON && window.JSON.parse ) { - return window.JSON.parse( data ); - } - - if ( data === null ) { - return data; - } - - if ( typeof data === "string" ) { - - // Make sure leading/trailing whitespace is removed (IE can't handle it) - data = jQuery.trim( data ); - - if ( data ) { - // Make sure the incoming data is actual JSON - // Logic borrowed from http://json.org/json2.js - if ( rvalidchars.test( data.replace( rvalidescape, "@" ) - .replace( rvalidtokens, "]" ) - .replace( rvalidbraces, "")) ) { - - return ( new Function( "return " + data ) )(); - } - } - } - - jQuery.error( "Invalid JSON: " + data ); - }, - - // Cross-browser xml parsing - parseXML: function( data ) { - var xml, tmp; - if ( !data || typeof data !== "string" ) { - return null; - } - try { - if ( window.DOMParser ) { // Standard - tmp = new 
DOMParser(); - xml = tmp.parseFromString( data , "text/xml" ); - } else { // IE - xml = new ActiveXObject( "Microsoft.XMLDOM" ); - xml.async = "false"; - xml.loadXML( data ); - } - } catch( e ) { - xml = undefined; - } - if ( !xml || !xml.documentElement || xml.getElementsByTagName( "parsererror" ).length ) { - jQuery.error( "Invalid XML: " + data ); - } - return xml; - }, - - noop: function() {}, - - // Evaluates a script in a global context - // Workarounds based on findings by Jim Driscoll - // http://weblogs.java.net/blog/driscoll/archive/2009/09/08/eval-javascript-glo... - globalEval: function( data ) { - if ( data && jQuery.trim( data ) ) { - // We use execScript on Internet Explorer - // We use an anonymous function so that context is window - // rather than jQuery in Firefox - ( window.execScript || function( data ) { - window[ "eval" ].call( window, data ); - } )( data ); - } - }, - - // Convert dashed to camelCase; used by the css and data modules - // Microsoft forgot to hump their vendor prefix (#9572) - camelCase: function( string ) { - return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); - }, - - nodeName: function( elem, name ) { - return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); - }, - - // args is for internal usage only - each: function( obj, callback, args ) { - var value, - i = 0, - length = obj.length, - isArray = isArraylike( obj ); - - if ( args ) { - if ( isArray ) { - for ( ; i < length; i++ ) { - value = callback.apply( obj[ i ], args ); - - if ( value === false ) { - break; - } - } - } else { - for ( i in obj ) { - value = callback.apply( obj[ i ], args ); - - if ( value === false ) { - break; - } - } - } - - // A special, fast, case for the most common use of each - } else { - if ( isArray ) { - for ( ; i < length; i++ ) { - value = callback.call( obj[ i ], i, obj[ i ] ); - - if ( value === false ) { - break; - } - } - } else { - for ( i in obj ) { - value = callback.call( obj[ i ], i, 
obj[ i ] ); - - if ( value === false ) { - break; - } - } - } - } - - return obj; - }, - - // Use native String.trim function wherever possible - trim: core_trim && !core_trim.call("\uFEFF\xA0") ? - function( text ) { - return text == null ? - "" : - core_trim.call( text ); - } : - - // Otherwise use our own trimming functionality - function( text ) { - return text == null ? - "" : - ( text + "" ).replace( rtrim, "" ); - }, - - // results is for internal usage only - makeArray: function( arr, results ) { - var ret = results || []; - - if ( arr != null ) { - if ( isArraylike( Object(arr) ) ) { - jQuery.merge( ret, - typeof arr === "string" ? - [ arr ] : arr - ); - } else { - core_push.call( ret, arr ); - } - } - - return ret; - }, - - inArray: function( elem, arr, i ) { - var len; - - if ( arr ) { - if ( core_indexOf ) { - return core_indexOf.call( arr, elem, i ); - } - - len = arr.length; - i = i ? i < 0 ? Math.max( 0, len + i ) : i : 0; - - for ( ; i < len; i++ ) { - // Skip accessing in sparse arrays - if ( i in arr && arr[ i ] === elem ) { - return i; - } - } - } - - return -1; - }, - - merge: function( first, second ) { - var l = second.length, - i = first.length, - j = 0; - - if ( typeof l === "number" ) { - for ( ; j < l; j++ ) { - first[ i++ ] = second[ j ]; - } - } else { - while ( second[j] !== undefined ) { - first[ i++ ] = second[ j++ ]; - } - } - - first.length = i; - - return first; - }, - - grep: function( elems, callback, inv ) { - var retVal, - ret = [], - i = 0, - length = elems.length; - inv = !!inv; - - // Go through the array, only saving the items - // that pass the validator function - for ( ; i < length; i++ ) { - retVal = !!callback( elems[ i ], i ); - if ( inv !== retVal ) { - ret.push( elems[ i ] ); - } - } - - return ret; - }, - - // arg is for internal usage only - map: function( elems, callback, arg ) { - var value, - i = 0, - length = elems.length, - isArray = isArraylike( elems ), - ret = []; - - // Go through the array, translating 
each of the items to their - if ( isArray ) { - for ( ; i < length; i++ ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret[ ret.length ] = value; - } - } - - // Go through every key on the object, - } else { - for ( i in elems ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret[ ret.length ] = value; - } - } - } - - // Flatten any nested arrays - return core_concat.apply( [], ret ); - }, - - // A global GUID counter for objects - guid: 1, - - // Bind a function to a context, optionally partially applying any - // arguments. - proxy: function( fn, context ) { - var args, proxy, tmp; - - if ( typeof context === "string" ) { - tmp = fn[ context ]; - context = fn; - fn = tmp; - } - - // Quick check to determine if target is callable, in the spec - // this throws a TypeError, but we will just return undefined. - if ( !jQuery.isFunction( fn ) ) { - return undefined; - } - - // Simulated bind - args = core_slice.call( arguments, 2 ); - proxy = function() { - return fn.apply( context || this, args.concat( core_slice.call( arguments ) ) ); - }; - - // Set the guid of unique handler to the same of original handler, so it can be removed - proxy.guid = fn.guid = fn.guid || jQuery.guid++; - - return proxy; - }, - - // Multifunctional method to get and set values of a collection - // The value/s can optionally be executed if it's a function - access: function( elems, fn, key, value, chainable, emptyGet, raw ) { - var i = 0, - length = elems.length, - bulk = key == null; - - // Sets many values - if ( jQuery.type( key ) === "object" ) { - chainable = true; - for ( i in key ) { - jQuery.access( elems, fn, i, key[i], true, emptyGet, raw ); - } - - // Sets one value - } else if ( value !== undefined ) { - chainable = true; - - if ( !jQuery.isFunction( value ) ) { - raw = true; - } - - if ( bulk ) { - // Bulk operations run against the entire set - if ( raw ) { - fn.call( elems, value ); - fn = null; - - // ...except when 
executing function values - } else { - bulk = fn; - fn = function( elem, key, value ) { - return bulk.call( jQuery( elem ), value ); - }; - } - } - - if ( fn ) { - for ( ; i < length; i++ ) { - fn( elems[i], key, raw ? value : value.call( elems[i], i, fn( elems[i], key ) ) ); - } - } - } - - return chainable ? - elems : - - // Gets - bulk ? - fn.call( elems ) : - length ? fn( elems[0], key ) : emptyGet; - }, - - now: function() { - return ( new Date() ).getTime(); - } - }); - - jQuery.ready.promise = function( obj ) { - if ( !readyList ) { - - readyList = jQuery.Deferred(); - - // Catch cases where $(document).ready() is called after the browser event has already occurred. - // we once tried to use readyState "interactive" here, but it caused issues like the one - // discovered by ChrisS here: http://bugs.jquery.com/ticket/12282#comment:15 - if ( document.readyState === "complete" ) { - // Handle it asynchronously to allow scripts the opportunity to delay ready - setTimeout( jQuery.ready ); - - // Standards-based browsers support DOMContentLoaded - } else if ( document.addEventListener ) { - // Use the handy event callback - document.addEventListener( "DOMContentLoaded", completed, false ); - - // A fallback to window.onload, that will always work - window.addEventListener( "load", completed, false ); - - // If IE event model is used - } else { - // Ensure firing before onload, maybe late but safe also for iframes - document.attachEvent( "onreadystatechange", completed ); - - // A fallback to window.onload, that will always work - window.attachEvent( "onload", completed ); - - // If IE and not a frame - // continually check to see if the document is ready - var top = false; - - try { - top = window.frameElement == null && document.documentElement; - } catch(e) {} - - if ( top && top.doScroll ) { - (function doScrollCheck() { - if ( !jQuery.isReady ) { - - try { - // Use the trick by Diego Perini - // http://javascript.nwbox.com/IEContentLoaded/ - 
top.doScroll("left"); - } catch(e) { - return setTimeout( doScrollCheck, 50 ); - } - - // detach all dom ready events - detach(); - - // and execute any waiting functions - jQuery.ready(); - } - })(); - } - } - } - return readyList.promise( obj ); - }; - -// Populate the class2type map - jQuery.each("Boolean Number String Function Array Date RegExp Object Error".split(" "), function(i, name) { - class2type[ "[object " + name + "]" ] = name.toLowerCase(); - }); - - function isArraylike( obj ) { - var length = obj.length, - type = jQuery.type( obj ); - - if ( jQuery.isWindow( obj ) ) { - return false; - } - - if ( obj.nodeType === 1 && length ) { - return true; - } - - return type === "array" || type !== "function" && - ( length === 0 || - typeof length === "number" && length > 0 && ( length - 1 ) in obj ); - } - -// All jQuery objects should point back to these - rootjQuery = jQuery(document); -// String to Object options format cache - var optionsCache = {}; - -// Convert String-formatted options into Object-formatted ones and store in cache - function createOptions( options ) { - var object = optionsCache[ options ] = {}; - jQuery.each( options.match( core_rnotwhite ) || [], function( _, flag ) { - object[ flag ] = true; - }); - return object; - } - - /* - * Create a callback list using the following parameters: - * - * options: an optional list of space-separated options that will change how - * the callback list behaves or a more traditional option object - * - * By default a callback list will act like an event callback list and can be - * "fired" multiple times. 
- * - * Possible options: - * - * once: will ensure the callback list can only be fired once (like a Deferred) - * - * memory: will keep track of previous values and will call any callback added - * after the list has been fired right away with the latest "memorized" - * values (like a Deferred) - * - * unique: will ensure a callback can only be added once (no duplicate in the list) - * - * stopOnFalse: interrupt callings when a callback returns false - * - */ - jQuery.Callbacks = function( options ) { - - // Convert options from String-formatted to Object-formatted if needed - // (we check in cache first) - options = typeof options === "string" ? - ( optionsCache[ options ] || createOptions( options ) ) : - jQuery.extend( {}, options ); - - var // Flag to know if list is currently firing - firing, - // Last fire value (for non-forgettable lists) - memory, - // Flag to know if list was already fired - fired, - // End of the loop when firing - firingLength, - // Index of currently firing callback (modified by remove if needed) - firingIndex, - // First callback to fire (used internally by add and fireWith) - firingStart, - // Actual callback list - list = [], - // Stack of fire calls for repeatable lists - stack = !options.once && [], - // Fire callbacks - fire = function( data ) { - memory = options.memory && data; - fired = true; - firingIndex = firingStart || 0; - firingStart = 0; - firingLength = list.length; - firing = true; - for ( ; list && firingIndex < firingLength; firingIndex++ ) { - if ( list[ firingIndex ].apply( data[ 0 ], data[ 1 ] ) === false && options.stopOnFalse ) { - memory = false; // To prevent further calls using add - break; - } - } - firing = false; - if ( list ) { - if ( stack ) { - if ( stack.length ) { - fire( stack.shift() ); - } - } else if ( memory ) { - list = []; - } else { - self.disable(); - } - } - }, - // Actual Callbacks object - self = { - // Add a callback or a collection of callbacks to the list - add: function() { - if ( 
list ) { - // First, we save the current length - var start = list.length; - (function add( args ) { - jQuery.each( args, function( _, arg ) { - var type = jQuery.type( arg ); - if ( type === "function" ) { - if ( !options.unique || !self.has( arg ) ) { - list.push( arg ); - } - } else if ( arg && arg.length && type !== "string" ) { - // Inspect recursively - add( arg ); - } - }); - })( arguments ); - // Do we need to add the callbacks to the - // current firing batch? - if ( firing ) { - firingLength = list.length; - // With memory, if we're not firing then - // we should call right away - } else if ( memory ) { - firingStart = start; - fire( memory ); - } - } - return this; - }, - // Remove a callback from the list - remove: function() { - if ( list ) { - jQuery.each( arguments, function( _, arg ) { - var index; - while( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { - list.splice( index, 1 ); - // Handle firing indexes - if ( firing ) { - if ( index <= firingLength ) { - firingLength--; - } - if ( index <= firingIndex ) { - firingIndex--; - } - } - } - }); - } - return this; - }, - // Check if a given callback is in the list. - // If no argument is given, return whether or not list has callbacks attached. - has: function( fn ) { - return fn ? jQuery.inArray( fn, list ) > -1 : !!( list && list.length ); - }, - // Remove all callbacks from the list - empty: function() { - list = []; - return this; - }, - // Have the list do nothing anymore - disable: function() { - list = stack = memory = undefined; - return this; - }, - // Is it disabled? - disabled: function() { - return !list; - }, - // Lock the list in its current state - lock: function() { - stack = undefined; - if ( !memory ) { - self.disable(); - } - return this; - }, - // Is it locked? - locked: function() { - return !stack; - }, - // Call all callbacks with the given context and arguments - fireWith: function( context, args ) { - args = args || []; - args = [ context, args.slice ? 
args.slice() : args ]; - if ( list && ( !fired || stack ) ) { - if ( firing ) { - stack.push( args ); - } else { - fire( args ); - } - } - return this; - }, - // Call all the callbacks with the given arguments - fire: function() { - self.fireWith( this, arguments ); - return this; - }, - // To know if the callbacks have already been called at least once - fired: function() { - return !!fired; - } - }; - - return self; - }; - jQuery.extend({ - - Deferred: function( func ) { - var tuples = [ - // action, add listener, listener list, final state - [ "resolve", "done", jQuery.Callbacks("once memory"), "resolved" ], - [ "reject", "fail", jQuery.Callbacks("once memory"), "rejected" ], - [ "notify", "progress", jQuery.Callbacks("memory") ] - ], - state = "pending", - promise = { - state: function() { - return state; - }, - always: function() { - deferred.done( arguments ).fail( arguments ); - return this; - }, - then: function( /* fnDone, fnFail, fnProgress */ ) { - var fns = arguments; - return jQuery.Deferred(function( newDefer ) { - jQuery.each( tuples, function( i, tuple ) { - var action = tuple[ 0 ], - fn = jQuery.isFunction( fns[ i ] ) && fns[ i ]; - // deferred[ done | fail | progress ] for forwarding actions to newDefer - deferred[ tuple[1] ](function() { - var returned = fn && fn.apply( this, arguments ); - if ( returned && jQuery.isFunction( returned.promise ) ) { - returned.promise() - .done( newDefer.resolve ) - .fail( newDefer.reject ) - .progress( newDefer.notify ); - } else { - newDefer[ action + "With" ]( this === promise ? newDefer.promise() : this, fn ? [ returned ] : arguments ); - } - }); - }); - fns = null; - }).promise(); - }, - // Get a promise for this deferred - // If obj is provided, the promise aspect is added to the object - promise: function( obj ) { - return obj != null ? 
jQuery.extend( obj, promise ) : promise; - } - }, - deferred = {}; - - // Keep pipe for back-compat - promise.pipe = promise.then; - - // Add list-specific methods - jQuery.each( tuples, function( i, tuple ) { - var list = tuple[ 2 ], - stateString = tuple[ 3 ]; - - // promise[ done | fail | progress ] = list.add - promise[ tuple[1] ] = list.add; - - // Handle state - if ( stateString ) { - list.add(function() { - // state = [ resolved | rejected ] - state = stateString; - - // [ reject_list | resolve_list ].disable; progress_list.lock - }, tuples[ i ^ 1 ][ 2 ].disable, tuples[ 2 ][ 2 ].lock ); - } - - // deferred[ resolve | reject | notify ] - deferred[ tuple[0] ] = function() { - deferred[ tuple[0] + "With" ]( this === deferred ? promise : this, arguments ); - return this; - }; - deferred[ tuple[0] + "With" ] = list.fireWith; - }); - - // Make the deferred a promise - promise.promise( deferred ); - - // Call given func if any - if ( func ) { - func.call( deferred, deferred ); - } - - // All done! - return deferred; - }, - - // Deferred helper - when: function( subordinate /* , ..., subordinateN */ ) { - var i = 0, - resolveValues = core_slice.call( arguments ), - length = resolveValues.length, - - // the count of uncompleted subordinates - remaining = length !== 1 || ( subordinate && jQuery.isFunction( subordinate.promise ) ) ? length : 0, - - // the master Deferred. If resolveValues consist of only a single Deferred, just use that. - deferred = remaining === 1 ? subordinate : jQuery.Deferred(), - - // Update function for both resolve and progress values - updateFunc = function( i, contexts, values ) { - return function( value ) { - contexts[ i ] = this; - values[ i ] = arguments.length > 1 ? 
core_slice.call( arguments ) : value; - if( values === progressValues ) { - deferred.notifyWith( contexts, values ); - } else if ( !( --remaining ) ) { - deferred.resolveWith( contexts, values ); - } - }; - }, - - progressValues, progressContexts, resolveContexts; - - // add listeners to Deferred subordinates; treat others as resolved - if ( length > 1 ) { - progressValues = new Array( length ); - progressContexts = new Array( length ); - resolveContexts = new Array( length ); - for ( ; i < length; i++ ) { - if ( resolveValues[ i ] && jQuery.isFunction( resolveValues[ i ].promise ) ) { - resolveValues[ i ].promise() - .done( updateFunc( i, resolveContexts, resolveValues ) ) - .fail( deferred.reject ) - .progress( updateFunc( i, progressContexts, progressValues ) ); - } else { - --remaining; - } - } - } - - // if we're not waiting on anything, resolve the master - if ( !remaining ) { - deferred.resolveWith( resolveContexts, resolveValues ); - } - - return deferred.promise(); - } - }); - jQuery.support = (function() { - - var support, all, a, - input, select, fragment, - opt, eventName, isSupported, i, - div = document.createElement("div"); - - // Setup - div.setAttribute( "className", "t" ); - div.innerHTML = " <link/><table></table><a href='/a'>a</a><input type='checkbox'/>"; - - // Support tests won't run in some limited or non-browser environments - all = div.getElementsByTagName("*"); - a = div.getElementsByTagName("a")[ 0 ]; - if ( !all || !a || !all.length ) { - return {}; - } - - // First batch of tests - select = document.createElement("select"); - opt = select.appendChild( document.createElement("option") ); - input = div.getElementsByTagName("input")[ 0 ]; - - a.style.cssText = "top:1px;float:left;opacity:.5"; - support = { - // Test setAttribute on camelCase class. 
If it works, we need attrFixes when doing get/setAttribute (ie6/7) - getSetAttribute: div.className !== "t", - - // IE strips leading whitespace when .innerHTML is used - leadingWhitespace: div.firstChild.nodeType === 3, - - // Make sure that tbody elements aren't automatically inserted - // IE will insert them into empty tables - tbody: !div.getElementsByTagName("tbody").length, - - // Make sure that link elements get serialized correctly by innerHTML - // This requires a wrapper element in IE - htmlSerialize: !!div.getElementsByTagName("link").length, - - // Get the style information from getAttribute - // (IE uses .cssText instead) - style: /top/.test( a.getAttribute("style") ), - - // Make sure that URLs aren't manipulated - // (IE normalizes it by default) - hrefNormalized: a.getAttribute("href") === "/a", - - // Make sure that element opacity exists - // (IE uses filter instead) - // Use a regex to work around a WebKit issue. See #5145 - opacity: /^0.5/.test( a.style.opacity ), - - // Verify style float existence - // (IE uses styleFloat instead of cssFloat) - cssFloat: !!a.style.cssFloat, - - // Check the default checkbox/radio value ("" on WebKit; "on" elsewhere) - checkOn: !!input.value, - - // Make sure that a selected-by-default option has a working selected property. 
- // (WebKit defaults to false instead of true, IE too, if it's in an optgroup) - optSelected: opt.selected, - - // Tests for enctype support on a form (#6743) - enctype: !!document.createElement("form").enctype, - - // Makes sure cloning an html5 element does not cause problems - // Where outerHTML is undefined, this still works - html5Clone: document.createElement("nav").cloneNode( true ).outerHTML !== "<:nav></:nav>", - - // jQuery.support.boxModel DEPRECATED in 1.8 since we don't support Quirks Mode - boxModel: document.compatMode === "CSS1Compat", - - // Will be defined later - deleteExpando: true, - noCloneEvent: true, - inlineBlockNeedsLayout: false, - shrinkWrapBlocks: false, - reliableMarginRight: true, - boxSizingReliable: true, - pixelPosition: false - }; - - // Make sure checked status is properly cloned - input.checked = true; - support.noCloneChecked = input.cloneNode( true ).checked; - - // Make sure that the options inside disabled selects aren't marked as disabled - // (WebKit marks them as disabled) - select.disabled = true; - support.optDisabled = !opt.disabled; - - // Support: IE<9 - try { - delete div.test; - } catch( e ) { - support.deleteExpando = false; - } - - // Check if we can trust getAttribute("value") - input = document.createElement("input"); - input.setAttribute( "value", "" ); - support.input = input.getAttribute( "value" ) === ""; - - // Check if an input maintains its value after becoming a radio - input.value = "t"; - input.setAttribute( "type", "radio" ); - support.radioValue = input.value === "t"; - - // #11217 - WebKit loses check when the name is after the checked attribute - input.setAttribute( "checked", "t" ); - input.setAttribute( "name", "t" ); - - fragment = document.createDocumentFragment(); - fragment.appendChild( input ); - - // Check if a disconnected checkbox will retain its checked - // value of true after appended to the DOM (IE6/7) - support.appendChecked = input.checked; - - // WebKit doesn't clone checked 
state correctly in fragments - support.checkClone = fragment.cloneNode( true ).cloneNode( true ).lastChild.checked; - - // Support: IE<9 - // Opera does not clone events (and typeof div.attachEvent === undefined). - // IE9-10 clones events bound via attachEvent, but they don't trigger with .click() - if ( div.attachEvent ) { - div.attachEvent( "onclick", function() { - support.noCloneEvent = false; - }); - - div.cloneNode( true ).click(); - } - - // Support: IE<9 (lack submit/change bubble), Firefox 17+ (lack focusin event) - // Beware of CSP restrictions (https://developer.mozilla.org/en/Security/CSP), test/csp.php - for ( i in { submit: true, change: true, focusin: true }) { - div.setAttribute( eventName = "on" + i, "t" ); - - support[ i + "Bubbles" ] = eventName in window || div.attributes[ eventName ].expando === false; - } - - div.style.backgroundClip = "content-box"; - div.cloneNode( true ).style.backgroundClip = ""; - support.clearCloneStyle = div.style.backgroundClip === "content-box"; - - // Run tests that need a body at doc ready - jQuery(function() { - var container, marginDiv, tds, - divReset = "padding:0;margin:0;border:0;display:block;box-sizing:content-box;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;", - body = document.getElementsByTagName("body")[0]; - - if ( !body ) { - // Return for frameset docs that don't have a body - return; - } - - container = document.createElement("div"); - container.style.cssText = "border:0;width:0;height:0;position:absolute;top:0;left:-9999px;margin-top:1px"; - - body.appendChild( container ).appendChild( div ); - - // Support: IE8 - // Check if table cells still have offsetWidth/Height when they are set - // to display:none and there are still other visible table cells in a - // table row; if so, offsetWidth/Height are not reliable for use when - // determining if an element has been hidden directly using - // display:none (it is still safe to use offsets if a parent element is - // hidden; don safety 
goggles and see bug #4512 for more information). - div.innerHTML = "<table><tr><td></td><td>t</td></tr></table>"; - tds = div.getElementsByTagName("td"); - tds[ 0 ].style.cssText = "padding:0;margin:0;border:0;display:none"; - isSupported = ( tds[ 0 ].offsetHeight === 0 ); - - tds[ 0 ].style.display = ""; - tds[ 1 ].style.display = "none"; - - // Support: IE8 - // Check if empty table cells still have offsetWidth/Height - support.reliableHiddenOffsets = isSupported && ( tds[ 0 ].offsetHeight === 0 ); - - // Check box-sizing and margin behavior - div.innerHTML = ""; - div.style.cssText = "box-sizing:border-box;-moz-box-sizing:border-box;-webkit-box-sizing:border-box;padding:1px;border:1px;display:block;width:4px;margin-top:1%;position:absolute;top:1%;"; - support.boxSizing = ( div.offsetWidth === 4 ); - support.doesNotIncludeMarginInBodyOffset = ( body.offsetTop !== 1 ); - - // Use window.getComputedStyle because jsdom on node.js will break without it. - if ( window.getComputedStyle ) { - support.pixelPosition = ( window.getComputedStyle( div, null ) || {} ).top !== "1%"; - support.boxSizingReliable = ( window.getComputedStyle( div, null ) || { width: "4px" } ).width === "4px"; - - // Check if div with explicit width and no margin-right incorrectly - // gets computed margin-right based on width of container. 
(#3333) - // Fails in WebKit before Feb 2011 nightlies - // WebKit Bug 13343 - getComputedStyle returns wrong value for margin-right - marginDiv = div.appendChild( document.createElement("div") ); - marginDiv.style.cssText = div.style.cssText = divReset; - marginDiv.style.marginRight = marginDiv.style.width = "0"; - div.style.width = "1px"; - - support.reliableMarginRight = - !parseFloat( ( window.getComputedStyle( marginDiv, null ) || {} ).marginRight ); - } - - if ( typeof div.style.zoom !== core_strundefined ) { - // Support: IE<8 - // Check if natively block-level elements act like inline-block - // elements when setting their display to 'inline' and giving - // them layout - div.innerHTML = ""; - div.style.cssText = divReset + "width:1px;padding:1px;display:inline;zoom:1"; - support.inlineBlockNeedsLayout = ( div.offsetWidth === 3 ); - - // Support: IE6 - // Check if elements with layout shrink-wrap their children - div.style.display = "block"; - div.innerHTML = "<div></div>"; - div.firstChild.style.width = "5px"; - support.shrinkWrapBlocks = ( div.offsetWidth !== 3 ); - - if ( support.inlineBlockNeedsLayout ) { - // Prevent IE 6 from affecting layout for positioned elements #11048 - // Prevent IE from shrinking the body in IE 7 mode #12869 - // Support: IE<8 - body.style.zoom = 1; - } - } - - body.removeChild( container ); - - // Null elements to avoid leaks in IE - container = div = tds = marginDiv = null; - }); - - // Null elements to avoid leaks in IE - all = select = fragment = opt = a = input = null; - - return support; - })(); - - var rbrace = /(?:{[\s\S]*}|[[\s\S]*])$/, - rmultiDash = /([A-Z])/g; - - function internalData( elem, name, data, pvt /* Internal Use Only */ ){ - if ( !jQuery.acceptData( elem ) ) { - return; - } - - var thisCache, ret, - internalKey = jQuery.expando, - getByName = typeof name === "string", - - // We have to handle DOM nodes and JS objects differently because IE6-7 - // can't GC object references properly across the DOM-JS 
boundary - isNode = elem.nodeType, - - // Only DOM nodes need the global jQuery cache; JS object data is - // attached directly to the object so GC can occur automatically - cache = isNode ? jQuery.cache : elem, - - // Only defining an ID for JS objects if its cache already exists allows - // the code to shortcut on the same path as a DOM node with no cache - id = isNode ? elem[ internalKey ] : elem[ internalKey ] && internalKey; - - // Avoid doing any more work than we need to when trying to get data on an - // object that has no data at all - if ( (!id || !cache[id] || (!pvt && !cache[id].data)) && getByName && data === undefined ) { - return; - } - - if ( !id ) { - // Only DOM nodes need a new unique ID for each element since their data - // ends up in the global cache - if ( isNode ) { - elem[ internalKey ] = id = core_deletedIds.pop() || jQuery.guid++; - } else { - id = internalKey; - } - } - - if ( !cache[ id ] ) { - cache[ id ] = {}; - - // Avoids exposing jQuery metadata on plain JS objects when the object - // is serialized using JSON.stringify - if ( !isNode ) { - cache[ id ].toJSON = jQuery.noop; - } - } - - // An object can be passed to jQuery.data instead of a key/value pair; this gets - // shallow copied over onto the existing cache - if ( typeof name === "object" || typeof name === "function" ) { - if ( pvt ) { - cache[ id ] = jQuery.extend( cache[ id ], name ); - } else { - cache[ id ].data = jQuery.extend( cache[ id ].data, name ); - } - } - - thisCache = cache[ id ]; - - // jQuery data() is stored in a separate object inside the object's internal data - // cache in order to avoid key collisions between internal data and user-defined - // data. 
- if ( !pvt ) { - if ( !thisCache.data ) { - thisCache.data = {}; - } - - thisCache = thisCache.data; - } - - if ( data !== undefined ) { - thisCache[ jQuery.camelCase( name ) ] = data; - } - - // Check for both converted-to-camel and non-converted data property names - // If a data property was specified - if ( getByName ) { - - // First Try to find as-is property data - ret = thisCache[ name ]; - - // Test for null|undefined property data - if ( ret == null ) { - - // Try to find the camelCased property - ret = thisCache[ jQuery.camelCase( name ) ]; - } - } else { - ret = thisCache; - } - - return ret; - } - - function internalRemoveData( elem, name, pvt ) { - if ( !jQuery.acceptData( elem ) ) { - return; - } - - var i, l, thisCache, - isNode = elem.nodeType, - - // See jQuery.data for more information - cache = isNode ? jQuery.cache : elem, - id = isNode ? elem[ jQuery.expando ] : jQuery.expando; - - // If there is already no cache entry for this object, there is no - // purpose in continuing - if ( !cache[ id ] ) { - return; - } - - if ( name ) { - - thisCache = pvt ? cache[ id ] : cache[ id ].data; - - if ( thisCache ) { - - // Support array or space separated string names for data keys - if ( !jQuery.isArray( name ) ) { - - // try the string as a key before any manipulation - if ( name in thisCache ) { - name = [ name ]; - } else { - - // split the camel cased version by spaces unless a key with the spaces exists - name = jQuery.camelCase( name ); - if ( name in thisCache ) { - name = [ name ]; - } else { - name = name.split(" "); - } - } - } else { - // If "name" is an array of keys... - // When data is initially created, via ("key", "val") signature, - // keys will be converted to camelCase. - // Since there is no way to tell _how_ a key was added, remove - // both plain key and camelCase key. #12786 - // This will only penalize the array argument path. 
- name = name.concat( jQuery.map( name, jQuery.camelCase ) ); - } - - for ( i = 0, l = name.length; i < l; i++ ) { - delete thisCache[ name[i] ]; - } - - // If there is no data left in the cache, we want to continue - // and let the cache object itself get destroyed - if ( !( pvt ? isEmptyDataObject : jQuery.isEmptyObject )( thisCache ) ) { - return; - } - } - } - - // See jQuery.data for more information - if ( !pvt ) { - delete cache[ id ].data; - - // Don't destroy the parent cache unless the internal data object - // had been the only thing left in it - if ( !isEmptyDataObject( cache[ id ] ) ) { - return; - } - } - - // Destroy the cache - if ( isNode ) { - jQuery.cleanData( [ elem ], true ); - - // Use delete when supported for expandos or `cache` is not a window per isWindow (#10080) - } else if ( jQuery.support.deleteExpando || cache != cache.window ) { - delete cache[ id ]; - - // When all else fails, null - } else { - cache[ id ] = null; - } - } - - jQuery.extend({ - cache: {}, - - // Unique for each copy of jQuery on the page - // Non-digits removed to match rinlinejQuery - expando: "jQuery" + ( core_version + Math.random() ).replace( /\D/g, "" ), - - // The following elements throw uncatchable exceptions if you - // attempt to add expando properties to them. - noData: { - "embed": true, - // Ban all objects except for Flash (which handle expandos) - "object": "clsid:D27CDB6E-AE6D-11cf-96B8-444553540000", - "applet": true - }, - - hasData: function( elem ) { - elem = elem.nodeType ? jQuery.cache[ elem[jQuery.expando] ] : elem[ jQuery.expando ]; - return !!elem && !isEmptyDataObject( elem ); - }, - - data: function( elem, name, data ) { - return internalData( elem, name, data ); - }, - - removeData: function( elem, name ) { - return internalRemoveData( elem, name ); - }, - - // For internal use only. 
- _data: function( elem, name, data ) { - return internalData( elem, name, data, true ); - }, - - _removeData: function( elem, name ) { - return internalRemoveData( elem, name, true ); - }, - - // A method for determining if a DOM node can handle the data expando - acceptData: function( elem ) { - // Do not set data on non-element because it will not be cleared (#8335). - if ( elem.nodeType && elem.nodeType !== 1 && elem.nodeType !== 9 ) { - return false; - } - - var noData = elem.nodeName && jQuery.noData[ elem.nodeName.toLowerCase() ]; - - // nodes accept data unless otherwise specified; rejection can be conditional - return !noData || noData !== true && elem.getAttribute("classid") === noData; - } - }); - - jQuery.fn.extend({ - data: function( key, value ) { - var attrs, name, - elem = this[0], - i = 0, - data = null; - - // Gets all values - if ( key === undefined ) { - if ( this.length ) { - data = jQuery.data( elem ); - - if ( elem.nodeType === 1 && !jQuery._data( elem, "parsedAttrs" ) ) { - attrs = elem.attributes; - for ( ; i < attrs.length; i++ ) { - name = attrs[i].name; - - if ( !name.indexOf( "data-" ) ) { - name = jQuery.camelCase( name.slice(5) ); - - dataAttr( elem, name, data[ name ] ); - } - } - jQuery._data( elem, "parsedAttrs", true ); - } - } - - return data; - } - - // Sets multiple values - if ( typeof key === "object" ) { - return this.each(function() { - jQuery.data( this, key ); - }); - } - - return jQuery.access( this, function( value ) { - - if ( value === undefined ) { - // Try to fetch any internally stored data first - return elem ? 
dataAttr( elem, key, jQuery.data( elem, key ) ) : null; - } - - this.each(function() { - jQuery.data( this, key, value ); - }); - }, null, value, arguments.length > 1, null, true ); - }, - - removeData: function( key ) { - return this.each(function() { - jQuery.removeData( this, key ); - }); - } - }); - - function dataAttr( elem, key, data ) { - // If nothing was found internally, try to fetch any - // data from the HTML5 data-* attribute - if ( data === undefined && elem.nodeType === 1 ) { - - var name = "data-" + key.replace( rmultiDash, "-$1" ).toLowerCase(); - - data = elem.getAttribute( name ); - - if ( typeof data === "string" ) { - try { - data = data === "true" ? true : - data === "false" ? false : - data === "null" ? null : - // Only convert to a number if it doesn't change the string - +data + "" === data ? +data : - rbrace.test( data ) ? jQuery.parseJSON( data ) : - data; - } catch( e ) {} - - // Make sure we set the data so it isn't changed later - jQuery.data( elem, key, data ); - - } else { - data = undefined; - } - } - - return data; - } - -// checks a cache object for emptiness - function isEmptyDataObject( obj ) { - var name; - for ( name in obj ) { - - // if the public data object is empty, the private is still empty - if ( name === "data" && jQuery.isEmptyObject( obj[name] ) ) { - continue; - } - if ( name !== "toJSON" ) { - return false; - } - } - - return true; - } - jQuery.extend({ - queue: function( elem, type, data ) { - var queue; - - if ( elem ) { - type = ( type || "fx" ) + "queue"; - queue = jQuery._data( elem, type ); - - // Speed up dequeue by getting out quickly if this is just a lookup - if ( data ) { - if ( !queue || jQuery.isArray(data) ) { - queue = jQuery._data( elem, type, jQuery.makeArray(data) ); - } else { - queue.push( data ); - } - } - return queue || []; - } - }, - - dequeue: function( elem, type ) { - type = type || "fx"; - - var queue = jQuery.queue( elem, type ), - startLength = queue.length, - fn = queue.shift(), - 
hooks = jQuery._queueHooks( elem, type ), - next = function() { - jQuery.dequeue( elem, type ); - }; - - // If the fx queue is dequeued, always remove the progress sentinel - if ( fn === "inprogress" ) { - fn = queue.shift(); - startLength--; - } - - hooks.cur = fn; - if ( fn ) { - - // Add a progress sentinel to prevent the fx queue from being - // automatically dequeued - if ( type === "fx" ) { - queue.unshift( "inprogress" ); - } - - // clear up the last queue stop function - delete hooks.stop; - fn.call( elem, next, hooks ); - } - - if ( !startLength && hooks ) { - hooks.empty.fire(); - } - }, - - // not intended for public consumption - generates a queueHooks object, or returns the current one - _queueHooks: function( elem, type ) { - var key = type + "queueHooks"; - return jQuery._data( elem, key ) || jQuery._data( elem, key, { - empty: jQuery.Callbacks("once memory").add(function() { - jQuery._removeData( elem, type + "queue" ); - jQuery._removeData( elem, key ); - }) - }); - } - }); - - jQuery.fn.extend({ - queue: function( type, data ) { - var setter = 2; - - if ( typeof type !== "string" ) { - data = type; - type = "fx"; - setter--; - } - - if ( arguments.length < setter ) { - return jQuery.queue( this[0], type ); - } - - return data === undefined ? - this : - this.each(function() { - var queue = jQuery.queue( this, type, data ); - - // ensure a hooks for this queue - jQuery._queueHooks( this, type ); - - if ( type === "fx" && queue[0] !== "inprogress" ) { - jQuery.dequeue( this, type ); - } - }); - }, - dequeue: function( type ) { - return this.each(function() { - jQuery.dequeue( this, type ); - }); - }, - // Based off of the plugin by Clint Helfers, with permission. - // http://blindsignals.com/index.php/2009/07/jquery-delay/ - delay: function( time, type ) { - time = jQuery.fx ? 
jQuery.fx.speeds[ time ] || time : time; - type = type || "fx"; - - return this.queue( type, function( next, hooks ) { - var timeout = setTimeout( next, time ); - hooks.stop = function() { - clearTimeout( timeout ); - }; - }); - }, - clearQueue: function( type ) { - return this.queue( type || "fx", [] ); - }, - // Get a promise resolved when queues of a certain type - // are emptied (fx is the type by default) - promise: function( type, obj ) { - var tmp, - count = 1, - defer = jQuery.Deferred(), - elements = this, - i = this.length, - resolve = function() { - if ( !( --count ) ) { - defer.resolveWith( elements, [ elements ] ); - } - }; - - if ( typeof type !== "string" ) { - obj = type; - type = undefined; - } - type = type || "fx"; - - while( i-- ) { - tmp = jQuery._data( elements[ i ], type + "queueHooks" ); - if ( tmp && tmp.empty ) { - count++; - tmp.empty.add( resolve ); - } - } - resolve(); - return defer.promise( obj ); - } - }); - var nodeHook, boolHook, - rclass = /[\t\r\n]/g, - rreturn = /\r/g, - rfocusable = /^(?:input|select|textarea|button|object)$/i, - rclickable = /^(?:a|area)$/i, - rboolean = /^(?:checked|selected|autofocus|autoplay|async|controls|defer|disabled|hidden|loop|multiple|open|readonly|required|scoped)$/i, - ruseDefault = /^(?:checked|selected)$/i, - getSetAttribute = jQuery.support.getSetAttribute, - getSetInput = jQuery.support.input; - - jQuery.fn.extend({ - attr: function( name, value ) { - return jQuery.access( this, jQuery.attr, name, value, arguments.length > 1 ); - }, - - removeAttr: function( name ) { - return this.each(function() { - jQuery.removeAttr( this, name ); - }); - }, - - prop: function( name, value ) { - return jQuery.access( this, jQuery.prop, name, value, arguments.length > 1 ); - }, - - removeProp: function( name ) { - name = jQuery.propFix[ name ] || name; - return this.each(function() { - // try/catch handles cases where IE balks (such as removing a property on window) - try { - this[ name ] = undefined; - delete 
this[ name ]; - } catch( e ) {} - }); - }, - - addClass: function( value ) { - var classes, elem, cur, clazz, j, - i = 0, - len = this.length, - proceed = typeof value === "string" && value; - - if ( jQuery.isFunction( value ) ) { - return this.each(function( j ) { - jQuery( this ).addClass( value.call( this, j, this.className ) ); - }); - } - - if ( proceed ) { - // The disjunction here is for better compressibility (see removeClass) - classes = ( value || "" ).match( core_rnotwhite ) || []; - - for ( ; i < len; i++ ) { - elem = this[ i ]; - cur = elem.nodeType === 1 && ( elem.className ? - ( " " + elem.className + " " ).replace( rclass, " " ) : - " " - ); - - if ( cur ) { - j = 0; - while ( (clazz = classes[j++]) ) { - if ( cur.indexOf( " " + clazz + " " ) < 0 ) { - cur += clazz + " "; - } - } - elem.className = jQuery.trim( cur ); - - } - } - } - - return this; - }, - - removeClass: function( value ) { - var classes, elem, cur, clazz, j, - i = 0, - len = this.length, - proceed = arguments.length === 0 || typeof value === "string" && value; - - if ( jQuery.isFunction( value ) ) { - return this.each(function( j ) { - jQuery( this ).removeClass( value.call( this, j, this.className ) ); - }); - } - if ( proceed ) { - classes = ( value || "" ).match( core_rnotwhite ) || []; - - for ( ; i < len; i++ ) { - elem = this[ i ]; - // This expression is here for better compressibility (see addClass) - cur = elem.nodeType === 1 && ( elem.className ? - ( " " + elem.className + " " ).replace( rclass, " " ) : - "" - ); - - if ( cur ) { - j = 0; - while ( (clazz = classes[j++]) ) { - // Remove *all* instances - while ( cur.indexOf( " " + clazz + " " ) >= 0 ) { - cur = cur.replace( " " + clazz + " ", " " ); - } - } - elem.className = value ? 
jQuery.trim( cur ) : ""; - } - } - } - - return this; - }, - - toggleClass: function( value, stateVal ) { - var type = typeof value, - isBool = typeof stateVal === "boolean"; - - if ( jQuery.isFunction( value ) ) { - return this.each(function( i ) { - jQuery( this ).toggleClass( value.call(this, i, this.className, stateVal), stateVal ); - }); - } - - return this.each(function() { - if ( type === "string" ) { - // toggle individual class names - var className, - i = 0, - self = jQuery( this ), - state = stateVal, - classNames = value.match( core_rnotwhite ) || []; - - while ( (className = classNames[ i++ ]) ) { - // check each className given, space separated list - state = isBool ? state : !self.hasClass( className ); - self[ state ? "addClass" : "removeClass" ]( className ); - } - - // Toggle whole class name - } else if ( type === core_strundefined || type === "boolean" ) { - if ( this.className ) { - // store className if set - jQuery._data( this, "__className__", this.className ); - } - - // If the element has a class name or if we're passed "false", - // then remove the whole classname (if there was one, the above saved it). - // Otherwise bring back whatever was previously saved (if anything), - // falling back to the empty string if nothing was stored. - this.className = this.className || value === false ? 
"" : jQuery._data( this, "__className__" ) || ""; - } - }); - }, - - hasClass: function( selector ) { - var className = " " + selector + " ", - i = 0, - l = this.length; - for ( ; i < l; i++ ) { - if ( this[i].nodeType === 1 && (" " + this[i].className + " ").replace(rclass, " ").indexOf( className ) >= 0 ) { - return true; - } - } - - return false; - }, - - val: function( value ) { - var ret, hooks, isFunction, - elem = this[0]; - - if ( !arguments.length ) { - if ( elem ) { - hooks = jQuery.valHooks[ elem.type ] || jQuery.valHooks[ elem.nodeName.toLowerCase() ]; - - if ( hooks && "get" in hooks && (ret = hooks.get( elem, "value" )) !== undefined ) { - return ret; - } - - ret = elem.value; - - return typeof ret === "string" ? - // handle most common string cases - ret.replace(rreturn, "") : - // handle cases where value is null/undef or number - ret == null ? "" : ret; - } - - return; - } - - isFunction = jQuery.isFunction( value ); - - return this.each(function( i ) { - var val, - self = jQuery(this); - - if ( this.nodeType !== 1 ) { - return; - } - - if ( isFunction ) { - val = value.call( this, i, self.val() ); - } else { - val = value; - } - - // Treat null/undefined as ""; convert numbers to string - if ( val == null ) { - val = ""; - } else if ( typeof val === "number" ) { - val += ""; - } else if ( jQuery.isArray( val ) ) { - val = jQuery.map(val, function ( value ) { - return value == null ? "" : value + ""; - }); - } - - hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; - - // If set returns undefined, fall back to normal setting - if ( !hooks || !("set" in hooks) || hooks.set( this, val, "value" ) === undefined ) { - this.value = val; - } - }); - } - }); - - jQuery.extend({ - valHooks: { - option: { - get: function( elem ) { - // attributes.value is undefined in Blackberry 4.7 but - // uses .value. See #6932 - var val = elem.attributes.value; - return !val || val.specified ? 
elem.value : elem.text; - } - }, - select: { - get: function( elem ) { - var value, option, - options = elem.options, - index = elem.selectedIndex, - one = elem.type === "select-one" || index < 0, - values = one ? null : [], - max = one ? index + 1 : options.length, - i = index < 0 ? - max : - one ? index : 0; - - // Loop through all the selected options - for ( ; i < max; i++ ) { - option = options[ i ]; - - // oldIE doesn't update selected after form reset (#2551) - if ( ( option.selected || i === index ) && - // Don't return options that are disabled or in a disabled optgroup - ( jQuery.support.optDisabled ? !option.disabled : option.getAttribute("disabled") === null ) && - ( !option.parentNode.disabled || !jQuery.nodeName( option.parentNode, "optgroup" ) ) ) { - - // Get the specific value for the option - value = jQuery( option ).val(); - - // We don't need an array for one selects - if ( one ) { - return value; - } - - // Multi-Selects return an array - values.push( value ); - } - } - - return values; - }, - - set: function( elem, value ) { - var values = jQuery.makeArray( value ); - - jQuery(elem).find("option").each(function() { - this.selected = jQuery.inArray( jQuery(this).val(), values ) >= 0; - }); - - if ( !values.length ) { - elem.selectedIndex = -1; - } - return values; - } - } - }, - - attr: function( elem, name, value ) { - var hooks, notxml, ret, - nType = elem.nodeType; - - // don't get/set attributes on text, comment and attribute nodes - if ( !elem || nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - // Fallback to prop when attributes are not supported - if ( typeof elem.getAttribute === core_strundefined ) { - return jQuery.prop( elem, name, value ); - } - - notxml = nType !== 1 || !jQuery.isXMLDoc( elem ); - - // All attributes are lowercase - // Grab necessary hook if one is defined - if ( notxml ) { - name = name.toLowerCase(); - hooks = jQuery.attrHooks[ name ] || ( rboolean.test( name ) ? 
boolHook : nodeHook ); - } - - if ( value !== undefined ) { - - if ( value === null ) { - jQuery.removeAttr( elem, name ); - - } else if ( hooks && notxml && "set" in hooks && (ret = hooks.set( elem, value, name )) !== undefined ) { - return ret; - - } else { - elem.setAttribute( name, value + "" ); - return value; - } - - } else if ( hooks && notxml && "get" in hooks && (ret = hooks.get( elem, name )) !== null ) { - return ret; - - } else { - - // In IE9+, Flash objects don't have .getAttribute (#12945) - // Support: IE9+ - if ( typeof elem.getAttribute !== core_strundefined ) { - ret = elem.getAttribute( name ); - } - - // Non-existent attributes return null, we normalize to undefined - return ret == null ? - undefined : - ret; - } - }, - - removeAttr: function( elem, value ) { - var name, propName, - i = 0, - attrNames = value && value.match( core_rnotwhite ); - - if ( attrNames && elem.nodeType === 1 ) { - while ( (name = attrNames[i++]) ) { - propName = jQuery.propFix[ name ] || name; - - // Boolean attributes get special treatment (#10870) - if ( rboolean.test( name ) ) { - // Set corresponding property to false for boolean attributes - // Also clear defaultChecked/defaultSelected (if appropriate) for IE<8 - if ( !getSetAttribute && ruseDefault.test( name ) ) { - elem[ jQuery.camelCase( "default-" + name ) ] = - elem[ propName ] = false; - } else { - elem[ propName ] = false; - } - - // See #9699 for explanation of this approach (setting first, then removal) - } else { - jQuery.attr( elem, name, "" ); - } - - elem.removeAttribute( getSetAttribute ? 
name : propName ); - } - } - }, - - attrHooks: { - type: { - set: function( elem, value ) { - if ( !jQuery.support.radioValue && value === "radio" && jQuery.nodeName(elem, "input") ) { - // Setting the type on a radio button after the value resets the value in IE6-9 - // Reset value to default in case type is set after value during creation - var val = elem.value; - elem.setAttribute( "type", value ); - if ( val ) { - elem.value = val; - } - return value; - } - } - } - }, - - propFix: { - tabindex: "tabIndex", - readonly: "readOnly", - "for": "htmlFor", - "class": "className", - maxlength: "maxLength", - cellspacing: "cellSpacing", - cellpadding: "cellPadding", - rowspan: "rowSpan", - colspan: "colSpan", - usemap: "useMap", - frameborder: "frameBorder", - contenteditable: "contentEditable" - }, - - prop: function( elem, name, value ) { - var ret, hooks, notxml, - nType = elem.nodeType; - - // don't get/set properties on text, comment and attribute nodes - if ( !elem || nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - notxml = nType !== 1 || !jQuery.isXMLDoc( elem ); - - if ( notxml ) { - // Fix name and attach hooks - name = jQuery.propFix[ name ] || name; - hooks = jQuery.propHooks[ name ]; - } - - if ( value !== undefined ) { - if ( hooks && "set" in hooks && (ret = hooks.set( elem, value, name )) !== undefined ) { - return ret; - - } else { - return ( elem[ name ] = value ); - } - - } else { - if ( hooks && "get" in hooks && (ret = hooks.get( elem, name )) !== null ) { - return ret; - - } else { - return elem[ name ]; - } - } - }, - - propHooks: { - tabIndex: { - get: function( elem ) { - // elem.tabIndex doesn't always return the correct value when it hasn't been explicitly set - // http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabinde... - var attributeNode = elem.getAttributeNode("tabindex"); - - return attributeNode && attributeNode.specified ? 
- parseInt( attributeNode.value, 10 ) : - rfocusable.test( elem.nodeName ) || rclickable.test( elem.nodeName ) && elem.href ? - 0 : - undefined; - } - } - } - }); - -// Hook for boolean attributes - boolHook = { - get: function( elem, name ) { - var - // Use .prop to determine if this attribute is understood as boolean - prop = jQuery.prop( elem, name ), - - // Fetch it accordingly - attr = typeof prop === "boolean" && elem.getAttribute( name ), - detail = typeof prop === "boolean" ? - - getSetInput && getSetAttribute ? - attr != null : - // oldIE fabricates an empty string for missing boolean attributes - // and conflates checked/selected into attroperties - ruseDefault.test( name ) ? - elem[ jQuery.camelCase( "default-" + name ) ] : - !!attr : - - // fetch an attribute node for properties not recognized as boolean - elem.getAttributeNode( name ); - - return detail && detail.value !== false ? - name.toLowerCase() : - undefined; - }, - set: function( elem, value, name ) { - if ( value === false ) { - // Remove boolean attributes when set to false - jQuery.removeAttr( elem, name ); - } else if ( getSetInput && getSetAttribute || !ruseDefault.test( name ) ) { - // IE<8 needs the *property* name - elem.setAttribute( !getSetAttribute && jQuery.propFix[ name ] || name, name ); - - // Use defaultChecked and defaultSelected for oldIE - } else { - elem[ jQuery.camelCase( "default-" + name ) ] = elem[ name ] = true; - } - - return name; - } - }; - -// fix oldIE value attroperty - if ( !getSetInput || !getSetAttribute ) { - jQuery.attrHooks.value = { - get: function( elem, name ) { - var ret = elem.getAttributeNode( name ); - return jQuery.nodeName( elem, "input" ) ? - - // Ignore the value *property* by using defaultValue - elem.defaultValue : - - ret && ret.specified ? 
ret.value : undefined; - }, - set: function( elem, value, name ) { - if ( jQuery.nodeName( elem, "input" ) ) { - // Does not return so that setAttribute is also used - elem.defaultValue = value; - } else { - // Use nodeHook if defined (#1954); otherwise setAttribute is fine - return nodeHook && nodeHook.set( elem, value, name ); - } - } - }; - } - -// IE6/7 do not support getting/setting some attributes with get/setAttribute - if ( !getSetAttribute ) { - - // Use this for any attribute in IE6/7 - // This fixes almost every IE6/7 issue - nodeHook = jQuery.valHooks.button = { - get: function( elem, name ) { - var ret = elem.getAttributeNode( name ); - return ret && ( name === "id" || name === "name" || name === "coords" ? ret.value !== "" : ret.specified ) ? - ret.value : - undefined; - }, - set: function( elem, value, name ) { - // Set the existing or create a new attribute node - var ret = elem.getAttributeNode( name ); - if ( !ret ) { - elem.setAttributeNode( - (ret = elem.ownerDocument.createAttribute( name )) - ); - } - - ret.value = value += ""; - - // Break association with cloned elements by also using setAttribute (#9646) - return name === "value" || value === elem.getAttribute( name ) ? - value : - undefined; - } - }; - - // Set contenteditable to false on removals(#10429) - // Setting to empty string throws an error as an invalid value - jQuery.attrHooks.contenteditable = { - get: nodeHook.get, - set: function( elem, value, name ) { - nodeHook.set( elem, value === "" ? 
false : value, name ); - } - }; - - // Set width and height to auto instead of 0 on empty string( Bug #8150 ) - // This is for removals - jQuery.each([ "width", "height" ], function( i, name ) { - jQuery.attrHooks[ name ] = jQuery.extend( jQuery.attrHooks[ name ], { - set: function( elem, value ) { - if ( value === "" ) { - elem.setAttribute( name, "auto" ); - return value; - } - } - }); - }); - } - - -// Some attributes require a special call on IE -// http://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx - if ( !jQuery.support.hrefNormalized ) { - jQuery.each([ "href", "src", "width", "height" ], function( i, name ) { - jQuery.attrHooks[ name ] = jQuery.extend( jQuery.attrHooks[ name ], { - get: function( elem ) { - var ret = elem.getAttribute( name, 2 ); - return ret == null ? undefined : ret; - } - }); - }); - - // href/src property should get the full normalized URL (#10299/#12915) - jQuery.each([ "href", "src" ], function( i, name ) { - jQuery.propHooks[ name ] = { - get: function( elem ) { - return elem.getAttribute( name, 4 ); - } - }; - }); - } - - if ( !jQuery.support.style ) { - jQuery.attrHooks.style = { - get: function( elem ) { - // Return undefined in the case of empty string - // Note: IE uppercases css property names, but if we were to .toLowerCase() - // .cssText, that would destroy case senstitivity in URL's, like in "background" - return elem.style.cssText || undefined; - }, - set: function( elem, value ) { - return ( elem.style.cssText = value + "" ); - } - }; - } - -// Safari mis-reports the default selected property of an option -// Accessing the parent's selectedIndex property fixes it - if ( !jQuery.support.optSelected ) { - jQuery.propHooks.selected = jQuery.extend( jQuery.propHooks.selected, { - get: function( elem ) { - var parent = elem.parentNode; - - if ( parent ) { - parent.selectedIndex; - - // Make sure that it also works with optgroups, see #5701 - if ( parent.parentNode ) { - parent.parentNode.selectedIndex; - } - } - 
return null; - } - }); - } - -// IE6/7 call enctype encoding - if ( !jQuery.support.enctype ) { - jQuery.propFix.enctype = "encoding"; - } - -// Radios and checkboxes getter/setter - if ( !jQuery.support.checkOn ) { - jQuery.each([ "radio", "checkbox" ], function() { - jQuery.valHooks[ this ] = { - get: function( elem ) { - // Handle the case where in Webkit "" is returned instead of "on" if a value isn't specified - return elem.getAttribute("value") === null ? "on" : elem.value; - } - }; - }); - } - jQuery.each([ "radio", "checkbox" ], function() { - jQuery.valHooks[ this ] = jQuery.extend( jQuery.valHooks[ this ], { - set: function( elem, value ) { - if ( jQuery.isArray( value ) ) { - return ( elem.checked = jQuery.inArray( jQuery(elem).val(), value ) >= 0 ); - } - } - }); - }); - var rformElems = /^(?:input|select|textarea)$/i, - rkeyEvent = /^key/, - rmouseEvent = /^(?:mouse|contextmenu)|click/, - rfocusMorph = /^(?:focusinfocus|focusoutblur)$/, - rtypenamespace = /^([^.]*)(?:.(.+)|)$/; - - function returnTrue() { - return true; - } - - function returnFalse() { - return false; - } - - /* - * Helper functions for managing events -- not part of the public interface. - * Props to Dean Edwards' addEvent library for many of the ideas. 
- */ - jQuery.event = { - - global: {}, - - add: function( elem, types, handler, data, selector ) { - var tmp, events, t, handleObjIn, - special, eventHandle, handleObj, - handlers, type, namespaces, origType, - elemData = jQuery._data( elem ); - - // Don't attach events to noData or text/comment nodes (but allow plain objects) - if ( !elemData ) { - return; - } - - // Caller can pass in an object of custom data in lieu of the handler - if ( handler.handler ) { - handleObjIn = handler; - handler = handleObjIn.handler; - selector = handleObjIn.selector; - } - - // Make sure that the handler has a unique ID, used to find/remove it later - if ( !handler.guid ) { - handler.guid = jQuery.guid++; - } - - // Init the element's event structure and main handler, if this is the first - if ( !(events = elemData.events) ) { - events = elemData.events = {}; - } - if ( !(eventHandle = elemData.handle) ) { - eventHandle = elemData.handle = function( e ) { - // Discard the second event of a jQuery.event.trigger() and - // when an event is called after a page has unloaded - return typeof jQuery !== core_strundefined && (!e || jQuery.event.triggered !== e.type) ? - jQuery.event.dispatch.apply( eventHandle.elem, arguments ) : - undefined; - }; - // Add elem as a property of the handle fn to prevent a memory leak with IE non-native events - eventHandle.elem = elem; - } - - // Handle multiple events separated by a space - // jQuery(...).bind("mouseover mouseout", fn); - types = ( types || "" ).match( core_rnotwhite ) || [""]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[t] ) || []; - type = origType = tmp[1]; - namespaces = ( tmp[2] || "" ).split( "." ).sort(); - - // If event changes its type, use the special event handlers for the changed type - special = jQuery.event.special[ type ] || {}; - - // If selector defined, determine special event api type, otherwise given type - type = ( selector ? 
special.delegateType : special.bindType ) || type; - - // Update special based on newly reset type - special = jQuery.event.special[ type ] || {}; - - // handleObj is passed to all event handlers - handleObj = jQuery.extend({ - type: type, - origType: origType, - data: data, - handler: handler, - guid: handler.guid, - selector: selector, - needsContext: selector && jQuery.expr.match.needsContext.test( selector ), - namespace: namespaces.join(".") - }, handleObjIn ); - - // Init the event handler queue if we're the first - if ( !(handlers = events[ type ]) ) { - handlers = events[ type ] = []; - handlers.delegateCount = 0; - - // Only use addEventListener/attachEvent if the special events handler returns false - if ( !special.setup || special.setup.call( elem, data, namespaces, eventHandle ) === false ) { - // Bind the global event handler to the element - if ( elem.addEventListener ) { - elem.addEventListener( type, eventHandle, false ); - - } else if ( elem.attachEvent ) { - elem.attachEvent( "on" + type, eventHandle ); - } - } - } - - if ( special.add ) { - special.add.call( elem, handleObj ); - - if ( !handleObj.handler.guid ) { - handleObj.handler.guid = handler.guid; - } - } - - // Add to the element's handler list, delegates in front - if ( selector ) { - handlers.splice( handlers.delegateCount++, 0, handleObj ); - } else { - handlers.push( handleObj ); - } - - // Keep track of which events have ever been used, for event optimization - jQuery.event.global[ type ] = true; - } - - // Nullify elem to prevent memory leaks in IE - elem = null; - }, - - // Detach an event or set of events from an element - remove: function( elem, types, handler, selector, mappedTypes ) { - var j, handleObj, tmp, - origCount, t, events, - special, handlers, type, - namespaces, origType, - elemData = jQuery.hasData( elem ) && jQuery._data( elem ); - - if ( !elemData || !(events = elemData.events) ) { - return; - } - - // Once for each type.namespace in types; type may be omitted - 
types = ( types || "" ).match( core_rnotwhite ) || [""]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[t] ) || []; - type = origType = tmp[1]; - namespaces = ( tmp[2] || "" ).split( "." ).sort(); - - // Unbind all events (on this namespace, if provided) for the element - if ( !type ) { - for ( type in events ) { - jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); - } - continue; - } - - special = jQuery.event.special[ type ] || {}; - type = ( selector ? special.delegateType : special.bindType ) || type; - handlers = events[ type ] || []; - tmp = tmp[2] && new RegExp( "(^|\.)" + namespaces.join("\.(?:.*\.|)") + "(\.|$)" ); - - // Remove matching events - origCount = j = handlers.length; - while ( j-- ) { - handleObj = handlers[ j ]; - - if ( ( mappedTypes || origType === handleObj.origType ) && - ( !handler || handler.guid === handleObj.guid ) && - ( !tmp || tmp.test( handleObj.namespace ) ) && - ( !selector || selector === handleObj.selector || selector === "**" && handleObj.selector ) ) { - handlers.splice( j, 1 ); - - if ( handleObj.selector ) { - handlers.delegateCount--; - } - if ( special.remove ) { - special.remove.call( elem, handleObj ); - } - } - } - - // Remove generic event handler if we removed something and no more handlers exist - // (avoids potential for endless recursion during removal of special event handlers) - if ( origCount && !handlers.length ) { - if ( !special.teardown || special.teardown.call( elem, namespaces, elemData.handle ) === false ) { - jQuery.removeEvent( elem, type, elemData.handle ); - } - - delete events[ type ]; - } - } - - // Remove the expando if it's no longer used - if ( jQuery.isEmptyObject( events ) ) { - delete elemData.handle; - - // removeData also checks for emptiness and clears the expando if empty - // so use it instead of delete - jQuery._removeData( elem, "events" ); - } - }, - - trigger: function( event, data, elem, onlyHandlers ) { - var handle, ontype, cur, - 
bubbleType, special, tmp, i, - eventPath = [ elem || document ], - type = core_hasOwn.call( event, "type" ) ? event.type : event, - namespaces = core_hasOwn.call( event, "namespace" ) ? event.namespace.split(".") : []; - - cur = tmp = elem = elem || document; - - // Don't do events on text and comment nodes - if ( elem.nodeType === 3 || elem.nodeType === 8 ) { - return; - } - - // focus/blur morphs to focusin/out; ensure we're not firing them right now - if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { - return; - } - - if ( type.indexOf(".") >= 0 ) { - // Namespaced trigger; create a regexp to match event type in handle() - namespaces = type.split("."); - type = namespaces.shift(); - namespaces.sort(); - } - ontype = type.indexOf(":") < 0 && "on" + type; - - // Caller can pass in a jQuery.Event object, Object, or just an event type string - event = event[ jQuery.expando ] ? - event : - new jQuery.Event( type, typeof event === "object" && event ); - - event.isTrigger = true; - event.namespace = namespaces.join("."); - event.namespace_re = event.namespace ? - new RegExp( "(^|\.)" + namespaces.join("\.(?:.*\.|)") + "(\.|$)" ) : - null; - - // Clean up the event in case it is being reused - event.result = undefined; - if ( !event.target ) { - event.target = elem; - } - - // Clone any incoming data and prepend the event, creating the handler arg list - data = data == null ? 
- [ event ] : - jQuery.makeArray( data, [ event ] ); - - // Allow special events to draw outside the lines - special = jQuery.event.special[ type ] || {}; - if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { - return; - } - - // Determine event propagation path in advance, per W3C events spec (#9951) - // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) - if ( !onlyHandlers && !special.noBubble && !jQuery.isWindow( elem ) ) { - - bubbleType = special.delegateType || type; - if ( !rfocusMorph.test( bubbleType + type ) ) { - cur = cur.parentNode; - } - for ( ; cur; cur = cur.parentNode ) { - eventPath.push( cur ); - tmp = cur; - } - - // Only add window if we got to document (e.g., not plain obj or detached DOM) - if ( tmp === (elem.ownerDocument || document) ) { - eventPath.push( tmp.defaultView || tmp.parentWindow || window ); - } - } - - // Fire handlers on the event path - i = 0; - while ( (cur = eventPath[i++]) && !event.isPropagationStopped() ) { - - event.type = i > 1 ? - bubbleType : - special.bindType || type; - - // jQuery handler - handle = ( jQuery._data( cur, "events" ) || {} )[ event.type ] && jQuery._data( cur, "handle" ); - if ( handle ) { - handle.apply( cur, data ); - } - - // Native handler - handle = ontype && cur[ ontype ]; - if ( handle && jQuery.acceptData( cur ) && handle.apply && handle.apply( cur, data ) === false ) { - event.preventDefault(); - } - } - event.type = type; - - // If nobody prevented the default action, do it now - if ( !onlyHandlers && !event.isDefaultPrevented() ) { - - if ( (!special._default || special._default.apply( elem.ownerDocument, data ) === false) && - !(type === "click" && jQuery.nodeName( elem, "a" )) && jQuery.acceptData( elem ) ) { - - // Call a native DOM method on the target with the same name name as the event. - // Can't use an .isFunction() check here because IE6/7 fails that test. 
- // Don't do default actions on window, that's where global variables be (#6170) - if ( ontype && elem[ type ] && !jQuery.isWindow( elem ) ) { - - // Don't re-trigger an onFOO event when we call its FOO() method - tmp = elem[ ontype ]; - - if ( tmp ) { - elem[ ontype ] = null; - } - - // Prevent re-triggering of the same event, since we already bubbled it above - jQuery.event.triggered = type; - try { - elem[ type ](); - } catch ( e ) { - // IE<9 dies on focus/blur to hidden element (#1486,#12518) - // only reproducible on winXP IE8 native, not IE9 in IE8 mode - } - jQuery.event.triggered = undefined; - - if ( tmp ) { - elem[ ontype ] = tmp; - } - } - } - } - - return event.result; - }, - - dispatch: function( event ) { - - // Make a writable jQuery.Event from the native event object - event = jQuery.event.fix( event ); - - var i, ret, handleObj, matched, j, - handlerQueue = [], - args = core_slice.call( arguments ), - handlers = ( jQuery._data( this, "events" ) || {} )[ event.type ] || [], - special = jQuery.event.special[ event.type ] || {}; - - // Use the fix-ed jQuery.Event rather than the (read-only) native event - args[0] = event; - event.delegateTarget = this; - - // Call the preDispatch hook for the mapped type, and let it bail if desired - if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { - return; - } - - // Determine handlers - handlerQueue = jQuery.event.handlers.call( this, event, handlers ); - - // Run delegates first; they may want to stop propagation beneath us - i = 0; - while ( (matched = handlerQueue[ i++ ]) && !event.isPropagationStopped() ) { - event.currentTarget = matched.elem; - - j = 0; - while ( (handleObj = matched.handlers[ j++ ]) && !event.isImmediatePropagationStopped() ) { - - // Triggered event must either 1) have no namespace, or - // 2) have namespace(s) a subset or equal to those in the bound event (both can have no namespace). 
- if ( !event.namespace_re || event.namespace_re.test( handleObj.namespace ) ) { - - event.handleObj = handleObj; - event.data = handleObj.data; - - ret = ( (jQuery.event.special[ handleObj.origType ] || {}).handle || handleObj.handler ) - .apply( matched.elem, args ); - - if ( ret !== undefined ) { - if ( (event.result = ret) === false ) { - event.preventDefault(); - event.stopPropagation(); - } - } - } - } - } - - // Call the postDispatch hook for the mapped type - if ( special.postDispatch ) { - special.postDispatch.call( this, event ); - } - - return event.result; - }, - - handlers: function( event, handlers ) { - var sel, handleObj, matches, i, - handlerQueue = [], - delegateCount = handlers.delegateCount, - cur = event.target; - - // Find delegate handlers - // Black-hole SVG <use> instance trees (#13180) - // Avoid non-left-click bubbling in Firefox (#3861) - if ( delegateCount && cur.nodeType && (!event.button || event.type !== "click") ) { - - for ( ; cur != this; cur = cur.parentNode || this ) { - - // Don't check non-elements (#13208) - // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) - if ( cur.nodeType === 1 && (cur.disabled !== true || event.type !== "click") ) { - matches = []; - for ( i = 0; i < delegateCount; i++ ) { - handleObj = handlers[ i ]; - - // Don't conflict with Object.prototype properties (#13203) - sel = handleObj.selector + " "; - - if ( matches[ sel ] === undefined ) { - matches[ sel ] = handleObj.needsContext ? 
- jQuery( sel, this ).index( cur ) >= 0 : - jQuery.find( sel, this, null, [ cur ] ).length; - } - if ( matches[ sel ] ) { - matches.push( handleObj ); - } - } - if ( matches.length ) { - handlerQueue.push({ elem: cur, handlers: matches }); - } - } - } - } - - // Add the remaining (directly-bound) handlers - if ( delegateCount < handlers.length ) { - handlerQueue.push({ elem: this, handlers: handlers.slice( delegateCount ) }); - } - - return handlerQueue; - }, - - fix: function( event ) { - if ( event[ jQuery.expando ] ) { - return event; - } - - // Create a writable copy of the event object and normalize some properties - var i, prop, copy, - type = event.type, - originalEvent = event, - fixHook = this.fixHooks[ type ]; - - if ( !fixHook ) { - this.fixHooks[ type ] = fixHook = - rmouseEvent.test( type ) ? this.mouseHooks : - rkeyEvent.test( type ) ? this.keyHooks : - {}; - } - copy = fixHook.props ? this.props.concat( fixHook.props ) : this.props; - - event = new jQuery.Event( originalEvent ); - - i = copy.length; - while ( i-- ) { - prop = copy[ i ]; - event[ prop ] = originalEvent[ prop ]; - } - - // Support: IE<9 - // Fix target property (#1925) - if ( !event.target ) { - event.target = originalEvent.srcElement || document; - } - - // Support: Chrome 23+, Safari? - // Target should not be a text node (#504, #13143) - if ( event.target.nodeType === 3 ) { - event.target = event.target.parentNode; - } - - // Support: IE<9 - // For mouse/key events, metaKey==false if it's undefined (#3368, #11328) - event.metaKey = !!event.metaKey; - - return fixHook.filter ? 
fixHook.filter( event, originalEvent ) : event; - }, - - // Includes some event props shared by KeyEvent and MouseEvent - props: "altKey bubbles cancelable ctrlKey currentTarget eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "), - - fixHooks: {}, - - keyHooks: { - props: "char charCode key keyCode".split(" "), - filter: function( event, original ) { - - // Add which for key events - if ( event.which == null ) { - event.which = original.charCode != null ? original.charCode : original.keyCode; - } - - return event; - } - }, - - mouseHooks: { - props: "button buttons clientX clientY fromElement offsetX offsetY pageX pageY screenX screenY toElement".split(" "), - filter: function( event, original ) { - var body, eventDoc, doc, - button = original.button, - fromElement = original.fromElement; - - // Calculate pageX/Y if missing and clientX/Y available - if ( event.pageX == null && original.clientX != null ) { - eventDoc = event.target.ownerDocument || document; - doc = eventDoc.documentElement; - body = eventDoc.body; - - event.pageX = original.clientX + ( doc && doc.scrollLeft || body && body.scrollLeft || 0 ) - ( doc && doc.clientLeft || body && body.clientLeft || 0 ); - event.pageY = original.clientY + ( doc && doc.scrollTop || body && body.scrollTop || 0 ) - ( doc && doc.clientTop || body && body.clientTop || 0 ); - } - - // Add relatedTarget, if necessary - if ( !event.relatedTarget && fromElement ) { - event.relatedTarget = fromElement === event.target ? original.toElement : fromElement; - } - - // Add which for click: 1 === left; 2 === middle; 3 === right - // Note: button is not normalized, so don't use it - if ( !event.which && button !== undefined ) { - event.which = ( button & 1 ? 1 : ( button & 2 ? 3 : ( button & 4 ? 
2 : 0 ) ) ); - } - - return event; - } - }, - - special: { - load: { - // Prevent triggered image.load events from bubbling to window.load - noBubble: true - }, - click: { - // For checkbox, fire native event so checked state will be right - trigger: function() { - if ( jQuery.nodeName( this, "input" ) && this.type === "checkbox" && this.click ) { - this.click(); - return false; - } - } - }, - focus: { - // Fire native event if possible so blur/focus sequence is correct - trigger: function() { - if ( this !== document.activeElement && this.focus ) { - try { - this.focus(); - return false; - } catch ( e ) { - // Support: IE<9 - // If we error on focus to hidden element (#1486, #12518), - // let .trigger() run the handlers - } - } - }, - delegateType: "focusin" - }, - blur: { - trigger: function() { - if ( this === document.activeElement && this.blur ) { - this.blur(); - return false; - } - }, - delegateType: "focusout" - }, - - beforeunload: { - postDispatch: function( event ) { - - // Even when returnValue equals to undefined Firefox will still show alert - if ( event.result !== undefined ) { - event.originalEvent.returnValue = event.result; - } - } - } - }, - - simulate: function( type, elem, event, bubble ) { - // Piggyback on a donor event to simulate a different one. - // Fake originalEvent to avoid donor's stopPropagation, but if the - // simulated event prevents default then we do the same on the donor. - var e = jQuery.extend( - new jQuery.Event(), - event, - { type: type, - isSimulated: true, - originalEvent: {} - } - ); - if ( bubble ) { - jQuery.event.trigger( e, null, elem ); - } else { - jQuery.event.dispatch.call( elem, e ); - } - if ( e.isDefaultPrevented() ) { - event.preventDefault(); - } - } - }; - - jQuery.removeEvent = document.removeEventListener ? 
- function( elem, type, handle ) { - if ( elem.removeEventListener ) { - elem.removeEventListener( type, handle, false ); - } - } : - function( elem, type, handle ) { - var name = "on" + type; - - if ( elem.detachEvent ) { - - // #8545, #7054, preventing memory leaks for custom events in IE6-8 - // detachEvent needed property on element, by name of that event, to properly expose it to GC - if ( typeof elem[ name ] === core_strundefined ) { - elem[ name ] = null; - } - - elem.detachEvent( name, handle ); - } - }; - - jQuery.Event = function( src, props ) { - // Allow instantiation without the 'new' keyword - if ( !(this instanceof jQuery.Event) ) { - return new jQuery.Event( src, props ); - } - - // Event object - if ( src && src.type ) { - this.originalEvent = src; - this.type = src.type; - - // Events bubbling up the document may have been marked as prevented - // by a handler lower down the tree; reflect the correct value. - this.isDefaultPrevented = ( src.defaultPrevented || src.returnValue === false || - src.getPreventDefault && src.getPreventDefault() ) ? returnTrue : returnFalse; - - // Event type - } else { - this.type = src; - } - - // Put explicitly provided properties onto the event object - if ( props ) { - jQuery.extend( this, props ); - } - - // Create a timestamp if incoming event doesn't have one - this.timeStamp = src && src.timeStamp || jQuery.now(); - - // Mark it as fixed - this[ jQuery.expando ] = true; - }; - -// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding -// http://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding... 
- jQuery.Event.prototype = { - isDefaultPrevented: returnFalse, - isPropagationStopped: returnFalse, - isImmediatePropagationStopped: returnFalse, - - preventDefault: function() { - var e = this.originalEvent; - - this.isDefaultPrevented = returnTrue; - if ( !e ) { - return; - } - - // If preventDefault exists, run it on the original event - if ( e.preventDefault ) { - e.preventDefault(); - - // Support: IE - // Otherwise set the returnValue property of the original event to false - } else { - e.returnValue = false; - } - }, - stopPropagation: function() { - var e = this.originalEvent; - - this.isPropagationStopped = returnTrue; - if ( !e ) { - return; - } - // If stopPropagation exists, run it on the original event - if ( e.stopPropagation ) { - e.stopPropagation(); - } - - // Support: IE - // Set the cancelBubble property of the original event to true - e.cancelBubble = true; - }, - stopImmediatePropagation: function() { - this.isImmediatePropagationStopped = returnTrue; - this.stopPropagation(); - } - }; - -// Create mouseenter/leave events using mouseover/out and event-time checks - jQuery.each({ - mouseenter: "mouseover", - mouseleave: "mouseout" - }, function( orig, fix ) { - jQuery.event.special[ orig ] = { - delegateType: fix, - bindType: fix, - - handle: function( event ) { - var ret, - target = this, - related = event.relatedTarget, - handleObj = event.handleObj; - - // For mousenter/leave call the handler if related is outside the target. 
- // NB: No relatedTarget if the mouse left/entered the browser window - if ( !related || (related !== target && !jQuery.contains( target, related )) ) { - event.type = handleObj.origType; - ret = handleObj.handler.apply( this, arguments ); - event.type = fix; - } - return ret; - } - }; - }); - -// IE submit delegation - if ( !jQuery.support.submitBubbles ) { - - jQuery.event.special.submit = { - setup: function() { - // Only need this for delegated form submit events - if ( jQuery.nodeName( this, "form" ) ) { - return false; - } - - // Lazy-add a submit handler when a descendant form may potentially be submitted - jQuery.event.add( this, "click._submit keypress._submit", function( e ) { - // Node name check avoids a VML-related crash in IE (#9807) - var elem = e.target, - form = jQuery.nodeName( elem, "input" ) || jQuery.nodeName( elem, "button" ) ? elem.form : undefined; - if ( form && !jQuery._data( form, "submitBubbles" ) ) { - jQuery.event.add( form, "submit._submit", function( event ) { - event._submit_bubble = true; - }); - jQuery._data( form, "submitBubbles", true ); - } - }); - // return undefined since we don't need an event listener - }, - - postDispatch: function( event ) { - // If form was submitted by the user, bubble the event up the tree - if ( event._submit_bubble ) { - delete event._submit_bubble; - if ( this.parentNode && !event.isTrigger ) { - jQuery.event.simulate( "submit", this.parentNode, event, true ); - } - } - }, - - teardown: function() { - // Only need this for delegated form submit events - if ( jQuery.nodeName( this, "form" ) ) { - return false; - } - - // Remove delegated handlers; cleanData eventually reaps submit handlers attached above - jQuery.event.remove( this, "._submit" ); - } - }; - } - -// IE change delegation and checkbox/radio fix - if ( !jQuery.support.changeBubbles ) { - - jQuery.event.special.change = { - - setup: function() { - - if ( rformElems.test( this.nodeName ) ) { - // IE doesn't fire change on a check/radio 
until blur; trigger it on click - // after a propertychange. Eat the blur-change in special.change.handle. - // This still fires onchange a second time for check/radio after blur. - if ( this.type === "checkbox" || this.type === "radio" ) { - jQuery.event.add( this, "propertychange._change", function( event ) { - if ( event.originalEvent.propertyName === "checked" ) { - this._just_changed = true; - } - }); - jQuery.event.add( this, "click._change", function( event ) { - if ( this._just_changed && !event.isTrigger ) { - this._just_changed = false; - } - // Allow triggered, simulated change events (#11500) - jQuery.event.simulate( "change", this, event, true ); - }); - } - return false; - } - // Delegated event; lazy-add a change handler on descendant inputs - jQuery.event.add( this, "beforeactivate._change", function( e ) { - var elem = e.target; - - if ( rformElems.test( elem.nodeName ) && !jQuery._data( elem, "changeBubbles" ) ) { - jQuery.event.add( elem, "change._change", function( event ) { - if ( this.parentNode && !event.isSimulated && !event.isTrigger ) { - jQuery.event.simulate( "change", this.parentNode, event, true ); - } - }); - jQuery._data( elem, "changeBubbles", true ); - } - }); - }, - - handle: function( event ) { - var elem = event.target; - - // Swallow native change events from checkbox/radio, we already triggered them above - if ( this !== elem || event.isSimulated || event.isTrigger || (elem.type !== "radio" && elem.type !== "checkbox") ) { - return event.handleObj.handler.apply( this, arguments ); - } - }, - - teardown: function() { - jQuery.event.remove( this, "._change" ); - - return !rformElems.test( this.nodeName ); - } - }; - } - -// Create "bubbling" focus and blur events - if ( !jQuery.support.focusinBubbles ) { - jQuery.each({ focus: "focusin", blur: "focusout" }, function( orig, fix ) { - - // Attach a single capturing handler while someone wants focusin/focusout - var attaches = 0, - handler = function( event ) { - 
jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ), true ); - }; - - jQuery.event.special[ fix ] = { - setup: function() { - if ( attaches++ === 0 ) { - document.addEventListener( orig, handler, true ); - } - }, - teardown: function() { - if ( --attaches === 0 ) { - document.removeEventListener( orig, handler, true ); - } - } - }; - }); - } - - jQuery.fn.extend({ - - on: function( types, selector, data, fn, /*INTERNAL*/ one ) { - var type, origFn; - - // Types can be a map of types/handlers - if ( typeof types === "object" ) { - // ( types-Object, selector, data ) - if ( typeof selector !== "string" ) { - // ( types-Object, data ) - data = data || selector; - selector = undefined; - } - for ( type in types ) { - this.on( type, selector, data, types[ type ], one ); - } - return this; - } - - if ( data == null && fn == null ) { - // ( types, fn ) - fn = selector; - data = selector = undefined; - } else if ( fn == null ) { - if ( typeof selector === "string" ) { - // ( types, selector, fn ) - fn = data; - data = undefined; - } else { - // ( types, data, fn ) - fn = data; - data = selector; - selector = undefined; - } - } - if ( fn === false ) { - fn = returnFalse; - } else if ( !fn ) { - return this; - } - - if ( one === 1 ) { - origFn = fn; - fn = function( event ) { - // Can use an empty set, since event contains the info - jQuery().off( event ); - return origFn.apply( this, arguments ); - }; - // Use same guid so caller can remove using origFn - fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); - } - return this.each( function() { - jQuery.event.add( this, types, fn, data, selector ); - }); - }, - one: function( types, selector, data, fn ) { - return this.on( types, selector, data, fn, 1 ); - }, - off: function( types, selector, fn ) { - var handleObj, type; - if ( types && types.preventDefault && types.handleObj ) { - // ( event ) dispatched jQuery.Event - handleObj = types.handleObj; - jQuery( types.delegateTarget ).off( - 
handleObj.namespace ? handleObj.origType + "." + handleObj.namespace : handleObj.origType, - handleObj.selector, - handleObj.handler - ); - return this; - } - if ( typeof types === "object" ) { - // ( types-object [, selector] ) - for ( type in types ) { - this.off( type, selector, types[ type ] ); - } - return this; - } - if ( selector === false || typeof selector === "function" ) { - // ( types [, fn] ) - fn = selector; - selector = undefined; - } - if ( fn === false ) { - fn = returnFalse; - } - return this.each(function() { - jQuery.event.remove( this, types, fn, selector ); - }); - }, - - bind: function( types, data, fn ) { - return this.on( types, null, data, fn ); - }, - unbind: function( types, fn ) { - return this.off( types, null, fn ); - }, - - delegate: function( selector, types, data, fn ) { - return this.on( types, selector, data, fn ); - }, - undelegate: function( selector, types, fn ) { - // ( namespace ) or ( selector, types [, fn] ) - return arguments.length === 1 ? this.off( selector, "**" ) : this.off( types, selector || "**", fn ); - }, - - trigger: function( type, data ) { - return this.each(function() { - jQuery.event.trigger( type, data, this ); - }); - }, - triggerHandler: function( type, data ) { - var elem = this[0]; - if ( elem ) { - return jQuery.event.trigger( type, data, elem, true ); - } - } - }); - /*! 
- * Sizzle CSS Selector Engine - * Copyright 2012 jQuery Foundation and other contributors - * Released under the MIT license - * http://sizzlejs.com/ - */ - (function( window, undefined ) { - - var i, - cachedruns, - Expr, - getText, - isXML, - compile, - hasDuplicate, - outermostContext, - - // Local document vars - setDocument, - document, - docElem, - documentIsXML, - rbuggyQSA, - rbuggyMatches, - matches, - contains, - sortOrder, - - // Instance-specific data - expando = "sizzle" + -(new Date()), - preferredDoc = window.document, - support = {}, - dirruns = 0, - done = 0, - classCache = createCache(), - tokenCache = createCache(), - compilerCache = createCache(), - - // General-purpose constants - strundefined = typeof undefined, - MAX_NEGATIVE = 1 << 31, - - // Array methods - arr = [], - pop = arr.pop, - push = arr.push, - slice = arr.slice, - // Use a stripped-down indexOf if we can't use a native one - indexOf = arr.indexOf || function( elem ) { - var i = 0, - len = this.length; - for ( ; i < len; i++ ) { - if ( this[i] === elem ) { - return i; - } - } - return -1; - }, - - - // Regular expressions - - // Whitespace characters http://www.w3.org/TR/css3-selectors/#whitespace - whitespace = "[\x20\t\r\n\f]", - // http://www.w3.org/TR/css3-syntax/#characters - characterEncoding = "(?:\\.|[\w-]|[^\x00-\xa0])+", - - // Loosely modeled on CSS identifier characters - // An unquoted value should be a CSS identifier http://www.w3.org/TR/css3-selectors/#attribute-selectors - // Proper syntax: http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier - identifier = characterEncoding.replace( "w", "w#" ), - - // Acceptable operators http://www.w3.org/TR/selectors/#attribute-selectors - operators = "([*^$|!~]?=)", - attributes = "\[" + whitespace + "*(" + characterEncoding + ")" + whitespace + - "*(?:" + operators + whitespace + "*(?:(['"])((?:\\.|[^\\])*?)\3|(" + identifier + ")|)|)" + whitespace + "*\]", - - // Prefer arguments quoted, - // then not containing 
pseudos/brackets, - // then attribute selectors/non-parenthetical expressions, - // then anything else - // These preferences are here to reduce the number of selectors - // needing tokenize in the PSEUDO preFilter - pseudos = ":(" + characterEncoding + ")(?:\(((['"])((?:\\.|[^\\])*?)\3|((?:\\.|[^\\()[\]]|" + attributes.replace( 3, 8 ) + ")*)|.*)\)|)", - - // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter - rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\])(?:\\.)*)" + whitespace + "+$", "g" ), - - rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), - rcombinators = new RegExp( "^" + whitespace + "*([\x20\t\r\n\f>+~])" + whitespace + "*" ), - rpseudo = new RegExp( pseudos ), - ridentifier = new RegExp( "^" + identifier + "$" ), - - matchExpr = { - "ID": new RegExp( "^#(" + characterEncoding + ")" ), - "CLASS": new RegExp( "^\.(" + characterEncoding + ")" ), - "NAME": new RegExp( "^\[name=['"]?(" + characterEncoding + ")['"]?\]" ), - "TAG": new RegExp( "^(" + characterEncoding.replace( "w", "w*" ) + ")" ), - "ATTR": new RegExp( "^" + attributes ), - "PSEUDO": new RegExp( "^" + pseudos ), - "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\(" + whitespace + - "*(even|odd|(([+-]|)(\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace + - "*(\d+)|))" + whitespace + "*\)|)", "i" ), - // For use in libraries implementing .is() - // We use this for POS matching in `select` - "needsContext": new RegExp( "^" + whitespace + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\(" + - whitespace + "*((?:-\d)?\d*)" + whitespace + "*\)|)(?=[^-]|$)", "i" ) - }, - - rsibling = /[\x20\t\r\n\f]*[+~]/, - - rnative = /^[^{]+{\s*[native code/, - - // Easily-parseable/retrievable ID or TAG or CLASS selectors - rquickExpr = /^(?:#([\w-]+)|(\w+)|.([\w-]+))$/, - - rinputs = /^(?:input|select|textarea|button)$/i, - rheader = /^h\d$/i, - - rescape = /'|\/g, - rattributeQuotes = 
/=[\x20\t\r\n\f]*([^'"]]*)[\x20\t\r\n\f]*]/g, - - // CSS escapes http://www.w3.org/TR/CSS21/syndata.html#escaped-characters - runescape = /\([\da-fA-F]{1,6}[\x20\t\r\n\f]?|.)/g, - funescape = function( _, escaped ) { - var high = "0x" + escaped - 0x10000; - // NaN means non-codepoint - return high !== high ? - escaped : - // BMP codepoint - high < 0 ? - String.fromCharCode( high + 0x10000 ) : - // Supplemental Plane codepoint (surrogate pair) - String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); - }; - -// Use a stripped-down slice if we can't use a native one - try { - slice.call( preferredDoc.documentElement.childNodes, 0 )[0].nodeType; - } catch ( e ) { - slice = function( i ) { - var elem, - results = []; - while ( (elem = this[i++]) ) { - results.push( elem ); - } - return results; - }; - } - - /** - * For feature detection - * @param {Function} fn The function to test for native support - */ - function isNative( fn ) { - return rnative.test( fn + "" ); - } - - /** - * Create key-value caches of limited size - * @returns {Function(string, Object)} Returns the Object data after storing it on itself with - * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) - * deleting the oldest entry - */ - function createCache() { - var cache, - keys = []; - - return (cache = function( key, value ) { - // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) - if ( keys.push( key += " " ) > Expr.cacheLength ) { - // Only keep the most recent entries - delete cache[ keys.shift() ]; - } - return (cache[ key ] = value); - }); - } - - /** - * Mark a function for special use by Sizzle - * @param {Function} fn The function to mark - */ - function markFunction( fn ) { - fn[ expando ] = true; - return fn; - } - - /** - * Support testing using an element - * @param {Function} fn Passed the created div and expects a boolean result - */ - function assert( fn ) { - var div = 
document.createElement("div"); - - try { - return fn( div ); - } catch (e) { - return false; - } finally { - // release memory in IE - div = null; - } - } - - function Sizzle( selector, context, results, seed ) { - var match, elem, m, nodeType, - // QSA vars - i, groups, old, nid, newContext, newSelector; - - if ( ( context ? context.ownerDocument || context : preferredDoc ) !== document ) { - setDocument( context ); - } - - context = context || document; - results = results || []; - - if ( !selector || typeof selector !== "string" ) { - return results; - } - - if ( (nodeType = context.nodeType) !== 1 && nodeType !== 9 ) { - return []; - } - - if ( !documentIsXML && !seed ) { - - // Shortcuts - if ( (match = rquickExpr.exec( selector )) ) { - // Speed-up: Sizzle("#ID") - if ( (m = match[1]) ) { - if ( nodeType === 9 ) { - elem = context.getElementById( m ); - // Check parentNode to catch when Blackberry 4.6 returns - // nodes that are no longer in the document #6963 - if ( elem && elem.parentNode ) { - // Handle the case where IE, Opera, and Webkit return items - // by name instead of ID - if ( elem.id === m ) { - results.push( elem ); - return results; - } - } else { - return results; - } - } else { - // Context is not a document - if ( context.ownerDocument && (elem = context.ownerDocument.getElementById( m )) && - contains( context, elem ) && elem.id === m ) { - results.push( elem ); - return results; - } - } - - // Speed-up: Sizzle("TAG") - } else if ( match[2] ) { - push.apply( results, slice.call(context.getElementsByTagName( selector ), 0) ); - return results; - - // Speed-up: Sizzle(".CLASS") - } else if ( (m = match[3]) && support.getByClassName && context.getElementsByClassName ) { - push.apply( results, slice.call(context.getElementsByClassName( m ), 0) ); - return results; - } - } - - // QSA path - if ( support.qsa && !rbuggyQSA.test(selector) ) { - old = true; - nid = expando; - newContext = context; - newSelector = nodeType === 9 && selector; - - // 
qSA works strangely on Element-rooted queries - // We can work around this by specifying an extra ID on the root - // and working up from there (Thanks to Andrew Dupont for the technique) - // IE 8 doesn't work on object elements - if ( nodeType === 1 && context.nodeName.toLowerCase() !== "object" ) { - groups = tokenize( selector ); - - if ( (old = context.getAttribute("id")) ) { - nid = old.replace( rescape, "\$&" ); - } else { - context.setAttribute( "id", nid ); - } - nid = "[id='" + nid + "'] "; - - i = groups.length; - while ( i-- ) { - groups[i] = nid + toSelector( groups[i] ); - } - newContext = rsibling.test( selector ) && context.parentNode || context; - newSelector = groups.join(","); - } - - if ( newSelector ) { - try { - push.apply( results, slice.call( newContext.querySelectorAll( - newSelector - ), 0 ) ); - return results; - } catch(qsaError) { - } finally { - if ( !old ) { - context.removeAttribute("id"); - } - } - } - } - } - - // All others - return select( selector.replace( rtrim, "$1" ), context, results, seed ); - } - - /** - * Detect xml - * @param {Element|Object} elem An element or a document - */ - isXML = Sizzle.isXML = function( elem ) { - // documentElement is verified for cases where it doesn't yet exist - // (such as loading iframes in IE - #4833) - var documentElement = elem && (elem.ownerDocument || elem).documentElement; - return documentElement ? documentElement.nodeName !== "HTML" : false; - }; - - /** - * Sets document-related variables once based on the current document - * @param {Element|Object} [doc] An element or document object to use to set the document - * @returns {Object} Returns the current document - */ - setDocument = Sizzle.setDocument = function( node ) { - var doc = node ? 
node.ownerDocument || node : preferredDoc; - - // If no document and documentElement is available, return - if ( doc === document || doc.nodeType !== 9 || !doc.documentElement ) { - return document; - } - - // Set our document - document = doc; - docElem = doc.documentElement; - - // Support tests - documentIsXML = isXML( doc ); - - // Check if getElementsByTagName("*") returns only elements - support.tagNameNoComments = assert(function( div ) { - div.appendChild( doc.createComment("") ); - return !div.getElementsByTagName("*").length; - }); - - // Check if attributes should be retrieved by attribute nodes - support.attributes = assert(function( div ) { - div.innerHTML = "<select></select>"; - var type = typeof div.lastChild.getAttribute("multiple"); - // IE8 returns a string for some attributes even when not present - return type !== "boolean" && type !== "string"; - }); - - // Check if getElementsByClassName can be trusted - support.getByClassName = assert(function( div ) { - // Opera can't find a second classname (in 9.6) - div.innerHTML = "<div class='hidden e'></div><div class='hidden'></div>"; - if ( !div.getElementsByClassName || !div.getElementsByClassName("e").length ) { - return false; - } - - // Safari 3.2 caches class attributes and doesn't catch changes - div.lastChild.className = "e"; - return div.getElementsByClassName("e").length === 2; - }); - - // Check if getElementById returns elements by name - // Check if getElementsByName privileges form controls or returns elements by ID - support.getByName = assert(function( div ) { - // Inject content - div.id = expando + 0; - div.innerHTML = "<a name='" + expando + "'></a><div name='" + expando + "'></div>"; - docElem.insertBefore( div, docElem.firstChild ); - - // Test - var pass = doc.getElementsByName && - // buggy browsers will return fewer than the correct 2 - doc.getElementsByName( expando ).length === 2 + - // buggy browsers will return more than the correct 0 - doc.getElementsByName( expando + 0 
).length; - support.getIdNotName = !doc.getElementById( expando ); - - // Cleanup - docElem.removeChild( div ); - - return pass; - }); - - // IE6/7 return modified attributes - Expr.attrHandle = assert(function( div ) { - div.innerHTML = "<a href='#'></a>"; - return div.firstChild && typeof div.firstChild.getAttribute !== strundefined && - div.firstChild.getAttribute("href") === "#"; - }) ? - {} : - { - "href": function( elem ) { - return elem.getAttribute( "href", 2 ); - }, - "type": function( elem ) { - return elem.getAttribute("type"); - } - }; - - // ID find and filter - if ( support.getIdNotName ) { - Expr.find["ID"] = function( id, context ) { - if ( typeof context.getElementById !== strundefined && !documentIsXML ) { - var m = context.getElementById( id ); - // Check parentNode to catch when Blackberry 4.6 returns - // nodes that are no longer in the document #6963 - return m && m.parentNode ? [m] : []; - } - }; - Expr.filter["ID"] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - return elem.getAttribute("id") === attrId; - }; - }; - } else { - Expr.find["ID"] = function( id, context ) { - if ( typeof context.getElementById !== strundefined && !documentIsXML ) { - var m = context.getElementById( id ); - - return m ? - m.id === id || typeof m.getAttributeNode !== strundefined && m.getAttributeNode("id").value === id ? - [m] : - undefined : - []; - } - }; - Expr.filter["ID"] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - var node = typeof elem.getAttributeNode !== strundefined && elem.getAttributeNode("id"); - return node && node.value === attrId; - }; - }; - } - - // Tag - Expr.find["TAG"] = support.tagNameNoComments ? 
- function( tag, context ) { - if ( typeof context.getElementsByTagName !== strundefined ) { - return context.getElementsByTagName( tag ); - } - } : - function( tag, context ) { - var elem, - tmp = [], - i = 0, - results = context.getElementsByTagName( tag ); - - // Filter out possible comments - if ( tag === "*" ) { - while ( (elem = results[i++]) ) { - if ( elem.nodeType === 1 ) { - tmp.push( elem ); - } - } - - return tmp; - } - return results; - }; - - // Name - Expr.find["NAME"] = support.getByName && function( tag, context ) { - if ( typeof context.getElementsByName !== strundefined ) { - return context.getElementsByName( name ); - } - }; - - // Class - Expr.find["CLASS"] = support.getByClassName && function( className, context ) { - if ( typeof context.getElementsByClassName !== strundefined && !documentIsXML ) { - return context.getElementsByClassName( className ); - } - }; - - // QSA and matchesSelector support - - // matchesSelector(:active) reports false when true (IE9/Opera 11.5) - rbuggyMatches = []; - - // qSa(:focus) reports false when true (Chrome 21), - // no need to also add to buggyMatches since matches checks buggyQSA - // A support test would require too much code (would include document ready) - rbuggyQSA = [ ":focus" ]; - - if ( (support.qsa = isNative(doc.querySelectorAll)) ) { - // Build QSA regex - // Regex strategy adopted from Diego Perini - assert(function( div ) { - // Select is set to empty string on purpose - // This is to test IE's treatment of not explictly - // setting a boolean content attribute, - // since its presence should be enough - // http://bugs.jquery.com/ticket/12359 - div.innerHTML = "<select><option selected=''></option></select>"; - - // IE8 - Some boolean attributes are not treated correctly - if ( !div.querySelectorAll("[selected]").length ) { - rbuggyQSA.push( "\[" + whitespace + "*(?:checked|disabled|ismap|multiple|readonly|selected|value)" ); - } - - // Webkit/Opera - :checked should return selected option 
elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - // IE8 throws error here and will not see later tests - if ( !div.querySelectorAll(":checked").length ) { - rbuggyQSA.push(":checked"); - } - }); - - assert(function( div ) { - - // Opera 10-12/IE8 - ^= $= *= and empty values - // Should not select anything - div.innerHTML = "<input type='hidden' i=''/>"; - if ( div.querySelectorAll("[i^='']").length ) { - rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:""|'')" ); - } - - // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) - // IE8 throws error here and will not see later tests - if ( !div.querySelectorAll(":enabled").length ) { - rbuggyQSA.push( ":enabled", ":disabled" ); - } - - // Opera 10-11 does not throw on post-comma invalid pseudos - div.querySelectorAll("*,:x"); - rbuggyQSA.push(",.*:"); - }); - } - - if ( (support.matchesSelector = isNative( (matches = docElem.matchesSelector || - docElem.mozMatchesSelector || - docElem.webkitMatchesSelector || - docElem.oMatchesSelector || - docElem.msMatchesSelector) )) ) { - - assert(function( div ) { - // Check to see if it's possible to do matchesSelector - // on a disconnected node (IE 9) - support.disconnectedMatch = matches.call( div, "div" ); - - // This should fail with an exception - // Gecko does not error, returns false instead - matches.call( div, "[s!='']:x" ); - rbuggyMatches.push( "!=", pseudos ); - }); - } - - rbuggyQSA = new RegExp( rbuggyQSA.join("|") ); - rbuggyMatches = new RegExp( rbuggyMatches.join("|") ); - - // Element contains another - // Purposefully does not implement inclusive descendent - // As in, an element does not contain itself - contains = isNative(docElem.contains) || docElem.compareDocumentPosition ? - function( a, b ) { - var adown = a.nodeType === 9 ? a.documentElement : a, - bup = b && b.parentNode; - return a === bup || !!( bup && bup.nodeType === 1 && ( - adown.contains ? 
- adown.contains( bup ) : - a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 - )); - } : - function( a, b ) { - if ( b ) { - while ( (b = b.parentNode) ) { - if ( b === a ) { - return true; - } - } - } - return false; - }; - - // Document order sorting - sortOrder = docElem.compareDocumentPosition ? - function( a, b ) { - var compare; - - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - if ( (compare = b.compareDocumentPosition && a.compareDocumentPosition && a.compareDocumentPosition( b )) ) { - if ( compare & 1 || a.parentNode && a.parentNode.nodeType === 11 ) { - if ( a === doc || contains( preferredDoc, a ) ) { - return -1; - } - if ( b === doc || contains( preferredDoc, b ) ) { - return 1; - } - return 0; - } - return compare & 4 ? -1 : 1; - } - - return a.compareDocumentPosition ? -1 : 1; - } : - function( a, b ) { - var cur, - i = 0, - aup = a.parentNode, - bup = b.parentNode, - ap = [ a ], - bp = [ b ]; - - // Exit early if the nodes are identical - if ( a === b ) { - hasDuplicate = true; - return 0; - - // Parentless nodes are either documents or disconnected - } else if ( !aup || !bup ) { - return a === doc ? -1 : - b === doc ? 1 : - aup ? -1 : - bup ? 1 : - 0; - - // If the nodes are siblings, we can do a quick check - } else if ( aup === bup ) { - return siblingCheck( a, b ); - } - - // Otherwise we need full lists of their ancestors for comparison - cur = a; - while ( (cur = cur.parentNode) ) { - ap.unshift( cur ); - } - cur = b; - while ( (cur = cur.parentNode) ) { - bp.unshift( cur ); - } - - // Walk down the tree looking for a discrepancy - while ( ap[i] === bp[i] ) { - i++; - } - - return i ? - // Do a sibling check if the nodes have a common ancestor - siblingCheck( ap[i], bp[i] ) : - - // Otherwise nodes in our document sort first - ap[i] === preferredDoc ? -1 : - bp[i] === preferredDoc ? 
1 : - 0; - }; - - // Always assume the presence of duplicates if sort doesn't - // pass them to our comparison function (as in Google Chrome). - hasDuplicate = false; - [0, 0].sort( sortOrder ); - support.detectDuplicates = hasDuplicate; - - return document; - }; - - Sizzle.matches = function( expr, elements ) { - return Sizzle( expr, null, null, elements ); - }; - - Sizzle.matchesSelector = function( elem, expr ) { - // Set document vars if needed - if ( ( elem.ownerDocument || elem ) !== document ) { - setDocument( elem ); - } - - // Make sure that attribute selectors are quoted - expr = expr.replace( rattributeQuotes, "='$1']" ); - - // rbuggyQSA always contains :focus, so no need for an existence check - if ( support.matchesSelector && !documentIsXML && (!rbuggyMatches || !rbuggyMatches.test(expr)) && !rbuggyQSA.test(expr) ) { - try { - var ret = matches.call( elem, expr ); - - // IE 9's matchesSelector returns false on disconnected nodes - if ( ret || support.disconnectedMatch || - // As well, disconnected nodes are said to be in a document - // fragment in IE 9 - elem.document && elem.document.nodeType !== 11 ) { - return ret; - } - } catch(e) {} - } - - return Sizzle( expr, document, null, [elem] ).length > 0; - }; - - Sizzle.contains = function( context, elem ) { - // Set document vars if needed - if ( ( context.ownerDocument || context ) !== document ) { - setDocument( context ); - } - return contains( context, elem ); - }; - - Sizzle.attr = function( elem, name ) { - var val; - - // Set document vars if needed - if ( ( elem.ownerDocument || elem ) !== document ) { - setDocument( elem ); - } - - if ( !documentIsXML ) { - name = name.toLowerCase(); - } - if ( (val = Expr.attrHandle[ name ]) ) { - return val( elem ); - } - if ( documentIsXML || support.attributes ) { - return elem.getAttribute( name ); - } - return ( (val = elem.getAttributeNode( name )) || elem.getAttribute( name ) ) && elem[ name ] === true ? - name : - val && val.specified ? 
val.value : null; - }; - - Sizzle.error = function( msg ) { - throw new Error( "Syntax error, unrecognized expression: " + msg ); - }; - -// Document sorting and removing duplicates - Sizzle.uniqueSort = function( results ) { - var elem, - duplicates = [], - i = 1, - j = 0; - - // Unless we *know* we can detect duplicates, assume their presence - hasDuplicate = !support.detectDuplicates; - results.sort( sortOrder ); - - if ( hasDuplicate ) { - for ( ; (elem = results[i]); i++ ) { - if ( elem === results[ i - 1 ] ) { - j = duplicates.push( i ); - } - } - while ( j-- ) { - results.splice( duplicates[ j ], 1 ); - } - } - - return results; - }; - - function siblingCheck( a, b ) { - var cur = b && a, - diff = cur && ( ~b.sourceIndex || MAX_NEGATIVE ) - ( ~a.sourceIndex || MAX_NEGATIVE ); - - // Use IE sourceIndex if available on both nodes - if ( diff ) { - return diff; - } - - // Check if b follows a - if ( cur ) { - while ( (cur = cur.nextSibling) ) { - if ( cur === b ) { - return -1; - } - } - } - - return a ? 
1 : -1; - } - -// Returns a function to use in pseudos for input types - function createInputPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === type; - }; - } - -// Returns a function to use in pseudos for buttons - function createButtonPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return (name === "input" || name === "button") && elem.type === type; - }; - } - -// Returns a function to use in pseudos for positionals - function createPositionalPseudo( fn ) { - return markFunction(function( argument ) { - argument = +argument; - return markFunction(function( seed, matches ) { - var j, - matchIndexes = fn( [], seed.length, argument ), - i = matchIndexes.length; - - // Match elements found at the specified indexes - while ( i-- ) { - if ( seed[ (j = matchIndexes[i]) ] ) { - seed[j] = !(matches[j] = seed[j]); - } - } - }); - }); - } - - /** - * Utility function for retrieving the text value of an array of DOM nodes - * @param {Array|Element} elem - */ - getText = Sizzle.getText = function( elem ) { - var node, - ret = "", - i = 0, - nodeType = elem.nodeType; - - if ( !nodeType ) { - // If no nodeType, this is expected to be an array - for ( ; (node = elem[i]); i++ ) { - // Do not traverse comment nodes - ret += getText( node ); - } - } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { - // Use textContent for elements - // innerText usage removed for consistency of new lines (see #11153) - if ( typeof elem.textContent === "string" ) { - return elem.textContent; - } else { - // Traverse its children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - ret += getText( elem ); - } - } - } else if ( nodeType === 3 || nodeType === 4 ) { - return elem.nodeValue; - } - // Do not include comment or processing instruction nodes - - return ret; - }; - - Expr = Sizzle.selectors = { - - // Can be adjusted by the user - 
cacheLength: 50, - - createPseudo: markFunction, - - match: matchExpr, - - find: {}, - - relative: { - ">": { dir: "parentNode", first: true }, - " ": { dir: "parentNode" }, - "+": { dir: "previousSibling", first: true }, - "~": { dir: "previousSibling" } - }, - - preFilter: { - "ATTR": function( match ) { - match[1] = match[1].replace( runescape, funescape ); - - // Move the given value to match[3] whether quoted or unquoted - match[3] = ( match[4] || match[5] || "" ).replace( runescape, funescape ); - - if ( match[2] === "~=" ) { - match[3] = " " + match[3] + " "; - } - - return match.slice( 0, 4 ); - }, - - "CHILD": function( match ) { - /* matches from matchExpr["CHILD"] - 1 type (only|nth|...) - 2 what (child|of-type) - 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) - 4 xn-component of xn+y argument ([+-]?\d*n|) - 5 sign of xn-component - 6 x of xn-component - 7 sign of y-component - 8 y of y-component - */ - match[1] = match[1].toLowerCase(); - - if ( match[1].slice( 0, 3 ) === "nth" ) { - // nth-* requires argument - if ( !match[3] ) { - Sizzle.error( match[0] ); - } - - // numeric x and y parameters for Expr.filter.CHILD - // remember that false/true cast respectively to 0/1 - match[4] = +( match[4] ? 
match[5] + (match[6] || 1) : 2 * ( match[3] === "even" || match[3] === "odd" ) ); - match[5] = +( ( match[7] + match[8] ) || match[3] === "odd" ); - - // other types prohibit arguments - } else if ( match[3] ) { - Sizzle.error( match[0] ); - } - - return match; - }, - - "PSEUDO": function( match ) { - var excess, - unquoted = !match[5] && match[2]; - - if ( matchExpr["CHILD"].test( match[0] ) ) { - return null; - } - - // Accept quoted arguments as-is - if ( match[4] ) { - match[2] = match[4]; - - // Strip excess characters from unquoted arguments - } else if ( unquoted && rpseudo.test( unquoted ) && - // Get excess from tokenize (recursively) - (excess = tokenize( unquoted, true )) && - // advance to the next closing parenthesis - (excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length) ) { - - // excess is a negative index - match[0] = match[0].slice( 0, excess ); - match[2] = unquoted.slice( 0, excess ); - } - - // Return only captures needed by the pseudo filter method (type and argument) - return match.slice( 0, 3 ); - } - }, - - filter: { - - "TAG": function( nodeName ) { - if ( nodeName === "*" ) { - return function() { return true; }; - } - - nodeName = nodeName.replace( runescape, funescape ).toLowerCase(); - return function( elem ) { - return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; - }; - }, - - "CLASS": function( className ) { - var pattern = classCache[ className + " " ]; - - return pattern || - (pattern = new RegExp( "(^|" + whitespace + ")" + className + "(" + whitespace + "|$)" )) && - classCache( className, function( elem ) { - return pattern.test( elem.className || (typeof elem.getAttribute !== strundefined && elem.getAttribute("class")) || "" ); - }); - }, - - "ATTR": function( name, operator, check ) { - return function( elem ) { - var result = Sizzle.attr( elem, name ); - - if ( result == null ) { - return operator === "!="; - } - if ( !operator ) { - return true; - } - - result += ""; - - return operator 
=== "=" ? result === check : - operator === "!=" ? result !== check : - operator === "^=" ? check && result.indexOf( check ) === 0 : - operator === "*=" ? check && result.indexOf( check ) > -1 : - operator === "$=" ? check && result.slice( -check.length ) === check : - operator === "~=" ? ( " " + result + " " ).indexOf( check ) > -1 : - operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : - false; - }; - }, - - "CHILD": function( type, what, argument, first, last ) { - var simple = type.slice( 0, 3 ) !== "nth", - forward = type.slice( -4 ) !== "last", - ofType = what === "of-type"; - - return first === 1 && last === 0 ? - - // Shortcut for :nth-*(n) - function( elem ) { - return !!elem.parentNode; - } : - - function( elem, context, xml ) { - var cache, outerCache, node, diff, nodeIndex, start, - dir = simple !== forward ? "nextSibling" : "previousSibling", - parent = elem.parentNode, - name = ofType && elem.nodeName.toLowerCase(), - useCache = !xml && !ofType; - - if ( parent ) { - - // :(first|last|only)-(child|of-type) - if ( simple ) { - while ( dir ) { - node = elem; - while ( (node = node[ dir ]) ) { - if ( ofType ? node.nodeName.toLowerCase() === name : node.nodeType === 1 ) { - return false; - } - } - // Reverse direction for :only-* (if we haven't yet done so) - start = dir = type === "only" && !start && "nextSibling"; - } - return true; - } - - start = [ forward ? parent.firstChild : parent.lastChild ]; - - // non-xml :nth-child(...) 
stores cache data on `parent` - if ( forward && useCache ) { - // Seek `elem` from a previously-cached index - outerCache = parent[ expando ] || (parent[ expando ] = {}); - cache = outerCache[ type ] || []; - nodeIndex = cache[0] === dirruns && cache[1]; - diff = cache[0] === dirruns && cache[2]; - node = nodeIndex && parent.childNodes[ nodeIndex ]; - - while ( (node = ++nodeIndex && node && node[ dir ] || - - // Fallback to seeking `elem` from the start - (diff = nodeIndex = 0) || start.pop()) ) { - - // When found, cache indexes on `parent` and break - if ( node.nodeType === 1 && ++diff && node === elem ) { - outerCache[ type ] = [ dirruns, nodeIndex, diff ]; - break; - } - } - - // Use previously-cached element index if available - } else if ( useCache && (cache = (elem[ expando ] || (elem[ expando ] = {}))[ type ]) && cache[0] === dirruns ) { - diff = cache[1]; - - // xml :nth-child(...) or :nth-last-child(...) or :nth(-last)?-of-type(...) - } else { - // Use the same loop as above to seek `elem` from the start - while ( (node = ++nodeIndex && node && node[ dir ] || - (diff = nodeIndex = 0) || start.pop()) ) { - - if ( ( ofType ? 
node.nodeName.toLowerCase() === name : node.nodeType === 1 ) && ++diff ) { - // Cache the index of each encountered element - if ( useCache ) { - (node[ expando ] || (node[ expando ] = {}))[ type ] = [ dirruns, diff ]; - } - - if ( node === elem ) { - break; - } - } - } - } - - // Incorporate the offset, then check against cycle size - diff -= last; - return diff === first || ( diff % first === 0 && diff / first >= 0 ); - } - }; - }, - - "PSEUDO": function( pseudo, argument ) { - // pseudo-class names are case-insensitive - // http://www.w3.org/TR/selectors/#pseudo-classes - // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters - // Remember that setFilters inherits from pseudos - var args, - fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || - Sizzle.error( "unsupported pseudo: " + pseudo ); - - // The user may use createPseudo to indicate that - // arguments are needed to create the filter function - // just as Sizzle does - if ( fn[ expando ] ) { - return fn( argument ); - } - - // But maintain support for old signatures - if ( fn.length > 1 ) { - args = [ pseudo, pseudo, "", argument ]; - return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? - markFunction(function( seed, matches ) { - var idx, - matched = fn( seed, argument ), - i = matched.length; - while ( i-- ) { - idx = indexOf.call( seed, matched[i] ); - seed[ idx ] = !( matches[ idx ] = matched[i] ); - } - }) : - function( elem ) { - return fn( elem, 0, args ); - }; - } - - return fn; - } - }, - - pseudos: { - // Potentially complex pseudos - "not": markFunction(function( selector ) { - // Trim the selector passed to compile - // to avoid treating leading and trailing - // spaces as combinators - var input = [], - results = [], - matcher = compile( selector.replace( rtrim, "$1" ) ); - - return matcher[ expando ] ? 
- markFunction(function( seed, matches, context, xml ) { - var elem, - unmatched = matcher( seed, null, xml, [] ), - i = seed.length; - - // Match elements unmatched by `matcher` - while ( i-- ) { - if ( (elem = unmatched[i]) ) { - seed[i] = !(matches[i] = elem); - } - } - }) : - function( elem, context, xml ) { - input[0] = elem; - matcher( input, null, xml, results ); - return !results.pop(); - }; - }), - - "has": markFunction(function( selector ) { - return function( elem ) { - return Sizzle( selector, elem ).length > 0; - }; - }), - - "contains": markFunction(function( text ) { - return function( elem ) { - return ( elem.textContent || elem.innerText || getText( elem ) ).indexOf( text ) > -1; - }; - }), - - // "Whether an element is represented by a :lang() selector - // is based solely on the element's language value - // being equal to the identifier C, - // or beginning with the identifier C immediately followed by "-". - // The matching of C against the element's language value is performed case-insensitively. - // The identifier C does not have to be a valid language name." - // http://www.w3.org/TR/selectors/#lang-pseudo - "lang": markFunction( function( lang ) { - // lang value must be a valid identifider - if ( !ridentifier.test(lang || "") ) { - Sizzle.error( "unsupported lang: " + lang ); - } - lang = lang.replace( runescape, funescape ).toLowerCase(); - return function( elem ) { - var elemLang; - do { - if ( (elemLang = documentIsXML ? 
- elem.getAttribute("xml:lang") || elem.getAttribute("lang") : - elem.lang) ) { - - elemLang = elemLang.toLowerCase(); - return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; - } - } while ( (elem = elem.parentNode) && elem.nodeType === 1 ); - return false; - }; - }), - - // Miscellaneous - "target": function( elem ) { - var hash = window.location && window.location.hash; - return hash && hash.slice( 1 ) === elem.id; - }, - - "root": function( elem ) { - return elem === docElem; - }, - - "focus": function( elem ) { - return elem === document.activeElement && (!document.hasFocus || document.hasFocus()) && !!(elem.type || elem.href || ~elem.tabIndex); - }, - - // Boolean properties - "enabled": function( elem ) { - return elem.disabled === false; - }, - - "disabled": function( elem ) { - return elem.disabled === true; - }, - - "checked": function( elem ) { - // In CSS3, :checked should return both checked and selected elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - var nodeName = elem.nodeName.toLowerCase(); - return (nodeName === "input" && !!elem.checked) || (nodeName === "option" && !!elem.selected); - }, - - "selected": function( elem ) { - // Accessing this property makes selected-by-default - // options in Safari work properly - if ( elem.parentNode ) { - elem.parentNode.selectedIndex; - } - - return elem.selected === true; - }, - - // Contents - "empty": function( elem ) { - // http://www.w3.org/TR/selectors/#empty-pseudo - // :empty is only affected by element nodes and content nodes(including text(3), cdata(4)), - // not comment, processing instructions, or others - // Thanks to Diego Perini for the nodeName shortcut - // Greater than "@" means alpha characters (specifically not starting with "#" or "?") - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - if ( elem.nodeName > "@" || elem.nodeType === 3 || elem.nodeType === 4 ) { - return false; - } - } - return true; - }, - - "parent": function( elem 
) { - return !Expr.pseudos["empty"]( elem ); - }, - - // Element/input types - "header": function( elem ) { - return rheader.test( elem.nodeName ); - }, - - "input": function( elem ) { - return rinputs.test( elem.nodeName ); - }, - - "button": function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === "button" || name === "button"; - }, - - "text": function( elem ) { - var attr; - // IE6 and 7 will map elem.type to 'text' for new HTML5 types (search, etc) - // use getAttribute instead to test this case - return elem.nodeName.toLowerCase() === "input" && - elem.type === "text" && - ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === elem.type ); - }, - - // Position-in-collection - "first": createPositionalPseudo(function() { - return [ 0 ]; - }), - - "last": createPositionalPseudo(function( matchIndexes, length ) { - return [ length - 1 ]; - }), - - "eq": createPositionalPseudo(function( matchIndexes, length, argument ) { - return [ argument < 0 ? argument + length : argument ]; - }), - - "even": createPositionalPseudo(function( matchIndexes, length ) { - var i = 0; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "odd": createPositionalPseudo(function( matchIndexes, length ) { - var i = 1; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "lt": createPositionalPseudo(function( matchIndexes, length, argument ) { - var i = argument < 0 ? argument + length : argument; - for ( ; --i >= 0; ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "gt": createPositionalPseudo(function( matchIndexes, length, argument ) { - var i = argument < 0 ? 
argument + length : argument; - for ( ; ++i < length; ) { - matchIndexes.push( i ); - } - return matchIndexes; - }) - } - }; - -// Add button/input type pseudos - for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { - Expr.pseudos[ i ] = createInputPseudo( i ); - } - for ( i in { submit: true, reset: true } ) { - Expr.pseudos[ i ] = createButtonPseudo( i ); - } - - function tokenize( selector, parseOnly ) { - var matched, match, tokens, type, - soFar, groups, preFilters, - cached = tokenCache[ selector + " " ]; - - if ( cached ) { - return parseOnly ? 0 : cached.slice( 0 ); - } - - soFar = selector; - groups = []; - preFilters = Expr.preFilter; - - while ( soFar ) { - - // Comma and first run - if ( !matched || (match = rcomma.exec( soFar )) ) { - if ( match ) { - // Don't consume trailing commas as valid - soFar = soFar.slice( match[0].length ) || soFar; - } - groups.push( tokens = [] ); - } - - matched = false; - - // Combinators - if ( (match = rcombinators.exec( soFar )) ) { - matched = match.shift(); - tokens.push( { - value: matched, - // Cast descendant combinators to space - type: match[0].replace( rtrim, " " ) - } ); - soFar = soFar.slice( matched.length ); - } - - // Filters - for ( type in Expr.filter ) { - if ( (match = matchExpr[ type ].exec( soFar )) && (!preFilters[ type ] || - (match = preFilters[ type ]( match ))) ) { - matched = match.shift(); - tokens.push( { - value: matched, - type: type, - matches: match - } ); - soFar = soFar.slice( matched.length ); - } - } - - if ( !matched ) { - break; - } - } - - // Return the length of the invalid excess - // if we're just parsing - // Otherwise, throw an error or return tokens - return parseOnly ? - soFar.length : - soFar ? 
- Sizzle.error( selector ) : - // Cache the tokens - tokenCache( selector, groups ).slice( 0 ); - } - - function toSelector( tokens ) { - var i = 0, - len = tokens.length, - selector = ""; - for ( ; i < len; i++ ) { - selector += tokens[i].value; - } - return selector; - } - - function addCombinator( matcher, combinator, base ) { - var dir = combinator.dir, - checkNonElements = base && dir === "parentNode", - doneName = done++; - - return combinator.first ? - // Check against closest ancestor/preceding element - function( elem, context, xml ) { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - return matcher( elem, context, xml ); - } - } - } : - - // Check against all ancestor/preceding elements - function( elem, context, xml ) { - var data, cache, outerCache, - dirkey = dirruns + " " + doneName; - - // We can't set arbitrary data on XML nodes, so they don't benefit from dir caching - if ( xml ) { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - if ( matcher( elem, context, xml ) ) { - return true; - } - } - } - } else { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - outerCache = elem[ expando ] || (elem[ expando ] = {}); - if ( (cache = outerCache[ dir ]) && cache[0] === dirkey ) { - if ( (data = cache[1]) === true || data === cachedruns ) { - return data === true; - } - } else { - cache = outerCache[ dir ] = [ dirkey ]; - cache[1] = matcher( elem, context, xml ) || cachedruns; - if ( cache[1] === true ) { - return true; - } - } - } - } - } - }; - } - - function elementMatcher( matchers ) { - return matchers.length > 1 ? 
- function( elem, context, xml ) { - var i = matchers.length; - while ( i-- ) { - if ( !matchers[i]( elem, context, xml ) ) { - return false; - } - } - return true; - } : - matchers[0]; - } - - function condense( unmatched, map, filter, context, xml ) { - var elem, - newUnmatched = [], - i = 0, - len = unmatched.length, - mapped = map != null; - - for ( ; i < len; i++ ) { - if ( (elem = unmatched[i]) ) { - if ( !filter || filter( elem, context, xml ) ) { - newUnmatched.push( elem ); - if ( mapped ) { - map.push( i ); - } - } - } - } - - return newUnmatched; - } - - function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { - if ( postFilter && !postFilter[ expando ] ) { - postFilter = setMatcher( postFilter ); - } - if ( postFinder && !postFinder[ expando ] ) { - postFinder = setMatcher( postFinder, postSelector ); - } - return markFunction(function( seed, results, context, xml ) { - var temp, i, elem, - preMap = [], - postMap = [], - preexisting = results.length, - - // Get initial elements from seed or context - elems = seed || multipleContexts( selector || "*", context.nodeType ? [ context ] : context, [] ), - - // Prefilter to get matcher input, preserving a map for seed-results synchronization - matcherIn = preFilter && ( seed || !selector ) ? - condense( elems, preMap, preFilter, context, xml ) : - elems, - - matcherOut = matcher ? - // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, - postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
- - // ...intermediate processing is necessary - [] : - - // ...otherwise use results directly - results : - matcherIn; - - // Find primary matches - if ( matcher ) { - matcher( matcherIn, matcherOut, context, xml ); - } - - // Apply postFilter - if ( postFilter ) { - temp = condense( matcherOut, postMap ); - postFilter( temp, [], context, xml ); - - // Un-match failing elements by moving them back to matcherIn - i = temp.length; - while ( i-- ) { - if ( (elem = temp[i]) ) { - matcherOut[ postMap[i] ] = !(matcherIn[ postMap[i] ] = elem); - } - } - } - - if ( seed ) { - if ( postFinder || preFilter ) { - if ( postFinder ) { - // Get the final matcherOut by condensing this intermediate into postFinder contexts - temp = []; - i = matcherOut.length; - while ( i-- ) { - if ( (elem = matcherOut[i]) ) { - // Restore matcherIn since elem is not yet a final match - temp.push( (matcherIn[i] = elem) ); - } - } - postFinder( null, (matcherOut = []), temp, xml ); - } - - // Move matched elements from seed to results to keep them synchronized - i = matcherOut.length; - while ( i-- ) { - if ( (elem = matcherOut[i]) && - (temp = postFinder ? indexOf.call( seed, elem ) : preMap[i]) > -1 ) { - - seed[temp] = !(results[temp] = elem); - } - } - } - - // Add elements to results, through postFinder if defined - } else { - matcherOut = condense( - matcherOut === results ? - matcherOut.splice( preexisting, matcherOut.length ) : - matcherOut - ); - if ( postFinder ) { - postFinder( null, results, matcherOut, xml ); - } else { - push.apply( results, matcherOut ); - } - } - }); - } - - function matcherFromTokens( tokens ) { - var checkContext, matcher, j, - len = tokens.length, - leadingRelative = Expr.relative[ tokens[0].type ], - implicitRelative = leadingRelative || Expr.relative[" "], - i = leadingRelative ? 
1 : 0, - - // The foundational matcher ensures that elements are reachable from top-level context(s) - matchContext = addCombinator( function( elem ) { - return elem === checkContext; - }, implicitRelative, true ), - matchAnyContext = addCombinator( function( elem ) { - return indexOf.call( checkContext, elem ) > -1; - }, implicitRelative, true ), - matchers = [ function( elem, context, xml ) { - return ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( - (checkContext = context).nodeType ? - matchContext( elem, context, xml ) : - matchAnyContext( elem, context, xml ) ); - } ]; - - for ( ; i < len; i++ ) { - if ( (matcher = Expr.relative[ tokens[i].type ]) ) { - matchers = [ addCombinator(elementMatcher( matchers ), matcher) ]; - } else { - matcher = Expr.filter[ tokens[i].type ].apply( null, tokens[i].matches ); - - // Return special upon seeing a positional matcher - if ( matcher[ expando ] ) { - // Find the next relative operator (if any) for proper handling - j = ++i; - for ( ; j < len; j++ ) { - if ( Expr.relative[ tokens[j].type ] ) { - break; - } - } - return setMatcher( - i > 1 && elementMatcher( matchers ), - i > 1 && toSelector( tokens.slice( 0, i - 1 ) ).replace( rtrim, "$1" ), - matcher, - i < j && matcherFromTokens( tokens.slice( i, j ) ), - j < len && matcherFromTokens( (tokens = tokens.slice( j )) ), - j < len && toSelector( tokens ) - ); - } - matchers.push( matcher ); - } - } - - return elementMatcher( matchers ); - } - - function matcherFromGroupMatchers( elementMatchers, setMatchers ) { - // A counter to specify which element is currently being matched - var matcherCachedRuns = 0, - bySet = setMatchers.length > 0, - byElement = elementMatchers.length > 0, - superMatcher = function( seed, context, xml, results, expandContext ) { - var elem, j, matcher, - setMatched = [], - matchedCount = 0, - i = "0", - unmatched = seed && [], - outermost = expandContext != null, - contextBackup = outermostContext, - // We must always have either 
seed elements or context - elems = seed || byElement && Expr.find["TAG"]( "*", expandContext && context.parentNode || context ), - // Use integer dirruns iff this is the outermost matcher - dirrunsUnique = (dirruns += contextBackup == null ? 1 : Math.random() || 0.1); - - if ( outermost ) { - outermostContext = context !== document && context; - cachedruns = matcherCachedRuns; - } - - // Add elements passing elementMatchers directly to results - // Keep `i` a string if there are no elements so `matchedCount` will be "00" below - for ( ; (elem = elems[i]) != null; i++ ) { - if ( byElement && elem ) { - j = 0; - while ( (matcher = elementMatchers[j++]) ) { - if ( matcher( elem, context, xml ) ) { - results.push( elem ); - break; - } - } - if ( outermost ) { - dirruns = dirrunsUnique; - cachedruns = ++matcherCachedRuns; - } - } - - // Track unmatched elements for set filters - if ( bySet ) { - // They will have gone through all possible matchers - if ( (elem = !matcher && elem) ) { - matchedCount--; - } - - // Lengthen the array for every element, matched or not - if ( seed ) { - unmatched.push( elem ); - } - } - } - - // Apply set filters to unmatched elements - matchedCount += i; - if ( bySet && i !== matchedCount ) { - j = 0; - while ( (matcher = setMatchers[j++]) ) { - matcher( unmatched, setMatched, context, xml ); - } - - if ( seed ) { - // Reintegrate element matches to eliminate the need for sorting - if ( matchedCount > 0 ) { - while ( i-- ) { - if ( !(unmatched[i] || setMatched[i]) ) { - setMatched[i] = pop.call( results ); - } - } - } - - // Discard index placeholder values to get only actual matches - setMatched = condense( setMatched ); - } - - // Add matches to results - push.apply( results, setMatched ); - - // Seedless set matches succeeding multiple successful matchers stipulate sorting - if ( outermost && !seed && setMatched.length > 0 && - ( matchedCount + setMatchers.length ) > 1 ) { - - Sizzle.uniqueSort( results ); - } - } - - // Override 
manipulation of globals by nested matchers - if ( outermost ) { - dirruns = dirrunsUnique; - outermostContext = contextBackup; - } - - return unmatched; - }; - - return bySet ? - markFunction( superMatcher ) : - superMatcher; - } - - compile = Sizzle.compile = function( selector, group /* Internal Use Only */ ) { - var i, - setMatchers = [], - elementMatchers = [], - cached = compilerCache[ selector + " " ]; - - if ( !cached ) { - // Generate a function of recursive functions that can be used to check each element - if ( !group ) { - group = tokenize( selector ); - } - i = group.length; - while ( i-- ) { - cached = matcherFromTokens( group[i] ); - if ( cached[ expando ] ) { - setMatchers.push( cached ); - } else { - elementMatchers.push( cached ); - } - } - - // Cache the compiled function - cached = compilerCache( selector, matcherFromGroupMatchers( elementMatchers, setMatchers ) ); - } - return cached; - }; - - function multipleContexts( selector, contexts, results ) { - var i = 0, - len = contexts.length; - for ( ; i < len; i++ ) { - Sizzle( selector, contexts[i], results ); - } - return results; - } - - function select( selector, context, results, seed ) { - var i, tokens, token, type, find, - match = tokenize( selector ); - - if ( !seed ) { - // Try to minimize operations if there is only one group - if ( match.length === 1 ) { - - // Take a shortcut and set the context if the root selector is an ID - tokens = match[0] = match[0].slice( 0 ); - if ( tokens.length > 2 && (token = tokens[0]).type === "ID" && - context.nodeType === 9 && !documentIsXML && - Expr.relative[ tokens[1].type ] ) { - - context = Expr.find["ID"]( token.matches[0].replace( runescape, funescape ), context )[0]; - if ( !context ) { - return results; - } - - selector = selector.slice( tokens.shift().value.length ); - } - - // Fetch a seed set for right-to-left matching - i = matchExpr["needsContext"].test( selector ) ? 
0 : tokens.length; - while ( i-- ) { - token = tokens[i]; - - // Abort if we hit a combinator - if ( Expr.relative[ (type = token.type) ] ) { - break; - } - if ( (find = Expr.find[ type ]) ) { - // Search, expanding context for leading sibling combinators - if ( (seed = find( - token.matches[0].replace( runescape, funescape ), - rsibling.test( tokens[0].type ) && context.parentNode || context - )) ) { - - // If seed is empty or no tokens remain, we can return early - tokens.splice( i, 1 ); - selector = seed.length && toSelector( tokens ); - if ( !selector ) { - push.apply( results, slice.call( seed, 0 ) ); - return results; - } - - break; - } - } - } - } - } - - // Compile and execute a filtering function - // Provide `match` to avoid retokenization if we modified the selector above - compile( selector, match )( - seed, - context, - documentIsXML, - results, - rsibling.test( selector ) - ); - return results; - } - -// Deprecated - Expr.pseudos["nth"] = Expr.pseudos["eq"]; - -// Easy API for creating new setFilters - function setFilters() {} - Expr.filters = setFilters.prototype = Expr.pseudos; - Expr.setFilters = new setFilters(); - -// Initialize with the default document - setDocument(); - -// Override sizzle attribute retrieval - Sizzle.attr = jQuery.attr; - jQuery.find = Sizzle; - jQuery.expr = Sizzle.selectors; - jQuery.expr[":"] = jQuery.expr.pseudos; - jQuery.unique = Sizzle.uniqueSort; - jQuery.text = Sizzle.getText; - jQuery.isXMLDoc = Sizzle.isXML; - jQuery.contains = Sizzle.contains; - - - })( window ); - var runtil = /Until$/, - rparentsprev = /^(?:parents|prev(?:Until|All))/, - isSimple = /^.[^:#[.,]*$/, - rneedsContext = jQuery.expr.match.needsContext, - // methods guaranteed to produce a unique set when starting from a unique set - guaranteedUnique = { - children: true, - contents: true, - next: true, - prev: true - }; - - jQuery.fn.extend({ - find: function( selector ) { - var i, ret, self, - len = this.length; - - if ( typeof selector !== "string" 
) { - self = this; - return this.pushStack( jQuery( selector ).filter(function() { - for ( i = 0; i < len; i++ ) { - if ( jQuery.contains( self[ i ], this ) ) { - return true; - } - } - }) ); - } - - ret = []; - for ( i = 0; i < len; i++ ) { - jQuery.find( selector, this[ i ], ret ); - } - - // Needed because $( selector, context ) becomes $( context ).find( selector ) - ret = this.pushStack( len > 1 ? jQuery.unique( ret ) : ret ); - ret.selector = ( this.selector ? this.selector + " " : "" ) + selector; - return ret; - }, - - has: function( target ) { - var i, - targets = jQuery( target, this ), - len = targets.length; - - return this.filter(function() { - for ( i = 0; i < len; i++ ) { - if ( jQuery.contains( this, targets[i] ) ) { - return true; - } - } - }); - }, - - not: function( selector ) { - return this.pushStack( winnow(this, selector, false) ); - }, - - filter: function( selector ) { - return this.pushStack( winnow(this, selector, true) ); - }, - - is: function( selector ) { - return !!selector && ( - typeof selector === "string" ? - // If this is a positional/relative selector, check membership in the returned set - // so $("p:first").is("p:last") won't return true for a doc with two "p". - rneedsContext.test( selector ) ? - jQuery( selector, this.context ).index( this[0] ) >= 0 : - jQuery.filter( selector, this ).length > 0 : - this.filter( selector ).length > 0 ); - }, - - closest: function( selectors, context ) { - var cur, - i = 0, - l = this.length, - ret = [], - pos = rneedsContext.test( selectors ) || typeof selectors !== "string" ? - jQuery( selectors, context || this.context ) : - 0; - - for ( ; i < l; i++ ) { - cur = this[i]; - - while ( cur && cur.ownerDocument && cur !== context && cur.nodeType !== 11 ) { - if ( pos ? pos.index(cur) > -1 : jQuery.find.matchesSelector(cur, selectors) ) { - ret.push( cur ); - break; - } - cur = cur.parentNode; - } - } - - return this.pushStack( ret.length > 1 ? 
jQuery.unique( ret ) : ret ); - }, - - // Determine the position of an element within - // the matched set of elements - index: function( elem ) { - - // No argument, return index in parent - if ( !elem ) { - return ( this[0] && this[0].parentNode ) ? this.first().prevAll().length : -1; - } - - // index in selector - if ( typeof elem === "string" ) { - return jQuery.inArray( this[0], jQuery( elem ) ); - } - - // Locate the position of the desired element - return jQuery.inArray( - // If it receives a jQuery object, the first element is used - elem.jquery ? elem[0] : elem, this ); - }, - - add: function( selector, context ) { - var set = typeof selector === "string" ? - jQuery( selector, context ) : - jQuery.makeArray( selector && selector.nodeType ? [ selector ] : selector ), - all = jQuery.merge( this.get(), set ); - - return this.pushStack( jQuery.unique(all) ); - }, - - addBack: function( selector ) { - return this.add( selector == null ? - this.prevObject : this.prevObject.filter(selector) - ); - } - }); - - jQuery.fn.andSelf = jQuery.fn.addBack; - - function sibling( cur, dir ) { - do { - cur = cur[ dir ]; - } while ( cur && cur.nodeType !== 1 ); - - return cur; - } - - jQuery.each({ - parent: function( elem ) { - var parent = elem.parentNode; - return parent && parent.nodeType !== 11 ? 
parent : null; - }, - parents: function( elem ) { - return jQuery.dir( elem, "parentNode" ); - }, - parentsUntil: function( elem, i, until ) { - return jQuery.dir( elem, "parentNode", until ); - }, - next: function( elem ) { - return sibling( elem, "nextSibling" ); - }, - prev: function( elem ) { - return sibling( elem, "previousSibling" ); - }, - nextAll: function( elem ) { - return jQuery.dir( elem, "nextSibling" ); - }, - prevAll: function( elem ) { - return jQuery.dir( elem, "previousSibling" ); - }, - nextUntil: function( elem, i, until ) { - return jQuery.dir( elem, "nextSibling", until ); - }, - prevUntil: function( elem, i, until ) { - return jQuery.dir( elem, "previousSibling", until ); - }, - siblings: function( elem ) { - return jQuery.sibling( ( elem.parentNode || {} ).firstChild, elem ); - }, - children: function( elem ) { - return jQuery.sibling( elem.firstChild ); - }, - contents: function( elem ) { - return jQuery.nodeName( elem, "iframe" ) ? - elem.contentDocument || elem.contentWindow.document : - jQuery.merge( [], elem.childNodes ); - } - }, function( name, fn ) { - jQuery.fn[ name ] = function( until, selector ) { - var ret = jQuery.map( this, fn, until ); - - if ( !runtil.test( name ) ) { - selector = until; - } - - if ( selector && typeof selector === "string" ) { - ret = jQuery.filter( selector, ret ); - } - - ret = this.length > 1 && !guaranteedUnique[ name ] ? jQuery.unique( ret ) : ret; - - if ( this.length > 1 && rparentsprev.test( name ) ) { - ret = ret.reverse(); - } - - return this.pushStack( ret ); - }; - }); - - jQuery.extend({ - filter: function( expr, elems, not ) { - if ( not ) { - expr = ":not(" + expr + ")"; - } - - return elems.length === 1 ? - jQuery.find.matchesSelector(elems[0], expr) ? 
[ elems[0] ] : [] : - jQuery.find.matches(expr, elems); - }, - - dir: function( elem, dir, until ) { - var matched = [], - cur = elem[ dir ]; - - while ( cur && cur.nodeType !== 9 && (until === undefined || cur.nodeType !== 1 || !jQuery( cur ).is( until )) ) { - if ( cur.nodeType === 1 ) { - matched.push( cur ); - } - cur = cur[dir]; - } - return matched; - }, - - sibling: function( n, elem ) { - var r = []; - - for ( ; n; n = n.nextSibling ) { - if ( n.nodeType === 1 && n !== elem ) { - r.push( n ); - } - } - - return r; - } - }); - -// Implement the identical functionality for filter and not - function winnow( elements, qualifier, keep ) { - - // Can't pass null or undefined to indexOf in Firefox 4 - // Set to 0 to skip string check - qualifier = qualifier || 0; - - if ( jQuery.isFunction( qualifier ) ) { - return jQuery.grep(elements, function( elem, i ) { - var retVal = !!qualifier.call( elem, i, elem ); - return retVal === keep; - }); - - } else if ( qualifier.nodeType ) { - return jQuery.grep(elements, function( elem ) { - return ( elem === qualifier ) === keep; - }); - - } else if ( typeof qualifier === "string" ) { - var filtered = jQuery.grep(elements, function( elem ) { - return elem.nodeType === 1; - }); - - if ( isSimple.test( qualifier ) ) { - return jQuery.filter(qualifier, filtered, !keep); - } else { - qualifier = jQuery.filter( qualifier, filtered ); - } - } - - return jQuery.grep(elements, function( elem ) { - return ( jQuery.inArray( elem, qualifier ) >= 0 ) === keep; - }); - } - function createSafeFragment( document ) { - var list = nodeNames.split( "|" ), - safeFrag = document.createDocumentFragment(); - - if ( safeFrag.createElement ) { - while ( list.length ) { - safeFrag.createElement( - list.pop() - ); - } - } - return safeFrag; - } - - var nodeNames = "abbr|article|aside|audio|bdi|canvas|data|datalist|details|figcaption|figure|footer|" + - "header|hgroup|mark|meter|nav|output|progress|section|summary|time|video", - rinlinejQuery = / 
jQuery\d+="(?:null|\d+)"/g, - rnoshimcache = new RegExp("<(?:" + nodeNames + ")[\s/>]", "i"), - rleadingWhitespace = /^\s+/, - rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)/>/gi, - rtagName = /<([\w:]+)/, - rtbody = /<tbody/i, - rhtml = /<|&#?\w+;/, - rnoInnerhtml = /<(?:script|style|link)/i, - manipulation_rcheckableType = /^(?:checkbox|radio)$/i, - // checked="checked" or checked - rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i, - rscriptType = /^$|/(?:java|ecma)script/i, - rscriptTypeMasked = /^true/(.*)/, - rcleanScript = /^\s*<!(?:[CDATA[|--)|(?:]]|--)>\s*$/g, - - // We have to close these tags to support XHTML (#13200) - wrapMap = { - option: [ 1, "<select multiple='multiple'>", "</select>" ], - legend: [ 1, "<fieldset>", "</fieldset>" ], - area: [ 1, "<map>", "</map>" ], - param: [ 1, "<object>", "</object>" ], - thead: [ 1, "<table>", "</table>" ], - tr: [ 2, "<table><tbody>", "</tbody></table>" ], - col: [ 2, "<table><tbody></tbody><colgroup>", "</colgroup></table>" ], - td: [ 3, "<table><tbody><tr>", "</tr></tbody></table>" ], - - // IE6-8 can't serialize link, script, style, or any html5 (NoScope) tags, - // unless wrapped in a div with non-breaking characters in front of it. - _default: jQuery.support.htmlSerialize ? [ 0, "", "" ] : [ 1, "X<div>", "</div>" ] - }, - safeFragment = createSafeFragment( document ), - fragmentDiv = safeFragment.appendChild( document.createElement("div") ); - - wrapMap.optgroup = wrapMap.option; - wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; - wrapMap.th = wrapMap.td; - - jQuery.fn.extend({ - text: function( value ) { - return jQuery.access( this, function( value ) { - return value === undefined ? 
- jQuery.text( this ) : - this.empty().append( ( this[0] && this[0].ownerDocument || document ).createTextNode( value ) ); - }, null, value, arguments.length ); - }, - - wrapAll: function( html ) { - if ( jQuery.isFunction( html ) ) { - return this.each(function(i) { - jQuery(this).wrapAll( html.call(this, i) ); - }); - } - - if ( this[0] ) { - // The elements to wrap the target around - var wrap = jQuery( html, this[0].ownerDocument ).eq(0).clone(true); - - if ( this[0].parentNode ) { - wrap.insertBefore( this[0] ); - } - - wrap.map(function() { - var elem = this; - - while ( elem.firstChild && elem.firstChild.nodeType === 1 ) { - elem = elem.firstChild; - } - - return elem; - }).append( this ); - } - - return this; - }, - - wrapInner: function( html ) { - if ( jQuery.isFunction( html ) ) { - return this.each(function(i) { - jQuery(this).wrapInner( html.call(this, i) ); - }); - } - - return this.each(function() { - var self = jQuery( this ), - contents = self.contents(); - - if ( contents.length ) { - contents.wrapAll( html ); - - } else { - self.append( html ); - } - }); - }, - - wrap: function( html ) { - var isFunction = jQuery.isFunction( html ); - - return this.each(function(i) { - jQuery( this ).wrapAll( isFunction ? 
html.call(this, i) : html ); - }); - }, - - unwrap: function() { - return this.parent().each(function() { - if ( !jQuery.nodeName( this, "body" ) ) { - jQuery( this ).replaceWith( this.childNodes ); - } - }).end(); - }, - - append: function() { - return this.domManip(arguments, true, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - this.appendChild( elem ); - } - }); - }, - - prepend: function() { - return this.domManip(arguments, true, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - this.insertBefore( elem, this.firstChild ); - } - }); - }, - - before: function() { - return this.domManip( arguments, false, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this ); - } - }); - }, - - after: function() { - return this.domManip( arguments, false, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this.nextSibling ); - } - }); - }, - - // keepData is for internal use only--do not document - remove: function( selector, keepData ) { - var elem, - i = 0; - - for ( ; (elem = this[i]) != null; i++ ) { - if ( !selector || jQuery.filter( selector, [ elem ] ).length > 0 ) { - if ( !keepData && elem.nodeType === 1 ) { - jQuery.cleanData( getAll( elem ) ); - } - - if ( elem.parentNode ) { - if ( keepData && jQuery.contains( elem.ownerDocument, elem ) ) { - setGlobalEval( getAll( elem, "script" ) ); - } - elem.parentNode.removeChild( elem ); - } - } - } - - return this; - }, - - empty: function() { - var elem, - i = 0; - - for ( ; (elem = this[i]) != null; i++ ) { - // Remove element nodes and prevent memory leaks - if ( elem.nodeType === 1 ) { - jQuery.cleanData( getAll( elem, false ) ); - } - - // Remove any remaining nodes - while ( elem.firstChild ) { - elem.removeChild( elem.firstChild ); - } - - // If this is a select, ensure that it displays empty (#12336) - // Support: IE<9 - if ( elem.options && 
jQuery.nodeName( elem, "select" ) ) { - elem.options.length = 0; - } - } - - return this; - }, - - clone: function( dataAndEvents, deepDataAndEvents ) { - dataAndEvents = dataAndEvents == null ? false : dataAndEvents; - deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; - - return this.map( function () { - return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); - }); - }, - - html: function( value ) { - return jQuery.access( this, function( value ) { - var elem = this[0] || {}, - i = 0, - l = this.length; - - if ( value === undefined ) { - return elem.nodeType === 1 ? - elem.innerHTML.replace( rinlinejQuery, "" ) : - undefined; - } - - // See if we can take a shortcut and just use innerHTML - if ( typeof value === "string" && !rnoInnerhtml.test( value ) && - ( jQuery.support.htmlSerialize || !rnoshimcache.test( value ) ) && - ( jQuery.support.leadingWhitespace || !rleadingWhitespace.test( value ) ) && - !wrapMap[ ( rtagName.exec( value ) || ["", ""] )[1].toLowerCase() ] ) { - - value = value.replace( rxhtmlTag, "<$1></$2>" ); - - try { - for (; i < l; i++ ) { - // Remove element nodes and prevent memory leaks - elem = this[i] || {}; - if ( elem.nodeType === 1 ) { - jQuery.cleanData( getAll( elem, false ) ); - elem.innerHTML = value; - } - } - - elem = 0; - - // If using innerHTML throws an exception, use the fallback method - } catch(e) {} - } - - if ( elem ) { - this.empty().append( value ); - } - }, null, value, arguments.length ); - }, - - replaceWith: function( value ) { - var isFunc = jQuery.isFunction( value ); - - // Make sure that the elements are removed from the DOM before they are inserted - // this can help fix replacing a parent with child elements - if ( !isFunc && typeof value !== "string" ) { - value = jQuery( value ).not( this ).detach(); - } - - return this.domManip( [ value ], true, function( elem ) { - var next = this.nextSibling, - parent = this.parentNode; - - if ( parent ) { - jQuery( this ).remove(); - 
parent.insertBefore( elem, next ); - } - }); - }, - - detach: function( selector ) { - return this.remove( selector, true ); - }, - - domManip: function( args, table, callback ) { - - // Flatten any nested arrays - args = core_concat.apply( [], args ); - - var first, node, hasScripts, - scripts, doc, fragment, - i = 0, - l = this.length, - set = this, - iNoClone = l - 1, - value = args[0], - isFunction = jQuery.isFunction( value ); - - // We can't cloneNode fragments that contain checked, in WebKit - if ( isFunction || !( l <= 1 || typeof value !== "string" || jQuery.support.checkClone || !rchecked.test( value ) ) ) { - return this.each(function( index ) { - var self = set.eq( index ); - if ( isFunction ) { - args[0] = value.call( this, index, table ? self.html() : undefined ); - } - self.domManip( args, table, callback ); - }); - } - - if ( l ) { - fragment = jQuery.buildFragment( args, this[ 0 ].ownerDocument, false, this ); - first = fragment.firstChild; - - if ( fragment.childNodes.length === 1 ) { - fragment = first; - } - - if ( first ) { - table = table && jQuery.nodeName( first, "tr" ); - scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); - hasScripts = scripts.length; - - // Use the original fragment for the last item instead of the first because it can end up - // being emptied incorrectly in certain situations (#8070). - for ( ; i < l; i++ ) { - node = fragment; - - if ( i !== iNoClone ) { - node = jQuery.clone( node, true, true ); - - // Keep references to cloned scripts for later restoration - if ( hasScripts ) { - jQuery.merge( scripts, getAll( node, "script" ) ); - } - } - - callback.call( - table && jQuery.nodeName( this[i], "table" ) ? 
- findOrAppend( this[i], "tbody" ) : - this[i], - node, - i - ); - } - - if ( hasScripts ) { - doc = scripts[ scripts.length - 1 ].ownerDocument; - - // Reenable scripts - jQuery.map( scripts, restoreScript ); - - // Evaluate executable scripts on first document insertion - for ( i = 0; i < hasScripts; i++ ) { - node = scripts[ i ]; - if ( rscriptType.test( node.type || "" ) && - !jQuery._data( node, "globalEval" ) && jQuery.contains( doc, node ) ) { - - if ( node.src ) { - // Hope ajax is available... - jQuery.ajax({ - url: node.src, - type: "GET", - dataType: "script", - async: false, - global: false, - "throws": true - }); - } else { - jQuery.globalEval( ( node.text || node.textContent || node.innerHTML || "" ).replace( rcleanScript, "" ) ); - } - } - } - } - - // Fix #11809: Avoid leaking memory - fragment = first = null; - } - } - - return this; - } - }); - - function findOrAppend( elem, tag ) { - return elem.getElementsByTagName( tag )[0] || elem.appendChild( elem.ownerDocument.createElement( tag ) ); - } - -// Replace/restore the type attribute of script elements for safe DOM manipulation - function disableScript( elem ) { - var attr = elem.getAttributeNode("type"); - elem.type = ( attr && attr.specified ) + "/" + elem.type; - return elem; - } - function restoreScript( elem ) { - var match = rscriptTypeMasked.exec( elem.type ); - if ( match ) { - elem.type = match[1]; - } else { - elem.removeAttribute("type"); - } - return elem; - } - -// Mark scripts as having already been evaluated - function setGlobalEval( elems, refElements ) { - var elem, - i = 0; - for ( ; (elem = elems[i]) != null; i++ ) { - jQuery._data( elem, "globalEval", !refElements || jQuery._data( refElements[i], "globalEval" ) ); - } - } - - function cloneCopyEvent( src, dest ) { - - if ( dest.nodeType !== 1 || !jQuery.hasData( src ) ) { - return; - } - - var type, i, l, - oldData = jQuery._data( src ), - curData = jQuery._data( dest, oldData ), - events = oldData.events; - - if ( events ) { - 
delete curData.handle; - curData.events = {}; - - for ( type in events ) { - for ( i = 0, l = events[ type ].length; i < l; i++ ) { - jQuery.event.add( dest, type, events[ type ][ i ] ); - } - } - } - - // make the cloned public data object a copy from the original - if ( curData.data ) { - curData.data = jQuery.extend( {}, curData.data ); - } - } - - function fixCloneNodeIssues( src, dest ) { - var nodeName, e, data; - - // We do not need to do anything for non-Elements - if ( dest.nodeType !== 1 ) { - return; - } - - nodeName = dest.nodeName.toLowerCase(); - - // IE6-8 copies events bound via attachEvent when using cloneNode. - if ( !jQuery.support.noCloneEvent && dest[ jQuery.expando ] ) { - data = jQuery._data( dest ); - - for ( e in data.events ) { - jQuery.removeEvent( dest, e, data.handle ); - } - - // Event data gets referenced instead of copied if the expando gets copied too - dest.removeAttribute( jQuery.expando ); - } - - // IE blanks contents when cloning scripts, and tries to evaluate newly-set text - if ( nodeName === "script" && dest.text !== src.text ) { - disableScript( dest ).text = src.text; - restoreScript( dest ); - - // IE6-10 improperly clones children of object elements using classid. - // IE10 throws NoModificationAllowedError if parent is null, #12132. - } else if ( nodeName === "object" ) { - if ( dest.parentNode ) { - dest.outerHTML = src.outerHTML; - } - - // This path appears unavoidable for IE9. When cloning an object - // element in IE9, the outerHTML strategy above is not sufficient. - // If the src has innerHTML and the destination does not, - // copy the src.innerHTML into the dest.innerHTML. #10324 - if ( jQuery.support.html5Clone && ( src.innerHTML && !jQuery.trim(dest.innerHTML) ) ) { - dest.innerHTML = src.innerHTML; - } - - } else if ( nodeName === "input" && manipulation_rcheckableType.test( src.type ) ) { - // IE6-8 fails to persist the checked state of a cloned checkbox - // or radio button. 
Worse, IE6-7 fail to give the cloned element - // a checked appearance if the defaultChecked value isn't also set - - dest.defaultChecked = dest.checked = src.checked; - - // IE6-7 get confused and end up setting the value of a cloned - // checkbox/radio button to an empty string instead of "on" - if ( dest.value !== src.value ) { - dest.value = src.value; - } - - // IE6-8 fails to return the selected option to the default selected - // state when cloning options - } else if ( nodeName === "option" ) { - dest.defaultSelected = dest.selected = src.defaultSelected; - - // IE6-8 fails to set the defaultValue to the correct value when - // cloning other types of input fields - } else if ( nodeName === "input" || nodeName === "textarea" ) { - dest.defaultValue = src.defaultValue; - } - } - - jQuery.each({ - appendTo: "append", - prependTo: "prepend", - insertBefore: "before", - insertAfter: "after", - replaceAll: "replaceWith" - }, function( name, original ) { - jQuery.fn[ name ] = function( selector ) { - var elems, - i = 0, - ret = [], - insert = jQuery( selector ), - last = insert.length - 1; - - for ( ; i <= last; i++ ) { - elems = i === last ? this : this.clone(true); - jQuery( insert[i] )[ original ]( elems ); - - // Modern browsers can apply jQuery collections as arrays, but oldIE needs a .get() - core_push.apply( ret, elems.get() ); - } - - return this.pushStack( ret ); - }; - }); - - function getAll( context, tag ) { - var elems, elem, - i = 0, - found = typeof context.getElementsByTagName !== core_strundefined ? context.getElementsByTagName( tag || "*" ) : - typeof context.querySelectorAll !== core_strundefined ? 
context.querySelectorAll( tag || "*" ) : - undefined; - - if ( !found ) { - for ( found = [], elems = context.childNodes || context; (elem = elems[i]) != null; i++ ) { - if ( !tag || jQuery.nodeName( elem, tag ) ) { - found.push( elem ); - } else { - jQuery.merge( found, getAll( elem, tag ) ); - } - } - } - - return tag === undefined || tag && jQuery.nodeName( context, tag ) ? - jQuery.merge( [ context ], found ) : - found; - } - -// Used in buildFragment, fixes the defaultChecked property - function fixDefaultChecked( elem ) { - if ( manipulation_rcheckableType.test( elem.type ) ) { - elem.defaultChecked = elem.checked; - } - } - - jQuery.extend({ - clone: function( elem, dataAndEvents, deepDataAndEvents ) { - var destElements, node, clone, i, srcElements, - inPage = jQuery.contains( elem.ownerDocument, elem ); - - if ( jQuery.support.html5Clone || jQuery.isXMLDoc(elem) || !rnoshimcache.test( "<" + elem.nodeName + ">" ) ) { - clone = elem.cloneNode( true ); - - // IE<=8 does not properly clone detached, unknown element nodes - } else { - fragmentDiv.innerHTML = elem.outerHTML; - fragmentDiv.removeChild( clone = fragmentDiv.firstChild ); - } - - if ( (!jQuery.support.noCloneEvent || !jQuery.support.noCloneChecked) && - (elem.nodeType === 1 || elem.nodeType === 11) && !jQuery.isXMLDoc(elem) ) { - - // We eschew Sizzle here for performance reasons: http://jsperf.com/getall-vs-sizzle/2 - destElements = getAll( clone ); - srcElements = getAll( elem ); - - // Fix all IE cloning issues - for ( i = 0; (node = srcElements[i]) != null; ++i ) { - // Ensure that the destination node is not null; Fixes #9587 - if ( destElements[i] ) { - fixCloneNodeIssues( node, destElements[i] ); - } - } - } - - // Copy the events from the original to the clone - if ( dataAndEvents ) { - if ( deepDataAndEvents ) { - srcElements = srcElements || getAll( elem ); - destElements = destElements || getAll( clone ); - - for ( i = 0; (node = srcElements[i]) != null; i++ ) { - cloneCopyEvent( node, 
destElements[i] ); - } - } else { - cloneCopyEvent( elem, clone ); - } - } - - // Preserve script evaluation history - destElements = getAll( clone, "script" ); - if ( destElements.length > 0 ) { - setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); - } - - destElements = srcElements = node = null; - - // Return the cloned set - return clone; - }, - - buildFragment: function( elems, context, scripts, selection ) { - var j, elem, contains, - tmp, tag, tbody, wrap, - l = elems.length, - - // Ensure a safe fragment - safe = createSafeFragment( context ), - - nodes = [], - i = 0; - - for ( ; i < l; i++ ) { - elem = elems[ i ]; - - if ( elem || elem === 0 ) { - - // Add nodes directly - if ( jQuery.type( elem ) === "object" ) { - jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem ); - - // Convert non-html into a text node - } else if ( !rhtml.test( elem ) ) { - nodes.push( context.createTextNode( elem ) ); - - // Convert html into DOM nodes - } else { - tmp = tmp || safe.appendChild( context.createElement("div") ); - - // Deserialize a standard representation - tag = ( rtagName.exec( elem ) || ["", ""] )[1].toLowerCase(); - wrap = wrapMap[ tag ] || wrapMap._default; - - tmp.innerHTML = wrap[1] + elem.replace( rxhtmlTag, "<$1></$2>" ) + wrap[2]; - - // Descend through wrappers to the right content - j = wrap[0]; - while ( j-- ) { - tmp = tmp.lastChild; - } - - // Manually add leading whitespace removed by IE - if ( !jQuery.support.leadingWhitespace && rleadingWhitespace.test( elem ) ) { - nodes.push( context.createTextNode( rleadingWhitespace.exec( elem )[0] ) ); - } - - // Remove IE's autoinserted <tbody> from table fragments - if ( !jQuery.support.tbody ) { - - // String was a <table>, *may* have spurious <tbody> - elem = tag === "table" && !rtbody.test( elem ) ? - tmp.firstChild : - - // String was a bare <thead> or <tfoot> - wrap[1] === "<table>" && !rtbody.test( elem ) ? 
- tmp : - 0; - - j = elem && elem.childNodes.length; - while ( j-- ) { - if ( jQuery.nodeName( (tbody = elem.childNodes[j]), "tbody" ) && !tbody.childNodes.length ) { - elem.removeChild( tbody ); - } - } - } - - jQuery.merge( nodes, tmp.childNodes ); - - // Fix #12392 for WebKit and IE > 9 - tmp.textContent = ""; - - // Fix #12392 for oldIE - while ( tmp.firstChild ) { - tmp.removeChild( tmp.firstChild ); - } - - // Remember the top-level container for proper cleanup - tmp = safe.lastChild; - } - } - } - - // Fix #11356: Clear elements from fragment - if ( tmp ) { - safe.removeChild( tmp ); - } - - // Reset defaultChecked for any radios and checkboxes - // about to be appended to the DOM in IE 6/7 (#8060) - if ( !jQuery.support.appendChecked ) { - jQuery.grep( getAll( nodes, "input" ), fixDefaultChecked ); - } - - i = 0; - while ( (elem = nodes[ i++ ]) ) { - - // #4087 - If origin and destination elements are the same, and this is - // that element, do not do anything - if ( selection && jQuery.inArray( elem, selection ) !== -1 ) { - continue; - } - - contains = jQuery.contains( elem.ownerDocument, elem ); - - // Append to fragment - tmp = getAll( safe.appendChild( elem ), "script" ); - - // Preserve script evaluation history - if ( contains ) { - setGlobalEval( tmp ); - } - - // Capture executables - if ( scripts ) { - j = 0; - while ( (elem = tmp[ j++ ]) ) { - if ( rscriptType.test( elem.type || "" ) ) { - scripts.push( elem ); - } - } - } - } - - tmp = null; - - return safe; - }, - - cleanData: function( elems, /* internal */ acceptData ) { - var elem, type, id, data, - i = 0, - internalKey = jQuery.expando, - cache = jQuery.cache, - deleteExpando = jQuery.support.deleteExpando, - special = jQuery.event.special; - - for ( ; (elem = elems[i]) != null; i++ ) { - - if ( acceptData || jQuery.acceptData( elem ) ) { - - id = elem[ internalKey ]; - data = id && cache[ id ]; - - if ( data ) { - if ( data.events ) { - for ( type in data.events ) { - if ( special[ type ] 
) { - jQuery.event.remove( elem, type ); - - // This is a shortcut to avoid jQuery.event.remove's overhead - } else { - jQuery.removeEvent( elem, type, data.handle ); - } - } - } - - // Remove cache only if it was not already removed by jQuery.event.remove - if ( cache[ id ] ) { - - delete cache[ id ]; - - // IE does not allow us to delete expando properties from nodes, - // nor does it have a removeAttribute function on Document nodes; - // we must handle all of these cases - if ( deleteExpando ) { - delete elem[ internalKey ]; - - } else if ( typeof elem.removeAttribute !== core_strundefined ) { - elem.removeAttribute( internalKey ); - - } else { - elem[ internalKey ] = null; - } - - core_deletedIds.push( id ); - } - } - } - } - } - }); - var iframe, getStyles, curCSS, - ralpha = /alpha([^)]*)/i, - ropacity = /opacity\s*=\s*([^)]*)/, - rposition = /^(top|right|bottom|left)$/, - // swappable if display is none or starts with table except "table", "table-cell", or "table-caption" - // see here for display values: https://developer.mozilla.org/en-US/docs/CSS/display - rdisplayswap = /^(none|table(?!-c[ea]).+)/, - rmargin = /^margin/, - rnumsplit = new RegExp( "^(" + core_pnum + ")(.*)$", "i" ), - rnumnonpx = new RegExp( "^(" + core_pnum + ")(?!px)[a-z%]+$", "i" ), - rrelNum = new RegExp( "^([+-])=(" + core_pnum + ")", "i" ), - elemdisplay = { BODY: "block" }, - - cssShow = { position: "absolute", visibility: "hidden", display: "block" }, - cssNormalTransform = { - letterSpacing: 0, - fontWeight: 400 - }, - - cssExpand = [ "Top", "Right", "Bottom", "Left" ], - cssPrefixes = [ "Webkit", "O", "Moz", "ms" ]; - -// return a css property mapped to a potentially vendor prefixed property - function vendorPropName( style, name ) { - - // shortcut for names that are not vendor prefixed - if ( name in style ) { - return name; - } - - // check for vendor prefixed names - var capName = name.charAt(0).toUpperCase() + name.slice(1), - origName = name, - i = cssPrefixes.length; - - 
while ( i-- ) { - name = cssPrefixes[ i ] + capName; - if ( name in style ) { - return name; - } - } - - return origName; - } - - function isHidden( elem, el ) { - // isHidden might be called from jQuery#filter function; - // in that case, element will be second argument - elem = el || elem; - return jQuery.css( elem, "display" ) === "none" || !jQuery.contains( elem.ownerDocument, elem ); - } - - function showHide( elements, show ) { - var display, elem, hidden, - values = [], - index = 0, - length = elements.length; - - for ( ; index < length; index++ ) { - elem = elements[ index ]; - if ( !elem.style ) { - continue; - } - - values[ index ] = jQuery._data( elem, "olddisplay" ); - display = elem.style.display; - if ( show ) { - // Reset the inline display of this element to learn if it is - // being hidden by cascaded rules or not - if ( !values[ index ] && display === "none" ) { - elem.style.display = ""; - } - - // Set elements which have been overridden with display: none - // in a stylesheet to whatever the default browser style is - // for such an element - if ( elem.style.display === "" && isHidden( elem ) ) { - values[ index ] = jQuery._data( elem, "olddisplay", css_defaultDisplay(elem.nodeName) ); - } - } else { - - if ( !values[ index ] ) { - hidden = isHidden( elem ); - - if ( display && display !== "none" || !hidden ) { - jQuery._data( elem, "olddisplay", hidden ? display : jQuery.css( elem, "display" ) ); - } - } - } - } - - // Set the display of most of the elements in a second loop - // to avoid the constant reflow - for ( index = 0; index < length; index++ ) { - elem = elements[ index ]; - if ( !elem.style ) { - continue; - } - if ( !show || elem.style.display === "none" || elem.style.display === "" ) { - elem.style.display = show ? 
values[ index ] || "" : "none"; - } - } - - return elements; - } - - jQuery.fn.extend({ - css: function( name, value ) { - return jQuery.access( this, function( elem, name, value ) { - var len, styles, - map = {}, - i = 0; - - if ( jQuery.isArray( name ) ) { - styles = getStyles( elem ); - len = name.length; - - for ( ; i < len; i++ ) { - map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); - } - - return map; - } - - return value !== undefined ? - jQuery.style( elem, name, value ) : - jQuery.css( elem, name ); - }, name, value, arguments.length > 1 ); - }, - show: function() { - return showHide( this, true ); - }, - hide: function() { - return showHide( this ); - }, - toggle: function( state ) { - var bool = typeof state === "boolean"; - - return this.each(function() { - if ( bool ? state : isHidden( this ) ) { - jQuery( this ).show(); - } else { - jQuery( this ).hide(); - } - }); - } - }); - - jQuery.extend({ - // Add in style property hooks for overriding the default - // behavior of getting and setting a style property - cssHooks: { - opacity: { - get: function( elem, computed ) { - if ( computed ) { - // We should always get a number back from opacity - var ret = curCSS( elem, "opacity" ); - return ret === "" ? "1" : ret; - } - } - } - }, - - // Exclude the following css properties to add px - cssNumber: { - "columnCount": true, - "fillOpacity": true, - "fontWeight": true, - "lineHeight": true, - "opacity": true, - "orphans": true, - "widows": true, - "zIndex": true, - "zoom": true - }, - - // Add in properties whose names you wish to fix before - // setting or getting the value - cssProps: { - // normalize float css property - "float": jQuery.support.cssFloat ? 
"cssFloat" : "styleFloat" - }, - - // Get and set the style property on a DOM Node - style: function( elem, name, value, extra ) { - // Don't set styles on text and comment nodes - if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { - return; - } - - // Make sure that we're working with the right name - var ret, type, hooks, - origName = jQuery.camelCase( name ), - style = elem.style; - - name = jQuery.cssProps[ origName ] || ( jQuery.cssProps[ origName ] = vendorPropName( style, origName ) ); - - // gets hook for the prefixed version - // followed by the unprefixed version - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // Check if we're setting a value - if ( value !== undefined ) { - type = typeof value; - - // convert relative number strings (+= or -=) to relative numbers. #7345 - if ( type === "string" && (ret = rrelNum.exec( value )) ) { - value = ( ret[1] + 1 ) * ret[2] + parseFloat( jQuery.css( elem, name ) ); - // Fixes bug #9237 - type = "number"; - } - - // Make sure that NaN and null values aren't set. 
See: #7116 - if ( value == null || type === "number" && isNaN( value ) ) { - return; - } - - // If a number was passed in, add 'px' to the (except for certain CSS properties) - if ( type === "number" && !jQuery.cssNumber[ origName ] ) { - value += "px"; - } - - // Fixes #8908, it can be done more correctly by specifing setters in cssHooks, - // but it would mean to define eight (for every problematic property) identical functions - if ( !jQuery.support.clearCloneStyle && value === "" && name.indexOf("background") === 0 ) { - style[ name ] = "inherit"; - } - - // If a hook was provided, use that value, otherwise just set the specified value - if ( !hooks || !("set" in hooks) || (value = hooks.set( elem, value, extra )) !== undefined ) { - - // Wrapped to prevent IE from throwing errors when 'invalid' values are provided - // Fixes bug #5509 - try { - style[ name ] = value; - } catch(e) {} - } - - } else { - // If a hook was provided get the non-computed value from there - if ( hooks && "get" in hooks && (ret = hooks.get( elem, false, extra )) !== undefined ) { - return ret; - } - - // Otherwise just get the value from the style object - return style[ name ]; - } - }, - - css: function( elem, name, extra, styles ) { - var num, val, hooks, - origName = jQuery.camelCase( name ); - - // Make sure that we're working with the right name - name = jQuery.cssProps[ origName ] || ( jQuery.cssProps[ origName ] = vendorPropName( elem.style, origName ) ); - - // gets hook for the prefixed version - // followed by the unprefixed version - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // If a hook was provided get the computed value from there - if ( hooks && "get" in hooks ) { - val = hooks.get( elem, true, extra ); - } - - // Otherwise, if a way to get the computed value exists, use that - if ( val === undefined ) { - val = curCSS( elem, name, styles ); - } - - //convert "normal" to computed value - if ( val === "normal" && name in cssNormalTransform ) { - 
val = cssNormalTransform[ name ]; - } - - // Return, converting to number if forced or a qualifier was provided and val looks numeric - if ( extra === "" || extra ) { - num = parseFloat( val ); - return extra === true || jQuery.isNumeric( num ) ? num || 0 : val; - } - return val; - }, - - // A method for quickly swapping in/out CSS properties to get correct calculations - swap: function( elem, options, callback, args ) { - var ret, name, - old = {}; - - // Remember the old values, and insert the new ones - for ( name in options ) { - old[ name ] = elem.style[ name ]; - elem.style[ name ] = options[ name ]; - } - - ret = callback.apply( elem, args || [] ); - - // Revert the old values - for ( name in options ) { - elem.style[ name ] = old[ name ]; - } - - return ret; - } - }); - -// NOTE: we've included the "window" in window.getComputedStyle -// because jsdom on node.js will break without it. - if ( window.getComputedStyle ) { - getStyles = function( elem ) { - return window.getComputedStyle( elem, null ); - }; - - curCSS = function( elem, name, _computed ) { - var width, minWidth, maxWidth, - computed = _computed || getStyles( elem ), - - // getPropertyValue is only needed for .css('filter') in IE9, see #12537 - ret = computed ? 
computed.getPropertyValue( name ) || computed[ name ] : undefined, - style = elem.style; - - if ( computed ) { - - if ( ret === "" && !jQuery.contains( elem.ownerDocument, elem ) ) { - ret = jQuery.style( elem, name ); - } - - // A tribute to the "awesome hack by Dean Edwards" - // Chrome < 17 and Safari 5.0 uses "computed value" instead of "used value" for margin-right - // Safari 5.1.7 (at least) returns percentage for a larger set of values, but width seems to be reliably pixels - // this is against the CSSOM draft spec: http://dev.w3.org/csswg/cssom/#resolved-values - if ( rnumnonpx.test( ret ) && rmargin.test( name ) ) { - - // Remember the original values - width = style.width; - minWidth = style.minWidth; - maxWidth = style.maxWidth; - - // Put in the new values to get a computed value out - style.minWidth = style.maxWidth = style.width = ret; - ret = computed.width; - - // Revert the changed values - style.width = width; - style.minWidth = minWidth; - style.maxWidth = maxWidth; - } - } - - return ret; - }; - } else if ( document.documentElement.currentStyle ) { - getStyles = function( elem ) { - return elem.currentStyle; - }; - - curCSS = function( elem, name, _computed ) { - var left, rs, rsLeft, - computed = _computed || getStyles( elem ), - ret = computed ? 
computed[ name ] : undefined, - style = elem.style; - - // Avoid setting ret to empty string here - // so we don't default to auto - if ( ret == null && style && style[ name ] ) { - ret = style[ name ]; - } - - // From the awesome hack by Dean Edwards - // http://erik.eae.net/archives/2007/07/27/18.54.15/#comment-102291 - - // If we're not dealing with a regular pixel number - // but a number that has a weird ending, we need to convert it to pixels - // but not position css attributes, as those are proportional to the parent element instead - // and we can't measure the parent instead because it might trigger a "stacking dolls" problem - if ( rnumnonpx.test( ret ) && !rposition.test( name ) ) { - - // Remember the original values - left = style.left; - rs = elem.runtimeStyle; - rsLeft = rs && rs.left; - - // Put in the new values to get a computed value out - if ( rsLeft ) { - rs.left = elem.currentStyle.left; - } - style.left = name === "fontSize" ? "1em" : ret; - ret = style.pixelLeft + "px"; - - // Revert the changed values - style.left = left; - if ( rsLeft ) { - rs.left = rsLeft; - } - } - - return ret === "" ? "auto" : ret; - }; - } - - function setPositiveNumber( elem, value, subtract ) { - var matches = rnumsplit.exec( value ); - return matches ? - // Guard against undefined "subtract", e.g., when used as in cssHooks - Math.max( 0, matches[ 1 ] - ( subtract || 0 ) ) + ( matches[ 2 ] || "px" ) : - value; - } - - function augmentWidthOrHeight( elem, name, extra, isBorderBox, styles ) { - var i = extra === ( isBorderBox ? "border" : "content" ) ? - // If we already have the right measurement, avoid augmentation - 4 : - // Otherwise initialize for horizontal or vertical properties - name === "width" ? 
1 : 0, - - val = 0; - - for ( ; i < 4; i += 2 ) { - // both box models exclude margin, so add it if we want it - if ( extra === "margin" ) { - val += jQuery.css( elem, extra + cssExpand[ i ], true, styles ); - } - - if ( isBorderBox ) { - // border-box includes padding, so remove it if we want content - if ( extra === "content" ) { - val -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - } - - // at this point, extra isn't border nor margin, so remove border - if ( extra !== "margin" ) { - val -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - } else { - // at this point, extra isn't content, so add padding - val += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - - // at this point, extra isn't content nor padding, so add border - if ( extra !== "padding" ) { - val += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - } - } - - return val; - } - - function getWidthOrHeight( elem, name, extra ) { - - // Start with offset property, which is equivalent to the border-box value - var valueIsBorderBox = true, - val = name === "width" ? elem.offsetWidth : elem.offsetHeight, - styles = getStyles( elem ), - isBorderBox = jQuery.support.boxSizing && jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; - - // some non-html elements return undefined for offsetWidth, so check for null/undefined - // svg - https://bugzilla.mozilla.org/show_bug.cgi?id=649285 - // MathML - https://bugzilla.mozilla.org/show_bug.cgi?id=491668 - if ( val <= 0 || val == null ) { - // Fall back to computed then uncomputed css if necessary - val = curCSS( elem, name, styles ); - if ( val < 0 || val == null ) { - val = elem.style[ name ]; - } - - // Computed unit is not pixels. Stop here and return. 
- if ( rnumnonpx.test(val) ) { - return val; - } - - // we need the check for style in case a browser which returns unreliable values - // for getComputedStyle silently falls back to the reliable elem.style - valueIsBorderBox = isBorderBox && ( jQuery.support.boxSizingReliable || val === elem.style[ name ] ); - - // Normalize "", auto, and prepare for extra - val = parseFloat( val ) || 0; - } - - // use the active box-sizing model to add/subtract irrelevant styles - return ( val + - augmentWidthOrHeight( - elem, - name, - extra || ( isBorderBox ? "border" : "content" ), - valueIsBorderBox, - styles - ) - ) + "px"; - } - -// Try to determine the default display value of an element - function css_defaultDisplay( nodeName ) { - var doc = document, - display = elemdisplay[ nodeName ]; - - if ( !display ) { - display = actualDisplay( nodeName, doc ); - - // If the simple way fails, read from inside an iframe - if ( display === "none" || !display ) { - // Use the already-created iframe if possible - iframe = ( iframe || - jQuery("<iframe frameborder='0' width='0' height='0'/>") - .css( "cssText", "display:block !important" ) - ).appendTo( doc.documentElement ); - - // Always write a new HTML skeleton so Webkit and Firefox don't choke on reuse - doc = ( iframe[0].contentWindow || iframe[0].contentDocument ).document; - doc.write("<!doctype html><html><body>"); - doc.close(); - - display = actualDisplay( nodeName, doc ); - iframe.detach(); - } - - // Store the correct default display - elemdisplay[ nodeName ] = display; - } - - return display; - } - -// Called ONLY from within css_defaultDisplay - function actualDisplay( name, doc ) { - var elem = jQuery( doc.createElement( name ) ).appendTo( doc.body ), - display = jQuery.css( elem[0], "display" ); - elem.remove(); - return display; - } - - jQuery.each([ "height", "width" ], function( i, name ) { - jQuery.cssHooks[ name ] = { - get: function( elem, computed, extra ) { - if ( computed ) { - // certain elements can have 
dimension info if we invisibly show them - // however, it must have a current display style that would benefit from this - return elem.offsetWidth === 0 && rdisplayswap.test( jQuery.css( elem, "display" ) ) ? - jQuery.swap( elem, cssShow, function() { - return getWidthOrHeight( elem, name, extra ); - }) : - getWidthOrHeight( elem, name, extra ); - } - }, - - set: function( elem, value, extra ) { - var styles = extra && getStyles( elem ); - return setPositiveNumber( elem, value, extra ? - augmentWidthOrHeight( - elem, - name, - extra, - jQuery.support.boxSizing && jQuery.css( elem, "boxSizing", false, styles ) === "border-box", - styles - ) : 0 - ); - } - }; - }); - - if ( !jQuery.support.opacity ) { - jQuery.cssHooks.opacity = { - get: function( elem, computed ) { - // IE uses filters for opacity - return ropacity.test( (computed && elem.currentStyle ? elem.currentStyle.filter : elem.style.filter) || "" ) ? - ( 0.01 * parseFloat( RegExp.$1 ) ) + "" : - computed ? "1" : ""; - }, - - set: function( elem, value ) { - var style = elem.style, - currentStyle = elem.currentStyle, - opacity = jQuery.isNumeric( value ) ? "alpha(opacity=" + value * 100 + ")" : "", - filter = currentStyle && currentStyle.filter || style.filter || ""; - - // IE has trouble with opacity if it does not have layout - // Force it by setting the zoom level - style.zoom = 1; - - // if setting opacity to 1, and no other filters exist - attempt to remove filter attribute #6652 - // if value === "", then remove inline opacity #12685 - if ( ( value >= 1 || value === "" ) && - jQuery.trim( filter.replace( ralpha, "" ) ) === "" && - style.removeAttribute ) { - - // Setting style.filter to null, "" & " " still leave "filter:" in the cssText - // if "filter:" is present at all, clearType is disabled, we want to avoid this - // style.removeAttribute is IE Only, but so apparently is this code path... 
- style.removeAttribute( "filter" ); - - // if there is no filter style applied in a css rule or unset inline opacity, we are done - if ( value === "" || currentStyle && !currentStyle.filter ) { - return; - } - } - - // otherwise, set new filter values - style.filter = ralpha.test( filter ) ? - filter.replace( ralpha, opacity ) : - filter + " " + opacity; - } - }; - } - -// These hooks cannot be added until DOM ready because the support test -// for it is not run until after DOM ready - jQuery(function() { - if ( !jQuery.support.reliableMarginRight ) { - jQuery.cssHooks.marginRight = { - get: function( elem, computed ) { - if ( computed ) { - // WebKit Bug 13343 - getComputedStyle returns wrong value for margin-right - // Work around by temporarily setting element display to inline-block - return jQuery.swap( elem, { "display": "inline-block" }, - curCSS, [ elem, "marginRight" ] ); - } - } - }; - } - - // Webkit bug: https://bugs.webkit.org/show_bug.cgi?id=29084 - // getComputedStyle returns percent when specified for top/left/bottom/right - // rather than make the css module depend on the offset module, we just check for it here - if ( !jQuery.support.pixelPosition && jQuery.fn.position ) { - jQuery.each( [ "top", "left" ], function( i, prop ) { - jQuery.cssHooks[ prop ] = { - get: function( elem, computed ) { - if ( computed ) { - computed = curCSS( elem, prop ); - // if curCSS returns percentage, fallback to offset - return rnumnonpx.test( computed ) ? 
- jQuery( elem ).position()[ prop ] + "px" : - computed; - } - } - }; - }); - } - - }); - - if ( jQuery.expr && jQuery.expr.filters ) { - jQuery.expr.filters.hidden = function( elem ) { - // Support: Opera <= 12.12 - // Opera reports offsetWidths and offsetHeights less than zero on some elements - return elem.offsetWidth <= 0 && elem.offsetHeight <= 0 || - (!jQuery.support.reliableHiddenOffsets && ((elem.style && elem.style.display) || jQuery.css( elem, "display" )) === "none"); - }; - - jQuery.expr.filters.visible = function( elem ) { - return !jQuery.expr.filters.hidden( elem ); - }; - } - -// These hooks are used by animate to expand properties - jQuery.each({ - margin: "", - padding: "", - border: "Width" - }, function( prefix, suffix ) { - jQuery.cssHooks[ prefix + suffix ] = { - expand: function( value ) { - var i = 0, - expanded = {}, - - // assumes a single number if not a string - parts = typeof value === "string" ? value.split(" ") : [ value ]; - - for ( ; i < 4; i++ ) { - expanded[ prefix + cssExpand[ i ] + suffix ] = - parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; - } - - return expanded; - } - }; - - if ( !rmargin.test( prefix ) ) { - jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; - } - }); - var r20 = /%20/g, - rbracket = /[]$/, - rCRLF = /\r?\n/g, - rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, - rsubmittable = /^(?:input|select|textarea|keygen)/i; - - jQuery.fn.extend({ - serialize: function() { - return jQuery.param( this.serializeArray() ); - }, - serializeArray: function() { - return this.map(function(){ - // Can add propHook for "elements" to filter or add form elements - var elements = jQuery.prop( this, "elements" ); - return elements ? 
jQuery.makeArray( elements ) : this; - }) - .filter(function(){ - var type = this.type; - // Use .is(":disabled") so that fieldset[disabled] works - return this.name && !jQuery( this ).is( ":disabled" ) && - rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && - ( this.checked || !manipulation_rcheckableType.test( type ) ); - }) - .map(function( i, elem ){ - var val = jQuery( this ).val(); - - return val == null ? - null : - jQuery.isArray( val ) ? - jQuery.map( val, function( val ){ - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - }) : - { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - }).get(); - } - }); - -//Serialize an array of form elements or a set of -//key/values into a query string - jQuery.param = function( a, traditional ) { - var prefix, - s = [], - add = function( key, value ) { - // If value is a function, invoke it and return its value - value = jQuery.isFunction( value ) ? value() : ( value == null ? "" : value ); - s[ s.length ] = encodeURIComponent( key ) + "=" + encodeURIComponent( value ); - }; - - // Set traditional to true for jQuery <= 1.3.2 behavior. - if ( traditional === undefined ) { - traditional = jQuery.ajaxSettings && jQuery.ajaxSettings.traditional; - } - - // If an array was passed in, assume that it is an array of form elements. - if ( jQuery.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { - // Serialize the form elements - jQuery.each( a, function() { - add( this.name, this.value ); - }); - - } else { - // If traditional, encode the "old" way (the way 1.3.2 or older - // did it), otherwise encode params recursively. - for ( prefix in a ) { - buildParams( prefix, a[ prefix ], traditional, add ); - } - } - - // Return the resulting serialization - return s.join( "&" ).replace( r20, "+" ); - }; - - function buildParams( prefix, obj, traditional, add ) { - var name; - - if ( jQuery.isArray( obj ) ) { - // Serialize array item. 
- jQuery.each( obj, function( i, v ) { - if ( traditional || rbracket.test( prefix ) ) { - // Treat each array item as a scalar. - add( prefix, v ); - - } else { - // Item is non-scalar (array or object), encode its numeric index. - buildParams( prefix + "[" + ( typeof v === "object" ? i : "" ) + "]", v, traditional, add ); - } - }); - - } else if ( !traditional && jQuery.type( obj ) === "object" ) { - // Serialize object item. - for ( name in obj ) { - buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); - } - - } else { - // Serialize scalar item. - add( prefix, obj ); - } - } - jQuery.each( ("blur focus focusin focusout load resize scroll unload click dblclick " + - "mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave " + - "change select submit keydown keypress keyup error contextmenu").split(" "), function( i, name ) { - - // Handle event binding - jQuery.fn[ name ] = function( data, fn ) { - return arguments.length > 0 ? - this.on( name, null, data, fn ) : - this.trigger( name ); - }; - }); - - jQuery.fn.hover = function( fnOver, fnOut ) { - return this.mouseenter( fnOver ).mouseleave( fnOut || fnOver ); - }; - var - // Document location - ajaxLocParts, - ajaxLocation, - ajax_nonce = jQuery.now(), - - ajax_rquery = /?/, - rhash = /#.*$/, - rts = /([?&])_=[^&]*/, - rheaders = /^(.*?):[ \t]*([^\r\n]*)\r?$/mg, // IE leaves an \r character at EOL - // #7653, #8125, #8152: local protocol detection - rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, - rnoContent = /^(?:GET|HEAD)$/, - rprotocol = /^///, - rurl = /^([\w.+-]+:)(?://([^/?#:]*)(?::(\d+)|)|)/, - - // Keep a copy of the old load method - _load = jQuery.fn.load, - - /* Prefilters - * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) - * 2) These are called: - * - BEFORE asking for a transport - * - AFTER param serialization (s.data is a string if s.processData is true) - * 3) key is the dataType - * 4) the 
catchall symbol "*" can be used - * 5) execution will start with transport dataType and THEN continue down to "*" if needed - */ - prefilters = {}, - - /* Transports bindings - * 1) key is the dataType - * 2) the catchall symbol "*" can be used - * 3) selection will start with transport dataType and THEN go to "*" if needed - */ - transports = {}, - - // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression - allTypes = "*/".concat("*"); - -// #8138, IE may throw an exception when accessing -// a field from window.location if document.domain has been set - try { - ajaxLocation = location.href; - } catch( e ) { - // Use the href attribute of an A element - // since IE will modify it given document.location - ajaxLocation = document.createElement( "a" ); - ajaxLocation.href = ""; - ajaxLocation = ajaxLocation.href; - } - -// Segment location into parts - ajaxLocParts = rurl.exec( ajaxLocation.toLowerCase() ) || []; - -// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport - function addToPrefiltersOrTransports( structure ) { - - // dataTypeExpression is optional and defaults to "*" - return function( dataTypeExpression, func ) { - - if ( typeof dataTypeExpression !== "string" ) { - func = dataTypeExpression; - dataTypeExpression = "*"; - } - - var dataType, - i = 0, - dataTypes = dataTypeExpression.toLowerCase().match( core_rnotwhite ) || []; - - if ( jQuery.isFunction( func ) ) { - // For each dataType in the dataTypeExpression - while ( (dataType = dataTypes[i++]) ) { - // Prepend if requested - if ( dataType[0] === "+" ) { - dataType = dataType.slice( 1 ) || "*"; - (structure[ dataType ] = structure[ dataType ] || []).unshift( func ); - - // Otherwise append - } else { - (structure[ dataType ] = structure[ dataType ] || []).push( func ); - } - } - } - }; - } - -// Base inspection function for prefilters and transports - function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { - - var 
inspected = {}, - seekingTransport = ( structure === transports ); - - function inspect( dataType ) { - var selected; - inspected[ dataType ] = true; - jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { - var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); - if( typeof dataTypeOrTransport === "string" && !seekingTransport && !inspected[ dataTypeOrTransport ] ) { - options.dataTypes.unshift( dataTypeOrTransport ); - inspect( dataTypeOrTransport ); - return false; - } else if ( seekingTransport ) { - return !( selected = dataTypeOrTransport ); - } - }); - return selected; - } - - return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); - } - -// A special extend for ajax options -// that takes "flat" options (not to be deep extended) -// Fixes #9887 - function ajaxExtend( target, src ) { - var deep, key, - flatOptions = jQuery.ajaxSettings.flatOptions || {}; - - for ( key in src ) { - if ( src[ key ] !== undefined ) { - ( flatOptions[ key ] ? 
target : ( deep || (deep = {}) ) )[ key ] = src[ key ]; - } - } - if ( deep ) { - jQuery.extend( true, target, deep ); - } - - return target; - } - - jQuery.fn.load = function( url, params, callback ) { - if ( typeof url !== "string" && _load ) { - return _load.apply( this, arguments ); - } - - var selector, response, type, - self = this, - off = url.indexOf(" "); - - if ( off >= 0 ) { - selector = url.slice( off, url.length ); - url = url.slice( 0, off ); - } - - // If it's a function - if ( jQuery.isFunction( params ) ) { - - // We assume that it's the callback - callback = params; - params = undefined; - - // Otherwise, build a param string - } else if ( params && typeof params === "object" ) { - type = "POST"; - } - - // If we have elements to modify, make the request - if ( self.length > 0 ) { - jQuery.ajax({ - url: url, - - // if "type" variable is undefined, then "GET" method will be used - type: type, - dataType: "html", - data: params - }).done(function( responseText ) { - - // Save response for use in complete callback - response = arguments; - - self.html( selector ? 
- - // If a selector was specified, locate the right elements in a dummy div - // Exclude scripts to avoid IE 'Permission Denied' errors - jQuery("<div>").append( jQuery.parseHTML( responseText ) ).find( selector ) : - - // Otherwise use the full result - responseText ); - - }).complete( callback && function( jqXHR, status ) { - self.each( callback, response || [ jqXHR.responseText, status, jqXHR ] ); - }); - } - - return this; - }; - -// Attach a bunch of functions for handling common AJAX events - jQuery.each( [ "ajaxStart", "ajaxStop", "ajaxComplete", "ajaxError", "ajaxSuccess", "ajaxSend" ], function( i, type ){ - jQuery.fn[ type ] = function( fn ){ - return this.on( type, fn ); - }; - }); - - jQuery.each( [ "get", "post" ], function( i, method ) { - jQuery[ method ] = function( url, data, callback, type ) { - // shift arguments if data argument was omitted - if ( jQuery.isFunction( data ) ) { - type = type || callback; - callback = data; - data = undefined; - } - - return jQuery.ajax({ - url: url, - type: method, - dataType: type, - data: data, - success: callback - }); - }; - }); - - jQuery.extend({ - - // Counter for holding the number of active queries - active: 0, - - // Last-Modified header cache for next request - lastModified: {}, - etag: {}, - - ajaxSettings: { - url: ajaxLocation, - type: "GET", - isLocal: rlocalProtocol.test( ajaxLocParts[ 1 ] ), - global: true, - processData: true, - async: true, - contentType: "application/x-www-form-urlencoded; charset=UTF-8", - /* - timeout: 0, - data: null, - dataType: null, - username: null, - password: null, - cache: null, - throws: false, - traditional: false, - headers: {}, - */ - - accepts: { - "*": allTypes, - text: "text/plain", - html: "text/html", - xml: "application/xml, text/xml", - json: "application/json, text/javascript" - }, - - contents: { - xml: /xml/, - html: /html/, - json: /json/ - }, - - responseFields: { - xml: "responseXML", - text: "responseText" - }, - - // Data converters - // Keys 
separate source (or catchall "*") and destination types with a single space - converters: { - - // Convert anything to text - "* text": window.String, - - // Text to html (true = no transformation) - "text html": true, - - // Evaluate text as a json expression - "text json": jQuery.parseJSON, - - // Parse text as xml - "text xml": jQuery.parseXML - }, - - // For options that shouldn't be deep extended: - // you can add your own custom options here if - // and when you create one that shouldn't be - // deep extended (see ajaxExtend) - flatOptions: { - url: true, - context: true - } - }, - - // Creates a full fledged settings object into target - // with both ajaxSettings and settings fields. - // If target is omitted, writes into ajaxSettings. - ajaxSetup: function( target, settings ) { - return settings ? - - // Building a settings object - ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : - - // Extending ajaxSettings - ajaxExtend( jQuery.ajaxSettings, target ); - }, - - ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), - ajaxTransport: addToPrefiltersOrTransports( transports ), - - // Main method - ajax: function( url, options ) { - - // If url is an object, simulate pre-1.5 signature - if ( typeof url === "object" ) { - options = url; - url = undefined; - } - - // Force options to be an object - options = options || {}; - - var // Cross-domain detection vars - parts, - // Loop variable - i, - // URL without anti-cache param - cacheURL, - // Response headers as string - responseHeadersString, - // timeout handle - timeoutTimer, - - // To know if global events are to be dispatched - fireGlobals, - - transport, - // Response headers - responseHeaders, - // Create the final options object - s = jQuery.ajaxSetup( {}, options ), - // Callbacks context - callbackContext = s.context || s, - // Context for global events is callbackContext if it is a DOM node or jQuery collection - globalEventContext = s.context && ( callbackContext.nodeType || 
callbackContext.jquery ) ? - jQuery( callbackContext ) : - jQuery.event, - // Deferreds - deferred = jQuery.Deferred(), - completeDeferred = jQuery.Callbacks("once memory"), - // Status-dependent callbacks - statusCode = s.statusCode || {}, - // Headers (they are sent all at once) - requestHeaders = {}, - requestHeadersNames = {}, - // The jqXHR state - state = 0, - // Default abort message - strAbort = "canceled", - // Fake xhr - jqXHR = { - readyState: 0, - - // Builds headers hashtable if needed - getResponseHeader: function( key ) { - var match; - if ( state === 2 ) { - if ( !responseHeaders ) { - responseHeaders = {}; - while ( (match = rheaders.exec( responseHeadersString )) ) { - responseHeaders[ match[1].toLowerCase() ] = match[ 2 ]; - } - } - match = responseHeaders[ key.toLowerCase() ]; - } - return match == null ? null : match; - }, - - // Raw string - getAllResponseHeaders: function() { - return state === 2 ? responseHeadersString : null; - }, - - // Caches the header - setRequestHeader: function( name, value ) { - var lname = name.toLowerCase(); - if ( !state ) { - name = requestHeadersNames[ lname ] = requestHeadersNames[ lname ] || name; - requestHeaders[ name ] = value; - } - return this; - }, - - // Overrides response content-type header - overrideMimeType: function( type ) { - if ( !state ) { - s.mimeType = type; - } - return this; - }, - - // Status-dependent callbacks - statusCode: function( map ) { - var code; - if ( map ) { - if ( state < 2 ) { - for ( code in map ) { - // Lazy-add the new callback in a way that preserves old ones - statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; - } - } else { - // Execute the appropriate callbacks - jqXHR.always( map[ jqXHR.status ] ); - } - } - return this; - }, - - // Cancel the request - abort: function( statusText ) { - var finalText = statusText || strAbort; - if ( transport ) { - transport.abort( finalText ); - } - done( 0, finalText ); - return this; - } - }; - - // Attach deferreds - 
deferred.promise( jqXHR ).complete = completeDeferred.add; - jqXHR.success = jqXHR.done; - jqXHR.error = jqXHR.fail; - - // Remove hash character (#7531: and string promotion) - // Add protocol if not provided (#5866: IE7 issue with protocol-less urls) - // Handle falsy url in the settings object (#10093: consistency with old signature) - // We also use the url parameter if available - s.url = ( ( url || s.url || ajaxLocation ) + "" ).replace( rhash, "" ).replace( rprotocol, ajaxLocParts[ 1 ] + "//" ); - - // Alias method option to type as per ticket #12004 - s.type = options.method || options.type || s.method || s.type; - - // Extract dataTypes list - s.dataTypes = jQuery.trim( s.dataType || "*" ).toLowerCase().match( core_rnotwhite ) || [""]; - - // A cross-domain request is in order when we have a protocol:host:port mismatch - if ( s.crossDomain == null ) { - parts = rurl.exec( s.url.toLowerCase() ); - s.crossDomain = !!( parts && - ( parts[ 1 ] !== ajaxLocParts[ 1 ] || parts[ 2 ] !== ajaxLocParts[ 2 ] || - ( parts[ 3 ] || ( parts[ 1 ] === "http:" ? 80 : 443 ) ) != - ( ajaxLocParts[ 3 ] || ( ajaxLocParts[ 1 ] === "http:" ? 
80 : 443 ) ) ) - ); - } - - // Convert data if not already a string - if ( s.data && s.processData && typeof s.data !== "string" ) { - s.data = jQuery.param( s.data, s.traditional ); - } - - // Apply prefilters - inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); - - // If request was aborted inside a prefilter, stop there - if ( state === 2 ) { - return jqXHR; - } - - // We can fire global events as of now if asked to - fireGlobals = s.global; - - // Watch for a new set of requests - if ( fireGlobals && jQuery.active++ === 0 ) { - jQuery.event.trigger("ajaxStart"); - } - - // Uppercase the type - s.type = s.type.toUpperCase(); - - // Determine if request has content - s.hasContent = !rnoContent.test( s.type ); - - // Save the URL in case we're toying with the If-Modified-Since - // and/or If-None-Match header later on - cacheURL = s.url; - - // More options handling for requests with no content - if ( !s.hasContent ) { - - // If data is available, append data to url - if ( s.data ) { - cacheURL = ( s.url += ( ajax_rquery.test( cacheURL ) ? "&" : "?" ) + s.data ); - // #9682: remove data so that it's not used in an eventual retry - delete s.data; - } - - // Add anti-cache in url if needed - if ( s.cache === false ) { - s.url = rts.test( cacheURL ) ? - - // If there is already a '_' parameter, set its value - cacheURL.replace( rts, "$1_=" + ajax_nonce++ ) : - - // Otherwise add one to the end - cacheURL + ( ajax_rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ajax_nonce++; - } - } - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
- if ( s.ifModified ) { - if ( jQuery.lastModified[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); - } - if ( jQuery.etag[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); - } - } - - // Set the correct header, if data is being sent - if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { - jqXHR.setRequestHeader( "Content-Type", s.contentType ); - } - - // Set the Accepts header for the server, depending on the dataType - jqXHR.setRequestHeader( - "Accept", - s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[0] ] ? - s.accepts[ s.dataTypes[0] ] + ( s.dataTypes[ 0 ] !== "*" ? ", " + allTypes + "; q=0.01" : "" ) : - s.accepts[ "*" ] - ); - - // Check for headers option - for ( i in s.headers ) { - jqXHR.setRequestHeader( i, s.headers[ i ] ); - } - - // Allow custom headers/mimetypes and early abort - if ( s.beforeSend && ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || state === 2 ) ) { - // Abort if not done already and return - return jqXHR.abort(); - } - - // aborting is no longer a cancellation - strAbort = "abort"; - - // Install callbacks on deferreds - for ( i in { success: 1, error: 1, complete: 1 } ) { - jqXHR[ i ]( s[ i ] ); - } - - // Get transport - transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); - - // If no transport, we auto-abort - if ( !transport ) { - done( -1, "No Transport" ); - } else { - jqXHR.readyState = 1; - - // Send global event - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); - } - // Timeout - if ( s.async && s.timeout > 0 ) { - timeoutTimer = setTimeout(function() { - jqXHR.abort("timeout"); - }, s.timeout ); - } - - try { - state = 1; - transport.send( requestHeaders, done ); - } catch ( e ) { - // Propagate exception as error if not done - if ( state < 2 ) { - done( -1, e ); - // Simply rethrow otherwise - } else { - throw e; - } - } - } - - // Callback for 
when everything is done - function done( status, nativeStatusText, responses, headers ) { - var isSuccess, success, error, response, modified, - statusText = nativeStatusText; - - // Called once - if ( state === 2 ) { - return; - } - - // State is "done" now - state = 2; - - // Clear timeout if it exists - if ( timeoutTimer ) { - clearTimeout( timeoutTimer ); - } - - // Dereference transport for early garbage collection - // (no matter how long the jqXHR object will be used) - transport = undefined; - - // Cache response headers - responseHeadersString = headers || ""; - - // Set readyState - jqXHR.readyState = status > 0 ? 4 : 0; - - // Get response data - if ( responses ) { - response = ajaxHandleResponses( s, jqXHR, responses ); - } - - // If successful, handle type chaining - if ( status >= 200 && status < 300 || status === 304 ) { - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. - if ( s.ifModified ) { - modified = jqXHR.getResponseHeader("Last-Modified"); - if ( modified ) { - jQuery.lastModified[ cacheURL ] = modified; - } - modified = jqXHR.getResponseHeader("etag"); - if ( modified ) { - jQuery.etag[ cacheURL ] = modified; - } - } - - // if no content - if ( status === 204 ) { - isSuccess = true; - statusText = "nocontent"; - - // if not modified - } else if ( status === 304 ) { - isSuccess = true; - statusText = "notmodified"; - - // If we have data, let's convert it - } else { - isSuccess = ajaxConvert( s, response ); - statusText = isSuccess.state; - success = isSuccess.data; - error = isSuccess.error; - isSuccess = !error; - } - } else { - // We extract error from statusText - // then normalize statusText and status for non-aborts - error = statusText; - if ( status || !statusText ) { - statusText = "error"; - if ( status < 0 ) { - status = 0; - } - } - } - - // Set data for the fake xhr object - jqXHR.status = status; - jqXHR.statusText = ( nativeStatusText || statusText ) + ""; - - // Success/Error - if ( isSuccess 
) { - deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); - } else { - deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); - } - - // Status-dependent callbacks - jqXHR.statusCode( statusCode ); - statusCode = undefined; - - if ( fireGlobals ) { - globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", - [ jqXHR, s, isSuccess ? success : error ] ); - } - - // Complete - completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); - - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); - // Handle the global AJAX counter - if ( !( --jQuery.active ) ) { - jQuery.event.trigger("ajaxStop"); - } - } - } - - return jqXHR; - }, - - getScript: function( url, callback ) { - return jQuery.get( url, undefined, callback, "script" ); - }, - - getJSON: function( url, data, callback ) { - return jQuery.get( url, data, callback, "json" ); - } - }); - - /* Handles responses to an ajax request: - * - sets all responseXXX fields accordingly - * - finds the right dataType (mediates between content-type and expected dataType) - * - returns the corresponding response - */ - function ajaxHandleResponses( s, jqXHR, responses ) { - var firstDataType, ct, finalDataType, type, - contents = s.contents, - dataTypes = s.dataTypes, - responseFields = s.responseFields; - - // Fill responseXXX fields - for ( type in responseFields ) { - if ( type in responses ) { - jqXHR[ responseFields[type] ] = responses[ type ]; - } - } - - // Remove auto dataType and get content-type in the process - while( dataTypes[ 0 ] === "*" ) { - dataTypes.shift(); - if ( ct === undefined ) { - ct = s.mimeType || jqXHR.getResponseHeader("Content-Type"); - } - } - - // Check if we're dealing with a known content-type - if ( ct ) { - for ( type in contents ) { - if ( contents[ type ] && contents[ type ].test( ct ) ) { - dataTypes.unshift( type ); - break; - } - } - } - - // Check to see if we have a response for the expected dataType 
- if ( dataTypes[ 0 ] in responses ) { - finalDataType = dataTypes[ 0 ]; - } else { - // Try convertible dataTypes - for ( type in responses ) { - if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[0] ] ) { - finalDataType = type; - break; - } - if ( !firstDataType ) { - firstDataType = type; - } - } - // Or just use first one - finalDataType = finalDataType || firstDataType; - } - - // If we found a dataType - // We add the dataType to the list if needed - // and return the corresponding response - if ( finalDataType ) { - if ( finalDataType !== dataTypes[ 0 ] ) { - dataTypes.unshift( finalDataType ); - } - return responses[ finalDataType ]; - } - } - -// Chain conversions given the request and the original response - function ajaxConvert( s, response ) { - var conv2, current, conv, tmp, - converters = {}, - i = 0, - // Work with a copy of dataTypes in case we need to modify it for conversion - dataTypes = s.dataTypes.slice(), - prev = dataTypes[ 0 ]; - - // Apply the dataFilter if provided - if ( s.dataFilter ) { - response = s.dataFilter( response, s.dataType ); - } - - // Create converters map with lowercased keys - if ( dataTypes[ 1 ] ) { - for ( conv in s.converters ) { - converters[ conv.toLowerCase() ] = s.converters[ conv ]; - } - } - - // Convert to each sequential dataType, tolerating list modification - for ( ; (current = dataTypes[++i]); ) { - - // There's only work to do if current dataType is non-auto - if ( current !== "*" ) { - - // Convert response if prev dataType is non-auto and differs from current - if ( prev !== "*" && prev !== current ) { - - // Seek a direct converter - conv = converters[ prev + " " + current ] || converters[ "* " + current ]; - - // If none found, seek a pair - if ( !conv ) { - for ( conv2 in converters ) { - - // If conv2 outputs current - tmp = conv2.split(" "); - if ( tmp[ 1 ] === current ) { - - // If prev can be converted to accepted input - conv = converters[ prev + " " + tmp[ 0 ] ] || - converters[ "* " + 
tmp[ 0 ] ]; - if ( conv ) { - // Condense equivalence converters - if ( conv === true ) { - conv = converters[ conv2 ]; - - // Otherwise, insert the intermediate dataType - } else if ( converters[ conv2 ] !== true ) { - current = tmp[ 0 ]; - dataTypes.splice( i--, 0, current ); - } - - break; - } - } - } - } - - // Apply converter (if not an equivalence) - if ( conv !== true ) { - - // Unless errors are allowed to bubble, catch and return them - if ( conv && s["throws"] ) { - response = conv( response ); - } else { - try { - response = conv( response ); - } catch ( e ) { - return { state: "parsererror", error: conv ? e : "No conversion from " + prev + " to " + current }; - } - } - } - } - - // Update prev for next iteration - prev = current; - } - } - - return { state: "success", data: response }; - } -// Install script dataType - jQuery.ajaxSetup({ - accepts: { - script: "text/javascript, application/javascript, application/ecmascript, application/x-ecmascript" - }, - contents: { - script: /(?:java|ecma)script/ - }, - converters: { - "text script": function( text ) { - jQuery.globalEval( text ); - return text; - } - } - }); - -// Handle cache's special case and global - jQuery.ajaxPrefilter( "script", function( s ) { - if ( s.cache === undefined ) { - s.cache = false; - } - if ( s.crossDomain ) { - s.type = "GET"; - s.global = false; - } - }); - -// Bind script tag hack transport - jQuery.ajaxTransport( "script", function(s) { - - // This transport only deals with cross domain requests - if ( s.crossDomain ) { - - var script, - head = document.head || jQuery("head")[0] || document.documentElement; - - return { - - send: function( _, callback ) { - - script = document.createElement("script"); - - script.async = true; - - if ( s.scriptCharset ) { - script.charset = s.scriptCharset; - } - - script.src = s.url; - - // Attach handlers for all browsers - script.onload = script.onreadystatechange = function( _, isAbort ) { - - if ( isAbort || !script.readyState || 
/loaded|complete/.test( script.readyState ) ) { - - // Handle memory leak in IE - script.onload = script.onreadystatechange = null; - - // Remove the script - if ( script.parentNode ) { - script.parentNode.removeChild( script ); - } - - // Dereference the script - script = null; - - // Callback if not abort - if ( !isAbort ) { - callback( 200, "success" ); - } - } - }; - - // Circumvent IE6 bugs with base elements (#2709 and #4378) by prepending - // Use native DOM manipulation to avoid our domManip AJAX trickery - head.insertBefore( script, head.firstChild ); - }, - - abort: function() { - if ( script ) { - script.onload( undefined, true ); - } - } - }; - } - }); - var oldCallbacks = [], - rjsonp = /(=)?(?=&|$)|??/; - -// Default jsonp settings - jQuery.ajaxSetup({ - jsonp: "callback", - jsonpCallback: function() { - var callback = oldCallbacks.pop() || ( jQuery.expando + "_" + ( ajax_nonce++ ) ); - this[ callback ] = true; - return callback; - } - }); - -// Detect, normalize options and install callbacks for jsonp requests - jQuery.ajaxPrefilter( "json jsonp", function( s, originalSettings, jqXHR ) { - - var callbackName, overwritten, responseContainer, - jsonProp = s.jsonp !== false && ( rjsonp.test( s.url ) ? - "url" : - typeof s.data === "string" && !( s.contentType || "" ).indexOf("application/x-www-form-urlencoded") && rjsonp.test( s.data ) && "data" - ); - - // Handle iff the expected data type is "jsonp" or we have a parameter to set - if ( jsonProp || s.dataTypes[ 0 ] === "jsonp" ) { - - // Get callback name, remembering preexisting value associated with it - callbackName = s.jsonpCallback = jQuery.isFunction( s.jsonpCallback ) ? - s.jsonpCallback() : - s.jsonpCallback; - - // Insert callback into url or form data - if ( jsonProp ) { - s[ jsonProp ] = s[ jsonProp ].replace( rjsonp, "$1" + callbackName ); - } else if ( s.jsonp !== false ) { - s.url += ( ajax_rquery.test( s.url ) ? "&" : "?" 
) + s.jsonp + "=" + callbackName; - } - - // Use data converter to retrieve json after script execution - s.converters["script json"] = function() { - if ( !responseContainer ) { - jQuery.error( callbackName + " was not called" ); - } - return responseContainer[ 0 ]; - }; - - // force json dataType - s.dataTypes[ 0 ] = "json"; - - // Install callback - overwritten = window[ callbackName ]; - window[ callbackName ] = function() { - responseContainer = arguments; - }; - - // Clean-up function (fires after converters) - jqXHR.always(function() { - // Restore preexisting value - window[ callbackName ] = overwritten; - - // Save back as free - if ( s[ callbackName ] ) { - // make sure that re-using the options doesn't screw things around - s.jsonpCallback = originalSettings.jsonpCallback; - - // save the callback name for future use - oldCallbacks.push( callbackName ); - } - - // Call if it was a function and we have a response - if ( responseContainer && jQuery.isFunction( overwritten ) ) { - overwritten( responseContainer[ 0 ] ); - } - - responseContainer = overwritten = undefined; - }); - - // Delegate to script - return "script"; - } - }); - var xhrCallbacks, xhrSupported, - xhrId = 0, - // #5280: Internet Explorer will keep connections alive if we don't abort on unload - xhrOnUnloadAbort = window.ActiveXObject && function() { - // Abort all pending requests - var key; - for ( key in xhrCallbacks ) { - xhrCallbacks[ key ]( undefined, true ); - } - }; - -// Functions to create xhrs - function createStandardXHR() { - try { - return new window.XMLHttpRequest(); - } catch( e ) {} - } - - function createActiveXHR() { - try { - return new window.ActiveXObject("Microsoft.XMLHTTP"); - } catch( e ) {} - } - -// Create the request object -// (This is still attached to ajaxSettings for backward compatibility) - jQuery.ajaxSettings.xhr = window.ActiveXObject ? 
- /* Microsoft failed to properly - * implement the XMLHttpRequest in IE7 (can't request local files), - * so we use the ActiveXObject when it is available - * Additionally XMLHttpRequest can be disabled in IE7/IE8 so - * we need a fallback. - */ - function() { - return !this.isLocal && createStandardXHR() || createActiveXHR(); - } : - // For all other browsers, use the standard XMLHttpRequest object - createStandardXHR; - -// Determine support properties - xhrSupported = jQuery.ajaxSettings.xhr(); - jQuery.support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); - xhrSupported = jQuery.support.ajax = !!xhrSupported; - -// Create transport if the browser can provide an xhr - if ( xhrSupported ) { - - jQuery.ajaxTransport(function( s ) { - // Cross domain only allowed if supported through XMLHttpRequest - if ( !s.crossDomain || jQuery.support.cors ) { - - var callback; - - return { - send: function( headers, complete ) { - - // Get a new xhr - var handle, i, - xhr = s.xhr(); - - // Open the socket - // Passing null username, generates a login popup on Opera (#2865) - if ( s.username ) { - xhr.open( s.type, s.url, s.async, s.username, s.password ); - } else { - xhr.open( s.type, s.url, s.async ); - } - - // Apply custom fields if provided - if ( s.xhrFields ) { - for ( i in s.xhrFields ) { - xhr[ i ] = s.xhrFields[ i ]; - } - } - - // Override mime type if needed - if ( s.mimeType && xhr.overrideMimeType ) { - xhr.overrideMimeType( s.mimeType ); - } - - // X-Requested-With header - // For cross-domain requests, seeing as conditions for a preflight are - // akin to a jigsaw puzzle, we simply never set it to be sure. - // (it can always be set on a per-request basis or even using ajaxSetup) - // For same-domain requests, won't change header if already provided. 
- if ( !s.crossDomain && !headers["X-Requested-With"] ) { - headers["X-Requested-With"] = "XMLHttpRequest"; - } - - // Need an extra try/catch for cross domain requests in Firefox 3 - try { - for ( i in headers ) { - xhr.setRequestHeader( i, headers[ i ] ); - } - } catch( err ) {} - - // Do send the request - // This may raise an exception which is actually - // handled in jQuery.ajax (so no try/catch here) - xhr.send( ( s.hasContent && s.data ) || null ); - - // Listener - callback = function( _, isAbort ) { - var status, responseHeaders, statusText, responses; - - // Firefox throws exceptions when accessing properties - // of an xhr when a network error occurred - // http://helpful.knobs-dials.com/index.php/Component_returned_failure_code:_0x...) - try { - - // Was never called and is aborted or complete - if ( callback && ( isAbort || xhr.readyState === 4 ) ) { - - // Only called once - callback = undefined; - - // Do not keep as active anymore - if ( handle ) { - xhr.onreadystatechange = jQuery.noop; - if ( xhrOnUnloadAbort ) { - delete xhrCallbacks[ handle ]; - } - } - - // If it's an abort - if ( isAbort ) { - // Abort it manually if needed - if ( xhr.readyState !== 4 ) { - xhr.abort(); - } - } else { - responses = {}; - status = xhr.status; - responseHeaders = xhr.getAllResponseHeaders(); - - // When requesting binary data, IE6-9 will throw an exception - // on any attempt to access responseText (#11426) - if ( typeof xhr.responseText === "string" ) { - responses.text = xhr.responseText; - } - - // Firefox throws an exception when accessing - // statusText for faulty cross-domain requests - try { - statusText = xhr.statusText; - } catch( e ) { - // We normalize with Webkit giving an empty statusText - statusText = ""; - } - - // Filter status for non standard behaviors - - // If the request is local and we have data: assume a success - // (success with no data won't get notified, that's the best we - // can do given current implementations) - if ( !status && 
s.isLocal && !s.crossDomain ) { - status = responses.text ? 200 : 404; - // IE - #1450: sometimes returns 1223 when it should be 204 - } else if ( status === 1223 ) { - status = 204; - } - } - } - } catch( firefoxAccessException ) { - if ( !isAbort ) { - complete( -1, firefoxAccessException ); - } - } - - // Call complete if needed - if ( responses ) { - complete( status, statusText, responses, responseHeaders ); - } - }; - - if ( !s.async ) { - // if we're in sync mode we fire the callback - callback(); - } else if ( xhr.readyState === 4 ) { - // (IE6 & IE7) if it's in cache and has been - // retrieved directly we need to fire the callback - setTimeout( callback ); - } else { - handle = ++xhrId; - if ( xhrOnUnloadAbort ) { - // Create the active xhrs callbacks list if needed - // and attach the unload handler - if ( !xhrCallbacks ) { - xhrCallbacks = {}; - jQuery( window ).unload( xhrOnUnloadAbort ); - } - // Add to list of active xhrs callbacks - xhrCallbacks[ handle ] = callback; - } - xhr.onreadystatechange = callback; - } - }, - - abort: function() { - if ( callback ) { - callback( undefined, true ); - } - } - }; - } - }); - } - var fxNow, timerId, - rfxtypes = /^(?:toggle|show|hide)$/, - rfxnum = new RegExp( "^(?:([+-])=|)(" + core_pnum + ")([a-z%]*)$", "i" ), - rrun = /queueHooks$/, - animationPrefilters = [ defaultPrefilter ], - tweeners = { - "*": [function( prop, value ) { - var end, unit, - tween = this.createTween( prop, value ), - parts = rfxnum.exec( value ), - target = tween.cur(), - start = +target || 0, - scale = 1, - maxIterations = 20; - - if ( parts ) { - end = +parts[2]; - unit = parts[3] || ( jQuery.cssNumber[ prop ] ? 
"" : "px" ); - - // We need to compute starting value - if ( unit !== "px" && start ) { - // Iteratively approximate from a nonzero starting point - // Prefer the current property, because this process will be trivial if it uses the same units - // Fallback to end or a simple constant - start = jQuery.css( tween.elem, prop, true ) || end || 1; - - do { - // If previous iteration zeroed out, double until we get *something* - // Use a string for doubling factor so we don't accidentally see scale as unchanged below - scale = scale || ".5"; - - // Adjust and apply - start = start / scale; - jQuery.style( tween.elem, prop, start + unit ); - - // Update scale, tolerating zero or NaN from tween.cur() - // And breaking the loop if scale is unchanged or perfect, or if we've just had enough - } while ( scale !== (scale = tween.cur() / target) && scale !== 1 && --maxIterations ); - } - - tween.unit = unit; - tween.start = start; - // If a +=/-= token was provided, we're doing a relative animation - tween.end = parts[1] ? 
start + ( parts[1] + 1 ) * end : end; - } - return tween; - }] - }; - -// Animations created synchronously will run synchronously - function createFxNow() { - setTimeout(function() { - fxNow = undefined; - }); - return ( fxNow = jQuery.now() ); - } - - function createTweens( animation, props ) { - jQuery.each( props, function( prop, value ) { - var collection = ( tweeners[ prop ] || [] ).concat( tweeners[ "*" ] ), - index = 0, - length = collection.length; - for ( ; index < length; index++ ) { - if ( collection[ index ].call( animation, prop, value ) ) { - - // we're done with this property - return; - } - } - }); - } - - function Animation( elem, properties, options ) { - var result, - stopped, - index = 0, - length = animationPrefilters.length, - deferred = jQuery.Deferred().always( function() { - // don't match elem in the :animated selector - delete tick.elem; - }), - tick = function() { - if ( stopped ) { - return false; - } - var currentTime = fxNow || createFxNow(), - remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), - // archaic crash bug won't allow us to use 1 - ( 0.5 || 0 ) (#12497) - temp = remaining / animation.duration || 0, - percent = 1 - temp, - index = 0, - length = animation.tweens.length; - - for ( ; index < length ; index++ ) { - animation.tweens[ index ].run( percent ); - } - - deferred.notifyWith( elem, [ animation, percent, remaining ]); - - if ( percent < 1 && length ) { - return remaining; - } else { - deferred.resolveWith( elem, [ animation ] ); - return false; - } - }, - animation = deferred.promise({ - elem: elem, - props: jQuery.extend( {}, properties ), - opts: jQuery.extend( true, { specialEasing: {} }, options ), - originalProperties: properties, - originalOptions: options, - startTime: fxNow || createFxNow(), - duration: options.duration, - tweens: [], - createTween: function( prop, end ) { - var tween = jQuery.Tween( elem, animation.opts, prop, end, - animation.opts.specialEasing[ prop ] || 
animation.opts.easing ); - animation.tweens.push( tween ); - return tween; - }, - stop: function( gotoEnd ) { - var index = 0, - // if we are going to the end, we want to run all the tweens - // otherwise we skip this part - length = gotoEnd ? animation.tweens.length : 0; - if ( stopped ) { - return this; - } - stopped = true; - for ( ; index < length ; index++ ) { - animation.tweens[ index ].run( 1 ); - } - - // resolve when we played the last frame - // otherwise, reject - if ( gotoEnd ) { - deferred.resolveWith( elem, [ animation, gotoEnd ] ); - } else { - deferred.rejectWith( elem, [ animation, gotoEnd ] ); - } - return this; - } - }), - props = animation.props; - - propFilter( props, animation.opts.specialEasing ); - - for ( ; index < length ; index++ ) { - result = animationPrefilters[ index ].call( animation, elem, props, animation.opts ); - if ( result ) { - return result; - } - } - - createTweens( animation, props ); - - if ( jQuery.isFunction( animation.opts.start ) ) { - animation.opts.start.call( elem, animation ); - } - - jQuery.fx.timer( - jQuery.extend( tick, { - elem: elem, - anim: animation, - queue: animation.opts.queue - }) - ); - - // attach callbacks from options - return animation.progress( animation.opts.progress ) - .done( animation.opts.done, animation.opts.complete ) - .fail( animation.opts.fail ) - .always( animation.opts.always ); - } - - function propFilter( props, specialEasing ) { - var value, name, index, easing, hooks; - - // camelCase, specialEasing and expand cssHook pass - for ( index in props ) { - name = jQuery.camelCase( index ); - easing = specialEasing[ name ]; - value = props[ index ]; - if ( jQuery.isArray( value ) ) { - easing = value[ 1 ]; - value = props[ index ] = value[ 0 ]; - } - - if ( index !== name ) { - props[ name ] = value; - delete props[ index ]; - } - - hooks = jQuery.cssHooks[ name ]; - if ( hooks && "expand" in hooks ) { - value = hooks.expand( value ); - delete props[ name ]; - - // not quite $.extend, 
this wont overwrite keys already present. - // also - reusing 'index' from above because we have the correct "name" - for ( index in value ) { - if ( !( index in props ) ) { - props[ index ] = value[ index ]; - specialEasing[ index ] = easing; - } - } - } else { - specialEasing[ name ] = easing; - } - } - } - - jQuery.Animation = jQuery.extend( Animation, { - - tweener: function( props, callback ) { - if ( jQuery.isFunction( props ) ) { - callback = props; - props = [ "*" ]; - } else { - props = props.split(" "); - } - - var prop, - index = 0, - length = props.length; - - for ( ; index < length ; index++ ) { - prop = props[ index ]; - tweeners[ prop ] = tweeners[ prop ] || []; - tweeners[ prop ].unshift( callback ); - } - }, - - prefilter: function( callback, prepend ) { - if ( prepend ) { - animationPrefilters.unshift( callback ); - } else { - animationPrefilters.push( callback ); - } - } - }); - - function defaultPrefilter( elem, props, opts ) { - /*jshint validthis:true */ - var prop, index, length, - value, dataShow, toggle, - tween, hooks, oldfire, - anim = this, - style = elem.style, - orig = {}, - handled = [], - hidden = elem.nodeType && isHidden( elem ); - - // handle queue: false promises - if ( !opts.queue ) { - hooks = jQuery._queueHooks( elem, "fx" ); - if ( hooks.unqueued == null ) { - hooks.unqueued = 0; - oldfire = hooks.empty.fire; - hooks.empty.fire = function() { - if ( !hooks.unqueued ) { - oldfire(); - } - }; - } - hooks.unqueued++; - - anim.always(function() { - // doing this makes sure that the complete handler will be called - // before this completes - anim.always(function() { - hooks.unqueued--; - if ( !jQuery.queue( elem, "fx" ).length ) { - hooks.empty.fire(); - } - }); - }); - } - - // height/width overflow pass - if ( elem.nodeType === 1 && ( "height" in props || "width" in props ) ) { - // Make sure that nothing sneaks out - // Record all 3 overflow attributes because IE does not - // change the overflow attribute when overflowX and - 
// overflowY are set to the same value - opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; - - // Set display property to inline-block for height/width - // animations on inline elements that are having width/height animated - if ( jQuery.css( elem, "display" ) === "inline" && - jQuery.css( elem, "float" ) === "none" ) { - - // inline-level elements accept inline-block; - // block-level elements need to be inline with layout - if ( !jQuery.support.inlineBlockNeedsLayout || css_defaultDisplay( elem.nodeName ) === "inline" ) { - style.display = "inline-block"; - - } else { - style.zoom = 1; - } - } - } - - if ( opts.overflow ) { - style.overflow = "hidden"; - if ( !jQuery.support.shrinkWrapBlocks ) { - anim.always(function() { - style.overflow = opts.overflow[ 0 ]; - style.overflowX = opts.overflow[ 1 ]; - style.overflowY = opts.overflow[ 2 ]; - }); - } - } - - - // show/hide pass - for ( index in props ) { - value = props[ index ]; - if ( rfxtypes.exec( value ) ) { - delete props[ index ]; - toggle = toggle || value === "toggle"; - if ( value === ( hidden ? "hide" : "show" ) ) { - continue; - } - handled.push( index ); - } - } - - length = handled.length; - if ( length ) { - dataShow = jQuery._data( elem, "fxshow" ) || jQuery._data( elem, "fxshow", {} ); - if ( "hidden" in dataShow ) { - hidden = dataShow.hidden; - } - - // store state if its toggle - enables .stop().toggle() to "reverse" - if ( toggle ) { - dataShow.hidden = !hidden; - } - if ( hidden ) { - jQuery( elem ).show(); - } else { - anim.done(function() { - jQuery( elem ).hide(); - }); - } - anim.done(function() { - var prop; - jQuery._removeData( elem, "fxshow" ); - for ( prop in orig ) { - jQuery.style( elem, prop, orig[ prop ] ); - } - }); - for ( index = 0 ; index < length ; index++ ) { - prop = handled[ index ]; - tween = anim.createTween( prop, hidden ? 
dataShow[ prop ] : 0 ); - orig[ prop ] = dataShow[ prop ] || jQuery.style( elem, prop ); - - if ( !( prop in dataShow ) ) { - dataShow[ prop ] = tween.start; - if ( hidden ) { - tween.end = tween.start; - tween.start = prop === "width" || prop === "height" ? 1 : 0; - } - } - } - } - } - - function Tween( elem, options, prop, end, easing ) { - return new Tween.prototype.init( elem, options, prop, end, easing ); - } - jQuery.Tween = Tween; - - Tween.prototype = { - constructor: Tween, - init: function( elem, options, prop, end, easing, unit ) { - this.elem = elem; - this.prop = prop; - this.easing = easing || "swing"; - this.options = options; - this.start = this.now = this.cur(); - this.end = end; - this.unit = unit || ( jQuery.cssNumber[ prop ] ? "" : "px" ); - }, - cur: function() { - var hooks = Tween.propHooks[ this.prop ]; - - return hooks && hooks.get ? - hooks.get( this ) : - Tween.propHooks._default.get( this ); - }, - run: function( percent ) { - var eased, - hooks = Tween.propHooks[ this.prop ]; - - if ( this.options.duration ) { - this.pos = eased = jQuery.easing[ this.easing ]( - percent, this.options.duration * percent, 0, 1, this.options.duration - ); - } else { - this.pos = eased = percent; - } - this.now = ( this.end - this.start ) * eased + this.start; - - if ( this.options.step ) { - this.options.step.call( this.elem, this.now, this ); - } - - if ( hooks && hooks.set ) { - hooks.set( this ); - } else { - Tween.propHooks._default.set( this ); - } - return this; - } - }; - - Tween.prototype.init.prototype = Tween.prototype; - - Tween.propHooks = { - _default: { - get: function( tween ) { - var result; - - if ( tween.elem[ tween.prop ] != null && - (!tween.elem.style || tween.elem.style[ tween.prop ] == null) ) { - return tween.elem[ tween.prop ]; - } - - // passing an empty string as a 3rd parameter to .css will automatically - // attempt a parseFloat and fallback to a string if the parse fails - // so, simple values such as "10px" are parsed to 
Float. - // complex values such as "rotate(1rad)" are returned as is. - result = jQuery.css( tween.elem, tween.prop, "" ); - // Empty strings, null, undefined and "auto" are converted to 0. - return !result || result === "auto" ? 0 : result; - }, - set: function( tween ) { - // use step hook for back compat - use cssHook if its there - use .style if its - // available and use plain properties where available - if ( jQuery.fx.step[ tween.prop ] ) { - jQuery.fx.step[ tween.prop ]( tween ); - } else if ( tween.elem.style && ( tween.elem.style[ jQuery.cssProps[ tween.prop ] ] != null || jQuery.cssHooks[ tween.prop ] ) ) { - jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); - } else { - tween.elem[ tween.prop ] = tween.now; - } - } - } - }; - -// Remove in 2.0 - this supports IE8's panic based approach -// to setting things on disconnected nodes - - Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { - set: function( tween ) { - if ( tween.elem.nodeType && tween.elem.parentNode ) { - tween.elem[ tween.prop ] = tween.now; - } - } - }; - - jQuery.each([ "toggle", "show", "hide" ], function( i, name ) { - var cssFn = jQuery.fn[ name ]; - jQuery.fn[ name ] = function( speed, easing, callback ) { - return speed == null || typeof speed === "boolean" ? 
- cssFn.apply( this, arguments ) : - this.animate( genFx( name, true ), speed, easing, callback ); - }; - }); - - jQuery.fn.extend({ - fadeTo: function( speed, to, easing, callback ) { - - // show any hidden elements after setting opacity to 0 - return this.filter( isHidden ).css( "opacity", 0 ).show() - - // animate to the value specified - .end().animate({ opacity: to }, speed, easing, callback ); - }, - animate: function( prop, speed, easing, callback ) { - var empty = jQuery.isEmptyObject( prop ), - optall = jQuery.speed( speed, easing, callback ), - doAnimation = function() { - // Operate on a copy of prop so per-property easing won't be lost - var anim = Animation( this, jQuery.extend( {}, prop ), optall ); - doAnimation.finish = function() { - anim.stop( true ); - }; - // Empty animations, or finishing resolves immediately - if ( empty || jQuery._data( this, "finish" ) ) { - anim.stop( true ); - } - }; - doAnimation.finish = doAnimation; - - return empty || optall.queue === false ? 
- this.each( doAnimation ) : - this.queue( optall.queue, doAnimation ); - }, - stop: function( type, clearQueue, gotoEnd ) { - var stopQueue = function( hooks ) { - var stop = hooks.stop; - delete hooks.stop; - stop( gotoEnd ); - }; - - if ( typeof type !== "string" ) { - gotoEnd = clearQueue; - clearQueue = type; - type = undefined; - } - if ( clearQueue && type !== false ) { - this.queue( type || "fx", [] ); - } - - return this.each(function() { - var dequeue = true, - index = type != null && type + "queueHooks", - timers = jQuery.timers, - data = jQuery._data( this ); - - if ( index ) { - if ( data[ index ] && data[ index ].stop ) { - stopQueue( data[ index ] ); - } - } else { - for ( index in data ) { - if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { - stopQueue( data[ index ] ); - } - } - } - - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && (type == null || timers[ index ].queue === type) ) { - timers[ index ].anim.stop( gotoEnd ); - dequeue = false; - timers.splice( index, 1 ); - } - } - - // start the next in the queue if the last step wasn't forced - // timers currently will call their complete callbacks, which will dequeue - // but only if they were gotoEnd - if ( dequeue || !gotoEnd ) { - jQuery.dequeue( this, type ); - } - }); - }, - finish: function( type ) { - if ( type !== false ) { - type = type || "fx"; - } - return this.each(function() { - var index, - data = jQuery._data( this ), - queue = data[ type + "queue" ], - hooks = data[ type + "queueHooks" ], - timers = jQuery.timers, - length = queue ? 
queue.length : 0; - - // enable finishing flag on private data - data.finish = true; - - // empty the queue first - jQuery.queue( this, type, [] ); - - if ( hooks && hooks.cur && hooks.cur.finish ) { - hooks.cur.finish.call( this ); - } - - // look for any active animations, and finish them - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && timers[ index ].queue === type ) { - timers[ index ].anim.stop( true ); - timers.splice( index, 1 ); - } - } - - // look for any animations in the old queue and finish them - for ( index = 0; index < length; index++ ) { - if ( queue[ index ] && queue[ index ].finish ) { - queue[ index ].finish.call( this ); - } - } - - // turn off finishing flag - delete data.finish; - }); - } - }); - -// Generate parameters to create a standard animation - function genFx( type, includeWidth ) { - var which, - attrs = { height: type }, - i = 0; - - // if we include width, step value is 1 to do all cssExpand values, - // if we don't include width, step value is 2 to skip over Left and Right - includeWidth = includeWidth? 1 : 0; - for( ; i < 4 ; i += 2 - includeWidth ) { - which = cssExpand[ i ]; - attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; - } - - if ( includeWidth ) { - attrs.opacity = attrs.width = type; - } - - return attrs; - } - -// Generate shortcuts for custom animations - jQuery.each({ - slideDown: genFx("show"), - slideUp: genFx("hide"), - slideToggle: genFx("toggle"), - fadeIn: { opacity: "show" }, - fadeOut: { opacity: "hide" }, - fadeToggle: { opacity: "toggle" } - }, function( name, props ) { - jQuery.fn[ name ] = function( speed, easing, callback ) { - return this.animate( props, speed, easing, callback ); - }; - }); - - jQuery.speed = function( speed, easing, fn ) { - var opt = speed && typeof speed === "object" ? 
jQuery.extend( {}, speed ) : { - complete: fn || !fn && easing || - jQuery.isFunction( speed ) && speed, - duration: speed, - easing: fn && easing || easing && !jQuery.isFunction( easing ) && easing - }; - - opt.duration = jQuery.fx.off ? 0 : typeof opt.duration === "number" ? opt.duration : - opt.duration in jQuery.fx.speeds ? jQuery.fx.speeds[ opt.duration ] : jQuery.fx.speeds._default; - - // normalize opt.queue - true/undefined/null -> "fx" - if ( opt.queue == null || opt.queue === true ) { - opt.queue = "fx"; - } - - // Queueing - opt.old = opt.complete; - - opt.complete = function() { - if ( jQuery.isFunction( opt.old ) ) { - opt.old.call( this ); - } - - if ( opt.queue ) { - jQuery.dequeue( this, opt.queue ); - } - }; - - return opt; - }; - - jQuery.easing = { - linear: function( p ) { - return p; - }, - swing: function( p ) { - return 0.5 - Math.cos( p*Math.PI ) / 2; - } - }; - - jQuery.timers = []; - jQuery.fx = Tween.prototype.init; - jQuery.fx.tick = function() { - var timer, - timers = jQuery.timers, - i = 0; - - fxNow = jQuery.now(); - - for ( ; i < timers.length; i++ ) { - timer = timers[ i ]; - // Checks the timer has not already been removed - if ( !timer() && timers[ i ] === timer ) { - timers.splice( i--, 1 ); - } - } - - if ( !timers.length ) { - jQuery.fx.stop(); - } - fxNow = undefined; - }; - - jQuery.fx.timer = function( timer ) { - if ( timer() && jQuery.timers.push( timer ) ) { - jQuery.fx.start(); - } - }; - - jQuery.fx.interval = 13; - - jQuery.fx.start = function() { - if ( !timerId ) { - timerId = setInterval( jQuery.fx.tick, jQuery.fx.interval ); - } - }; - - jQuery.fx.stop = function() { - clearInterval( timerId ); - timerId = null; - }; - - jQuery.fx.speeds = { - slow: 600, - fast: 200, - // Default speed - _default: 400 - }; - -// Back Compat <1.8 extension point - jQuery.fx.step = {}; - - if ( jQuery.expr && jQuery.expr.filters ) { - jQuery.expr.filters.animated = function( elem ) { - return jQuery.grep(jQuery.timers, function( fn 
) { - return elem === fn.elem; - }).length; - }; - } - jQuery.fn.offset = function( options ) { - if ( arguments.length ) { - return options === undefined ? - this : - this.each(function( i ) { - jQuery.offset.setOffset( this, options, i ); - }); - } - - var docElem, win, - box = { top: 0, left: 0 }, - elem = this[ 0 ], - doc = elem && elem.ownerDocument; - - if ( !doc ) { - return; - } - - docElem = doc.documentElement; - - // Make sure it's not a disconnected DOM node - if ( !jQuery.contains( docElem, elem ) ) { - return box; - } - - // If we don't have gBCR, just use 0,0 rather than error - // BlackBerry 5, iOS 3 (original iPhone) - if ( typeof elem.getBoundingClientRect !== core_strundefined ) { - box = elem.getBoundingClientRect(); - } - win = getWindow( doc ); - return { - top: box.top + ( win.pageYOffset || docElem.scrollTop ) - ( docElem.clientTop || 0 ), - left: box.left + ( win.pageXOffset || docElem.scrollLeft ) - ( docElem.clientLeft || 0 ) - }; - }; - - jQuery.offset = { - - setOffset: function( elem, options, i ) { - var position = jQuery.css( elem, "position" ); - - // set position first, in-case top/left are set even on static elem - if ( position === "static" ) { - elem.style.position = "relative"; - } - - var curElem = jQuery( elem ), - curOffset = curElem.offset(), - curCSSTop = jQuery.css( elem, "top" ), - curCSSLeft = jQuery.css( elem, "left" ), - calculatePosition = ( position === "absolute" || position === "fixed" ) && jQuery.inArray("auto", [curCSSTop, curCSSLeft]) > -1, - props = {}, curPosition = {}, curTop, curLeft; - - // need to be able to calculate position if either top or left is auto and position is either absolute or fixed - if ( calculatePosition ) { - curPosition = curElem.position(); - curTop = curPosition.top; - curLeft = curPosition.left; - } else { - curTop = parseFloat( curCSSTop ) || 0; - curLeft = parseFloat( curCSSLeft ) || 0; - } - - if ( jQuery.isFunction( options ) ) { - options = options.call( elem, i, curOffset ); - 
} - - if ( options.top != null ) { - props.top = ( options.top - curOffset.top ) + curTop; - } - if ( options.left != null ) { - props.left = ( options.left - curOffset.left ) + curLeft; - } - - if ( "using" in options ) { - options.using.call( elem, props ); - } else { - curElem.css( props ); - } - } - }; - - - jQuery.fn.extend({ - - position: function() { - if ( !this[ 0 ] ) { - return; - } - - var offsetParent, offset, - parentOffset = { top: 0, left: 0 }, - elem = this[ 0 ]; - - // fixed elements are offset from window (parentOffset = {top:0, left: 0}, because it is it's only offset parent - if ( jQuery.css( elem, "position" ) === "fixed" ) { - // we assume that getBoundingClientRect is available when computed position is fixed - offset = elem.getBoundingClientRect(); - } else { - // Get *real* offsetParent - offsetParent = this.offsetParent(); - - // Get correct offsets - offset = this.offset(); - if ( !jQuery.nodeName( offsetParent[ 0 ], "html" ) ) { - parentOffset = offsetParent.offset(); - } - - // Add offsetParent borders - parentOffset.top += jQuery.css( offsetParent[ 0 ], "borderTopWidth", true ); - parentOffset.left += jQuery.css( offsetParent[ 0 ], "borderLeftWidth", true ); - } - - // Subtract parent offsets and element margins - // note: when an element has margin: auto the offsetLeft and marginLeft - // are the same in Safari causing offset.left to incorrectly be 0 - return { - top: offset.top - parentOffset.top - jQuery.css( elem, "marginTop", true ), - left: offset.left - parentOffset.left - jQuery.css( elem, "marginLeft", true) - }; - }, - - offsetParent: function() { - return this.map(function() { - var offsetParent = this.offsetParent || document.documentElement; - while ( offsetParent && ( !jQuery.nodeName( offsetParent, "html" ) && jQuery.css( offsetParent, "position") === "static" ) ) { - offsetParent = offsetParent.offsetParent; - } - return offsetParent || document.documentElement; - }); - } - }); - - -// Create scrollLeft and scrollTop 
methods - jQuery.each( {scrollLeft: "pageXOffset", scrollTop: "pageYOffset"}, function( method, prop ) { - var top = /Y/.test( prop ); - - jQuery.fn[ method ] = function( val ) { - return jQuery.access( this, function( elem, method, val ) { - var win = getWindow( elem ); - - if ( val === undefined ) { - return win ? (prop in win) ? win[ prop ] : - win.document.documentElement[ method ] : - elem[ method ]; - } - - if ( win ) { - win.scrollTo( - !top ? val : jQuery( win ).scrollLeft(), - top ? val : jQuery( win ).scrollTop() - ); - - } else { - elem[ method ] = val; - } - }, method, val, arguments.length, null ); - }; - }); - - function getWindow( elem ) { - return jQuery.isWindow( elem ) ? - elem : - elem.nodeType === 9 ? - elem.defaultView || elem.parentWindow : - false; - } -// Create innerHeight, innerWidth, height, width, outerHeight and outerWidth methods - jQuery.each( { Height: "height", Width: "width" }, function( name, type ) { - jQuery.each( { padding: "inner" + name, content: type, "": "outer" + name }, function( defaultExtra, funcName ) { - // margin is only for outerHeight, outerWidth - jQuery.fn[ funcName ] = function( margin, value ) { - var chainable = arguments.length && ( defaultExtra || typeof margin !== "boolean" ), - extra = defaultExtra || ( margin === true || value === true ? "margin" : "border" ); - - return jQuery.access( this, function( elem, type, value ) { - var doc; - - if ( jQuery.isWindow( elem ) ) { - // As of 5/8/2012 this will yield incorrect results for Mobile Safari, but there - // isn't a whole lot we can do. 
See pull request at this URL for discussion: - // https://github.com/jquery/jquery/pull/764 - return elem.document.documentElement[ "client" + name ]; - } - - // Get document width or height - if ( elem.nodeType === 9 ) { - doc = elem.documentElement; - - // Either scroll[Width/Height] or offset[Width/Height] or client[Width/Height], whichever is greatest - // unfortunately, this causes bug #3838 in IE6/8 only, but there is currently no good, small way to fix it. - return Math.max( - elem.body[ "scroll" + name ], doc[ "scroll" + name ], - elem.body[ "offset" + name ], doc[ "offset" + name ], - doc[ "client" + name ] - ); - } - - return value === undefined ? - // Get width or height on the element, requesting but not forcing parseFloat - jQuery.css( elem, type, extra ) : - - // Set width or height on the element - jQuery.style( elem, type, value, extra ); - }, type, chainable ? margin : undefined, chainable, null ); - }; - }); - }); -// Limit scope pollution from any deprecated API -// (function() { - -// })(); -// Expose jQuery to the global object - window.jQuery = window.$ = jQuery; - -// Expose jQuery as an AMD module, but only for AMD loaders that -// understand the issues with loading multiple versions of jQuery -// in a page that all might call define(). The loader will indicate -// they have special allowances for multiple jQuery versions by -// specifying define.amd.jQuery = true. Register as a named module, -// since jQuery can be concatenated with other files that may use define, -// but not use a proper concatenation script that understands anonymous -// AMD modules. A named AMD is safest and most robust way to register. -// Lowercase jquery is used because AMD module names are derived from -// file names, and jQuery is normally delivered in a lowercase file name. -// Do this after creating the global so that if an AMD module wants to call -// noConflict to hide this version of jQuery, it will work. 
- if ( typeof define === "function" && define.amd && define.amd.jQuery ) { - define( "jquery", [], function () { return jQuery; } ); - } - -})( window ); \ No newline at end of file diff --git a/modules/enterprise/gui/coregui/src/main/webapp/js/jquery-1.9.1.min.js b/modules/enterprise/gui/coregui/src/main/webapp/js/jquery-1.9.1.min.js deleted file mode 100644 index 5d9ee08..0000000 --- a/modules/enterprise/gui/coregui/src/main/webapp/js/jquery-1.9.1.min.js +++ /dev/null @@ -1,5 +0,0 @@ -/*! jQuery v1.9.1 | (c) 2005, 2012 jQuery Foundation, Inc. | jquery.org/license - //@ sourceMappingURL=jquery.min.map - */(function(e,t){var n,r,i=typeof t,o=e.document,a=e.location,s=e.jQuery,u=e.$,l={},c=[],p="1.9.1",f=c.concat,d=c.push,h=c.slice,g=c.indexOf,m=l.toString,y=l.hasOwnProperty,v=p.trim,b=function(e,t){return new b.fn.init(e,t,r)},x=/[+-]?(?:\d*.|)\d+(?:[eE][+-]?\d+|)/.source,w=/\S+/g,T=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,N=/^(?:(<[\w\W]+>)[^>]*|#([\w-]*))$/,C=/^<(\w+)\s*/?>(?:</\1>|)$/,k=/^[],:{}\s]*$/,E=/(?:^|:|,)(?:\s*[)+/g,S=/\(?:["\/bfnrt]|u[\da-fA-F]{4})/g,A=/"[^"\\r\n]*"|true|false|null|-?(?:\d+.|)\d+(?:[eE][+-]?\d+|)/g,j=/^-ms-/,D=/-([\da-z])/gi,L=function(e,t){return t.toUpperCase()},H=function(e){(o.addEventListener||"load"===e.type||"complete"===o.readyState)&&(q(),b.ready())},q=function(){o.addEventListener?(o.removeEventListener("DOMContentLoaded",H,!1),e.removeEventListener("load",H,!1)):(o.detachEvent("onreadystatechange",H),e.detachEvent("onload",H))};b.fn=b.prototype={jquery:p,constructor:b,init:function(e,n,r){var i,a;if(!e)return this;if("string"== typeof e){if(i="<"===e.charAt(0)&&">"===e.charAt(e.length-1)&&e.length>=3?[null,e,null]:N.exec(e),!i||!i[1]&&n)return!n||n.jquery?(n||r).find(e):this.constructor(n).find(e);if(i[1]){if(n=n instanceof b?n[0]:n,b.merge(this,b.parseHTML(i[1],n&&n.nodeType?n.ownerDocument||n:o,!0)),C.test(i[1])&&b.isPlainObject(n))for(i in n)b.isFunction(this[i])?this[i](n[i]):this.attr(i,n[i]);return 
this}if(a=o.getElementById(i[2]),a&&a.parentNode){if(a.id!==i[2])return r.find(e);this.length=1,this[0]=a}return this.context=o,this.selector=e,this}return e.nodeType?(this.context=this[0]=e,this.length=1,this):b.isFunction(e)?r.ready(e):(e.selector!==t&&(this.selector=e.selector,this.context=e.context),b.makeArray(e,this))},selector:"",length:0,size:function(){return this.length},toArray:function(){return h.call(this)},get:function(e){return null==e?this.toArray():0>e?this[this.length+e]:this[e]},pushStack:function(e){var t=b.merge(this.constructor(),e);return t.prevObject=this,t.context=this.cont ext,t},each:function(e,t){return b.each(this,e,t)},ready:function(e){return b.ready.promise().done(e),this},slice:function(){return this.pushStack(h.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(e){var t=this.length,n=+e+(0>e?t:0);return this.pushStack(n>=0&&t>n?[this[n]]:[])},map:function(e){return this.pushStack(b.map(this,function(t,n){return e.call(t,n,t)}))},end:function(){return this.prevObject||this.constructor(null)},push:d,sort:[].sort,splice:[].splice},b.fn.init.prototype=b.fn,b.extend=b.fn.extend=function(){var e,n,r,i,o,a,s=arguments[0]||{},u=1,l=arguments.length,c=!1;for("boolean"==typeof s&&(c=s,s=arguments[1]||{},u=2),"object"==typeof s||b.isFunction(s)||(s={}),l===u&&(s=this,--u);l>u;u++)if(null!=(o=arguments[u]))for(i in o)e=s[i],r=o[i],s!==r&&(c&&r&&(b.isPlainObject(r)||(n=b.isArray(r)))?(n?(n=!1,a=e&&b.isArray(e)?e:[]):a=e&&b.isPlainObject(e)?e:{},s[i]=b.extend(c,a,r)):r!==t&&(s[i]=r));return s} ,b.extend({noConflict:function(t){return e.$===b&&(e.$=u),t&&e.jQuery===b&&(e.jQuery=s),b},isReady:!1,readyWait:1,holdReady:function(e){e?b.readyWait++:b.ready(!0)},ready:function(e){if(e===!0?!--b.readyWait:!b.isReady){if(!o.body)return 
setTimeout(b.ready);b.isReady=!0,e!==!0&&--b.readyWait>0||(n.resolveWith(o,[b]),b.fn.trigger&&b(o).trigger("ready").off("ready"))}},isFunction:function(e){return"function"===b.type(e)},isArray:Array.isArray||function(e){return"array"===b.type(e)},isWindow:function(e){return null!=e&&e==e.window},isNumeric:function(e){return!isNaN(parseFloat(e))&&isFinite(e)},type:function(e){return null==e?e+"":"object"==typeof e||"function"==typeof e?l[m.call(e)]||"object":typeof e},isPlainObject:function(e){if(!e||"object"!==b.type(e)||e.nodeType||b.isWindow(e))return!1;try{if(e.constructor&&!y.call(e,"constructor")&&!y.call(e.constructor.prototype,"isPrototypeOf"))return!1}catch(n){return!1}var r;for(r in e);return r===t||y.call(e,r)},isEmptyObject:functi on(e){var t;for(t in e)return!1;return!0},error:function(e){throw Error(e)},parseHTML:function(e,t,n){if(!e||"string"!=typeof e)return null;"boolean"==typeof t&&(n=t,t=!1),t=t||o;var r=C.exec(e),i=!n&&[];return r?[t.createElement(r[1])]:(r=b.buildFragment([e],t,i),i&&b(i).remove(),b.merge([],r.childNodes))},parseJSON:function(n){return e.JSON&&e.JSON.parse?e.JSON.parse(n):null===n?n:"string"==typeof n&&(n=b.trim(n),n&&k.test(n.replace(S,"@").replace(A,"]").replace(E,"")))?Function("return "+n)():(b.error("Invalid JSON: "+n),t)},parseXML:function(n){var r,i;if(!n||"string"!=typeof n)return null;try{e.DOMParser?(i=new DOMParser,r=i.parseFromString(n,"text/xml")):(r=new ActiveXObject("Microsoft.XMLDOM"),r.async="false",r.loadXML(n))}catch(o){r=t}return r&&r.documentElement&&!r.getElementsByTagName("parsererror").length||b.error("Invalid XML: "+n),r},noop:function(){},globalEval:function(t){t&&b.trim(t)&&(e.execScript||function(t){e.eval.call(e,t)})(t)},camelCase:function(e){ret urn e.replace(j,"ms-").replace(D,L)},nodeName:function(e,t){return e.nodeName&&e.nodeName.toLowerCase()===t.toLowerCase()},each:function(e,t,n){var r,i=0,o=e.length,a=M(e);if(n){if(a){for(;o>i;i++)if(r=t.apply(e[i],n),r===!1)break}else for(i in 
e)if(r=t.apply(e[i],n),r===!1)break}else if(a){for(;o>i;i++)if(r=t.call(e[i],i,e[i]),r===!1)break}else for(i in e)if(r=t.call(e[i],i,e[i]),r===!1)break;return e},trim:v&&!v.call("\ufeff\u00a0")?function(e){return null==e?"":v.call(e)}:function(e){return null==e?"":(e+"").replace(T,"")},makeArray:function(e,t){var n=t||[];return null!=e&&(M(Object(e))?b.merge(n,"string"==typeof e?[e]:e):d.call(n,e)),n},inArray:function(e,t,n){var r;if(t){if(g)return g.call(t,e,n);for(r=t.length,n=n?0>n?Math.max(0,r+n):n:0;r>n;n++)if(n in t&&t[n]===e)return n}return-1},merge:function(e,n){var r=n.length,i=e.length,o=0;if("number"==typeof r)for(;r>o;o++)e[i++]=n[o];else while(n[o]!==t)e[i++]=n[o++];return e.length=i,e},grep:function(e,t,n){var r,i=[],o=0 ,a=e.length;for(n=!!n;a>o;o++)r=!!t(e[o],o),n!==r&&i.push(e[o]);return i},map:function(e,t,n){var r,i=0,o=e.length,a=M(e),s=[];if(a)for(;o>i;i++)r=t(e[i],i,n),null!=r&&(s[s.length]=r);else for(i in e)r=t(e[i],i,n),null!=r&&(s[s.length]=r);return f.apply([],s)},guid:1,proxy:function(e,n){var r,i,o;return"string"==typeof n&&(o=e[n],n=e,e=o),b.isFunction(e)?(r=h.call(arguments,2),i=function(){return e.apply(n||this,r.concat(h.call(arguments)))},i.guid=e.guid=e.guid||b.guid++,i):t},access:function(e,n,r,i,o,a,s){var u=0,l=e.length,c=null==r;if("object"===b.type(r)){o=!0;for(u in r)b.access(e,n,u,r[u],!0,a,s)}else if(i!==t&&(o=!0,b.isFunction(i)||(s=!0),c&&(s?(n.call(e,i),n=null):(c=n,n=function(e,t,n){return c.call(b(e),n)})),n))for(;l>u;u++)n(e[u],r,s?i:i.call(e[u],u,n(e[u],r)));return o?e:c?n.call(e):l?n(e[0],r):a},now:function(){return(new Date).getTime()}}),b.ready.promise=function(t){if(!n)if(n=b.Deferred(),"complete"===o.readyState)setTimeout(b.ready);else if(o.addEventLis tener)o.addEventListener("DOMContentLoaded",H,!1),e.addEventListener("load",H,!1);else{o.attachEvent("onreadystatechange",H),e.attachEvent("onload",H);var r=!1;try{r=null==e.frameElement&&o.documentElement}catch(i){}r&&r.doScroll&&function 
a(){if(!b.isReady){try{r.doScroll("left")}catch(e){return setTimeout(a,50)}q(),b.ready()}}()}return n.promise(t)},b.each("Boolean Number String Function Array Date RegExp Object Error".split(" "),function(e,t){l["[object "+t+"]"]=t.toLowerCase()});function M(e){var t=e.length,n=b.type(e);return b.isWindow(e)?!1:1===e.nodeType&&t?!0:"array"===n||"function"!==n&&(0===t||"number"==typeof t&&t>0&&t-1 in e)}r=b(o);var _={};function F(e){var t=_[e]={};return b.each(e.match(w)||[],function(e,n){t[n]=!0}),t}b.Callbacks=function(e){e="string"==typeof e?_[e]||F(e):b.extend({},e);var n,r,i,o,a,s,u=[],l=!e.once&&[],c=function(t){for(r=e.memory&&t,i=!0,a=s||0,s=0,o=u.length,n=!0;u&&o>a;a++)if(u[a].apply(t[0],t[1])===!1&&e.stopOnFalse){r=!1;break}n=!1,u &&(l?l.length&&c(l.shift()):r?u=[]:p.disable())},p={add:function(){if(u){var t=u.length;(function i(t){b.each(t,function(t,n){var r=b.type(n);"function"===r?e.unique&&p.has(n)||u.push(n):n&&n.length&&"string"!==r&&i(n)})})(arguments),n?o=u.length:r&&(s=t,c(r))}return this},remove:function(){return u&&b.each(arguments,function(e,t){var r;while((r=b.inArray(t,u,r))>-1)u.splice(r,1),n&&(o>=r&&o--,a>=r&&a--)}),this},has:function(e){return e?b.inArray(e,u)>-1:!(!u||!u.length)},empty:function(){return u=[],this},disable:function(){return u=l=r=t,this},disabled:function(){return!u},lock:function(){return l=t,r||p.disable(),this},locked:function(){return!l},fireWith:function(e,t){return t=t||[],t=[e,t.slice?t.slice():t],!u||i&&!l||(n?l.push(t):c(t)),this},fire:function(){return p.fireWith(this,arguments),this},fired:function(){return!!i}};return p},b.extend({Deferred:function(e){var t=[["resolve","done",b.Callbacks("once memory"),"resolved"],["reject","fail",b.Callbacks("once memory "),"rejected"],["notify","progress",b.Callbacks("memory")]],n="pending",r={state:function(){return n},always:function(){return i.done(arguments).fail(arguments),this},then:function(){var e=arguments;return b.Deferred(function(n){b.each(t,function(t,o){var 
a=o[0],s=b.isFunction(e[t])&&e[t];i[o[1]](function(){var e=s&&s.apply(this,arguments);e&&b.isFunction(e.promise)?e.promise().done(n.resolve).fail(n.reject).progress(n.notify):n[a+"With"](this===r?n.promise():this,s?[e]:arguments)})}),e=null}).promise()},promise:function(e){return null!=e?b.extend(e,r):r}},i={};return r.pipe=r.then,b.each(t,function(e,o){var a=o[2],s=o[3];r[o[1]]=a.add,s&&a.add(function(){n=s},t[1^e][2].disable,t[2][2].lock),i[o[0]]=function(){return i[o[0]+"With"](this===i?r:this,arguments),this},i[o[0]+"With"]=a.fireWith}),r.promise(i),e&&e.call(i,i),i},when:function(e){var t=0,n=h.call(arguments),r=n.length,i=1!==r||e&&b.isFunction(e.promise)?r:0,o=1===i?e:b.Deferred(),a=function(e,t,n){return function(r) {t[e]=this,n[e]=arguments.length>1?h.call(arguments):r,n===s?o.notifyWith(t,n):--i||o.resolveWith(t,n)}},s,u,l;if(r>1)for(s=Array(r),u=Array(r),l=Array(r);r>t;t++)n[t]&&b.isFunction(n[t].promise)?n[t].promise().done(a(t,l,n)).fail(o.reject).progress(a(t,u,s)):--i;return i||o.resolveWith(l,n),o.promise()}}),b.support=function(){var t,n,r,a,s,u,l,c,p,f,d=o.createElement("div");if(d.setAttribute("className","t"),d.innerHTML=" <link/><table></table><a href='/a'>a</a><input type='checkbox'/>",n=d.getElementsByTagName("*"),r=d.getElementsByTagName("a")[0],!n||!r||!n.length)return{};s=o.createElement("select"),l=s.appendChild(o.createElement("option")),a=d.getElementsByTagName("input")[0],r.style.cssText="top:1px;float:left;opacity:.5",t={getSetAttribute:"t"!==d.className,leadingWhitespace:3===d.firstChild.nodeType,tbody:!d.getElementsByTagName("tbody").length,htmlSerialize:!!d.getElementsByTagName("link").length,style:/top/.test(r.getAttribute("style")),hrefNormalized:"/a"===r.ge 
tAttribute("href"),opacity:/^0.5/.test(r.style.opacity),cssFloat:!!r.style.cssFloat,checkOn:!!a.value,optSelected:l.selected,enctype:!!o.createElement("form").enctype,html5Clone:"<:nav></:nav>"!==o.createElement("nav").cloneNode(!0).outerHTML,boxModel:"CSS1Compat"===o.compatMode,deleteExpando:!0,noCloneEvent:!0,inlineBlockNeedsLayout:!1,shrinkWrapBlocks:!1,reliableMarginRight:!0,boxSizingReliable:!0,pixelPosition:!1},a.checked=!0,t.noCloneChecked=a.cloneNode(!0).checked,s.disabled=!0,t.optDisabled=!l.disabled;try{delete d.test}catch(h){t.deleteExpando=!1}a=o.createElement("input"),a.setAttribute("value",""),t.input=""===a.getAttribute("value"),a.value="t",a.setAttribute("type","radio"),t.radioValue="t"===a.value,a.setAttribute("checked","t"),a.setAttribute("name","t"),u=o.createDocumentFragment(),u.appendChild(a),t.appendChecked=a.checked,t.checkClone=u.cloneNode(!0).cloneNode(!0).lastChild.checked,d.attachEvent&&(d.attachEvent("onclick",function(){t.noCloneEvent=!1}),d.clon eNode(!0).click());for(f in{submit:!0,change:!0,focusin:!0})d.setAttribute(c="on"+f,"t"),t[f+"Bubbles"]=c in e||d.attributes[c].expando===!1;return d.style.backgroundClip="content-box",d.cloneNode(!0).style.backgroundClip="",t.clearCloneStyle="content-box"===d.style.backgroundClip,b(function(){var n,r,a,s="padding:0;margin:0;border:0;display:block;box-sizing:content-box;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;",u=o.getElementsByTagName("body")[0];u&&(n=o.createElement("div"),n.style.cssText="border:0;width:0;height:0;position:absolute;top:0;left:-9999px;margin-top:1px",u.appendChild(n).appendChild(d),d.innerHTML="<table><tr><td></td><td>t</td></tr></table>",a=d.getElementsByTagName("td"),a[0].style.cssText="padding:0;margin:0;border:0;display:none",p=0===a[0].offsetHeight,a[0].style.display="",a[1].style.display="none",t.reliableHiddenOffsets=p&&0===a[0].offsetHeight,d.innerHTML="",d.style.cssText="box-sizing:border-box;-moz-box-sizing:border-box;-webkit-b 
ox-sizing:border-box;padding:1px;border:1px;display:block;width:4px;margin-top:1%;position:absolute;top:1%;",t.boxSizing=4===d.offsetWidth,t.doesNotIncludeMarginInBodyOffset=1!==u.offsetTop,e.getComputedStyle&&(t.pixelPosition="1%"!==(e.getComputedStyle(d,null)||{}).top,t.boxSizingReliable="4px"===(e.getComputedStyle(d,null)||{width:"4px"}).width,r=d.appendChild(o.createElement("div")),r.style.cssText=d.style.cssText=s,r.style.marginRight=r.style.width="0",d.style.width="1px",t.reliableMarginRight=!parseFloat((e.getComputedStyle(r,null)||{}).marginRight)),typeof d.style.zoom!==i&&(d.innerHTML="",d.style.cssText=s+"width:1px;padding:1px;display:inline;zoom:1",t.inlineBlockNeedsLayout=3===d.offsetWidth,d.style.display="block",d.innerHTML="<div></div>",d.firstChild.style.width="5px",t.shrinkWrapBlocks=3!==d.offsetWidth,t.inlineBlockNeedsLayout&&(u.style.zoom=1)),u.removeChild(n),n=d=a=r=null)}),n=s=u=l=r=a=null,t}();var O=/(?:{[\s\S]*}|[[\s\S]*])$/,B=/([A-Z])/g;function P(e ,n,r,i){if(b.acceptData(e)){var o,a,s=b.expando,u="string"==typeof n,l=e.nodeType,p=l?b.cache:e,f=l?e[s]:e[s]&&s;if(f&&p[f]&&(i||p[f].data)||!u||r!==t)return f||(l?e[s]=f=c.pop()||b.guid++:f=s),p[f]||(p[f]={},l||(p[f].toJSON=b.noop)),("object"==typeof n||"function"==typeof n)&&(i?p[f]=b.extend(p[f],n):p[f].data=b.extend(p[f].data,n)),o=p[f],i||(o.data||(o.data={}),o=o.data),r!==t&&(o[b.camelCase(n)]=r),u?(a=o[n],null==a&&(a=o[b.camelCase(n)])):a=o,a}}function R(e,t,n){if(b.acceptData(e)){var r,i,o,a=e.nodeType,s=a?b.cache:e,u=a?e[b.expando]:b.expando;if(s[u]){if(t&&(o=n?s[u]:s[u].data)){b.isArray(t)?t=t.concat(b.map(t,b.camelCase)):t in o?t=[t]:(t=b.camelCase(t),t=t in o?[t]:t.split(" "));for(r=0,i=t.length;i>r;r++)delete o[t[r]];if(!(n?$:b.isEmptyObject)(o))return}(n||(delete s[u].data,$(s[u])))&&(a?b.cleanData([e],!0):b.support.deleteExpando||s!=s.window?delete s[u]:s[u]=null)}}}b.extend({cache:{},expando:"jQuery"+(p+Math.random()).replace(/\D/g,""),noData:{embed:!0,object 
:"clsid:D27CDB6E-AE6D-11cf-96B8-444553540000",applet:!0},hasData:function(e){return e=e.nodeType?b.cache[e[b.expando]]:e[b.expando],!!e&&!$(e)},data:function(e,t,n){return P(e,t,n)},removeData:function(e,t){return R(e,t)},_data:function(e,t,n){return P(e,t,n,!0)},_removeData:function(e,t){return R(e,t,!0)},acceptData:function(e){if(e.nodeType&&1!==e.nodeType&&9!==e.nodeType)return!1;var t=e.nodeName&&b.noData[e.nodeName.toLowerCase()];return!t||t!==!0&&e.getAttribute("classid")===t}}),b.fn.extend({data:function(e,n){var r,i,o=this[0],a=0,s=null;if(e===t){if(this.length&&(s=b.data(o),1===o.nodeType&&!b._data(o,"parsedAttrs"))){for(r=o.attributes;r.length>a;a++)i=r[a].name,i.indexOf("data-")||(i=b.camelCase(i.slice(5)),W(o,i,s[i]));b._data(o,"parsedAttrs",!0)}return s}return"object"==typeof e?this.each(function(){b.data(this,e)}):b.access(this,function(n){return n===t?o?W(o,e,b.data(o,e)):null:(this.each(function(){b.data(this,e,n)}),t)},null,n,arguments.length>1,null,!0)},rem oveData:function(e){return this.each(function(){b.removeData(this,e)})}});function W(e,n,r){if(r===t&&1===e.nodeType){var i="data-"+n.replace(B,"-$1").toLowerCase();if(r=e.getAttribute(i),"string"==typeof r){try{r="true"===r?!0:"false"===r?!1:"null"===r?null:+r+""===r?+r:O.test(r)?b.parseJSON(r):r}catch(o){}b.data(e,n,r)}else r=t}return r}function $(e){var t;for(t in e)if(("data"!==t||!b.isEmptyObject(e[t]))&&"toJSON"!==t)return!1;return!0}b.extend({queue:function(e,n,r){var i;return e?(n=(n||"fx")+"queue",i=b._data(e,n),r&&(!i||b.isArray(r)?i=b._data(e,n,b.makeArray(r)):i.push(r)),i||[]):t},dequeue:function(e,t){t=t||"fx";var n=b.queue(e,t),r=n.length,i=n.shift(),o=b._queueHooks(e,t),a=function(){b.dequeue(e,t)};"inprogress"===i&&(i=n.shift(),r--),o.cur=i,i&&("fx"===t&&n.unshift("inprogress"),delete o.stop,i.call(e,a,o)),!r&&o&&o.empty.fire()},_queueHooks:function(e,t){var n=t+"queueHooks";return b._data(e,n)||b._data(e,n,{empty:b.Callbacks("once memory").add(function(){b._ 
removeData(e,t+"queue"),b._removeData(e,n)})})}}),b.fn.extend({queue:function(e,n){var r=2;return"string"!=typeof e&&(n=e,e="fx",r--),r>arguments.length?b.queue(this[0],e):n===t?this:this.each(function(){var t=b.queue(this,e,n);b._queueHooks(this,e),"fx"===e&&"inprogress"!==t[0]&&b.dequeue(this,e)})},dequeue:function(e){return this.each(function(){b.dequeue(this,e)})},delay:function(e,t){return e=b.fx?b.fx.speeds[e]||e:e,t=t||"fx",this.queue(t,function(t,n){var r=setTimeout(t,e);n.stop=function(){clearTimeout(r)}})},clearQueue:function(e){return this.queue(e||"fx",[])},promise:function(e,n){var r,i=1,o=b.Deferred(),a=this,s=this.length,u=function(){--i||o.resolveWith(a,[a])};"string"!=typeof e&&(n=e,e=t),e=e||"fx";while(s--)r=b._data(a[s],e+"queueHooks"),r&&r.empty&&(i++,r.empty.add(u));return u(),o.promise(n)}});var I,z,X=/[\t\r\n]/g,U=/\r/g,V=/^(?:input|select|textarea|button|object)$/i,Y=/^(?:a|area)$/i,J=/^(?:checked|selected|autofocus|autoplay|async|controls|defer|disab led|hidden|loop|multiple|open|readonly|required|scoped)$/i,G=/^(?:checked|selected)$/i,Q=b.support.getSetAttribute,K=b.support.input;b.fn.extend({attr:function(e,t){return b.access(this,b.attr,e,t,arguments.length>1)},removeAttr:function(e){return this.each(function(){b.removeAttr(this,e)})},prop:function(e,t){return b.access(this,b.prop,e,t,arguments.length>1)},removeProp:function(e){return e=b.propFix[e]||e,this.each(function(){try{this[e]=t,delete this[e]}catch(n){}})},addClass:function(e){var t,n,r,i,o,a=0,s=this.length,u="string"==typeof e&&e;if(b.isFunction(e))return this.each(function(t){b(this).addClass(e.call(this,t,this.className))});if(u)for(t=(e||"").match(w)||[];s>a;a++)if(n=this[a],r=1===n.nodeType&&(n.className?(" "+n.className+" ").replace(X," "):" ")){o=0;while(i=t[o++])0>r.indexOf(" "+i+" ")&&(r+=i+" ");n.className=b.trim(r)}return this},removeClass:function(e){var t,n,r,i,o,a=0,s=this.length,u=0===arguments.length||"string"==typeof e&&e;if(b.isFunction(e)) return 
this.each(function(t){b(this).removeClass(e.call(this,t,this.className))});if(u)for(t=(e||"").match(w)||[];s>a;a++)if(n=this[a],r=1===n.nodeType&&(n.className?(" "+n.className+" ").replace(X," "):"")){o=0;while(i=t[o++])while(r.indexOf(" "+i+" ")>=0)r=r.replace(" "+i+" "," ");n.className=e?b.trim(r):""}return this},toggleClass:function(e,t){var n=typeof e,r="boolean"==typeof t;return b.isFunction(e)?this.each(function(n){b(this).toggleClass(e.call(this,n,this.className,t),t)}):this.each(function(){if("string"===n){var o,a=0,s=b(this),u=t,l=e.match(w)||[];while(o=l[a++])u=r?u:!s.hasClass(o),s[u?"addClass":"removeClass"](o)}else(n===i||"boolean"===n)&&(this.className&&b._data(this,"__className__",this.className),this.className=this.className||e===!1?"":b._data(this,"__className__")||"")})},hasClass:function(e){var t=" "+e+" ",n=0,r=this.length;for(;r>n;n++)if(1===this[n].nodeType&&(" "+this[n].className+" ").replace(X," ").indexOf(t)>=0)return!0;return!1},val:function(e ){var n,r,i,o=this[0];{if(arguments.length)return i=b.isFunction(e),this.each(function(n){var o,a=b(this);1===this.nodeType&&(o=i?e.call(this,n,a.val()):e,null==o?o="":"number"==typeof o?o+="":b.isArray(o)&&(o=b.map(o,function(e){return null==e?"":e+""})),r=b.valHooks[this.type]||b.valHooks[this.nodeName.toLowerCase()],r&&"set"in r&&r.set(this,o,"value")!==t||(this.value=o))});if(o)return r=b.valHooks[o.type]||b.valHooks[o.nodeName.toLowerCase()],r&&"get"in r&&(n=r.get(o,"value"))!==t?n:(n=o.value,"string"==typeof n?n.replace(U,""):null==n?"":n)}}}),b.extend({valHooks:{option:{get:function(e){var t=e.attributes.value;return!t||t.specified?e.value:e.text}},select:{get:function(e){var t,n,r=e.options,i=e.selectedIndex,o="select-one"===e.type||0>i,a=o?null:[],s=o?i+1:r.length,u=0>i?s:o?i:0;for(;s>u;u++)if(n=r[u],!(!n.selected&&u!==i||(b.support.optDisabled?n.disabled:null!==n.getAttribute("disabled"))||n.parentNode.disabled&&b.nodeName(n.parentNode,"optgroup"))){if(t=b(n).val() ,o)return 
t;a.push(t)}return a},set:function(e,t){var n=b.makeArray(t);return b(e).find("option").each(function(){this.selected=b.inArray(b(this).val(),n)>=0}),n.length||(e.selectedIndex=-1),n}}},attr:function(e,n,r){var o,a,s,u=e.nodeType;if(e&&3!==u&&8!==u&&2!==u)return typeof e.getAttribute===i?b.prop(e,n,r):(a=1!==u||!b.isXMLDoc(e),a&&(n=n.toLowerCase(),o=b.attrHooks[n]||(J.test(n)?z:I)),r===t?o&&a&&"get"in o&&null!==(s=o.get(e,n))?s:(typeof e.getAttribute!==i&&(s=e.getAttribute(n)),null==s?t:s):null!==r?o&&a&&"set"in o&&(s=o.set(e,r,n))!==t?s:(e.setAttribute(n,r+""),r):(b.removeAttr(e,n),t))},removeAttr:function(e,t){var n,r,i=0,o=t&&t.match(w);if(o&&1===e.nodeType)while(n=o[i++])r=b.propFix[n]||n,J.test(n)?!Q&&G.test(n)?e[b.camelCase("default-"+n)]=e[r]=!1:e[r]=!1:b.attr(e,n,""),e.removeAttribute(Q?n:r)},attrHooks:{type:{set:function(e,t){if(!b.support.radioValue&&"radio"===t&&b.nodeName(e,"input")){var n=e.value;return e.setAttribute("type",t),n&&(e.value=n),t}}}},pro pFix:{tabindex:"tabIndex",readonly:"readOnly","for":"htmlFor","class":"className",maxlength:"maxLength",cellspacing:"cellSpacing",cellpadding:"cellPadding",rowspan:"rowSpan",colspan:"colSpan",usemap:"useMap",frameborder:"frameBorder",contenteditable:"contentEditable"},prop:function(e,n,r){var i,o,a,s=e.nodeType;if(e&&3!==s&&8!==s&&2!==s)return a=1!==s||!b.isXMLDoc(e),a&&(n=b.propFix[n]||n,o=b.propHooks[n]),r!==t?o&&"set"in o&&(i=o.set(e,r,n))!==t?i:e[n]=r:o&&"get"in o&&null!==(i=o.get(e,n))?i:e[n]},propHooks:{tabIndex:{get:function(e){var n=e.getAttributeNode("tabindex");return n&&n.specified?parseInt(n.value,10):V.test(e.nodeName)||Y.test(e.nodeName)&&e.href?0:t}}}}),z={get:function(e,n){var r=b.prop(e,n),i="boolean"==typeof r&&e.getAttribute(n),o="boolean"==typeof r?K&&Q?null!=i:G.test(n)?e[b.camelCase("default-"+n)]:!!i:e.getAttributeNode(n);return o&&o.value!==!1?n.toLowerCase():t},set:function(e,t,n){return t===!1?b.removeAttr(e,n):K&&Q||!G.test(n)?e.setAttribute(!Q&&b. 
propFix[n]||n,n):e[b.camelCase("default-"+n)]=e[n]=!0,n}},K&&Q||(b.attrHooks.value={get:function(e,n){var r=e.getAttributeNode(n);return b.nodeName(e,"input")?e.defaultValue:r&&r.specified?r.value:t},set:function(e,n,r){return b.nodeName(e,"input")?(e.defaultValue=n,t):I&&I.set(e,n,r)}}),Q||(I=b.valHooks.button={get:function(e,n){var r=e.getAttributeNode(n);return r&&("id"===n||"name"===n||"coords"===n?""!==r.value:r.specified)?r.value:t},set:function(e,n,r){var i=e.getAttributeNode(r);return i||e.setAttributeNode(i=e.ownerDocument.createAttribute(r)),i.value=n+="","value"===r||n===e.getAttribute(r)?n:t}},b.attrHooks.contenteditable={get:I.get,set:function(e,t,n){I.set(e,""===t?!1:t,n)}},b.each(["width","height"],function(e,n){b.attrHooks[n]=b.extend(b.attrHooks[n],{set:function(e,r){return""===r?(e.setAttribute(n,"auto"),r):t}})})),b.support.hrefNormalized||(b.each(["href","src","width","height"],function(e,n){b.attrHooks[n]=b.extend(b.attrHooks[n],{get:function(e){var r=e. getAttribute(n,2);return null==r?t:r}})}),b.each(["href","src"],function(e,t){b.propHooks[t]={get:function(e){return e.getAttribute(t,4)}}})),b.support.style||(b.attrHooks.style={get:function(e){return e.style.cssText||t},set:function(e,t){return e.style.cssText=t+""}}),b.support.optSelected||(b.propHooks.selected=b.extend(b.propHooks.selected,{get:function(e){var t=e.parentNode;return t&&(t.selectedIndex,t.parentNode&&t.parentNode.selectedIndex),null}})),b.support.enctype||(b.propFix.enctype="encoding"),b.support.checkOn||b.each(["radio","checkbox"],function(){b.valHooks[this]={get:function(e){return null===e.getAttribute("value")?"on":e.value}}}),b.each(["radio","checkbox"],function(){b.valHooks[this]=b.extend(b.valHooks[this],{set:function(e,n){return b.isArray(n)?e.checked=b.inArray(b(e).val(),n)>=0:t}})});var Z=/^(?:input|select|textarea)$/i,et=/^key/,tt=/^(?:mouse|contextmenu)|click/,nt=/^(?:focusinfocus|focusoutblur)$/,rt=/^([^.]*)(?:.(.+)|)$/;function it(){return!0} function 
ot(){return!1}b.event={global:{},add:function(e,n,r,o,a){var s,u,l,c,p,f,d,h,g,m,y,v=b._data(e);if(v){r.handler&&(c=r,r=c.handler,a=c.selector),r.guid||(r.guid=b.guid++),(u=v.events)||(u=v.events={}),(f=v.handle)||(f=v.handle=function(e){return typeof b===i||e&&b.event.triggered===e.type?t:b.event.dispatch.apply(f.elem,arguments)},f.elem=e),n=(n||"").match(w)||[""],l=n.length;while(l--)s=rt.exec(n[l])||[],g=y=s[1],m=(s[2]||"").split(".").sort(),p=b.event.special[g]||{},g=(a?p.delegateType:p.bindType)||g,p=b.event.special[g]||{},d=b.extend({type:g,origType:y,data:o,handler:r,guid:r.guid,selector:a,needsContext:a&&b.expr.match.needsContext.test(a),namespace:m.join(".")},c),(h=u[g])||(h=u[g]=[],h.delegateCount=0,p.setup&&p.setup.call(e,o,m,f)!==!1||(e.addEventListener?e.addEventListener(g,f,!1):e.attachEvent&&e.attachEvent("on"+g,f))),p.add&&(p.add.call(e,d),d.handler.guid||(d.handler.guid=r.guid)),a?h.splice(h.delegateCount++,0,d):h.push(d),b.event.global[g]=!0;e=null }},remove:function(e,t,n,r,i){var o,a,s,u,l,c,p,f,d,h,g,m=b.hasData(e)&&b._data(e);if(m&&(c=m.events)){t=(t||"").match(w)||[""],l=t.length;while(l--)if(s=rt.exec(t[l])||[],d=g=s[1],h=(s[2]||"").split(".").sort(),d){p=b.event.special[d]||{},d=(r?p.delegateType:p.bindType)||d,f=c[d]||[],s=s[2]&&RegExp("(^|\.)"+h.join("\.(?:.*\.|)")+"(\.|$)"),u=o=f.length;while(o--)a=f[o],!i&&g!==a.origType||n&&n.guid!==a.guid||s&&!s.test(a.namespace)||r&&r!==a.selector&&("**"!==r||!a.selector)||(f.splice(o,1),a.selector&&f.delegateCount--,p.remove&&p.remove.call(e,a));u&&!f.length&&(p.teardown&&p.teardown.call(e,h,m.handle)!==!1||b.removeEvent(e,d,m.handle),delete c[d])}else for(d in c)b.event.remove(e,d+t[l],n,r,!0);b.isEmptyObject(c)&&(delete m.handle,b._removeData(e,"events"))}},trigger:function(n,r,i,a){var s,u,l,c,p,f,d,h=[i||o],g=y.call(n,"type")?n.type:n,m=y.call(n,"namespace")?n.namespace.split("."):[];if(l=f=i=i||o,3!==i.nodeType&&8!==i.nodeType&&!nt.test(g+b.event.triggered)&&(g. 
indexOf(".")>=0&&(m=g.split("."),g=m.shift(),m.sort()),u=0>g.indexOf(":")&&"on"+g,n=n[b.expando]?n:new b.Event(g,"object"==typeof n&&n),n.isTrigger=!0,n.namespace=m.join("."),n.namespace_re=n.namespace?RegExp("(^|\.)"+m.join("\.(?:.*\.|)")+"(\.|$)"):null,n.result=t,n.target||(n.target=i),r=null==r?[n]:b.makeArray(r,[n]),p=b.event.special[g]||{},a||!p.trigger||p.trigger.apply(i,r)!==!1)){if(!a&&!p.noBubble&&!b.isWindow(i)){for(c=p.delegateType||g,nt.test(c+g)||(l=l.parentNode);l;l=l.parentNode)h.push(l),f=l;f===(i.ownerDocument||o)&&h.push(f.defaultView||f.parentWindow||e)}d=0;while((l=h[d++])&&!n.isPropagationStopped())n.type=d>1?c:p.bindType||g,s=(b._data(l,"events")||{})[n.type]&&b._data(l,"handle"),s&&s.apply(l,r),s=u&&l[u],s&&b.acceptData(l)&&s.apply&&s.apply(l,r)===!1&&n.preventDefault();if(n.type=g,!(a||n.isDefaultPrevented()||p._default&&p._default.apply(i.ownerDocument,r)!==!1||"click"===g&&b.nodeName(i,"a")||!b.acceptData(i)||!u||!i[g]||b.isWindow(i))){f=i[u],f& &(i[u]=null),b.event.triggered=g;try{i[g]()}catch(v){}b.event.triggered=t,f&&(i[u]=f)}return n.result}},dispatch:function(e){e=b.event.fix(e);var n,r,i,o,a,s=[],u=h.call(arguments),l=(b._data(this,"events")||{})[e.type]||[],c=b.event.special[e.type]||{};if(u[0]=e,e.delegateTarget=this,!c.preDispatch||c.preDispatch.call(this,e)!==!1){s=b.event.handlers.call(this,e,l),n=0;while((o=s[n++])&&!e.isPropagationStopped()){e.currentTarget=o.elem,a=0;while((i=o.handlers[a++])&&!e.isImmediatePropagationStopped())(!e.namespace_re||e.namespace_re.test(i.namespace))&&(e.handleObj=i,e.data=i.data,r=((b.event.special[i.origType]||{}).handle||i.handler).apply(o.elem,u),r!==t&&(e.result=r)===!1&&(e.preventDefault(),e.stopPropagation()))}return c.postDispatch&&c.postDispatch.call(this,e),e.result}},handlers:function(e,n){var r,i,o,a,s=[],u=n.delegateCount,l=e.target;if(u&&l.nodeType&&(!e.button||"click"!==e.type))for(;l!=this;l=l.parentNode||this)if(1===l.nodeType&&(l.disabled!==!0||"click"!== 
e.type)){for(o=[],a=0;u>a;a++)i=n[a],r=i.selector+" ",o[r]===t&&(o[r]=i.needsContext?b(r,this).index(l)>=0:b.find(r,this,null,[l]).length),o[r]&&o.push(i);o.length&&s.push({elem:l,handlers:o})}return n.length>u&&s.push({elem:this,handlers:n.slice(u)}),s},fix:function(e){if(e[b.expando])return e;var t,n,r,i=e.type,a=e,s=this.fixHooks[i];s||(this.fixHooks[i]=s=tt.test(i)?this.mouseHooks:et.test(i)?this.keyHooks:{}),r=s.props?this.props.concat(s.props):this.props,e=new b.Event(a),t=r.length;while(t--)n=r[t],e[n]=a[n];return e.target||(e.target=a.srcElement||o),3===e.target.nodeType&&(e.target=e.target.parentNode),e.metaKey=!!e.metaKey,s.filter?s.filter(e,a):e},props:"altKey bubbles cancelable ctrlKey currentTarget eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "),fixHooks:{},keyHooks:{props:"char charCode key keyCode".split(" "),filter:function(e,t){return null==e.which&&(e.which=null!=t.charCode?t.charCode:t.keyCode),e}},mouseHooks:{props:"butto n buttons clientX clientY fromElement offsetX offsetY pageX pageY screenX screenY toElement".split(" "),filter:function(e,n){var r,i,a,s=n.button,u=n.fromElement;return null==e.pageX&&null!=n.clientX&&(i=e.target.ownerDocument||o,a=i.documentElement,r=i.body,e.pageX=n.clientX+(a&&a.scrollLeft||r&&r.scrollLeft||0)-(a&&a.clientLeft||r&&r.clientLeft||0),e.pageY=n.clientY+(a&&a.scrollTop||r&&r.scrollTop||0)-(a&&a.clientTop||r&&r.clientTop||0)),!e.relatedTarget&&u&&(e.relatedTarget=u===e.target?n.toElement:u),e.which||s===t||(e.which=1&s?1:2&s?3:4&s?2:0),e}},special:{load:{noBubble:!0},click:{trigger:function(){return b.nodeName(this,"input")&&"checkbox"===this.type&&this.click?(this.click(),!1):t}},focus:{trigger:function(){if(this!==o.activeElement&&this.focus)try{return this.focus(),!1}catch(e){}},delegateType:"focusin"},blur:{trigger:function(){return this===o.activeElement&&this.blur?(this.blur(),!1):t},delegateType:"focusout"},beforeunload:{postDispatch:function(e){e.result 
!==t&&(e.originalEvent.returnValue=e.result)}}},simulate:function(e,t,n,r){var i=b.extend(new b.Event,n,{type:e,isSimulated:!0,originalEvent:{}});r?b.event.trigger(i,null,t):b.event.dispatch.call(t,i),i.isDefaultPrevented()&&n.preventDefault()}},b.removeEvent=o.removeEventListener?function(e,t,n){e.removeEventListener&&e.removeEventListener(t,n,!1)}:function(e,t,n){var r="on"+t;e.detachEvent&&(typeof e[r]===i&&(e[r]=null),e.detachEvent(r,n))},b.Event=function(e,n){return this instanceof b.Event?(e&&e.type?(this.originalEvent=e,this.type=e.type,this.isDefaultPrevented=e.defaultPrevented||e.returnValue===!1||e.getPreventDefault&&e.getPreventDefault()?it:ot):this.type=e,n&&b.extend(this,n),this.timeStamp=e&&e.timeStamp||b.now(),this[b.expando]=!0,t):new b.Event(e,n)},b.Event.prototype={isDefaultPrevented:ot,isPropagationStopped:ot,isImmediatePropagationStopped:ot,preventDefault:function(){var e=this.originalEvent;this.isDefaultPrevented=it,e&&(e.preventDefault?e.preventDefault( ):e.returnValue=!1)},stopPropagation:function(){var e=this.originalEvent;this.isPropagationStopped=it,e&&(e.stopPropagation&&e.stopPropagation(),e.cancelBubble=!0)},stopImmediatePropagation:function(){this.isImmediatePropagationStopped=it,this.stopPropagation()}},b.each({mouseenter:"mouseover",mouseleave:"mouseout"},function(e,t){b.event.special[e]={delegateType:t,bindType:t,handle:function(e){var n,r=this,i=e.relatedTarget,o=e.handleObj; - return(!i||i!==r&&!b.contains(r,i))&&(e.type=o.origType,n=o.handler.apply(this,arguments),e.type=t),n}}}),b.support.submitBubbles||(b.event.special.submit={setup:function(){return b.nodeName(this,"form")?!1:(b.event.add(this,"click._submit keypress._submit",function(e){var n=e.target,r=b.nodeName(n,"input")||b.nodeName(n,"button")?n.form:t;r&&!b._data(r,"submitBubbles")&&(b.event.add(r,"submit._submit",function(e){e._submit_bubble=!0}),b._data(r,"submitBubbles",!0))}),t)},postDispatch:function(e){e._submit_bubble&&(delete 
e._submit_bubble,this.parentNode&&!e.isTrigger&&b.event.simulate("submit",this.parentNode,e,!0))},teardown:function(){return b.nodeName(this,"form")?!1:(b.event.remove(this,"._submit"),t)}}),b.support.changeBubbles||(b.event.special.change={setup:function(){return Z.test(this.nodeName)?(("checkbox"===this.type||"radio"===this.type)&&(b.event.add(this,"propertychange._change",function(e){"checked"===e.originalEvent.propertyName&&(this._just_changed=!0)}) ,b.event.add(this,"click._change",function(e){this._just_changed&&!e.isTrigger&&(this._just_changed=!1),b.event.simulate("change",this,e,!0)})),!1):(b.event.add(this,"beforeactivate._change",function(e){var t=e.target;Z.test(t.nodeName)&&!b._data(t,"changeBubbles")&&(b.event.add(t,"change._change",function(e){!this.parentNode||e.isSimulated||e.isTrigger||b.event.simulate("change",this.parentNode,e,!0)}),b._data(t,"changeBubbles",!0))}),t)},handle:function(e){var n=e.target;return this!==n||e.isSimulated||e.isTrigger||"radio"!==n.type&&"checkbox"!==n.type?e.handleObj.handler.apply(this,arguments):t},teardown:function(){return b.event.remove(this,"._change"),!Z.test(this.nodeName)}}),b.support.focusinBubbles||b.each({focus:"focusin",blur:"focusout"},function(e,t){var n=0,r=function(e){b.event.simulate(t,e.target,b.event.fix(e),!0)};b.event.special[t]={setup:function(){0===n++&&o.addEventListener(e,r,!0)},teardown:function(){0===--n&&o.removeEventListener(e,r,!0)}}}),b.fn.exten d({on:function(e,n,r,i,o){var a,s;if("object"==typeof e){"string"!=typeof n&&(r=r||n,n=t);for(a in e)this.on(a,n,r,e[a],o);return this}if(null==r&&null==i?(i=n,r=n=t):null==i&&("string"==typeof n?(i=r,r=t):(i=r,r=n,n=t)),i===!1)i=ot;else if(!i)return this;return 1===o&&(s=i,i=function(e){return b().off(e),s.apply(this,arguments)},i.guid=s.guid||(s.guid=b.guid++)),this.each(function(){b.event.add(this,e,i,r,n)})},one:function(e,t,n,r){return this.on(e,t,n,r,1)},off:function(e,n,r){var i,o;if(e&&e.preventDefault&&e.handleObj)return 
i=e.handleObj,b(e.delegateTarget).off(i.namespace?i.origType+"."+i.namespace:i.origType,i.selector,i.handler),this;if("object"==typeof e){for(o in e)this.off(o,n,e[o]);return this}return(n===!1||"function"==typeof n)&&(r=n,n=t),r===!1&&(r=ot),this.each(function(){b.event.remove(this,e,r,n)})},bind:function(e,t,n){return this.on(e,null,t,n)},unbind:function(e,t){return this.off(e,null,t)},delegate:function(e,t,n,r){return this.on(t,e,n,r)},undelegate :function(e,t,n){return 1===arguments.length?this.off(e,"**"):this.off(t,e||"**",n)},trigger:function(e,t){return this.each(function(){b.event.trigger(e,t,this)})},triggerHandler:function(e,n){var r=this[0];return r?b.event.trigger(e,n,r,!0):t}}),function(e,t){var n,r,i,o,a,s,u,l,c,p,f,d,h,g,m,y,v,x="sizzle"+-new Date,w=e.document,T={},N=0,C=0,k=it(),E=it(),S=it(),A=typeof t,j=1<<31,D=[],L=D.pop,H=D.push,q=D.slice,M=D.indexOf||function(e){var t=0,n=this.length;for(;n>t;t++)if(this[t]===e)return t;return-1},_="[\x20\t\r\n\f]",F="(?:\\.|[\w-]|[^\x00-\xa0])+",O=F.replace("w","w#"),B="([*^$|!~]?=)",P="\["+_+"*("+F+")"+_+"*(?:"+B+_+"*(?:(['"])((?:\\.|[^\\])*?)\3|("+O+")|)|)"+_+"*\]",R=":("+F+")(?:\(((['"])((?:\\.|[^\\])*?)\3|((?:\\.|[^\\()[\]]|"+P.replace(3,8)+")*)|.*)\)|)",W=RegExp("^"+_+"+|((?:^|[^\\])(?:\\.)*)"+_+"+$","g"),$=RegExp("^"+_+"*,"+_+"*"),I=RegExp("^"+_+"*([\x20\t\r\n\f>+~])"+_+"*"),z=RegExp(R),X=RegExp("^"+O+"$"),U={ID:RegExp ("^#("+F+")"),CLASS:RegExp("^\.("+F+")"),NAME:RegExp("^\[name=['"]?("+F+")['"]?\]"),TAG:RegExp("^("+F.replace("w","w*")+")"),ATTR:RegExp("^"+P),PSEUDO:RegExp("^"+R),CHILD:RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\("+_+"*(even|odd|(([+-]|)(\d*)n|)"+_+"*(?:([+-]|)"+_+"*(\d+)|))"+_+"*\)|)","i"),needsContext:RegExp("^"+_+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\("+_+"*((?:-\d)?\d*)"+_+"*\)|)(?=[^-]|$)","i")},V=/[\x20\t\r\n\f]*[+~]/,Y=/^[^{]+{\s*[native 
code/,J=/^(?:#([\w-]+)|(\w+)|.([\w-]+))$/,G=/^(?:input|select|textarea|button)$/i,Q=/^h\d$/i,K=/'|\/g,Z=/=[\x20\t\r\n\f]*([^'"]]*)[\x20\t\r\n\f]*]/g,et=/\([\da-fA-F]{1,6}[\x20\t\r\n\f]?|.)/g,tt=function(e,t){var n="0x"+t-65536;return n!==n?t:0>n?String.fromCharCode(n+65536):String.fromCharCode(55296|n>>10,56320|1023&n)};try{q.call(w.documentElement.childNodes,0)[0].nodeType}catch(nt){q=function(e){var t,n=[];while(t=this[e++])n.push(t);return n}}function rt(e){return Y.test(e+"")}functi on it(){var e,t=[];return e=function(n,r){return t.push(n+=" ")>i.cacheLength&&delete e[t.shift()],e[n]=r}}function ot(e){return e[x]=!0,e}function at(e){var t=p.createElement("div");try{return e(t)}catch(n){return!1}finally{t=null}}function st(e,t,n,r){var i,o,a,s,u,l,f,g,m,v;if((t?t.ownerDocument||t:w)!==p&&c(t),t=t||p,n=n||[],!e||"string"!=typeof e)return n;if(1!==(s=t.nodeType)&&9!==s)return[];if(!d&&!r){if(i=J.exec(e))if(a=i[1]){if(9===s){if(o=t.getElementById(a),!o||!o.parentNode)return n;if(o.id===a)return n.push(o),n}else if(t.ownerDocument&&(o=t.ownerDocument.getElementById(a))&&y(t,o)&&o.id===a)return n.push(o),n}else{if(i[2])return H.apply(n,q.call(t.getElementsByTagName(e),0)),n;if((a=i[3])&&T.getByClassName&&t.getElementsByClassName)return H.apply(n,q.call(t.getElementsByClassName(a),0)),n}if(T.qsa&&!h.test(e)){if(f=!0,g=x,m=t,v=9===s&&e,1===s&&"object"!==t.nodeName.toLowerCase()){l=ft(e),(f=t.getAttribute("id"))?g=f.replace(K,"\$&"):t.setAttribute("id",g),g="[ id='"+g+"'] ",u=l.length;while(u--)l[u]=g+dt(l[u]);m=V.test(e)&&t.parentNode||t,v=l.join(",")}if(v)try{return H.apply(n,q.call(m.querySelectorAll(v),0)),n}catch(b){}finally{f||t.removeAttribute("id")}}}return wt(e.replace(W,"$1"),t,n,r)}a=st.isXML=function(e){var t=e&&(e.ownerDocument||e).documentElement;return t?"HTML"!==t.nodeName:!1},c=st.setDocument=function(e){var n=e?e.ownerDocument||e:w;return n!==p&&9===n.nodeType&&n.documentElement?(p=n,f=n.documentElement,d=a(n),T.tagNameNoComments=at(function(e){return 
e.appendChild(n.createComment("")),!e.getElementsByTagName("*").length}),T.attributes=at(function(e){e.innerHTML="<select></select>";var t=typeof e.lastChild.getAttribute("multiple");return"boolean"!==t&&"string"!==t}),T.getByClassName=at(function(e){return e.innerHTML="<div class='hidden e'></div><div class='hidden'></div>",e.getElementsByClassName&&e.getElementsByClassName("e").length?(e.lastChild.className="e",2===e.getElementsByClassName("e").length):!1}),T.getB yName=at(function(e){e.id=x+0,e.innerHTML="<a name='"+x+"'></a><div name='"+x+"'></div>",f.insertBefore(e,f.firstChild);var t=n.getElementsByName&&n.getElementsByName(x).length===2+n.getElementsByName(x+0).length;return T.getIdNotName=!n.getElementById(x),f.removeChild(e),t}),i.attrHandle=at(function(e){return e.innerHTML="<a href='#'></a>",e.firstChild&&typeof e.firstChild.getAttribute!==A&&"#"===e.firstChild.getAttribute("href")})?{}:{href:function(e){return e.getAttribute("href",2)},type:function(e){return e.getAttribute("type")}},T.getIdNotName?(i.find.ID=function(e,t){if(typeof t.getElementById!==A&&!d){var n=t.getElementById(e);return n&&n.parentNode?[n]:[]}},i.filter.ID=function(e){var t=e.replace(et,tt);return function(e){return e.getAttribute("id")===t}}):(i.find.ID=function(e,n){if(typeof n.getElementById!==A&&!d){var r=n.getElementById(e);return r?r.id===e||typeof r.getAttributeNode!==A&&r.getAttributeNode("id").value===e?[r]:t:[]}},i.filter.ID=function(e){var t=e .replace(et,tt);return function(e){var n=typeof e.getAttributeNode!==A&&e.getAttributeNode("id");return n&&n.value===t}}),i.find.TAG=T.tagNameNoComments?function(e,n){return typeof n.getElementsByTagName!==A?n.getElementsByTagName(e):t}:function(e,t){var n,r=[],i=0,o=t.getElementsByTagName(e);if("*"===e){while(n=o[i++])1===n.nodeType&&r.push(n);return r}return o},i.find.NAME=T.getByName&&function(e,n){return typeof n.getElementsByName!==A?n.getElementsByName(name):t},i.find.CLASS=T.getByClassName&&function(e,n){return typeof 
n.getElementsByClassName===A||d?t:n.getElementsByClassName(e)},g=[],h=[":focus"],(T.qsa=rt(n.querySelectorAll))&&(at(function(e){e.innerHTML="<select><option selected=''></option></select>",e.querySelectorAll("[selected]").length||h.push("\["+_+"*(?:checked|disabled|ismap|multiple|readonly|selected|value)"),e.querySelectorAll(":checked").length||h.push(":checked")}),at(function(e){e.innerHTML="<input type='hidden' i=''/>",e.querySelectorAll("[i^='']").l ength&&h.push("[*^$]="+_+"*(?:""|'')"),e.querySelectorAll(":enabled").length||h.push(":enabled",":disabled"),e.querySelectorAll("*,:x"),h.push(",.*:")})),(T.matchesSelector=rt(m=f.matchesSelector||f.mozMatchesSelector||f.webkitMatchesSelector||f.oMatchesSelector||f.msMatchesSelector))&&at(function(e){T.disconnectedMatch=m.call(e,"div"),m.call(e,"[s!='']:x"),g.push("!=",R)}),h=RegExp(h.join("|")),g=RegExp(g.join("|")),y=rt(f.contains)||f.compareDocumentPosition?function(e,t){var n=9===e.nodeType?e.documentElement:e,r=t&&t.parentNode;return e===r||!(!r||1!==r.nodeType||!(n.contains?n.contains(r):e.compareDocumentPosition&&16&e.compareDocumentPosition(r)))}:function(e,t){if(t)while(t=t.parentNode)if(t===e)return!0;return!1},v=f.compareDocumentPosition?function(e,t){var r;return e===t?(u=!0,0):(r=t.compareDocumentPosition&&e.compareDocumentPosition&&e.compareDocumentPosition(t))?1&r||e.parentNode&&11===e.parentNode.nodeType?e===n||y(w,e)?-1:t===n||y(w,t)?1:0:4&r?-1:1:e.compare DocumentPosition?-1:1}:function(e,t){var r,i=0,o=e.parentNode,a=t.parentNode,s=[e],l=[t];if(e===t)return u=!0,0;if(!o||!a)return e===n?-1:t===n?1:o?-1:a?1:0;if(o===a)return ut(e,t);r=e;while(r=r.parentNode)s.unshift(r);r=t;while(r=r.parentNode)l.unshift(r);while(s[i]===l[i])i++;return i?ut(s[i],l[i]):s[i]===w?-1:l[i]===w?1:0},u=!1,[0,0].sort(v),T.detectDuplicates=u,p):p},st.matches=function(e,t){return 
st(e,null,null,t)},st.matchesSelector=function(e,t){if((e.ownerDocument||e)!==p&&c(e),t=t.replace(Z,"='$1']"),!(!T.matchesSelector||d||g&&g.test(t)||h.test(t)))try{var n=m.call(e,t);if(n||T.disconnectedMatch||e.document&&11!==e.document.nodeType)return n}catch(r){}return st(t,p,null,[e]).length>0},st.contains=function(e,t){return(e.ownerDocument||e)!==p&&c(e),y(e,t)},st.attr=function(e,t){var n;return(e.ownerDocument||e)!==p&&c(e),d||(t=t.toLowerCase()),(n=i.attrHandle[t])?n(e):d||T.attributes?e.getAttribute(t):((n=e.getAttributeNode(t))||e.getAttribute(t))&&e[t]===!0?t:n&&n.s pecified?n.value:null},st.error=function(e){throw Error("Syntax error, unrecognized expression: "+e)},st.uniqueSort=function(e){var t,n=[],r=1,i=0;if(u=!T.detectDuplicates,e.sort(v),u){for(;t=e[r];r++)t===e[r-1]&&(i=n.push(r));while(i--)e.splice(n[i],1)}return e};function ut(e,t){var n=t&&e,r=n&&(~t.sourceIndex||j)-(~e.sourceIndex||j);if(r)return r;if(n)while(n=n.nextSibling)if(n===t)return-1;return e?1:-1}function lt(e){return function(t){var n=t.nodeName.toLowerCase();return"input"===n&&t.type===e}}function ct(e){return function(t){var n=t.nodeName.toLowerCase();return("input"===n||"button"===n)&&t.type===e}}function pt(e){return ot(function(t){return t=+t,ot(function(n,r){var i,o=e([],n.length,t),a=o.length;while(a--)n[i=o[a]]&&(n[i]=!(r[i]=n[i]))})})}o=st.getText=function(e){var t,n="",r=0,i=e.nodeType;if(i){if(1===i||9===i||11===i){if("string"==typeof e.textContent)return e.textContent;for(e=e.firstChild;e;e=e.nextSibling)n+=o(e)}else if(3===i||4===i)return e.nodeValue} else for(;t=e[r];r++)n+=o(t);return n},i=st.selectors={cacheLength:50,createPseudo:ot,match:U,find:{},relative:{">":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(et,tt),e[3]=(e[4]||e[5]||"").replace(et,tt),"~="===e[2]&&(e[3]=" "+e[3]+" "),e.slice(0,4)},CHILD:function(e){return 
e[1]=e[1].toLowerCase(),"nth"===e[1].slice(0,3)?(e[3]||st.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*("even"===e[3]||"odd"===e[3])),e[5]=+(e[7]+e[8]||"odd"===e[3])):e[3]&&st.error(e[0]),e},PSEUDO:function(e){var t,n=!e[5]&&e[2];return U.CHILD.test(e[0])?null:(e[4]?e[2]=e[4]:n&&z.test(n)&&(t=ft(n,!0))&&(t=n.indexOf(")",n.length-t)-n.length)&&(e[0]=e[0].slice(0,t),e[2]=n.slice(0,t)),e.slice(0,3))}},filter:{TAG:function(e){return"*"===e?function(){return!0}:(e=e.replace(et,tt).toLowerCase(),function(t){return t.nodeName&&t.nodeName.toLowerCase()===e})},CLASS:function(e){var t=k[e+" "]; return t||(t=RegExp("(^|"+_+")"+e+"("+_+"|$)"))&&k(e,function(e){return t.test(e.className||typeof e.getAttribute!==A&&e.getAttribute("class")||"")})},ATTR:function(e,t,n){return function(r){var i=st.attr(r,e);return null==i?"!="===t:t?(i+="","="===t?i===n:"!="===t?i!==n:"^="===t?n&&0===i.indexOf(n):"*="===t?n&&i.indexOf(n)>-1:"$="===t?n&&i.slice(-n.length)===n:"~="===t?(" "+i+" ").indexOf(n)>-1:"|="===t?i===n||i.slice(0,n.length+1)===n+"-":!1):!0}},CHILD:function(e,t,n,r,i){var o="nth"!==e.slice(0,3),a="last"!==e.slice(-4),s="of-type"===t;return 1===r&&0===i?function(e){return!!e.parentNode}:function(t,n,u){var l,c,p,f,d,h,g=o!==a?"nextSibling":"previousSibling",m=t.parentNode,y=s&&t.nodeName.toLowerCase(),v=!u&&!s;if(m){if(o){while(g){p=t;while(p=p[g])if(s?p.nodeName.toLowerCase()===y:1===p.nodeType)return!1;h=g="only"===e&&!h&&"nextSibling"}return!0}if(h=[a?m.firstChild:m.lastChild],a&&v){c=m[x]||(m[x]={}),l=c[e]||[],d=l[0]===N&&l[1],f=l[0]===N&&l[2],p=d&&m.childNodes[d]; while(p=++d&&p&&p[g]||(f=d=0)||h.pop())if(1===p.nodeType&&++f&&p===t){c[e]=[N,d,f];break}}else if(v&&(l=(t[x]||(t[x]={}))[e])&&l[0]===N)f=l[1];else while(p=++d&&p&&p[g]||(f=d=0)||h.pop())if((s?p.nodeName.toLowerCase()===y:1===p.nodeType)&&++f&&(v&&((p[x]||(p[x]={}))[e]=[N,f]),p===t))break;return f-=i,f===r||0===f%r&&f/r>=0}}},PSEUDO:function(e,t){var 
n,r=i.pseudos[e]||i.setFilters[e.toLowerCase()]||st.error("unsupported pseudo: "+e);return r[x]?r(t):r.length>1?(n=[e,e,"",t],i.setFilters.hasOwnProperty(e.toLowerCase())?ot(function(e,n){var i,o=r(e,t),a=o.length;while(a--)i=M.call(e,o[a]),e[i]=!(n[i]=o[a])}):function(e){return r(e,0,n)}):r}},pseudos:{not:ot(function(e){var t=[],n=[],r=s(e.replace(W,"$1"));return r[x]?ot(function(e,t,n,i){var o,a=r(e,null,i,[]),s=e.length;while(s--)(o=a[s])&&(e[s]=!(t[s]=o))}):function(e,i,o){return t[0]=e,r(t,null,o,n),!n.pop()}}),has:ot(function(e){return function(t){return st(e,t).length>0}}),contains:ot(function(e){return function(t){return (t.textContent||t.innerText||o(t)).indexOf(e)>-1}}),lang:ot(function(e){return X.test(e||"")||st.error("unsupported lang: "+e),e=e.replace(et,tt).toLowerCase(),function(t){var n;do if(n=d?t.getAttribute("xml:lang")||t.getAttribute("lang"):t.lang)return n=n.toLowerCase(),n===e||0===n.indexOf(e+"-");while((t=t.parentNode)&&1===t.nodeType);return!1}}),target:function(t){var n=e.location&&e.location.hash;return n&&n.slice(1)===t.id},root:function(e){return e===f},focus:function(e){return e===p.activeElement&&(!p.hasFocus||p.hasFocus())&&!!(e.type||e.href||~e.tabIndex)},enabled:function(e){return e.disabled===!1},disabled:function(e){return e.disabled===!0},checked:function(e){var t=e.nodeName.toLowerCase();return"input"===t&&!!e.checked||"option"===t&&!!e.selected},selected:function(e){return e.parentNode&&e.parentNode.selectedIndex,e.selected===!0},empty:function(e){for(e=e.firstChild;e;e=e.nextSibling)if(e.nodeName>"@"||3===e.nodeType||4===e.nodeType)return!1;return!0},parent: function(e){return!i.pseudos.empty(e)},header:function(e){return Q.test(e.nodeName)},input:function(e){return G.test(e.nodeName)},button:function(e){var t=e.nodeName.toLowerCase();return"input"===t&&"button"===e.type||"button"===t},text:function(e){var 
t;return"input"===e.nodeName.toLowerCase()&&"text"===e.type&&(null==(t=e.getAttribute("type"))||t.toLowerCase()===e.type)},first:pt(function(){return[0]}),last:pt(function(e,t){return[t-1]}),eq:pt(function(e,t,n){return[0>n?n+t:n]}),even:pt(function(e,t){var n=0;for(;t>n;n+=2)e.push(n);return e}),odd:pt(function(e,t){var n=1;for(;t>n;n+=2)e.push(n);return e}),lt:pt(function(e,t,n){var r=0>n?n+t:n;for(;--r>=0;)e.push(r);return e}),gt:pt(function(e,t,n){var r=0>n?n+t:n;for(;t>++r;)e.push(r);return e})}};for(n in{radio:!0,checkbox:!0,file:!0,password:!0,image:!0})i.pseudos[n]=lt(n);for(n in{submit:!0,reset:!0})i.pseudos[n]=ct(n);function ft(e,t){var n,r,o,a,s,u,l,c=E[e+" "];if(c)return t?0:c.slice(0);s=e,u=[],l=i.preFilter;while( s){(!n||(r=$.exec(s)))&&(r&&(s=s.slice(r[0].length)||s),u.push(o=[])),n=!1,(r=I.exec(s))&&(n=r.shift(),o.push({value:n,type:r[0].replace(W," ")}),s=s.slice(n.length));for(a in i.filter)!(r=U[a].exec(s))||l[a]&&!(r=l[a](r))||(n=r.shift(),o.push({value:n,type:a,matches:r}),s=s.slice(n.length));if(!n)break}return t?s.length:s?st.error(e):E(e,u).slice(0)}function dt(e){var t=0,n=e.length,r="";for(;n>t;t++)r+=e[t].value;return r}function ht(e,t,n){var i=t.dir,o=n&&"parentNode"===i,a=C++;return t.first?function(t,n,r){while(t=t[i])if(1===t.nodeType||o)return e(t,n,r)}:function(t,n,s){var u,l,c,p=N+" "+a;if(s){while(t=t[i])if((1===t.nodeType||o)&&e(t,n,s))return!0}else while(t=t[i])if(1===t.nodeType||o)if(c=t[x]||(t[x]={}),(l=c[i])&&l[0]===p){if((u=l[1])===!0||u===r)return u===!0}else if(l=c[i]=[p],l[1]=e(t,n,s)||r,l[1]===!0)return!0}}function gt(e){return e.length>1?function(t,n,r){var i=e.length;while(i--)if(!e[i](t,n,r))return!1;return!0}:e[0]}function mt(e,t,n,r,i){var o,a=[],s =0,u=e.length,l=null!=t;for(;u>s;s++)(o=e[s])&&(!n||n(o,r,i))&&(a.push(o),l&&t.push(s));return a}function yt(e,t,n,r,i,o){return r&&!r[x]&&(r=yt(r)),i&&!i[x]&&(i=yt(i,o)),ot(function(o,a,s,u){var 
l,c,p,f=[],d=[],h=a.length,g=o||xt(t||"*",s.nodeType?[s]:s,[]),m=!e||!o&&t?g:mt(g,f,e,s,u),y=n?i||(o?e:h||r)?[]:a:m;if(n&&n(m,y,s,u),r){l=mt(y,d),r(l,[],s,u),c=l.length;while(c--)(p=l[c])&&(y[d[c]]=!(m[d[c]]=p))}if(o){if(i||e){if(i){l=[],c=y.length;while(c--)(p=y[c])&&l.push(m[c]=p);i(null,y=[],l,u)}c=y.length;while(c--)(p=y[c])&&(l=i?M.call(o,p):f[c])>-1&&(o[l]=!(a[l]=p))}}else y=mt(y===a?y.splice(h,y.length):y),i?i(null,a,y,u):H.apply(a,y)})}function vt(e){var t,n,r,o=e.length,a=i.relative[e[0].type],s=a||i.relative[" "],u=a?1:0,c=ht(function(e){return e===t},s,!0),p=ht(function(e){return M.call(t,e)>-1},s,!0),f=[function(e,n,r){return!a&&(r||n!==l)||((t=n).nodeType?c(e,n,r):p(e,n,r))}];for(;o>u;u++)if(n=i.relative[e[u].type])f=[ht(gt(f),n)];else{if(n=i.filter[e[u].type].apply(nul l,e[u].matches),n[x]){for(r=++u;o>r;r++)if(i.relative[e[r].type])break;return yt(u>1&>(f),u>1&&dt(e.slice(0,u-1)).replace(W,"$1"),n,r>u&&vt(e.slice(u,r)),o>r&&vt(e=e.slice(r)),o>r&&dt(e))}f.push(n)}return gt(f)}function bt(e,t){var n=0,o=t.length>0,a=e.length>0,s=function(s,u,c,f,d){var h,g,m,y=[],v=0,b="0",x=s&&[],w=null!=d,T=l,C=s||a&&i.find.TAG("*",d&&u.parentNode||u),k=N+=null==T?1:Math.random()||.1;for(w&&(l=u!==p&&u,r=n);null!=(h=C[b]);b++){if(a&&h){g=0;while(m=e[g++])if(m(h,u,c)){f.push(h);break}w&&(N=k,r=++n)}o&&((h=!m&&h)&&v--,s&&x.push(h))}if(v+=b,o&&b!==v){g=0;while(m=t[g++])m(x,y,u,c);if(s){if(v>0)while(b--)x[b]||y[b]||(y[b]=L.call(f));y=mt(y)}H.apply(f,y),w&&!s&&y.length>0&&v+t.length>1&&st.uniqueSort(f)}return w&&(N=k,l=T),x};return o?ot(s):s}s=st.compile=function(e,t){var n,r=[],i=[],o=S[e+" "];if(!o){t||(t=ft(e)),n=t.length;while(n--)o=vt(t[n]),o[x]?r.push(o):i.push(o);o=S(e,bt(i,r))}return o};function xt(e,t,n){var r=0,i=t.length;for(;i>r;r++)st(e,t[r],n); return n}function wt(e,t,n,r){var 
o,a,u,l,c,p=ft(e);if(!r&&1===p.length){if(a=p[0]=p[0].slice(0),a.length>2&&"ID"===(u=a[0]).type&&9===t.nodeType&&!d&&i.relative[a[1].type]){if(t=i.find.ID(u.matches[0].replace(et,tt),t)[0],!t)return n;e=e.slice(a.shift().value.length)}o=U.needsContext.test(e)?0:a.length;while(o--){if(u=a[o],i.relative[l=u.type])break;if((c=i.find[l])&&(r=c(u.matches[0].replace(et,tt),V.test(a[0].type)&&t.parentNode||t))){if(a.splice(o,1),e=r.length&&dt(a),!e)return H.apply(n,q.call(r,0)),n;break}}}return s(e,p)(r,t,d,n,V.test(e)),n}i.pseudos.nth=i.pseudos.eq;function Tt(){}i.filters=Tt.prototype=i.pseudos,i.setFilters=new Tt,c(),st.attr=b.attr,b.find=st,b.expr=st.selectors,b.expr[":"]=b.expr.pseudos,b.unique=st.uniqueSort,b.text=st.getText,b.isXMLDoc=st.isXML,b.contains=st.contains}(e);var at=/Until$/,st=/^(?:parents|prev(?:Until|All))/,ut=/^.[^:#[.,]*$/,lt=b.expr.match.needsContext,ct={children:!0,contents:!0,next:!0,prev:!0};b.fn.extend({find:function(e) {var t,n,r,i=this.length;if("string"!=typeof e)return r=this,this.pushStack(b(e).filter(function(){for(t=0;i>t;t++)if(b.contains(r[t],this))return!0}));for(n=[],t=0;i>t;t++)b.find(e,this[t],n);return n=this.pushStack(i>1?b.unique(n):n),n.selector=(this.selector?this.selector+" ":"")+e,n},has:function(e){var t,n=b(e,this),r=n.length;return this.filter(function(){for(t=0;r>t;t++)if(b.contains(this,n[t]))return!0})},not:function(e){return this.pushStack(ft(this,e,!1))},filter:function(e){return this.pushStack(ft(this,e,!0))},is:function(e){return!!e&&("string"==typeof e?lt.test(e)?b(e,this.context).index(this[0])>=0:b.filter(e,this).length>0:this.filter(e).length>0)},closest:function(e,t){var n,r=0,i=this.length,o=[],a=lt.test(e)||"string"!=typeof e?b(e,t||this.context):0;for(;i>r;r++){n=this[r];while(n&&n.ownerDocument&&n!==t&&11!==n.nodeType){if(a?a.index(n)>-1:b.find.matchesSelector(n,e)){o.push(n);break}n=n.parentNode}}return this.pushStack(o.length>1?b.unique(o):o)},index: function(e){return e?"string"==typeof 
e?b.inArray(this[0],b(e)):b.inArray(e.jquery?e[0]:e,this):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(e,t){var n="string"==typeof e?b(e,t):b.makeArray(e&&e.nodeType?[e]:e),r=b.merge(this.get(),n);return this.pushStack(b.unique(r))},addBack:function(e){return this.add(null==e?this.prevObject:this.prevObject.filter(e))}}),b.fn.andSelf=b.fn.addBack;function pt(e,t){do e=e[t];while(e&&1!==e.nodeType);return e}b.each({parent:function(e){var t=e.parentNode;return t&&11!==t.nodeType?t:null},parents:function(e){return b.dir(e,"parentNode")},parentsUntil:function(e,t,n){return b.dir(e,"parentNode",n)},next:function(e){return pt(e,"nextSibling")},prev:function(e){return pt(e,"previousSibling")},nextAll:function(e){return b.dir(e,"nextSibling")},prevAll:function(e){return b.dir(e,"previousSibling")},nextUntil:function(e,t,n){return b.dir(e,"nextSibling",n)},prevUntil:function(e,t,n){return b.dir(e,"previousSibling",n) },siblings:function(e){return b.sibling((e.parentNode||{}).firstChild,e)},children:function(e){return b.sibling(e.firstChild)},contents:function(e){return b.nodeName(e,"iframe")?e.contentDocument||e.contentWindow.document:b.merge([],e.childNodes)}},function(e,t){b.fn[e]=function(n,r){var i=b.map(this,t,n);return at.test(e)||(r=n),r&&"string"==typeof r&&(i=b.filter(r,i)),i=this.length>1&&!ct[e]?b.unique(i):i,this.length>1&&st.test(e)&&(i=i.reverse()),this.pushStack(i)}}),b.extend({filter:function(e,t,n){return n&&(e=":not("+e+")"),1===t.length?b.find.matchesSelector(t[0],e)?[t[0]]:[]:b.find.matches(e,t)},dir:function(e,n,r){var i=[],o=e[n];while(o&&9!==o.nodeType&&(r===t||1!==o.nodeType||!b(o).is(r)))1===o.nodeType&&i.push(o),o=o[n];return i},sibling:function(e,t){var n=[];for(;e;e=e.nextSibling)1===e.nodeType&&e!==t&&n.push(e);return n}});function ft(e,t,n){if(t=t||0,b.isFunction(t))return b.grep(e,function(e,r){var i=!!t.call(e,r,e);return i===n});if(t.nodeType)return b.gre p(e,function(e){return e===t===n});if("string"==typeof 
t){var r=b.grep(e,function(e){return 1===e.nodeType});if(ut.test(t))return b.filter(t,r,!n);t=b.filter(t,r)}return b.grep(e,function(e){return b.inArray(e,t)>=0===n})}function dt(e){var t=ht.split("|"),n=e.createDocumentFragment();if(n.createElement)while(t.length)n.createElement(t.pop());return n}var ht="abbr|article|aside|audio|bdi|canvas|data|datalist|details|figcaption|figure|footer|header|hgroup|mark|meter|nav|output|progress|section|summary|time|video",gt=/ jQuery\d+="(?:null|\d+)"/g,mt=RegExp("<(?:"+ht+")[\s/>]","i"),yt=/^\s+/,vt=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)/>/gi,bt=/<([\w:]+)/,xt=/<tbody/i,wt=/<|&#?\w+;/,Tt=/<(?:script|style|link)/i,Nt=/^(?:checkbox|radio)$/i,Ct=/checked\s*(?:[^=]|=\s*.checked.)/i,kt=/^$|/(?:java|ecma)script/i,Et=/^true/(.*)/,St=/^\s*<!(?:[CDATA[|--)|(?:]]|--)>\s*$/g,At={option:[1,"<select multiple='multiple'>","</select>"],legend:[1,"<fieldset>","</ fieldset>"],area:[1,"<map>","</map>"],param:[1,"<object>","</object>"],thead:[1,"<table>","</table>"],tr:[2,"<table><tbody>","</tbody></table>"],col:[2,"<table><tbody></tbody><colgroup>","</colgroup></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],_default:b.support.htmlSerialize?[0,"",""]:[1,"X<div>","</div>"]},jt=dt(o),Dt=jt.appendChild(o.createElement("div"));At.optgroup=At.option,At.tbody=At.tfoot=At.colgroup=At.caption=At.thead,At.th=At.td,b.fn.extend({text:function(e){return b.access(this,function(e){return e===t?b.text(this):this.empty().append((this[0]&&this[0].ownerDocument||o).createTextNode(e))},null,e,arguments.length)},wrapAll:function(e){if(b.isFunction(e))return this.each(function(t){b(this).wrapAll(e.call(this,t))});if(this[0]){var t=b(e,this[0].ownerDocument).eq(0).clone(!0);this[0].parentNode&&t.insertBefore(this[0]),t.map(function(){var e=this;while(e.firstChild&&1===e.firstChild.nodeType)e=e.firstChild;return e}).append(this)}return this},wr apInner:function(e){return 
b.isFunction(e)?this.each(function(t){b(this).wrapInner(e.call(this,t))}):this.each(function(){var t=b(this),n=t.contents();n.length?n.wrapAll(e):t.append(e)})},wrap:function(e){var t=b.isFunction(e);return this.each(function(n){b(this).wrapAll(t?e.call(this,n):e)})},unwrap:function(){return this.parent().each(function(){b.nodeName(this,"body")||b(this).replaceWith(this.childNodes)}).end()},append:function(){return this.domManip(arguments,!0,function(e){(1===this.nodeType||11===this.nodeType||9===this.nodeType)&&this.appendChild(e)})},prepend:function(){return this.domManip(arguments,!0,function(e){(1===this.nodeType||11===this.nodeType||9===this.nodeType)&&this.insertBefore(e,this.firstChild)})},before:function(){return this.domManip(arguments,!1,function(e){this.parentNode&&this.parentNode.insertBefore(e,this)})},after:function(){return this.domManip(arguments,!1,function(e){this.parentNode&&this.parentNode.insertBefore(e,this.nextSibling)})},rem ove:function(e,t){var n,r=0;for(;null!=(n=this[r]);r++)(!e||b.filter(e,[n]).length>0)&&(t||1!==n.nodeType||b.cleanData(Ot(n)),n.parentNode&&(t&&b.contains(n.ownerDocument,n)&&Mt(Ot(n,"script")),n.parentNode.removeChild(n)));return this},empty:function(){var e,t=0;for(;null!=(e=this[t]);t++){1===e.nodeType&&b.cleanData(Ot(e,!1));while(e.firstChild)e.removeChild(e.firstChild);e.options&&b.nodeName(e,"select")&&(e.options.length=0)}return this},clone:function(e,t){return e=null==e?!1:e,t=null==t?e:t,this.map(function(){return b.clone(this,e,t)})},html:function(e){return b.access(this,function(e){var n=this[0]||{},r=0,i=this.length;if(e===t)return 1===n.nodeType?n.innerHTML.replace(gt,""):t;if(!("string"!=typeof e||Tt.test(e)||!b.support.htmlSerialize&&mt.test(e)||!b.support.leadingWhitespace&&yt.test(e)||At[(bt.exec(e)||["",""])[1].toLowerCase()])){e=e.replace(vt,"<$1></$2>");try{for(;i>r;r++)n=this[r]||{},1===n.nodeType&&(b.cleanData(Ot(n,!1)),n.innerHTML=e);n=0}catch(o){}}n&& 
this.empty().append(e)},null,e,arguments.length)},replaceWith:function(e){var t=b.isFunction(e);return t||"string"==typeof e||(e=b(e).not(this).detach()),this.domManip([e],!0,function(e){var t=this.nextSibling,n=this.parentNode;n&&(b(this).remove(),n.insertBefore(e,t))})},detach:function(e){return this.remove(e,!0)},domManip:function(e,n,r){e=f.apply([],e);var i,o,a,s,u,l,c=0,p=this.length,d=this,h=p-1,g=e[0],m=b.isFunction(g);if(m||!(1>=p||"string"!=typeof g||b.support.checkClone)&&Ct.test(g))return this.each(function(i){var o=d.eq(i);m&&(e[0]=g.call(this,i,n?o.html():t)),o.domManip(e,n,r)});if(p&&(l=b.buildFragment(e,this[0].ownerDocument,!1,this),i=l.firstChild,1===l.childNodes.length&&(l=i),i)){for(n=n&&b.nodeName(i,"tr"),s=b.map(Ot(l,"script"),Ht),a=s.length;p>c;c++)o=l,c!==h&&(o=b.clone(o,!0,!0),a&&b.merge(s,Ot(o,"script"))),r.call(n&&b.nodeName(this[c],"table")?Lt(this[c],"tbody"):this[c],o,c);if(a)for(u=s[s.length-1].ownerDocument,b.map(s,qt),c=0;a>c;c++)o=s[c],kt.te st(o.type||"")&&!b._data(o,"globalEval")&&b.contains(u,o)&&(o.src?b.ajax({url:o.src,type:"GET",dataType:"script",async:!1,global:!1,"throws":!0}):b.globalEval((o.text||o.textContent||o.innerHTML||"").replace(St,"")));l=i=null}return this}});function Lt(e,t){return e.getElementsByTagName(t)[0]||e.appendChild(e.ownerDocument.createElement(t))}function Ht(e){var t=e.getAttributeNode("type");return e.type=(t&&t.specified)+"/"+e.type,e}function qt(e){var t=Et.exec(e.type);return t?e.type=t[1]:e.removeAttribute("type"),e}function Mt(e,t){var n,r=0;for(;null!=(n=e[r]);r++)b._data(n,"globalEval",!t||b._data(t[r],"globalEval"))}function _t(e,t){if(1===t.nodeType&&b.hasData(e)){var n,r,i,o=b._data(e),a=b._data(t,o),s=o.events;if(s){delete a.handle,a.events={};for(n in s)for(r=0,i=s[n].length;i>r;r++)b.event.add(t,n,s[n][r])}a.data&&(a.data=b.extend({},a.data))}}function Ft(e,t){var n,r,i;if(1===t.nodeType){if(n=t.nodeName.toLowerCase(),!b.support.noCloneEvent&&t[b.expando]){i=b._data( t);for(r in 
i.events)b.removeEvent(t,r,i.handle);t.removeAttribute(b.expando)}"script"===n&&t.text!==e.text?(Ht(t).text=e.text,qt(t)):"object"===n?(t.parentNode&&(t.outerHTML=e.outerHTML),b.support.html5Clone&&e.innerHTML&&!b.trim(t.innerHTML)&&(t.innerHTML=e.innerHTML)):"input"===n&&Nt.test(e.type)?(t.defaultChecked=t.checked=e.checked,t.value!==e.value&&(t.value=e.value)):"option"===n?t.defaultSelected=t.selected=e.defaultSelected:("input"===n||"textarea"===n)&&(t.defaultValue=e.defaultValue)}}b.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(e,t){b.fn[e]=function(e){var n,r=0,i=[],o=b(e),a=o.length-1;for(;a>=r;r++)n=r===a?this:this.clone(!0),b(o[r])[t](n),d.apply(i,n.get());return this.pushStack(i)}});function Ot(e,n){var r,o,a=0,s=typeof e.getElementsByTagName!==i?e.getElementsByTagName(n||"*"):typeof e.querySelectorAll!==i?e.querySelectorAll(n||"*"):t;if(!s)for(s=[],r=e.childNodes||e;null!=(o=r[a]) ;a++)!n||b.nodeName(o,n)?s.push(o):b.merge(s,Ot(o,n));return n===t||n&&b.nodeName(e,n)?b.merge([e],s):s}function Bt(e){Nt.test(e.type)&&(e.defaultChecked=e.checked)}b.extend({clone:function(e,t,n){var r,i,o,a,s,u=b.contains(e.ownerDocument,e);if(b.support.html5Clone||b.isXMLDoc(e)||!mt.test("<"+e.nodeName+">")?o=e.cloneNode(!0):(Dt.innerHTML=e.outerHTML,Dt.removeChild(o=Dt.firstChild)),!(b.support.noCloneEvent&&b.support.noCloneChecked||1!==e.nodeType&&11!==e.nodeType||b.isXMLDoc(e)))for(r=Ot(o),s=Ot(e),a=0;null!=(i=s[a]);++a)r[a]&&Ft(i,r[a]);if(t)if(n)for(s=s||Ot(e),r=r||Ot(o),a=0;null!=(i=s[a]);a++)_t(i,r[a]);else _t(e,o);return r=Ot(o,"script"),r.length>0&&Mt(r,!u&&Ot(e,"script")),r=s=i=null,o},buildFragment:function(e,t,n,r){var i,o,a,s,u,l,c,p=e.length,f=dt(t),d=[],h=0;for(;p>h;h++)if(o=e[h],o||0===o)if("object"===b.type(o))b.merge(d,o.nodeType?[o]:o);else if(wt.test(o)){s=s||f.appendChild(t.createElement("div")),u=(bt.exec(o)||["",""])[1].toLowerCase(),c=At[u]||At._def 
ault,s.innerHTML=c[1]+o.replace(vt,"<$1></$2>")+c[2],i=c[0];while(i--)s=s.lastChild;if(!b.support.leadingWhitespace&&yt.test(o)&&d.push(t.createTextNode(yt.exec(o)[0])),!b.support.tbody){o="table"!==u||xt.test(o)?"<table>"!==c[1]||xt.test(o)?0:s:s.firstChild,i=o&&o.childNodes.length;while(i--)b.nodeName(l=o.childNodes[i],"tbody")&&!l.childNodes.length&&o.removeChild(l) -}b.merge(d,s.childNodes),s.textContent="";while(s.firstChild)s.removeChild(s.firstChild);s=f.lastChild}else d.push(t.createTextNode(o));s&&f.removeChild(s),b.support.appendChecked||b.grep(Ot(d,"input"),Bt),h=0;while(o=d[h++])if((!r||-1===b.inArray(o,r))&&(a=b.contains(o.ownerDocument,o),s=Ot(f.appendChild(o),"script"),a&&Mt(s),n)){i=0;while(o=s[i++])kt.test(o.type||"")&&n.push(o)}return s=null,f},cleanData:function(e,t){var n,r,o,a,s=0,u=b.expando,l=b.cache,p=b.support.deleteExpando,f=b.event.special;for(;null!=(n=e[s]);s++)if((t||b.acceptData(n))&&(o=n[u],a=o&&l[o])){if(a.events)for(r in a.events)f[r]?b.event.remove(n,r):b.removeEvent(n,r,a.handle);l[o]&&(delete l[o],p?delete n[u]:typeof n.removeAttribute!==i?n.removeAttribute(u):n[u]=null,c.push(o))}}});var Pt,Rt,Wt,$t=/alpha([^)]*)/i,It=/opacity\s*=\s*([^)]*)/,zt=/^(top|right|bottom|left)$/,Xt=/^(none|table(?!-c[ea]).+)/,Ut=/^margin/,Vt=RegExp("^("+x+")(.*)$","i"),Yt=RegExp("^("+x+")(?!px)[a-z%]+$","i"),Jt=RegExp("^([+- ])=("+x+")","i"),Gt={BODY:"block"},Qt={position:"absolute",visibility:"hidden",display:"block"},Kt={letterSpacing:0,fontWeight:400},Zt=["Top","Right","Bottom","Left"],en=["Webkit","O","Moz","ms"];function tn(e,t){if(t in e)return t;var n=t.charAt(0).toUpperCase()+t.slice(1),r=t,i=en.length;while(i--)if(t=en[i]+n,t in e)return t;return r}function nn(e,t){return e=t||e,"none"===b.css(e,"display")||!b.contains(e.ownerDocument,e)}function rn(e,t){var 
n,r,i,o=[],a=0,s=e.length;for(;s>a;a++)r=e[a],r.style&&(o[a]=b._data(r,"olddisplay"),n=r.style.display,t?(o[a]||"none"!==n||(r.style.display=""),""===r.style.display&&nn(r)&&(o[a]=b._data(r,"olddisplay",un(r.nodeName)))):o[a]||(i=nn(r),(n&&"none"!==n||!i)&&b._data(r,"olddisplay",i?n:b.css(r,"display"))));for(a=0;s>a;a++)r=e[a],r.style&&(t&&"none"!==r.style.display&&""!==r.style.display||(r.style.display=t?o[a]||"":"none"));return e}b.fn.extend({css:function(e,n){return b.access(this,function(e,n,r){var i,o,a={},s=0;if(b.isArray(n)){ for(o=Rt(e),i=n.length;i>s;s++)a[n[s]]=b.css(e,n[s],!1,o);return a}return r!==t?b.style(e,n,r):b.css(e,n)},e,n,arguments.length>1)},show:function(){return rn(this,!0)},hide:function(){return rn(this)},toggle:function(e){var t="boolean"==typeof e;return this.each(function(){(t?e:nn(this))?b(this).show():b(this).hide()})}}),b.extend({cssHooks:{opacity:{get:function(e,t){if(t){var n=Wt(e,"opacity");return""===n?"1":n}}}},cssNumber:{columnCount:!0,fillOpacity:!0,fontWeight:!0,lineHeight:!0,opacity:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{"float":b.support.cssFloat?"cssFloat":"styleFloat"},style:function(e,n,r,i){if(e&&3!==e.nodeType&&8!==e.nodeType&&e.style){var o,a,s,u=b.camelCase(n),l=e.style;if(n=b.cssProps[u]||(b.cssProps[u]=tn(l,u)),s=b.cssHooks[n]||b.cssHooks[u],r===t)return s&&"get"in s&&(o=s.get(e,!1,i))!==t?o:l[n];if(a=typeof r,"string"===a&&(o=Jt.exec(r))&&(r=(o[1]+1)*o[2]+parseFloat(b.css(e,n)),a="number"),!(null==r||"number"===a&&isNaN(r)||("number"!==a|| b.cssNumber[u]||(r+="px"),b.support.clearCloneStyle||""!==r||0!==n.indexOf("background")||(l[n]="inherit"),s&&"set"in s&&(r=s.set(e,r,i))===t)))try{l[n]=r}catch(c){}}},css:function(e,n,r,i){var o,a,s,u=b.camelCase(n);return n=b.cssProps[u]||(b.cssProps[u]=tn(e.style,u)),s=b.cssHooks[n]||b.cssHooks[u],s&&"get"in s&&(a=s.get(e,!0,r)),a===t&&(a=Wt(e,n,i)),"normal"===a&&n in 
Kt&&(a=Kt[n]),""===r||r?(o=parseFloat(a),r===!0||b.isNumeric(o)?o||0:a):a},swap:function(e,t,n,r){var i,o,a={};for(o in t)a[o]=e.style[o],e.style[o]=t[o];i=n.apply(e,r||[]);for(o in t)e.style[o]=a[o];return i}}),e.getComputedStyle?(Rt=function(t){return e.getComputedStyle(t,null)},Wt=function(e,n,r){var i,o,a,s=r||Rt(e),u=s?s.getPropertyValue(n)||s[n]:t,l=e.style;return s&&(""!==u||b.contains(e.ownerDocument,e)||(u=b.style(e,n)),Yt.test(u)&&Ut.test(n)&&(i=l.width,o=l.minWidth,a=l.maxWidth,l.minWidth=l.maxWidth=l.width=u,u=s.width,l.width=i,l.minWidth=o,l.maxWidth=a)),u}):o.documentElement.currentStyle&&(Rt=f unction(e){return e.currentStyle},Wt=function(e,n,r){var i,o,a,s=r||Rt(e),u=s?s[n]:t,l=e.style;return null==u&&l&&l[n]&&(u=l[n]),Yt.test(u)&&!zt.test(n)&&(i=l.left,o=e.runtimeStyle,a=o&&o.left,a&&(o.left=e.currentStyle.left),l.left="fontSize"===n?"1em":u,u=l.pixelLeft+"px",l.left=i,a&&(o.left=a)),""===u?"auto":u});function on(e,t,n){var r=Vt.exec(t);return r?Math.max(0,r[1]-(n||0))+(r[2]||"px"):t}function an(e,t,n,r,i){var o=n===(r?"border":"content")?4:"width"===t?1:0,a=0;for(;4>o;o+=2)"margin"===n&&(a+=b.css(e,n+Zt[o],!0,i)),r?("content"===n&&(a-=b.css(e,"padding"+Zt[o],!0,i)),"margin"!==n&&(a-=b.css(e,"border"+Zt[o]+"Width",!0,i))):(a+=b.css(e,"padding"+Zt[o],!0,i),"padding"!==n&&(a+=b.css(e,"border"+Zt[o]+"Width",!0,i)));return a}function sn(e,t,n){var r=!0,i="width"===t?e.offsetWidth:e.offsetHeight,o=Rt(e),a=b.support.boxSizing&&"border-box"===b.css(e,"boxSizing",!1,o);if(0>=i||null==i){if(i=Wt(e,t,o),(0>i||null==i)&&(i=e.style[t]),Yt.test(i))return i;r=a&&(b.support.bo xSizingReliable||i===e.style[t]),i=parseFloat(i)||0}return i+an(e,t,n||(a?"border":"content"),r,o)+"px"}function un(e){var t=o,n=Gt[e];return n||(n=ln(e,t),"none"!==n&&n||(Pt=(Pt||b("<iframe frameborder='0' width='0' height='0'/>").css("cssText","display:block !important")).appendTo(t.documentElement),t=(Pt[0].contentWindow||Pt[0].contentDocument).document,t.write("<!doctype 
html><html><body>"),t.close(),n=ln(e,t),Pt.detach()),Gt[e]=n),n}function ln(e,t){var n=b(t.createElement(e)).appendTo(t.body),r=b.css(n[0],"display");return n.remove(),r}b.each(["height","width"],function(e,n){b.cssHooks[n]={get:function(e,r,i){return r?0===e.offsetWidth&&Xt.test(b.css(e,"display"))?b.swap(e,Qt,function(){return sn(e,n,i)}):sn(e,n,i):t},set:function(e,t,r){var i=r&&Rt(e);return on(e,t,r?an(e,n,r,b.support.boxSizing&&"border-box"===b.css(e,"boxSizing",!1,i),i):0)}}}),b.support.opacity||(b.cssHooks.opacity={get:function(e,t){return It.test((t&&e.currentStyle?e.currentStyle.filter:e.style.f ilter)||"")?.01*parseFloat(RegExp.$1)+"":t?"1":""},set:function(e,t){var n=e.style,r=e.currentStyle,i=b.isNumeric(t)?"alpha(opacity="+100*t+")":"",o=r&&r.filter||n.filter||"";n.zoom=1,(t>=1||""===t)&&""===b.trim(o.replace($t,""))&&n.removeAttribute&&(n.removeAttribute("filter"),""===t||r&&!r.filter)||(n.filter=$t.test(o)?o.replace($t,i):o+" "+i)}}),b(function(){b.support.reliableMarginRight||(b.cssHooks.marginRight={get:function(e,n){return n?b.swap(e,{display:"inline-block"},Wt,[e,"marginRight"]):t}}),!b.support.pixelPosition&&b.fn.position&&b.each(["top","left"],function(e,n){b.cssHooks[n]={get:function(e,r){return r?(r=Wt(e,n),Yt.test(r)?b(e).position()[n]+"px":r):t}}})}),b.expr&&b.expr.filters&&(b.expr.filters.hidden=function(e){return 0>=e.offsetWidth&&0>=e.offsetHeight||!b.support.reliableHiddenOffsets&&"none"===(e.style&&e.style.display||b.css(e,"display"))},b.expr.filters.visible=function(e){return!b.expr.filters.hidden(e)}),b.each({margin:"",padding:"",border:"Width "},function(e,t){b.cssHooks[e+t]={expand:function(n){var r=0,i={},o="string"==typeof n?n.split(" "):[n];for(;4>r;r++)i[e+Zt[r]+t]=o[r]||o[r-2]||o[0];return i}},Ut.test(e)||(b.cssHooks[e+t].set=on)});var cn=/%20/g,pn=/[]$/,fn=/\r?\n/g,dn=/^(?:submit|button|image|reset|file)$/i,hn=/^(?:input|select|textarea|keygen)/i;b.fn.extend({serialize:function(){return 
b.param(this.serializeArray())},serializeArray:function(){return this.map(function(){var e=b.prop(this,"elements");return e?b.makeArray(e):this}).filter(function(){var e=this.type;return this.name&&!b(this).is(":disabled")&&hn.test(this.nodeName)&&!dn.test(e)&&(this.checked||!Nt.test(e))}).map(function(e,t){var n=b(this).val();return null==n?null:b.isArray(n)?b.map(n,function(e){return{name:t.name,value:e.replace(fn,"\r\n")}}):{name:t.name,value:n.replace(fn,"\r\n")}}).get()}}),b.param=function(e,n){var r,i=[],o=function(e,t){t=b.isFunction(t)?t():null==t?"":t,i[i.length]=encodeURIComponent(e)+"="+encodeURIComponent(t)};i f(n===t&&(n=b.ajaxSettings&&b.ajaxSettings.traditional),b.isArray(e)||e.jquery&&!b.isPlainObject(e))b.each(e,function(){o(this.name,this.value)});else for(r in e)gn(r,e[r],n,o);return i.join("&").replace(cn,"+")};function gn(e,t,n,r){var i;if(b.isArray(t))b.each(t,function(t,i){n||pn.test(e)?r(e,i):gn(e+"["+("object"==typeof i?t:"")+"]",i,n,r)});else if(n||"object"!==b.type(t))r(e,t);else for(i in t)gn(e+"["+i+"]",t[i],n,r)}b.each("blur focus focusin focusout load resize scroll unload click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup error contextmenu".split(" "),function(e,t){b.fn[t]=function(e,n){return arguments.length>0?this.on(t,null,e,n):this.trigger(t)}}),b.fn.hover=function(e,t){return this.mouseenter(e).mouseleave(t||e)};var mn,yn,vn=b.now(),bn=/?/,xn=/#.*$/,wn=/([?&])_=[^&]*/,Tn=/^(.*?):[ \t]*([^\r\n]*)\r?$/gm,Nn=/^(?:about|app|app-storage|.+-extension|file|res|widget):$/,Cn=/^(?:GET|HEA D)$/,kn=/^///,En=/^([\w.+-]+:)(?://([^/?#:]*)(?::(\d+)|)|)/,Sn=b.fn.load,An={},jn={},Dn="*/".concat("*");try{yn=a.href}catch(Ln){yn=o.createElement("a"),yn.href="",yn=yn.href}mn=En.exec(yn.toLowerCase())||[];function Hn(e){return function(t,n){"string"!=typeof t&&(n=t,t="*");var 
r,i=0,o=t.toLowerCase().match(w)||[];if(b.isFunction(n))while(r=o[i++])"+"===r[0]?(r=r.slice(1)||"*",(e[r]=e[r]||[]).unshift(n)):(e[r]=e[r]||[]).push(n)}}function qn(e,n,r,i){var o={},a=e===jn;function s(u){var l;return o[u]=!0,b.each(e[u]||[],function(e,u){var c=u(n,r,i);return"string"!=typeof c||a||o[c]?a?!(l=c):t:(n.dataTypes.unshift(c),s(c),!1)}),l}return s(n.dataTypes[0])||!o["*"]&&s("*")}function Mn(e,n){var r,i,o=b.ajaxSettings.flatOptions||{};for(i in n)n[i]!==t&&((o[i]?e:r||(r={}))[i]=n[i]);return r&&b.extend(!0,e,r),e}b.fn.load=function(e,n,r){if("string"!=typeof e&&Sn)return Sn.apply(this,arguments);var i,o,a,s=this,u=e.indexOf(" ");return u>=0&&(i=e.slice(u,e.length),e=e.slice(0,u)), b.isFunction(n)?(r=n,n=t):n&&"object"==typeof n&&(a="POST"),s.length>0&&b.ajax({url:e,type:a,dataType:"html",data:n}).done(function(e){o=arguments,s.html(i?b("<div>").append(b.parseHTML(e)).find(i):e)}).complete(r&&function(e,t){s.each(r,o||[e.responseText,t,e])}),this},b.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(e,t){b.fn[t]=function(e){return this.on(t,e)}}),b.each(["get","post"],function(e,n){b[n]=function(e,r,i,o){return b.isFunction(r)&&(o=o||i,i=r,r=t),b.ajax({url:e,type:n,dataType:o,data:r,success:i})}}),b.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:yn,type:"GET",isLocal:Nn.test(mn[1]),global:!0,processData:!0,async:!0,contentType:"application/x-www-form-urlencoded; charset=UTF-8",accepts:{"*":Dn,text:"text/plain",html:"text/html",xml:"application/xml, text/xml",json:"application/json, text/javascript"},contents:{xml:/xml/,html:/html/,json:/json/},responseFields:{xml:"responseXML",text:"responseText"},con verters:{"* text":e.String,"text html":!0,"text json":b.parseJSON,"text xml":b.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(e,t){return t?Mn(Mn(e,b.ajaxSettings),t):Mn(b.ajaxSettings,e)},ajaxPrefilter:Hn(An),ajaxTransport:Hn(jn),ajax:function(e,n){"object"==typeof 
e&&(n=e,e=t),n=n||{};var r,i,o,a,s,u,l,c,p=b.ajaxSetup({},n),f=p.context||p,d=p.context&&(f.nodeType||f.jquery)?b(f):b.event,h=b.Deferred(),g=b.Callbacks("once memory"),m=p.statusCode||{},y={},v={},x=0,T="canceled",N={readyState:0,getResponseHeader:function(e){var t;if(2===x){if(!c){c={};while(t=Tn.exec(a))c[t[1].toLowerCase()]=t[2]}t=c[e.toLowerCase()]}return null==t?null:t},getAllResponseHeaders:function(){return 2===x?a:null},setRequestHeader:function(e,t){var n=e.toLowerCase();return x||(e=v[n]=v[n]||e,y[e]=t),this},overrideMimeType:function(e){return x||(p.mimeType=e),this},statusCode:function(e){var t;if(e)if(2>x)for(t in e)m[t]=[m[t],e[t]];else N.always(e[N.status]);return this},abort:functi on(e){var t=e||T;return l&&l.abort(t),k(0,t),this}};if(h.promise(N).complete=g.add,N.success=N.done,N.error=N.fail,p.url=((e||p.url||yn)+"").replace(xn,"").replace(kn,mn[1]+"//"),p.type=n.method||n.type||p.method||p.type,p.dataTypes=b.trim(p.dataType||"*").toLowerCase().match(w)||[""],null==p.crossDomain&&(r=En.exec(p.url.toLowerCase()),p.crossDomain=!(!r||r[1]===mn[1]&&r[2]===mn[2]&&(r[3]||("http:"===r[1]?80:443))==(mn[3]||("http:"===mn[1]?80:443)))),p.data&&p.processData&&"string"!=typeof p.data&&(p.data=b.param(p.data,p.traditional)),qn(An,p,n,N),2===x)return N;u=p.global,u&&0===b.active++&&b.event.trigger("ajaxStart"),p.type=p.type.toUpperCase(),p.hasContent=!Cn.test(p.type),o=p.url,p.hasContent||(p.data&&(o=p.url+=(bn.test(o)?"&":"?")+p.data,delete p.data),p.cache===!1&&(p.url=wn.test(o)?o.replace(wn,"$1_="+vn++):o+(bn.test(o)?"&":"?")+"_="+vn++)),p.ifModified&&(b.lastModified[o]&&N.setRequestHeader("If-Modified-Since",b.lastModified[o]),b.etag[o]&&N.setRequestHeader("I f-None-Match",b.etag[o])),(p.data&&p.hasContent&&p.contentType!==!1||n.contentType)&&N.setRequestHeader("Content-Type",p.contentType),N.setRequestHeader("Accept",p.dataTypes[0]&&p.accepts[p.dataTypes[0]]?p.accepts[p.dataTypes[0]]+("*"!==p.dataTypes[0]?", "+Dn+"; q=0.01":""):p.accepts["*"]);for(i in 
p.headers)N.setRequestHeader(i,p.headers[i]);if(p.beforeSend&&(p.beforeSend.call(f,N,p)===!1||2===x))return N.abort();T="abort";for(i in{success:1,error:1,complete:1})N[i](p[i]);if(l=qn(jn,p,n,N)){N.readyState=1,u&&d.trigger("ajaxSend",[N,p]),p.async&&p.timeout>0&&(s=setTimeout(function(){N.abort("timeout")},p.timeout));try{x=1,l.send(y,k)}catch(C){if(!(2>x))throw C;k(-1,C)}}else k(-1,"No Transport");function k(e,n,r,i){var c,y,v,w,T,C=n;2!==x&&(x=2,s&&clearTimeout(s),l=t,a=i||"",N.readyState=e>0?4:0,r&&(w=_n(p,N,r)),e>=200&&300>e||304===e?(p.ifModified&&(T=N.getResponseHeader("Last-Modified"),T&&(b.lastModified[o]=T),T=N.getResponseHeader("etag"),T&&(b.etag[o]=T)),204===e?(c=!0,C ="nocontent"):304===e?(c=!0,C="notmodified"):(c=Fn(p,w),C=c.state,y=c.data,v=c.error,c=!v)):(v=C,(e||!C)&&(C="error",0>e&&(e=0))),N.status=e,N.statusText=(n||C)+"",c?h.resolveWith(f,[y,C,N]):h.rejectWith(f,[N,C,v]),N.statusCode(m),m=t,u&&d.trigger(c?"ajaxSuccess":"ajaxError",[N,p,c?y:v]),g.fireWith(f,[N,C]),u&&(d.trigger("ajaxComplete",[N,p]),--b.active||b.event.trigger("ajaxStop")))}return N},getScript:function(e,n){return b.get(e,t,n,"script")},getJSON:function(e,t,n){return b.get(e,t,n,"json")}});function _n(e,n,r){var i,o,a,s,u=e.contents,l=e.dataTypes,c=e.responseFields;for(s in c)s in r&&(n[c[s]]=r[s]);while("*"===l[0])l.shift(),o===t&&(o=e.mimeType||n.getResponseHeader("Content-Type"));if(o)for(s in u)if(u[s]&&u[s].test(o)){l.unshift(s);break}if(l[0]in r)a=l[0];else{for(s in r){if(!l[0]||e.converters[s+" "+l[0]]){a=s;break}i||(i=s)}a=a||i}return a?(a!==l[0]&&l.unshift(a),r[a]):t}function Fn(e,t){var n,r,i,o,a={},s=0,u=e.dataTypes.slice(),l=u[0];if(e.dataFilter&&(t=e.d ataFilter(t,e.dataType)),u[1])for(i in e.converters)a[i.toLowerCase()]=e.converters[i];for(;r=u[++s];)if("*"!==r){if("*"!==l&&l!==r){if(i=a[l+" "+r]||a["* "+r],!i)for(n in a)if(o=n.split(" "),o[1]===r&&(i=a[l+" "+o[0]]||a["* 
"+o[0]])){i===!0?i=a[n]:a[n]!==!0&&(r=o[0],u.splice(s--,0,r));break}if(i!==!0)if(i&&e["throws"])t=i(t);else try{t=i(t)}catch(c){return{state:"parsererror",error:i?c:"No conversion from "+l+" to "+r}}}l=r}return{state:"success",data:t}}b.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/(?:java|ecma)script/},converters:{"text script":function(e){return b.globalEval(e),e}}}),b.ajaxPrefilter("script",function(e){e.cache===t&&(e.cache=!1),e.crossDomain&&(e.type="GET",e.global=!1)}),b.ajaxTransport("script",function(e){if(e.crossDomain){var n,r=o.head||b("head")[0]||o.documentElement;return{send:function(t,i){n=o.createElement("script"),n.async=!0,e.scriptCharset&&(n.chars et=e.scriptCharset),n.src=e.url,n.onload=n.onreadystatechange=function(e,t){(t||!n.readyState||/loaded|complete/.test(n.readyState))&&(n.onload=n.onreadystatechange=null,n.parentNode&&n.parentNode.removeChild(n),n=null,t||i(200,"success"))},r.insertBefore(n,r.firstChild)},abort:function(){n&&n.onload(t,!0)}}}});var On=[],Bn=/(=)?(?=&|$)|??/;b.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var e=On.pop()||b.expando+"_"+vn++;return this[e]=!0,e}}),b.ajaxPrefilter("json jsonp",function(n,r,i){var o,a,s,u=n.jsonp!==!1&&(Bn.test(n.url)?"url":"string"==typeof n.data&&!(n.contentType||"").indexOf("application/x-www-form-urlencoded")&&Bn.test(n.data)&&"data");return u||"jsonp"===n.dataTypes[0]?(o=n.jsonpCallback=b.isFunction(n.jsonpCallback)?n.jsonpCallback():n.jsonpCallback,u?n[u]=n[u].replace(Bn,"$1"+o):n.jsonp!==!1&&(n.url+=(bn.test(n.url)?"&":"?")+n.jsonp+"="+o),n.converters["script json"]=function(){return s||b.error(o+" was not called"),s[0]},n.dataTypes[0]="json",a=e [o],e[o]=function(){s=arguments},i.always(function(){e[o]=a,n[o]&&(n.jsonpCallback=r.jsonpCallback,On.push(o)),s&&b.isFunction(a)&&a(s[0]),s=a=t}),"script"):t});var Pn,Rn,Wn=0,$n=e.ActiveXObject&&function(){var e;for(e in Pn)Pn[e](t,!0)};function 
In(){try{return new e.XMLHttpRequest}catch(t){}}function zn(){try{return new e.ActiveXObject("Microsoft.XMLHTTP")}catch(t){}}b.ajaxSettings.xhr=e.ActiveXObject?function(){return!this.isLocal&&In()||zn()}:In,Rn=b.ajaxSettings.xhr(),b.support.cors=!!Rn&&"withCredentials"in Rn,Rn=b.support.ajax=!!Rn,Rn&&b.ajaxTransport(function(n){if(!n.crossDomain||b.support.cors){var r;return{send:function(i,o){var a,s,u=n.xhr();if(n.username?u.open(n.type,n.url,n.async,n.username,n.password):u.open(n.type,n.url,n.async),n.xhrFields)for(s in n.xhrFields)u[s]=n.xhrFields[s];n.mimeType&&u.overrideMimeType&&u.overrideMimeType(n.mimeType),n.crossDomain||i["X-Requested-With"]||(i["X-Requested-With"]="XMLHttpRequest");try{for(s in i)u.setRequestHeader(s,i[ s])}catch(l){}u.send(n.hasContent&&n.data||null),r=function(e,i){var s,l,c,p;try{if(r&&(i||4===u.readyState))if(r=t,a&&(u.onreadystatechange=b.noop,$n&&delete Pn[a]),i)4!==u.readyState&&u.abort();else{p={},s=u.status,l=u.getAllResponseHeaders(),"string"==typeof u.responseText&&(p.text=u.responseText);try{c=u.statusText}catch(f){c=""}s||!n.isLocal||n.crossDomain?1223===s&&(s=204):s=p.text?200:404}}catch(d){i||o(-1,d)}p&&o(s,c,p,l)},n.async?4===u.readyState?setTimeout(r):(a=++Wn,$n&&(Pn||(Pn={},b(e).unload($n)),Pn[a]=r),u.onreadystatechange=r):r()},abort:function(){r&&r(t,!0)}}}});var Xn,Un,Vn=/^(?:toggle|show|hide)$/,Yn=RegExp("^(?:([+-])=|)("+x+")([a-z%]*)$","i"),Jn=/queueHooks$/,Gn=[nr],Qn={"*":[function(e,t){var n,r,i=this.createTween(e,t),o=Yn.exec(t),a=i.cur(),s=+a||0,u=1,l=20;if(o){if(n=+o[2],r=o[3]||(b.cssNumber[e]?"":"px"),"px"!==r&&s){s=b.css(i.elem,e,!0)||n||1;do u=u||".5",s/=u,b.style(i.elem,e,s+r);while(u!==(u=i.cur()/a)&&1!==u&&--l)}i.unit=r,i.start=s,i.end=o[1]? 
s+(o[1]+1)*n:n}return i}]};function Kn(){return setTimeout(function(){Xn=t}),Xn=b.now()}function Zn(e,t){b.each(t,function(t,n){var r=(Qn[t]||[]).concat(Qn["*"]),i=0,o=r.length;for(;o>i;i++)if(r[i].call(e,t,n))return})}function er(e,t,n){var r,i,o=0,a=Gn.length,s=b.Deferred().always(function(){delete u.elem}),u=function(){if(i)return!1;var t=Xn||Kn(),n=Math.max(0,l.startTime+l.duration-t),r=n/l.duration||0,o=1-r,a=0,u=l.tweens.length;for(;u>a;a++)l.tweens[a].run(o);return s.notifyWith(e,[l,o,n]),1>o&&u?n:(s.resolveWith(e,[l]),!1)},l=s.promise({elem:e,props:b.extend({},t),opts:b.extend(!0,{specialEasing:{}},n),originalProperties:t,originalOptions:n,startTime:Xn||Kn(),duration:n.duration,tweens:[],createTween:function(t,n){var r=b.Tween(e,l.opts,t,n,l.opts.specialEasing[t]||l.opts.easing);return l.tweens.push(r),r},stop:function(t){var n=0,r=t?l.tweens.length:0;if(i)return this;for(i=!0;r>n;n++)l.tweens[n].run(1);return t?s.resolveWith(e,[l,t]):s.rejectWith(e,[l,t]),this}}),c= l.props;for(tr(c,l.opts.specialEasing);a>o;o++)if(r=Gn[o].call(l,e,c,l.opts))return r;return Zn(l,c),b.isFunction(l.opts.start)&&l.opts.start.call(e,l),b.fx.timer(b.extend(u,{elem:e,anim:l,queue:l.opts.queue})),l.progress(l.opts.progress).done(l.opts.done,l.opts.complete).fail(l.opts.fail).always(l.opts.always)}function tr(e,t){var n,r,i,o,a;for(i in e)if(r=b.camelCase(i),o=t[r],n=e[i],b.isArray(n)&&(o=n[1],n=e[i]=n[0]),i!==r&&(e[r]=n,delete e[i]),a=b.cssHooks[r],a&&"expand"in a){n=a.expand(n),delete e[r];for(i in n)i in e||(e[i]=n[i],t[i]=o)}else t[r]=o}b.Animation=b.extend(er,{tweener:function(e,t){b.isFunction(e)?(t=e,e=["*"]):e=e.split(" ");var n,r=0,i=e.length;for(;i>r;r++)n=e[r],Qn[n]=Qn[n]||[],Qn[n].unshift(t)},prefilter:function(e,t){t?Gn.unshift(e):Gn.push(e)}});function nr(e,t,n){var r,i,o,a,s,u,l,c,p,f=this,d=e.style,h={},g=[],m=e.nodeType&&nn(e);n.queue||(c=b._queueHooks(e,"fx"),null==c.unqueued&&(c.unqueued=0,p=c.empty.fire,c.empty.fire=function(){c.unqueued||p( 
)}),c.unqueued++,f.always(function(){f.always(function(){c.unqueued--,b.queue(e,"fx").length||c.empty.fire()})})),1===e.nodeType&&("height"in t||"width"in t)&&(n.overflow=[d.overflow,d.overflowX,d.overflowY],"inline"===b.css(e,"display")&&"none"===b.css(e,"float")&&(b.support.inlineBlockNeedsLayout&&"inline"!==un(e.nodeName)?d.zoom=1:d.display="inline-block")),n.overflow&&(d.overflow="hidden",b.support.shrinkWrapBlocks||f.always(function(){d.overflow=n.overflow[0],d.overflowX=n.overflow[1],d.overflowY=n.overflow[2]}));for(i in t)if(a=t[i],Vn.exec(a)){if(delete t[i],u=u||"toggle"===a,a===(m?"hide":"show"))continue;g.push(i)}if(o=g.length){s=b._data(e,"fxshow")||b._data(e,"fxshow",{}),"hidden"in s&&(m=s.hidden),u&&(s.hidden=!m),m?b(e).show():f.done(function(){b(e).hide()}),f.done(function(){var t;b._removeData(e,"fxshow");for(t in h)b.style(e,t,h[t])});for(i=0;o>i;i++)r=g[i],l=f.createTween(r,m?s[r]:0),h[r]=s[r]||b.style(e,r),r in s||(s[r]=l.start,m&&(l.end=l.start,l.start="wi dth"===r||"height"===r?1:0))}}function rr(e,t,n,r,i){return new rr.prototype.init(e,t,n,r,i)}b.Tween=rr,rr.prototype={constructor:rr,init:function(e,t,n,r,i,o){this.elem=e,this.prop=n,this.easing=i||"swing",this.options=t,this.start=this.now=this.cur(),this.end=r,this.unit=o||(b.cssNumber[n]?"":"px")},cur:function(){var e=rr.propHooks[this.prop];return e&&e.get?e.get(this):rr.propHooks._default.get(this)},run:function(e){var t,n=rr.propHooks[this.prop];return this.pos=t=this.options.duration?b.easing[this.easing](e,this.options.duration*e,0,1,this.options.duration):e,this.now=(this.end-this.start)*t+this.start,this.options.step&&this.options.step.call(this.elem,this.now,this),n&&n.set?n.set(this):rr.propHooks._default.set(this),this}},rr.prototype.init.prototype=rr.prototype,rr.propHooks={_default:{get:function(e){var t;return null==e.elem[e.prop]||e.elem.style&&null!=e.elem.style[e.prop]?(t=b.css(e.elem,e.prop,""),t&&"auto"!==t?t:0):e.elem[e.prop]},set:function(e){b.fx.step 
[e.prop]?b.fx.step[e.prop](e):e.elem.style&&(null!=e.elem.style[b.cssProps[e.prop]]||b.cssHooks[e.prop])?b.style(e.elem,e.prop,e.now+e.unit):e.elem[e.prop]=e.now}}},rr.propHooks.scrollTop=rr.propHooks.scrollLeft={set:function(e){e.elem.nodeType&&e.elem.parentNode&&(e.elem[e.prop]=e.now)}},b.each(["toggle","show","hide"],function(e,t){var n=b.fn[t];b.fn[t]=function(e,r,i){return null==e||"boolean"==typeof e?n.apply(this,arguments):this.animate(ir(t,!0),e,r,i)}}),b.fn.extend({fadeTo:function(e,t,n,r){return this.filter(nn).css("opacity",0).show().end().animate({opacity:t},e,n,r)},animate:function(e,t,n,r){var i=b.isEmptyObject(e),o=b.speed(t,n,r),a=function(){var t=er(this,b.extend({},e),o);a.finish=function(){t.stop(!0)},(i||b._data(this,"finish"))&&t.stop(!0)};return a.finish=a,i||o.queue===!1?this.each(a):this.queue(o.queue,a)},stop:function(e,n,r){var i=function(e){var t=e.stop;delete e.stop,t(r)};return"string"!=typeof e&&(r=n,n=e,e=t),n&&e!==!1&&this.queue(e||"fx",[]),th is.each(function(){var t=!0,n=null!=e&&e+"queueHooks",o=b.timers,a=b._data(this);if(n)a[n]&&a[n].stop&&i(a[n]);else for(n in a)a[n]&&a[n].stop&&Jn.test(n)&&i(a[n]);for(n=o.length;n--;)o[n].elem!==this||null!=e&&o[n].queue!==e||(o[n].anim.stop(r),t=!1,o.splice(n,1));(t||!r)&&b.dequeue(this,e)})},finish:function(e){return e!==!1&&(e=e||"fx"),this.each(function(){var t,n=b._data(this),r=n[e+"queue"],i=n[e+"queueHooks"],o=b.timers,a=r?r.length:0;for(n.finish=!0,b.queue(this,e,[]),i&&i.cur&&i.cur.finish&&i.cur.finish.call(this),t=o.length;t--;)o[t].elem===this&&o[t].queue===e&&(o[t].anim.stop(!0),o.splice(t,1));for(t=0;a>t;t++)r[t]&&r[t].finish&&r[t].finish.call(this);delete n.finish})}});function ir(e,t){var n,r={height:e},i=0;for(t=t?1:0;4>i;i+=2-t)n=Zt[i],r["margin"+n]=r["padding"+n]=e;return t&&(r.opacity=r.width=e),r}b.each({slideDown:ir("show"),slideUp:ir("hide"),slideToggle:ir("toggle"),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},functio 
n(e,t){b.fn[e]=function(e,n,r){return this.animate(t,e,n,r)}}),b.speed=function(e,t,n){var r=e&&"object"==typeof e?b.extend({},e):{complete:n||!n&&t||b.isFunction(e)&&e,duration:e,easing:n&&t||t&&!b.isFunction(t)&&t};return r.duration=b.fx.off?0:"number"==typeof r.duration?r.duration:r.duration in b.fx.speeds?b.fx.speeds[r.duration]:b.fx.speeds._default,(null==r.queue||r.queue===!0)&&(r.queue="fx"),r.old=r.complete,r.complete=function(){b.isFunction(r.old)&&r.old.call(this),r.queue&&b.dequeue(this,r.queue)},r},b.easing={linear:function(e){return e},swing:function(e){return.5-Math.cos(e*Math.PI)/2}},b.timers=[],b.fx=rr.prototype.init,b.fx.tick=function(){var e,n=b.timers,r=0;for(Xn=b.now();n.length>r;r++)e=n[r],e()||n[r]!==e||n.splice(r--,1);n.length||b.fx.stop(),Xn=t},b.fx.timer=function(e){e()&&b.timers.push(e)&&b.fx.start()},b.fx.interval=13,b.fx.start=function(){Un||(Un=setInterval(b.fx.tick,b.fx.interval))},b.fx.stop=function(){clearInterval(Un),Un=null},b.fx.speeds={slo w:600,fast:200,_default:400},b.fx.step={},b.expr&&b.expr.filters&&(b.expr.filters.animated=function(e){return b.grep(b.timers,function(t){return e===t.elem}).length}),b.fn.offset=function(e){if(arguments.length)return e===t?this:this.each(function(t){b.offset.setOffset(this,e,t)});var n,r,o={top:0,left:0},a=this[0],s=a&&a.ownerDocument;if(s)return n=s.documentElement,b.contains(n,a)?(typeof a.getBoundingClientRect!==i&&(o=a.getBoundingClientRect()),r=or(s),{top:o.top+(r.pageYOffset||n.scrollTop)-(n.clientTop||0),left:o.left+(r.pageXOffset||n.scrollLeft)-(n.clientLeft||0)}):o},b.offset={setOffset:function(e,t,n){var r=b.css(e,"position");"static"===r&&(e.style.position="relative");var i=b(e),o=i.offset(),a=b.css(e,"top"),s=b.css(e,"left"),u=("absolute"===r||"fixed"===r)&&b.inArray("auto",[a,s])>-1,l={},c={},p,f;u?(c=i.position(),p=c.top,f=c.left):(p=parseFloat(a)||0,f=parseFloat(s)||0),b.isFunction(t)&&(t=t.call(e,n,o)),null!=t.top&&(l.top=t.top-o.top+p),null!=t.left&&(l.left 
=t.left-o.left+f),"using"in t?t.using.call(e,l):i.css(l)}},b.fn.extend({position:function(){if(this[0]){var e,t,n={top:0,left:0},r=this[0];return"fixed"===b.css(r,"position")?t=r.getBoundingClientRect():(e=this.offsetParent(),t=this.offset(),b.nodeName(e[0],"html")||(n=e.offset()),n.top+=b.css(e[0],"borderTopWidth",!0),n.left+=b.css(e[0],"borderLeftWidth",!0)),{top:t.top-n.top-b.css(r,"marginTop",!0),left:t.left-n.left-b.css(r,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var e=this.offsetParent||o.documentElement;while(e&&!b.nodeName(e,"html")&&"static"===b.css(e,"position"))e=e.offsetParent;return e||o.documentElement})}}),b.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(e,n){var r=/Y/.test(n);b.fn[e]=function(i){return b.access(this,function(e,i,o){var a=or(e);return o===t?a?n in a?a[n]:a.document.documentElement[i]:e[i]:(a?a.scrollTo(r?b(a).scrollLeft():o,r?o:b(a).scrollTop()):e[i]=o,t)},e,i,arguments.length,null)}});function or (e){return b.isWindow(e)?e:9===e.nodeType?e.defaultView||e.parentWindow:!1}b.each({Height:"height",Width:"width"},function(e,n){b.each({padding:"inner"+e,content:n,"":"outer"+e},function(r,i){b.fn[i]=function(i,o){var a=arguments.length&&(r||"boolean"!=typeof i),s=r||(i===!0||o===!0?"margin":"border");return b.access(this,function(n,r,i){var o;return b.isWindow(n)?n.document.documentElement["client"+e]:9===n.nodeType?(o=n.documentElement,Math.max(n.body["scroll"+e],o["scroll"+e],n.body["offset"+e],o["offset"+e],o["client"+e])):i===t?b.css(n,r,s):b.style(n,r,i,s)},n,a?i:t,a,null)}})}),e.jQuery=e.$=b,"function"==typeof define&&define.amd&&define.amd.jQuery&&define("jquery",[],function(){return b})})(window); \ No newline at end of file diff --git a/modules/enterprise/gui/coregui/src/main/webapp/js/rhq.js b/modules/enterprise/gui/coregui/src/main/webapp/js/rhq.js index ef65e69..7e89bb1 100644 --- a/modules/enterprise/gui/coregui/src/main/webapp/js/rhq.js +++ 
b/modules/enterprise/gui/coregui/src/main/webapp/js/rhq.js @@ -1,12 +1,7 @@ /** - * Charting Javascript Functions. + * RHQ Charting Javascript Functions. */
-// Handle browsers not supporting console object -if (!window.console) window.console = {}; -if (!window.console.log) window.console.log = function () { -}; - /** * ChartContext Constructor Object * Contains all of the data required to render a chart. @@ -38,13 +33,13 @@ if (!window.console.log) window.console.log = function () { * @param singleValueLabel * @param chartXaxisTimeFormatHours * @param chartXaxisTimeFormatHoursMinutes - * @param showLegend + * @param hideLegend * @constructor */ var ChartContext = function (chartId, chartHeight, metricsData, xAxisLabel, chartTitle, yAxisUnits, minChartTitle, avgChartTitle, peakChartTitle, dateLabel, timeLabel, downLabel, unknownLabel, noDataLabel, hoverStartLabel, hoverEndLabel, hoverPeriodLabel, hoverBarLabel, chartHoverTimeFormat, chartHoverDateFormat, isPortalGraph, portalId, buttonBarDateTimeFormat, singleValueLabel, chartXaxisTimeFormatHours, chartXaxisTimeFormatHoursMinutes, hideLegend) { "use strict"; if (!(this instanceof ChartContext)) { - throw new Error("ChartContext function cannot be called as a function.") + throw new Error("ChartContext function cannot be called as a function."); } this.chartId = chartId; this.chartHeight = chartHeight; @@ -103,7 +98,7 @@ var ChartContext = function (chartId, chartHeight, metricsData, xAxisLabel, char AvailChartContext = function (chartId, availData, dateLabel, timeLabel, hoverStartLabel, hoverBarLabel, availabilityLabel, chartHoverTimeFormat, chartHoverDateFormat, chartTitle, chartUpLabel, chartDownLabel, chartXaxisTimeFormatHours, chartXaxisTimeFormatHoursMinutes) { "use strict"; if (!(this instanceof AvailChartContext)) { - throw new Error("AvailChartContext function cannot be called as a function.") + throw new Error("AvailChartContext function cannot be called as a function."); } this.chartId = chartId; this.chartHandle = "#availChart-" + this.chartId; @@ -133,12 +128,13 @@ var ChartContext = function (chartId, chartHeight, metricsData, xAxisLabel, char 
GraphDateContext = function (startDate, endDate) { "use strict"; if (!(this instanceof GraphDateContext)) { - throw new Error("GraphDateContext function cannot be called as a function.") + throw new Error("GraphDateContext function cannot be called as a function."); } this.startDate = startDate; this.endDate = endDate; }, rhqCommon = (function () { + "use strict";
var timeFormat = function (formats) { @@ -146,7 +142,7 @@ var ChartContext = function (chartId, chartHeight, metricsData, xAxisLabel, char var i = formats.length - 1, f = formats[i]; while (!f[1](date)) f = formats[--i]; return f[0](date); - } + }; };
return { @@ -179,7 +175,7 @@ var ChartContext = function (chartId, chartHeight, metricsData, xAxisLabel, char ]); }
- } + }; })();
commit 711b389b559b1cd9fb2f8e728ad1abef9c7ac7e7 Author: Lukas Krejci lkrejci@redhat.com Date: Fri Aug 2 10:24:02 2013 +0200
Updating the test recipes for the new names of compliance.
diff --git a/modules/common/ant-bundle/src/test/java/org/rhq/bundle/ant/AntLauncherTest.java b/modules/common/ant-bundle/src/test/java/org/rhq/bundle/ant/AntLauncherTest.java index 22aa418..6073be9 100644 --- a/modules/common/ant-bundle/src/test/java/org/rhq/bundle/ant/AntLauncherTest.java +++ b/modules/common/ant-bundle/src/test/java/org/rhq/bundle/ant/AntLauncherTest.java @@ -303,7 +303,7 @@ public class AntLauncherTest { }
public void testUpgradeNoManageRootDir() throws Exception { - testUpgradeNoManageRootDir(true, "test-bundle-v2-commonSubdirectories.xml"); + testUpgradeNoManageRootDir(true, "test-bundle-v2-filesAndDirectories.xml"); }
private void testUpgradeNoManageRootDir(boolean validate, String recipeFile) throws Exception { diff --git a/modules/common/ant-bundle/src/test/resources/test-bundle-v2-commonSubdirectories.xml b/modules/common/ant-bundle/src/test/resources/test-bundle-v2-commonSubdirectories.xml deleted file mode 100644 index 3a82a3d..0000000 --- a/modules/common/ant-bundle/src/test/resources/test-bundle-v2-commonSubdirectories.xml +++ /dev/null @@ -1,65 +0,0 @@ -<?xml version="1.0"?> - -<!-- - ~ RHQ Management Platform - ~ Copyright (C) 2013 Red Hat, Inc. - ~ All rights reserved. - ~ - ~ This program is free software; you can redistribute it and/or modify - ~ it under the terms of the GNU General Public License as published by - ~ the Free Software Foundation version 2 of the License. - ~ - ~ This program is distributed in the hope that it will be useful, - ~ but WITHOUT ANY WARRANTY; without even the implied warranty of - ~ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - ~ GNU General Public License for more details. - ~ - ~ You should have received a copy of the GNU General Public License - ~ along with this program; if not, write to the Free Software - ~ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- --> - -<project name="test-bundle" default="main" xmlns:rhq="antlib:org.rhq.bundle"> - - <rhq:bundle name="example.com (JBoss EAP 4.3)" version="2.5" - description="updated bundle"> - - <rhq:input-property - name="listener.port" - description="This is where the product will listen for incoming messages" - required="true" - defaultValue="9090" - type="integer"/> - - <rhq:deployment-unit name="appserver" preinstallTarget="preinstall" postinstallTarget="postinstall" - compliance="commonDirectories"> <!-- this is the only difference with test-bundle-v2.xml --> - <rhq:system-service name="foo" scriptFile="foo-script" - configFile="foo-config" overwriteScript="true" - startLevels="3,4,5" startPriority="80" stopPriority="20" root="root"/> - <rhq:file name="test-v2.properties" destinationFile="subdir/test.properties" replace="true"/> - <rhq:archive name="file.zip"> - rhq:replace - <rhq:fileset includes="**/*.properties"/> - </rhq:replace> - </rhq:archive> - <!-- the files that should be ignored during upgrades --> - rhq:ignore - <rhq:fileset includes="*.log"/> - </rhq:ignore> - </rhq:deployment-unit> - - </rhq:bundle> - - <target name="main"/> - - <target name="preinstall"> - <echo>Deploying Test Bundle v2.5 to ${rhq.deploy.dir}...</echo> - <property name="preinstallTargetExecuted" value="2a"/> - </target> - - <target name="postinstall"> - <echo>Done deploying Test Bundle v2.5 to ${rhq.deploy.dir}.</echo> - <property name="postinstallTargetExecuted" value="2b"/> - </target> - -</project> diff --git a/modules/common/ant-bundle/src/test/resources/test-bundle-v2-filesAndDirectories.xml b/modules/common/ant-bundle/src/test/resources/test-bundle-v2-filesAndDirectories.xml new file mode 100644 index 0000000..b82da55 --- /dev/null +++ b/modules/common/ant-bundle/src/test/resources/test-bundle-v2-filesAndDirectories.xml @@ -0,0 +1,65 @@ +<?xml version="1.0"?> + +<!-- + ~ RHQ Management Platform + ~ Copyright (C) 2013 Red Hat, Inc. + ~ All rights reserved. 
+ ~ + ~ This program is free software; you can redistribute it and/or modify + ~ it under the terms of the GNU General Public License as published by + ~ the Free Software Foundation version 2 of the License. + ~ + ~ This program is distributed in the hope that it will be useful, + ~ but WITHOUT ANY WARRANTY; without even the implied warranty of + ~ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + ~ GNU General Public License for more details. + ~ + ~ You should have received a copy of the GNU General Public License + ~ along with this program; if not, write to the Free Software + ~ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + --> + +<project name="test-bundle" default="main" xmlns:rhq="antlib:org.rhq.bundle"> + + <rhq:bundle name="example.com (JBoss EAP 4.3)" version="2.5" + description="updated bundle"> + + <rhq:input-property + name="listener.port" + description="This is where the product will listen for incoming messages" + required="true" + defaultValue="9090" + type="integer"/> + + <rhq:deployment-unit name="appserver" preinstallTarget="preinstall" postinstallTarget="postinstall" + compliance="filesAndDirectories"> <!-- this is the only difference with test-bundle-v2.xml --> + <rhq:system-service name="foo" scriptFile="foo-script" + configFile="foo-config" overwriteScript="true" + startLevels="3,4,5" startPriority="80" stopPriority="20" root="root"/> + <rhq:file name="test-v2.properties" destinationFile="subdir/test.properties" replace="true"/> + <rhq:archive name="file.zip"> + rhq:replace + <rhq:fileset includes="**/*.properties"/> + </rhq:replace> + </rhq:archive> + <!-- the files that should be ignored during upgrades --> + rhq:ignore + <rhq:fileset includes="*.log"/> + </rhq:ignore> + </rhq:deployment-unit> + + </rhq:bundle> + + <target name="main"/> + + <target name="preinstall"> + <echo>Deploying Test Bundle v2.5 to ${rhq.deploy.dir}...</echo> + <property name="preinstallTargetExecuted" value="2a"/> + </target> + + <target 
name="postinstall"> + <echo>Done deploying Test Bundle v2.5 to ${rhq.deploy.dir}.</echo> + <property name="postinstallTargetExecuted" value="2b"/> + </target> + +</project>
commit 4e54703565e946030da436aa96d01bc7cb8d0dc2 Author: Lukas Krejci lkrejci@redhat.com Date: Fri Aug 2 00:54:49 2013 +0200
[BZ 801926] - manageRootDir deprecated, supeseded by "compliance". The compliance has now 2 possible values: * full (corresponds to manageRootDir=true, i.e. the default), * filesAndDirectories (corresponds to manageRootDir=false)
The name "full" should convey the fact that the deployment directory is in full compliance with the contents of the bundle.
The name "filesAndDirectories" should convey the behavior of manageRootDir=false - i.e. the files and directories in the root dir that are not present in the bundle are left intact. When there is a directory or file in the root directory that is both in the deployment directory and the bundle, the file or directory is made compliant to the contents in the bundle.
The other two proposed deployment behaviors are "rootDirectoryAndFiles" and "files", but those are commented out for the moment, because we don't plan to add support for them in RHQ 4.9.
diff --git a/modules/common/ant-bundle/src/main/java/org/rhq/bundle/ant/AntLauncher.java b/modules/common/ant-bundle/src/main/java/org/rhq/bundle/ant/AntLauncher.java index e26a7da..baf981c 100644 --- a/modules/common/ant-bundle/src/main/java/org/rhq/bundle/ant/AntLauncher.java +++ b/modules/common/ant-bundle/src/main/java/org/rhq/bundle/ant/AntLauncher.java @@ -71,6 +71,37 @@ public class AntLauncher { // TODO (ips, 04/28/10): Figure out a way to avoid assuming the prefix is "rhq". private static final String BUNDLE_TASK_NAME = "rhq:bundle";
+ private boolean requireExplicitCompliance; + + /** + * For backwards compatibility reasons, this calls {@link #AntLauncher(boolean) AntLauncher(false)}. + * Note that this might change in the future, because we are <b>requiring</b> the explicit declaration of the + * destination directory's compliance mode starting with RHQ 4.9.0. + * <p/> + * Nevertheless this constructor is behaving as it was before RHQ 4.9.0 so that users of it aren't surprised + * by its behavior. + * + * @deprecated since 4.9.0. You can keep using this constructor but be aware that it might change behavior in some + * future version of RHQ. It will NOT be removed though. + */ + @Deprecated + public AntLauncher() { + this(false); + } + + /** + * @since 4.9.0 + * @param requireExplicitCompliance whether or not to enforce the presence of {@code compliance} attribute in the + * deployment unit definitions. Before RHQ 4.9.0 a similar deprecated attribute + * called {@code manageRootDir} was optional and defaulted to {@code true}. Since + * RHQ 4.9.0 we require the user to explicitly specify the compliance of the + * destination directory. But to keep backwards compatibility with the older + * bundle recipes already deployed on the agents, we make this behavior optional. + */ + public AntLauncher(boolean requireExplicitCompliance) { + this.requireExplicitCompliance = requireExplicitCompliance; + } + /** * Executes the specified bundle deploy Ant build file (i.e. rhq-deploy.xml). 
* @@ -241,6 +272,14 @@ public class AntLauncher { "The bundle task must contain exactly one rhq:deploymentUnit child element."); } DeploymentUnitType deployment = deployments.iterator().next(); + + if (requireExplicitCompliance && deployment.getCompliance() == null) { + throw new InvalidBuildFileException( + "The deployment unit must specifically declare compliance mode of the destination directory."); + } + + project.setDestinationCompliance(deployment.getCompliance()); + Map<File, String> files = deployment.getLocalFileNames(); for (String file : files.values()) { project.getBundleFileNames().add(file); diff --git a/modules/common/ant-bundle/src/main/java/org/rhq/bundle/ant/BundleAntProject.java b/modules/common/ant-bundle/src/main/java/org/rhq/bundle/ant/BundleAntProject.java index 3c7921d..41b31d8 100644 --- a/modules/common/ant-bundle/src/main/java/org/rhq/bundle/ant/BundleAntProject.java +++ b/modules/common/ant-bundle/src/main/java/org/rhq/bundle/ant/BundleAntProject.java @@ -32,6 +32,7 @@ import org.apache.tools.ant.Project; import org.rhq.core.domain.configuration.Configuration; import org.rhq.core.domain.configuration.definition.ConfigurationDefinition; import org.rhq.core.util.updater.DeployDifferences; +import org.rhq.core.util.updater.DestinationComplianceMode;
/** * This is the Ant project object that is used when processing bundle Ant scripts @@ -71,6 +72,9 @@ public class BundleAntProject extends Project { private DeploymentPhase deploymentPhase; private boolean dryRun;
+ //note that this will have to change once we start supporting multiple deployment units. + private DestinationComplianceMode destinationCompliance; + // results of project execution private DeployDifferences deployDiffs = new DeployDifferences(); private Set<File> downloadedFiles = new HashSet<File>(); @@ -165,6 +169,14 @@ public class BundleAntProject extends Project { return deployDiffs; }
+ public DestinationComplianceMode getDestinationCompliance() { + return destinationCompliance; + } + + public void setDestinationCompliance(DestinationComplianceMode destinationCompliance) { + this.destinationCompliance = destinationCompliance; + } + /** * If there were url-file or url-archives, this returns the set of files * that were downloaded from the URLs. diff --git a/modules/common/ant-bundle/src/main/java/org/rhq/bundle/ant/type/DeploymentUnitType.java b/modules/common/ant-bundle/src/main/java/org/rhq/bundle/ant/type/DeploymentUnitType.java index 62956ef..f4a05a8 100644 --- a/modules/common/ant-bundle/src/main/java/org/rhq/bundle/ant/type/DeploymentUnitType.java +++ b/modules/common/ant-bundle/src/main/java/org/rhq/bundle/ant/type/DeploymentUnitType.java @@ -37,6 +37,7 @@ import org.apache.tools.ant.Project; import org.apache.tools.ant.Target;
import org.rhq.bundle.ant.BundleAntProject.AuditStatus; +import org.rhq.core.util.updater.DestinationComplianceMode; import org.rhq.bundle.ant.DeployPropertyNames; import org.rhq.core.domain.configuration.Configuration; import org.rhq.core.domain.configuration.PropertySimple; @@ -56,7 +57,8 @@ import org.rhq.core.util.updater.DeploymentProperties; */ public class DeploymentUnitType extends AbstractBundleType { private String name; - private String manageRootDir = Boolean.TRUE.toString(); + + private DestinationComplianceMode compliance;
private Map<File, File> files = new LinkedHashMap<File, File>(); private Map<URL, File> urlFiles = new LinkedHashMap<URL, File>(); @@ -103,13 +105,14 @@ public class DeploymentUnitType extends AbstractBundleType {
try { boolean dryRun = getProject().isDryRun(); - boolean willManageRootDir = Boolean.parseBoolean(this.manageRootDir); + + DestinationComplianceMode complianceToUse = DestinationComplianceMode.instanceOrDefault(this.compliance); + File deployDir = getProject().getDeployDir(); TemplateEngine templateEngine = createTemplateEngine(getProject().getUserProperties()); int deploymentId = getProject().getDeploymentId(); DeploymentProperties deploymentProps = new DeploymentProperties(deploymentId, getProject().getBundleName(), - getProject().getBundleVersion(), getProject().getBundleDescription()); - deploymentProps.setManageRootDir(willManageRootDir); + getProject().getBundleVersion(), getProject().getBundleDescription(), complianceToUse);
if (this.preinstallTarget != null) { getProject().auditLog(AuditStatus.SUCCESS, "Pre-Install Started", "The pre install target will start", @@ -156,23 +159,28 @@ public class DeploymentUnitType extends AbstractBundleType { "You must specify at least one file to deploy via nested file, archive, url-file, url-archive types in your recipe"); }
- if (willManageRootDir) { - log("Managing the root directory of this deployment unit - unrelated files found will be removed", - Project.MSG_VERBOSE); - // don't send an audit message on this unless we are really going to move files out of the way (i.e. !dryrun) + log("Destination compliance set to '" + complianceToUse + "'.", Project.MSG_VERBOSE); + switch (complianceToUse) { + case full: if (!dryRun) { getProject() .auditLog( AuditStatus.INFO, "Managing Top Level Deployment Directory", "The top level deployment directory will be managed - files found there will be backed up and removed!", - "The bundle recipe has requested that the top level deployment directory be fully managed by RHQ." - + "This means any files currently located in the top level deployment directory will be removed and backed up", + "The bundle recipe has requested that the top level deployment directory be fully managed by RHQ." + + "This means any files currently located in the top level deployment directory will be removed and backed up", null); } - } else { - log("Not managing the root directory of this deployment unit - unrelated files will remain intact", - Project.MSG_VERBOSE); + break; + case filesAndDirectories: + log("Files and directories in the destination directory not contained in the bundle will be kept intact.\n" + + "Note that the contents of the directories that ARE contained in the bundle will be synced with " + + "the contents as specified in the bundle. I.e. the subdirectories in the destination that are also " + + "contained in the bundle are made compliant with the bundle.", Project.MSG_VERBOSE); + break; + default: + throw new IllegalStateException("Unhandled destination compliance mode: " + complianceToUse.toString()); }
Set<File> allArchives = new HashSet<File>(this.archives); @@ -186,7 +194,7 @@ public class DeploymentUnitType extends AbstractBundleType { try { DeploymentData deploymentData = new DeploymentData(deploymentProps, allArchives, allFiles, getProject() .getBaseDir(), deployDir, allArchiveReplacePatterns, allRawFilesToReplace, templateEngine, - this.ignorePattern, willManageRootDir, allArchivesExploded); + this.ignorePattern, allArchivesExploded); Deployer deployer = new Deployer(deploymentData); DeployDifferences diffs = getProject().getDeployDifferences();
@@ -392,16 +400,42 @@ public class DeploymentUnitType extends AbstractBundleType { this.name = name; }
+ /** + * @deprecated since RHQ 4.9.0, use {@link #getCompliance()} + */ public String getManageRootDir() { - return manageRootDir; + return Boolean.toString(getCompliance() == DestinationComplianceMode.full); }
+ /** + * @deprecated since RHQ 4.9.0, use {@link #setCompliance(org.rhq.core.util.updater.DestinationComplianceMode)} + */ public void setManageRootDir(String booleanString) { if (!Boolean.TRUE.toString().equalsIgnoreCase(booleanString) && !Boolean.FALSE.toString().equalsIgnoreCase(booleanString)) { throw new BuildException("manageRootDir attribute must be 'true' or 'false': " + booleanString); } - this.manageRootDir = booleanString; + + log("The deprecated 'manageRootDir' attribute was detected. Please consider replacing it with the 'compliance' attribute.", + Project.MSG_INFO); + + boolean val = Boolean.parseBoolean(booleanString); + + setCompliance(val ? DestinationComplianceMode.full : DestinationComplianceMode.filesAndDirectories); + } + + /** + * @since 4.9.0 + */ + public DestinationComplianceMode getCompliance() { + return compliance; + } + + /** + * @since 4.9.0 + */ + public void setCompliance(DestinationComplianceMode value) { + this.compliance = value; }
/** @@ -565,4 +599,4 @@ public class DeploymentUnitType extends AbstractBundleType { getProject().getProperty(DeployPropertyNames.DEPLOY_DIR)); return templateEngine; } -} \ No newline at end of file +} diff --git a/modules/common/ant-bundle/src/test/java/org/rhq/bundle/ant/AntLauncherTest.java b/modules/common/ant-bundle/src/test/java/org/rhq/bundle/ant/AntLauncherTest.java index bd2d35d..22aa418 100644 --- a/modules/common/ant-bundle/src/test/java/org/rhq/bundle/ant/AntLauncherTest.java +++ b/modules/common/ant-bundle/src/test/java/org/rhq/bundle/ant/AntLauncherTest.java @@ -31,6 +31,7 @@ import java.util.ArrayList; import java.util.List; import java.util.Properties; import java.util.Set; +import java.util.Vector; import java.util.zip.ZipEntry; import java.util.zip.ZipInputStream; import java.util.zip.ZipOutputStream; @@ -38,10 +39,17 @@ import java.util.zip.ZipOutputStream; import org.apache.tools.ant.BuildListener; import org.apache.tools.ant.DefaultLogger; import org.apache.tools.ant.Project; +import org.apache.tools.ant.Target; +import org.apache.tools.ant.Task; +import org.apache.tools.ant.UnknownElement; +import org.apache.tools.ant.helper.AntXMLContext; +import org.testng.Assert; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test;
+import org.rhq.bundle.ant.task.BundleTask; +import org.rhq.bundle.ant.type.DeploymentUnitType; import org.rhq.core.domain.configuration.Configuration; import org.rhq.core.domain.configuration.definition.ConfigurationDefinition; import org.rhq.core.domain.configuration.definition.PropertyDefinitionSimple; @@ -49,6 +57,7 @@ import org.rhq.core.domain.configuration.definition.PropertySimpleType; import org.rhq.core.util.ZipUtil; import org.rhq.core.util.file.FileUtil; import org.rhq.core.util.updater.DeploymentsMetadata; +import org.rhq.core.util.updater.DestinationComplianceMode; import org.rhq.core.util.updater.FileHashcodeMap;
/** @@ -73,13 +82,21 @@ public class AntLauncherTest { FileUtil.purge(DEPLOY_DIR, true); }
+ public void testParse_legacy() throws Exception { + testParse(false, "legacy-test-bundle-v1.xml"); + } + public void testParse() throws Exception { + testParse(true, "test-bundle-v1.xml"); + } + + private void testParse(boolean validate, String recipeFile) throws Exception { // We want to test with an empty deploy dir to ensure nothing gets installed there after a parse FileUtil.purge(DEPLOY_DIR, true);
- AntLauncher ant = new AntLauncher(); + AntLauncher ant = new AntLauncher(validate);
- BundleAntProject project = ant.parseBundleDeployFile(getBuildXml("test-bundle-v1.xml"), null); + BundleAntProject project = ant.parseBundleDeployFile(getBuildXml(recipeFile), null); assert project != null; Set<String> bundleFiles = project.getBundleFileNames(); assert bundleFiles != null; @@ -106,7 +123,49 @@ public class AntLauncherTest { assert !DEPLOY_DIR.exists() : "Nothing should have been installed to the deploy dir"; }
+ public void testParseWithNoDestinationComplianceCheck() throws Exception { + // We want to test with an empty deploy dir to ensure nothing gets installed there after a parse + FileUtil.purge(DEPLOY_DIR, true); + + //instantiate the launcher in the new validating mode (new as of RHQ 4.9.0) + AntLauncher ant = new AntLauncher(true); + + try { + ant.parseBundleDeployFile(getBuildXml("test-bundle-no-manage-root-dir-nor-compliance.xml"), null); + Assert.fail("Parsing a bundle with no explicit manageRootDir should have failed."); + } catch (InvalidBuildFileException e) { + assert "The deployment unit must specifically declare compliance mode of the destination directory.".equals( + e.getMessage()); + } + + BundleAntProject project = ant.parseBundleDeployFile(getBuildXml( + "test-bundle-with-manage-root-dir.xml"), null); + assert project != null; + BundleTask bundleTask = findBundleTask(project); + assert bundleTask != null; + assert bundleTask.getDeploymentUnits() != null; + assert bundleTask.getDeploymentUnits().size() == 1; + DeploymentUnitType deploymentUnit = bundleTask.getDeploymentUnits().values().iterator().next(); + assert deploymentUnit != null; + + //assert the compatibility with the legacy attribute + assert "false".equals(deploymentUnit.getManageRootDir()); + assert DestinationComplianceMode.filesAndDirectories == deploymentUnit.getCompliance(); + + // all we did was parse, nothing should really have been extracted or installed + assert !DEPLOY_DIR.exists() : "Nothing should have been installed to the deploy dir"; + } + + public void testInstall_legacy() throws Exception { + testInstall(false, "legacy-test-bundle-v1.xml"); + } + + @Test(dependsOnMethods = "testUpgrade_legacy") public void testInstall() throws Exception { + testInstall(true, "test-bundle-v1.xml"); + } + + private void testInstall(boolean validate, String recipeFile) throws Exception {
if (skipNonRHLinux("testInstall")) return; @@ -117,11 +176,11 @@ public class AntLauncherTest { // but we do want to add an unrelated file to see that it goes away - since we have manageRootDir=true File unrelatedFile = writeFile("unrelated content", DEPLOY_DIR, "unrelated-file.txt");
- AntLauncher ant = new AntLauncher(); + AntLauncher ant = new AntLauncher(validate); Properties inputProps = createInputProperties("/test-bundle-v1-input.properties"); List<BuildListener> buildListeners = createBuildListeners();
- BundleAntProject project = ant.executeBundleDeployFile(getBuildXml("test-bundle-v1.xml"), inputProps, + BundleAntProject project = ant.executeBundleDeployFile(getBuildXml(recipeFile), inputProps, buildListeners); assert project != null; Set<String> bundleFiles = project.getBundleFileNames(); @@ -173,8 +232,17 @@ public class AntLauncherTest { return false; }
+ @Test(dependsOnMethods = "testInstall_legacy") + public void testUpgrade_legacy() throws Exception { + testUpgrade(false, "legacy-test-bundle-v2.xml"); + } + @Test(dependsOnMethods = "testInstall") public void testUpgrade() throws Exception { + testUpgrade(true, "test-bundle-v2.xml"); + } + + private void testUpgrade(boolean validate, String recipeFile) throws Exception {
if (skipNonRHLinux("testUpgrade")) return; @@ -182,11 +250,11 @@ public class AntLauncherTest { // add an unrelated file to see that it gets deleted as part of the upgrade File unrelatedFile = writeFile("unrelated content", DEPLOY_DIR, "unrelated-file.txt");
- AntLauncher ant = new AntLauncher(); + AntLauncher ant = new AntLauncher(validate); Properties inputProps = createInputProperties("/test-bundle-v2-input.properties"); List<BuildListener> buildListeners = createBuildListeners();
- BundleAntProject project = ant.executeBundleDeployFile(getBuildXml("test-bundle-v2.xml"), inputProps, + BundleAntProject project = ant.executeBundleDeployFile(getBuildXml(recipeFile), inputProps, buildListeners); assert project != null; Set<String> bundleFiles = project.getBundleFileNames(); @@ -230,7 +298,15 @@ public class AntLauncherTest { "templatized.variable").equals("20000"); }
+ public void testUpgradeNoManageRootDir_legacy() throws Exception { + testUpgradeNoManageRootDir(false, "legacy-test-bundle-v2-noManageRootDir.xml"); + } + public void testUpgradeNoManageRootDir() throws Exception { + testUpgradeNoManageRootDir(true, "test-bundle-v2-commonSubdirectories.xml"); + } + + private void testUpgradeNoManageRootDir(boolean validate, String recipeFile) throws Exception {
if (skipNonRHLinux("testInstall")) return; @@ -243,11 +319,11 @@ public class AntLauncherTest { File unrelatedFile = writeFile("unrelated content", DEPLOY_DIR, "unrelated-file.txt"); assert unrelatedFile.exists() : "our initial install test method should have prepared an unmanaged file";
- AntLauncher ant = new AntLauncher(); + AntLauncher ant = new AntLauncher(validate); Properties inputProps = createInputProperties("/test-bundle-v2-input.properties"); List<BuildListener> buildListeners = createBuildListeners();
- BundleAntProject project = ant.executeBundleDeployFile(getBuildXml("test-bundle-v2-noManageRootDir.xml"), + BundleAntProject project = ant.executeBundleDeployFile(getBuildXml(recipeFile), inputProps, buildListeners); assert project != null; Set<String> bundleFiles = project.getBundleFileNames(); @@ -291,23 +367,31 @@ public class AntLauncherTest { "templatized.variable").equals("20000"); }
+ public void testInstallCompressedZipNoDryRun_legacy() throws Exception { + testInstallCompressedZip(false, false, "legacy-test-bundle-compressed-archives.xml"); + } + public void testInstallCompressedZipNoDryRun() throws Exception { - testInstallCompressedZip(false); + testInstallCompressedZip(false, true, "test-bundle-compressed-archives.xml"); + } + + public void testInstallCompressedZipDryRun_legacy() throws Exception { + testInstallCompressedZip(true, false, "legacy-test-bundle-compressed-archives.xml"); }
public void testInstallCompressedZipDryRun() throws Exception { - testInstallCompressedZip(true); + testInstallCompressedZip(true, true, "test-bundle-compressed-archives.xml"); }
- private void testInstallCompressedZip(boolean dryRun) throws Exception { + private void testInstallCompressedZip(boolean dryRun, boolean validate, String recipeFile) throws Exception { // We want to test a fresh install, so make sure the deploy dir doesn't pre-exist. FileUtil.purge(DEPLOY_DIR, true);
- AntLauncher ant = new AntLauncher(); + AntLauncher ant = new AntLauncher(validate); Properties inputProps = createInputProperties("/test-bundle-compressed-archives-input.properties", dryRun); List<BuildListener> buildListeners = createBuildListeners();
- BundleAntProject project = ant.executeBundleDeployFile(getBuildXml("test-bundle-compressed-archives.xml"), + BundleAntProject project = ant.executeBundleDeployFile(getBuildXml(recipeFile), inputProps, buildListeners); assert project != null; Set<String> bundleFiles = project.getBundleFileNames(); @@ -371,24 +455,34 @@ public class AntLauncherTest { } }
+ public void testInstallCompressedZipWithTemplatizedFilesNoDryRun_legacy() throws Exception { + testInstallCompressedZipWithTemplatizedFiles(false, false, + "legacy-test-bundle-compressed-archives-with-replace.xml"); + } + public void testInstallCompressedZipWithTemplatizedFilesNoDryRun() throws Exception { - testInstallCompressedZipWithTemplatizedFiles(false); + testInstallCompressedZipWithTemplatizedFiles(false, true, "test-bundle-compressed-archives-with-replace.xml"); + } + + public void testInstallCompressedZipWithTemplatizedFilesDryRun_legacy() throws Exception { + testInstallCompressedZipWithTemplatizedFiles(true, false, + "legacy-test-bundle-compressed-archives-with-replace.xml"); }
public void testInstallCompressedZipWithTemplatizedFilesDryRun() throws Exception { - testInstallCompressedZipWithTemplatizedFiles(true); + testInstallCompressedZipWithTemplatizedFiles(true, true, "test-bundle-compressed-archives-with-replace.xml"); }
- private void testInstallCompressedZipWithTemplatizedFiles(boolean dryRun) throws Exception { + private void testInstallCompressedZipWithTemplatizedFiles(boolean dryRun, boolean validate, String recipeFile) throws Exception { // We want to test a fresh install, so make sure the deploy dir doesn't pre-exist. FileUtil.purge(DEPLOY_DIR, true);
- AntLauncher ant = new AntLauncher(); + AntLauncher ant = new AntLauncher(validate); Properties inputProps = createInputProperties("/test-bundle-compressed-archives-input.properties", dryRun); List<BuildListener> buildListeners = createBuildListeners();
BundleAntProject project = ant.executeBundleDeployFile( - getBuildXml("test-bundle-compressed-archives-with-replace.xml"), inputProps, buildListeners); + getBuildXml(recipeFile), inputProps, buildListeners); assert project != null; Set<String> bundleFiles = project.getBundleFileNames(); assert bundleFiles != null; @@ -454,20 +548,28 @@ public class AntLauncherTest { } }
+ public void testAuditMessages_legacy() throws Exception { + testAuditMessages(false, "legacy-test-bundle-audit.xml"); + } + + public void testAuditMessages() throws Exception { + testAuditMessages(true, "test-bundle-audit.xml"); + } + // this doesn't verify the audit messages getting emitted are correct // but it does verify the audit tag getting processed correctly. // you have to look at the test logs to see the audit messages // TODO: write a ant build listener to listen for this messages, parse them and verify they are correct // this test should then ask the listener at the end if everything was OK and assert false if not - public void testAuditMessages() throws Exception { + private void testAuditMessages(boolean validate, String recipeFile) throws Exception { // We want to test a fresh install, so make sure the deploy dir doesn't pre-exist. FileUtil.purge(DEPLOY_DIR, true);
- AntLauncher ant = new AntLauncher(); + AntLauncher ant = new AntLauncher(validate); Properties inputProps = createInputProperties("/test-audit-input.properties"); List<BuildListener> buildListeners = createBuildListeners();
- BundleAntProject project = ant.executeBundleDeployFile(getBuildXml("test-bundle-audit.xml"), inputProps, + BundleAntProject project = ant.executeBundleDeployFile(getBuildXml(recipeFile), inputProps, buildListeners); assert project != null; Set<String> bundleFiles = project.getBundleFileNames(); @@ -496,7 +598,15 @@ public class AntLauncherTest { "777"); }
+ public void testSubdirectoriesInRecipe_legacy() throws Exception { + testSubdirectoriesInRecipe(false, "legacy-test-bundle-subdir.xml"); + } + public void testSubdirectoriesInRecipe() throws Exception { + testSubdirectoriesInRecipe(true, "test-bundle-subdir.xml"); + } + + private void testSubdirectoriesInRecipe(boolean validate, String origRecipeFile) throws Exception { // We want to test a fresh install, so make sure the deploy dir doesn't pre-exist. FileUtil.purge(DEPLOY_DIR, true);
@@ -513,9 +623,9 @@ public class AntLauncherTest { createZip(new String[] { "3", "4" }, subdir, "test-explode.zip", new String[] { "three.txt", "four.txt" }); createZip(new String[] { "X=@@X@@\n" }, subdir, "test-replace.zip", new String[] { "template.txt" }); // will be exploded then recompressed File recipeFile = new File(antBasedir, "deploy.xml"); - FileUtil.copyFile(new File(ANT_BASEDIR, "test-bundle-subdir.xml"), recipeFile); + FileUtil.copyFile(new File(ANT_BASEDIR, origRecipeFile), recipeFile);
- AntLauncher ant = new AntLauncher(); + AntLauncher ant = new AntLauncher(validate); Properties inputProps = new Properties(); inputProps.setProperty(DeployPropertyNames.DEPLOY_DIR, DEPLOY_DIR.getPath()); inputProps.setProperty(DeployPropertyNames.DEPLOY_ID, String.valueOf(++this.deploymentId)); @@ -564,7 +674,15 @@ public class AntLauncherTest { } }
+ public void testUrlFilesAndArchives_legacy() throws Exception { + testUrlFilesAndArchives(false, "legacy-test-bundle-url.xml"); + } + public void testUrlFilesAndArchives() throws Exception { + testUrlFilesAndArchives(true, "test-bundle-url.xml"); + } + + private void testUrlFilesAndArchives(boolean validate, String recipeFile) throws Exception { // We want to test a fresh install, so make sure the deploy dir doesn't pre-exist. FileUtil.purge(DEPLOY_DIR, true);
@@ -582,12 +700,12 @@ public class AntLauncherTest { createZip(new String[] { "3", "4" }, subdir, "test-explode.zip", new String[] { "three.txt", "four.txt" }); createZip(new String[] { "X=@@X@@\n" }, subdir, "test-replace.zip", new String[] { "template.txt" }); // will be exploded then recompressed
- AntLauncher ant = new AntLauncher(); + AntLauncher ant = new AntLauncher(validate); Properties inputProps = createInputProperties("/test-bundle-url-input.properties"); inputProps.setProperty("rhq.test.url.dir", tmpUrlLocation.toURI().toURL().toString()); // we use this so our recipe can use URLs List<BuildListener> buildListeners = createBuildListeners();
- BundleAntProject project = ant.executeBundleDeployFile(getBuildXml("test-bundle-url.xml"), inputProps, + BundleAntProject project = ant.executeBundleDeployFile(getBuildXml(recipeFile), inputProps, buildListeners); assert project != null;
@@ -753,4 +871,30 @@ public class AntLauncherTest { } } } + + private BundleTask findBundleTask(BundleAntProject project) { + AntXMLContext antParsingContext = (AntXMLContext) project.getReference("ant.parsing.context"); + Vector targets = antParsingContext.getTargets(); + for (Object targetObj : targets) { + Target target = (Target) targetObj; + Task[] tasks = target.getTasks(); + for (Task task : tasks) { + if ("rhq:bundle".equals(task.getTaskName())) { + return (BundleTask) preconfigureTask(task); + } + } + } + + return null; + } + + private static Task preconfigureTask(Task task) { + if (task instanceof UnknownElement) { + task.maybeConfigure(); + Task resolvedTask = ((UnknownElement) task).getTask(); + return (resolvedTask != null) ? resolvedTask : task; + } else { + return task; + } + } } diff --git a/modules/common/ant-bundle/src/test/resources/legacy-test-bundle-audit.xml b/modules/common/ant-bundle/src/test/resources/legacy-test-bundle-audit.xml new file mode 100644 index 0000000..687c4ba --- /dev/null +++ b/modules/common/ant-bundle/src/test/resources/legacy-test-bundle-audit.xml @@ -0,0 +1,39 @@ +<?xml version="1.0"?> + +<project name="test-bundle" default="main" xmlns:rhq="antlib:org.rhq.bundle"> + + <rhq:bundle name="audit-test" version="1"> + + <rhq:input-property name="listener.port" type="integer"/> + + <rhq:deployment-unit name="test" preinstallTarget="preinstall" postinstallTarget="postinstall"> + <rhq:file name="test-audit.properties" destinationFile="test-audit.properties" replace="true"/> + </rhq:deployment-unit> + + </rhq:bundle> + + <target name="main"/> + + <target name="preinstall"> + <property name="preinstallTargetExecuted" value="1a"/> + <rhq:audit status="success" action="action1" info="info1" message="message1"> + preinstall successful message with port "${listener.port}" ! + </rhq:audit> + <rhq:audit status="failure" action="action2" info="info2" message="message2"> + preinstall failure message here! 
+ </rhq:audit> + <rhq:audit status="warn" action="action3" info="info3" message="message3"> + preinstall warn message here! + </rhq:audit> + </target> + + <target name="postinstall"> + <property name="postinstallTargetExecuted" value="1b"/> + <rhq:audit /> + <rhq:audit status="SUCCESS" /> + <rhq:audit status="WARN" action="actionA" /> + <rhq:audit status="FAILURE" action="actionB" info="infoB" /> + <rhq:audit status="SUCCESS" action="actionC" info="infoC" message="messageC" /> + </target> + +</project> \ No newline at end of file diff --git a/modules/common/ant-bundle/src/test/resources/legacy-test-bundle-compressed-archives-with-replace.xml b/modules/common/ant-bundle/src/test/resources/legacy-test-bundle-compressed-archives-with-replace.xml new file mode 100644 index 0000000..ea4255a --- /dev/null +++ b/modules/common/ant-bundle/src/test/resources/legacy-test-bundle-compressed-archives-with-replace.xml @@ -0,0 +1,21 @@ +<?xml version="1.0"?> + +<project name="test-bundle" default="main" xmlns:rhq="antlib:org.rhq.bundle"> + + <rhq:bundle name="test compressed archive files" version="1.0"> + + <rhq:input-property name="listener.port" type="integer"/> + + <rhq:deployment-unit name="appserver"> + <rhq:archive name="file.zip" exploded="false"> + rhq:replace + <rhq:fileset includes="**/*.foo"/> + <rhq:fileset includes="**/*.properties"/> + </rhq:replace> + </rhq:archive> + </rhq:deployment-unit> + + </rhq:bundle> + + <target name="main"/> +</project> \ No newline at end of file diff --git a/modules/common/ant-bundle/src/test/resources/legacy-test-bundle-compressed-archives.xml b/modules/common/ant-bundle/src/test/resources/legacy-test-bundle-compressed-archives.xml new file mode 100644 index 0000000..a35b6ac --- /dev/null +++ b/modules/common/ant-bundle/src/test/resources/legacy-test-bundle-compressed-archives.xml @@ -0,0 +1,14 @@ +<?xml version="1.0"?> + +<project name="test-bundle" default="main" xmlns:rhq="antlib:org.rhq.bundle"> + + <rhq:bundle name="test 
compressed archive files" version="1.0"> + + <rhq:deployment-unit name="appserver"> + <rhq:archive name="file.zip" exploded="false"/> + </rhq:deployment-unit> + + </rhq:bundle> + + <target name="main"/> +</project> \ No newline at end of file diff --git a/modules/common/ant-bundle/src/test/resources/legacy-test-bundle-subdir.xml b/modules/common/ant-bundle/src/test/resources/legacy-test-bundle-subdir.xml new file mode 100644 index 0000000..af3dbd2 --- /dev/null +++ b/modules/common/ant-bundle/src/test/resources/legacy-test-bundle-subdir.xml @@ -0,0 +1,26 @@ +<?xml version="1.0"?> + +<project name="test-bundle" default="main" xmlns:rhq="antlib:org.rhq.bundle"> + + <rhq:bundle name="test" version="1"> + + <rhq:input-property name="X" /> + + <rhq:deployment-unit name="appserver"> + <rhq:file name="subdir/test0.txt" replace="false" /> + <rhq:file name="subdir/test1.txt" destinationFile="another/foo.txt" replace="false"/> + <rhq:file name="subdir/test2.txt" destinationDir="second.dir" replace="false"/> + <rhq:archive name="subdir/test.zip" exploded="false" /> + <rhq:archive name="subdir/test-explode.zip" exploded="true" /> + <rhq:archive name="subdir/test-replace.zip" exploded="false"> + rhq:replace + <rhq:fileset includes="template.txt"/> + </rhq:replace> + </rhq:archive> + </rhq:deployment-unit> + + </rhq:bundle> + + <target name="main"/> + +</project> \ No newline at end of file diff --git a/modules/common/ant-bundle/src/test/resources/legacy-test-bundle-url.xml b/modules/common/ant-bundle/src/test/resources/legacy-test-bundle-url.xml new file mode 100644 index 0000000..ebfdd81 --- /dev/null +++ b/modules/common/ant-bundle/src/test/resources/legacy-test-bundle-url.xml @@ -0,0 +1,26 @@ +<?xml version="1.0"?> + +<project name="test-bundle" default="main" xmlns:rhq="antlib:org.rhq.bundle"> + + <rhq:bundle name="test" version="1"> + + <rhq:input-property name="X" /> + + <rhq:deployment-unit name="appserver"> + <rhq:url-file url="${rhq.test.url.dir}/subdir/test0.txt" 
replace="false" /> + <rhq:url-file url="${rhq.test.url.dir}/subdir/test1.txt" destinationFile="another/foo.txt" replace="false" /> + <rhq:url-file url="${rhq.test.url.dir}/subdir/test2.txt" destinationDir="second.dir" replace="true" /> + <rhq:url-archive url="${rhq.test.url.dir}/subdir/test.zip" exploded="false" /> + <rhq:url-archive url="${rhq.test.url.dir}/subdir/test-explode.zip" exploded="true" /> + <rhq:url-archive url="${rhq.test.url.dir}/subdir/test-replace.zip" exploded="false"> + rhq:replace + <rhq:fileset includes="template.txt"/> + </rhq:replace> + </rhq:url-archive> + </rhq:deployment-unit> + + </rhq:bundle> + + <target name="main"/> + +</project> \ No newline at end of file diff --git a/modules/common/ant-bundle/src/test/resources/legacy-test-bundle-v1.xml b/modules/common/ant-bundle/src/test/resources/legacy-test-bundle-v1.xml new file mode 100644 index 0000000..6fbcf33 --- /dev/null +++ b/modules/common/ant-bundle/src/test/resources/legacy-test-bundle-v1.xml @@ -0,0 +1,45 @@ +<?xml version="1.0"?> + +<project name="test-bundle" default="main" xmlns:rhq="antlib:org.rhq.bundle"> + + <rhq:bundle name="example.com (JBoss EAP 4.3)" version="1.0" + description="example.com corporate website hosted on JBoss EAP 4.3"> + + <rhq:input-property + name="listener.port" + description="This is where the product will listen for incoming messages" + required="true" + defaultValue="8080" + type="integer"/> + + <rhq:deployment-unit name="appserver" preinstallTarget="preinstall" postinstallTarget="postinstall"> + <rhq:system-service name="foo" scriptFile="foo-script" + configFile="foo-config" overwriteScript="true" + startLevels="3,4,5" startPriority="80" stopPriority="20" root="root"/> + <rhq:file name="test-v1.properties" destinationFile="subdir/test.properties" replace="true"/> + <rhq:archive name="file.zip"> + rhq:replace + <rhq:fileset includes="**/*.properties"/> + </rhq:replace> + </rhq:archive> + <!-- the files that should be ignored during upgrades --> + 
rhq:ignore + <rhq:fileset includes="*.log"/> + </rhq:ignore> + </rhq:deployment-unit> + + </rhq:bundle> + + <target name="main"/> + + <target name="preinstall"> + <echo>Deploying Test Bundle v1.0 to ${rhq.deploy.dir}...</echo> + <property name="preinstallTargetExecuted" value="1a"/> + </target> + + <target name="postinstall"> + <echo>Done deploying Test Bundle v1.0 to ${rhq.deploy.dir}.</echo> + <property name="postinstallTargetExecuted" value="1b"/> + </target> + +</project> diff --git a/modules/common/ant-bundle/src/test/resources/legacy-test-bundle-v2-noManageRootDir.xml b/modules/common/ant-bundle/src/test/resources/legacy-test-bundle-v2-noManageRootDir.xml new file mode 100644 index 0000000..defcdef --- /dev/null +++ b/modules/common/ant-bundle/src/test/resources/legacy-test-bundle-v2-noManageRootDir.xml @@ -0,0 +1,46 @@ +<?xml version="1.0"?> + +<project name="test-bundle" default="main" xmlns:rhq="antlib:org.rhq.bundle"> + + <rhq:bundle name="example.com (JBoss EAP 4.3)" version="2.5" + description="updated bundle"> + + <rhq:input-property + name="listener.port" + description="This is where the product will listen for incoming messages" + required="true" + defaultValue="9090" + type="integer"/> + + <rhq:deployment-unit name="appserver" preinstallTarget="preinstall" postinstallTarget="postinstall" + manageRootDir="false"> <!-- this is the only difference with legacy-test-bundle-v2.xml --> + <rhq:system-service name="foo" scriptFile="foo-script" + configFile="foo-config" overwriteScript="true" + startLevels="3,4,5" startPriority="80" stopPriority="20" root="root"/> + <rhq:file name="test-v2.properties" destinationFile="subdir/test.properties" replace="true"/> + <rhq:archive name="file.zip"> + rhq:replace + <rhq:fileset includes="**/*.properties"/> + </rhq:replace> + </rhq:archive> + <!-- the files that should be ignored during upgrades --> + rhq:ignore + <rhq:fileset includes="*.log"/> + </rhq:ignore> + </rhq:deployment-unit> + + </rhq:bundle> + + <target 
name="main"/> + + <target name="preinstall"> + <echo>Deploying Test Bundle v2.5 to ${rhq.deploy.dir}...</echo> + <property name="preinstallTargetExecuted" value="2a"/> + </target> + + <target name="postinstall"> + <echo>Done deploying Test Bundle v2.5 to ${rhq.deploy.dir}.</echo> + <property name="postinstallTargetExecuted" value="2b"/> + </target> + +</project> diff --git a/modules/common/ant-bundle/src/test/resources/legacy-test-bundle-v2.xml b/modules/common/ant-bundle/src/test/resources/legacy-test-bundle-v2.xml new file mode 100644 index 0000000..e625bc1 --- /dev/null +++ b/modules/common/ant-bundle/src/test/resources/legacy-test-bundle-v2.xml @@ -0,0 +1,45 @@ +<?xml version="1.0"?> + +<project name="test-bundle" default="main" xmlns:rhq="antlib:org.rhq.bundle"> + + <rhq:bundle name="example.com (JBoss EAP 4.3)" version="2.5" + description="updated bundle"> + + <rhq:input-property + name="listener.port" + description="This is where the product will listen for incoming messages" + required="true" + defaultValue="9090" + type="integer"/> + + <rhq:deployment-unit name="appserver" preinstallTarget="preinstall" postinstallTarget="postinstall"> + <rhq:system-service name="foo" scriptFile="foo-script" + configFile="foo-config" overwriteScript="true" + startLevels="3,4,5" startPriority="80" stopPriority="20" root="root"/> + <rhq:file name="test-v2.properties" destinationFile="subdir/test.properties" replace="true"/> + <rhq:archive name="file.zip"> + rhq:replace + <rhq:fileset includes="**/*.properties"/> + </rhq:replace> + </rhq:archive> + <!-- the files that should be ignored during upgrades --> + rhq:ignore + <rhq:fileset includes="*.log"/> + </rhq:ignore> + </rhq:deployment-unit> + + </rhq:bundle> + + <target name="main"/> + + <target name="preinstall"> + <echo>Deploying Test Bundle v2.5 to ${rhq.deploy.dir}...</echo> + <property name="preinstallTargetExecuted" value="2a"/> + </target> + + <target name="postinstall"> + <echo>Done deploying Test Bundle v2.5 to 
${rhq.deploy.dir}.</echo> + <property name="postinstallTargetExecuted" value="2b"/> + </target> + +</project> diff --git a/modules/common/ant-bundle/src/test/resources/test-bundle-audit.xml b/modules/common/ant-bundle/src/test/resources/test-bundle-audit.xml index 687c4ba..d7cfbff 100644 --- a/modules/common/ant-bundle/src/test/resources/test-bundle-audit.xml +++ b/modules/common/ant-bundle/src/test/resources/test-bundle-audit.xml @@ -1,12 +1,31 @@ <?xml version="1.0"?>
+<!-- + ~ RHQ Management Platform + ~ Copyright (C) 2013 Red Hat, Inc. + ~ All rights reserved. + ~ + ~ This program is free software; you can redistribute it and/or modify + ~ it under the terms of the GNU General Public License as published by + ~ the Free Software Foundation version 2 of the License. + ~ + ~ This program is distributed in the hope that it will be useful, + ~ but WITHOUT ANY WARRANTY; without even the implied warranty of + ~ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + ~ GNU General Public License for more details. + ~ + ~ You should have received a copy of the GNU General Public License + ~ along with this program; if not, write to the Free Software + ~ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + --> + <project name="test-bundle" default="main" xmlns:rhq="antlib:org.rhq.bundle">
<rhq:bundle name="audit-test" version="1">
<rhq:input-property name="listener.port" type="integer"/>
- <rhq:deployment-unit name="test" preinstallTarget="preinstall" postinstallTarget="postinstall"> + <rhq:deployment-unit name="test" preinstallTarget="preinstall" postinstallTarget="postinstall" compliance="full"> <rhq:file name="test-audit.properties" destinationFile="test-audit.properties" replace="true"/> </rhq:deployment-unit>
@@ -36,4 +55,4 @@ <rhq:audit status="SUCCESS" action="actionC" info="infoC" message="messageC" /> </target>
-</project> \ No newline at end of file +</project> diff --git a/modules/common/ant-bundle/src/test/resources/test-bundle-compressed-archives-with-replace.xml b/modules/common/ant-bundle/src/test/resources/test-bundle-compressed-archives-with-replace.xml index ea4255a..97bda67 100644 --- a/modules/common/ant-bundle/src/test/resources/test-bundle-compressed-archives-with-replace.xml +++ b/modules/common/ant-bundle/src/test/resources/test-bundle-compressed-archives-with-replace.xml @@ -1,12 +1,31 @@ <?xml version="1.0"?>
+<!-- + ~ RHQ Management Platform + ~ Copyright (C) 2013 Red Hat, Inc. + ~ All rights reserved. + ~ + ~ This program is free software; you can redistribute it and/or modify + ~ it under the terms of the GNU General Public License as published by + ~ the Free Software Foundation version 2 of the License. + ~ + ~ This program is distributed in the hope that it will be useful, + ~ but WITHOUT ANY WARRANTY; without even the implied warranty of + ~ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + ~ GNU General Public License for more details. + ~ + ~ You should have received a copy of the GNU General Public License + ~ along with this program; if not, write to the Free Software + ~ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + --> + <project name="test-bundle" default="main" xmlns:rhq="antlib:org.rhq.bundle">
<rhq:bundle name="test compressed archive files" version="1.0">
<rhq:input-property name="listener.port" type="integer"/>
- <rhq:deployment-unit name="appserver"> + <rhq:deployment-unit name="appserver" compliance="full"> <rhq:archive name="file.zip" exploded="false"> rhq:replace <rhq:fileset includes="**/*.foo"/> @@ -18,4 +37,4 @@ </rhq:bundle>
<target name="main"/> -</project> \ No newline at end of file +</project> diff --git a/modules/common/ant-bundle/src/test/resources/test-bundle-compressed-archives.xml b/modules/common/ant-bundle/src/test/resources/test-bundle-compressed-archives.xml index a35b6ac..c9c0e58 100644 --- a/modules/common/ant-bundle/src/test/resources/test-bundle-compressed-archives.xml +++ b/modules/common/ant-bundle/src/test/resources/test-bundle-compressed-archives.xml @@ -1,14 +1,33 @@ <?xml version="1.0"?>
+<!-- + ~ RHQ Management Platform + ~ Copyright (C) 2013 Red Hat, Inc. + ~ All rights reserved. + ~ + ~ This program is free software; you can redistribute it and/or modify + ~ it under the terms of the GNU General Public License as published by + ~ the Free Software Foundation version 2 of the License. + ~ + ~ This program is distributed in the hope that it will be useful, + ~ but WITHOUT ANY WARRANTY; without even the implied warranty of + ~ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + ~ GNU General Public License for more details. + ~ + ~ You should have received a copy of the GNU General Public License + ~ along with this program; if not, write to the Free Software + ~ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + --> + <project name="test-bundle" default="main" xmlns:rhq="antlib:org.rhq.bundle">
<rhq:bundle name="test compressed archive files" version="1.0">
- <rhq:deployment-unit name="appserver"> + <rhq:deployment-unit name="appserver" compliance="full"> <rhq:archive name="file.zip" exploded="false"/> </rhq:deployment-unit>
</rhq:bundle>
<target name="main"/> -</project> \ No newline at end of file +</project> diff --git a/modules/common/ant-bundle/src/test/resources/test-bundle-no-manage-root-dir-nor-compliance.xml b/modules/common/ant-bundle/src/test/resources/test-bundle-no-manage-root-dir-nor-compliance.xml new file mode 100644 index 0000000..1c51912 --- /dev/null +++ b/modules/common/ant-bundle/src/test/resources/test-bundle-no-manage-root-dir-nor-compliance.xml @@ -0,0 +1,64 @@ +<?xml version="1.0"?> + +<!-- + ~ RHQ Management Platform + ~ Copyright (C) 2013 Red Hat, Inc. + ~ All rights reserved. + ~ + ~ This program is free software; you can redistribute it and/or modify + ~ it under the terms of the GNU General Public License as published by + ~ the Free Software Foundation version 2 of the License. + ~ + ~ This program is distributed in the hope that it will be useful, + ~ but WITHOUT ANY WARRANTY; without even the implied warranty of + ~ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + ~ GNU General Public License for more details. + ~ + ~ You should have received a copy of the GNU General Public License + ~ along with this program; if not, write to the Free Software + ~ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ --> + +<project name="test-bundle" default="main" xmlns:rhq="antlib:org.rhq.bundle"> + + <rhq:bundle name="example.com (JBoss EAP 4.3)" version="1.0" + description="example.com corporate website hosted on JBoss EAP 4.3"> + + <rhq:input-property + name="listener.port" + description="This is where the product will listen for incoming messages" + required="true" + defaultValue="8080" + type="integer"/> + + <rhq:deployment-unit name="appserver" preinstallTarget="preinstall" postinstallTarget="postinstall"> + <rhq:system-service name="foo" scriptFile="foo-script" + configFile="foo-config" overwriteScript="true" + startLevels="3,4,5" startPriority="80" stopPriority="20" root="root"/> + <rhq:file name="test-v1.properties" destinationFile="subdir/test.properties" replace="true"/> + <rhq:archive name="file.zip"> + rhq:replace + <rhq:fileset includes="**/*.properties"/> + </rhq:replace> + </rhq:archive> + <!-- the files that should be ignored during upgrades --> + rhq:ignore + <rhq:fileset includes="*.log"/> + </rhq:ignore> + </rhq:deployment-unit> + + </rhq:bundle> + + <target name="main"/> + + <target name="preinstall"> + <echo>Deploying Test Bundle v1.0 to ${rhq.deploy.dir}...</echo> + <property name="preinstallTargetExecuted" value="1a"/> + </target> + + <target name="postinstall"> + <echo>Done deploying Test Bundle v1.0 to ${rhq.deploy.dir}.</echo> + <property name="postinstallTargetExecuted" value="1b"/> + </target> + +</project> diff --git a/modules/common/ant-bundle/src/test/resources/test-bundle-subdir.xml b/modules/common/ant-bundle/src/test/resources/test-bundle-subdir.xml index af3dbd2..6d3c36a 100644 --- a/modules/common/ant-bundle/src/test/resources/test-bundle-subdir.xml +++ b/modules/common/ant-bundle/src/test/resources/test-bundle-subdir.xml @@ -1,12 +1,31 @@ <?xml version="1.0"?>
+<!-- + ~ RHQ Management Platform + ~ Copyright (C) 2013 Red Hat, Inc. + ~ All rights reserved. + ~ + ~ This program is free software; you can redistribute it and/or modify + ~ it under the terms of the GNU General Public License as published by + ~ the Free Software Foundation version 2 of the License. + ~ + ~ This program is distributed in the hope that it will be useful, + ~ but WITHOUT ANY WARRANTY; without even the implied warranty of + ~ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + ~ GNU General Public License for more details. + ~ + ~ You should have received a copy of the GNU General Public License + ~ along with this program; if not, write to the Free Software + ~ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + --> + <project name="test-bundle" default="main" xmlns:rhq="antlib:org.rhq.bundle">
<rhq:bundle name="test" version="1">
<rhq:input-property name="X" />
- <rhq:deployment-unit name="appserver"> + <rhq:deployment-unit name="appserver" compliance="full"> <rhq:file name="subdir/test0.txt" replace="false" /> <rhq:file name="subdir/test1.txt" destinationFile="another/foo.txt" replace="false"/> <rhq:file name="subdir/test2.txt" destinationDir="second.dir" replace="false"/> @@ -23,4 +42,4 @@
<target name="main"/>
-</project> \ No newline at end of file +</project> diff --git a/modules/common/ant-bundle/src/test/resources/test-bundle-url.xml b/modules/common/ant-bundle/src/test/resources/test-bundle-url.xml index ebfdd81..578400d 100644 --- a/modules/common/ant-bundle/src/test/resources/test-bundle-url.xml +++ b/modules/common/ant-bundle/src/test/resources/test-bundle-url.xml @@ -1,12 +1,31 @@ <?xml version="1.0"?>
+<!-- + ~ RHQ Management Platform + ~ Copyright (C) 2013 Red Hat, Inc. + ~ All rights reserved. + ~ + ~ This program is free software; you can redistribute it and/or modify + ~ it under the terms of the GNU General Public License as published by + ~ the Free Software Foundation version 2 of the License. + ~ + ~ This program is distributed in the hope that it will be useful, + ~ but WITHOUT ANY WARRANTY; without even the implied warranty of + ~ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + ~ GNU General Public License for more details. + ~ + ~ You should have received a copy of the GNU General Public License + ~ along with this program; if not, write to the Free Software + ~ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + --> + <project name="test-bundle" default="main" xmlns:rhq="antlib:org.rhq.bundle">
<rhq:bundle name="test" version="1">
<rhq:input-property name="X" />
- <rhq:deployment-unit name="appserver"> + <rhq:deployment-unit name="appserver" compliance="full"> <rhq:url-file url="${rhq.test.url.dir}/subdir/test0.txt" replace="false" /> <rhq:url-file url="${rhq.test.url.dir}/subdir/test1.txt" destinationFile="another/foo.txt" replace="false" /> <rhq:url-file url="${rhq.test.url.dir}/subdir/test2.txt" destinationDir="second.dir" replace="true" /> @@ -23,4 +42,4 @@
<target name="main"/>
-</project> \ No newline at end of file +</project> diff --git a/modules/common/ant-bundle/src/test/resources/test-bundle-v1.xml b/modules/common/ant-bundle/src/test/resources/test-bundle-v1.xml index a6dcfec..2d38f1d 100644 --- a/modules/common/ant-bundle/src/test/resources/test-bundle-v1.xml +++ b/modules/common/ant-bundle/src/test/resources/test-bundle-v1.xml @@ -1,5 +1,24 @@ <?xml version="1.0"?>
+<!-- + ~ RHQ Management Platform + ~ Copyright (C) 2013 Red Hat, Inc. + ~ All rights reserved. + ~ + ~ This program is free software; you can redistribute it and/or modify + ~ it under the terms of the GNU General Public License as published by + ~ the Free Software Foundation version 2 of the License. + ~ + ~ This program is distributed in the hope that it will be useful, + ~ but WITHOUT ANY WARRANTY; without even the implied warranty of + ~ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + ~ GNU General Public License for more details. + ~ + ~ You should have received a copy of the GNU General Public License + ~ along with this program; if not, write to the Free Software + ~ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + --> + <project name="test-bundle" default="main" xmlns:rhq="antlib:org.rhq.bundle">
<rhq:bundle name="example.com (JBoss EAP 4.3)" version="1.0" @@ -12,7 +31,7 @@ defaultValue="8080" type="integer"/>
- <rhq:deployment-unit name="appserver" preinstallTarget="preinstall" postinstallTarget="postinstall"> + <rhq:deployment-unit name="appserver" preinstallTarget="preinstall" postinstallTarget="postinstall" compliance="full"> <rhq:system-service name="foo" scriptFile="foo-script" configFile="foo-config" overwriteScript="true" startLevels="3,4,5" startPriority="80" stopPriority="20" root="root"/> @@ -42,4 +61,4 @@ <property name="postinstallTargetExecuted" value="1b"/> </target>
-</project> \ No newline at end of file +</project> diff --git a/modules/common/ant-bundle/src/test/resources/test-bundle-v2-commonSubdirectories.xml b/modules/common/ant-bundle/src/test/resources/test-bundle-v2-commonSubdirectories.xml new file mode 100644 index 0000000..3a82a3d --- /dev/null +++ b/modules/common/ant-bundle/src/test/resources/test-bundle-v2-commonSubdirectories.xml @@ -0,0 +1,65 @@ +<?xml version="1.0"?> + +<!-- + ~ RHQ Management Platform + ~ Copyright (C) 2013 Red Hat, Inc. + ~ All rights reserved. + ~ + ~ This program is free software; you can redistribute it and/or modify + ~ it under the terms of the GNU General Public License as published by + ~ the Free Software Foundation version 2 of the License. + ~ + ~ This program is distributed in the hope that it will be useful, + ~ but WITHOUT ANY WARRANTY; without even the implied warranty of + ~ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + ~ GNU General Public License for more details. + ~ + ~ You should have received a copy of the GNU General Public License + ~ along with this program; if not, write to the Free Software + ~ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ --> + +<project name="test-bundle" default="main" xmlns:rhq="antlib:org.rhq.bundle"> + + <rhq:bundle name="example.com (JBoss EAP 4.3)" version="2.5" + description="updated bundle"> + + <rhq:input-property + name="listener.port" + description="This is where the product will listen for incoming messages" + required="true" + defaultValue="9090" + type="integer"/> + + <rhq:deployment-unit name="appserver" preinstallTarget="preinstall" postinstallTarget="postinstall" + compliance="commonDirectories"> <!-- this is the only difference with test-bundle-v2.xml --> + <rhq:system-service name="foo" scriptFile="foo-script" + configFile="foo-config" overwriteScript="true" + startLevels="3,4,5" startPriority="80" stopPriority="20" root="root"/> + <rhq:file name="test-v2.properties" destinationFile="subdir/test.properties" replace="true"/> + <rhq:archive name="file.zip"> + rhq:replace + <rhq:fileset includes="**/*.properties"/> + </rhq:replace> + </rhq:archive> + <!-- the files that should be ignored during upgrades --> + rhq:ignore + <rhq:fileset includes="*.log"/> + </rhq:ignore> + </rhq:deployment-unit> + + </rhq:bundle> + + <target name="main"/> + + <target name="preinstall"> + <echo>Deploying Test Bundle v2.5 to ${rhq.deploy.dir}...</echo> + <property name="preinstallTargetExecuted" value="2a"/> + </target> + + <target name="postinstall"> + <echo>Done deploying Test Bundle v2.5 to ${rhq.deploy.dir}.</echo> + <property name="postinstallTargetExecuted" value="2b"/> + </target> + +</project> diff --git a/modules/common/ant-bundle/src/test/resources/test-bundle-v2-noManageRootDir.xml b/modules/common/ant-bundle/src/test/resources/test-bundle-v2-noManageRootDir.xml deleted file mode 100644 index 0b07e9e..0000000 --- a/modules/common/ant-bundle/src/test/resources/test-bundle-v2-noManageRootDir.xml +++ /dev/null @@ -1,46 +0,0 @@ -<?xml version="1.0"?> - -<project name="test-bundle" default="main" xmlns:rhq="antlib:org.rhq.bundle"> - - <rhq:bundle name="example.com (JBoss EAP 
4.3)" version="2.5" - description="updated bundle"> - - <rhq:input-property - name="listener.port" - description="This is where the product will listen for incoming messages" - required="true" - defaultValue="9090" - type="integer"/> - - <rhq:deployment-unit name="appserver" preinstallTarget="preinstall" postinstallTarget="postinstall" - manageRootDir="false"> <!-- this is the only difference with test-bundle-v2.xml --> - <rhq:system-service name="foo" scriptFile="foo-script" - configFile="foo-config" overwriteScript="true" - startLevels="3,4,5" startPriority="80" stopPriority="20" root="root"/> - <rhq:file name="test-v2.properties" destinationFile="subdir/test.properties" replace="true"/> - <rhq:archive name="file.zip"> - rhq:replace - <rhq:fileset includes="**/*.properties"/> - </rhq:replace> - </rhq:archive> - <!-- the files that should be ignored during upgrades --> - rhq:ignore - <rhq:fileset includes="*.log"/> - </rhq:ignore> - </rhq:deployment-unit> - - </rhq:bundle> - - <target name="main"/> - - <target name="preinstall"> - <echo>Deploying Test Bundle v2.5 to ${rhq.deploy.dir}...</echo> - <property name="preinstallTargetExecuted" value="2a"/> - </target> - - <target name="postinstall"> - <echo>Done deploying Test Bundle v2.5 to ${rhq.deploy.dir}.</echo> - <property name="postinstallTargetExecuted" value="2b"/> - </target> - -</project> \ No newline at end of file diff --git a/modules/common/ant-bundle/src/test/resources/test-bundle-v2.xml b/modules/common/ant-bundle/src/test/resources/test-bundle-v2.xml index 1bbe0d1..4d40213 100644 --- a/modules/common/ant-bundle/src/test/resources/test-bundle-v2.xml +++ b/modules/common/ant-bundle/src/test/resources/test-bundle-v2.xml @@ -1,5 +1,24 @@ <?xml version="1.0"?>
+<!-- + ~ RHQ Management Platform + ~ Copyright (C) 2013 Red Hat, Inc. + ~ All rights reserved. + ~ + ~ This program is free software; you can redistribute it and/or modify + ~ it under the terms of the GNU General Public License as published by + ~ the Free Software Foundation version 2 of the License. + ~ + ~ This program is distributed in the hope that it will be useful, + ~ but WITHOUT ANY WARRANTY; without even the implied warranty of + ~ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + ~ GNU General Public License for more details. + ~ + ~ You should have received a copy of the GNU General Public License + ~ along with this program; if not, write to the Free Software + ~ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + --> + <project name="test-bundle" default="main" xmlns:rhq="antlib:org.rhq.bundle">
<rhq:bundle name="example.com (JBoss EAP 4.3)" version="2.5" @@ -12,7 +31,7 @@ defaultValue="9090" type="integer"/>
- <rhq:deployment-unit name="appserver" preinstallTarget="preinstall" postinstallTarget="postinstall"> + <rhq:deployment-unit name="appserver" preinstallTarget="preinstall" postinstallTarget="postinstall" compliance="full"> <rhq:system-service name="foo" scriptFile="foo-script" configFile="foo-config" overwriteScript="true" startLevels="3,4,5" startPriority="80" stopPriority="20" root="root"/> @@ -42,4 +61,4 @@ <property name="postinstallTargetExecuted" value="2b"/> </target>
-</project> \ No newline at end of file +</project> diff --git a/modules/common/ant-bundle/src/test/resources/test-bundle-with-manage-root-dir.xml b/modules/common/ant-bundle/src/test/resources/test-bundle-with-manage-root-dir.xml new file mode 100644 index 0000000..68fdcc0 --- /dev/null +++ b/modules/common/ant-bundle/src/test/resources/test-bundle-with-manage-root-dir.xml @@ -0,0 +1,64 @@ +<?xml version="1.0"?> + +<!-- + ~ RHQ Management Platform + ~ Copyright (C) 2013 Red Hat, Inc. + ~ All rights reserved. + ~ + ~ This program is free software; you can redistribute it and/or modify + ~ it under the terms of the GNU General Public License as published by + ~ the Free Software Foundation version 2 of the License. + ~ + ~ This program is distributed in the hope that it will be useful, + ~ but WITHOUT ANY WARRANTY; without even the implied warranty of + ~ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + ~ GNU General Public License for more details. + ~ + ~ You should have received a copy of the GNU General Public License + ~ along with this program; if not, write to the Free Software + ~ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ --> + +<project name="test-bundle" default="main" xmlns:rhq="antlib:org.rhq.bundle"> + + <rhq:bundle name="example.com (JBoss EAP 4.3)" version="1.0" + description="example.com corporate website hosted on JBoss EAP 4.3"> + + <rhq:input-property + name="listener.port" + description="This is where the product will listen for incoming messages" + required="true" + defaultValue="8080" + type="integer"/> + + <rhq:deployment-unit name="appserver" preinstallTarget="preinstall" postinstallTarget="postinstall" manageRootDir="false"> + <rhq:system-service name="foo" scriptFile="foo-script" + configFile="foo-config" overwriteScript="true" + startLevels="3,4,5" startPriority="80" stopPriority="20" root="root"/> + <rhq:file name="test-v1.properties" destinationFile="subdir/test.properties" replace="true"/> + <rhq:archive name="file.zip"> + rhq:replace + <rhq:fileset includes="**/*.properties"/> + </rhq:replace> + </rhq:archive> + <!-- the files that should be ignored during upgrades --> + rhq:ignore + <rhq:fileset includes="*.log"/> + </rhq:ignore> + </rhq:deployment-unit> + + </rhq:bundle> + + <target name="main"/> + + <target name="preinstall"> + <echo>Deploying Test Bundle v1.0 to ${rhq.deploy.dir}...</echo> + <property name="preinstallTargetExecuted" value="1a"/> + </target> + + <target name="postinstall"> + <echo>Done deploying Test Bundle v1.0 to ${rhq.deploy.dir}.</echo> + <property name="postinstallTargetExecuted" value="1b"/> + </target> + +</project> diff --git a/modules/common/filetemplate-bundle/src/main/java/org/rhq/bundle/filetemplate/recipe/BundleRecipeCommand.java b/modules/common/filetemplate-bundle/src/main/java/org/rhq/bundle/filetemplate/recipe/BundleRecipeCommand.java index fc65162..7fe02b7 100644 --- a/modules/common/filetemplate-bundle/src/main/java/org/rhq/bundle/filetemplate/recipe/BundleRecipeCommand.java +++ b/modules/common/filetemplate-bundle/src/main/java/org/rhq/bundle/filetemplate/recipe/BundleRecipeCommand.java @@ -27,6 +27,7 @@ import 
gnu.getopt.Getopt; import gnu.getopt.LongOpt;
import org.rhq.core.util.updater.DeploymentProperties; +import org.rhq.core.util.updater.DestinationComplianceMode;
public class BundleRecipeCommand implements RecipeCommand {
@@ -100,6 +101,9 @@ public class BundleRecipeCommand implements RecipeCommand { } props.setDeploymentId(0);
+ //file templates don't support destination compliance, so let's just provide something dummy + props.setDestinationCompliance(DestinationComplianceMode.full); + return; } } diff --git a/modules/core/util/src/main/java/org/rhq/core/util/updater/Deployer.java b/modules/core/util/src/main/java/org/rhq/core/util/updater/Deployer.java index a5eadea..0f1f266 100644 --- a/modules/core/util/src/main/java/org/rhq/core/util/updater/Deployer.java +++ b/modules/core/util/src/main/java/org/rhq/core/util/updater/Deployer.java @@ -319,12 +319,15 @@ public class Deployer { }
private FileHashcodeMap performInitialDeployment(DeployDifferences diff, boolean dryRun) throws Exception { - if (this.deploymentData.isManageRootDir()) { + switch (deploymentData.getDeploymentProps().getDestinationCompliance()) { + case full: { // We are to fully manage the deployment dir, so we need to delete everything we find there. // Any old files do not belong here - only our bundle files should live here now. File dir = this.deploymentData.getDestinationDir(); backupAndPurgeDirectory(diff, dir, dryRun, null); - } else { + } + break; + case filesAndDirectories: { // We are not to manage files in the root deployment directory. However, we always manage // subdirectories that the bundle wants to deploy. So look in subdirectories that our bundles // plan to use and remove files that found there. @@ -336,6 +339,10 @@ public class Deployer { backupAndPurgeDirectory(diff, dir, dryRun, managedSubdir.getPath() + File.separatorChar); } } + break; + default: + throw new IllegalStateException("Unsupported destination compliance mode."); + }
FileHashcodeMap newFileHashcodeMap = extractZipAndRawFiles(new HashMap<String, String>(0), diff, dryRun);
@@ -359,10 +366,11 @@ public class Deployer { // * if a current file is ignored in the latest rescan // * if a file is realized on the filesystem before its stored on the file system // * if a current file is backed up - + boolean reportNewRootFilesAsNew = + this.deploymentData.getDeploymentProps().getDestinationCompliance() == DestinationComplianceMode.full; FileHashcodeMap original = this.deploymentsMetadata.getCurrentDeploymentFileHashcodes(); ChangesFileHashcodeMap current = original.rescan(this.deploymentData.getDestinationDir(), - this.deploymentData.getIgnoreRegex(), this.deploymentData.isManageRootDir()); + this.deploymentData.getIgnoreRegex(), reportNewRootFilesAsNew); FileHashcodeMap newFiles = getNewDeploymentFileHashcodeMap();
if (current.getUnknownContent() != null) { diff --git a/modules/core/util/src/main/java/org/rhq/core/util/updater/DeploymentData.java b/modules/core/util/src/main/java/org/rhq/core/util/updater/DeploymentData.java index 2431332..fee90bc 100644 --- a/modules/core/util/src/main/java/org/rhq/core/util/updater/DeploymentData.java +++ b/modules/core/util/src/main/java/org/rhq/core/util/updater/DeploymentData.java @@ -50,13 +50,16 @@ public class DeploymentData { private final Set<File> rawFilesToRealize; private final TemplateEngine templateEngine; private final Pattern ignoreRegex; - private final boolean manageRootDir; private final Map<File, Boolean> zipsExploded;
/** * Constructors that prepares this object with the data that is necessary in order to deploy archive/file content * a destination directory. - * + * + * Note that as of RHQ 4.9.0 the {@code manageRootDir} attribute actually writes through to the similar attribute + * in {@code deploymentProps}. It was previously possible for {@link #isManageRootDir()} to have different value + * from {@link org.rhq.core.util.updater.DeploymentProperties#getManageRootDir()} on the {@code deploymentProps}. + * * @param deploymentProps metadata about this deployment * @param zipFiles the archives containing the content to be deployed * @param rawFiles files that are to be copied into the destination directory - the keys are the current @@ -85,11 +88,62 @@ public class DeploymentData { * @param zipsExploded if not <code>null</code>, this is a map keyed on zip files whose values indicate * if the zips should be exploded (true) or remain compressed after the deployment * is finished (false). If a zip file is not found in this map, true is the default. + * + * @deprecated The {@code manageRootDir} parameter is deprecated and this constructor should not be used. The need + * for that parameter was superseded by the {@link org.rhq.core.util.updater.DeploymentProperties#getDestinationCompliance()} + * property. */ + @Deprecated public DeploymentData(DeploymentProperties deploymentProps, Set<File> zipFiles, Map<File, File> rawFiles, File sourceDir, File destinationDir, Map<File, Pattern> zipEntriesToRealizeRegex, Set<File> rawFilesToRealize, TemplateEngine templateEngine, Pattern ignoreRegex, boolean manageRootDir, Map<File, Boolean> zipsExploded) {
+ this(deploymentProps, zipFiles, rawFiles, sourceDir, destinationDir, zipEntriesToRealizeRegex, + rawFilesToRealize, + templateEngine, ignoreRegex, zipsExploded); + + deploymentProps.setManageRootDir(manageRootDir); + } + + /** + * Constructors that prepares this object with the data that is necessary in order to deploy archive/file content + * a destination directory. + * + * @param deploymentProps metadata about this deployment + * @param zipFiles the archives containing the content to be deployed + * @param rawFiles files that are to be copied into the destination directory - the keys are the + * current + * locations of the files, the values are where the files should be copied (the + * values may be relative + * in which case they are relative to destDir and can have subdirectories and/or a + * different filename + * than what the file is named currently) + * @param destinationDir the root directory where the content is to be deployed + * @param sourceDir the root directory where the source files (zips and raw files) are located + * @param zipEntriesToRealizeRegex the patterns of files (whose paths are relative to destDir) that + * must have replacement variables within them replaced with values + * obtained via the given template engine. The key is the name of the zip file + * that the regex must be applied to - in other words, the regex value is only + * applied + * to relative file names as found in their associated zip file. 
+ * @param rawFilesToRealize identifies the raw files that need to be realized; note that each item in this + * set + * must match a key to a <code>rawFiles</code> entry + * @param templateEngine if one or more filesToRealize are specified, this template engine is used to + * determine + * the values that should replace all replacement variables found in those files + * @param ignoreRegex the files/directories to ignore when updating an existing deployment + * @param zipsExploded if not <code>null</code>, this is a map keyed on zip files whose values indicate + * if the zips should be exploded (true) or remain compressed after the deployment + * is finished (false). If a zip file is not found in this map, true is the + * default. + * + * @since 4.9.0 + */ + public DeploymentData(DeploymentProperties deploymentProps, Set<File> zipFiles, Map<File, File> rawFiles, + File sourceDir, File destinationDir, Map<File, Pattern> zipEntriesToRealizeRegex, Set<File> rawFilesToRealize, + TemplateEngine templateEngine, Pattern ignoreRegex, Map<File, Boolean> zipsExploded) { + if (deploymentProps == null) { throw new IllegalArgumentException("deploymentProps == null"); } @@ -130,7 +184,6 @@ public class DeploymentData {
this.sourceDir = sourceDir; this.ignoreRegex = ignoreRegex; - this.manageRootDir = manageRootDir; this.zipsExploded = zipsExploded;
// if there is nothing to realize or we have no template engine to obtain replacement values, then we null things out @@ -157,7 +210,8 @@ public class DeploymentData { File rawFile = entry.getValue(); String rawFilePath = rawFile.getPath();
- boolean doubledot = rawFilePath.replace('\\', '/').matches(".*((/\\.\\.)|(\\.\\./)).*"); // finds "/.." or "../" in the string + boolean doubledot = rawFilePath.replace('\\', '/') + .matches(".*((/\\.\\.)|(\\.\\./)).*"); // finds "/.." or "../" in the string
if (doubledot) { File fileToNormalize; @@ -171,7 +225,8 @@ public class DeploymentData {
// determine if the windows rawFile relative path specified a drive (e.g. C:foobar.txt) StringBuilder rawFilePathBuilder = new StringBuilder(rawFilePath); - String rawFileDriveLetter = FileUtil.stripDriveLetter(rawFilePathBuilder); // rawFilePathBuilder now has drive letter stripped + String rawFileDriveLetter = FileUtil + .stripDriveLetter(rawFilePathBuilder); // rawFilePathBuilder now has drive letter stripped
// determine what, if any, drive letter is specified in the destination directory StringBuilder destDirAbsPathBuilder = new StringBuilder(this.destinationDir.getAbsolutePath()); @@ -197,7 +252,8 @@ public class DeploymentData { // we can keep rawFile path relative, but we need to normalize out the ".." paths String baseDir = this.destinationDir.getAbsolutePath(); String absRawFilePath = fileToNormalize.getAbsolutePath(); - String relativePath = absRawFilePath.substring(baseDir.length() + 1); // should always return a valid path; if not, let it throw exception (which likely means there is a bug here) + String relativePath = absRawFilePath.substring(baseDir.length() + + 1); // should always return a valid path; if not, let it throw exception (which likely means there is a bug here) entry.setValue(new File(relativePath)); } else { // raw file path has ".." such that the file is really above destination dir - use an absolute, canonical path @@ -205,8 +261,6 @@ public class DeploymentData { } } } - - return; }
private static File getNormalizedFile(File fileToNormalize) { @@ -249,8 +303,14 @@ public class DeploymentData { return ignoreRegex; }
+ /** + * As of RHQ 4.9.0, this calls {@link #getDeploymentProps()}.{@link DeploymentProperties#getManageRootDir() getManageRootDir()} + * + * @deprecated use {@link #getDeploymentProps()}.{@link org.rhq.core.util.updater.DeploymentProperties#getDestinationCompliance() getDestinationCompliance()}. + */ + @Deprecated public boolean isManageRootDir() { - return manageRootDir; + return deploymentProps.getManageRootDir(); }
public Map<File, Boolean> getZipsExploded() { diff --git a/modules/core/util/src/main/java/org/rhq/core/util/updater/DeploymentProperties.java b/modules/core/util/src/main/java/org/rhq/core/util/updater/DeploymentProperties.java index 3553413..eb7aff4 100644 --- a/modules/core/util/src/main/java/org/rhq/core/util/updater/DeploymentProperties.java +++ b/modules/core/util/src/main/java/org/rhq/core/util/updater/DeploymentProperties.java @@ -39,8 +39,16 @@ public class DeploymentProperties extends Properties { private static final String BUNDLE_NAME = "bundle.name"; private static final String BUNDLE_VERSION = "bundle.version"; private static final String BUNDLE_DESCRIPTION = "bundle.description"; + //note that this really does not make sense as a generic deployment property once we support multiple deployment + //units in a bundle. + private static final String DESTINATION_COMPLIANCE = "bundle.destination.compliance";
// optional properties + + /** + * @deprecated superseded by destination compliance + */ + @Deprecated private static final String MANAGE_ROOT_DIR = "manage.root.dir";
public static DeploymentProperties loadFromFile(File file) throws Exception { @@ -51,6 +59,13 @@ public class DeploymentProperties extends Properties { } finally { is.close(); } + + //Backwards compatibility handling - manageRootDir wasn't required but compliance is.. We need to make the + //previously valid files valid now, too, with the original behavior + if (props.get(DESTINATION_COMPLIANCE) == null) { + props.setDestinationCompliance(DestinationComplianceMode.BACKWARDS_COMPATIBLE_DEFAULT); + } + props.validate(); return props; } @@ -71,13 +86,32 @@ public class DeploymentProperties extends Properties { * @param bundleName see {@link #getBundleName()} * @param bundleVersion see {@link #getBundleVersion()} * @param description see {@link #getDescription()} + * + * @deprecated use {@link #DeploymentProperties(int, String, String, String, DestinationComplianceMode)}. + * This constructor sets the compliance mode to {@link DestinationComplianceMode#full}. */ + @Deprecated public DeploymentProperties(int deploymentId, String bundleName, String bundleVersion, String description) { - super(); + this(deploymentId, bundleName, bundleVersion, description, DestinationComplianceMode.full); + } + + /** + * Convenience constructor whose parameters are all the required values that + * this object needs. 
+ * + * @param deploymentId see {@link #getDeploymentId()} + * @param bundleName see {@link #getBundleName()} + * @param bundleVersion see {@link #getBundleVersion()} + * @param description see {@link #getDescription()} + * @param destinationCompliance see {@link #getDestinationCompliance()} + */ + public DeploymentProperties(int deploymentId, String bundleName, String bundleVersion, String description, DestinationComplianceMode destinationCompliance) { setDeploymentId(deploymentId); setBundleName(bundleName); setBundleVersion(bundleVersion); setDescription(description); + setDestinationCompliance(destinationCompliance); + try { validate(); } catch (Exception e) { @@ -119,6 +153,7 @@ public class DeploymentProperties extends Properties { getBundleName(); getBundleVersion(); getDescription(); + getDestinationCompliance(); } catch (Exception e) { throw new Exception("Deployment properties are invalid: " + e.getMessage()); } @@ -196,18 +231,76 @@ public class DeploymentProperties extends Properties { }
/** + * Note that as of RHQ 4.9.0, this attribute is deprecated. + * There is an attempt made to handle reading the old version of the attribute: + * <ol> + * <li>if "bundle.destination.compliance" attribute is set, base the value on it. If the compliance is "full" + * this method returns true, otherwise it returns false.</li> + * <li>if "manage.root.dir" attribute is set, base the return value on it (this handles the previous + * behavior).</li> + * <li>if none of the above attributes is set, return the default value of the deprecated manageRootDir attribute, + * which is true.</li> + * </ol> + * * @return the flag to indicate if the entire root directory content is to be managed. * If there is no property, this method returns a default of <code>true</code> + * + * @deprecated use {@link #getDestinationCompliance()} */ + @Deprecated public boolean getManageRootDir() { - String str = getProperty(MANAGE_ROOT_DIR); - if (str == null) { - return true; + DestinationComplianceMode mode = getDestinationComplianceNoException(); + if (mode == null) { + String str = getProperty(MANAGE_ROOT_DIR); + if (str == null) { + return true; + } + return Boolean.parseBoolean(str); } - return Boolean.parseBoolean(str); + + return mode == DestinationComplianceMode.full; }
+ /** + * As of RHQ 4.9.0, this is equivalent to {@link #setDestinationCompliance(DestinationComplianceMode) + * setDestinationCompliance(willManageRootDir ? DestinationComplianceMode.full : + * DestinationComplianceMode.filesAndDirectories)}. + * + * @param willManageRootDir whether to manage the root directory + * + * @deprecated use {@link #setDestinationCompliance(DestinationComplianceMode)} instead + */ + @Deprecated public void setManageRootDir(boolean willManageRootDir) { - setProperty(MANAGE_ROOT_DIR, Boolean.toString(willManageRootDir)); + setDestinationCompliance( + willManageRootDir ? DestinationComplianceMode.full : DestinationComplianceMode.filesAndDirectories); + } + + /** + * Returns the compliance mode of the destination. This is a required attribute. + * + * @since 4.9.0 + */ + public DestinationComplianceMode getDestinationCompliance() { + DestinationComplianceMode mode = getDestinationComplianceNoException(); + if (mode == null) { + throw new IllegalStateException("Destination compliance not specified"); + } + + return mode; + } + + public void setDestinationCompliance(DestinationComplianceMode compliance) { + String str = compliance == null ? null : compliance.name(); + setProperty(DESTINATION_COMPLIANCE, str); + } + + private DestinationComplianceMode getDestinationComplianceNoException() { + String str = getProperty(DESTINATION_COMPLIANCE); + if (str == null) { + return null; + } + + return Enum.valueOf(DestinationComplianceMode.class, str); } } diff --git a/modules/core/util/src/main/java/org/rhq/core/util/updater/DestinationComplianceMode.java b/modules/core/util/src/main/java/org/rhq/core/util/updater/DestinationComplianceMode.java new file mode 100644 index 0000000..8ddc7d8 --- /dev/null +++ b/modules/core/util/src/main/java/org/rhq/core/util/updater/DestinationComplianceMode.java @@ -0,0 +1,87 @@ +/* + * RHQ Management Platform + * Copyright (C) 2013 Red Hat, Inc. + * All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +package org.rhq.core.util.updater; + +/** + * Lists the compliance modes of the bundle deployment destinations. See the comments at the individual enum elements + * to see what they mean. + * + * @author Lukas Krejci + * @since 4.9 + */ +public enum DestinationComplianceMode { + //NOTE: the below violation of our coding guidelines is because of ANT's requirement for direct usage of enum's + //constant names in the build files. Because this enum is used generally in all bundle handlers we need to ensure + //it works for all of them. For readability reasons in ANT bundle recipes, I opted for breaking our guidelines here + //instead of creating some kind of 'bridge' ant-specific enum. + + /** + * The full compliance means that the deployment destination is completely wiped before the bundle contents are + * deployed into it. In another words the destination contains no other files than those contained in the bundle + * and + * is therefore in full compliance with the bundle. + */ + full, + + /** + * This compliance mode makes sure that files and directories that are NOT contained in the bundle are kept in the + * destination directory. However the contents of files <b>and directories</b> that ARE present in the bundle are + * made completely compliant with the bundle. 
+ */ + filesAndDirectories + + //NOTE: the below two modes are going to be supported in the future, but NOT as of RHQ 4.9.0 */ + + /** + * This compliance mode means that the root directory of the deployment will only contain files and directories from + * the bundle. The content of the directories is not required to be compliant with the bundle - i.e. the directories + * and files "under" some directory, that already existed in the deployment, are kept. + */ + //, rootDirectoryAndFiles + + /** + * This compliance mode means that all files from a bundle is copied into the deployment (preserving directory + * structure) (i.e. such files are compliant with the bundle). All other contents of the deployment directory is + * kept intact (i.e. this is the RPM-like behavior). + */ + //, files + + ; + + /** + * This is the default compliance mode to be used in the legacy bundle recipes which do not explicitly set neither + * the compliance nor the legacy {@code manageRootDir} attribute. + */ + public static final DestinationComplianceMode BACKWARDS_COMPATIBLE_DEFAULT = full; + + /** + * Use this method to get either the supplied compliance mode or the {@link #BACKWARDS_COMPATIBLE_DEFAULT default} + * compliance. + * <p/> + * Only use this method if you need to handle the legacy recipes. + * + * @param compliance the compliance to return or null if not known + * + * @return the supplied {@code compliance} or the default compliance mode, never null. + */ + public static DestinationComplianceMode instanceOrDefault(DestinationComplianceMode compliance) { + return compliance == null ? 
BACKWARDS_COMPATIBLE_DEFAULT : compliance; + } +} diff --git a/modules/core/util/src/test/java/org/rhq/core/util/updater/DeploymentPropertiesTest.java b/modules/core/util/src/test/java/org/rhq/core/util/updater/DeploymentPropertiesTest.java index dcdac65..e72bf23 100644 --- a/modules/core/util/src/test/java/org/rhq/core/util/updater/DeploymentPropertiesTest.java +++ b/modules/core/util/src/test/java/org/rhq/core/util/updater/DeploymentPropertiesTest.java @@ -34,6 +34,8 @@ public class DeploymentPropertiesTest { props.setBundleName("test-bundle-name"); props.setBundleVersion("1.0"); props.setDescription("This is a description\nof a bundle"); + props.setDestinationCompliance(DestinationComplianceMode.full); + File tmpFile = File.createTempFile("deploymentPropertiesTest", ".properties"); try { props.saveToFile(tmpFile); @@ -83,6 +85,13 @@ public class DeploymentPropertiesTest { // this is expected and ok }
+ try { + props.setDestinationCompliance(null); + assert false : "Should have thrown an exception due to null value"; + } catch (Exception ok) { + // this is expected and ok + } + File tmpFile = File.createTempFile("deploymentPropertiesTest", ".properties"); try { try { @@ -109,6 +118,14 @@ public class DeploymentPropertiesTest { }
props.setBundleVersion("1"); + try { + props.saveToFile(tmpFile); + assert false : "Should have thrown an exception since it was not valid"; + } catch (Exception ok) { + // this is expected and ok + } + + props.setDestinationCompliance(DestinationComplianceMode.filesAndDirectories);
// we set all properties we need, it should be valid and we should be able to save it now props.saveToFile(tmpFile); diff --git a/modules/core/util/src/test/java/org/rhq/core/util/updater/DeploymentsMetadataTest.java b/modules/core/util/src/test/java/org/rhq/core/util/updater/DeploymentsMetadataTest.java index 6145837..7fc23b8 100644 --- a/modules/core/util/src/test/java/org/rhq/core/util/updater/DeploymentsMetadataTest.java +++ b/modules/core/util/src/test/java/org/rhq/core/util/updater/DeploymentsMetadataTest.java @@ -45,6 +45,7 @@ public class DeploymentsMetadataTest { deploymentProps.setBundleName("test-bundle-name"); deploymentProps.setBundleVersion("1.0"); deploymentProps.setDescription("test bundle description"); + deploymentProps.setDestinationCompliance(DestinationComplianceMode.full); FileHashcodeMap map = metadata.snapshotLiveDeployment(deploymentProps, null, null); assert metadata.isManaged() : "this should be managed now : " + metadata; assert map.size() == 5 : map; // there are 5 files in our test bundle zip diff --git a/modules/core/util/src/test/java/org/rhq/core/util/updater/ManageRootDirTest.java b/modules/core/util/src/test/java/org/rhq/core/util/updater/ManageRootDirTest.java index 4ef558b..1e7bbce 100644 --- a/modules/core/util/src/test/java/org/rhq/core/util/updater/ManageRootDirTest.java +++ b/modules/core/util/src/test/java/org/rhq/core/util/updater/ManageRootDirTest.java @@ -107,7 +107,7 @@ public class ManageRootDirTest { this.originalFileHashcodeMap = deployer.deploy(null); this.currentFile = new File(deployDir, originalFileName);
- this.newDeployProps = new DeploymentProperties(2, "simple", "2.0", "new test deployment"); + this.newDeployProps = new DeploymentProperties(2, "simple", "2.0", "new test deployment", DestinationComplianceMode.filesAndDirectories); this.diff = new DeployDifferences(); this.metadata = new DeploymentsMetadata(this.deployDir);
@@ -252,7 +252,7 @@ public class ManageRootDirTest { assert unrelated2.exists() : "the deployment removed unrelated file2";
// deploy new content - this.newDeployProps = new DeploymentProperties(2, "simple", "2.0", "new test deployment"); + this.newDeployProps = new DeploymentProperties(2, "simple", "2.0", "new test deployment", DestinationComplianceMode.filesAndDirectories); this.diff = new DeployDifferences(); this.metadata = new DeploymentsMetadata(this.deployDir); String newFileName1 = "new-file1.txt"; diff --git a/modules/enterprise/server/itests-2/pom.xml b/modules/enterprise/server/itests-2/pom.xml index a3e9c7b..63eb166 100644 --- a/modules/enterprise/server/itests-2/pom.xml +++ b/modules/enterprise/server/itests-2/pom.xml @@ -21,10 +21,11 @@ </scm>
<properties> + <rhq.ant-bundle.serverplugin.path>${settings.localRepository}/org/rhq/rhq-serverplugin-ant-bundle/${project.version}/rhq-serverplugin-ant-bundle-${project.version}.jar</rhq.ant-bundle.serverplugin.path> <jboss.zip>${settings.localRepository}/org/jboss/as/jboss-as-dist/${jboss.version}/jboss-as-dist-${jboss.version}.zip</jboss.zip> <jboss.unzip.location>${basedir}/target/${jboss.eap.version}</jboss.unzip.location> <jboss.vm.args>-Xmx700m -XX:MaxPermSize=200m -Djava.security.manager - -Djava.security.policy==${jboss.unzip.location}/security.policy</jboss.vm.args> + -Djava.security.policy==${jboss.unzip.location}/security.policy -Drhq.ant-bundle.serverplugin.path=${rhq.ant-bundle.serverplugin.path}</jboss.vm.args> <jboss.vm.args.debug></jboss.vm.args.debug> <!-- -Ditest.debug can override this, see below --> <!-- These properties guide which datasources are used for integration tests. Only one should be true. The default is Postgres, specify -Pitest.oracle to use Oracle --> @@ -604,6 +605,13 @@ <!-- Note, the test dep ordering can be important. Maven orders the [test] classpath in the order listed in the pom. -->
<dependency> + <groupId>org.rhq</groupId> + <artifactId>rhq-serverplugin-ant-bundle</artifactId> + <version>${project.version}</version> + <scope>test</scope> + </dependency> + + <dependency> <groupId>org.jboss.as</groupId> <artifactId>jboss-as-dist</artifactId> <type>zip</type> diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/BundleManagerBeanTest.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/BundleManagerBeanTest.java index 2085a4d..cfb5cc1 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/BundleManagerBeanTest.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/BundleManagerBeanTest.java @@ -120,6 +120,7 @@ public class BundleManagerBeanTest extends AbstractEJB3Test { private static final boolean DISABLED = false;
private TestBundleServerPluginService ps; + private TestBundlePluginComponent bpc; private MasterServerPluginContainer pc; private Subject overlord; TestServerCommunicationsService agentServiceContainer; @@ -129,7 +130,8 @@ public class BundleManagerBeanTest extends AbstractEJB3Test { agentServiceContainer = prepareForTestAgents(); agentServiceContainer.bundleService = new TestAgentClient(null, agentServiceContainer);
- this.ps = new TestBundleServerPluginService(getTempDir()); + this.bpc = new TestBundlePluginComponent(); + this.ps = new TestBundleServerPluginService(getTempDir(), bpc); prepareCustomServerPluginService(this.ps); bundleManager = LookupUtil.getBundleManager(); resourceManager = LookupUtil.getResourceManager(); @@ -368,11 +370,11 @@ public class BundleManagerBeanTest extends AbstractEJB3Test { BundleType bt1 = createBundleType("one");
// prepare our mock bundle PC - ps.parseRecipe_returnValue = new RecipeParseResults(bundleMetadata, configDef, new HashSet<String>( + bpc.parseRecipe_returnValue = new RecipeParseResults(bundleMetadata, configDef, new HashSet<String>( bundleFiles.keySet())); - ps.processBundleDistributionFile_returnValue = new BundleDistributionInfo(recipe, - ps.parseRecipe_returnValue, bundleFiles); - ps.processBundleDistributionFile_returnValue.setBundleTypeName(bt1.getName()); + bpc.processBundleDistributionFile_returnValue = new BundleDistributionInfo(recipe, + bpc.parseRecipe_returnValue, bundleFiles); + bpc.processBundleDistributionFile_returnValue.setBundleTypeName(bt1.getName());
// now ask the SLSB to persist our bundle data given our mock distribution BundleVersion bv1 = bundleManager.createBundleVersionViaURL(overlord, bundleDistroFile.toURI().toURL() diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/TestBundlePluginComponent.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/TestBundlePluginComponent.java new file mode 100644 index 0000000..269ee73 --- /dev/null +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/TestBundlePluginComponent.java @@ -0,0 +1,84 @@ +/* + * RHQ Management Platform + * Copyright (C) 2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +package org.rhq.enterprise.server.bundle; + +import java.io.File; +import java.util.HashSet; +import java.util.Set; + +import org.rhq.core.domain.configuration.definition.ConfigurationDefinition; +import org.rhq.core.domain.configuration.definition.PropertyDefinitionSimple; +import org.rhq.core.domain.configuration.definition.PropertySimpleType; +import org.rhq.core.util.updater.DeploymentProperties; +import org.rhq.enterprise.server.plugin.pc.bundle.BundleServerPluginFacet; +import org.rhq.enterprise.server.plugin.pc.bundle.UnknownRecipeException; +import org.rhq.enterprise.server.test.AbstractEJB3Test; + +/** + * @author Lukas Krejci + */ +public class TestBundlePluginComponent implements BundleServerPluginFacet { + public RecipeParseResults parseRecipe_returnValue; + public BundleDistributionInfo processBundleDistributionFile_returnValue; + + @Override + public RecipeParseResults parseRecipe(String recipe) throws UnknownRecipeException, Exception { + + if (parseRecipe_returnValue != null) { + return parseRecipe_returnValue; + } + + return doParseRecipe(recipe); + } + + protected RecipeParseResults doParseRecipe(String recipe) throws UnknownRecipeException, Exception { + + ConfigurationDefinition configDef; + Set<String> bundleFileNames; + DeploymentProperties metadata; + + metadata = new DeploymentProperties(0, "bundletest", "1.0", "bundle test description"); + + configDef = new ConfigurationDefinition("bundletest-configdef", "Test Config Def for testing BundleVersion"); + configDef.put(new PropertyDefinitionSimple("bundletest.property", + "Test property for BundleVersion Config Def testing", true, PropertySimpleType.STRING)); + + bundleFileNames = new HashSet<String>(); + for (int i = 0; i < AbstractEJB3Test.DEFAULT_CRITERIA_PAGE_SIZE + 2; i++) { + bundleFileNames.add("bundletest-bundlefile-" + i); + } + + return new RecipeParseResults(metadata, configDef, bundleFileNames); + } + + @Override + public BundleDistributionInfo 
processBundleDistributionFile(File distributionFile) + throws UnknownRecipeException, Exception { + if (processBundleDistributionFile_returnValue != null) { + return processBundleDistributionFile_returnValue; + } + + return doProcessBundleDistributionFile(distributionFile); + } + + protected BundleDistributionInfo doProcessBundleDistributionFile(File distributionFile) { + throw new UnsupportedOperationException("this mock object cannot do this"); + } +} diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/TestBundleServerPluginService.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/TestBundleServerPluginService.java index 7c95cb5..3bfc8c4 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/TestBundleServerPluginService.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/TestBundleServerPluginService.java @@ -64,12 +64,12 @@ public class TestBundleServerPluginService extends ServerPluginService implement public TestBundleServerPluginContainer bundlePC; public MasterServerPluginContainerConfiguration masterConfig;
- public RecipeParseResults parseRecipe_returnValue = null; - public BundleDistributionInfo processBundleDistributionFile_returnValue; + private BundleServerPluginFacet bundlePlugin;
- public TestBundleServerPluginService(File tmpdir) { + public TestBundleServerPluginService(File tmpdir, BundleServerPluginFacet bundlePlugin) { // build the config at constructor time so tests have it even before the PC is initialized this.masterConfig = new MasterServerPluginContainerConfiguration(tmpdir, tmpdir, tmpdir, null); + this.bundlePlugin = bundlePlugin; }
@Override @@ -196,50 +196,12 @@ public class TestBundleServerPluginService extends ServerPluginService implement
@Override public RecipeParseResults parseRecipe(String bundleTypeName, String recipe) throws Exception { - return new TestBundlePluginComponent().parseRecipe(recipe); + return bundlePlugin.parseRecipe(recipe); }
@Override public BundleDistributionInfo processBundleDistributionFile(File distributionFile) throws Exception { - return new TestBundlePluginComponent().processBundleDistributionFile(distributionFile); + return bundlePlugin.processBundleDistributionFile(distributionFile); } } - - class TestBundlePluginComponent implements BundleServerPluginFacet { - - public TestBundlePluginComponent() { - }; - - public RecipeParseResults parseRecipe(String recipe) throws Exception { - - if (parseRecipe_returnValue != null) { - return parseRecipe_returnValue; - } - - ConfigurationDefinition configDef; - Set<String> bundleFileNames; - DeploymentProperties metadata; - - metadata = new DeploymentProperties(0, "bundletest", "1.0", "bundle test description"); - - configDef = new ConfigurationDefinition("bundletest-configdef", "Test Config Def for testing BundleVersion"); - configDef.put(new PropertyDefinitionSimple("bundletest.property", - "Test property for BundleVersion Config Def testing", true, PropertySimpleType.STRING)); - - bundleFileNames = new HashSet<String>(); - for (int i = 0; i < AbstractEJB3Test.DEFAULT_CRITERIA_PAGE_SIZE + 2; i++) { - bundleFileNames.add("bundletest-bundlefile-" + i); - } - - return new RecipeParseResults(metadata, configDef, bundleFileNames); - } - - public BundleDistributionInfo processBundleDistributionFile(File uberBundleFile) throws Exception { - if (processBundleDistributionFile_returnValue != null) { - return processBundleDistributionFile_returnValue; - } - - throw new UnsupportedOperationException("this mock object cannot do this"); - } - } -} \ No newline at end of file +} diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/plugins/ant/RecipeValidationTest.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/plugins/ant/RecipeValidationTest.java new file mode 100644 index 0000000..502ab03 --- /dev/null +++ 
b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/plugins/ant/RecipeValidationTest.java @@ -0,0 +1,292 @@ +/* + * RHQ Management Platform + * Copyright (C) 2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +package org.rhq.enterprise.server.plugins.ant; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.net.URISyntaxException; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.zip.ZipEntry; +import java.util.zip.ZipOutputStream; + +import javax.persistence.Query; +import javax.transaction.TransactionManager; + +import org.testng.annotations.Test; + +import org.rhq.core.domain.auth.Subject; +import org.rhq.core.domain.bundle.BundleType; +import org.rhq.core.domain.resource.ResourceCategory; +import org.rhq.core.domain.resource.ResourceType; +import org.rhq.core.util.file.FileUtil; +import org.rhq.core.util.stream.StreamUtil; +import org.rhq.enterprise.server.bundle.BundleManagerLocal; +import org.rhq.enterprise.server.core.CoreServer; +import org.rhq.enterprise.server.core.CoreServerMBean; +import org.rhq.enterprise.server.core.plugin.PluginDeploymentScanner; +import 
org.rhq.enterprise.server.core.plugin.PluginDeploymentScannerMBean; +import org.rhq.enterprise.server.plugin.pc.MasterServerPluginContainer; +import org.rhq.enterprise.server.plugin.pc.ServerPluginService; +import org.rhq.enterprise.server.test.AbstractEJB3Test; +import org.rhq.enterprise.server.test.TestAgentClient; +import org.rhq.enterprise.server.test.TestServerCommunicationsService; +import org.rhq.enterprise.server.util.LookupUtil; + +/** + * @author Lukas Krejci + */ +public class RecipeValidationTest extends AbstractEJB3Test { + + public static final String ITESTS = "itests"; + private static final String ENTITY_NAME_PREFIX = "recipeValidationTest"; + private static final String FAKE_RESOURCE_TYPE_NAME = "recipeValidationTest-antbundle-resourcetype"; + private static final String ANT_BUNDLE_TYPE_NAME = "Ant Bundle"; + + private TestServerCommunicationsService agentServiceContainer; + private MasterServerPluginContainer pc; + private ServerPluginService ps; + private BundleManagerLocal bundleManager; + private File serverPluginsDir; + + @Override + protected void beforeMethod() throws Exception { + // try and clean up any junk that may be lying around from a failed run + cleanupDatabase(); + + bundleManager = LookupUtil.getBundleManager(); + createAntBundleType(); + + prepareCustomServerService(new CoreServer(), CoreServerMBean.OBJECT_NAME); + prepareCustomServerService(new PluginDeploymentScanner(), PluginDeploymentScannerMBean.OBJECT_NAME); + + prepareCustomServerPluginService(new ServerPluginService()); + + agentServiceContainer = prepareForTestAgents(); + agentServiceContainer.bundleService = new TestAgentClient(null, agentServiceContainer); + + ps = new ServerPluginService(); + serverPluginsDir = ps.getServerPluginsDirectory(); + serverPluginsDir.mkdirs(); + File agentPluginsDir = new File(serverPluginsDir.getParentFile(), "agentplugins"); + agentPluginsDir.mkdirs(); + + File antBundlePlugin = new 
File(System.getProperty("rhq.ant-bundle.serverplugin.path")); + + PluginDeploymentScannerMBean scanner = LookupUtil.getPluginDeploymentScanner(); + + //needed by server plugin lifecycle + prepareScheduler(); + + File targetFile = new File(serverPluginsDir, antBundlePlugin.getName()); + //touch the file so that the plugin scanner picks it up again + targetFile.setLastModified(System.currentTimeMillis()); + + FileUtil.copyFile(antBundlePlugin, targetFile); + + scanner.setAgentPluginDir(agentPluginsDir.getAbsolutePath()); + scanner.setServerPluginDir(serverPluginsDir.getAbsolutePath()); + + //actually, this is resetting the plugin service to the real thing, because we need to deploy the + //real ant bundle server plugin + prepareCustomServerPluginService(ps); +// resourceManager = LookupUtil.getResourceManager(); +// overlord = c + ps.startMasterPluginContainer(); + + LookupUtil.getPluginDeploymentScanner().startDeployment(); + LookupUtil.getPluginDeploymentScanner().scanAndRegister(); + } + + @Override + protected void afterMethod() throws Exception { + FileUtil.purge(serverPluginsDir.getParentFile(), true); + + unprepareForTestAgents(); + unprepareScheduler(); + + try { + cleanupDatabase(); + } finally { + unprepareServerPluginService(); + unprepareCustomServerService(CoreServerMBean.OBJECT_NAME); + unprepareCustomServerService(PluginDeploymentScannerMBean.OBJECT_NAME); + } + } + + private void cleanupDatabase() throws Exception { + try { + getTransactionManager().begin(); + + Query q; + List<?> doomed; + + // remove ResourceTypes which cascade remove BundleTypes + q = em.createQuery("SELECT rt FROM ResourceType rt WHERE rt.deleted = false and rt.name = '" + + FAKE_RESOURCE_TYPE_NAME + "'"); + doomed = q.getResultList(); + for (Object removeMe : doomed) { + em.remove(em.getReference(ResourceType.class, ((ResourceType) removeMe).getId())); + } + em.flush(); + // remove any orphaned BundleTypes + q = em.createQuery("SELECT bt FROM BundleType bt WHERE bt.name = '" + 
ANT_BUNDLE_TYPE_NAME + "'"); + doomed = q.getResultList(); + for (Object removeMe : doomed) { + em.remove(em.getReference(BundleType.class, ((BundleType) removeMe).getId())); + } + getTransactionManager().commit(); + } catch (Exception e) { + try { + System.out.println("CANNOT CLEAN UP TEST: Cause: " + e); + getTransactionManager().rollback(); + } catch (Exception ignore) { + } + } + } + + @Test(groups = RecipeValidationTest.ITESTS) + public void testManageRootDirMandatoryOnBundleVersionCreation() throws Exception { + File root = FileUtil.createTempDirectory(getClass().getName(), null, null); + copyFromClasspath("recipe-no-manageRootDir.xml", "deploy.xml", root); + + File bundleZip = createDistributionZip(root); + + try { + bundleManager.createBundleVersionViaURL(getFreshOverlord(), bundleZip.toURI().toURL().toString()); + fail("A recipe without explicit managerRootDir should not have been created"); + } catch (Exception e) { + //expected + checkForExpectedException(e, "org.rhq.bundle.ant.InvalidBuildFileException", "The deployment unit must specifically declare compliance mode of the destination directory."); + } finally { + FileUtil.purge(root, true); + } + } + + @Test(groups = RecipeValidationTest.ITESTS) + public void testManageRootDirAbsenceToleratedDuringUpdate() { + // TODO does this even make sense? 
+ } + + @Test(groups = RecipeValidationTest.ITESTS) + public void testManageRootDirAbsenceToleratedDuringRevert() { + // TODO implement + } + + private Subject getFreshOverlord() { + return LookupUtil.getSubjectManager().getOverlord(); + } + + private File createDistributionZip(File root) throws IOException { + File ret = File.createTempFile(getClass().getName(), + "zip"); + ZipOutputStream distribFile = new ZipOutputStream(new FileOutputStream(ret)); + + for (File f : getAllFilesRelativeToRoot(root, root)) { + distribFile.putNextEntry(new ZipEntry(f.getPath())); + + File resultFile = new File(root, f.getPath()); + + FileInputStream in = new FileInputStream(resultFile); + + try { + StreamUtil.copy(in, distribFile, false); + } finally { + in.close(); + } + + distribFile.closeEntry(); + } + + distribFile.close(); + + return ret; + } + + private static Set<File> getAllFilesRelativeToRoot(File parent, File root) { + HashSet<File> ret = new HashSet<File>(); + getAllFilesRelativeToRoot(parent, root, ret); + return ret; + } + + private static void getAllFilesRelativeToRoot(File parent, File root, Set<File> out) { + for (File f : parent.listFiles()) { + if (f.isDirectory()) { + getAllFilesRelativeToRoot(f, root, out); + } else { + //getRelativePath always prefixes the path with './'. We don't need that. 
+ String path = FileUtil.getRelativePath(f, root).substring(2); + out.add(new File(path)); + } + } + } + + private void copyFromClasspath(String resourceUrl, String filename, File target) + throws FileNotFoundException, URISyntaxException { + target.getParentFile().mkdirs(); + InputStream content = getClass().getResourceAsStream(resourceUrl); + + FileOutputStream out = new FileOutputStream(new File(target, filename)); + + StreamUtil.copy(content, out, true); + } + + private void createAntBundleType() throws Exception { + ResourceType rt = createResourceTypeForBundleType(); + Subject overlord = LookupUtil.getSubjectManager().getOverlord(); + BundleType bt = bundleManager.createBundleType(overlord, ANT_BUNDLE_TYPE_NAME, rt.getId()); + + assert bt.getId() > 0; + } + + private ResourceType createResourceTypeForBundleType() throws Exception { + final String fullName = FAKE_RESOURCE_TYPE_NAME; + ResourceType rt = new ResourceType(fullName, RecipeValidationTest.class.getSimpleName(), + ResourceCategory.PLATFORM, null); + + TransactionManager txMgr = getTransactionManager(); + txMgr.begin(); + em.persist(rt); + txMgr.commit(); + return rt; + } + + private void checkForExpectedException(Throwable t, String expectedExceptionClassName, + String expectedMessage) { + Throwable test = t; + do { + if (expectedExceptionClassName.equals(test.getClass().getName()) && + (expectedMessage == null || expectedMessage.equals(test.getMessage()))) { + return; + } + + test = test.getCause(); + } while (test != null); + + fail("Exception " + expectedExceptionClassName + + (expectedMessage == null ? 
"" : " with message [" + expectedMessage + "]") + + " not detected in the thrown exception " + t); + } +} diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/resource/metadata/MetadataBeanTest.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/resource/metadata/MetadataBeanTest.java index 1664c9f..a0b6620 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/resource/metadata/MetadataBeanTest.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/resource/metadata/MetadataBeanTest.java @@ -38,6 +38,7 @@ import org.rhq.core.domain.resource.ResourceType; import org.rhq.core.util.MessageDigestGenerator; import org.rhq.core.util.stream.StreamUtil; import org.rhq.enterprise.server.auth.SubjectManagerLocal; +import org.rhq.enterprise.server.bundle.TestBundlePluginComponent; import org.rhq.enterprise.server.bundle.TestBundleServerPluginService; import org.rhq.enterprise.server.resource.ResourceTypeManagerLocal; import org.rhq.enterprise.server.scheduler.jobs.PurgePluginsJob; @@ -79,7 +80,7 @@ public class MetadataBeanTest extends AbstractEJB3Test {
setupDB();
- TestBundleServerPluginService bundleService = new TestBundleServerPluginService(getTempDir()); + TestBundleServerPluginService bundleService = new TestBundleServerPluginService(getTempDir(), new TestBundlePluginComponent()); prepareCustomServerPluginService(bundleService); bundleService.startMasterPluginContainerWithoutSchedulingJobs(); prepareScheduler(); diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/tagging/TagManagerBeanTest.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/tagging/TagManagerBeanTest.java index bef8921..01c58a0 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/tagging/TagManagerBeanTest.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/tagging/TagManagerBeanTest.java @@ -45,6 +45,7 @@ import org.rhq.core.domain.tagging.Tag; import org.rhq.core.domain.util.PageList; import org.rhq.enterprise.server.auth.SubjectManagerLocal; import org.rhq.enterprise.server.bundle.BundleManagerLocal; +import org.rhq.enterprise.server.bundle.TestBundlePluginComponent; import org.rhq.enterprise.server.bundle.TestBundleServerPluginService; import org.rhq.enterprise.server.test.AbstractEJB3Test; import org.rhq.enterprise.server.test.TransactionCallback; @@ -75,7 +76,7 @@ public class TagManagerBeanTest extends AbstractEJB3Test { SubjectManagerLocal subjectManager = LookupUtil.getSubjectManager(); bundleManager = LookupUtil.getBundleManager(); overlord = subjectManager.getOverlord(); - TestBundleServerPluginService bundleServerPluginService = new TestBundleServerPluginService(getTempDir()); + TestBundleServerPluginService bundleServerPluginService = new TestBundleServerPluginService(getTempDir(), new TestBundlePluginComponent()); prepareCustomServerPluginService(bundleServerPluginService); bundleServerPluginService.startMasterPluginContainer(); } diff --git 
a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java index af01136..d13fddb 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java @@ -40,6 +40,7 @@ import java.util.regex.Pattern; import javax.ejb.EJB; import javax.management.MBeanServer; import javax.management.MalformedObjectNameException; +import javax.management.ObjectInstance; import javax.management.ObjectName; import javax.naming.Context; import javax.naming.InitialContext; @@ -330,6 +331,8 @@ public abstract class AbstractEJB3Test extends Arquillian { testClassesJar.addAsResource("test/metadata/resource-type/updateResourceTypeBundleTarget-v1.xml"); testClassesJar.addAsResource("test/metadata/resource-type/updateResourceTypeBundleTarget-v2.xml");
+ testClassesJar.addAsResource("org/rhq/enterprise/server/plugins/ant/recipe-no-manageRootDir.xml"); + // create test ear by starting with rhq.ear and thinning it String projectVersion = System.getProperty("project.version"); MavenResolverSystem earResolver = Resolvers.use(MavenResolverSystem.class); diff --git a/modules/enterprise/server/itests-2/src/test/resources/org/rhq/enterprise/server/plugins/ant/recipe-no-manageRootDir.xml b/modules/enterprise/server/itests-2/src/test/resources/org/rhq/enterprise/server/plugins/ant/recipe-no-manageRootDir.xml new file mode 100644 index 0000000..5fb564c --- /dev/null +++ b/modules/enterprise/server/itests-2/src/test/resources/org/rhq/enterprise/server/plugins/ant/recipe-no-manageRootDir.xml @@ -0,0 +1,34 @@ +<?xml version="1.0"?> +<!-- + ~ RHQ Management Platform + ~ Copyright (C) 2013 Red Hat, Inc. + ~ All rights reserved. + ~ + ~ This program is free software; you can redistribute it and/or modify + ~ it under the terms of the GNU General Public License as published by + ~ the Free Software Foundation version 2 of the License. + ~ + ~ This program is distributed in the hope that it will be useful, + ~ but WITHOUT ANY WARRANTY; without even the implied warranty of + ~ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + ~ GNU General Public License for more details. + ~ + ~ You should have received a copy of the GNU General Public License + ~ along with this program; if not, write to the Free Software + ~ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ --> + +<project name="simple-build" default="main" + xmlns:rhq="antlib:org.rhq.bundle"> + + <rhq:bundle name="testNoManageRootDirBundle" version="1.0"> + <rhq:deployment-unit name="simulated-war"> + <rhq:file name="zero.properties" destinationFile="zero.properties"/> + <rhq:file name="one.properties" destinationFile="subdir1/one.properties"/> + <rhq:file name="two.properties" destinationFile="subdir2/two.properties"/> + </rhq:deployment-unit> + </rhq:bundle> + + <target name="main"/> + +</project> diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/CoreServerMBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/CoreServerMBean.java index 6396f6e..fc112ea 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/CoreServerMBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/CoreServerMBean.java @@ -98,4 +98,4 @@ public interface CoreServerMBean { * @return product information - the product name, homepage URL, docs URL, etc. */ ProductInfo getProductInfo(); -} \ No newline at end of file +} diff --git a/modules/enterprise/server/plugins/ant-bundle/src/main/java/org/rhq/enterprise/server/plugins/ant/AntBundleServerPluginComponent.java b/modules/enterprise/server/plugins/ant-bundle/src/main/java/org/rhq/enterprise/server/plugins/ant/AntBundleServerPluginComponent.java index 82354b6..be72042 100644 --- a/modules/enterprise/server/plugins/ant-bundle/src/main/java/org/rhq/enterprise/server/plugins/ant/AntBundleServerPluginComponent.java +++ b/modules/enterprise/server/plugins/ant-bundle/src/main/java/org/rhq/enterprise/server/plugins/ant/AntBundleServerPluginComponent.java @@ -105,12 +105,13 @@ public class AntBundleServerPluginComponent implements ServerPluginComponent, Bu StreamUtil.copy(in, out);
// parse, but do not execute, the Ant script - AntLauncher antLauncher = new AntLauncher(); + AntLauncher antLauncher = new AntLauncher(true); BundleAntProject project = antLauncher.parseBundleDeployFile(recipeFile, null);
// obtain the parse results deploymentProps = new DeploymentProperties(0, project.getBundleName(), project.getBundleVersion(), project - .getBundleDescription()); + .getBundleDescription(), project.getDestinationCompliance()); + bundleFiles = project.getBundleFileNames(); configDef = project.getConfigurationDefinition(); } catch (Throwable t) { diff --git a/modules/helpers/perftest-support/pom.xml b/modules/helpers/perftest-support/pom.xml index 80bdc15..1ea35be 100644 --- a/modules/helpers/perftest-support/pom.xml +++ b/modules/helpers/perftest-support/pom.xml @@ -14,14 +14,14 @@ </description> <dependencies> <dependency> - <groupId>ant</groupId> + <groupId>org.apache.ant</groupId> <artifactId>ant</artifactId> - <version>1.6.5</version> + <version>1.8.0</version> </dependency> <dependency> - <groupId>ant</groupId> + <groupId>org.apache.ant</groupId> <artifactId>ant-launcher</artifactId> - <version>1.6.5</version> + <version>1.8.0</version> <scope>runtime</scope> </dependency> <dependency> diff --git a/modules/plugins/ant-bundle/src/main/java/org/rhq/plugins/ant/AntBundlePluginComponent.java b/modules/plugins/ant-bundle/src/main/java/org/rhq/plugins/ant/AntBundlePluginComponent.java index cd34fb0..c723ae2 100644 --- a/modules/plugins/ant-bundle/src/main/java/org/rhq/plugins/ant/AntBundlePluginComponent.java +++ b/modules/plugins/ant-bundle/src/main/java/org/rhq/plugins/ant/AntBundlePluginComponent.java @@ -64,6 +64,7 @@ import org.rhq.core.util.file.FileUtil; import org.rhq.core.util.stream.StreamUtil; import org.rhq.core.util.updater.DeployDifferences; import org.rhq.core.util.updater.DeploymentsMetadata; +import org.rhq.core.util.updater.DestinationComplianceMode; import org.rhq.core.util.updater.FileHashcodeMap;
/** @@ -190,7 +191,12 @@ public class AntBundlePluginComponent implements ResourceComponent, BundleFacet DeploymentsMetadata metadata = new DeploymentsMetadata(deployDir); if (metadata.isManaged()) { metadataDirectoryToPurge = metadata.getMetadataDirectory(); - manageAllDeployDir = metadata.getCurrentDeploymentProperties().getManageRootDir(); + + //as of RHQ 4.9.0, we only only support "full" and "filesAndDirectories" destination compliance modes + //which we used to describe by boolean "manageRootDir"... Let's not use the deprecated API's but not + // change the code too much... + manageAllDeployDir = metadata.getCurrentDeploymentProperties().getDestinationCompliance() == DestinationComplianceMode.full; + int totalExternalFiles = 0; ArrayList<String> externalDeleteSuccesses = new ArrayList<String>(0); ArrayList<String> externalDeleteFailures = new ArrayList<String>(0);
commit 1ad43fd38d868509ce6a2bc4d3919435fa136e73 Author: Mike Thompson mithomps@redhat.com Date: Thu Aug 8 15:07:07 2013 -0700
Replaced UserPreferencesMeasurementRangeEditor with ButtonBarDateTimeRangeEditor in Group --> Monitoring --> Metrics tab. Altered RedrawGraphs to a more general Refreshable so it can be used with tables and not just graphs.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/table/Table.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/table/Table.java index 4ce05d2..0036dfc 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/table/Table.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/table/Table.java @@ -143,8 +143,10 @@ public class Table<DS extends RPCDataSource> extends EnhancedHLayout implements private DoubleClickHandler doubleClickHandler; private List<TableActionInfo> tableActions = new ArrayList<TableActionInfo>(); private boolean tableActionDisableOverride = false; + protected List<Canvas> extraWidgetsAtTop = new ArrayList<Canvas>(); protected List<Canvas> extraWidgetsAboveFooter = new ArrayList<Canvas>(); protected List<Canvas> extraWidgetsInMainFooter = new ArrayList<Canvas>(); + private EnhancedToolStrip topExtraWidgets; private EnhancedToolStrip footer; private EnhancedToolStrip footerExtraWidgets; private EnhancedIButton refreshButton; @@ -415,6 +417,15 @@ public class Table<DS extends RPCDataSource> extends EnhancedHLayout implements contents.removeChild(child); }
+ // add a toolstrip at the top of screen for navigation, date range controls, etc... + this.topExtraWidgets = new EnhancedToolStrip(); + topExtraWidgets.setPadding(5); + topExtraWidgets.setWidth100(); + topExtraWidgets.setMembersMargin(15); + topExtraWidgets.hide(); + contents.addMember(topExtraWidgets); + + // Title this.titleCanvas = new HTMLFlow(); updateTitleCanvas(this.titleString); @@ -537,6 +548,16 @@ public class Table<DS extends RPCDataSource> extends EnhancedHLayout implements }
private void drawHeader() { + + // add toolstrip to the top of screen + topExtraWidgets.removeMembers(topExtraWidgets.getMembers()); + if (!extraWidgetsAtTop.isEmpty()) { + for (Canvas extraWidgetCanvas : extraWidgetsAtTop) { + topExtraWidgets.addMember(extraWidgetCanvas); + } + topExtraWidgets.show(); + } + // just use the first icon (not sure use case for multiple icons in title) titleBar = new TitleBar(titleString); if (titleIcon != null) { @@ -1038,6 +1059,14 @@ public class Table<DS extends RPCDataSource> extends EnhancedHLayout implements } }
+ /** + * Add any widgets to the top of the table for filtering, etc... + * @param widget + */ + public void addTopWidget(Canvas widget ) { + this.extraWidgetsAtTop.add(widget); + } + public void setHeaderIcon(String headerIcon) { if (this.headerIcons.size() > 0) { this.headerIcons.clear(); @@ -1080,7 +1109,6 @@ public class Table<DS extends RPCDataSource> extends EnhancedHLayout implements this.listGrid.setSelectionType(getDefaultSelectionStyle()); }
- //int selectionCount = this.listGrid.getSelectedRecords().length; for (TableActionInfo tableAction : this.tableActions) { if (tableAction.actionCanvas != null) { // if null, we haven't initialized our buttons yet, so skip this boolean enabled = (!this.tableActionDisableOverride && tableAction.action.isEnabled(this.listGrid diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/AbstractD3GraphListView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/AbstractD3GraphListView.java index 2ce63aa..a81f1ca 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/AbstractD3GraphListView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/AbstractD3GraphListView.java @@ -32,7 +32,7 @@ import org.rhq.enterprise.gui.coregui.client.components.measurement.AbstractMeas import org.rhq.enterprise.gui.coregui.client.dashboard.AutoRefreshUtil; import org.rhq.enterprise.gui.coregui.client.inventory.AutoRefresh; import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.ButtonBarDateTimeRangeEditor; -import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.RedrawGraphs; +import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.Refreshable; import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.avail.AvailabilityD3GraphView; import org.rhq.enterprise.gui.coregui.client.util.async.CountDownLatch; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; @@ -42,7 +42,7 @@ import org.rhq.enterprise.gui.coregui.client.util.preferences.MeasurementUserPre * Provide the shared stuff for create GraphListViews like Availability graphs * and User Preferences pickers for the date range. 
*/ -public abstract class AbstractD3GraphListView extends EnhancedVLayout implements AutoRefresh,RedrawGraphs { +public abstract class AbstractD3GraphListView extends EnhancedVLayout implements AutoRefresh,Refreshable { protected final static int SINGLE_CHART_HEIGHT = 225; protected final static int MULTI_CHART_HEIGHT = 210; protected static final Label loadingLabel = new Label(MSG.common_msg_loading()); @@ -61,7 +61,7 @@ public abstract class AbstractD3GraphListView extends EnhancedVLayout implements startRefreshCycle(); }
- public abstract void redrawGraphs(); + public abstract void refreshData();
protected abstract void queryAvailability(final EntityContext context, Long startTime, Long endTime, final CountDownLatch countDownLatch); @@ -95,7 +95,7 @@ public abstract class AbstractD3GraphListView extends EnhancedVLayout implements buttonBarDateTimeRangeEditor.updateDateTimeRangeDisplay(newStartDate, now); buttonBarDateTimeRangeEditor.saveDateRange(newStartDate.getTime(), now.getTime());
- redrawGraphs(); + refreshData(); } } } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/AbstractMetricGraph.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/AbstractMetricGraph.java index ace6154..7855545 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/AbstractMetricGraph.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/AbstractMetricGraph.java @@ -235,6 +235,6 @@ public abstract class AbstractMetricGraph extends VLayout implements HasD3Metric }
public void redrawGraphs(){ - graphListView.redrawGraphs(); + graphListView.refreshData(); } } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/ButtonBarDateTimeRangeEditor.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/ButtonBarDateTimeRangeEditor.java index b4f1354..e6578ed 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/ButtonBarDateTimeRangeEditor.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/ButtonBarDateTimeRangeEditor.java @@ -59,7 +59,7 @@ public class ButtonBarDateTimeRangeEditor extends EnhancedVLayout { static final int BUTTON_WIDTH = 28;
private MeasurementUserPreferences measurementUserPreferences; - private RedrawGraphs d3GraphListView; + private Refreshable d3GraphListView; private static final Messages MSG = CoreGUI.getMessages(); private Label dateRangeLabel; private static final DateTimeFormat fmt = DateTimeFormat.getFormat(MSG.common_buttonbar_datetime_format()); @@ -68,7 +68,7 @@ public class ButtonBarDateTimeRangeEditor extends EnhancedVLayout { final private ButtonBarDateTimeRangeEditor self;
public ButtonBarDateTimeRangeEditor(MeasurementUserPreferences measurementUserPrefs, - RedrawGraphs d3GraphListView) { + Refreshable d3GraphListView) { this.self = this; this.measurementUserPreferences = measurementUserPrefs; this.d3GraphListView = d3GraphListView; @@ -128,7 +128,7 @@ public class ButtonBarDateTimeRangeEditor extends EnhancedVLayout { }
public void redrawGraphs() { - d3GraphListView.redrawGraphs(); + d3GraphListView.refreshData(); }
@Override diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/RedrawGraphs.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/RedrawGraphs.java deleted file mode 100644 index e590281..0000000 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/RedrawGraphs.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * RHQ Management Platform - * Copyright (C) 2005-2013 Red Hat, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ -package org.rhq.enterprise.gui.coregui.client.inventory.common.graph; - -/** - * Define the capability to Redraw a Graph. 
- * @author Mike Thompson - */ -public interface RedrawGraphs { - - void redrawGraphs(); - -} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/Refreshable.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/Refreshable.java new file mode 100644 index 0000000..e170b44 --- /dev/null +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/Refreshable.java @@ -0,0 +1,30 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ +package org.rhq.enterprise.gui.coregui.client.inventory.common.graph; + +/** + * Define the capability to Refresh data in a grid or Redraw a Graph. 
+ * + * @author Mike Thompson + */ +public interface Refreshable { + + void refreshData(); + +} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/D3GroupGraphListView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/D3GroupGraphListView.java index 16670aa..bcb223a 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/D3GroupGraphListView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/D3GroupGraphListView.java @@ -84,7 +84,7 @@ public final class D3GroupGraphListView extends AbstractD3GraphListView implemen addMember(graphsVLayout); }
- public void redrawGraphs() { + public void refreshData() { this.onDraw(); }
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3GraphListView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3GraphListView.java index 204d2ec..274354a 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3GraphListView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3GraphListView.java @@ -45,7 +45,7 @@ import org.rhq.enterprise.gui.coregui.client.UserSessionManager; import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; import org.rhq.enterprise.gui.coregui.client.gwt.ResourceGroupGWTServiceAsync; import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.ButtonBarDateTimeRangeEditor; -import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.RedrawGraphs; +import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.Refreshable; import org.rhq.enterprise.gui.coregui.client.inventory.resource.type.ResourceTypeRepository; import org.rhq.enterprise.gui.coregui.client.util.Log; import org.rhq.enterprise.gui.coregui.client.util.MeasurementConverterClient; @@ -62,7 +62,7 @@ import org.rhq.enterprise.gui.coregui.client.util.preferences.MeasurementUserPre * * @author Mike Thompson */ -public abstract class CompositeGroupD3GraphListView extends EnhancedVLayout implements JsonMetricProducer, RedrawGraphs { +public abstract class CompositeGroupD3GraphListView extends EnhancedVLayout implements JsonMetricProducer, Refreshable {
static protected final Messages MSG = CoreGUI.getMessages(); // string labels @@ -250,7 +250,7 @@ public abstract class CompositeGroupD3GraphListView extends EnhancedVLayout impl }
@Override - public void redrawGraphs() { + public void refreshData() { populateData(); drawGraph(); } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/GroupMeasurementTableView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/GroupMeasurementTableView.java index efb7b72..50a8819 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/GroupMeasurementTableView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/GroupMeasurementTableView.java @@ -20,27 +20,40 @@ package org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table;
import java.util.ArrayList; +import java.util.Date;
+import com.google.gwt.user.client.Timer; import com.smartgwt.client.data.Record; import com.smartgwt.client.widgets.grid.ListGridField; import com.smartgwt.client.widgets.grid.events.CellClickEvent; import com.smartgwt.client.widgets.grid.events.CellClickHandler;
import org.rhq.core.domain.resource.group.composite.ResourceGroupComposite; +import org.rhq.enterprise.gui.coregui.client.UserSessionManager; +import org.rhq.enterprise.gui.coregui.client.components.measurement.AbstractMeasurementRangeEditor; import org.rhq.enterprise.gui.coregui.client.components.measurement.UserPreferencesMeasurementRangeEditor; import org.rhq.enterprise.gui.coregui.client.components.table.Table; +import org.rhq.enterprise.gui.coregui.client.dashboard.AutoRefreshUtil; +import org.rhq.enterprise.gui.coregui.client.inventory.AutoRefresh; import org.rhq.enterprise.gui.coregui.client.inventory.common.detail.summary.AbstractActivityView.ChartViewWindow; +import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.ButtonBarDateTimeRangeEditor; +import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.Refreshable; +import org.rhq.enterprise.gui.coregui.client.util.preferences.MeasurementUserPreferences;
/** * Views a resource's measurements in a tabular view. * * @author John Mazzitelli * @author Simeon Pinder + * @author Mike Thompson */ -public class GroupMeasurementTableView extends Table<GroupMetricsTableDataSource> { +public class GroupMeasurementTableView extends Table<GroupMetricsTableDataSource> implements AutoRefresh, Refreshable {
private final int groupId; private final boolean isAutogroup; + protected final MeasurementUserPreferences measurementUserPrefs; + protected final ButtonBarDateTimeRangeEditor buttonBarDateTimeRangeEditor; + protected Timer refreshTimer;
public GroupMeasurementTableView(ResourceGroupComposite groupComposite, int groupId) { super(); @@ -50,10 +63,52 @@ public class GroupMeasurementTableView extends Table<GroupMetricsTableDataSource //disable fields used when is full screen setShowFooterRefresh(true); setTitle(MSG.common_title_numeric_metrics()); + + measurementUserPrefs = new MeasurementUserPreferences(UserSessionManager.getUserPreferences()); + buttonBarDateTimeRangeEditor = new ButtonBarDateTimeRangeEditor(measurementUserPrefs,this); + } + + @Override + public void refreshData() { + + } + + @Override + public void startRefreshCycle() { + refreshTimer = AutoRefreshUtil.startRefreshCycle(this, this, refreshTimer); + } + + @Override + protected void onDestroy() { + AutoRefreshUtil.onDestroy( refreshTimer); + + super.onDestroy(); + } + + @Override + public boolean isRefreshing() { + return false; + } + + //Custom refresh operation as we are not directly extending Table + @Override + public void refresh() { + if (isVisible() && !isRefreshing()) { + Date now = new Date(); + AbstractMeasurementRangeEditor.MetricRangePreferences metricRangePreferences = measurementUserPrefs.getMetricRangePreferences(); + long timeRange = metricRangePreferences.end - metricRangePreferences.begin; + Date newStartDate = new Date(now.getTime() - timeRange); + buttonBarDateTimeRangeEditor.updateDateTimeRangeDisplay(newStartDate, now); + buttonBarDateTimeRangeEditor.saveDateRange(newStartDate.getTime(), now.getTime()); + + refreshData(); + } }
@Override protected void configureTable() { + addTopWidget(buttonBarDateTimeRangeEditor); + ArrayList<ListGridField> fields = getDataSource().getListGridFields();
//add cell click handler to execute on Table data entries. @@ -72,7 +127,6 @@ public class GroupMeasurementTableView extends Table<GroupMetricsTableDataSource } }); setListGridFields(fields.toArray(new ListGridField[getDataSource().getListGridFields().size()])); - addExtraWidget(new UserPreferencesMeasurementRangeEditor(), true); }
} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/D3GraphListView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/D3GraphListView.java index 5635d88..6eab1e9 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/D3GraphListView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/D3GraphListView.java @@ -134,7 +134,7 @@ public class D3GraphListView extends AbstractD3GraphListView { addMember(vLayout); }
- public void redrawGraphs() { + public void refreshData() { this.onDraw(); }
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/MetricD3Graph.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/MetricD3Graph.java index 65807f9..4dad9b1 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/MetricD3Graph.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/MetricD3Graph.java @@ -22,7 +22,7 @@ import com.google.gwt.user.client.Timer; import com.smartgwt.client.widgets.HTMLFlow;
import org.rhq.enterprise.gui.coregui.client.inventory.common.AbstractD3GraphListView; -import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.RedrawGraphs; +import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.Refreshable; import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.StackedBarMetricGraphImpl; import org.rhq.enterprise.gui.coregui.client.util.Log; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; @@ -31,7 +31,7 @@ import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; * A D3 graph implementation for graphing Resource metrics. * Just the graph only. No avail graph no buttons just he graph. */ -public class MetricD3Graph<T extends AbstractD3GraphListView> extends EnhancedVLayout implements RedrawGraphs{ +public class MetricD3Graph<T extends AbstractD3GraphListView> extends EnhancedVLayout implements Refreshable {
protected StackedBarMetricGraphImpl graph; private HTMLFlow graphDiv = null; @@ -205,8 +205,8 @@ public class MetricD3Graph<T extends AbstractD3GraphListView> extends EnhancedVL /** * Allow the graph to refresh the whole d3GraphListView. */ - public void redrawGraphs(){ - d3GraphListView.redrawGraphs(); + public void refreshData(){ + d3GraphListView.refreshData(); }
} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsResourceView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsResourceView.java index bcea825..d856410 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsResourceView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsResourceView.java @@ -68,7 +68,7 @@ public class MetricsResourceView extends AbstractD3GraphListView { }
- public void redrawGraphs() { + public void refreshData() { this.onDraw(); }
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java index bb67587..db9abd4 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java @@ -64,7 +64,7 @@ import org.rhq.enterprise.gui.coregui.client.components.table.TableAction; import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; import org.rhq.enterprise.gui.coregui.client.inventory.common.AbstractD3GraphListView; import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.MetricGraphData; -import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.RedrawGraphs; +import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.Refreshable; import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.StackedBarMetricGraphImpl; import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.MetricD3Graph; import org.rhq.enterprise.gui.coregui.client.util.BrowserUtility; @@ -78,7 +78,7 @@ import org.rhq.enterprise.gui.coregui.client.util.preferences.MeasurementUserPre * @author John Mazzitelli * @author Mike Thompson */ -public class MetricsTableView extends Table<MetricsViewDataSource> implements RedrawGraphs { +public class MetricsTableView extends Table<MetricsViewDataSource> implements Refreshable {
private final Resource resource; private final AbstractD3GraphListView abstractD3GraphListView; @@ -248,7 +248,7 @@ public class MetricsTableView extends Table<MetricsViewDataSource> implements Re /** * Redraw Graphs in this context means to refresh the table and redraw open graphs. */ - public void redrawGraphs() { + public void refreshData() { Log.debug("MetricsView.redrawGraphs.");
new Timer() { @@ -281,7 +281,7 @@ public class MetricsTableView extends Table<MetricsViewDataSource> implements Re public void onRecordExpand(RecordExpandEvent recordExpandEvent) { metricsTableView.expandedRows.add(recordExpandEvent.getRecord().getAttributeAsInt( MetricsViewDataSource.FIELD_METRIC_DEF_ID)); - redrawGraphs(); + refreshData(); }
}); @@ -290,13 +290,13 @@ public class MetricsTableView extends Table<MetricsViewDataSource> implements Re public void onRecordCollapse(RecordCollapseEvent recordCollapseEvent) { metricsTableView.expandedRows.remove(recordCollapseEvent.getRecord().getAttributeAsInt( MetricsViewDataSource.FIELD_METRIC_DEF_ID)); - redrawGraphs(); + refreshData(); } }); addSortChangedHandler(new SortChangedHandler() { @Override public void onSortChanged(SortEvent sortEvent) { - redrawGraphs(); + refreshData(); } }); addDataArrivedHandler(new DataArrivedHandler() {
commit 1ccc454cc2716228818b44304e1a85152002ea25 Author: Heiko W. Rupp hwr@redhat.com Date: Thu Aug 8 22:13:03 2013 +0200
BZ 994537 correctly compute the 'lastPage' in paging.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/AbstractRestBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/AbstractRestBean.java index 62a85ca..7f0eea3 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/AbstractRestBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/AbstractRestBean.java @@ -379,7 +379,7 @@ public class AbstractRestBean {
// A link to the last page if (!pc.isUnlimited()) { - int lastPage = resultList.getTotalSize() / pc.getPageSize(); + int lastPage = (resultList.getTotalSize() / pc.getPageSize() ) -1; uriBuilder = uriInfo.getRequestUriBuilder(); // adds ?q, ?ps and ?category if needed uriBuilder.replaceQueryParam("page",lastPage); builder.header("Link", new Link("last",uriBuilder.build().toString()).rfc5988String()); @@ -409,7 +409,8 @@ public class AbstractRestBean { pColl.setPageSize(pageControl.getPageSize()); int page = pageControl.getPageNumber(); pColl.setCurrentPage(page); - pColl.setLastPage(originalList.getTotalSize()/pageControl.getPageSize()); + int lastPage = (originalList.getTotalSize() / pageControl.getPageSize()) -1 ; // -1 as page # is 0 based + pColl.setLastPage(lastPage);
UriBuilder uriBuilder; if (originalList.getTotalSize() > (page +1 ) * pageControl.getPageSize()) { @@ -427,7 +428,6 @@ public class AbstractRestBean {
// A link to the last page if (!pageControl.isUnlimited()) { - int lastPage = originalList.getTotalSize() / pageControl.getPageSize(); uriBuilder = uriInfo.getRequestUriBuilder(); // adds ?q, ?ps and ?category if needed uriBuilder.replaceQueryParam("page",lastPage); pColl.addLink( new Link("last",uriBuilder.build().toString())); diff --git a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/ResourcesTest.java b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/ResourcesTest.java index bb7e2c2..a5c888f 100644 --- a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/ResourcesTest.java +++ b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/ResourcesTest.java @@ -30,11 +30,14 @@ import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.regex.Matcher; +import java.util.regex.Pattern;
import com.jayway.restassured.http.ContentType; import com.jayway.restassured.path.json.JsonPath; import com.jayway.restassured.path.xml.XmlPath; import com.jayway.restassured.path.xml.element.Node; +import com.jayway.restassured.response.Headers; import com.jayway.restassured.response.Response;
import org.apache.http.HttpStatus; @@ -328,6 +331,119 @@ public class ResourcesTest extends AbstractBase { }
@Test + public void testPagingWrappingCorrectness() throws Exception { + + // First get the lastPage from the paging side + + JsonPath path = + given() + .header("Accept", "application/vnd.rhq.wrapped+json") + .with() + .queryParam("page", 0) + .queryParam("ps", 5) // Unusually small to provoke having more than 1 page + .queryParam("status","COMMITTED") + .expect() + .statusCode(200) + .log().ifError() + .when() + .get("/resource") + .jsonPath(); + + int pagingLastPage = path.getInt("lastPage"); + int pagingTotalSize = path.getInt("totalSize"); + + // Now get resource counts from status + + JsonPath statusPath = + given() + .header(acceptJson) + .expect() + .statusCode(200) + .log().ifError() + .when() + .get("/status") + .jsonPath(); + + int platforms = statusPath.getInt("values.PlatformCount"); + int servers = statusPath.getInt("values.ServerCount"); + int services = statusPath.getInt("values.ServiceCount"); + + int resources = platforms + servers + services; + + assert resources == pagingTotalSize; + + int statusLastPage = (resources/5)-1; // Page numbers start at 0 + + assert statusLastPage == pagingLastPage : statusLastPage + " != " + pagingLastPage; + } + + @Test + public void testPagingHeaderCorrectness() throws Exception { + + // First get the lastPage from the paging headers + + Response response = + given() + .header(acceptJson) + .with() + .queryParam("page", 0) + .queryParam("ps", 5) // Unusually small to provoke having more than 1 page + .queryParam("status", "COMMITTED") + .expect() + .statusCode(200) + .log().everything() + .when() + .get("/resource"); + + String tmp = response.getHeader("X-collection-size"); + int pagingTotalSize = Integer.parseInt(tmp); + + Headers responseHeaders = response.getHeaders(); + List<String> headers = responseHeaders.getValues("Link"); + tmp = null; + for (String header : headers) { + if (header.contains("rel="last"")) { + tmp = header; + break; + } + } + assert tmp != null : "Found no Link header for rel=last"; + + 
Matcher m = Pattern.compile(".*page=([0-9]+).*").matcher(tmp); + assert m.matches(); + + tmp = m.group(1); + + System.out.println(tmp); + System.out.flush(); + int pagingLastPage = Integer.parseInt(tmp); + + // Now get resource counts from status + + JsonPath statusPath = + given() + .header(acceptJson) + .expect() + .statusCode(200) + .log().ifError() + .when() + .get("/status") + .jsonPath(); + + int platforms = statusPath.getInt("values.PlatformCount"); + int servers = statusPath.getInt("values.ServerCount"); + int services = statusPath.getInt("values.ServiceCount"); + + int resources = platforms + servers + services; + + assert resources == pagingTotalSize; + + int statusLastPage = (resources/5)-1; // Page numbers start at 0 + + assert statusLastPage == pagingLastPage : statusLastPage + " != " + pagingLastPage; + } + + @Test public void testGetResourcesWithPagingAndWrapping() throws Exception {
given()
commit f5f25f9a139868b9f7453a9aeef04560086a0470 Author: Jirka Kremser jkremser@redhat.com Date: Thu Aug 8 21:04:29 2013 +0200
New component for editing the storage node properties. Adding the retrieveConfiguration() to GWT service impl class.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeConfigurationEditor.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeConfigurationEditor.java new file mode 100644 index 0000000..15bb412 --- /dev/null +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeConfigurationEditor.java @@ -0,0 +1,205 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ +package org.rhq.enterprise.gui.coregui.client.admin.storage; + +import java.util.ArrayList; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Set; + +import com.smartgwt.client.types.Alignment; +import com.smartgwt.client.types.Overflow; +import com.smartgwt.client.widgets.events.ClickEvent; +import com.smartgwt.client.widgets.events.ClickHandler; +import com.smartgwt.client.widgets.form.fields.FormItem; +import com.smartgwt.client.widgets.form.fields.StaticTextItem; +import com.smartgwt.client.widgets.form.fields.TextItem; +import com.smartgwt.client.widgets.layout.LayoutSpacer; + +import org.rhq.core.domain.cloud.StorageNodeConfigurationComposite; +import org.rhq.core.domain.measurement.MeasurementUnits; +import org.rhq.enterprise.gui.coregui.client.RefreshableView; +import org.rhq.enterprise.gui.coregui.client.components.configuration.PropertyValueChangeEvent; +import org.rhq.enterprise.gui.coregui.client.components.configuration.PropertyValueChangeListener; +import org.rhq.enterprise.gui.coregui.client.components.form.EnhancedDynamicForm; +import org.rhq.enterprise.gui.coregui.client.components.form.ValueWithUnitsItem; +import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedIButton; +import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedToolStrip; +import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; + +/** + * The component for editing the storage node configuration + * + * @author Jirka Kremser + */ +public class StorageNodeConfigurationEditor extends EnhancedVLayout implements PropertyValueChangeListener, + RefreshableView { + + private EnhancedDynamicForm form; + private EnhancedToolStrip toolStrip; + private boolean oddRow; + private final StorageNodeConfigurationComposite configuration; + + public StorageNodeConfigurationEditor(final StorageNodeConfigurationComposite configuration) { + super(); + this.configuration = configuration; + + } + + private void save() { + + } + + 
private List<FormItem> buildOneFormRow(String name, String title, String value, String description, + boolean unitsDropdown) { + List<FormItem> fields = new ArrayList<FormItem>(); + StaticTextItem nameItem = new StaticTextItem(); + nameItem.setStartRow(true); + nameItem.setValue("<b>" + title + "</b>"); + nameItem.setShowTitle(false); + nameItem.setCellStyle(oddRow ? "OddRow" : "EvenRow"); + fields.add(nameItem); + + FormItem valueItem = null; + if (unitsDropdown) { + valueItem = buildJMXMemoryItem(name, value); + } else { + valueItem = new TextItem(); + valueItem.setName(name); + valueItem.setValue(value); + valueItem.setWidth(220); + } + valueItem.setAlign(Alignment.CENTER); + valueItem.setShowTitle(false); + valueItem.setRequired(true); + valueItem.setCellStyle(oddRow ? "OddRow" : "EvenRow"); + fields.add(valueItem); + + StaticTextItem descriptionItem = new StaticTextItem(); + descriptionItem.setValue(description); + descriptionItem.setShowTitle(false); + descriptionItem.setEndRow(true); + descriptionItem.setCellStyle(oddRow ? "OddRow" : "EvenRow"); + fields.add(descriptionItem); + + oddRow = !oddRow; + return fields; + } + + private FormItem buildJMXMemoryItem(String name, String value) { + Set<MeasurementUnits> supportedUnits = new LinkedHashSet<MeasurementUnits>(); + supportedUnits.add(MeasurementUnits.MEGABYTES); + supportedUnits.add(MeasurementUnits.GIGABYTES); + + ValueWithUnitsItem valueItem = new ValueWithUnitsItem(name, null, supportedUnits); + if (value != null && !value.isEmpty()) { + boolean megs = value.trim().substring(value.trim().length() - 1).equalsIgnoreCase("m"); + MeasurementUnits units = megs ? MeasurementUnits.MEGABYTES : MeasurementUnits.GIGABYTES; + try { + int intVal = Integer.parseInt(value.substring(0, value.toLowerCase().indexOf(megs ? 
"m" : "g"))); + valueItem.setValue(intVal, units); + } catch (StringIndexOutOfBoundsException e) { + //nothing + } + } + return valueItem; + } + + private List<FormItem> buildHeaderItems() { + List<FormItem> fields = new ArrayList<FormItem>(); + fields.add(createHeaderTextItem(MSG.view_configEdit_property())); + fields.add(createHeaderTextItem(MSG.common_title_value())); + fields.add(createHeaderTextItem(MSG.common_title_description())); + return fields; + } + + private StaticTextItem createHeaderTextItem(String value) { + StaticTextItem unsetHeader = new StaticTextItem(); + unsetHeader.setValue(value); + unsetHeader.setShowTitle(false); + unsetHeader.setCellStyle("configurationEditorHeaderCell"); + return unsetHeader; + } + + @Override + protected void onDraw() { + super.onDraw(); + refresh(); + } + + @Override + public void refresh() { + form = new EnhancedDynamicForm(); + form.setHiliteRequiredFields(true); + form.setNumCols(3); + form.setCellPadding(5); + form.setColWidths(190, 220, "*"); + form.setIsGroup(true); + form.setGroupTitle("Storage Node Specific Settings"); + form.setBorder("1px solid #AAA"); + oddRow = true; + + List<FormItem> items = buildHeaderItems(); + items + .addAll(buildOneFormRow( + "foo2", + "Max Heap Size", + configuration.getHeapSize(), + "The maximum heap size. This value will be used with the -Xmx JVM option. The value should be an integer with a suffix of M or G to indicate megabytes or gigabytes.", + true)); + items + .addAll(buildOneFormRow( + "foo", + "Heap New Size", + configuration.getHeapNewSize(), + "The size of the new generation portion of the heap. This value will be used with the -Xmn JVM option. 
The value should be an integer with a suffix of M or G to indicate megabytes or gigabytes.", + true)); + + items.addAll(buildOneFormRow("foo3", "Thread Stack Size", configuration.getThreadStackSize(), + "asdfsdfffa df sdbla", false)); + items.addAll(buildOneFormRow("foo4", "JMX Port", String.valueOf(configuration.getJmxPort()), + "sdfla ffa blsdfa", false)); + form.setFields(items.toArray(new FormItem[items.size()])); + form.validate(); + + EnhancedIButton saveButton = new EnhancedIButton(MSG.common_button_save()); + saveButton.addClickHandler(new ClickHandler() { + public void onClick(ClickEvent clickEvent) { + save(); + } + }); + toolStrip = new EnhancedToolStrip(); + toolStrip.setWidth100(); + toolStrip.setMembersMargin(5); + toolStrip.setLayoutMargin(5); + toolStrip.addMember(saveButton); + form.setWidth100(); + form.setOverflow(Overflow.VISIBLE); + setWidth100(); + LayoutSpacer spacer = new LayoutSpacer(); + spacer.setWidth100(); + setMembers(form, spacer, toolStrip); + markForRedraw(); + } + + @Override + public void propertyValueChanged(PropertyValueChangeEvent event) { + + } +} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java index 7d0b3a9..e395b2b 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java @@ -29,38 +29,32 @@ import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDat
import java.util.ArrayList; import java.util.Arrays; -import java.util.EnumSet; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Set; +import java.util.Map.Entry;
import com.google.gwt.user.client.Timer; import com.google.gwt.user.client.rpc.AsyncCallback; -import com.smartgwt.client.types.Alignment; -import com.smartgwt.client.types.ContentsType; import com.smartgwt.client.types.Overflow; -import com.smartgwt.client.types.VerticalAlignment; import com.smartgwt.client.types.VisibilityMode; import com.smartgwt.client.widgets.HTMLFlow; import com.smartgwt.client.widgets.form.DynamicForm; -import com.smartgwt.client.widgets.form.fields.CanvasItem; import com.smartgwt.client.widgets.form.fields.FormItem; -import com.smartgwt.client.widgets.form.fields.LinkItem; import com.smartgwt.client.widgets.form.fields.StaticTextItem; -import com.smartgwt.client.widgets.form.fields.events.ClickEvent; -import com.smartgwt.client.widgets.form.fields.events.ClickHandler; import com.smartgwt.client.widgets.layout.LayoutSpacer; import com.smartgwt.client.widgets.layout.SectionStack; import com.smartgwt.client.widgets.layout.SectionStackSection;
import org.rhq.core.domain.cloud.StorageNode; +import org.rhq.core.domain.cloud.StorageNodeConfigurationComposite; +import org.rhq.core.domain.configuration.definition.ConfigurationDefinition; +import org.rhq.core.domain.configuration.definition.PropertyDefinition; +import org.rhq.core.domain.configuration.definition.PropertyGroupDefinition; import org.rhq.core.domain.criteria.ResourceCriteria; import org.rhq.core.domain.criteria.StorageNodeCriteria; -import org.rhq.core.domain.measurement.MeasurementDefinition; import org.rhq.core.domain.measurement.composite.MeasurementDataNumericHighLowComposite; import org.rhq.core.domain.resource.Resource; -import org.rhq.core.domain.resource.ResourceType; import org.rhq.core.domain.resource.composite.ResourceComposite; import org.rhq.core.domain.util.PageList; import org.rhq.enterprise.gui.coregui.client.BookmarkableView; @@ -70,13 +64,11 @@ import org.rhq.enterprise.gui.coregui.client.ViewPath; import org.rhq.enterprise.gui.coregui.client.components.table.TimestampCellFormatter; import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; import org.rhq.enterprise.gui.coregui.client.inventory.InventoryView; -import org.rhq.enterprise.gui.coregui.client.inventory.common.detail.summary.AbstractActivityView; import org.rhq.enterprise.gui.coregui.client.inventory.common.detail.summary.AbstractActivityView.ChartViewWindow; +import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.configuration.ConfigurationFilter; import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.configuration.ResourceConfigurationEditView; import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.D3GraphListView; import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.operation.history.ResourceOperationHistoryListView; -import org.rhq.enterprise.gui.coregui.client.inventory.resource.type.ResourceTypeRepository; -import org.rhq.enterprise.gui.coregui.client.util.BrowserUtility; import 
org.rhq.enterprise.gui.coregui.client.util.Log; import org.rhq.enterprise.gui.coregui.client.util.MeasurementUtility; import org.rhq.enterprise.gui.coregui.client.util.StringUtility; @@ -150,13 +142,14 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab + node.getAddress() + ")</div>"); Resource res = node.getResource(); if (res != null) { - fetchResourceComposite(res.getId()); +// fetchResourceComposite(res.getId()); } else { // skip this if the resource id is not there initSectionCount++; } - prepareDetailsSection(sectionStack, node); - fetchSparkLineDataForLoadComponent(sectionStack, node); + fetchStorageNodeConfigurationComposite(node); + prepareDetailsSection(node); + fetchSparkLineDataForLoadComponent(node);
}
@@ -170,33 +163,51 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab fetchUnackAlerts(storageNodeId); }
- private void fetchResourceComposite(final int resourceId) { - ResourceCriteria resourceCriteria = new ResourceCriteria(); - resourceCriteria.addFilterId(resourceId); - GWTServiceLookup.getResourceService().findResourceCompositesByCriteria(resourceCriteria, - new AsyncCallback<PageList<ResourceComposite>>() { + + private void fetchStorageNodeConfigurationComposite(final StorageNode node) { + GWTServiceLookup.getStorageService().retrieveConfiguration(node, + new AsyncCallback<StorageNodeConfigurationComposite>() { @Override public void onFailure(Throwable caught) { - Message message = new Message(MSG.view_inventory_resource_loadFailed(String.valueOf(resourceId)), + Message message = new Message(MSG.view_configurationHistoryDetails_error_loadFailure(), Message.Severity.Warning); - CoreGUI.goToView(InventoryView.VIEW_ID.getName(), message); initSectionCount = SECTION_COUNT; }
@Override - public void onSuccess(PageList<ResourceComposite> result) { - if (result.isEmpty()) { - onFailure(new Exception("Resource with id [" + resourceId + "] does not exist.")); - } else { - final ResourceComposite resourceComposite = result.get(0); -// prepareOperationHistory(resourceComposite); - prepareResourceConfigEditor(resourceComposite); - } + public void onSuccess(StorageNodeConfigurationComposite result) { + prepareResourceConfigEditor(result); } }); }
- private void fetchSparkLineDataForLoadComponent(final SectionStack stack, final StorageNode storageNode) { +// private void fetchResourceComposite(final int resourceId) { +// ResourceCriteria resourceCriteria = new ResourceCriteria(); +// resourceCriteria.addFilterId(resourceId); +// GWTServiceLookup.getResourceService().findResourceCompositesByCriteria(resourceCriteria, +// new AsyncCallback<PageList<ResourceComposite>>() { +// @Override +// public void onFailure(Throwable caught) { +// Message message = new Message(MSG.view_inventory_resource_loadFailed(String.valueOf(resourceId)), +// Message.Severity.Warning); +// CoreGUI.goToView(InventoryView.VIEW_ID.getName(), message); +// initSectionCount = SECTION_COUNT; +// } +// +// @Override +// public void onSuccess(PageList<ResourceComposite> result) { +// if (result.isEmpty()) { +// onFailure(new Exception("Resource with id [" + resourceId + "] does not exist.")); +// } else { +// final ResourceComposite resourceComposite = result.get(0); +//// prepareOperationHistory(resourceComposite); +// prepareResourceConfigEditor(resourceComposite); +// } +// } +// }); +// } + + private void fetchSparkLineDataForLoadComponent(final StorageNode storageNode) {
GWTServiceLookup.getStorageService().findStorageNodeLoadDataForLast(storageNode, 8, MeasurementUtility.UNIT_HOURS, 60, new AsyncCallback<Map<String, List<MeasurementDataNumericHighLowComposite>>>() { @@ -287,7 +298,7 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab }.run(); // fire the timer immediately }
- private void prepareDetailsSection(SectionStack stack, final StorageNode storageNode) { + private void prepareDetailsSection(final StorageNode storageNode) { final DynamicForm form = new DynamicForm(); form.setMargin(10); form.setWidth100(); @@ -388,8 +399,40 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab initSectionCount++; }
- private void prepareResourceConfigEditor(ResourceComposite resourceComposite) { - ResourceConfigurationEditView editorView = new ResourceConfigurationEditView(resourceComposite); +// private void prepareResourceConfigEditor(ResourceComposite resourceComposite) { + private void prepareResourceConfigEditor(final StorageNodeConfigurationComposite configuration) { + + StorageNodeConfigurationEditor editorView = new StorageNodeConfigurationEditor(configuration); + +// ResourceConfigurationEditView editorView = new ResourceConfigurationEditView(resourceComposite); +// ConfigurationFilter filter = new ConfigurationFilter() { +// @Override +// public ConfigurationDefinition filter(ConfigurationDefinition definition) { +// Map<String, PropertyDefinition> filteredConfigurationDefinition = new HashMap<String, PropertyDefinition>(); +// PropertyGroupDefinition groupDef = null; +// for (Entry<String, PropertyDefinition> propertyDefinitionEntry : definition.getPropertyDefinitions().entrySet()) { +// PropertyDefinition propertyDefinition = propertyDefinitionEntry.getValue(); +// if (propertyDefinition.getPropertyGroupDefinition() != null) { +// if (groupDef == null) { +// groupDef = propertyDefinition.getPropertyGroupDefinition(); +//// groupDef.setName("Storage Node Settings"); +// } +// propertyDefinition.setPropertyGroupDefinition(groupDef); +// } +// if (!"heapDumpOnOOMError".equals(propertyDefinition.getName()) +// && !"heapDumpDir".equals(propertyDefinition.getName()) +// && !"minHeapSize".equals(propertyDefinition.getName()) +// && !"gossipPort".equals(propertyDefinition.getName()) +// && !"cqlPort".equals(propertyDefinition.getName())) { +// filteredConfigurationDefinition.put(propertyDefinitionEntry.getKey(), +// propertyDefinitionEntry.getValue()); +// } +// } +// definition.setPropertyDefinitions(filteredConfigurationDefinition); +// return definition; +// } +// }; +// editorView.setFilter(filter); SectionStackSection section = new 
SectionStackSection("Configuration"); section.setItems(editorView); section.setExpanded(true); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeLoadComponent.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeLoadComponent.java index 7cfb278..d1b94a1 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeLoadComponent.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeLoadComponent.java @@ -21,29 +21,16 @@ package org.rhq.enterprise.gui.coregui.client.admin.storage; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasource.DONT_MISS_ME_COLOR; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasource.OK_COLOR; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasource.WARN_COLOR; -import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_ALERTS;
-import java.util.ArrayList; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Map.Entry;
import com.google.gwt.user.client.Timer; -import com.google.gwt.user.client.rpc.AsyncCallback; -import com.smartgwt.client.types.Alignment; import com.smartgwt.client.types.Autofit; -import com.smartgwt.client.types.ContentsType; -import com.smartgwt.client.types.Overflow; -import com.smartgwt.client.types.VerticalAlignment; -import com.smartgwt.client.widgets.HTMLFlow; import com.smartgwt.client.widgets.IButton; import com.smartgwt.client.widgets.events.ClickEvent; import com.smartgwt.client.widgets.events.ClickHandler; -import com.smartgwt.client.widgets.form.DynamicForm; -import com.smartgwt.client.widgets.form.fields.CanvasItem; -import com.smartgwt.client.widgets.form.fields.LinkItem; -import com.smartgwt.client.widgets.form.fields.StaticTextItem; import com.smartgwt.client.widgets.grid.ListGrid; import com.smartgwt.client.widgets.grid.ListGridField; import com.smartgwt.client.widgets.grid.ListGridRecord; @@ -51,15 +38,9 @@ import com.smartgwt.client.widgets.grid.events.DataArrivedEvent; import com.smartgwt.client.widgets.grid.events.DataArrivedHandler; import com.smartgwt.client.widgets.toolbar.ToolStrip;
-import org.rhq.core.domain.measurement.MeasurementDefinition; import org.rhq.core.domain.measurement.composite.MeasurementDataNumericHighLowComposite; import org.rhq.enterprise.gui.coregui.client.CoreGUI; -import org.rhq.enterprise.gui.coregui.client.LinkManager; import org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasource.StorageNodeLoadCompositeDatasource; -import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; -import org.rhq.enterprise.gui.coregui.client.inventory.common.detail.summary.AbstractActivityView; -import org.rhq.enterprise.gui.coregui.client.inventory.common.detail.summary.AbstractActivityView.ChartViewWindow; -import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.D3GraphListView; import org.rhq.enterprise.gui.coregui.client.util.BrowserUtility; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout;
@@ -115,7 +96,7 @@ public class StorageNodeLoadComponent extends EnhancedVLayout { StorageNodeLoadCompositeDatasource datasource = StorageNodeLoadCompositeDatasource.getInstance(storageNodeId); List<ListGridField> fields = datasource.getListGridFields(); if (showSparkLine) { - fields.add(new ListGridField("sparkline", 90)); + fields.add(0, new ListGridField("sparkline", "Chart", 75)); } loadGrid.setFields(fields.toArray(new ListGridField[fields.size()])); loadGrid.setAutoFetchData(true); @@ -184,149 +165,19 @@ public class StorageNodeLoadComponent extends EnhancedVLayout { someChartedData = lastValue != -1;
if (someChartedData && records.length > i) { - String contents = "<span id='sparkline_" + entry.getKey() + "' class='dynamicsparkline' width='0' " + String contents = "<span id='sparkline_" + entry.getKey() + "' class='dynamicsparkline' width='70' " + "values='" + commaDelimitedList + "'>...</span>"; records[i].setAttribute("sparkline", contents); } i++; } loadGrid.setData(records); - - - - - -// -// -// -// -// -// if (!results.isEmpty()) { -// -// //iterate over the retrieved charting data -// for (int index = 0; index < displayOrder.length; index++) { -// //retrieve the correct measurement definition -// final MeasurementDefinition md = measurementDefMap -// .get(displayOrder[index]); -// -// //load the data results for the given metric definition -// List<MeasurementDataNumericHighLowComposite> data = results -// .get(index); -// -// //locate last and minimum values. -// double lastValue = -1; -// double minValue = Double.MAX_VALUE;// -// for (MeasurementDataNumericHighLowComposite d : data) { -// if ((!Double.isNaN(d.getValue())) -// && (!String.valueOf(d.getValue()).contains("NaN"))) { -// if (d.getValue() < minValue) { -// minValue = d.getValue(); -// } -// lastValue = d.getValue(); -// } -// } -// -// //collapse the data into comma delimited list for consumption by third party javascript library(jquery.sparkline) -// String commaDelimitedList = ""; -// -// for (MeasurementDataNumericHighLowComposite d : data) { -// if ((!Double.isNaN(d.getValue())) -// && (!String.valueOf(d.getValue()).contains("NaN"))) { -// commaDelimitedList += d.getValue() + ","; -// } -// } -// DynamicForm row = new DynamicForm(); -// row.setNumCols(3); -// row.setColWidths(65, "*", 100); -// row.setWidth100(); -// row.setAutoHeight(); -// row.setOverflow(Overflow.VISIBLE); -// HTMLFlow sparklineGraph = new HTMLFlow(); -// String contents = "<span id='sparkline_" + index -// + "' class='dynamicsparkline' width='0' " + "values='" -// + commaDelimitedList + "'>...</span>"; -// 
sparklineGraph.setContents(contents); -// sparklineGraph.setContentsType(ContentsType.PAGE); -// //disable scrollbars on span -// sparklineGraph.setScrollbarSize(0); -// -// CanvasItem sparklineContainer = new CanvasItem(); -// sparklineContainer.setShowTitle(false); -// sparklineContainer.setHeight(16); -// sparklineContainer.setWidth(60); -// sparklineContainer.setCanvas(sparklineGraph); -// -// //Link/title element -// final String title = md.getDisplayName(); -// LinkItem link = AbstractActivityView.newLinkItem(title, null); -// link.setTooltip(title); -// link.setTitleVAlign(VerticalAlignment.TOP); -// link.setAlign(Alignment.LEFT); -// link.setClipValue(true); -// link.setWrap(true); -// link.setHeight(26); -// link.setWidth("100%"); -// if (!BrowserUtility.isBrowserPreIE9()){ -// link.addClickHandler(new ClickHandler() { -// @Override -// public void onClick(ClickEvent event) { -// window = new ChartViewWindow(title); -// -// graphView = D3GraphListView -// .createSingleGraph(resourceComposite.getResource(), -// md.getId(), true); -// -// window.addItem(graphView); -// window.show(); -// } -// }); -// } else{ -// link.disable(); -// } -// -// -// //Value -// String convertedValue; -// convertedValue = AbstractActivityView.convertLastValueForDisplay( -// lastValue, md); -// StaticTextItem value = AbstractActivityView -// .newTextItem(convertedValue); -// value.setVAlign(VerticalAlignment.TOP); -// value.setAlign(Alignment.RIGHT); -// -// row.setItems(sparklineContainer, link, value); -// row.setWidth100(); -// -// //if graph content returned -// if ((!md.getName().trim().contains("Trait.")) && (lastValue != -1)) { -// column.addMember(row); -// someChartedData = true; -// } -// } -// if (!someChartedData) {// when there are results but no chartable entries. 
-// DynamicForm row = AbstractActivityView.createEmptyDisplayRow( -// -// AbstractActivityView.RECENT_MEASUREMENTS_NONE); -// column.addMember(row); -// } else { -// //insert see more link -// DynamicForm row = new DynamicForm(); -// String link = LinkManager -// .getResourceMonitoringGraphsLink(resourceId); -// AbstractActivityView.addSeeMoreLink(row, link, column); -// } -// //call out to 3rd party javascript lib -// new Timer(){ -// @Override -// public void run() { -// BrowserUtility.graphSparkLines(); -// } -// }.schedule(200); -// } else { -// DynamicForm row = AbstractActivityView -// .createEmptyDisplayRow(AbstractActivityView.RECENT_MEASUREMENTS_NONE); -// column.addMember(row); -// } -// setRefreshing(false); + new Timer() { + @Override + public void run() { + BrowserUtility.graphSparkLines(); + scheduleRepeating(5000); + } + }.schedule(150); } } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/configuration/ConfigurationEditor.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/configuration/ConfigurationEditor.java index 4423623..7019d77 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/configuration/ConfigurationEditor.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/configuration/ConfigurationEditor.java @@ -454,10 +454,8 @@ public class ConfigurationEditor extends EnhancedVLayout { sectionStack.addSection(buildGroupSection(definition)); }
-// if (groupDefinitions.size() > 1) { - this.toolStrip = buildToolStrip(layout, sectionStack); - layout.addMember(toolStrip); -// } + this.toolStrip = buildToolStrip(layout, sectionStack); + layout.addMember(toolStrip); layout.addMember(sectionStack); }
@@ -569,6 +567,7 @@ public class ConfigurationEditor extends EnhancedVLayout { form.setNumCols(4); form.setCellPadding(5); form.setColWidths(190, 28, 210); +
List<FormItem> fields = new ArrayList<FormItem>(); addHeaderItems(fields); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/form/ValueWithUnitsItem.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/form/ValueWithUnitsItem.java new file mode 100644 index 0000000..a49600e --- /dev/null +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/form/ValueWithUnitsItem.java @@ -0,0 +1,133 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2011 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation, and/or the GNU Lesser + * General Public License, version 2.1, also as published by the Free + * Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License and the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * and the GNU Lesser General Public License along with this program; + * if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ +package org.rhq.enterprise.gui.coregui.client.components.form; + +import java.util.LinkedHashMap; +import java.util.Set; + +import com.smartgwt.client.widgets.form.DynamicForm; +import com.smartgwt.client.widgets.form.fields.CanvasItem; +import com.smartgwt.client.widgets.form.fields.FormItem; +import com.smartgwt.client.widgets.form.fields.IntegerItem; +import com.smartgwt.client.widgets.form.fields.SelectItem; +import com.smartgwt.client.widgets.form.validator.IntegerRangeValidator; + +import org.rhq.core.domain.measurement.MeasurementUnits; +import org.rhq.enterprise.gui.coregui.client.CoreGUI; +import org.rhq.enterprise.gui.coregui.client.Messages; +import org.rhq.enterprise.gui.coregui.client.util.FormUtility; + +/** + * A form item for entering an arbitrary value - consists of an IntegerItem for entering the value and a + * SelectItem for entering the units. + * + * @author Jirka Kremser + */ +public class ValueWithUnitsItem extends CanvasItem { + + private static final Messages MSG = CoreGUI.getMessages(); + + private static final String FIELD_VALUE = "value"; + private static final String FIELD_UNITS = "units"; + + private final DynamicForm form; + private Set<MeasurementUnits> supportedUnits; + private MeasurementUnits valueUnit; + + public ValueWithUnitsItem(String name, String title, Set<MeasurementUnits> supportedUnits) { + super(name, title); + + if (supportedUnits != null && !supportedUnits.isEmpty()) { + this.supportedUnits = supportedUnits; + if (null == this.valueUnit) { + this.valueUnit = supportedUnits.iterator().next(); + } + } + + this.form = new EnhancedDynamicForm(false, false); + this.form.setNumCols(2); + this.form.setColWidths("126", "60"); + + final IntegerItem valueItem = new IntegerItem(FIELD_VALUE, title); + valueItem.setShowTitle(getShowTitle()); + valueItem.setValue(getValue()); + IntegerRangeValidator integerRangeValidator = new IntegerRangeValidator(); + integerRangeValidator.setMin(1); + 
integerRangeValidator.setMax(Integer.MAX_VALUE); + valueItem.setValidators(integerRangeValidator); + valueItem.setValidateOnChange(true); + + SelectItem unitsItem = new SelectItem(FIELD_UNITS); + unitsItem.setShowTitle(false); + + LinkedHashMap<String, String> valueMap = new LinkedHashMap<String, String>(); + for (MeasurementUnits unit : supportedUnits) { + valueMap.put(unit.name().toLowerCase(), unit.toString()); + } + unitsItem.setValueMap(valueMap); + unitsItem.setDefaultToFirstOption(true); + + this.form.setFields(valueItem, unitsItem); + valueItem.setWidth(126); + unitsItem.setWidth(60); + + setCanvas(this.form); + } + + @Override + public void setValidateOnChange(Boolean validateOnChange) { + form.setValidateOnChange(validateOnChange); + } + + @Override + public void setValidateOnExit(Boolean validateOnExit) { + form.setValidateOnChange(validateOnExit); + } + + public void setValue(Integer value, MeasurementUnits unitType) { + if (!this.supportedUnits.contains(unitType)) { + throw new IllegalArgumentException(MSG.widget_durationItem_unitTypeNotSupported(unitType.name())); + } + if (value != null) { + this.form.setValue(FIELD_VALUE, value); + } else { + this.form.setValue(FIELD_VALUE, (String) null); + } + this.form.setValue(FIELD_UNITS, this.valueUnit.name().toLowerCase()); + + setValue(value); + } + + @Override + public Boolean validate() { + return this.form.validate(); + } + + public void setContextualHelp(String contextualHelp) { + if (contextualHelp != null) { + FormItem item; + item = this.form.getItem(FIELD_UNITS); + FormUtility.addContextualHelp(item, contextualHelp); + } + } +} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java index 1e3376c..abe759c 100644 --- 
a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java @@ -28,6 +28,7 @@ import java.util.Map; import com.google.gwt.user.client.rpc.RemoteService;
import org.rhq.core.domain.cloud.StorageNode; +import org.rhq.core.domain.cloud.StorageNodeConfigurationComposite; import org.rhq.core.domain.cloud.StorageNodeLoadComposite; import org.rhq.core.domain.criteria.StorageNodeCriteria; import org.rhq.core.domain.measurement.composite.MeasurementDataNumericHighLowComposite; @@ -80,4 +81,6 @@ public interface StorageGWTService extends RemoteService { List<Integer> findNotAcknowledgedStorageNodeAlertsCounts(List<Integer> storageNodeIds) throws RuntimeException;
Map<String, List<MeasurementDataNumericHighLowComposite>> findStorageNodeLoadDataForLast(StorageNode node, int lastN, int unit, int numPoints) throws RuntimeException; + + StorageNodeConfigurationComposite retrieveConfiguration(StorageNode storageNode) throws RuntimeException; } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/configuration/ConfigurationFilter.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/configuration/ConfigurationFilter.java new file mode 100644 index 0000000..2bca203 --- /dev/null +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/configuration/ConfigurationFilter.java @@ -0,0 +1,32 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ +package org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.configuration; + +import org.rhq.core.domain.configuration.definition.ConfigurationDefinition; + +/** + * Simple interface encapsulating the filter operation + * for restricting the fields to be passed to ConfigurationEditor + * + * @author Jirka Kremser + * + */ +public interface ConfigurationFilter { + ConfigurationDefinition filter(ConfigurationDefinition definition); +} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/configuration/ResourceConfigurationEditView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/configuration/ResourceConfigurationEditView.java index e3b119c..6a51e3d 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/configuration/ResourceConfigurationEditView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/configuration/ResourceConfigurationEditView.java @@ -67,6 +67,7 @@ public class ResourceConfigurationEditView extends EnhancedVLayout implements Pr private ToolStrip buttonbar; private IButton saveButton; private boolean refreshing = false; + private ConfigurationFilter filter;
public ResourceConfigurationEditView(ResourceComposite resourceComposite) { super(); @@ -91,7 +92,7 @@ public class ResourceConfigurationEditView extends EnhancedVLayout implements Pr private ToolStrip createButtonBar() { this.buttonbar = new ToolStrip(); buttonbar.setWidth100(); - buttonbar.setExtraSpace(10); +// buttonbar.setExtraSpace(10); buttonbar.setMembersMargin(5); buttonbar.setLayoutMargin(5);
@@ -153,6 +154,9 @@ public class ResourceConfigurationEditView extends EnhancedVLayout implements Pr @Override public void onSuccess(ConfigurationDefinition configurationDefinition) { Configuration configuration = configurationUpdate.getConfiguration(); + if (filter != null) { + configurationDefinition = filter.filter(configurationDefinition); + } editor = new ConfigurationEditor(configurationDefinition, configuration); editor.setOverflow(Overflow.AUTO); editor.addPropertyValueChangeListener(ResourceConfigurationEditView.this); @@ -229,4 +233,7 @@ public class ResourceConfigurationEditView extends EnhancedVLayout implements Pr } }
+ public void setFilter(ConfigurationFilter filter) { + this.filter = filter; + } } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java index 7f3093b..28ea78e 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java @@ -26,7 +26,9 @@ import java.util.ArrayList; import java.util.List; import java.util.Map;
+import org.rhq.core.domain.auth.Subject; import org.rhq.core.domain.cloud.StorageNode; +import org.rhq.core.domain.cloud.StorageNodeConfigurationComposite; import org.rhq.core.domain.cloud.StorageNodeLoadComposite; import org.rhq.core.domain.criteria.ResourceCriteria; import org.rhq.core.domain.criteria.StorageNodeCriteria; @@ -150,4 +152,14 @@ public class StorageGWTServiceImpl extends AbstractGWTServiceImpl implements Sto throw getExceptionToThrowToClient(t); } } + + @Override + public StorageNodeConfigurationComposite retrieveConfiguration(StorageNode storageNode) throws RuntimeException { + try { + return SerialUtility.prepare(storageNodeManager.retrieveConfiguration(getSessionSubject(), storageNode), + "StorageGWTServiceImpl.retrieveConfiguration"); + } catch (Throwable t) { + throw getExceptionToThrowToClient(t); + } + } }
commit 19f1069afd4e9bf4d049893ddd742be335c1a9bc Author: Mike Thompson mithomps@redhat.com Date: Thu Aug 8 10:47:22 2013 -0700
IE UI fixes
diff --git a/modules/enterprise/gui/coregui/src/main/webapp/CoreGUI.html b/modules/enterprise/gui/coregui/src/main/webapp/CoreGUI.html index 96e50de..32c5789 100644 --- a/modules/enterprise/gui/coregui/src/main/webapp/CoreGUI.html +++ b/modules/enterprise/gui/coregui/src/main/webapp/CoreGUI.html @@ -27,6 +27,29 @@ }
</script> + <script type="text/javascript"> + (function() { + var method; + var noop = function () {}; + var methods = [ + 'assert', 'clear', 'count', 'debug', 'dir', 'dirxml', 'error', + 'exception', 'group', 'groupCollapsed', 'groupEnd', 'info', 'log', + 'markTimeline', 'profile', 'profileEnd', 'table', 'time', 'timeEnd', + 'timeStamp', 'trace', 'warn' + ]; + var length = methods.length; + var console = (window.console = window.console || {}); + + while (length--) { + method = methods[length]; + + // Only stub undefined methods. + if (!console[method]) { + console[method] = noop; + } + } + }()); + </script>
<title>RHQ</title> <link rel="icon" type="image/png" href="/images/favicon.png" /> diff --git a/modules/enterprise/gui/coregui/src/main/webapp/js/jquery-1.9.1.js b/modules/enterprise/gui/coregui/src/main/webapp/js/jquery-1.9.1.js deleted file mode 100644 index bc8f36a..0000000 --- a/modules/enterprise/gui/coregui/src/main/webapp/js/jquery-1.9.1.js +++ /dev/null @@ -1,9597 +0,0 @@ -/*! - * jQuery JavaScript Library v1.9.1 - * http://jquery.com/ - * - * Includes Sizzle.js - * http://sizzlejs.com/ - * - * Copyright 2005, 2012 jQuery Foundation, Inc. and other contributors - * Released under the MIT license - * http://jquery.org/license - * - * Date: 2013-2-4 - */ -(function( window, undefined ) { - -// Can't do this because several apps including ASP.NET trace -// the stack via arguments.caller.callee and Firefox dies if -// you try to trace through "use strict" call chains. (#13335) -// Support: Firefox 18+ -//"use strict"; - var - // The deferred used on DOM ready - readyList, - - // A central reference to the root jQuery(document) - rootjQuery, - - // Support: IE<9 - // For `typeof node.method` instead of `node.method !== undefined` - core_strundefined = typeof undefined, - - // Use the correct document accordingly with window argument (sandbox) - document = window.document, - location = window.location, - - // Map over jQuery in case of overwrite - _jQuery = window.jQuery, - - // Map over the $ in case of overwrite - _$ = window.$, - - // [[Class]] -> type pairs - class2type = {}, - - // List of deleted data cache ids, so we can reuse them - core_deletedIds = [], - - core_version = "1.9.1", - - // Save a reference to some core methods - core_concat = core_deletedIds.concat, - core_push = core_deletedIds.push, - core_slice = core_deletedIds.slice, - core_indexOf = core_deletedIds.indexOf, - core_toString = class2type.toString, - core_hasOwn = class2type.hasOwnProperty, - core_trim = core_version.trim, - - // Define a local copy of jQuery - jQuery = function( 
selector, context ) { - // The jQuery object is actually just the init constructor 'enhanced' - return new jQuery.fn.init( selector, context, rootjQuery ); - }, - - // Used for matching numbers - core_pnum = /[+-]?(?:\d*.|)\d+(?:[eE][+-]?\d+|)/.source, - - // Used for splitting on whitespace - core_rnotwhite = /\S+/g, - - // Make sure we trim BOM and NBSP (here's looking at you, Safari 5.0 and IE) - rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g, - - // A simple way to check for HTML strings - // Prioritize #id over <tag> to avoid XSS via location.hash (#9521) - // Strict HTML recognition (#11290: must start with <) - rquickExpr = /^(?:(<[\w\W]+>)[^>]*|#([\w-]*))$/, - - // Match a standalone tag - rsingleTag = /^<(\w+)\s*/?>(?:</\1>|)$/, - - // JSON RegExp - rvalidchars = /^[],:{}\s]*$/, - rvalidbraces = /(?:^|:|,)(?:\s*[)+/g, - rvalidescape = /\(?:["\/bfnrt]|u[\da-fA-F]{4})/g, - rvalidtokens = /"[^"\\r\n]*"|true|false|null|-?(?:\d+.|)\d+(?:[eE][+-]?\d+|)/g, - - // Matches dashed string for camelizing - rmsPrefix = /^-ms-/, - rdashAlpha = /-([\da-z])/gi, - - // Used by jQuery.camelCase as callback to replace() - fcamelCase = function( all, letter ) { - return letter.toUpperCase(); - }, - - // The ready event handler - completed = function( event ) { - - // readyState === "complete" is good enough for us to call the dom ready in oldIE - if ( document.addEventListener || event.type === "load" || document.readyState === "complete" ) { - detach(); - jQuery.ready(); - } - }, - // Clean-up method for dom ready events - detach = function() { - if ( document.addEventListener ) { - document.removeEventListener( "DOMContentLoaded", completed, false ); - window.removeEventListener( "load", completed, false ); - - } else { - document.detachEvent( "onreadystatechange", completed ); - window.detachEvent( "onload", completed ); - } - }; - - jQuery.fn = jQuery.prototype = { - // The current version of jQuery being used - jquery: core_version, - - constructor: jQuery, - init: 
function( selector, context, rootjQuery ) { - var match, elem; - - // HANDLE: $(""), $(null), $(undefined), $(false) - if ( !selector ) { - return this; - } - - // Handle HTML strings - if ( typeof selector === "string" ) { - if ( selector.charAt(0) === "<" && selector.charAt( selector.length - 1 ) === ">" && selector.length >= 3 ) { - // Assume that strings that start and end with <> are HTML and skip the regex check - match = [ null, selector, null ]; - - } else { - match = rquickExpr.exec( selector ); - } - - // Match html or make sure no context is specified for #id - if ( match && (match[1] || !context) ) { - - // HANDLE: $(html) -> $(array) - if ( match[1] ) { - context = context instanceof jQuery ? context[0] : context; - - // scripts is true for back-compat - jQuery.merge( this, jQuery.parseHTML( - match[1], - context && context.nodeType ? context.ownerDocument || context : document, - true - ) ); - - // HANDLE: $(html, props) - if ( rsingleTag.test( match[1] ) && jQuery.isPlainObject( context ) ) { - for ( match in context ) { - // Properties of context are called as methods if possible - if ( jQuery.isFunction( this[ match ] ) ) { - this[ match ]( context[ match ] ); - - // ...and otherwise set as attributes - } else { - this.attr( match, context[ match ] ); - } - } - } - - return this; - - // HANDLE: $(#id) - } else { - elem = document.getElementById( match[2] ); - - // Check parentNode to catch when Blackberry 4.6 returns - // nodes that are no longer in the document #6963 - if ( elem && elem.parentNode ) { - // Handle the case where IE and Opera return items - // by name instead of ID - if ( elem.id !== match[2] ) { - return rootjQuery.find( selector ); - } - - // Otherwise, we inject the element directly into the jQuery object - this.length = 1; - this[0] = elem; - } - - this.context = document; - this.selector = selector; - return this; - } - - // HANDLE: $(expr, $(...)) - } else if ( !context || context.jquery ) { - return ( context || rootjQuery 
).find( selector ); - - // HANDLE: $(expr, context) - // (which is just equivalent to: $(context).find(expr) - } else { - return this.constructor( context ).find( selector ); - } - - // HANDLE: $(DOMElement) - } else if ( selector.nodeType ) { - this.context = this[0] = selector; - this.length = 1; - return this; - - // HANDLE: $(function) - // Shortcut for document ready - } else if ( jQuery.isFunction( selector ) ) { - return rootjQuery.ready( selector ); - } - - if ( selector.selector !== undefined ) { - this.selector = selector.selector; - this.context = selector.context; - } - - return jQuery.makeArray( selector, this ); - }, - - // Start with an empty selector - selector: "", - - // The default length of a jQuery object is 0 - length: 0, - - // The number of elements contained in the matched element set - size: function() { - return this.length; - }, - - toArray: function() { - return core_slice.call( this ); - }, - - // Get the Nth element in the matched element set OR - // Get the whole matched element set as a clean array - get: function( num ) { - return num == null ? - - // Return a 'clean' array - this.toArray() : - - // Return just the object - ( num < 0 ? this[ this.length + num ] : this[ num ] ); - }, - - // Take an array of elements and push it onto the stack - // (returning the new matched element set) - pushStack: function( elems ) { - - // Build a new jQuery matched element set - var ret = jQuery.merge( this.constructor(), elems ); - - // Add the old object onto the stack (as a reference) - ret.prevObject = this; - ret.context = this.context; - - // Return the newly-formed element set - return ret; - }, - - // Execute a callback for every element in the matched set. - // (You can seed the arguments with an array of args, but this is - // only used internally.) 
- each: function( callback, args ) { - return jQuery.each( this, callback, args ); - }, - - ready: function( fn ) { - // Add the callback - jQuery.ready.promise().done( fn ); - - return this; - }, - - slice: function() { - return this.pushStack( core_slice.apply( this, arguments ) ); - }, - - first: function() { - return this.eq( 0 ); - }, - - last: function() { - return this.eq( -1 ); - }, - - eq: function( i ) { - var len = this.length, - j = +i + ( i < 0 ? len : 0 ); - return this.pushStack( j >= 0 && j < len ? [ this[j] ] : [] ); - }, - - map: function( callback ) { - return this.pushStack( jQuery.map(this, function( elem, i ) { - return callback.call( elem, i, elem ); - })); - }, - - end: function() { - return this.prevObject || this.constructor(null); - }, - - // For internal use only. - // Behaves like an Array's method, not like a jQuery method. - push: core_push, - sort: [].sort, - splice: [].splice - }; - -// Give the init function the jQuery prototype for later instantiation - jQuery.fn.init.prototype = jQuery.fn; - - jQuery.extend = jQuery.fn.extend = function() { - var src, copyIsArray, copy, name, options, clone, - target = arguments[0] || {}, - i = 1, - length = arguments.length, - deep = false; - - // Handle a deep copy situation - if ( typeof target === "boolean" ) { - deep = target; - target = arguments[1] || {}; - // skip the boolean and the target - i = 2; - } - - // Handle case when target is a string or something (possible in deep copy) - if ( typeof target !== "object" && !jQuery.isFunction(target) ) { - target = {}; - } - - // extend jQuery itself if only one argument is passed - if ( length === i ) { - target = this; - --i; - } - - for ( ; i < length; i++ ) { - // Only deal with non-null/undefined values - if ( (options = arguments[ i ]) != null ) { - // Extend the base object - for ( name in options ) { - src = target[ name ]; - copy = options[ name ]; - - // Prevent never-ending loop - if ( target === copy ) { - continue; - } - - // 
Recurse if we're merging plain objects or arrays - if ( deep && copy && ( jQuery.isPlainObject(copy) || (copyIsArray = jQuery.isArray(copy)) ) ) { - if ( copyIsArray ) { - copyIsArray = false; - clone = src && jQuery.isArray(src) ? src : []; - - } else { - clone = src && jQuery.isPlainObject(src) ? src : {}; - } - - // Never move original objects, clone them - target[ name ] = jQuery.extend( deep, clone, copy ); - - // Don't bring in undefined values - } else if ( copy !== undefined ) { - target[ name ] = copy; - } - } - } - } - - // Return the modified object - return target; - }; - - jQuery.extend({ - noConflict: function( deep ) { - if ( window.$ === jQuery ) { - window.$ = _$; - } - - if ( deep && window.jQuery === jQuery ) { - window.jQuery = _jQuery; - } - - return jQuery; - }, - - // Is the DOM ready to be used? Set to true once it occurs. - isReady: false, - - // A counter to track how many items to wait for before - // the ready event fires. See #6781 - readyWait: 1, - - // Hold (or release) the ready event - holdReady: function( hold ) { - if ( hold ) { - jQuery.readyWait++; - } else { - jQuery.ready( true ); - } - }, - - // Handle when the DOM is ready - ready: function( wait ) { - - // Abort if there are pending holds or we're already ready - if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { - return; - } - - // Make sure body exists, at least, in case IE gets a little overzealous (ticket #5443). - if ( !document.body ) { - return setTimeout( jQuery.ready ); - } - - // Remember that the DOM is ready - jQuery.isReady = true; - - // If a normal DOM Ready event fired, decrement, and wait if need be - if ( wait !== true && --jQuery.readyWait > 0 ) { - return; - } - - // If there are functions bound, to execute - readyList.resolveWith( document, [ jQuery ] ); - - // Trigger any bound ready events - if ( jQuery.fn.trigger ) { - jQuery( document ).trigger("ready").off("ready"); - } - }, - - // See test/unit/core.js for details concerning isFunction. 
- // Since version 1.3, DOM methods and functions like alert - // aren't supported. They return false on IE (#2968). - isFunction: function( obj ) { - return jQuery.type(obj) === "function"; - }, - - isArray: Array.isArray || function( obj ) { - return jQuery.type(obj) === "array"; - }, - - isWindow: function( obj ) { - return obj != null && obj == obj.window; - }, - - isNumeric: function( obj ) { - return !isNaN( parseFloat(obj) ) && isFinite( obj ); - }, - - type: function( obj ) { - if ( obj == null ) { - return String( obj ); - } - return typeof obj === "object" || typeof obj === "function" ? - class2type[ core_toString.call(obj) ] || "object" : - typeof obj; - }, - - isPlainObject: function( obj ) { - // Must be an Object. - // Because of IE, we also have to check the presence of the constructor property. - // Make sure that DOM nodes and window objects don't pass through, as well - if ( !obj || jQuery.type(obj) !== "object" || obj.nodeType || jQuery.isWindow( obj ) ) { - return false; - } - - try { - // Not own constructor property must be Object - if ( obj.constructor && - !core_hasOwn.call(obj, "constructor") && - !core_hasOwn.call(obj.constructor.prototype, "isPrototypeOf") ) { - return false; - } - } catch ( e ) { - // IE8,9 Will throw exceptions on certain host objects #9897 - return false; - } - - // Own properties are enumerated firstly, so to speed up, - // if last one is own, then all properties are own. 
- - var key; - for ( key in obj ) {} - - return key === undefined || core_hasOwn.call( obj, key ); - }, - - isEmptyObject: function( obj ) { - var name; - for ( name in obj ) { - return false; - } - return true; - }, - - error: function( msg ) { - throw new Error( msg ); - }, - - // data: string of html - // context (optional): If specified, the fragment will be created in this context, defaults to document - // keepScripts (optional): If true, will include scripts passed in the html string - parseHTML: function( data, context, keepScripts ) { - if ( !data || typeof data !== "string" ) { - return null; - } - if ( typeof context === "boolean" ) { - keepScripts = context; - context = false; - } - context = context || document; - - var parsed = rsingleTag.exec( data ), - scripts = !keepScripts && []; - - // Single tag - if ( parsed ) { - return [ context.createElement( parsed[1] ) ]; - } - - parsed = jQuery.buildFragment( [ data ], context, scripts ); - if ( scripts ) { - jQuery( scripts ).remove(); - } - return jQuery.merge( [], parsed.childNodes ); - }, - - parseJSON: function( data ) { - // Attempt to parse using the native JSON parser first - if ( window.JSON && window.JSON.parse ) { - return window.JSON.parse( data ); - } - - if ( data === null ) { - return data; - } - - if ( typeof data === "string" ) { - - // Make sure leading/trailing whitespace is removed (IE can't handle it) - data = jQuery.trim( data ); - - if ( data ) { - // Make sure the incoming data is actual JSON - // Logic borrowed from http://json.org/json2.js - if ( rvalidchars.test( data.replace( rvalidescape, "@" ) - .replace( rvalidtokens, "]" ) - .replace( rvalidbraces, "")) ) { - - return ( new Function( "return " + data ) )(); - } - } - } - - jQuery.error( "Invalid JSON: " + data ); - }, - - // Cross-browser xml parsing - parseXML: function( data ) { - var xml, tmp; - if ( !data || typeof data !== "string" ) { - return null; - } - try { - if ( window.DOMParser ) { // Standard - tmp = new 
DOMParser(); - xml = tmp.parseFromString( data , "text/xml" ); - } else { // IE - xml = new ActiveXObject( "Microsoft.XMLDOM" ); - xml.async = "false"; - xml.loadXML( data ); - } - } catch( e ) { - xml = undefined; - } - if ( !xml || !xml.documentElement || xml.getElementsByTagName( "parsererror" ).length ) { - jQuery.error( "Invalid XML: " + data ); - } - return xml; - }, - - noop: function() {}, - - // Evaluates a script in a global context - // Workarounds based on findings by Jim Driscoll - // http://weblogs.java.net/blog/driscoll/archive/2009/09/08/eval-javascript-glo... - globalEval: function( data ) { - if ( data && jQuery.trim( data ) ) { - // We use execScript on Internet Explorer - // We use an anonymous function so that context is window - // rather than jQuery in Firefox - ( window.execScript || function( data ) { - window[ "eval" ].call( window, data ); - } )( data ); - } - }, - - // Convert dashed to camelCase; used by the css and data modules - // Microsoft forgot to hump their vendor prefix (#9572) - camelCase: function( string ) { - return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); - }, - - nodeName: function( elem, name ) { - return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); - }, - - // args is for internal usage only - each: function( obj, callback, args ) { - var value, - i = 0, - length = obj.length, - isArray = isArraylike( obj ); - - if ( args ) { - if ( isArray ) { - for ( ; i < length; i++ ) { - value = callback.apply( obj[ i ], args ); - - if ( value === false ) { - break; - } - } - } else { - for ( i in obj ) { - value = callback.apply( obj[ i ], args ); - - if ( value === false ) { - break; - } - } - } - - // A special, fast, case for the most common use of each - } else { - if ( isArray ) { - for ( ; i < length; i++ ) { - value = callback.call( obj[ i ], i, obj[ i ] ); - - if ( value === false ) { - break; - } - } - } else { - for ( i in obj ) { - value = callback.call( obj[ i ], i, 
obj[ i ] ); - - if ( value === false ) { - break; - } - } - } - } - - return obj; - }, - - // Use native String.trim function wherever possible - trim: core_trim && !core_trim.call("\uFEFF\xA0") ? - function( text ) { - return text == null ? - "" : - core_trim.call( text ); - } : - - // Otherwise use our own trimming functionality - function( text ) { - return text == null ? - "" : - ( text + "" ).replace( rtrim, "" ); - }, - - // results is for internal usage only - makeArray: function( arr, results ) { - var ret = results || []; - - if ( arr != null ) { - if ( isArraylike( Object(arr) ) ) { - jQuery.merge( ret, - typeof arr === "string" ? - [ arr ] : arr - ); - } else { - core_push.call( ret, arr ); - } - } - - return ret; - }, - - inArray: function( elem, arr, i ) { - var len; - - if ( arr ) { - if ( core_indexOf ) { - return core_indexOf.call( arr, elem, i ); - } - - len = arr.length; - i = i ? i < 0 ? Math.max( 0, len + i ) : i : 0; - - for ( ; i < len; i++ ) { - // Skip accessing in sparse arrays - if ( i in arr && arr[ i ] === elem ) { - return i; - } - } - } - - return -1; - }, - - merge: function( first, second ) { - var l = second.length, - i = first.length, - j = 0; - - if ( typeof l === "number" ) { - for ( ; j < l; j++ ) { - first[ i++ ] = second[ j ]; - } - } else { - while ( second[j] !== undefined ) { - first[ i++ ] = second[ j++ ]; - } - } - - first.length = i; - - return first; - }, - - grep: function( elems, callback, inv ) { - var retVal, - ret = [], - i = 0, - length = elems.length; - inv = !!inv; - - // Go through the array, only saving the items - // that pass the validator function - for ( ; i < length; i++ ) { - retVal = !!callback( elems[ i ], i ); - if ( inv !== retVal ) { - ret.push( elems[ i ] ); - } - } - - return ret; - }, - - // arg is for internal usage only - map: function( elems, callback, arg ) { - var value, - i = 0, - length = elems.length, - isArray = isArraylike( elems ), - ret = []; - - // Go through the array, translating 
each of the items to their - if ( isArray ) { - for ( ; i < length; i++ ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret[ ret.length ] = value; - } - } - - // Go through every key on the object, - } else { - for ( i in elems ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret[ ret.length ] = value; - } - } - } - - // Flatten any nested arrays - return core_concat.apply( [], ret ); - }, - - // A global GUID counter for objects - guid: 1, - - // Bind a function to a context, optionally partially applying any - // arguments. - proxy: function( fn, context ) { - var args, proxy, tmp; - - if ( typeof context === "string" ) { - tmp = fn[ context ]; - context = fn; - fn = tmp; - } - - // Quick check to determine if target is callable, in the spec - // this throws a TypeError, but we will just return undefined. - if ( !jQuery.isFunction( fn ) ) { - return undefined; - } - - // Simulated bind - args = core_slice.call( arguments, 2 ); - proxy = function() { - return fn.apply( context || this, args.concat( core_slice.call( arguments ) ) ); - }; - - // Set the guid of unique handler to the same of original handler, so it can be removed - proxy.guid = fn.guid = fn.guid || jQuery.guid++; - - return proxy; - }, - - // Multifunctional method to get and set values of a collection - // The value/s can optionally be executed if it's a function - access: function( elems, fn, key, value, chainable, emptyGet, raw ) { - var i = 0, - length = elems.length, - bulk = key == null; - - // Sets many values - if ( jQuery.type( key ) === "object" ) { - chainable = true; - for ( i in key ) { - jQuery.access( elems, fn, i, key[i], true, emptyGet, raw ); - } - - // Sets one value - } else if ( value !== undefined ) { - chainable = true; - - if ( !jQuery.isFunction( value ) ) { - raw = true; - } - - if ( bulk ) { - // Bulk operations run against the entire set - if ( raw ) { - fn.call( elems, value ); - fn = null; - - // ...except when 
executing function values - } else { - bulk = fn; - fn = function( elem, key, value ) { - return bulk.call( jQuery( elem ), value ); - }; - } - } - - if ( fn ) { - for ( ; i < length; i++ ) { - fn( elems[i], key, raw ? value : value.call( elems[i], i, fn( elems[i], key ) ) ); - } - } - } - - return chainable ? - elems : - - // Gets - bulk ? - fn.call( elems ) : - length ? fn( elems[0], key ) : emptyGet; - }, - - now: function() { - return ( new Date() ).getTime(); - } - }); - - jQuery.ready.promise = function( obj ) { - if ( !readyList ) { - - readyList = jQuery.Deferred(); - - // Catch cases where $(document).ready() is called after the browser event has already occurred. - // we once tried to use readyState "interactive" here, but it caused issues like the one - // discovered by ChrisS here: http://bugs.jquery.com/ticket/12282#comment:15 - if ( document.readyState === "complete" ) { - // Handle it asynchronously to allow scripts the opportunity to delay ready - setTimeout( jQuery.ready ); - - // Standards-based browsers support DOMContentLoaded - } else if ( document.addEventListener ) { - // Use the handy event callback - document.addEventListener( "DOMContentLoaded", completed, false ); - - // A fallback to window.onload, that will always work - window.addEventListener( "load", completed, false ); - - // If IE event model is used - } else { - // Ensure firing before onload, maybe late but safe also for iframes - document.attachEvent( "onreadystatechange", completed ); - - // A fallback to window.onload, that will always work - window.attachEvent( "onload", completed ); - - // If IE and not a frame - // continually check to see if the document is ready - var top = false; - - try { - top = window.frameElement == null && document.documentElement; - } catch(e) {} - - if ( top && top.doScroll ) { - (function doScrollCheck() { - if ( !jQuery.isReady ) { - - try { - // Use the trick by Diego Perini - // http://javascript.nwbox.com/IEContentLoaded/ - 
top.doScroll("left"); - } catch(e) { - return setTimeout( doScrollCheck, 50 ); - } - - // detach all dom ready events - detach(); - - // and execute any waiting functions - jQuery.ready(); - } - })(); - } - } - } - return readyList.promise( obj ); - }; - -// Populate the class2type map - jQuery.each("Boolean Number String Function Array Date RegExp Object Error".split(" "), function(i, name) { - class2type[ "[object " + name + "]" ] = name.toLowerCase(); - }); - - function isArraylike( obj ) { - var length = obj.length, - type = jQuery.type( obj ); - - if ( jQuery.isWindow( obj ) ) { - return false; - } - - if ( obj.nodeType === 1 && length ) { - return true; - } - - return type === "array" || type !== "function" && - ( length === 0 || - typeof length === "number" && length > 0 && ( length - 1 ) in obj ); - } - -// All jQuery objects should point back to these - rootjQuery = jQuery(document); -// String to Object options format cache - var optionsCache = {}; - -// Convert String-formatted options into Object-formatted ones and store in cache - function createOptions( options ) { - var object = optionsCache[ options ] = {}; - jQuery.each( options.match( core_rnotwhite ) || [], function( _, flag ) { - object[ flag ] = true; - }); - return object; - } - - /* - * Create a callback list using the following parameters: - * - * options: an optional list of space-separated options that will change how - * the callback list behaves or a more traditional option object - * - * By default a callback list will act like an event callback list and can be - * "fired" multiple times. 
 *
 * Possible options:
 *
 *	once:			will ensure the callback list can only be fired once (like a Deferred)
 *
 *	memory:			will keep track of previous values and will call any callback added
 *					after the list has been fired right away with the latest "memorized"
 *					values (like a Deferred)
 *
 *	unique:			will ensure a callback can only be added once (no duplicate in the list)
 *
 *	stopOnFalse:	interrupt callings when a callback returns false
 *
 */
// NOTE(review): optionsCache / createOptions are declared earlier in the file
// (outside this chunk) — confirm before moving this block.
jQuery.Callbacks = function( options ) {

	// Convert options from String-formatted to Object-formatted if needed
	// (we check in cache first)
	options = typeof options === "string" ?
		( optionsCache[ options ] || createOptions( options ) ) :
		jQuery.extend( {}, options );

	var // Flag to know if list is currently firing
		firing,
		// Last fire value (for non-forgettable lists)
		memory,
		// Flag to know if list was already fired
		fired,
		// End of the loop when firing
		firingLength,
		// Index of currently firing callback (modified by remove if needed)
		firingIndex,
		// First callback to fire (used internally by add and fireWith)
		firingStart,
		// Actual callback list
		list = [],
		// Stack of fire calls for repeatable lists
		stack = !options.once && [],
		// Fire callbacks
		fire = function( data ) {
			memory = options.memory && data;
			fired = true;
			firingIndex = firingStart || 0;
			firingStart = 0;
			firingLength = list.length;
			firing = true;
			for ( ; list && firingIndex < firingLength; firingIndex++ ) {
				if ( list[ firingIndex ].apply( data[ 0 ], data[ 1 ] ) === false && options.stopOnFalse ) {
					memory = false; // To prevent further calls using add
					break;
				}
			}
			firing = false;
			if ( list ) {
				if ( stack ) {
					if ( stack.length ) {
						fire( stack.shift() );
					}
				} else if ( memory ) {
					list = [];
				} else {
					self.disable();
				}
			}
		},
		// Actual Callbacks object
		self = {
			// Add a callback or a collection of callbacks to the list
			add: function() {
				if ( list ) {
					// First, we save the current length
					var start = list.length;
					(function add( args ) {
						jQuery.each( args, function( _, arg ) {
							var type = jQuery.type( arg );
							if ( type === "function" ) {
								if ( !options.unique || !self.has( arg ) ) {
									list.push( arg );
								}
							} else if ( arg && arg.length && type !== "string" ) {
								// Inspect recursively
								add( arg );
							}
						});
					})( arguments );
					// Do we need to add the callbacks to the
					// current firing batch?
					if ( firing ) {
						firingLength = list.length;
					// With memory, if we're not firing then
					// we should call right away
					} else if ( memory ) {
						firingStart = start;
						fire( memory );
					}
				}
				return this;
			},
			// Remove a callback from the list
			remove: function() {
				if ( list ) {
					jQuery.each( arguments, function( _, arg ) {
						var index;
						while( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) {
							list.splice( index, 1 );
							// Handle firing indexes
							if ( firing ) {
								if ( index <= firingLength ) {
									firingLength--;
								}
								if ( index <= firingIndex ) {
									firingIndex--;
								}
							}
						}
					});
				}
				return this;
			},
			// Check if a given callback is in the list.
			// If no argument is given, return whether or not list has callbacks attached.
			has: function( fn ) {
				return fn ? jQuery.inArray( fn, list ) > -1 : !!( list && list.length );
			},
			// Remove all callbacks from the list
			empty: function() {
				list = [];
				return this;
			},
			// Have the list do nothing anymore
			disable: function() {
				list = stack = memory = undefined;
				return this;
			},
			// Is it disabled?
			disabled: function() {
				return !list;
			},
			// Lock the list in its current state
			lock: function() {
				stack = undefined;
				if ( !memory ) {
					self.disable();
				}
				return this;
			},
			// Is it locked?
			locked: function() {
				return !stack;
			},
			// Call all callbacks with the given context and arguments
			fireWith: function( context, args ) {
				args = args || [];
				args = [ context, args.slice ? args.slice() : args ];
				if ( list && ( !fired || stack ) ) {
					if ( firing ) {
						stack.push( args );
					} else {
						fire( args );
					}
				}
				return this;
			},
			// Call all the callbacks with the given arguments
			fire: function() {
				self.fireWith( this, arguments );
				return this;
			},
			// To know if the callbacks have already been called at least once
			fired: function() {
				return !!fired;
			}
		};

	return self;
};
jQuery.extend({

	// jQuery's Deferred implementation: a chainable promise-like object built
	// from three Callbacks lists (resolve / reject / notify).
	Deferred: function( func ) {
		var tuples = [
				// action, add listener, listener list, final state
				[ "resolve", "done", jQuery.Callbacks("once memory"), "resolved" ],
				[ "reject", "fail", jQuery.Callbacks("once memory"), "rejected" ],
				[ "notify", "progress", jQuery.Callbacks("memory") ]
			],
			state = "pending",
			promise = {
				state: function() {
					return state;
				},
				always: function() {
					deferred.done( arguments ).fail( arguments );
					return this;
				},
				then: function( /* fnDone, fnFail, fnProgress */ ) {
					var fns = arguments;
					return jQuery.Deferred(function( newDefer ) {
						jQuery.each( tuples, function( i, tuple ) {
							var action = tuple[ 0 ],
								fn = jQuery.isFunction( fns[ i ] ) && fns[ i ];
							// deferred[ done | fail | progress ] for forwarding actions to newDefer
							deferred[ tuple[1] ](function() {
								var returned = fn && fn.apply( this, arguments );
								if ( returned && jQuery.isFunction( returned.promise ) ) {
									returned.promise()
										.done( newDefer.resolve )
										.fail( newDefer.reject )
										.progress( newDefer.notify );
								} else {
									newDefer[ action + "With" ]( this === promise ? newDefer.promise() : this, fn ? [ returned ] : arguments );
								}
							});
						});
						fns = null;
					}).promise();
				},
				// Get a promise for this deferred
				// If obj is provided, the promise aspect is added to the object
				promise: function( obj ) {
					return obj != null ? jQuery.extend( obj, promise ) : promise;
				}
			},
			deferred = {};

		// Keep pipe for back-compat
		promise.pipe = promise.then;

		// Add list-specific methods
		jQuery.each( tuples, function( i, tuple ) {
			var list = tuple[ 2 ],
				stateString = tuple[ 3 ];

			// promise[ done | fail | progress ] = list.add
			promise[ tuple[1] ] = list.add;

			// Handle state
			if ( stateString ) {
				list.add(function() {
					// state = [ resolved | rejected ]
					state = stateString;

				// [ reject_list | resolve_list ].disable; progress_list.lock
				}, tuples[ i ^ 1 ][ 2 ].disable, tuples[ 2 ][ 2 ].lock );
			}

			// deferred[ resolve | reject | notify ]
			deferred[ tuple[0] ] = function() {
				deferred[ tuple[0] + "With" ]( this === deferred ? promise : this, arguments );
				return this;
			};
			deferred[ tuple[0] + "With" ] = list.fireWith;
		});

		// Make the deferred a promise
		promise.promise( deferred );

		// Call given func if any
		if ( func ) {
			func.call( deferred, deferred );
		}

		// All done!
		return deferred;
	},

	// Deferred helper
	when: function( subordinate /* , ..., subordinateN */ ) {
		var i = 0,
			resolveValues = core_slice.call( arguments ),
			length = resolveValues.length,

			// the count of uncompleted subordinates
			remaining = length !== 1 || ( subordinate && jQuery.isFunction( subordinate.promise ) ) ? length : 0,

			// the master Deferred. If resolveValues consist of only a single Deferred, just use that.
			deferred = remaining === 1 ? subordinate : jQuery.Deferred(),

			// Update function for both resolve and progress values
			updateFunc = function( i, contexts, values ) {
				return function( value ) {
					contexts[ i ] = this;
					values[ i ] = arguments.length > 1 ? core_slice.call( arguments ) : value;
					if( values === progressValues ) {
						deferred.notifyWith( contexts, values );
					} else if ( !( --remaining ) ) {
						deferred.resolveWith( contexts, values );
					}
				};
			},

			progressValues, progressContexts, resolveContexts;

		// add listeners to Deferred subordinates; treat others as resolved
		if ( length > 1 ) {
			progressValues = new Array( length );
			progressContexts = new Array( length );
			resolveContexts = new Array( length );
			for ( ; i < length; i++ ) {
				if ( resolveValues[ i ] && jQuery.isFunction( resolveValues[ i ].promise ) ) {
					resolveValues[ i ].promise()
						.done( updateFunc( i, resolveContexts, resolveValues ) )
						.fail( deferred.reject )
						.progress( updateFunc( i, progressContexts, progressValues ) );
				} else {
					--remaining;
				}
			}
		}

		// if we're not waiting on anything, resolve the master
		if ( !remaining ) {
			deferred.resolveWith( resolveContexts, resolveValues );
		}

		return deferred.promise();
	}
});
// Browser feature detection, computed once at load time (plus a second batch
// that needs a <body> and runs at document-ready).
jQuery.support = (function() {

	var support, all, a,
		input, select, fragment,
		opt, eventName, isSupported, i,
		div = document.createElement("div");

	// Setup
	div.setAttribute( "className", "t" );
	div.innerHTML = " <link/><table></table><a href='/a'>a</a><input type='checkbox'/>";

	// Support tests won't run in some limited or non-browser environments
	all = div.getElementsByTagName("*");
	a = div.getElementsByTagName("a")[ 0 ];
	if ( !all || !a || !all.length ) {
		return {};
	}

	// First batch of tests
	select = document.createElement("select");
	opt = select.appendChild( document.createElement("option") );
	input = div.getElementsByTagName("input")[ 0 ];

	a.style.cssText = "top:1px;float:left;opacity:.5";
	support = {
		// Test setAttribute on camelCase class. If it works, we need attrFixes when doing get/setAttribute (ie6/7)
		getSetAttribute: div.className !== "t",

		// IE strips leading whitespace when .innerHTML is used
		leadingWhitespace: div.firstChild.nodeType === 3,

		// Make sure that tbody elements aren't automatically inserted
		// IE will insert them into empty tables
		tbody: !div.getElementsByTagName("tbody").length,

		// Make sure that link elements get serialized correctly by innerHTML
		// This requires a wrapper element in IE
		htmlSerialize: !!div.getElementsByTagName("link").length,

		// Get the style information from getAttribute
		// (IE uses .cssText instead)
		style: /top/.test( a.getAttribute("style") ),

		// Make sure that URLs aren't manipulated
		// (IE normalizes it by default)
		hrefNormalized: a.getAttribute("href") === "/a",

		// Make sure that element opacity exists
		// (IE uses filter instead)
		// Use a regex to work around a WebKit issue. See #5145
		opacity: /^0.5/.test( a.style.opacity ),

		// Verify style float existence
		// (IE uses styleFloat instead of cssFloat)
		cssFloat: !!a.style.cssFloat,

		// Check the default checkbox/radio value ("" on WebKit; "on" elsewhere)
		checkOn: !!input.value,

		// Make sure that a selected-by-default option has a working selected property.
		// (WebKit defaults to false instead of true, IE too, if it's in an optgroup)
		optSelected: opt.selected,

		// Tests for enctype support on a form (#6743)
		enctype: !!document.createElement("form").enctype,

		// Makes sure cloning an html5 element does not cause problems
		// Where outerHTML is undefined, this still works
		html5Clone: document.createElement("nav").cloneNode( true ).outerHTML !== "<:nav></:nav>",

		// jQuery.support.boxModel DEPRECATED in 1.8 since we don't support Quirks Mode
		boxModel: document.compatMode === "CSS1Compat",

		// Will be defined later
		deleteExpando: true,
		noCloneEvent: true,
		inlineBlockNeedsLayout: false,
		shrinkWrapBlocks: false,
		reliableMarginRight: true,
		boxSizingReliable: true,
		pixelPosition: false
	};

	// Make sure checked status is properly cloned
	input.checked = true;
	support.noCloneChecked = input.cloneNode( true ).checked;

	// Make sure that the options inside disabled selects aren't marked as disabled
	// (WebKit marks them as disabled)
	select.disabled = true;
	support.optDisabled = !opt.disabled;

	// Support: IE<9
	try {
		delete div.test;
	} catch( e ) {
		support.deleteExpando = false;
	}

	// Check if we can trust getAttribute("value")
	input = document.createElement("input");
	input.setAttribute( "value", "" );
	support.input = input.getAttribute( "value" ) === "";

	// Check if an input maintains its value after becoming a radio
	input.value = "t";
	input.setAttribute( "type", "radio" );
	support.radioValue = input.value === "t";

	// #11217 - WebKit loses check when the name is after the checked attribute
	input.setAttribute( "checked", "t" );
	input.setAttribute( "name", "t" );

	fragment = document.createDocumentFragment();
	fragment.appendChild( input );

	// Check if a disconnected checkbox will retain its checked
	// value of true after appended to the DOM (IE6/7)
	support.appendChecked = input.checked;

	// WebKit doesn't clone checked state correctly in fragments
	support.checkClone = fragment.cloneNode( true ).cloneNode( true ).lastChild.checked;

	// Support: IE<9
	// Opera does not clone events (and typeof div.attachEvent === undefined).
	// IE9-10 clones events bound via attachEvent, but they don't trigger with .click()
	if ( div.attachEvent ) {
		div.attachEvent( "onclick", function() {
			support.noCloneEvent = false;
		});

		div.cloneNode( true ).click();
	}

	// Support: IE<9 (lack submit/change bubble), Firefox 17+ (lack focusin event)
	// Beware of CSP restrictions (https://developer.mozilla.org/en/Security/CSP), test/csp.php
	for ( i in { submit: true, change: true, focusin: true }) {
		div.setAttribute( eventName = "on" + i, "t" );

		support[ i + "Bubbles" ] = eventName in window || div.attributes[ eventName ].expando === false;
	}

	div.style.backgroundClip = "content-box";
	div.cloneNode( true ).style.backgroundClip = "";
	support.clearCloneStyle = div.style.backgroundClip === "content-box";

	// Run tests that need a body at doc ready
	jQuery(function() {
		var container, marginDiv, tds,
			divReset = "padding:0;margin:0;border:0;display:block;box-sizing:content-box;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;",
			body = document.getElementsByTagName("body")[0];

		if ( !body ) {
			// Return for frameset docs that don't have a body
			return;
		}

		container = document.createElement("div");
		container.style.cssText = "border:0;width:0;height:0;position:absolute;top:0;left:-9999px;margin-top:1px";

		body.appendChild( container ).appendChild( div );

		// Support: IE8
		// Check if table cells still have offsetWidth/Height when they are set
		// to display:none and there are still other visible table cells in a
		// table row; if so, offsetWidth/Height are not reliable for use when
		// determining if an element has been hidden directly using
		// display:none (it is still safe to use offsets if a parent element is
		// hidden; don safety goggles and see bug #4512 for more information).
		div.innerHTML = "<table><tr><td></td><td>t</td></tr></table>";
		tds = div.getElementsByTagName("td");
		tds[ 0 ].style.cssText = "padding:0;margin:0;border:0;display:none";
		isSupported = ( tds[ 0 ].offsetHeight === 0 );

		tds[ 0 ].style.display = "";
		tds[ 1 ].style.display = "none";

		// Support: IE8
		// Check if empty table cells still have offsetWidth/Height
		support.reliableHiddenOffsets = isSupported && ( tds[ 0 ].offsetHeight === 0 );

		// Check box-sizing and margin behavior
		div.innerHTML = "";
		div.style.cssText = "box-sizing:border-box;-moz-box-sizing:border-box;-webkit-box-sizing:border-box;padding:1px;border:1px;display:block;width:4px;margin-top:1%;position:absolute;top:1%;";
		support.boxSizing = ( div.offsetWidth === 4 );
		support.doesNotIncludeMarginInBodyOffset = ( body.offsetTop !== 1 );

		// Use window.getComputedStyle because jsdom on node.js will break without it.
		if ( window.getComputedStyle ) {
			support.pixelPosition = ( window.getComputedStyle( div, null ) || {} ).top !== "1%";
			support.boxSizingReliable = ( window.getComputedStyle( div, null ) || { width: "4px" } ).width === "4px";

			// Check if div with explicit width and no margin-right incorrectly
			// gets computed margin-right based on width of container. (#3333)
			// Fails in WebKit before Feb 2011 nightlies
			// WebKit Bug 13343 - getComputedStyle returns wrong value for margin-right
			marginDiv = div.appendChild( document.createElement("div") );
			marginDiv.style.cssText = div.style.cssText = divReset;
			marginDiv.style.marginRight = marginDiv.style.width = "0";
			div.style.width = "1px";

			support.reliableMarginRight =
				!parseFloat( ( window.getComputedStyle( marginDiv, null ) || {} ).marginRight );
		}

		if ( typeof div.style.zoom !== core_strundefined ) {
			// Support: IE<8
			// Check if natively block-level elements act like inline-block
			// elements when setting their display to 'inline' and giving
			// them layout
			div.innerHTML = "";
			div.style.cssText = divReset + "width:1px;padding:1px;display:inline;zoom:1";
			support.inlineBlockNeedsLayout = ( div.offsetWidth === 3 );

			// Support: IE6
			// Check if elements with layout shrink-wrap their children
			div.style.display = "block";
			div.innerHTML = "<div></div>";
			div.firstChild.style.width = "5px";
			support.shrinkWrapBlocks = ( div.offsetWidth !== 3 );

			if ( support.inlineBlockNeedsLayout ) {
				// Prevent IE 6 from affecting layout for positioned elements #11048
				// Prevent IE from shrinking the body in IE 7 mode #12869
				// Support: IE<8
				body.style.zoom = 1;
			}
		}

		body.removeChild( container );

		// Null elements to avoid leaks in IE
		container = div = tds = marginDiv = null;
	});

	// Null elements to avoid leaks in IE
	all = select = fragment = opt = a = input = null;

	return support;
})();

// rbrace: does the string look like a JSON object/array literal?
// NOTE(review): the escapes on the braces/brackets were lost in extraction
// (shown as "{[\s\S]*}|[[\s\S]*]"); restored here to match upstream semantics.
var rbrace = /(?:\{[\s\S]*\}|\[[\s\S]*\])$/,
	rmultiDash = /([A-Z])/g;

// Core implementation behind jQuery.data/_data: reads or writes an entry in
// the per-element (or per-object) data cache. `pvt` selects the internal
// cache rather than the user-facing `.data` sub-object.
function internalData( elem, name, data, pvt /* Internal Use Only */ ){
	if ( !jQuery.acceptData( elem ) ) {
		return;
	}

	var thisCache, ret,
		internalKey = jQuery.expando,
		getByName = typeof name === "string",

		// We have to handle DOM nodes and JS objects differently because IE6-7
		// can't GC object references properly across the DOM-JS boundary
		isNode = elem.nodeType,

		// Only DOM nodes need the global jQuery cache; JS object data is
		// attached directly to the object so GC can occur automatically
		cache = isNode ? jQuery.cache : elem,

		// Only defining an ID for JS objects if its cache already exists allows
		// the code to shortcut on the same path as a DOM node with no cache
		id = isNode ? elem[ internalKey ] : elem[ internalKey ] && internalKey;

	// Avoid doing any more work than we need to when trying to get data on an
	// object that has no data at all
	if ( (!id || !cache[id] || (!pvt && !cache[id].data)) && getByName && data === undefined ) {
		return;
	}

	if ( !id ) {
		// Only DOM nodes need a new unique ID for each element since their data
		// ends up in the global cache
		if ( isNode ) {
			elem[ internalKey ] = id = core_deletedIds.pop() || jQuery.guid++;
		} else {
			id = internalKey;
		}
	}

	if ( !cache[ id ] ) {
		cache[ id ] = {};

		// Avoids exposing jQuery metadata on plain JS objects when the object
		// is serialized using JSON.stringify
		if ( !isNode ) {
			cache[ id ].toJSON = jQuery.noop;
		}
	}

	// An object can be passed to jQuery.data instead of a key/value pair; this gets
	// shallow copied over onto the existing cache
	if ( typeof name === "object" || typeof name === "function" ) {
		if ( pvt ) {
			cache[ id ] = jQuery.extend( cache[ id ], name );
		} else {
			cache[ id ].data = jQuery.extend( cache[ id ].data, name );
		}
	}

	thisCache = cache[ id ];

	// jQuery data() is stored in a separate object inside the object's internal data
	// cache in order to avoid key collisions between internal data and user-defined
	// data.
	if ( !pvt ) {
		if ( !thisCache.data ) {
			thisCache.data = {};
		}

		thisCache = thisCache.data;
	}

	if ( data !== undefined ) {
		thisCache[ jQuery.camelCase( name ) ] = data;
	}

	// Check for both converted-to-camel and non-converted data property names
	// If a data property was specified
	if ( getByName ) {

		// First Try to find as-is property data
		ret = thisCache[ name ];

		// Test for null|undefined property data
		if ( ret == null ) {

			// Try to find the camelCased property
			ret = thisCache[ jQuery.camelCase( name ) ];
		}
	} else {
		ret = thisCache;
	}

	return ret;
}

// Core implementation behind jQuery.removeData/_removeData: deletes one key,
// several keys, or the whole cache entry for an element/object.
function internalRemoveData( elem, name, pvt ) {
	if ( !jQuery.acceptData( elem ) ) {
		return;
	}

	var i, l, thisCache,
		isNode = elem.nodeType,

		// See jQuery.data for more information
		cache = isNode ? jQuery.cache : elem,
		id = isNode ? elem[ jQuery.expando ] : jQuery.expando;

	// If there is already no cache entry for this object, there is no
	// purpose in continuing
	if ( !cache[ id ] ) {
		return;
	}

	if ( name ) {

		thisCache = pvt ? cache[ id ] : cache[ id ].data;

		if ( thisCache ) {

			// Support array or space separated string names for data keys
			if ( !jQuery.isArray( name ) ) {

				// try the string as a key before any manipulation
				if ( name in thisCache ) {
					name = [ name ];
				} else {

					// split the camel cased version by spaces unless a key with the spaces exists
					name = jQuery.camelCase( name );
					if ( name in thisCache ) {
						name = [ name ];
					} else {
						name = name.split(" ");
					}
				}
			} else {
				// If "name" is an array of keys...
				// When data is initially created, via ("key", "val") signature,
				// keys will be converted to camelCase.
				// Since there is no way to tell _how_ a key was added, remove
				// both plain key and camelCase key. #12786
				// This will only penalize the array argument path.
				name = name.concat( jQuery.map( name, jQuery.camelCase ) );
			}

			for ( i = 0, l = name.length; i < l; i++ ) {
				delete thisCache[ name[i] ];
			}

			// If there is no data left in the cache, we want to continue
			// and let the cache object itself get destroyed
			if ( !( pvt ? isEmptyDataObject : jQuery.isEmptyObject )( thisCache ) ) {
				return;
			}
		}
	}

	// See jQuery.data for more information
	if ( !pvt ) {
		delete cache[ id ].data;

		// Don't destroy the parent cache unless the internal data object
		// had been the only thing left in it
		if ( !isEmptyDataObject( cache[ id ] ) ) {
			return;
		}
	}

	// Destroy the cache
	if ( isNode ) {
		jQuery.cleanData( [ elem ], true );

	// Use delete when supported for expandos or `cache` is not a window per isWindow (#10080)
	} else if ( jQuery.support.deleteExpando || cache != cache.window ) {
		delete cache[ id ];

	// When all else fails, null
	} else {
		cache[ id ] = null;
	}
}

jQuery.extend({
	cache: {},

	// Unique for each copy of jQuery on the page
	// Non-digits removed to match rinlinejQuery
	expando: "jQuery" + ( core_version + Math.random() ).replace( /\D/g, "" ),

	// The following elements throw uncatchable exceptions if you
	// attempt to add expando properties to them.
	noData: {
		"embed": true,
		// Ban all objects except for Flash (which handle expandos)
		"object": "clsid:D27CDB6E-AE6D-11cf-96B8-444553540000",
		"applet": true
	},

	hasData: function( elem ) {
		elem = elem.nodeType ? jQuery.cache[ elem[jQuery.expando] ] : elem[ jQuery.expando ];
		return !!elem && !isEmptyDataObject( elem );
	},

	data: function( elem, name, data ) {
		return internalData( elem, name, data );
	},

	removeData: function( elem, name ) {
		return internalRemoveData( elem, name );
	},

	// For internal use only.
	_data: function( elem, name, data ) {
		return internalData( elem, name, data, true );
	},

	_removeData: function( elem, name ) {
		return internalRemoveData( elem, name, true );
	},

	// A method for determining if a DOM node can handle the data expando
	acceptData: function( elem ) {
		// Do not set data on non-element because it will not be cleared (#8335).
		if ( elem.nodeType && elem.nodeType !== 1 && elem.nodeType !== 9 ) {
			return false;
		}

		var noData = elem.nodeName && jQuery.noData[ elem.nodeName.toLowerCase() ];

		// nodes accept data unless otherwise specified; rejection can be conditional
		return !noData || noData !== true && elem.getAttribute("classid") === noData;
	}
});

jQuery.fn.extend({
	// User-facing .data(): get-all, get-one, set-one, or set-many, with
	// lazy import of HTML5 data-* attributes on first get-all.
	data: function( key, value ) {
		var attrs, name,
			elem = this[0],
			i = 0,
			data = null;

		// Gets all values
		if ( key === undefined ) {
			if ( this.length ) {
				data = jQuery.data( elem );

				if ( elem.nodeType === 1 && !jQuery._data( elem, "parsedAttrs" ) ) {
					attrs = elem.attributes;
					for ( ; i < attrs.length; i++ ) {
						name = attrs[i].name;

						if ( !name.indexOf( "data-" ) ) {
							name = jQuery.camelCase( name.slice(5) );

							dataAttr( elem, name, data[ name ] );
						}
					}
					jQuery._data( elem, "parsedAttrs", true );
				}
			}

			return data;
		}

		// Sets multiple values
		if ( typeof key === "object" ) {
			return this.each(function() {
				jQuery.data( this, key );
			});
		}

		return jQuery.access( this, function( value ) {

			if ( value === undefined ) {
				// Try to fetch any internally stored data first
				return elem ? dataAttr( elem, key, jQuery.data( elem, key ) ) : null;
			}

			this.each(function() {
				jQuery.data( this, key, value );
			});
		}, null, value, arguments.length > 1, null, true );
	},

	removeData: function( key ) {
		return this.each(function() {
			jQuery.removeData( this, key );
		});
	}
});

function dataAttr( elem, key, data ) {
	// If nothing was found internally, try to fetch any
	// data from the HTML5 data-* attribute
	if ( data === undefined && elem.nodeType === 1 ) {

		var name = "data-" + key.replace( rmultiDash, "-$1" ).toLowerCase();

		data = elem.getAttribute( name );

		if ( typeof data === "string" ) {
			try {
				data = data === "true" ? true :
					data === "false" ? false :
					data === "null" ? null :
					// Only convert to a number if it doesn't change the string
					+data + "" === data ? +data :
					rbrace.test( data ) ? jQuery.parseJSON( data ) :
					data;
			} catch( e ) {}

			// Make sure we set the data so it isn't changed later
			jQuery.data( elem, key, data );

		} else {
			data = undefined;
		}
	}

	return data;
}

// checks a cache object for emptiness
function isEmptyDataObject( obj ) {
	var name;
	for ( name in obj ) {

		// if the public data object is empty, the private is still empty
		if ( name === "data" && jQuery.isEmptyObject( obj[name] ) ) {
			continue;
		}
		if ( name !== "toJSON" ) {
			return false;
		}
	}

	return true;
}
jQuery.extend({
	queue: function( elem, type, data ) {
		var queue;

		if ( elem ) {
			type = ( type || "fx" ) + "queue";
			queue = jQuery._data( elem, type );

			// Speed up dequeue by getting out quickly if this is just a lookup
			if ( data ) {
				if ( !queue || jQuery.isArray(data) ) {
					queue = jQuery._data( elem, type, jQuery.makeArray(data) );
				} else {
					queue.push( data );
				}
			}
			return queue || [];
		}
	},

	dequeue: function( elem, type ) {
		type = type || "fx";

		var queue = jQuery.queue( elem, type ),
			startLength = queue.length,
			fn = queue.shift(),
			hooks = jQuery._queueHooks( elem, type ),
			next = function() {
				jQuery.dequeue( elem, type );
			};

		// If the fx queue is dequeued, always remove the progress sentinel
		if ( fn === "inprogress" ) {
			fn = queue.shift();
			startLength--;
		}

		hooks.cur = fn;
		if ( fn ) {

			// Add a progress sentinel to prevent the fx queue from being
			// automatically dequeued
			if ( type === "fx" ) {
				queue.unshift( "inprogress" );
			}

			// clear up the last queue stop function
			delete hooks.stop;
			fn.call( elem, next, hooks );
		}

		if ( !startLength && hooks ) {
			hooks.empty.fire();
		}
	},

	// not intended for public consumption - generates a queueHooks object, or returns the current one
	_queueHooks: function( elem, type ) {
		var key = type + "queueHooks";
		return jQuery._data( elem, key ) || jQuery._data( elem, key, {
			empty: jQuery.Callbacks("once memory").add(function() {
				jQuery._removeData( elem, type + "queue" );
				jQuery._removeData( elem, key );
			})
		});
	}
});

jQuery.fn.extend({
	queue: function( type, data ) {
		var setter = 2;

		if ( typeof type !== "string" ) {
			data = type;
			type = "fx";
			setter--;
		}

		if ( arguments.length < setter ) {
			return jQuery.queue( this[0], type );
		}

		return data === undefined ?
			this :
			this.each(function() {
				var queue = jQuery.queue( this, type, data );

				// ensure a hooks for this queue
				jQuery._queueHooks( this, type );

				if ( type === "fx" && queue[0] !== "inprogress" ) {
					jQuery.dequeue( this, type );
				}
			});
	},
	dequeue: function( type ) {
		return this.each(function() {
			jQuery.dequeue( this, type );
		});
	},
	// Based off of the plugin by Clint Helfers, with permission.
	// http://blindsignals.com/index.php/2009/07/jquery-delay/
	delay: function( time, type ) {
		time = jQuery.fx ? jQuery.fx.speeds[ time ] || time : time;
		type = type || "fx";

		return this.queue( type, function( next, hooks ) {
			var timeout = setTimeout( next, time );
			hooks.stop = function() {
				clearTimeout( timeout );
			};
		});
	},
	clearQueue: function( type ) {
		return this.queue( type || "fx", [] );
	},
	// Get a promise resolved when queues of a certain type
	// are emptied (fx is the type by default)
	promise: function( type, obj ) {
		var tmp,
			count = 1,
			defer = jQuery.Deferred(),
			elements = this,
			i = this.length,
			resolve = function() {
				if ( !( --count ) ) {
					defer.resolveWith( elements, [ elements ] );
				}
			};

		if ( typeof type !== "string" ) {
			obj = type;
			type = undefined;
		}
		type = type || "fx";

		while( i-- ) {
			tmp = jQuery._data( elements[ i ], type + "queueHooks" );
			if ( tmp && tmp.empty ) {
				count++;
				tmp.empty.add( resolve );
			}
		}
		resolve();
		return defer.promise( obj );
	}
});
// Attribute/property module shared state and regexes.
var nodeHook, boolHook,
	rclass = /[\t\r\n]/g,
	rreturn = /\r/g,
	rfocusable = /^(?:input|select|textarea|button|object)$/i,
	rclickable = /^(?:a|area)$/i,
	rboolean = /^(?:checked|selected|autofocus|autoplay|async|controls|defer|disabled|hidden|loop|multiple|open|readonly|required|scoped)$/i,
	ruseDefault = /^(?:checked|selected)$/i,
	getSetAttribute = jQuery.support.getSetAttribute,
	getSetInput = jQuery.support.input;

jQuery.fn.extend({
	attr: function( name, value ) {
		return jQuery.access( this, jQuery.attr, name, value, arguments.length > 1 );
	},

	removeAttr: function( name ) {
		return this.each(function() {
			jQuery.removeAttr( this, name );
		});
	},

	prop: function( name, value ) {
		return jQuery.access( this, jQuery.prop, name, value, arguments.length > 1 );
	},

	removeProp: function( name ) {
		name = jQuery.propFix[ name ] || name;
		return this.each(function() {
			// try/catch handles cases where IE balks (such as removing a property on window)
			try {
				this[ name ] = undefined;
				delete this[ name ];
			} catch( e ) {}
		});
	},

	addClass: function( value ) {
		var classes, elem, cur, clazz, j,
			i = 0,
			len = this.length,
			proceed = typeof value === "string" && value;

		if ( jQuery.isFunction( value ) ) {
			return this.each(function( j ) {
				jQuery( this ).addClass( value.call( this, j, this.className ) );
			});
		}

		if ( proceed ) {
			// The disjunction here is for better compressibility (see removeClass)
			classes = ( value || "" ).match( core_rnotwhite ) || [];

			for ( ; i < len; i++ ) {
				elem = this[ i ];
				cur = elem.nodeType === 1 && ( elem.className ?
					( " " + elem.className + " " ).replace( rclass, " " ) :
					" "
				);

				if ( cur ) {
					j = 0;
					while ( (clazz = classes[j++]) ) {
						if ( cur.indexOf( " " + clazz + " " ) < 0 ) {
							cur += clazz + " ";
						}
					}
					elem.className = jQuery.trim( cur );

				}
			}
		}

		return this;
	},

	removeClass: function( value ) {
		var classes, elem, cur, clazz, j,
			i = 0,
			len = this.length,
			proceed = arguments.length === 0 || typeof value === "string" && value;

		if ( jQuery.isFunction( value ) ) {
			return this.each(function( j ) {
				jQuery( this ).removeClass( value.call( this, j, this.className ) );
			});
		}
		if ( proceed ) {
			classes = ( value || "" ).match( core_rnotwhite ) || [];

			for ( ; i < len; i++ ) {
				elem = this[ i ];
				// This expression is here for better compressibility (see addClass)
				cur = elem.nodeType === 1 && ( elem.className ?
					( " " + elem.className + " " ).replace( rclass, " " ) :
					""
				);

				if ( cur ) {
					j = 0;
					while ( (clazz = classes[j++]) ) {
						// Remove *all* instances
						while ( cur.indexOf( " " + clazz + " " ) >= 0 ) {
							cur = cur.replace( " " + clazz + " ", " " );
						}
					}
					elem.className = value ? jQuery.trim( cur ) : "";
				}
			}
		}

		return this;
	},

	toggleClass: function( value, stateVal ) {
		var type = typeof value,
			isBool = typeof stateVal === "boolean";

		if ( jQuery.isFunction( value ) ) {
			return this.each(function( i ) {
				jQuery( this ).toggleClass( value.call(this, i, this.className, stateVal), stateVal );
			});
		}

		return this.each(function() {
			if ( type === "string" ) {
				// toggle individual class names
				var className,
					i = 0,
					self = jQuery( this ),
					state = stateVal,
					classNames = value.match( core_rnotwhite ) || [];

				while ( (className = classNames[ i++ ]) ) {
					// check each className given, space separated list
					state = isBool ? state : !self.hasClass( className );
					self[ state ? "addClass" : "removeClass" ]( className );
				}

			// Toggle whole class name
			} else if ( type === core_strundefined || type === "boolean" ) {
				if ( this.className ) {
					// store className if set
					jQuery._data( this, "__className__", this.className );
				}

				// If the element has a class name or if we're passed "false",
				// then remove the whole classname (if there was one, the above saved it).
				// Otherwise bring back whatever was previously saved (if anything),
				// falling back to the empty string if nothing was stored.
				this.className = this.className || value === false ? "" : jQuery._data( this, "__className__" ) || "";
			}
		});
	},

	hasClass: function( selector ) {
		var className = " " + selector + " ",
			i = 0,
			l = this.length;
		for ( ; i < l; i++ ) {
			if ( this[i].nodeType === 1 && (" " + this[i].className + " ").replace(rclass, " ").indexOf( className ) >= 0 ) {
				return true;
			}
		}

		return false;
	},

	val: function( value ) {
		var ret, hooks, isFunction,
			elem = this[0];

		if ( !arguments.length ) {
			if ( elem ) {
				hooks = jQuery.valHooks[ elem.type ] || jQuery.valHooks[ elem.nodeName.toLowerCase() ];

				if ( hooks && "get" in hooks && (ret = hooks.get( elem, "value" )) !== undefined ) {
					return ret;
				}

				ret = elem.value;

				return typeof ret === "string" ?
					// handle most common string cases
					ret.replace(rreturn, "") :
					// handle cases where value is null/undef or number
					ret == null ? "" : ret;
			}

			return;
		}

		isFunction = jQuery.isFunction( value );

		return this.each(function( i ) {
			var val,
				self = jQuery(this);

			if ( this.nodeType !== 1 ) {
				return;
			}

			if ( isFunction ) {
				val = value.call( this, i, self.val() );
			} else {
				val = value;
			}

			// Treat null/undefined as ""; convert numbers to string
			if ( val == null ) {
				val = "";
			} else if ( typeof val === "number" ) {
				val += "";
			} else if ( jQuery.isArray( val ) ) {
				val = jQuery.map(val, function ( value ) {
					return value == null ? "" : value + "";
				});
			}

			hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ];

			// If set returns undefined, fall back to normal setting
			if ( !hooks || !("set" in hooks) || hooks.set( this, val, "value" ) === undefined ) {
				this.value = val;
			}
		});
	}
});

// NOTE(review): this jQuery.extend({ valHooks, attr, ... }) unit continues
// beyond the visible chunk boundary; the fragment below is intentionally
// left open so the remainder of the file completes it.
jQuery.extend({
	valHooks: {
		option: {
			get: function( elem ) {
				// attributes.value is undefined in Blackberry 4.7 but
				// uses .value. See #6932
				var val = elem.attributes.value;
				return !val || val.specified ?
elem.value : elem.text; - } - }, - select: { - get: function( elem ) { - var value, option, - options = elem.options, - index = elem.selectedIndex, - one = elem.type === "select-one" || index < 0, - values = one ? null : [], - max = one ? index + 1 : options.length, - i = index < 0 ? - max : - one ? index : 0; - - // Loop through all the selected options - for ( ; i < max; i++ ) { - option = options[ i ]; - - // oldIE doesn't update selected after form reset (#2551) - if ( ( option.selected || i === index ) && - // Don't return options that are disabled or in a disabled optgroup - ( jQuery.support.optDisabled ? !option.disabled : option.getAttribute("disabled") === null ) && - ( !option.parentNode.disabled || !jQuery.nodeName( option.parentNode, "optgroup" ) ) ) { - - // Get the specific value for the option - value = jQuery( option ).val(); - - // We don't need an array for one selects - if ( one ) { - return value; - } - - // Multi-Selects return an array - values.push( value ); - } - } - - return values; - }, - - set: function( elem, value ) { - var values = jQuery.makeArray( value ); - - jQuery(elem).find("option").each(function() { - this.selected = jQuery.inArray( jQuery(this).val(), values ) >= 0; - }); - - if ( !values.length ) { - elem.selectedIndex = -1; - } - return values; - } - } - }, - - attr: function( elem, name, value ) { - var hooks, notxml, ret, - nType = elem.nodeType; - - // don't get/set attributes on text, comment and attribute nodes - if ( !elem || nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - // Fallback to prop when attributes are not supported - if ( typeof elem.getAttribute === core_strundefined ) { - return jQuery.prop( elem, name, value ); - } - - notxml = nType !== 1 || !jQuery.isXMLDoc( elem ); - - // All attributes are lowercase - // Grab necessary hook if one is defined - if ( notxml ) { - name = name.toLowerCase(); - hooks = jQuery.attrHooks[ name ] || ( rboolean.test( name ) ? 
boolHook : nodeHook ); - } - - if ( value !== undefined ) { - - if ( value === null ) { - jQuery.removeAttr( elem, name ); - - } else if ( hooks && notxml && "set" in hooks && (ret = hooks.set( elem, value, name )) !== undefined ) { - return ret; - - } else { - elem.setAttribute( name, value + "" ); - return value; - } - - } else if ( hooks && notxml && "get" in hooks && (ret = hooks.get( elem, name )) !== null ) { - return ret; - - } else { - - // In IE9+, Flash objects don't have .getAttribute (#12945) - // Support: IE9+ - if ( typeof elem.getAttribute !== core_strundefined ) { - ret = elem.getAttribute( name ); - } - - // Non-existent attributes return null, we normalize to undefined - return ret == null ? - undefined : - ret; - } - }, - - removeAttr: function( elem, value ) { - var name, propName, - i = 0, - attrNames = value && value.match( core_rnotwhite ); - - if ( attrNames && elem.nodeType === 1 ) { - while ( (name = attrNames[i++]) ) { - propName = jQuery.propFix[ name ] || name; - - // Boolean attributes get special treatment (#10870) - if ( rboolean.test( name ) ) { - // Set corresponding property to false for boolean attributes - // Also clear defaultChecked/defaultSelected (if appropriate) for IE<8 - if ( !getSetAttribute && ruseDefault.test( name ) ) { - elem[ jQuery.camelCase( "default-" + name ) ] = - elem[ propName ] = false; - } else { - elem[ propName ] = false; - } - - // See #9699 for explanation of this approach (setting first, then removal) - } else { - jQuery.attr( elem, name, "" ); - } - - elem.removeAttribute( getSetAttribute ? 
name : propName ); - } - } - }, - - attrHooks: { - type: { - set: function( elem, value ) { - if ( !jQuery.support.radioValue && value === "radio" && jQuery.nodeName(elem, "input") ) { - // Setting the type on a radio button after the value resets the value in IE6-9 - // Reset value to default in case type is set after value during creation - var val = elem.value; - elem.setAttribute( "type", value ); - if ( val ) { - elem.value = val; - } - return value; - } - } - } - }, - - propFix: { - tabindex: "tabIndex", - readonly: "readOnly", - "for": "htmlFor", - "class": "className", - maxlength: "maxLength", - cellspacing: "cellSpacing", - cellpadding: "cellPadding", - rowspan: "rowSpan", - colspan: "colSpan", - usemap: "useMap", - frameborder: "frameBorder", - contenteditable: "contentEditable" - }, - - prop: function( elem, name, value ) { - var ret, hooks, notxml, - nType = elem.nodeType; - - // don't get/set properties on text, comment and attribute nodes - if ( !elem || nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - notxml = nType !== 1 || !jQuery.isXMLDoc( elem ); - - if ( notxml ) { - // Fix name and attach hooks - name = jQuery.propFix[ name ] || name; - hooks = jQuery.propHooks[ name ]; - } - - if ( value !== undefined ) { - if ( hooks && "set" in hooks && (ret = hooks.set( elem, value, name )) !== undefined ) { - return ret; - - } else { - return ( elem[ name ] = value ); - } - - } else { - if ( hooks && "get" in hooks && (ret = hooks.get( elem, name )) !== null ) { - return ret; - - } else { - return elem[ name ]; - } - } - }, - - propHooks: { - tabIndex: { - get: function( elem ) { - // elem.tabIndex doesn't always return the correct value when it hasn't been explicitly set - // http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabinde... - var attributeNode = elem.getAttributeNode("tabindex"); - - return attributeNode && attributeNode.specified ? 
- parseInt( attributeNode.value, 10 ) : - rfocusable.test( elem.nodeName ) || rclickable.test( elem.nodeName ) && elem.href ? - 0 : - undefined; - } - } - } - }); - -// Hook for boolean attributes - boolHook = { - get: function( elem, name ) { - var - // Use .prop to determine if this attribute is understood as boolean - prop = jQuery.prop( elem, name ), - - // Fetch it accordingly - attr = typeof prop === "boolean" && elem.getAttribute( name ), - detail = typeof prop === "boolean" ? - - getSetInput && getSetAttribute ? - attr != null : - // oldIE fabricates an empty string for missing boolean attributes - // and conflates checked/selected into attroperties - ruseDefault.test( name ) ? - elem[ jQuery.camelCase( "default-" + name ) ] : - !!attr : - - // fetch an attribute node for properties not recognized as boolean - elem.getAttributeNode( name ); - - return detail && detail.value !== false ? - name.toLowerCase() : - undefined; - }, - set: function( elem, value, name ) { - if ( value === false ) { - // Remove boolean attributes when set to false - jQuery.removeAttr( elem, name ); - } else if ( getSetInput && getSetAttribute || !ruseDefault.test( name ) ) { - // IE<8 needs the *property* name - elem.setAttribute( !getSetAttribute && jQuery.propFix[ name ] || name, name ); - - // Use defaultChecked and defaultSelected for oldIE - } else { - elem[ jQuery.camelCase( "default-" + name ) ] = elem[ name ] = true; - } - - return name; - } - }; - -// fix oldIE value attroperty - if ( !getSetInput || !getSetAttribute ) { - jQuery.attrHooks.value = { - get: function( elem, name ) { - var ret = elem.getAttributeNode( name ); - return jQuery.nodeName( elem, "input" ) ? - - // Ignore the value *property* by using defaultValue - elem.defaultValue : - - ret && ret.specified ? 
ret.value : undefined; - }, - set: function( elem, value, name ) { - if ( jQuery.nodeName( elem, "input" ) ) { - // Does not return so that setAttribute is also used - elem.defaultValue = value; - } else { - // Use nodeHook if defined (#1954); otherwise setAttribute is fine - return nodeHook && nodeHook.set( elem, value, name ); - } - } - }; - } - -// IE6/7 do not support getting/setting some attributes with get/setAttribute - if ( !getSetAttribute ) { - - // Use this for any attribute in IE6/7 - // This fixes almost every IE6/7 issue - nodeHook = jQuery.valHooks.button = { - get: function( elem, name ) { - var ret = elem.getAttributeNode( name ); - return ret && ( name === "id" || name === "name" || name === "coords" ? ret.value !== "" : ret.specified ) ? - ret.value : - undefined; - }, - set: function( elem, value, name ) { - // Set the existing or create a new attribute node - var ret = elem.getAttributeNode( name ); - if ( !ret ) { - elem.setAttributeNode( - (ret = elem.ownerDocument.createAttribute( name )) - ); - } - - ret.value = value += ""; - - // Break association with cloned elements by also using setAttribute (#9646) - return name === "value" || value === elem.getAttribute( name ) ? - value : - undefined; - } - }; - - // Set contenteditable to false on removals(#10429) - // Setting to empty string throws an error as an invalid value - jQuery.attrHooks.contenteditable = { - get: nodeHook.get, - set: function( elem, value, name ) { - nodeHook.set( elem, value === "" ? 
false : value, name ); - } - }; - - // Set width and height to auto instead of 0 on empty string( Bug #8150 ) - // This is for removals - jQuery.each([ "width", "height" ], function( i, name ) { - jQuery.attrHooks[ name ] = jQuery.extend( jQuery.attrHooks[ name ], { - set: function( elem, value ) { - if ( value === "" ) { - elem.setAttribute( name, "auto" ); - return value; - } - } - }); - }); - } - - -// Some attributes require a special call on IE -// http://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx - if ( !jQuery.support.hrefNormalized ) { - jQuery.each([ "href", "src", "width", "height" ], function( i, name ) { - jQuery.attrHooks[ name ] = jQuery.extend( jQuery.attrHooks[ name ], { - get: function( elem ) { - var ret = elem.getAttribute( name, 2 ); - return ret == null ? undefined : ret; - } - }); - }); - - // href/src property should get the full normalized URL (#10299/#12915) - jQuery.each([ "href", "src" ], function( i, name ) { - jQuery.propHooks[ name ] = { - get: function( elem ) { - return elem.getAttribute( name, 4 ); - } - }; - }); - } - - if ( !jQuery.support.style ) { - jQuery.attrHooks.style = { - get: function( elem ) { - // Return undefined in the case of empty string - // Note: IE uppercases css property names, but if we were to .toLowerCase() - // .cssText, that would destroy case senstitivity in URL's, like in "background" - return elem.style.cssText || undefined; - }, - set: function( elem, value ) { - return ( elem.style.cssText = value + "" ); - } - }; - } - -// Safari mis-reports the default selected property of an option -// Accessing the parent's selectedIndex property fixes it - if ( !jQuery.support.optSelected ) { - jQuery.propHooks.selected = jQuery.extend( jQuery.propHooks.selected, { - get: function( elem ) { - var parent = elem.parentNode; - - if ( parent ) { - parent.selectedIndex; - - // Make sure that it also works with optgroups, see #5701 - if ( parent.parentNode ) { - parent.parentNode.selectedIndex; - } - } - 
return null; - } - }); - } - -// IE6/7 call enctype encoding - if ( !jQuery.support.enctype ) { - jQuery.propFix.enctype = "encoding"; - } - -// Radios and checkboxes getter/setter - if ( !jQuery.support.checkOn ) { - jQuery.each([ "radio", "checkbox" ], function() { - jQuery.valHooks[ this ] = { - get: function( elem ) { - // Handle the case where in Webkit "" is returned instead of "on" if a value isn't specified - return elem.getAttribute("value") === null ? "on" : elem.value; - } - }; - }); - } - jQuery.each([ "radio", "checkbox" ], function() { - jQuery.valHooks[ this ] = jQuery.extend( jQuery.valHooks[ this ], { - set: function( elem, value ) { - if ( jQuery.isArray( value ) ) { - return ( elem.checked = jQuery.inArray( jQuery(elem).val(), value ) >= 0 ); - } - } - }); - }); - var rformElems = /^(?:input|select|textarea)$/i, - rkeyEvent = /^key/, - rmouseEvent = /^(?:mouse|contextmenu)|click/, - rfocusMorph = /^(?:focusinfocus|focusoutblur)$/, - rtypenamespace = /^([^.]*)(?:.(.+)|)$/; - - function returnTrue() { - return true; - } - - function returnFalse() { - return false; - } - - /* - * Helper functions for managing events -- not part of the public interface. - * Props to Dean Edwards' addEvent library for many of the ideas. 
- */ - jQuery.event = { - - global: {}, - - add: function( elem, types, handler, data, selector ) { - var tmp, events, t, handleObjIn, - special, eventHandle, handleObj, - handlers, type, namespaces, origType, - elemData = jQuery._data( elem ); - - // Don't attach events to noData or text/comment nodes (but allow plain objects) - if ( !elemData ) { - return; - } - - // Caller can pass in an object of custom data in lieu of the handler - if ( handler.handler ) { - handleObjIn = handler; - handler = handleObjIn.handler; - selector = handleObjIn.selector; - } - - // Make sure that the handler has a unique ID, used to find/remove it later - if ( !handler.guid ) { - handler.guid = jQuery.guid++; - } - - // Init the element's event structure and main handler, if this is the first - if ( !(events = elemData.events) ) { - events = elemData.events = {}; - } - if ( !(eventHandle = elemData.handle) ) { - eventHandle = elemData.handle = function( e ) { - // Discard the second event of a jQuery.event.trigger() and - // when an event is called after a page has unloaded - return typeof jQuery !== core_strundefined && (!e || jQuery.event.triggered !== e.type) ? - jQuery.event.dispatch.apply( eventHandle.elem, arguments ) : - undefined; - }; - // Add elem as a property of the handle fn to prevent a memory leak with IE non-native events - eventHandle.elem = elem; - } - - // Handle multiple events separated by a space - // jQuery(...).bind("mouseover mouseout", fn); - types = ( types || "" ).match( core_rnotwhite ) || [""]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[t] ) || []; - type = origType = tmp[1]; - namespaces = ( tmp[2] || "" ).split( "." ).sort(); - - // If event changes its type, use the special event handlers for the changed type - special = jQuery.event.special[ type ] || {}; - - // If selector defined, determine special event api type, otherwise given type - type = ( selector ? 
special.delegateType : special.bindType ) || type; - - // Update special based on newly reset type - special = jQuery.event.special[ type ] || {}; - - // handleObj is passed to all event handlers - handleObj = jQuery.extend({ - type: type, - origType: origType, - data: data, - handler: handler, - guid: handler.guid, - selector: selector, - needsContext: selector && jQuery.expr.match.needsContext.test( selector ), - namespace: namespaces.join(".") - }, handleObjIn ); - - // Init the event handler queue if we're the first - if ( !(handlers = events[ type ]) ) { - handlers = events[ type ] = []; - handlers.delegateCount = 0; - - // Only use addEventListener/attachEvent if the special events handler returns false - if ( !special.setup || special.setup.call( elem, data, namespaces, eventHandle ) === false ) { - // Bind the global event handler to the element - if ( elem.addEventListener ) { - elem.addEventListener( type, eventHandle, false ); - - } else if ( elem.attachEvent ) { - elem.attachEvent( "on" + type, eventHandle ); - } - } - } - - if ( special.add ) { - special.add.call( elem, handleObj ); - - if ( !handleObj.handler.guid ) { - handleObj.handler.guid = handler.guid; - } - } - - // Add to the element's handler list, delegates in front - if ( selector ) { - handlers.splice( handlers.delegateCount++, 0, handleObj ); - } else { - handlers.push( handleObj ); - } - - // Keep track of which events have ever been used, for event optimization - jQuery.event.global[ type ] = true; - } - - // Nullify elem to prevent memory leaks in IE - elem = null; - }, - - // Detach an event or set of events from an element - remove: function( elem, types, handler, selector, mappedTypes ) { - var j, handleObj, tmp, - origCount, t, events, - special, handlers, type, - namespaces, origType, - elemData = jQuery.hasData( elem ) && jQuery._data( elem ); - - if ( !elemData || !(events = elemData.events) ) { - return; - } - - // Once for each type.namespace in types; type may be omitted - 
types = ( types || "" ).match( core_rnotwhite ) || [""]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[t] ) || []; - type = origType = tmp[1]; - namespaces = ( tmp[2] || "" ).split( "." ).sort(); - - // Unbind all events (on this namespace, if provided) for the element - if ( !type ) { - for ( type in events ) { - jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); - } - continue; - } - - special = jQuery.event.special[ type ] || {}; - type = ( selector ? special.delegateType : special.bindType ) || type; - handlers = events[ type ] || []; - tmp = tmp[2] && new RegExp( "(^|\.)" + namespaces.join("\.(?:.*\.|)") + "(\.|$)" ); - - // Remove matching events - origCount = j = handlers.length; - while ( j-- ) { - handleObj = handlers[ j ]; - - if ( ( mappedTypes || origType === handleObj.origType ) && - ( !handler || handler.guid === handleObj.guid ) && - ( !tmp || tmp.test( handleObj.namespace ) ) && - ( !selector || selector === handleObj.selector || selector === "**" && handleObj.selector ) ) { - handlers.splice( j, 1 ); - - if ( handleObj.selector ) { - handlers.delegateCount--; - } - if ( special.remove ) { - special.remove.call( elem, handleObj ); - } - } - } - - // Remove generic event handler if we removed something and no more handlers exist - // (avoids potential for endless recursion during removal of special event handlers) - if ( origCount && !handlers.length ) { - if ( !special.teardown || special.teardown.call( elem, namespaces, elemData.handle ) === false ) { - jQuery.removeEvent( elem, type, elemData.handle ); - } - - delete events[ type ]; - } - } - - // Remove the expando if it's no longer used - if ( jQuery.isEmptyObject( events ) ) { - delete elemData.handle; - - // removeData also checks for emptiness and clears the expando if empty - // so use it instead of delete - jQuery._removeData( elem, "events" ); - } - }, - - trigger: function( event, data, elem, onlyHandlers ) { - var handle, ontype, cur, - 
bubbleType, special, tmp, i, - eventPath = [ elem || document ], - type = core_hasOwn.call( event, "type" ) ? event.type : event, - namespaces = core_hasOwn.call( event, "namespace" ) ? event.namespace.split(".") : []; - - cur = tmp = elem = elem || document; - - // Don't do events on text and comment nodes - if ( elem.nodeType === 3 || elem.nodeType === 8 ) { - return; - } - - // focus/blur morphs to focusin/out; ensure we're not firing them right now - if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { - return; - } - - if ( type.indexOf(".") >= 0 ) { - // Namespaced trigger; create a regexp to match event type in handle() - namespaces = type.split("."); - type = namespaces.shift(); - namespaces.sort(); - } - ontype = type.indexOf(":") < 0 && "on" + type; - - // Caller can pass in a jQuery.Event object, Object, or just an event type string - event = event[ jQuery.expando ] ? - event : - new jQuery.Event( type, typeof event === "object" && event ); - - event.isTrigger = true; - event.namespace = namespaces.join("."); - event.namespace_re = event.namespace ? - new RegExp( "(^|\.)" + namespaces.join("\.(?:.*\.|)") + "(\.|$)" ) : - null; - - // Clean up the event in case it is being reused - event.result = undefined; - if ( !event.target ) { - event.target = elem; - } - - // Clone any incoming data and prepend the event, creating the handler arg list - data = data == null ? 
- [ event ] : - jQuery.makeArray( data, [ event ] ); - - // Allow special events to draw outside the lines - special = jQuery.event.special[ type ] || {}; - if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { - return; - } - - // Determine event propagation path in advance, per W3C events spec (#9951) - // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) - if ( !onlyHandlers && !special.noBubble && !jQuery.isWindow( elem ) ) { - - bubbleType = special.delegateType || type; - if ( !rfocusMorph.test( bubbleType + type ) ) { - cur = cur.parentNode; - } - for ( ; cur; cur = cur.parentNode ) { - eventPath.push( cur ); - tmp = cur; - } - - // Only add window if we got to document (e.g., not plain obj or detached DOM) - if ( tmp === (elem.ownerDocument || document) ) { - eventPath.push( tmp.defaultView || tmp.parentWindow || window ); - } - } - - // Fire handlers on the event path - i = 0; - while ( (cur = eventPath[i++]) && !event.isPropagationStopped() ) { - - event.type = i > 1 ? - bubbleType : - special.bindType || type; - - // jQuery handler - handle = ( jQuery._data( cur, "events" ) || {} )[ event.type ] && jQuery._data( cur, "handle" ); - if ( handle ) { - handle.apply( cur, data ); - } - - // Native handler - handle = ontype && cur[ ontype ]; - if ( handle && jQuery.acceptData( cur ) && handle.apply && handle.apply( cur, data ) === false ) { - event.preventDefault(); - } - } - event.type = type; - - // If nobody prevented the default action, do it now - if ( !onlyHandlers && !event.isDefaultPrevented() ) { - - if ( (!special._default || special._default.apply( elem.ownerDocument, data ) === false) && - !(type === "click" && jQuery.nodeName( elem, "a" )) && jQuery.acceptData( elem ) ) { - - // Call a native DOM method on the target with the same name name as the event. - // Can't use an .isFunction() check here because IE6/7 fails that test. 
- // Don't do default actions on window, that's where global variables be (#6170) - if ( ontype && elem[ type ] && !jQuery.isWindow( elem ) ) { - - // Don't re-trigger an onFOO event when we call its FOO() method - tmp = elem[ ontype ]; - - if ( tmp ) { - elem[ ontype ] = null; - } - - // Prevent re-triggering of the same event, since we already bubbled it above - jQuery.event.triggered = type; - try { - elem[ type ](); - } catch ( e ) { - // IE<9 dies on focus/blur to hidden element (#1486,#12518) - // only reproducible on winXP IE8 native, not IE9 in IE8 mode - } - jQuery.event.triggered = undefined; - - if ( tmp ) { - elem[ ontype ] = tmp; - } - } - } - } - - return event.result; - }, - - dispatch: function( event ) { - - // Make a writable jQuery.Event from the native event object - event = jQuery.event.fix( event ); - - var i, ret, handleObj, matched, j, - handlerQueue = [], - args = core_slice.call( arguments ), - handlers = ( jQuery._data( this, "events" ) || {} )[ event.type ] || [], - special = jQuery.event.special[ event.type ] || {}; - - // Use the fix-ed jQuery.Event rather than the (read-only) native event - args[0] = event; - event.delegateTarget = this; - - // Call the preDispatch hook for the mapped type, and let it bail if desired - if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { - return; - } - - // Determine handlers - handlerQueue = jQuery.event.handlers.call( this, event, handlers ); - - // Run delegates first; they may want to stop propagation beneath us - i = 0; - while ( (matched = handlerQueue[ i++ ]) && !event.isPropagationStopped() ) { - event.currentTarget = matched.elem; - - j = 0; - while ( (handleObj = matched.handlers[ j++ ]) && !event.isImmediatePropagationStopped() ) { - - // Triggered event must either 1) have no namespace, or - // 2) have namespace(s) a subset or equal to those in the bound event (both can have no namespace). 
- if ( !event.namespace_re || event.namespace_re.test( handleObj.namespace ) ) { - - event.handleObj = handleObj; - event.data = handleObj.data; - - ret = ( (jQuery.event.special[ handleObj.origType ] || {}).handle || handleObj.handler ) - .apply( matched.elem, args ); - - if ( ret !== undefined ) { - if ( (event.result = ret) === false ) { - event.preventDefault(); - event.stopPropagation(); - } - } - } - } - } - - // Call the postDispatch hook for the mapped type - if ( special.postDispatch ) { - special.postDispatch.call( this, event ); - } - - return event.result; - }, - - handlers: function( event, handlers ) { - var sel, handleObj, matches, i, - handlerQueue = [], - delegateCount = handlers.delegateCount, - cur = event.target; - - // Find delegate handlers - // Black-hole SVG <use> instance trees (#13180) - // Avoid non-left-click bubbling in Firefox (#3861) - if ( delegateCount && cur.nodeType && (!event.button || event.type !== "click") ) { - - for ( ; cur != this; cur = cur.parentNode || this ) { - - // Don't check non-elements (#13208) - // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) - if ( cur.nodeType === 1 && (cur.disabled !== true || event.type !== "click") ) { - matches = []; - for ( i = 0; i < delegateCount; i++ ) { - handleObj = handlers[ i ]; - - // Don't conflict with Object.prototype properties (#13203) - sel = handleObj.selector + " "; - - if ( matches[ sel ] === undefined ) { - matches[ sel ] = handleObj.needsContext ? 
- jQuery( sel, this ).index( cur ) >= 0 : - jQuery.find( sel, this, null, [ cur ] ).length; - } - if ( matches[ sel ] ) { - matches.push( handleObj ); - } - } - if ( matches.length ) { - handlerQueue.push({ elem: cur, handlers: matches }); - } - } - } - } - - // Add the remaining (directly-bound) handlers - if ( delegateCount < handlers.length ) { - handlerQueue.push({ elem: this, handlers: handlers.slice( delegateCount ) }); - } - - return handlerQueue; - }, - - fix: function( event ) { - if ( event[ jQuery.expando ] ) { - return event; - } - - // Create a writable copy of the event object and normalize some properties - var i, prop, copy, - type = event.type, - originalEvent = event, - fixHook = this.fixHooks[ type ]; - - if ( !fixHook ) { - this.fixHooks[ type ] = fixHook = - rmouseEvent.test( type ) ? this.mouseHooks : - rkeyEvent.test( type ) ? this.keyHooks : - {}; - } - copy = fixHook.props ? this.props.concat( fixHook.props ) : this.props; - - event = new jQuery.Event( originalEvent ); - - i = copy.length; - while ( i-- ) { - prop = copy[ i ]; - event[ prop ] = originalEvent[ prop ]; - } - - // Support: IE<9 - // Fix target property (#1925) - if ( !event.target ) { - event.target = originalEvent.srcElement || document; - } - - // Support: Chrome 23+, Safari? - // Target should not be a text node (#504, #13143) - if ( event.target.nodeType === 3 ) { - event.target = event.target.parentNode; - } - - // Support: IE<9 - // For mouse/key events, metaKey==false if it's undefined (#3368, #11328) - event.metaKey = !!event.metaKey; - - return fixHook.filter ? 
fixHook.filter( event, originalEvent ) : event; - }, - - // Includes some event props shared by KeyEvent and MouseEvent - props: "altKey bubbles cancelable ctrlKey currentTarget eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "), - - fixHooks: {}, - - keyHooks: { - props: "char charCode key keyCode".split(" "), - filter: function( event, original ) { - - // Add which for key events - if ( event.which == null ) { - event.which = original.charCode != null ? original.charCode : original.keyCode; - } - - return event; - } - }, - - mouseHooks: { - props: "button buttons clientX clientY fromElement offsetX offsetY pageX pageY screenX screenY toElement".split(" "), - filter: function( event, original ) { - var body, eventDoc, doc, - button = original.button, - fromElement = original.fromElement; - - // Calculate pageX/Y if missing and clientX/Y available - if ( event.pageX == null && original.clientX != null ) { - eventDoc = event.target.ownerDocument || document; - doc = eventDoc.documentElement; - body = eventDoc.body; - - event.pageX = original.clientX + ( doc && doc.scrollLeft || body && body.scrollLeft || 0 ) - ( doc && doc.clientLeft || body && body.clientLeft || 0 ); - event.pageY = original.clientY + ( doc && doc.scrollTop || body && body.scrollTop || 0 ) - ( doc && doc.clientTop || body && body.clientTop || 0 ); - } - - // Add relatedTarget, if necessary - if ( !event.relatedTarget && fromElement ) { - event.relatedTarget = fromElement === event.target ? original.toElement : fromElement; - } - - // Add which for click: 1 === left; 2 === middle; 3 === right - // Note: button is not normalized, so don't use it - if ( !event.which && button !== undefined ) { - event.which = ( button & 1 ? 1 : ( button & 2 ? 3 : ( button & 4 ? 
2 : 0 ) ) ); - } - - return event; - } - }, - - special: { - load: { - // Prevent triggered image.load events from bubbling to window.load - noBubble: true - }, - click: { - // For checkbox, fire native event so checked state will be right - trigger: function() { - if ( jQuery.nodeName( this, "input" ) && this.type === "checkbox" && this.click ) { - this.click(); - return false; - } - } - }, - focus: { - // Fire native event if possible so blur/focus sequence is correct - trigger: function() { - if ( this !== document.activeElement && this.focus ) { - try { - this.focus(); - return false; - } catch ( e ) { - // Support: IE<9 - // If we error on focus to hidden element (#1486, #12518), - // let .trigger() run the handlers - } - } - }, - delegateType: "focusin" - }, - blur: { - trigger: function() { - if ( this === document.activeElement && this.blur ) { - this.blur(); - return false; - } - }, - delegateType: "focusout" - }, - - beforeunload: { - postDispatch: function( event ) { - - // Even when returnValue equals to undefined Firefox will still show alert - if ( event.result !== undefined ) { - event.originalEvent.returnValue = event.result; - } - } - } - }, - - simulate: function( type, elem, event, bubble ) { - // Piggyback on a donor event to simulate a different one. - // Fake originalEvent to avoid donor's stopPropagation, but if the - // simulated event prevents default then we do the same on the donor. - var e = jQuery.extend( - new jQuery.Event(), - event, - { type: type, - isSimulated: true, - originalEvent: {} - } - ); - if ( bubble ) { - jQuery.event.trigger( e, null, elem ); - } else { - jQuery.event.dispatch.call( elem, e ); - } - if ( e.isDefaultPrevented() ) { - event.preventDefault(); - } - } - }; - - jQuery.removeEvent = document.removeEventListener ? 
- function( elem, type, handle ) { - if ( elem.removeEventListener ) { - elem.removeEventListener( type, handle, false ); - } - } : - function( elem, type, handle ) { - var name = "on" + type; - - if ( elem.detachEvent ) { - - // #8545, #7054, preventing memory leaks for custom events in IE6-8 - // detachEvent needed property on element, by name of that event, to properly expose it to GC - if ( typeof elem[ name ] === core_strundefined ) { - elem[ name ] = null; - } - - elem.detachEvent( name, handle ); - } - }; - - jQuery.Event = function( src, props ) { - // Allow instantiation without the 'new' keyword - if ( !(this instanceof jQuery.Event) ) { - return new jQuery.Event( src, props ); - } - - // Event object - if ( src && src.type ) { - this.originalEvent = src; - this.type = src.type; - - // Events bubbling up the document may have been marked as prevented - // by a handler lower down the tree; reflect the correct value. - this.isDefaultPrevented = ( src.defaultPrevented || src.returnValue === false || - src.getPreventDefault && src.getPreventDefault() ) ? returnTrue : returnFalse; - - // Event type - } else { - this.type = src; - } - - // Put explicitly provided properties onto the event object - if ( props ) { - jQuery.extend( this, props ); - } - - // Create a timestamp if incoming event doesn't have one - this.timeStamp = src && src.timeStamp || jQuery.now(); - - // Mark it as fixed - this[ jQuery.expando ] = true; - }; - -// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding -// http://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding... 
- jQuery.Event.prototype = { - isDefaultPrevented: returnFalse, - isPropagationStopped: returnFalse, - isImmediatePropagationStopped: returnFalse, - - preventDefault: function() { - var e = this.originalEvent; - - this.isDefaultPrevented = returnTrue; - if ( !e ) { - return; - } - - // If preventDefault exists, run it on the original event - if ( e.preventDefault ) { - e.preventDefault(); - - // Support: IE - // Otherwise set the returnValue property of the original event to false - } else { - e.returnValue = false; - } - }, - stopPropagation: function() { - var e = this.originalEvent; - - this.isPropagationStopped = returnTrue; - if ( !e ) { - return; - } - // If stopPropagation exists, run it on the original event - if ( e.stopPropagation ) { - e.stopPropagation(); - } - - // Support: IE - // Set the cancelBubble property of the original event to true - e.cancelBubble = true; - }, - stopImmediatePropagation: function() { - this.isImmediatePropagationStopped = returnTrue; - this.stopPropagation(); - } - }; - -// Create mouseenter/leave events using mouseover/out and event-time checks - jQuery.each({ - mouseenter: "mouseover", - mouseleave: "mouseout" - }, function( orig, fix ) { - jQuery.event.special[ orig ] = { - delegateType: fix, - bindType: fix, - - handle: function( event ) { - var ret, - target = this, - related = event.relatedTarget, - handleObj = event.handleObj; - - // For mousenter/leave call the handler if related is outside the target. 
- // NB: No relatedTarget if the mouse left/entered the browser window - if ( !related || (related !== target && !jQuery.contains( target, related )) ) { - event.type = handleObj.origType; - ret = handleObj.handler.apply( this, arguments ); - event.type = fix; - } - return ret; - } - }; - }); - -// IE submit delegation - if ( !jQuery.support.submitBubbles ) { - - jQuery.event.special.submit = { - setup: function() { - // Only need this for delegated form submit events - if ( jQuery.nodeName( this, "form" ) ) { - return false; - } - - // Lazy-add a submit handler when a descendant form may potentially be submitted - jQuery.event.add( this, "click._submit keypress._submit", function( e ) { - // Node name check avoids a VML-related crash in IE (#9807) - var elem = e.target, - form = jQuery.nodeName( elem, "input" ) || jQuery.nodeName( elem, "button" ) ? elem.form : undefined; - if ( form && !jQuery._data( form, "submitBubbles" ) ) { - jQuery.event.add( form, "submit._submit", function( event ) { - event._submit_bubble = true; - }); - jQuery._data( form, "submitBubbles", true ); - } - }); - // return undefined since we don't need an event listener - }, - - postDispatch: function( event ) { - // If form was submitted by the user, bubble the event up the tree - if ( event._submit_bubble ) { - delete event._submit_bubble; - if ( this.parentNode && !event.isTrigger ) { - jQuery.event.simulate( "submit", this.parentNode, event, true ); - } - } - }, - - teardown: function() { - // Only need this for delegated form submit events - if ( jQuery.nodeName( this, "form" ) ) { - return false; - } - - // Remove delegated handlers; cleanData eventually reaps submit handlers attached above - jQuery.event.remove( this, "._submit" ); - } - }; - } - -// IE change delegation and checkbox/radio fix - if ( !jQuery.support.changeBubbles ) { - - jQuery.event.special.change = { - - setup: function() { - - if ( rformElems.test( this.nodeName ) ) { - // IE doesn't fire change on a check/radio 
until blur; trigger it on click - // after a propertychange. Eat the blur-change in special.change.handle. - // This still fires onchange a second time for check/radio after blur. - if ( this.type === "checkbox" || this.type === "radio" ) { - jQuery.event.add( this, "propertychange._change", function( event ) { - if ( event.originalEvent.propertyName === "checked" ) { - this._just_changed = true; - } - }); - jQuery.event.add( this, "click._change", function( event ) { - if ( this._just_changed && !event.isTrigger ) { - this._just_changed = false; - } - // Allow triggered, simulated change events (#11500) - jQuery.event.simulate( "change", this, event, true ); - }); - } - return false; - } - // Delegated event; lazy-add a change handler on descendant inputs - jQuery.event.add( this, "beforeactivate._change", function( e ) { - var elem = e.target; - - if ( rformElems.test( elem.nodeName ) && !jQuery._data( elem, "changeBubbles" ) ) { - jQuery.event.add( elem, "change._change", function( event ) { - if ( this.parentNode && !event.isSimulated && !event.isTrigger ) { - jQuery.event.simulate( "change", this.parentNode, event, true ); - } - }); - jQuery._data( elem, "changeBubbles", true ); - } - }); - }, - - handle: function( event ) { - var elem = event.target; - - // Swallow native change events from checkbox/radio, we already triggered them above - if ( this !== elem || event.isSimulated || event.isTrigger || (elem.type !== "radio" && elem.type !== "checkbox") ) { - return event.handleObj.handler.apply( this, arguments ); - } - }, - - teardown: function() { - jQuery.event.remove( this, "._change" ); - - return !rformElems.test( this.nodeName ); - } - }; - } - -// Create "bubbling" focus and blur events - if ( !jQuery.support.focusinBubbles ) { - jQuery.each({ focus: "focusin", blur: "focusout" }, function( orig, fix ) { - - // Attach a single capturing handler while someone wants focusin/focusout - var attaches = 0, - handler = function( event ) { - 
jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ), true ); - }; - - jQuery.event.special[ fix ] = { - setup: function() { - if ( attaches++ === 0 ) { - document.addEventListener( orig, handler, true ); - } - }, - teardown: function() { - if ( --attaches === 0 ) { - document.removeEventListener( orig, handler, true ); - } - } - }; - }); - } - - jQuery.fn.extend({ - - on: function( types, selector, data, fn, /*INTERNAL*/ one ) { - var type, origFn; - - // Types can be a map of types/handlers - if ( typeof types === "object" ) { - // ( types-Object, selector, data ) - if ( typeof selector !== "string" ) { - // ( types-Object, data ) - data = data || selector; - selector = undefined; - } - for ( type in types ) { - this.on( type, selector, data, types[ type ], one ); - } - return this; - } - - if ( data == null && fn == null ) { - // ( types, fn ) - fn = selector; - data = selector = undefined; - } else if ( fn == null ) { - if ( typeof selector === "string" ) { - // ( types, selector, fn ) - fn = data; - data = undefined; - } else { - // ( types, data, fn ) - fn = data; - data = selector; - selector = undefined; - } - } - if ( fn === false ) { - fn = returnFalse; - } else if ( !fn ) { - return this; - } - - if ( one === 1 ) { - origFn = fn; - fn = function( event ) { - // Can use an empty set, since event contains the info - jQuery().off( event ); - return origFn.apply( this, arguments ); - }; - // Use same guid so caller can remove using origFn - fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); - } - return this.each( function() { - jQuery.event.add( this, types, fn, data, selector ); - }); - }, - one: function( types, selector, data, fn ) { - return this.on( types, selector, data, fn, 1 ); - }, - off: function( types, selector, fn ) { - var handleObj, type; - if ( types && types.preventDefault && types.handleObj ) { - // ( event ) dispatched jQuery.Event - handleObj = types.handleObj; - jQuery( types.delegateTarget ).off( - 
handleObj.namespace ? handleObj.origType + "." + handleObj.namespace : handleObj.origType, - handleObj.selector, - handleObj.handler - ); - return this; - } - if ( typeof types === "object" ) { - // ( types-object [, selector] ) - for ( type in types ) { - this.off( type, selector, types[ type ] ); - } - return this; - } - if ( selector === false || typeof selector === "function" ) { - // ( types [, fn] ) - fn = selector; - selector = undefined; - } - if ( fn === false ) { - fn = returnFalse; - } - return this.each(function() { - jQuery.event.remove( this, types, fn, selector ); - }); - }, - - bind: function( types, data, fn ) { - return this.on( types, null, data, fn ); - }, - unbind: function( types, fn ) { - return this.off( types, null, fn ); - }, - - delegate: function( selector, types, data, fn ) { - return this.on( types, selector, data, fn ); - }, - undelegate: function( selector, types, fn ) { - // ( namespace ) or ( selector, types [, fn] ) - return arguments.length === 1 ? this.off( selector, "**" ) : this.off( types, selector || "**", fn ); - }, - - trigger: function( type, data ) { - return this.each(function() { - jQuery.event.trigger( type, data, this ); - }); - }, - triggerHandler: function( type, data ) { - var elem = this[0]; - if ( elem ) { - return jQuery.event.trigger( type, data, elem, true ); - } - } - }); - /*! 
- * Sizzle CSS Selector Engine - * Copyright 2012 jQuery Foundation and other contributors - * Released under the MIT license - * http://sizzlejs.com/ - */ - (function( window, undefined ) { - - var i, - cachedruns, - Expr, - getText, - isXML, - compile, - hasDuplicate, - outermostContext, - - // Local document vars - setDocument, - document, - docElem, - documentIsXML, - rbuggyQSA, - rbuggyMatches, - matches, - contains, - sortOrder, - - // Instance-specific data - expando = "sizzle" + -(new Date()), - preferredDoc = window.document, - support = {}, - dirruns = 0, - done = 0, - classCache = createCache(), - tokenCache = createCache(), - compilerCache = createCache(), - - // General-purpose constants - strundefined = typeof undefined, - MAX_NEGATIVE = 1 << 31, - - // Array methods - arr = [], - pop = arr.pop, - push = arr.push, - slice = arr.slice, - // Use a stripped-down indexOf if we can't use a native one - indexOf = arr.indexOf || function( elem ) { - var i = 0, - len = this.length; - for ( ; i < len; i++ ) { - if ( this[i] === elem ) { - return i; - } - } - return -1; - }, - - - // Regular expressions - - // Whitespace characters http://www.w3.org/TR/css3-selectors/#whitespace - whitespace = "[\x20\t\r\n\f]", - // http://www.w3.org/TR/css3-syntax/#characters - characterEncoding = "(?:\\.|[\w-]|[^\x00-\xa0])+", - - // Loosely modeled on CSS identifier characters - // An unquoted value should be a CSS identifier http://www.w3.org/TR/css3-selectors/#attribute-selectors - // Proper syntax: http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier - identifier = characterEncoding.replace( "w", "w#" ), - - // Acceptable operators http://www.w3.org/TR/selectors/#attribute-selectors - operators = "([*^$|!~]?=)", - attributes = "\[" + whitespace + "*(" + characterEncoding + ")" + whitespace + - "*(?:" + operators + whitespace + "*(?:(['"])((?:\\.|[^\\])*?)\3|(" + identifier + ")|)|)" + whitespace + "*\]", - - // Prefer arguments quoted, - // then not containing 
pseudos/brackets, - // then attribute selectors/non-parenthetical expressions, - // then anything else - // These preferences are here to reduce the number of selectors - // needing tokenize in the PSEUDO preFilter - pseudos = ":(" + characterEncoding + ")(?:\(((['"])((?:\\.|[^\\])*?)\3|((?:\\.|[^\\()[\]]|" + attributes.replace( 3, 8 ) + ")*)|.*)\)|)", - - // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter - rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\])(?:\\.)*)" + whitespace + "+$", "g" ), - - rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), - rcombinators = new RegExp( "^" + whitespace + "*([\x20\t\r\n\f>+~])" + whitespace + "*" ), - rpseudo = new RegExp( pseudos ), - ridentifier = new RegExp( "^" + identifier + "$" ), - - matchExpr = { - "ID": new RegExp( "^#(" + characterEncoding + ")" ), - "CLASS": new RegExp( "^\.(" + characterEncoding + ")" ), - "NAME": new RegExp( "^\[name=['"]?(" + characterEncoding + ")['"]?\]" ), - "TAG": new RegExp( "^(" + characterEncoding.replace( "w", "w*" ) + ")" ), - "ATTR": new RegExp( "^" + attributes ), - "PSEUDO": new RegExp( "^" + pseudos ), - "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\(" + whitespace + - "*(even|odd|(([+-]|)(\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace + - "*(\d+)|))" + whitespace + "*\)|)", "i" ), - // For use in libraries implementing .is() - // We use this for POS matching in `select` - "needsContext": new RegExp( "^" + whitespace + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\(" + - whitespace + "*((?:-\d)?\d*)" + whitespace + "*\)|)(?=[^-]|$)", "i" ) - }, - - rsibling = /[\x20\t\r\n\f]*[+~]/, - - rnative = /^[^{]+{\s*[native code/, - - // Easily-parseable/retrievable ID or TAG or CLASS selectors - rquickExpr = /^(?:#([\w-]+)|(\w+)|.([\w-]+))$/, - - rinputs = /^(?:input|select|textarea|button)$/i, - rheader = /^h\d$/i, - - rescape = /'|\/g, - rattributeQuotes = 
/=[\x20\t\r\n\f]*([^'"]]*)[\x20\t\r\n\f]*]/g, - - // CSS escapes http://www.w3.org/TR/CSS21/syndata.html#escaped-characters - runescape = /\([\da-fA-F]{1,6}[\x20\t\r\n\f]?|.)/g, - funescape = function( _, escaped ) { - var high = "0x" + escaped - 0x10000; - // NaN means non-codepoint - return high !== high ? - escaped : - // BMP codepoint - high < 0 ? - String.fromCharCode( high + 0x10000 ) : - // Supplemental Plane codepoint (surrogate pair) - String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); - }; - -// Use a stripped-down slice if we can't use a native one - try { - slice.call( preferredDoc.documentElement.childNodes, 0 )[0].nodeType; - } catch ( e ) { - slice = function( i ) { - var elem, - results = []; - while ( (elem = this[i++]) ) { - results.push( elem ); - } - return results; - }; - } - - /** - * For feature detection - * @param {Function} fn The function to test for native support - */ - function isNative( fn ) { - return rnative.test( fn + "" ); - } - - /** - * Create key-value caches of limited size - * @returns {Function(string, Object)} Returns the Object data after storing it on itself with - * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) - * deleting the oldest entry - */ - function createCache() { - var cache, - keys = []; - - return (cache = function( key, value ) { - // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) - if ( keys.push( key += " " ) > Expr.cacheLength ) { - // Only keep the most recent entries - delete cache[ keys.shift() ]; - } - return (cache[ key ] = value); - }); - } - - /** - * Mark a function for special use by Sizzle - * @param {Function} fn The function to mark - */ - function markFunction( fn ) { - fn[ expando ] = true; - return fn; - } - - /** - * Support testing using an element - * @param {Function} fn Passed the created div and expects a boolean result - */ - function assert( fn ) { - var div = 
document.createElement("div"); - - try { - return fn( div ); - } catch (e) { - return false; - } finally { - // release memory in IE - div = null; - } - } - - function Sizzle( selector, context, results, seed ) { - var match, elem, m, nodeType, - // QSA vars - i, groups, old, nid, newContext, newSelector; - - if ( ( context ? context.ownerDocument || context : preferredDoc ) !== document ) { - setDocument( context ); - } - - context = context || document; - results = results || []; - - if ( !selector || typeof selector !== "string" ) { - return results; - } - - if ( (nodeType = context.nodeType) !== 1 && nodeType !== 9 ) { - return []; - } - - if ( !documentIsXML && !seed ) { - - // Shortcuts - if ( (match = rquickExpr.exec( selector )) ) { - // Speed-up: Sizzle("#ID") - if ( (m = match[1]) ) { - if ( nodeType === 9 ) { - elem = context.getElementById( m ); - // Check parentNode to catch when Blackberry 4.6 returns - // nodes that are no longer in the document #6963 - if ( elem && elem.parentNode ) { - // Handle the case where IE, Opera, and Webkit return items - // by name instead of ID - if ( elem.id === m ) { - results.push( elem ); - return results; - } - } else { - return results; - } - } else { - // Context is not a document - if ( context.ownerDocument && (elem = context.ownerDocument.getElementById( m )) && - contains( context, elem ) && elem.id === m ) { - results.push( elem ); - return results; - } - } - - // Speed-up: Sizzle("TAG") - } else if ( match[2] ) { - push.apply( results, slice.call(context.getElementsByTagName( selector ), 0) ); - return results; - - // Speed-up: Sizzle(".CLASS") - } else if ( (m = match[3]) && support.getByClassName && context.getElementsByClassName ) { - push.apply( results, slice.call(context.getElementsByClassName( m ), 0) ); - return results; - } - } - - // QSA path - if ( support.qsa && !rbuggyQSA.test(selector) ) { - old = true; - nid = expando; - newContext = context; - newSelector = nodeType === 9 && selector; - - // 
qSA works strangely on Element-rooted queries - // We can work around this by specifying an extra ID on the root - // and working up from there (Thanks to Andrew Dupont for the technique) - // IE 8 doesn't work on object elements - if ( nodeType === 1 && context.nodeName.toLowerCase() !== "object" ) { - groups = tokenize( selector ); - - if ( (old = context.getAttribute("id")) ) { - nid = old.replace( rescape, "\$&" ); - } else { - context.setAttribute( "id", nid ); - } - nid = "[id='" + nid + "'] "; - - i = groups.length; - while ( i-- ) { - groups[i] = nid + toSelector( groups[i] ); - } - newContext = rsibling.test( selector ) && context.parentNode || context; - newSelector = groups.join(","); - } - - if ( newSelector ) { - try { - push.apply( results, slice.call( newContext.querySelectorAll( - newSelector - ), 0 ) ); - return results; - } catch(qsaError) { - } finally { - if ( !old ) { - context.removeAttribute("id"); - } - } - } - } - } - - // All others - return select( selector.replace( rtrim, "$1" ), context, results, seed ); - } - - /** - * Detect xml - * @param {Element|Object} elem An element or a document - */ - isXML = Sizzle.isXML = function( elem ) { - // documentElement is verified for cases where it doesn't yet exist - // (such as loading iframes in IE - #4833) - var documentElement = elem && (elem.ownerDocument || elem).documentElement; - return documentElement ? documentElement.nodeName !== "HTML" : false; - }; - - /** - * Sets document-related variables once based on the current document - * @param {Element|Object} [doc] An element or document object to use to set the document - * @returns {Object} Returns the current document - */ - setDocument = Sizzle.setDocument = function( node ) { - var doc = node ? 
node.ownerDocument || node : preferredDoc; - - // If no document and documentElement is available, return - if ( doc === document || doc.nodeType !== 9 || !doc.documentElement ) { - return document; - } - - // Set our document - document = doc; - docElem = doc.documentElement; - - // Support tests - documentIsXML = isXML( doc ); - - // Check if getElementsByTagName("*") returns only elements - support.tagNameNoComments = assert(function( div ) { - div.appendChild( doc.createComment("") ); - return !div.getElementsByTagName("*").length; - }); - - // Check if attributes should be retrieved by attribute nodes - support.attributes = assert(function( div ) { - div.innerHTML = "<select></select>"; - var type = typeof div.lastChild.getAttribute("multiple"); - // IE8 returns a string for some attributes even when not present - return type !== "boolean" && type !== "string"; - }); - - // Check if getElementsByClassName can be trusted - support.getByClassName = assert(function( div ) { - // Opera can't find a second classname (in 9.6) - div.innerHTML = "<div class='hidden e'></div><div class='hidden'></div>"; - if ( !div.getElementsByClassName || !div.getElementsByClassName("e").length ) { - return false; - } - - // Safari 3.2 caches class attributes and doesn't catch changes - div.lastChild.className = "e"; - return div.getElementsByClassName("e").length === 2; - }); - - // Check if getElementById returns elements by name - // Check if getElementsByName privileges form controls or returns elements by ID - support.getByName = assert(function( div ) { - // Inject content - div.id = expando + 0; - div.innerHTML = "<a name='" + expando + "'></a><div name='" + expando + "'></div>"; - docElem.insertBefore( div, docElem.firstChild ); - - // Test - var pass = doc.getElementsByName && - // buggy browsers will return fewer than the correct 2 - doc.getElementsByName( expando ).length === 2 + - // buggy browsers will return more than the correct 0 - doc.getElementsByName( expando + 0 
).length; - support.getIdNotName = !doc.getElementById( expando ); - - // Cleanup - docElem.removeChild( div ); - - return pass; - }); - - // IE6/7 return modified attributes - Expr.attrHandle = assert(function( div ) { - div.innerHTML = "<a href='#'></a>"; - return div.firstChild && typeof div.firstChild.getAttribute !== strundefined && - div.firstChild.getAttribute("href") === "#"; - }) ? - {} : - { - "href": function( elem ) { - return elem.getAttribute( "href", 2 ); - }, - "type": function( elem ) { - return elem.getAttribute("type"); - } - }; - - // ID find and filter - if ( support.getIdNotName ) { - Expr.find["ID"] = function( id, context ) { - if ( typeof context.getElementById !== strundefined && !documentIsXML ) { - var m = context.getElementById( id ); - // Check parentNode to catch when Blackberry 4.6 returns - // nodes that are no longer in the document #6963 - return m && m.parentNode ? [m] : []; - } - }; - Expr.filter["ID"] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - return elem.getAttribute("id") === attrId; - }; - }; - } else { - Expr.find["ID"] = function( id, context ) { - if ( typeof context.getElementById !== strundefined && !documentIsXML ) { - var m = context.getElementById( id ); - - return m ? - m.id === id || typeof m.getAttributeNode !== strundefined && m.getAttributeNode("id").value === id ? - [m] : - undefined : - []; - } - }; - Expr.filter["ID"] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - var node = typeof elem.getAttributeNode !== strundefined && elem.getAttributeNode("id"); - return node && node.value === attrId; - }; - }; - } - - // Tag - Expr.find["TAG"] = support.tagNameNoComments ? 
- function( tag, context ) { - if ( typeof context.getElementsByTagName !== strundefined ) { - return context.getElementsByTagName( tag ); - } - } : - function( tag, context ) { - var elem, - tmp = [], - i = 0, - results = context.getElementsByTagName( tag ); - - // Filter out possible comments - if ( tag === "*" ) { - while ( (elem = results[i++]) ) { - if ( elem.nodeType === 1 ) { - tmp.push( elem ); - } - } - - return tmp; - } - return results; - }; - - // Name - Expr.find["NAME"] = support.getByName && function( tag, context ) { - if ( typeof context.getElementsByName !== strundefined ) { - return context.getElementsByName( name ); - } - }; - - // Class - Expr.find["CLASS"] = support.getByClassName && function( className, context ) { - if ( typeof context.getElementsByClassName !== strundefined && !documentIsXML ) { - return context.getElementsByClassName( className ); - } - }; - - // QSA and matchesSelector support - - // matchesSelector(:active) reports false when true (IE9/Opera 11.5) - rbuggyMatches = []; - - // qSa(:focus) reports false when true (Chrome 21), - // no need to also add to buggyMatches since matches checks buggyQSA - // A support test would require too much code (would include document ready) - rbuggyQSA = [ ":focus" ]; - - if ( (support.qsa = isNative(doc.querySelectorAll)) ) { - // Build QSA regex - // Regex strategy adopted from Diego Perini - assert(function( div ) { - // Select is set to empty string on purpose - // This is to test IE's treatment of not explictly - // setting a boolean content attribute, - // since its presence should be enough - // http://bugs.jquery.com/ticket/12359 - div.innerHTML = "<select><option selected=''></option></select>"; - - // IE8 - Some boolean attributes are not treated correctly - if ( !div.querySelectorAll("[selected]").length ) { - rbuggyQSA.push( "\[" + whitespace + "*(?:checked|disabled|ismap|multiple|readonly|selected|value)" ); - } - - // Webkit/Opera - :checked should return selected option 
elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - // IE8 throws error here and will not see later tests - if ( !div.querySelectorAll(":checked").length ) { - rbuggyQSA.push(":checked"); - } - }); - - assert(function( div ) { - - // Opera 10-12/IE8 - ^= $= *= and empty values - // Should not select anything - div.innerHTML = "<input type='hidden' i=''/>"; - if ( div.querySelectorAll("[i^='']").length ) { - rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:""|'')" ); - } - - // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) - // IE8 throws error here and will not see later tests - if ( !div.querySelectorAll(":enabled").length ) { - rbuggyQSA.push( ":enabled", ":disabled" ); - } - - // Opera 10-11 does not throw on post-comma invalid pseudos - div.querySelectorAll("*,:x"); - rbuggyQSA.push(",.*:"); - }); - } - - if ( (support.matchesSelector = isNative( (matches = docElem.matchesSelector || - docElem.mozMatchesSelector || - docElem.webkitMatchesSelector || - docElem.oMatchesSelector || - docElem.msMatchesSelector) )) ) { - - assert(function( div ) { - // Check to see if it's possible to do matchesSelector - // on a disconnected node (IE 9) - support.disconnectedMatch = matches.call( div, "div" ); - - // This should fail with an exception - // Gecko does not error, returns false instead - matches.call( div, "[s!='']:x" ); - rbuggyMatches.push( "!=", pseudos ); - }); - } - - rbuggyQSA = new RegExp( rbuggyQSA.join("|") ); - rbuggyMatches = new RegExp( rbuggyMatches.join("|") ); - - // Element contains another - // Purposefully does not implement inclusive descendent - // As in, an element does not contain itself - contains = isNative(docElem.contains) || docElem.compareDocumentPosition ? - function( a, b ) { - var adown = a.nodeType === 9 ? a.documentElement : a, - bup = b && b.parentNode; - return a === bup || !!( bup && bup.nodeType === 1 && ( - adown.contains ? 
- adown.contains( bup ) : - a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 - )); - } : - function( a, b ) { - if ( b ) { - while ( (b = b.parentNode) ) { - if ( b === a ) { - return true; - } - } - } - return false; - }; - - // Document order sorting - sortOrder = docElem.compareDocumentPosition ? - function( a, b ) { - var compare; - - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - if ( (compare = b.compareDocumentPosition && a.compareDocumentPosition && a.compareDocumentPosition( b )) ) { - if ( compare & 1 || a.parentNode && a.parentNode.nodeType === 11 ) { - if ( a === doc || contains( preferredDoc, a ) ) { - return -1; - } - if ( b === doc || contains( preferredDoc, b ) ) { - return 1; - } - return 0; - } - return compare & 4 ? -1 : 1; - } - - return a.compareDocumentPosition ? -1 : 1; - } : - function( a, b ) { - var cur, - i = 0, - aup = a.parentNode, - bup = b.parentNode, - ap = [ a ], - bp = [ b ]; - - // Exit early if the nodes are identical - if ( a === b ) { - hasDuplicate = true; - return 0; - - // Parentless nodes are either documents or disconnected - } else if ( !aup || !bup ) { - return a === doc ? -1 : - b === doc ? 1 : - aup ? -1 : - bup ? 1 : - 0; - - // If the nodes are siblings, we can do a quick check - } else if ( aup === bup ) { - return siblingCheck( a, b ); - } - - // Otherwise we need full lists of their ancestors for comparison - cur = a; - while ( (cur = cur.parentNode) ) { - ap.unshift( cur ); - } - cur = b; - while ( (cur = cur.parentNode) ) { - bp.unshift( cur ); - } - - // Walk down the tree looking for a discrepancy - while ( ap[i] === bp[i] ) { - i++; - } - - return i ? - // Do a sibling check if the nodes have a common ancestor - siblingCheck( ap[i], bp[i] ) : - - // Otherwise nodes in our document sort first - ap[i] === preferredDoc ? -1 : - bp[i] === preferredDoc ? 
1 : - 0; - }; - - // Always assume the presence of duplicates if sort doesn't - // pass them to our comparison function (as in Google Chrome). - hasDuplicate = false; - [0, 0].sort( sortOrder ); - support.detectDuplicates = hasDuplicate; - - return document; - }; - - Sizzle.matches = function( expr, elements ) { - return Sizzle( expr, null, null, elements ); - }; - - Sizzle.matchesSelector = function( elem, expr ) { - // Set document vars if needed - if ( ( elem.ownerDocument || elem ) !== document ) { - setDocument( elem ); - } - - // Make sure that attribute selectors are quoted - expr = expr.replace( rattributeQuotes, "='$1']" ); - - // rbuggyQSA always contains :focus, so no need for an existence check - if ( support.matchesSelector && !documentIsXML && (!rbuggyMatches || !rbuggyMatches.test(expr)) && !rbuggyQSA.test(expr) ) { - try { - var ret = matches.call( elem, expr ); - - // IE 9's matchesSelector returns false on disconnected nodes - if ( ret || support.disconnectedMatch || - // As well, disconnected nodes are said to be in a document - // fragment in IE 9 - elem.document && elem.document.nodeType !== 11 ) { - return ret; - } - } catch(e) {} - } - - return Sizzle( expr, document, null, [elem] ).length > 0; - }; - - Sizzle.contains = function( context, elem ) { - // Set document vars if needed - if ( ( context.ownerDocument || context ) !== document ) { - setDocument( context ); - } - return contains( context, elem ); - }; - - Sizzle.attr = function( elem, name ) { - var val; - - // Set document vars if needed - if ( ( elem.ownerDocument || elem ) !== document ) { - setDocument( elem ); - } - - if ( !documentIsXML ) { - name = name.toLowerCase(); - } - if ( (val = Expr.attrHandle[ name ]) ) { - return val( elem ); - } - if ( documentIsXML || support.attributes ) { - return elem.getAttribute( name ); - } - return ( (val = elem.getAttributeNode( name )) || elem.getAttribute( name ) ) && elem[ name ] === true ? - name : - val && val.specified ? 
val.value : null; - }; - - Sizzle.error = function( msg ) { - throw new Error( "Syntax error, unrecognized expression: " + msg ); - }; - -// Document sorting and removing duplicates - Sizzle.uniqueSort = function( results ) { - var elem, - duplicates = [], - i = 1, - j = 0; - - // Unless we *know* we can detect duplicates, assume their presence - hasDuplicate = !support.detectDuplicates; - results.sort( sortOrder ); - - if ( hasDuplicate ) { - for ( ; (elem = results[i]); i++ ) { - if ( elem === results[ i - 1 ] ) { - j = duplicates.push( i ); - } - } - while ( j-- ) { - results.splice( duplicates[ j ], 1 ); - } - } - - return results; - }; - - function siblingCheck( a, b ) { - var cur = b && a, - diff = cur && ( ~b.sourceIndex || MAX_NEGATIVE ) - ( ~a.sourceIndex || MAX_NEGATIVE ); - - // Use IE sourceIndex if available on both nodes - if ( diff ) { - return diff; - } - - // Check if b follows a - if ( cur ) { - while ( (cur = cur.nextSibling) ) { - if ( cur === b ) { - return -1; - } - } - } - - return a ? 
1 : -1; - } - -// Returns a function to use in pseudos for input types - function createInputPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === type; - }; - } - -// Returns a function to use in pseudos for buttons - function createButtonPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return (name === "input" || name === "button") && elem.type === type; - }; - } - -// Returns a function to use in pseudos for positionals - function createPositionalPseudo( fn ) { - return markFunction(function( argument ) { - argument = +argument; - return markFunction(function( seed, matches ) { - var j, - matchIndexes = fn( [], seed.length, argument ), - i = matchIndexes.length; - - // Match elements found at the specified indexes - while ( i-- ) { - if ( seed[ (j = matchIndexes[i]) ] ) { - seed[j] = !(matches[j] = seed[j]); - } - } - }); - }); - } - - /** - * Utility function for retrieving the text value of an array of DOM nodes - * @param {Array|Element} elem - */ - getText = Sizzle.getText = function( elem ) { - var node, - ret = "", - i = 0, - nodeType = elem.nodeType; - - if ( !nodeType ) { - // If no nodeType, this is expected to be an array - for ( ; (node = elem[i]); i++ ) { - // Do not traverse comment nodes - ret += getText( node ); - } - } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { - // Use textContent for elements - // innerText usage removed for consistency of new lines (see #11153) - if ( typeof elem.textContent === "string" ) { - return elem.textContent; - } else { - // Traverse its children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - ret += getText( elem ); - } - } - } else if ( nodeType === 3 || nodeType === 4 ) { - return elem.nodeValue; - } - // Do not include comment or processing instruction nodes - - return ret; - }; - - Expr = Sizzle.selectors = { - - // Can be adjusted by the user - 
cacheLength: 50, - - createPseudo: markFunction, - - match: matchExpr, - - find: {}, - - relative: { - ">": { dir: "parentNode", first: true }, - " ": { dir: "parentNode" }, - "+": { dir: "previousSibling", first: true }, - "~": { dir: "previousSibling" } - }, - - preFilter: { - "ATTR": function( match ) { - match[1] = match[1].replace( runescape, funescape ); - - // Move the given value to match[3] whether quoted or unquoted - match[3] = ( match[4] || match[5] || "" ).replace( runescape, funescape ); - - if ( match[2] === "~=" ) { - match[3] = " " + match[3] + " "; - } - - return match.slice( 0, 4 ); - }, - - "CHILD": function( match ) { - /* matches from matchExpr["CHILD"] - 1 type (only|nth|...) - 2 what (child|of-type) - 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) - 4 xn-component of xn+y argument ([+-]?\d*n|) - 5 sign of xn-component - 6 x of xn-component - 7 sign of y-component - 8 y of y-component - */ - match[1] = match[1].toLowerCase(); - - if ( match[1].slice( 0, 3 ) === "nth" ) { - // nth-* requires argument - if ( !match[3] ) { - Sizzle.error( match[0] ); - } - - // numeric x and y parameters for Expr.filter.CHILD - // remember that false/true cast respectively to 0/1 - match[4] = +( match[4] ? 
match[5] + (match[6] || 1) : 2 * ( match[3] === "even" || match[3] === "odd" ) ); - match[5] = +( ( match[7] + match[8] ) || match[3] === "odd" ); - - // other types prohibit arguments - } else if ( match[3] ) { - Sizzle.error( match[0] ); - } - - return match; - }, - - "PSEUDO": function( match ) { - var excess, - unquoted = !match[5] && match[2]; - - if ( matchExpr["CHILD"].test( match[0] ) ) { - return null; - } - - // Accept quoted arguments as-is - if ( match[4] ) { - match[2] = match[4]; - - // Strip excess characters from unquoted arguments - } else if ( unquoted && rpseudo.test( unquoted ) && - // Get excess from tokenize (recursively) - (excess = tokenize( unquoted, true )) && - // advance to the next closing parenthesis - (excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length) ) { - - // excess is a negative index - match[0] = match[0].slice( 0, excess ); - match[2] = unquoted.slice( 0, excess ); - } - - // Return only captures needed by the pseudo filter method (type and argument) - return match.slice( 0, 3 ); - } - }, - - filter: { - - "TAG": function( nodeName ) { - if ( nodeName === "*" ) { - return function() { return true; }; - } - - nodeName = nodeName.replace( runescape, funescape ).toLowerCase(); - return function( elem ) { - return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; - }; - }, - - "CLASS": function( className ) { - var pattern = classCache[ className + " " ]; - - return pattern || - (pattern = new RegExp( "(^|" + whitespace + ")" + className + "(" + whitespace + "|$)" )) && - classCache( className, function( elem ) { - return pattern.test( elem.className || (typeof elem.getAttribute !== strundefined && elem.getAttribute("class")) || "" ); - }); - }, - - "ATTR": function( name, operator, check ) { - return function( elem ) { - var result = Sizzle.attr( elem, name ); - - if ( result == null ) { - return operator === "!="; - } - if ( !operator ) { - return true; - } - - result += ""; - - return operator 
=== "=" ? result === check : - operator === "!=" ? result !== check : - operator === "^=" ? check && result.indexOf( check ) === 0 : - operator === "*=" ? check && result.indexOf( check ) > -1 : - operator === "$=" ? check && result.slice( -check.length ) === check : - operator === "~=" ? ( " " + result + " " ).indexOf( check ) > -1 : - operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : - false; - }; - }, - - "CHILD": function( type, what, argument, first, last ) { - var simple = type.slice( 0, 3 ) !== "nth", - forward = type.slice( -4 ) !== "last", - ofType = what === "of-type"; - - return first === 1 && last === 0 ? - - // Shortcut for :nth-*(n) - function( elem ) { - return !!elem.parentNode; - } : - - function( elem, context, xml ) { - var cache, outerCache, node, diff, nodeIndex, start, - dir = simple !== forward ? "nextSibling" : "previousSibling", - parent = elem.parentNode, - name = ofType && elem.nodeName.toLowerCase(), - useCache = !xml && !ofType; - - if ( parent ) { - - // :(first|last|only)-(child|of-type) - if ( simple ) { - while ( dir ) { - node = elem; - while ( (node = node[ dir ]) ) { - if ( ofType ? node.nodeName.toLowerCase() === name : node.nodeType === 1 ) { - return false; - } - } - // Reverse direction for :only-* (if we haven't yet done so) - start = dir = type === "only" && !start && "nextSibling"; - } - return true; - } - - start = [ forward ? parent.firstChild : parent.lastChild ]; - - // non-xml :nth-child(...) 
stores cache data on `parent` - if ( forward && useCache ) { - // Seek `elem` from a previously-cached index - outerCache = parent[ expando ] || (parent[ expando ] = {}); - cache = outerCache[ type ] || []; - nodeIndex = cache[0] === dirruns && cache[1]; - diff = cache[0] === dirruns && cache[2]; - node = nodeIndex && parent.childNodes[ nodeIndex ]; - - while ( (node = ++nodeIndex && node && node[ dir ] || - - // Fallback to seeking `elem` from the start - (diff = nodeIndex = 0) || start.pop()) ) { - - // When found, cache indexes on `parent` and break - if ( node.nodeType === 1 && ++diff && node === elem ) { - outerCache[ type ] = [ dirruns, nodeIndex, diff ]; - break; - } - } - - // Use previously-cached element index if available - } else if ( useCache && (cache = (elem[ expando ] || (elem[ expando ] = {}))[ type ]) && cache[0] === dirruns ) { - diff = cache[1]; - - // xml :nth-child(...) or :nth-last-child(...) or :nth(-last)?-of-type(...) - } else { - // Use the same loop as above to seek `elem` from the start - while ( (node = ++nodeIndex && node && node[ dir ] || - (diff = nodeIndex = 0) || start.pop()) ) { - - if ( ( ofType ? 
node.nodeName.toLowerCase() === name : node.nodeType === 1 ) && ++diff ) { - // Cache the index of each encountered element - if ( useCache ) { - (node[ expando ] || (node[ expando ] = {}))[ type ] = [ dirruns, diff ]; - } - - if ( node === elem ) { - break; - } - } - } - } - - // Incorporate the offset, then check against cycle size - diff -= last; - return diff === first || ( diff % first === 0 && diff / first >= 0 ); - } - }; - }, - - "PSEUDO": function( pseudo, argument ) { - // pseudo-class names are case-insensitive - // http://www.w3.org/TR/selectors/#pseudo-classes - // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters - // Remember that setFilters inherits from pseudos - var args, - fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || - Sizzle.error( "unsupported pseudo: " + pseudo ); - - // The user may use createPseudo to indicate that - // arguments are needed to create the filter function - // just as Sizzle does - if ( fn[ expando ] ) { - return fn( argument ); - } - - // But maintain support for old signatures - if ( fn.length > 1 ) { - args = [ pseudo, pseudo, "", argument ]; - return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? - markFunction(function( seed, matches ) { - var idx, - matched = fn( seed, argument ), - i = matched.length; - while ( i-- ) { - idx = indexOf.call( seed, matched[i] ); - seed[ idx ] = !( matches[ idx ] = matched[i] ); - } - }) : - function( elem ) { - return fn( elem, 0, args ); - }; - } - - return fn; - } - }, - - pseudos: { - // Potentially complex pseudos - "not": markFunction(function( selector ) { - // Trim the selector passed to compile - // to avoid treating leading and trailing - // spaces as combinators - var input = [], - results = [], - matcher = compile( selector.replace( rtrim, "$1" ) ); - - return matcher[ expando ] ? 
- markFunction(function( seed, matches, context, xml ) { - var elem, - unmatched = matcher( seed, null, xml, [] ), - i = seed.length; - - // Match elements unmatched by `matcher` - while ( i-- ) { - if ( (elem = unmatched[i]) ) { - seed[i] = !(matches[i] = elem); - } - } - }) : - function( elem, context, xml ) { - input[0] = elem; - matcher( input, null, xml, results ); - return !results.pop(); - }; - }), - - "has": markFunction(function( selector ) { - return function( elem ) { - return Sizzle( selector, elem ).length > 0; - }; - }), - - "contains": markFunction(function( text ) { - return function( elem ) { - return ( elem.textContent || elem.innerText || getText( elem ) ).indexOf( text ) > -1; - }; - }), - - // "Whether an element is represented by a :lang() selector - // is based solely on the element's language value - // being equal to the identifier C, - // or beginning with the identifier C immediately followed by "-". - // The matching of C against the element's language value is performed case-insensitively. - // The identifier C does not have to be a valid language name." - // http://www.w3.org/TR/selectors/#lang-pseudo - "lang": markFunction( function( lang ) { - // lang value must be a valid identifider - if ( !ridentifier.test(lang || "") ) { - Sizzle.error( "unsupported lang: " + lang ); - } - lang = lang.replace( runescape, funescape ).toLowerCase(); - return function( elem ) { - var elemLang; - do { - if ( (elemLang = documentIsXML ? 
- elem.getAttribute("xml:lang") || elem.getAttribute("lang") : - elem.lang) ) { - - elemLang = elemLang.toLowerCase(); - return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; - } - } while ( (elem = elem.parentNode) && elem.nodeType === 1 ); - return false; - }; - }), - - // Miscellaneous - "target": function( elem ) { - var hash = window.location && window.location.hash; - return hash && hash.slice( 1 ) === elem.id; - }, - - "root": function( elem ) { - return elem === docElem; - }, - - "focus": function( elem ) { - return elem === document.activeElement && (!document.hasFocus || document.hasFocus()) && !!(elem.type || elem.href || ~elem.tabIndex); - }, - - // Boolean properties - "enabled": function( elem ) { - return elem.disabled === false; - }, - - "disabled": function( elem ) { - return elem.disabled === true; - }, - - "checked": function( elem ) { - // In CSS3, :checked should return both checked and selected elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - var nodeName = elem.nodeName.toLowerCase(); - return (nodeName === "input" && !!elem.checked) || (nodeName === "option" && !!elem.selected); - }, - - "selected": function( elem ) { - // Accessing this property makes selected-by-default - // options in Safari work properly - if ( elem.parentNode ) { - elem.parentNode.selectedIndex; - } - - return elem.selected === true; - }, - - // Contents - "empty": function( elem ) { - // http://www.w3.org/TR/selectors/#empty-pseudo - // :empty is only affected by element nodes and content nodes(including text(3), cdata(4)), - // not comment, processing instructions, or others - // Thanks to Diego Perini for the nodeName shortcut - // Greater than "@" means alpha characters (specifically not starting with "#" or "?") - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - if ( elem.nodeName > "@" || elem.nodeType === 3 || elem.nodeType === 4 ) { - return false; - } - } - return true; - }, - - "parent": function( elem 
) { - return !Expr.pseudos["empty"]( elem ); - }, - - // Element/input types - "header": function( elem ) { - return rheader.test( elem.nodeName ); - }, - - "input": function( elem ) { - return rinputs.test( elem.nodeName ); - }, - - "button": function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === "button" || name === "button"; - }, - - "text": function( elem ) { - var attr; - // IE6 and 7 will map elem.type to 'text' for new HTML5 types (search, etc) - // use getAttribute instead to test this case - return elem.nodeName.toLowerCase() === "input" && - elem.type === "text" && - ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === elem.type ); - }, - - // Position-in-collection - "first": createPositionalPseudo(function() { - return [ 0 ]; - }), - - "last": createPositionalPseudo(function( matchIndexes, length ) { - return [ length - 1 ]; - }), - - "eq": createPositionalPseudo(function( matchIndexes, length, argument ) { - return [ argument < 0 ? argument + length : argument ]; - }), - - "even": createPositionalPseudo(function( matchIndexes, length ) { - var i = 0; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "odd": createPositionalPseudo(function( matchIndexes, length ) { - var i = 1; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "lt": createPositionalPseudo(function( matchIndexes, length, argument ) { - var i = argument < 0 ? argument + length : argument; - for ( ; --i >= 0; ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "gt": createPositionalPseudo(function( matchIndexes, length, argument ) { - var i = argument < 0 ? 
argument + length : argument; - for ( ; ++i < length; ) { - matchIndexes.push( i ); - } - return matchIndexes; - }) - } - }; - -// Add button/input type pseudos - for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { - Expr.pseudos[ i ] = createInputPseudo( i ); - } - for ( i in { submit: true, reset: true } ) { - Expr.pseudos[ i ] = createButtonPseudo( i ); - } - - function tokenize( selector, parseOnly ) { - var matched, match, tokens, type, - soFar, groups, preFilters, - cached = tokenCache[ selector + " " ]; - - if ( cached ) { - return parseOnly ? 0 : cached.slice( 0 ); - } - - soFar = selector; - groups = []; - preFilters = Expr.preFilter; - - while ( soFar ) { - - // Comma and first run - if ( !matched || (match = rcomma.exec( soFar )) ) { - if ( match ) { - // Don't consume trailing commas as valid - soFar = soFar.slice( match[0].length ) || soFar; - } - groups.push( tokens = [] ); - } - - matched = false; - - // Combinators - if ( (match = rcombinators.exec( soFar )) ) { - matched = match.shift(); - tokens.push( { - value: matched, - // Cast descendant combinators to space - type: match[0].replace( rtrim, " " ) - } ); - soFar = soFar.slice( matched.length ); - } - - // Filters - for ( type in Expr.filter ) { - if ( (match = matchExpr[ type ].exec( soFar )) && (!preFilters[ type ] || - (match = preFilters[ type ]( match ))) ) { - matched = match.shift(); - tokens.push( { - value: matched, - type: type, - matches: match - } ); - soFar = soFar.slice( matched.length ); - } - } - - if ( !matched ) { - break; - } - } - - // Return the length of the invalid excess - // if we're just parsing - // Otherwise, throw an error or return tokens - return parseOnly ? - soFar.length : - soFar ? 
- Sizzle.error( selector ) : - // Cache the tokens - tokenCache( selector, groups ).slice( 0 ); - } - - function toSelector( tokens ) { - var i = 0, - len = tokens.length, - selector = ""; - for ( ; i < len; i++ ) { - selector += tokens[i].value; - } - return selector; - } - - function addCombinator( matcher, combinator, base ) { - var dir = combinator.dir, - checkNonElements = base && dir === "parentNode", - doneName = done++; - - return combinator.first ? - // Check against closest ancestor/preceding element - function( elem, context, xml ) { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - return matcher( elem, context, xml ); - } - } - } : - - // Check against all ancestor/preceding elements - function( elem, context, xml ) { - var data, cache, outerCache, - dirkey = dirruns + " " + doneName; - - // We can't set arbitrary data on XML nodes, so they don't benefit from dir caching - if ( xml ) { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - if ( matcher( elem, context, xml ) ) { - return true; - } - } - } - } else { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - outerCache = elem[ expando ] || (elem[ expando ] = {}); - if ( (cache = outerCache[ dir ]) && cache[0] === dirkey ) { - if ( (data = cache[1]) === true || data === cachedruns ) { - return data === true; - } - } else { - cache = outerCache[ dir ] = [ dirkey ]; - cache[1] = matcher( elem, context, xml ) || cachedruns; - if ( cache[1] === true ) { - return true; - } - } - } - } - } - }; - } - - function elementMatcher( matchers ) { - return matchers.length > 1 ? 
- function( elem, context, xml ) { - var i = matchers.length; - while ( i-- ) { - if ( !matchers[i]( elem, context, xml ) ) { - return false; - } - } - return true; - } : - matchers[0]; - } - - function condense( unmatched, map, filter, context, xml ) { - var elem, - newUnmatched = [], - i = 0, - len = unmatched.length, - mapped = map != null; - - for ( ; i < len; i++ ) { - if ( (elem = unmatched[i]) ) { - if ( !filter || filter( elem, context, xml ) ) { - newUnmatched.push( elem ); - if ( mapped ) { - map.push( i ); - } - } - } - } - - return newUnmatched; - } - - function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { - if ( postFilter && !postFilter[ expando ] ) { - postFilter = setMatcher( postFilter ); - } - if ( postFinder && !postFinder[ expando ] ) { - postFinder = setMatcher( postFinder, postSelector ); - } - return markFunction(function( seed, results, context, xml ) { - var temp, i, elem, - preMap = [], - postMap = [], - preexisting = results.length, - - // Get initial elements from seed or context - elems = seed || multipleContexts( selector || "*", context.nodeType ? [ context ] : context, [] ), - - // Prefilter to get matcher input, preserving a map for seed-results synchronization - matcherIn = preFilter && ( seed || !selector ) ? - condense( elems, preMap, preFilter, context, xml ) : - elems, - - matcherOut = matcher ? - // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, - postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
- - // ...intermediate processing is necessary - [] : - - // ...otherwise use results directly - results : - matcherIn; - - // Find primary matches - if ( matcher ) { - matcher( matcherIn, matcherOut, context, xml ); - } - - // Apply postFilter - if ( postFilter ) { - temp = condense( matcherOut, postMap ); - postFilter( temp, [], context, xml ); - - // Un-match failing elements by moving them back to matcherIn - i = temp.length; - while ( i-- ) { - if ( (elem = temp[i]) ) { - matcherOut[ postMap[i] ] = !(matcherIn[ postMap[i] ] = elem); - } - } - } - - if ( seed ) { - if ( postFinder || preFilter ) { - if ( postFinder ) { - // Get the final matcherOut by condensing this intermediate into postFinder contexts - temp = []; - i = matcherOut.length; - while ( i-- ) { - if ( (elem = matcherOut[i]) ) { - // Restore matcherIn since elem is not yet a final match - temp.push( (matcherIn[i] = elem) ); - } - } - postFinder( null, (matcherOut = []), temp, xml ); - } - - // Move matched elements from seed to results to keep them synchronized - i = matcherOut.length; - while ( i-- ) { - if ( (elem = matcherOut[i]) && - (temp = postFinder ? indexOf.call( seed, elem ) : preMap[i]) > -1 ) { - - seed[temp] = !(results[temp] = elem); - } - } - } - - // Add elements to results, through postFinder if defined - } else { - matcherOut = condense( - matcherOut === results ? - matcherOut.splice( preexisting, matcherOut.length ) : - matcherOut - ); - if ( postFinder ) { - postFinder( null, results, matcherOut, xml ); - } else { - push.apply( results, matcherOut ); - } - } - }); - } - - function matcherFromTokens( tokens ) { - var checkContext, matcher, j, - len = tokens.length, - leadingRelative = Expr.relative[ tokens[0].type ], - implicitRelative = leadingRelative || Expr.relative[" "], - i = leadingRelative ? 
1 : 0, - - // The foundational matcher ensures that elements are reachable from top-level context(s) - matchContext = addCombinator( function( elem ) { - return elem === checkContext; - }, implicitRelative, true ), - matchAnyContext = addCombinator( function( elem ) { - return indexOf.call( checkContext, elem ) > -1; - }, implicitRelative, true ), - matchers = [ function( elem, context, xml ) { - return ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( - (checkContext = context).nodeType ? - matchContext( elem, context, xml ) : - matchAnyContext( elem, context, xml ) ); - } ]; - - for ( ; i < len; i++ ) { - if ( (matcher = Expr.relative[ tokens[i].type ]) ) { - matchers = [ addCombinator(elementMatcher( matchers ), matcher) ]; - } else { - matcher = Expr.filter[ tokens[i].type ].apply( null, tokens[i].matches ); - - // Return special upon seeing a positional matcher - if ( matcher[ expando ] ) { - // Find the next relative operator (if any) for proper handling - j = ++i; - for ( ; j < len; j++ ) { - if ( Expr.relative[ tokens[j].type ] ) { - break; - } - } - return setMatcher( - i > 1 && elementMatcher( matchers ), - i > 1 && toSelector( tokens.slice( 0, i - 1 ) ).replace( rtrim, "$1" ), - matcher, - i < j && matcherFromTokens( tokens.slice( i, j ) ), - j < len && matcherFromTokens( (tokens = tokens.slice( j )) ), - j < len && toSelector( tokens ) - ); - } - matchers.push( matcher ); - } - } - - return elementMatcher( matchers ); - } - - function matcherFromGroupMatchers( elementMatchers, setMatchers ) { - // A counter to specify which element is currently being matched - var matcherCachedRuns = 0, - bySet = setMatchers.length > 0, - byElement = elementMatchers.length > 0, - superMatcher = function( seed, context, xml, results, expandContext ) { - var elem, j, matcher, - setMatched = [], - matchedCount = 0, - i = "0", - unmatched = seed && [], - outermost = expandContext != null, - contextBackup = outermostContext, - // We must always have either 
seed elements or context - elems = seed || byElement && Expr.find["TAG"]( "*", expandContext && context.parentNode || context ), - // Use integer dirruns iff this is the outermost matcher - dirrunsUnique = (dirruns += contextBackup == null ? 1 : Math.random() || 0.1); - - if ( outermost ) { - outermostContext = context !== document && context; - cachedruns = matcherCachedRuns; - } - - // Add elements passing elementMatchers directly to results - // Keep `i` a string if there are no elements so `matchedCount` will be "00" below - for ( ; (elem = elems[i]) != null; i++ ) { - if ( byElement && elem ) { - j = 0; - while ( (matcher = elementMatchers[j++]) ) { - if ( matcher( elem, context, xml ) ) { - results.push( elem ); - break; - } - } - if ( outermost ) { - dirruns = dirrunsUnique; - cachedruns = ++matcherCachedRuns; - } - } - - // Track unmatched elements for set filters - if ( bySet ) { - // They will have gone through all possible matchers - if ( (elem = !matcher && elem) ) { - matchedCount--; - } - - // Lengthen the array for every element, matched or not - if ( seed ) { - unmatched.push( elem ); - } - } - } - - // Apply set filters to unmatched elements - matchedCount += i; - if ( bySet && i !== matchedCount ) { - j = 0; - while ( (matcher = setMatchers[j++]) ) { - matcher( unmatched, setMatched, context, xml ); - } - - if ( seed ) { - // Reintegrate element matches to eliminate the need for sorting - if ( matchedCount > 0 ) { - while ( i-- ) { - if ( !(unmatched[i] || setMatched[i]) ) { - setMatched[i] = pop.call( results ); - } - } - } - - // Discard index placeholder values to get only actual matches - setMatched = condense( setMatched ); - } - - // Add matches to results - push.apply( results, setMatched ); - - // Seedless set matches succeeding multiple successful matchers stipulate sorting - if ( outermost && !seed && setMatched.length > 0 && - ( matchedCount + setMatchers.length ) > 1 ) { - - Sizzle.uniqueSort( results ); - } - } - - // Override 
manipulation of globals by nested matchers - if ( outermost ) { - dirruns = dirrunsUnique; - outermostContext = contextBackup; - } - - return unmatched; - }; - - return bySet ? - markFunction( superMatcher ) : - superMatcher; - } - - compile = Sizzle.compile = function( selector, group /* Internal Use Only */ ) { - var i, - setMatchers = [], - elementMatchers = [], - cached = compilerCache[ selector + " " ]; - - if ( !cached ) { - // Generate a function of recursive functions that can be used to check each element - if ( !group ) { - group = tokenize( selector ); - } - i = group.length; - while ( i-- ) { - cached = matcherFromTokens( group[i] ); - if ( cached[ expando ] ) { - setMatchers.push( cached ); - } else { - elementMatchers.push( cached ); - } - } - - // Cache the compiled function - cached = compilerCache( selector, matcherFromGroupMatchers( elementMatchers, setMatchers ) ); - } - return cached; - }; - - function multipleContexts( selector, contexts, results ) { - var i = 0, - len = contexts.length; - for ( ; i < len; i++ ) { - Sizzle( selector, contexts[i], results ); - } - return results; - } - - function select( selector, context, results, seed ) { - var i, tokens, token, type, find, - match = tokenize( selector ); - - if ( !seed ) { - // Try to minimize operations if there is only one group - if ( match.length === 1 ) { - - // Take a shortcut and set the context if the root selector is an ID - tokens = match[0] = match[0].slice( 0 ); - if ( tokens.length > 2 && (token = tokens[0]).type === "ID" && - context.nodeType === 9 && !documentIsXML && - Expr.relative[ tokens[1].type ] ) { - - context = Expr.find["ID"]( token.matches[0].replace( runescape, funescape ), context )[0]; - if ( !context ) { - return results; - } - - selector = selector.slice( tokens.shift().value.length ); - } - - // Fetch a seed set for right-to-left matching - i = matchExpr["needsContext"].test( selector ) ? 
0 : tokens.length; - while ( i-- ) { - token = tokens[i]; - - // Abort if we hit a combinator - if ( Expr.relative[ (type = token.type) ] ) { - break; - } - if ( (find = Expr.find[ type ]) ) { - // Search, expanding context for leading sibling combinators - if ( (seed = find( - token.matches[0].replace( runescape, funescape ), - rsibling.test( tokens[0].type ) && context.parentNode || context - )) ) { - - // If seed is empty or no tokens remain, we can return early - tokens.splice( i, 1 ); - selector = seed.length && toSelector( tokens ); - if ( !selector ) { - push.apply( results, slice.call( seed, 0 ) ); - return results; - } - - break; - } - } - } - } - } - - // Compile and execute a filtering function - // Provide `match` to avoid retokenization if we modified the selector above - compile( selector, match )( - seed, - context, - documentIsXML, - results, - rsibling.test( selector ) - ); - return results; - } - -// Deprecated - Expr.pseudos["nth"] = Expr.pseudos["eq"]; - -// Easy API for creating new setFilters - function setFilters() {} - Expr.filters = setFilters.prototype = Expr.pseudos; - Expr.setFilters = new setFilters(); - -// Initialize with the default document - setDocument(); - -// Override sizzle attribute retrieval - Sizzle.attr = jQuery.attr; - jQuery.find = Sizzle; - jQuery.expr = Sizzle.selectors; - jQuery.expr[":"] = jQuery.expr.pseudos; - jQuery.unique = Sizzle.uniqueSort; - jQuery.text = Sizzle.getText; - jQuery.isXMLDoc = Sizzle.isXML; - jQuery.contains = Sizzle.contains; - - - })( window ); - var runtil = /Until$/, - rparentsprev = /^(?:parents|prev(?:Until|All))/, - isSimple = /^.[^:#[.,]*$/, - rneedsContext = jQuery.expr.match.needsContext, - // methods guaranteed to produce a unique set when starting from a unique set - guaranteedUnique = { - children: true, - contents: true, - next: true, - prev: true - }; - - jQuery.fn.extend({ - find: function( selector ) { - var i, ret, self, - len = this.length; - - if ( typeof selector !== "string" 
) { - self = this; - return this.pushStack( jQuery( selector ).filter(function() { - for ( i = 0; i < len; i++ ) { - if ( jQuery.contains( self[ i ], this ) ) { - return true; - } - } - }) ); - } - - ret = []; - for ( i = 0; i < len; i++ ) { - jQuery.find( selector, this[ i ], ret ); - } - - // Needed because $( selector, context ) becomes $( context ).find( selector ) - ret = this.pushStack( len > 1 ? jQuery.unique( ret ) : ret ); - ret.selector = ( this.selector ? this.selector + " " : "" ) + selector; - return ret; - }, - - has: function( target ) { - var i, - targets = jQuery( target, this ), - len = targets.length; - - return this.filter(function() { - for ( i = 0; i < len; i++ ) { - if ( jQuery.contains( this, targets[i] ) ) { - return true; - } - } - }); - }, - - not: function( selector ) { - return this.pushStack( winnow(this, selector, false) ); - }, - - filter: function( selector ) { - return this.pushStack( winnow(this, selector, true) ); - }, - - is: function( selector ) { - return !!selector && ( - typeof selector === "string" ? - // If this is a positional/relative selector, check membership in the returned set - // so $("p:first").is("p:last") won't return true for a doc with two "p". - rneedsContext.test( selector ) ? - jQuery( selector, this.context ).index( this[0] ) >= 0 : - jQuery.filter( selector, this ).length > 0 : - this.filter( selector ).length > 0 ); - }, - - closest: function( selectors, context ) { - var cur, - i = 0, - l = this.length, - ret = [], - pos = rneedsContext.test( selectors ) || typeof selectors !== "string" ? - jQuery( selectors, context || this.context ) : - 0; - - for ( ; i < l; i++ ) { - cur = this[i]; - - while ( cur && cur.ownerDocument && cur !== context && cur.nodeType !== 11 ) { - if ( pos ? pos.index(cur) > -1 : jQuery.find.matchesSelector(cur, selectors) ) { - ret.push( cur ); - break; - } - cur = cur.parentNode; - } - } - - return this.pushStack( ret.length > 1 ? 
jQuery.unique( ret ) : ret ); - }, - - // Determine the position of an element within - // the matched set of elements - index: function( elem ) { - - // No argument, return index in parent - if ( !elem ) { - return ( this[0] && this[0].parentNode ) ? this.first().prevAll().length : -1; - } - - // index in selector - if ( typeof elem === "string" ) { - return jQuery.inArray( this[0], jQuery( elem ) ); - } - - // Locate the position of the desired element - return jQuery.inArray( - // If it receives a jQuery object, the first element is used - elem.jquery ? elem[0] : elem, this ); - }, - - add: function( selector, context ) { - var set = typeof selector === "string" ? - jQuery( selector, context ) : - jQuery.makeArray( selector && selector.nodeType ? [ selector ] : selector ), - all = jQuery.merge( this.get(), set ); - - return this.pushStack( jQuery.unique(all) ); - }, - - addBack: function( selector ) { - return this.add( selector == null ? - this.prevObject : this.prevObject.filter(selector) - ); - } - }); - - jQuery.fn.andSelf = jQuery.fn.addBack; - - function sibling( cur, dir ) { - do { - cur = cur[ dir ]; - } while ( cur && cur.nodeType !== 1 ); - - return cur; - } - - jQuery.each({ - parent: function( elem ) { - var parent = elem.parentNode; - return parent && parent.nodeType !== 11 ? 
parent : null; - }, - parents: function( elem ) { - return jQuery.dir( elem, "parentNode" ); - }, - parentsUntil: function( elem, i, until ) { - return jQuery.dir( elem, "parentNode", until ); - }, - next: function( elem ) { - return sibling( elem, "nextSibling" ); - }, - prev: function( elem ) { - return sibling( elem, "previousSibling" ); - }, - nextAll: function( elem ) { - return jQuery.dir( elem, "nextSibling" ); - }, - prevAll: function( elem ) { - return jQuery.dir( elem, "previousSibling" ); - }, - nextUntil: function( elem, i, until ) { - return jQuery.dir( elem, "nextSibling", until ); - }, - prevUntil: function( elem, i, until ) { - return jQuery.dir( elem, "previousSibling", until ); - }, - siblings: function( elem ) { - return jQuery.sibling( ( elem.parentNode || {} ).firstChild, elem ); - }, - children: function( elem ) { - return jQuery.sibling( elem.firstChild ); - }, - contents: function( elem ) { - return jQuery.nodeName( elem, "iframe" ) ? - elem.contentDocument || elem.contentWindow.document : - jQuery.merge( [], elem.childNodes ); - } - }, function( name, fn ) { - jQuery.fn[ name ] = function( until, selector ) { - var ret = jQuery.map( this, fn, until ); - - if ( !runtil.test( name ) ) { - selector = until; - } - - if ( selector && typeof selector === "string" ) { - ret = jQuery.filter( selector, ret ); - } - - ret = this.length > 1 && !guaranteedUnique[ name ] ? jQuery.unique( ret ) : ret; - - if ( this.length > 1 && rparentsprev.test( name ) ) { - ret = ret.reverse(); - } - - return this.pushStack( ret ); - }; - }); - - jQuery.extend({ - filter: function( expr, elems, not ) { - if ( not ) { - expr = ":not(" + expr + ")"; - } - - return elems.length === 1 ? - jQuery.find.matchesSelector(elems[0], expr) ? 
[ elems[0] ] : [] : - jQuery.find.matches(expr, elems); - }, - - dir: function( elem, dir, until ) { - var matched = [], - cur = elem[ dir ]; - - while ( cur && cur.nodeType !== 9 && (until === undefined || cur.nodeType !== 1 || !jQuery( cur ).is( until )) ) { - if ( cur.nodeType === 1 ) { - matched.push( cur ); - } - cur = cur[dir]; - } - return matched; - }, - - sibling: function( n, elem ) { - var r = []; - - for ( ; n; n = n.nextSibling ) { - if ( n.nodeType === 1 && n !== elem ) { - r.push( n ); - } - } - - return r; - } - }); - -// Implement the identical functionality for filter and not - function winnow( elements, qualifier, keep ) { - - // Can't pass null or undefined to indexOf in Firefox 4 - // Set to 0 to skip string check - qualifier = qualifier || 0; - - if ( jQuery.isFunction( qualifier ) ) { - return jQuery.grep(elements, function( elem, i ) { - var retVal = !!qualifier.call( elem, i, elem ); - return retVal === keep; - }); - - } else if ( qualifier.nodeType ) { - return jQuery.grep(elements, function( elem ) { - return ( elem === qualifier ) === keep; - }); - - } else if ( typeof qualifier === "string" ) { - var filtered = jQuery.grep(elements, function( elem ) { - return elem.nodeType === 1; - }); - - if ( isSimple.test( qualifier ) ) { - return jQuery.filter(qualifier, filtered, !keep); - } else { - qualifier = jQuery.filter( qualifier, filtered ); - } - } - - return jQuery.grep(elements, function( elem ) { - return ( jQuery.inArray( elem, qualifier ) >= 0 ) === keep; - }); - } - function createSafeFragment( document ) { - var list = nodeNames.split( "|" ), - safeFrag = document.createDocumentFragment(); - - if ( safeFrag.createElement ) { - while ( list.length ) { - safeFrag.createElement( - list.pop() - ); - } - } - return safeFrag; - } - - var nodeNames = "abbr|article|aside|audio|bdi|canvas|data|datalist|details|figcaption|figure|footer|" + - "header|hgroup|mark|meter|nav|output|progress|section|summary|time|video", - rinlinejQuery = / 
jQuery\d+="(?:null|\d+)"/g, - rnoshimcache = new RegExp("<(?:" + nodeNames + ")[\s/>]", "i"), - rleadingWhitespace = /^\s+/, - rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)/>/gi, - rtagName = /<([\w:]+)/, - rtbody = /<tbody/i, - rhtml = /<|&#?\w+;/, - rnoInnerhtml = /<(?:script|style|link)/i, - manipulation_rcheckableType = /^(?:checkbox|radio)$/i, - // checked="checked" or checked - rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i, - rscriptType = /^$|/(?:java|ecma)script/i, - rscriptTypeMasked = /^true/(.*)/, - rcleanScript = /^\s*<!(?:[CDATA[|--)|(?:]]|--)>\s*$/g, - - // We have to close these tags to support XHTML (#13200) - wrapMap = { - option: [ 1, "<select multiple='multiple'>", "</select>" ], - legend: [ 1, "<fieldset>", "</fieldset>" ], - area: [ 1, "<map>", "</map>" ], - param: [ 1, "<object>", "</object>" ], - thead: [ 1, "<table>", "</table>" ], - tr: [ 2, "<table><tbody>", "</tbody></table>" ], - col: [ 2, "<table><tbody></tbody><colgroup>", "</colgroup></table>" ], - td: [ 3, "<table><tbody><tr>", "</tr></tbody></table>" ], - - // IE6-8 can't serialize link, script, style, or any html5 (NoScope) tags, - // unless wrapped in a div with non-breaking characters in front of it. - _default: jQuery.support.htmlSerialize ? [ 0, "", "" ] : [ 1, "X<div>", "</div>" ] - }, - safeFragment = createSafeFragment( document ), - fragmentDiv = safeFragment.appendChild( document.createElement("div") ); - - wrapMap.optgroup = wrapMap.option; - wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; - wrapMap.th = wrapMap.td; - - jQuery.fn.extend({ - text: function( value ) { - return jQuery.access( this, function( value ) { - return value === undefined ? 
- jQuery.text( this ) : - this.empty().append( ( this[0] && this[0].ownerDocument || document ).createTextNode( value ) ); - }, null, value, arguments.length ); - }, - - wrapAll: function( html ) { - if ( jQuery.isFunction( html ) ) { - return this.each(function(i) { - jQuery(this).wrapAll( html.call(this, i) ); - }); - } - - if ( this[0] ) { - // The elements to wrap the target around - var wrap = jQuery( html, this[0].ownerDocument ).eq(0).clone(true); - - if ( this[0].parentNode ) { - wrap.insertBefore( this[0] ); - } - - wrap.map(function() { - var elem = this; - - while ( elem.firstChild && elem.firstChild.nodeType === 1 ) { - elem = elem.firstChild; - } - - return elem; - }).append( this ); - } - - return this; - }, - - wrapInner: function( html ) { - if ( jQuery.isFunction( html ) ) { - return this.each(function(i) { - jQuery(this).wrapInner( html.call(this, i) ); - }); - } - - return this.each(function() { - var self = jQuery( this ), - contents = self.contents(); - - if ( contents.length ) { - contents.wrapAll( html ); - - } else { - self.append( html ); - } - }); - }, - - wrap: function( html ) { - var isFunction = jQuery.isFunction( html ); - - return this.each(function(i) { - jQuery( this ).wrapAll( isFunction ? 
html.call(this, i) : html ); - }); - }, - - unwrap: function() { - return this.parent().each(function() { - if ( !jQuery.nodeName( this, "body" ) ) { - jQuery( this ).replaceWith( this.childNodes ); - } - }).end(); - }, - - append: function() { - return this.domManip(arguments, true, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - this.appendChild( elem ); - } - }); - }, - - prepend: function() { - return this.domManip(arguments, true, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - this.insertBefore( elem, this.firstChild ); - } - }); - }, - - before: function() { - return this.domManip( arguments, false, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this ); - } - }); - }, - - after: function() { - return this.domManip( arguments, false, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this.nextSibling ); - } - }); - }, - - // keepData is for internal use only--do not document - remove: function( selector, keepData ) { - var elem, - i = 0; - - for ( ; (elem = this[i]) != null; i++ ) { - if ( !selector || jQuery.filter( selector, [ elem ] ).length > 0 ) { - if ( !keepData && elem.nodeType === 1 ) { - jQuery.cleanData( getAll( elem ) ); - } - - if ( elem.parentNode ) { - if ( keepData && jQuery.contains( elem.ownerDocument, elem ) ) { - setGlobalEval( getAll( elem, "script" ) ); - } - elem.parentNode.removeChild( elem ); - } - } - } - - return this; - }, - - empty: function() { - var elem, - i = 0; - - for ( ; (elem = this[i]) != null; i++ ) { - // Remove element nodes and prevent memory leaks - if ( elem.nodeType === 1 ) { - jQuery.cleanData( getAll( elem, false ) ); - } - - // Remove any remaining nodes - while ( elem.firstChild ) { - elem.removeChild( elem.firstChild ); - } - - // If this is a select, ensure that it displays empty (#12336) - // Support: IE<9 - if ( elem.options && 
jQuery.nodeName( elem, "select" ) ) { - elem.options.length = 0; - } - } - - return this; - }, - - clone: function( dataAndEvents, deepDataAndEvents ) { - dataAndEvents = dataAndEvents == null ? false : dataAndEvents; - deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; - - return this.map( function () { - return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); - }); - }, - - html: function( value ) { - return jQuery.access( this, function( value ) { - var elem = this[0] || {}, - i = 0, - l = this.length; - - if ( value === undefined ) { - return elem.nodeType === 1 ? - elem.innerHTML.replace( rinlinejQuery, "" ) : - undefined; - } - - // See if we can take a shortcut and just use innerHTML - if ( typeof value === "string" && !rnoInnerhtml.test( value ) && - ( jQuery.support.htmlSerialize || !rnoshimcache.test( value ) ) && - ( jQuery.support.leadingWhitespace || !rleadingWhitespace.test( value ) ) && - !wrapMap[ ( rtagName.exec( value ) || ["", ""] )[1].toLowerCase() ] ) { - - value = value.replace( rxhtmlTag, "<$1></$2>" ); - - try { - for (; i < l; i++ ) { - // Remove element nodes and prevent memory leaks - elem = this[i] || {}; - if ( elem.nodeType === 1 ) { - jQuery.cleanData( getAll( elem, false ) ); - elem.innerHTML = value; - } - } - - elem = 0; - - // If using innerHTML throws an exception, use the fallback method - } catch(e) {} - } - - if ( elem ) { - this.empty().append( value ); - } - }, null, value, arguments.length ); - }, - - replaceWith: function( value ) { - var isFunc = jQuery.isFunction( value ); - - // Make sure that the elements are removed from the DOM before they are inserted - // this can help fix replacing a parent with child elements - if ( !isFunc && typeof value !== "string" ) { - value = jQuery( value ).not( this ).detach(); - } - - return this.domManip( [ value ], true, function( elem ) { - var next = this.nextSibling, - parent = this.parentNode; - - if ( parent ) { - jQuery( this ).remove(); - 
parent.insertBefore( elem, next ); - } - }); - }, - - detach: function( selector ) { - return this.remove( selector, true ); - }, - - domManip: function( args, table, callback ) { - - // Flatten any nested arrays - args = core_concat.apply( [], args ); - - var first, node, hasScripts, - scripts, doc, fragment, - i = 0, - l = this.length, - set = this, - iNoClone = l - 1, - value = args[0], - isFunction = jQuery.isFunction( value ); - - // We can't cloneNode fragments that contain checked, in WebKit - if ( isFunction || !( l <= 1 || typeof value !== "string" || jQuery.support.checkClone || !rchecked.test( value ) ) ) { - return this.each(function( index ) { - var self = set.eq( index ); - if ( isFunction ) { - args[0] = value.call( this, index, table ? self.html() : undefined ); - } - self.domManip( args, table, callback ); - }); - } - - if ( l ) { - fragment = jQuery.buildFragment( args, this[ 0 ].ownerDocument, false, this ); - first = fragment.firstChild; - - if ( fragment.childNodes.length === 1 ) { - fragment = first; - } - - if ( first ) { - table = table && jQuery.nodeName( first, "tr" ); - scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); - hasScripts = scripts.length; - - // Use the original fragment for the last item instead of the first because it can end up - // being emptied incorrectly in certain situations (#8070). - for ( ; i < l; i++ ) { - node = fragment; - - if ( i !== iNoClone ) { - node = jQuery.clone( node, true, true ); - - // Keep references to cloned scripts for later restoration - if ( hasScripts ) { - jQuery.merge( scripts, getAll( node, "script" ) ); - } - } - - callback.call( - table && jQuery.nodeName( this[i], "table" ) ? 
- findOrAppend( this[i], "tbody" ) : - this[i], - node, - i - ); - } - - if ( hasScripts ) { - doc = scripts[ scripts.length - 1 ].ownerDocument; - - // Reenable scripts - jQuery.map( scripts, restoreScript ); - - // Evaluate executable scripts on first document insertion - for ( i = 0; i < hasScripts; i++ ) { - node = scripts[ i ]; - if ( rscriptType.test( node.type || "" ) && - !jQuery._data( node, "globalEval" ) && jQuery.contains( doc, node ) ) { - - if ( node.src ) { - // Hope ajax is available... - jQuery.ajax({ - url: node.src, - type: "GET", - dataType: "script", - async: false, - global: false, - "throws": true - }); - } else { - jQuery.globalEval( ( node.text || node.textContent || node.innerHTML || "" ).replace( rcleanScript, "" ) ); - } - } - } - } - - // Fix #11809: Avoid leaking memory - fragment = first = null; - } - } - - return this; - } - }); - - function findOrAppend( elem, tag ) { - return elem.getElementsByTagName( tag )[0] || elem.appendChild( elem.ownerDocument.createElement( tag ) ); - } - -// Replace/restore the type attribute of script elements for safe DOM manipulation - function disableScript( elem ) { - var attr = elem.getAttributeNode("type"); - elem.type = ( attr && attr.specified ) + "/" + elem.type; - return elem; - } - function restoreScript( elem ) { - var match = rscriptTypeMasked.exec( elem.type ); - if ( match ) { - elem.type = match[1]; - } else { - elem.removeAttribute("type"); - } - return elem; - } - -// Mark scripts as having already been evaluated - function setGlobalEval( elems, refElements ) { - var elem, - i = 0; - for ( ; (elem = elems[i]) != null; i++ ) { - jQuery._data( elem, "globalEval", !refElements || jQuery._data( refElements[i], "globalEval" ) ); - } - } - - function cloneCopyEvent( src, dest ) { - - if ( dest.nodeType !== 1 || !jQuery.hasData( src ) ) { - return; - } - - var type, i, l, - oldData = jQuery._data( src ), - curData = jQuery._data( dest, oldData ), - events = oldData.events; - - if ( events ) { - 
delete curData.handle; - curData.events = {}; - - for ( type in events ) { - for ( i = 0, l = events[ type ].length; i < l; i++ ) { - jQuery.event.add( dest, type, events[ type ][ i ] ); - } - } - } - - // make the cloned public data object a copy from the original - if ( curData.data ) { - curData.data = jQuery.extend( {}, curData.data ); - } - } - - function fixCloneNodeIssues( src, dest ) { - var nodeName, e, data; - - // We do not need to do anything for non-Elements - if ( dest.nodeType !== 1 ) { - return; - } - - nodeName = dest.nodeName.toLowerCase(); - - // IE6-8 copies events bound via attachEvent when using cloneNode. - if ( !jQuery.support.noCloneEvent && dest[ jQuery.expando ] ) { - data = jQuery._data( dest ); - - for ( e in data.events ) { - jQuery.removeEvent( dest, e, data.handle ); - } - - // Event data gets referenced instead of copied if the expando gets copied too - dest.removeAttribute( jQuery.expando ); - } - - // IE blanks contents when cloning scripts, and tries to evaluate newly-set text - if ( nodeName === "script" && dest.text !== src.text ) { - disableScript( dest ).text = src.text; - restoreScript( dest ); - - // IE6-10 improperly clones children of object elements using classid. - // IE10 throws NoModificationAllowedError if parent is null, #12132. - } else if ( nodeName === "object" ) { - if ( dest.parentNode ) { - dest.outerHTML = src.outerHTML; - } - - // This path appears unavoidable for IE9. When cloning an object - // element in IE9, the outerHTML strategy above is not sufficient. - // If the src has innerHTML and the destination does not, - // copy the src.innerHTML into the dest.innerHTML. #10324 - if ( jQuery.support.html5Clone && ( src.innerHTML && !jQuery.trim(dest.innerHTML) ) ) { - dest.innerHTML = src.innerHTML; - } - - } else if ( nodeName === "input" && manipulation_rcheckableType.test( src.type ) ) { - // IE6-8 fails to persist the checked state of a cloned checkbox - // or radio button. 
Worse, IE6-7 fail to give the cloned element - // a checked appearance if the defaultChecked value isn't also set - - dest.defaultChecked = dest.checked = src.checked; - - // IE6-7 get confused and end up setting the value of a cloned - // checkbox/radio button to an empty string instead of "on" - if ( dest.value !== src.value ) { - dest.value = src.value; - } - - // IE6-8 fails to return the selected option to the default selected - // state when cloning options - } else if ( nodeName === "option" ) { - dest.defaultSelected = dest.selected = src.defaultSelected; - - // IE6-8 fails to set the defaultValue to the correct value when - // cloning other types of input fields - } else if ( nodeName === "input" || nodeName === "textarea" ) { - dest.defaultValue = src.defaultValue; - } - } - - jQuery.each({ - appendTo: "append", - prependTo: "prepend", - insertBefore: "before", - insertAfter: "after", - replaceAll: "replaceWith" - }, function( name, original ) { - jQuery.fn[ name ] = function( selector ) { - var elems, - i = 0, - ret = [], - insert = jQuery( selector ), - last = insert.length - 1; - - for ( ; i <= last; i++ ) { - elems = i === last ? this : this.clone(true); - jQuery( insert[i] )[ original ]( elems ); - - // Modern browsers can apply jQuery collections as arrays, but oldIE needs a .get() - core_push.apply( ret, elems.get() ); - } - - return this.pushStack( ret ); - }; - }); - - function getAll( context, tag ) { - var elems, elem, - i = 0, - found = typeof context.getElementsByTagName !== core_strundefined ? context.getElementsByTagName( tag || "*" ) : - typeof context.querySelectorAll !== core_strundefined ? 
context.querySelectorAll( tag || "*" ) : - undefined; - - if ( !found ) { - for ( found = [], elems = context.childNodes || context; (elem = elems[i]) != null; i++ ) { - if ( !tag || jQuery.nodeName( elem, tag ) ) { - found.push( elem ); - } else { - jQuery.merge( found, getAll( elem, tag ) ); - } - } - } - - return tag === undefined || tag && jQuery.nodeName( context, tag ) ? - jQuery.merge( [ context ], found ) : - found; - } - -// Used in buildFragment, fixes the defaultChecked property - function fixDefaultChecked( elem ) { - if ( manipulation_rcheckableType.test( elem.type ) ) { - elem.defaultChecked = elem.checked; - } - } - - jQuery.extend({ - clone: function( elem, dataAndEvents, deepDataAndEvents ) { - var destElements, node, clone, i, srcElements, - inPage = jQuery.contains( elem.ownerDocument, elem ); - - if ( jQuery.support.html5Clone || jQuery.isXMLDoc(elem) || !rnoshimcache.test( "<" + elem.nodeName + ">" ) ) { - clone = elem.cloneNode( true ); - - // IE<=8 does not properly clone detached, unknown element nodes - } else { - fragmentDiv.innerHTML = elem.outerHTML; - fragmentDiv.removeChild( clone = fragmentDiv.firstChild ); - } - - if ( (!jQuery.support.noCloneEvent || !jQuery.support.noCloneChecked) && - (elem.nodeType === 1 || elem.nodeType === 11) && !jQuery.isXMLDoc(elem) ) { - - // We eschew Sizzle here for performance reasons: http://jsperf.com/getall-vs-sizzle/2 - destElements = getAll( clone ); - srcElements = getAll( elem ); - - // Fix all IE cloning issues - for ( i = 0; (node = srcElements[i]) != null; ++i ) { - // Ensure that the destination node is not null; Fixes #9587 - if ( destElements[i] ) { - fixCloneNodeIssues( node, destElements[i] ); - } - } - } - - // Copy the events from the original to the clone - if ( dataAndEvents ) { - if ( deepDataAndEvents ) { - srcElements = srcElements || getAll( elem ); - destElements = destElements || getAll( clone ); - - for ( i = 0; (node = srcElements[i]) != null; i++ ) { - cloneCopyEvent( node, 
destElements[i] ); - } - } else { - cloneCopyEvent( elem, clone ); - } - } - - // Preserve script evaluation history - destElements = getAll( clone, "script" ); - if ( destElements.length > 0 ) { - setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); - } - - destElements = srcElements = node = null; - - // Return the cloned set - return clone; - }, - - buildFragment: function( elems, context, scripts, selection ) { - var j, elem, contains, - tmp, tag, tbody, wrap, - l = elems.length, - - // Ensure a safe fragment - safe = createSafeFragment( context ), - - nodes = [], - i = 0; - - for ( ; i < l; i++ ) { - elem = elems[ i ]; - - if ( elem || elem === 0 ) { - - // Add nodes directly - if ( jQuery.type( elem ) === "object" ) { - jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem ); - - // Convert non-html into a text node - } else if ( !rhtml.test( elem ) ) { - nodes.push( context.createTextNode( elem ) ); - - // Convert html into DOM nodes - } else { - tmp = tmp || safe.appendChild( context.createElement("div") ); - - // Deserialize a standard representation - tag = ( rtagName.exec( elem ) || ["", ""] )[1].toLowerCase(); - wrap = wrapMap[ tag ] || wrapMap._default; - - tmp.innerHTML = wrap[1] + elem.replace( rxhtmlTag, "<$1></$2>" ) + wrap[2]; - - // Descend through wrappers to the right content - j = wrap[0]; - while ( j-- ) { - tmp = tmp.lastChild; - } - - // Manually add leading whitespace removed by IE - if ( !jQuery.support.leadingWhitespace && rleadingWhitespace.test( elem ) ) { - nodes.push( context.createTextNode( rleadingWhitespace.exec( elem )[0] ) ); - } - - // Remove IE's autoinserted <tbody> from table fragments - if ( !jQuery.support.tbody ) { - - // String was a <table>, *may* have spurious <tbody> - elem = tag === "table" && !rtbody.test( elem ) ? - tmp.firstChild : - - // String was a bare <thead> or <tfoot> - wrap[1] === "<table>" && !rtbody.test( elem ) ? 
- tmp : - 0; - - j = elem && elem.childNodes.length; - while ( j-- ) { - if ( jQuery.nodeName( (tbody = elem.childNodes[j]), "tbody" ) && !tbody.childNodes.length ) { - elem.removeChild( tbody ); - } - } - } - - jQuery.merge( nodes, tmp.childNodes ); - - // Fix #12392 for WebKit and IE > 9 - tmp.textContent = ""; - - // Fix #12392 for oldIE - while ( tmp.firstChild ) { - tmp.removeChild( tmp.firstChild ); - } - - // Remember the top-level container for proper cleanup - tmp = safe.lastChild; - } - } - } - - // Fix #11356: Clear elements from fragment - if ( tmp ) { - safe.removeChild( tmp ); - } - - // Reset defaultChecked for any radios and checkboxes - // about to be appended to the DOM in IE 6/7 (#8060) - if ( !jQuery.support.appendChecked ) { - jQuery.grep( getAll( nodes, "input" ), fixDefaultChecked ); - } - - i = 0; - while ( (elem = nodes[ i++ ]) ) { - - // #4087 - If origin and destination elements are the same, and this is - // that element, do not do anything - if ( selection && jQuery.inArray( elem, selection ) !== -1 ) { - continue; - } - - contains = jQuery.contains( elem.ownerDocument, elem ); - - // Append to fragment - tmp = getAll( safe.appendChild( elem ), "script" ); - - // Preserve script evaluation history - if ( contains ) { - setGlobalEval( tmp ); - } - - // Capture executables - if ( scripts ) { - j = 0; - while ( (elem = tmp[ j++ ]) ) { - if ( rscriptType.test( elem.type || "" ) ) { - scripts.push( elem ); - } - } - } - } - - tmp = null; - - return safe; - }, - - cleanData: function( elems, /* internal */ acceptData ) { - var elem, type, id, data, - i = 0, - internalKey = jQuery.expando, - cache = jQuery.cache, - deleteExpando = jQuery.support.deleteExpando, - special = jQuery.event.special; - - for ( ; (elem = elems[i]) != null; i++ ) { - - if ( acceptData || jQuery.acceptData( elem ) ) { - - id = elem[ internalKey ]; - data = id && cache[ id ]; - - if ( data ) { - if ( data.events ) { - for ( type in data.events ) { - if ( special[ type ] 
) { - jQuery.event.remove( elem, type ); - - // This is a shortcut to avoid jQuery.event.remove's overhead - } else { - jQuery.removeEvent( elem, type, data.handle ); - } - } - } - - // Remove cache only if it was not already removed by jQuery.event.remove - if ( cache[ id ] ) { - - delete cache[ id ]; - - // IE does not allow us to delete expando properties from nodes, - // nor does it have a removeAttribute function on Document nodes; - // we must handle all of these cases - if ( deleteExpando ) { - delete elem[ internalKey ]; - - } else if ( typeof elem.removeAttribute !== core_strundefined ) { - elem.removeAttribute( internalKey ); - - } else { - elem[ internalKey ] = null; - } - - core_deletedIds.push( id ); - } - } - } - } - } - }); - var iframe, getStyles, curCSS, - ralpha = /alpha([^)]*)/i, - ropacity = /opacity\s*=\s*([^)]*)/, - rposition = /^(top|right|bottom|left)$/, - // swappable if display is none or starts with table except "table", "table-cell", or "table-caption" - // see here for display values: https://developer.mozilla.org/en-US/docs/CSS/display - rdisplayswap = /^(none|table(?!-c[ea]).+)/, - rmargin = /^margin/, - rnumsplit = new RegExp( "^(" + core_pnum + ")(.*)$", "i" ), - rnumnonpx = new RegExp( "^(" + core_pnum + ")(?!px)[a-z%]+$", "i" ), - rrelNum = new RegExp( "^([+-])=(" + core_pnum + ")", "i" ), - elemdisplay = { BODY: "block" }, - - cssShow = { position: "absolute", visibility: "hidden", display: "block" }, - cssNormalTransform = { - letterSpacing: 0, - fontWeight: 400 - }, - - cssExpand = [ "Top", "Right", "Bottom", "Left" ], - cssPrefixes = [ "Webkit", "O", "Moz", "ms" ]; - -// return a css property mapped to a potentially vendor prefixed property - function vendorPropName( style, name ) { - - // shortcut for names that are not vendor prefixed - if ( name in style ) { - return name; - } - - // check for vendor prefixed names - var capName = name.charAt(0).toUpperCase() + name.slice(1), - origName = name, - i = cssPrefixes.length; - - 
while ( i-- ) { - name = cssPrefixes[ i ] + capName; - if ( name in style ) { - return name; - } - } - - return origName; - } - - function isHidden( elem, el ) { - // isHidden might be called from jQuery#filter function; - // in that case, element will be second argument - elem = el || elem; - return jQuery.css( elem, "display" ) === "none" || !jQuery.contains( elem.ownerDocument, elem ); - } - - function showHide( elements, show ) { - var display, elem, hidden, - values = [], - index = 0, - length = elements.length; - - for ( ; index < length; index++ ) { - elem = elements[ index ]; - if ( !elem.style ) { - continue; - } - - values[ index ] = jQuery._data( elem, "olddisplay" ); - display = elem.style.display; - if ( show ) { - // Reset the inline display of this element to learn if it is - // being hidden by cascaded rules or not - if ( !values[ index ] && display === "none" ) { - elem.style.display = ""; - } - - // Set elements which have been overridden with display: none - // in a stylesheet to whatever the default browser style is - // for such an element - if ( elem.style.display === "" && isHidden( elem ) ) { - values[ index ] = jQuery._data( elem, "olddisplay", css_defaultDisplay(elem.nodeName) ); - } - } else { - - if ( !values[ index ] ) { - hidden = isHidden( elem ); - - if ( display && display !== "none" || !hidden ) { - jQuery._data( elem, "olddisplay", hidden ? display : jQuery.css( elem, "display" ) ); - } - } - } - } - - // Set the display of most of the elements in a second loop - // to avoid the constant reflow - for ( index = 0; index < length; index++ ) { - elem = elements[ index ]; - if ( !elem.style ) { - continue; - } - if ( !show || elem.style.display === "none" || elem.style.display === "" ) { - elem.style.display = show ? 
values[ index ] || "" : "none"; - } - } - - return elements; - } - - jQuery.fn.extend({ - css: function( name, value ) { - return jQuery.access( this, function( elem, name, value ) { - var len, styles, - map = {}, - i = 0; - - if ( jQuery.isArray( name ) ) { - styles = getStyles( elem ); - len = name.length; - - for ( ; i < len; i++ ) { - map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); - } - - return map; - } - - return value !== undefined ? - jQuery.style( elem, name, value ) : - jQuery.css( elem, name ); - }, name, value, arguments.length > 1 ); - }, - show: function() { - return showHide( this, true ); - }, - hide: function() { - return showHide( this ); - }, - toggle: function( state ) { - var bool = typeof state === "boolean"; - - return this.each(function() { - if ( bool ? state : isHidden( this ) ) { - jQuery( this ).show(); - } else { - jQuery( this ).hide(); - } - }); - } - }); - - jQuery.extend({ - // Add in style property hooks for overriding the default - // behavior of getting and setting a style property - cssHooks: { - opacity: { - get: function( elem, computed ) { - if ( computed ) { - // We should always get a number back from opacity - var ret = curCSS( elem, "opacity" ); - return ret === "" ? "1" : ret; - } - } - } - }, - - // Exclude the following css properties to add px - cssNumber: { - "columnCount": true, - "fillOpacity": true, - "fontWeight": true, - "lineHeight": true, - "opacity": true, - "orphans": true, - "widows": true, - "zIndex": true, - "zoom": true - }, - - // Add in properties whose names you wish to fix before - // setting or getting the value - cssProps: { - // normalize float css property - "float": jQuery.support.cssFloat ? 
"cssFloat" : "styleFloat" - }, - - // Get and set the style property on a DOM Node - style: function( elem, name, value, extra ) { - // Don't set styles on text and comment nodes - if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { - return; - } - - // Make sure that we're working with the right name - var ret, type, hooks, - origName = jQuery.camelCase( name ), - style = elem.style; - - name = jQuery.cssProps[ origName ] || ( jQuery.cssProps[ origName ] = vendorPropName( style, origName ) ); - - // gets hook for the prefixed version - // followed by the unprefixed version - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // Check if we're setting a value - if ( value !== undefined ) { - type = typeof value; - - // convert relative number strings (+= or -=) to relative numbers. #7345 - if ( type === "string" && (ret = rrelNum.exec( value )) ) { - value = ( ret[1] + 1 ) * ret[2] + parseFloat( jQuery.css( elem, name ) ); - // Fixes bug #9237 - type = "number"; - } - - // Make sure that NaN and null values aren't set. 
See: #7116 - if ( value == null || type === "number" && isNaN( value ) ) { - return; - } - - // If a number was passed in, add 'px' to the (except for certain CSS properties) - if ( type === "number" && !jQuery.cssNumber[ origName ] ) { - value += "px"; - } - - // Fixes #8908, it can be done more correctly by specifing setters in cssHooks, - // but it would mean to define eight (for every problematic property) identical functions - if ( !jQuery.support.clearCloneStyle && value === "" && name.indexOf("background") === 0 ) { - style[ name ] = "inherit"; - } - - // If a hook was provided, use that value, otherwise just set the specified value - if ( !hooks || !("set" in hooks) || (value = hooks.set( elem, value, extra )) !== undefined ) { - - // Wrapped to prevent IE from throwing errors when 'invalid' values are provided - // Fixes bug #5509 - try { - style[ name ] = value; - } catch(e) {} - } - - } else { - // If a hook was provided get the non-computed value from there - if ( hooks && "get" in hooks && (ret = hooks.get( elem, false, extra )) !== undefined ) { - return ret; - } - - // Otherwise just get the value from the style object - return style[ name ]; - } - }, - - css: function( elem, name, extra, styles ) { - var num, val, hooks, - origName = jQuery.camelCase( name ); - - // Make sure that we're working with the right name - name = jQuery.cssProps[ origName ] || ( jQuery.cssProps[ origName ] = vendorPropName( elem.style, origName ) ); - - // gets hook for the prefixed version - // followed by the unprefixed version - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // If a hook was provided get the computed value from there - if ( hooks && "get" in hooks ) { - val = hooks.get( elem, true, extra ); - } - - // Otherwise, if a way to get the computed value exists, use that - if ( val === undefined ) { - val = curCSS( elem, name, styles ); - } - - //convert "normal" to computed value - if ( val === "normal" && name in cssNormalTransform ) { - 
val = cssNormalTransform[ name ]; - } - - // Return, converting to number if forced or a qualifier was provided and val looks numeric - if ( extra === "" || extra ) { - num = parseFloat( val ); - return extra === true || jQuery.isNumeric( num ) ? num || 0 : val; - } - return val; - }, - - // A method for quickly swapping in/out CSS properties to get correct calculations - swap: function( elem, options, callback, args ) { - var ret, name, - old = {}; - - // Remember the old values, and insert the new ones - for ( name in options ) { - old[ name ] = elem.style[ name ]; - elem.style[ name ] = options[ name ]; - } - - ret = callback.apply( elem, args || [] ); - - // Revert the old values - for ( name in options ) { - elem.style[ name ] = old[ name ]; - } - - return ret; - } - }); - -// NOTE: we've included the "window" in window.getComputedStyle -// because jsdom on node.js will break without it. - if ( window.getComputedStyle ) { - getStyles = function( elem ) { - return window.getComputedStyle( elem, null ); - }; - - curCSS = function( elem, name, _computed ) { - var width, minWidth, maxWidth, - computed = _computed || getStyles( elem ), - - // getPropertyValue is only needed for .css('filter') in IE9, see #12537 - ret = computed ? 
computed.getPropertyValue( name ) || computed[ name ] : undefined, - style = elem.style; - - if ( computed ) { - - if ( ret === "" && !jQuery.contains( elem.ownerDocument, elem ) ) { - ret = jQuery.style( elem, name ); - } - - // A tribute to the "awesome hack by Dean Edwards" - // Chrome < 17 and Safari 5.0 uses "computed value" instead of "used value" for margin-right - // Safari 5.1.7 (at least) returns percentage for a larger set of values, but width seems to be reliably pixels - // this is against the CSSOM draft spec: http://dev.w3.org/csswg/cssom/#resolved-values - if ( rnumnonpx.test( ret ) && rmargin.test( name ) ) { - - // Remember the original values - width = style.width; - minWidth = style.minWidth; - maxWidth = style.maxWidth; - - // Put in the new values to get a computed value out - style.minWidth = style.maxWidth = style.width = ret; - ret = computed.width; - - // Revert the changed values - style.width = width; - style.minWidth = minWidth; - style.maxWidth = maxWidth; - } - } - - return ret; - }; - } else if ( document.documentElement.currentStyle ) { - getStyles = function( elem ) { - return elem.currentStyle; - }; - - curCSS = function( elem, name, _computed ) { - var left, rs, rsLeft, - computed = _computed || getStyles( elem ), - ret = computed ? 
computed[ name ] : undefined, - style = elem.style; - - // Avoid setting ret to empty string here - // so we don't default to auto - if ( ret == null && style && style[ name ] ) { - ret = style[ name ]; - } - - // From the awesome hack by Dean Edwards - // http://erik.eae.net/archives/2007/07/27/18.54.15/#comment-102291 - - // If we're not dealing with a regular pixel number - // but a number that has a weird ending, we need to convert it to pixels - // but not position css attributes, as those are proportional to the parent element instead - // and we can't measure the parent instead because it might trigger a "stacking dolls" problem - if ( rnumnonpx.test( ret ) && !rposition.test( name ) ) { - - // Remember the original values - left = style.left; - rs = elem.runtimeStyle; - rsLeft = rs && rs.left; - - // Put in the new values to get a computed value out - if ( rsLeft ) { - rs.left = elem.currentStyle.left; - } - style.left = name === "fontSize" ? "1em" : ret; - ret = style.pixelLeft + "px"; - - // Revert the changed values - style.left = left; - if ( rsLeft ) { - rs.left = rsLeft; - } - } - - return ret === "" ? "auto" : ret; - }; - } - - function setPositiveNumber( elem, value, subtract ) { - var matches = rnumsplit.exec( value ); - return matches ? - // Guard against undefined "subtract", e.g., when used as in cssHooks - Math.max( 0, matches[ 1 ] - ( subtract || 0 ) ) + ( matches[ 2 ] || "px" ) : - value; - } - - function augmentWidthOrHeight( elem, name, extra, isBorderBox, styles ) { - var i = extra === ( isBorderBox ? "border" : "content" ) ? - // If we already have the right measurement, avoid augmentation - 4 : - // Otherwise initialize for horizontal or vertical properties - name === "width" ? 
1 : 0, - - val = 0; - - for ( ; i < 4; i += 2 ) { - // both box models exclude margin, so add it if we want it - if ( extra === "margin" ) { - val += jQuery.css( elem, extra + cssExpand[ i ], true, styles ); - } - - if ( isBorderBox ) { - // border-box includes padding, so remove it if we want content - if ( extra === "content" ) { - val -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - } - - // at this point, extra isn't border nor margin, so remove border - if ( extra !== "margin" ) { - val -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - } else { - // at this point, extra isn't content, so add padding - val += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - - // at this point, extra isn't content nor padding, so add border - if ( extra !== "padding" ) { - val += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - } - } - - return val; - } - - function getWidthOrHeight( elem, name, extra ) { - - // Start with offset property, which is equivalent to the border-box value - var valueIsBorderBox = true, - val = name === "width" ? elem.offsetWidth : elem.offsetHeight, - styles = getStyles( elem ), - isBorderBox = jQuery.support.boxSizing && jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; - - // some non-html elements return undefined for offsetWidth, so check for null/undefined - // svg - https://bugzilla.mozilla.org/show_bug.cgi?id=649285 - // MathML - https://bugzilla.mozilla.org/show_bug.cgi?id=491668 - if ( val <= 0 || val == null ) { - // Fall back to computed then uncomputed css if necessary - val = curCSS( elem, name, styles ); - if ( val < 0 || val == null ) { - val = elem.style[ name ]; - } - - // Computed unit is not pixels. Stop here and return. 
- if ( rnumnonpx.test(val) ) { - return val; - } - - // we need the check for style in case a browser which returns unreliable values - // for getComputedStyle silently falls back to the reliable elem.style - valueIsBorderBox = isBorderBox && ( jQuery.support.boxSizingReliable || val === elem.style[ name ] ); - - // Normalize "", auto, and prepare for extra - val = parseFloat( val ) || 0; - } - - // use the active box-sizing model to add/subtract irrelevant styles - return ( val + - augmentWidthOrHeight( - elem, - name, - extra || ( isBorderBox ? "border" : "content" ), - valueIsBorderBox, - styles - ) - ) + "px"; - } - -// Try to determine the default display value of an element - function css_defaultDisplay( nodeName ) { - var doc = document, - display = elemdisplay[ nodeName ]; - - if ( !display ) { - display = actualDisplay( nodeName, doc ); - - // If the simple way fails, read from inside an iframe - if ( display === "none" || !display ) { - // Use the already-created iframe if possible - iframe = ( iframe || - jQuery("<iframe frameborder='0' width='0' height='0'/>") - .css( "cssText", "display:block !important" ) - ).appendTo( doc.documentElement ); - - // Always write a new HTML skeleton so Webkit and Firefox don't choke on reuse - doc = ( iframe[0].contentWindow || iframe[0].contentDocument ).document; - doc.write("<!doctype html><html><body>"); - doc.close(); - - display = actualDisplay( nodeName, doc ); - iframe.detach(); - } - - // Store the correct default display - elemdisplay[ nodeName ] = display; - } - - return display; - } - -// Called ONLY from within css_defaultDisplay - function actualDisplay( name, doc ) { - var elem = jQuery( doc.createElement( name ) ).appendTo( doc.body ), - display = jQuery.css( elem[0], "display" ); - elem.remove(); - return display; - } - - jQuery.each([ "height", "width" ], function( i, name ) { - jQuery.cssHooks[ name ] = { - get: function( elem, computed, extra ) { - if ( computed ) { - // certain elements can have 
dimension info if we invisibly show them - // however, it must have a current display style that would benefit from this - return elem.offsetWidth === 0 && rdisplayswap.test( jQuery.css( elem, "display" ) ) ? - jQuery.swap( elem, cssShow, function() { - return getWidthOrHeight( elem, name, extra ); - }) : - getWidthOrHeight( elem, name, extra ); - } - }, - - set: function( elem, value, extra ) { - var styles = extra && getStyles( elem ); - return setPositiveNumber( elem, value, extra ? - augmentWidthOrHeight( - elem, - name, - extra, - jQuery.support.boxSizing && jQuery.css( elem, "boxSizing", false, styles ) === "border-box", - styles - ) : 0 - ); - } - }; - }); - - if ( !jQuery.support.opacity ) { - jQuery.cssHooks.opacity = { - get: function( elem, computed ) { - // IE uses filters for opacity - return ropacity.test( (computed && elem.currentStyle ? elem.currentStyle.filter : elem.style.filter) || "" ) ? - ( 0.01 * parseFloat( RegExp.$1 ) ) + "" : - computed ? "1" : ""; - }, - - set: function( elem, value ) { - var style = elem.style, - currentStyle = elem.currentStyle, - opacity = jQuery.isNumeric( value ) ? "alpha(opacity=" + value * 100 + ")" : "", - filter = currentStyle && currentStyle.filter || style.filter || ""; - - // IE has trouble with opacity if it does not have layout - // Force it by setting the zoom level - style.zoom = 1; - - // if setting opacity to 1, and no other filters exist - attempt to remove filter attribute #6652 - // if value === "", then remove inline opacity #12685 - if ( ( value >= 1 || value === "" ) && - jQuery.trim( filter.replace( ralpha, "" ) ) === "" && - style.removeAttribute ) { - - // Setting style.filter to null, "" & " " still leave "filter:" in the cssText - // if "filter:" is present at all, clearType is disabled, we want to avoid this - // style.removeAttribute is IE Only, but so apparently is this code path... 
- style.removeAttribute( "filter" ); - - // if there is no filter style applied in a css rule or unset inline opacity, we are done - if ( value === "" || currentStyle && !currentStyle.filter ) { - return; - } - } - - // otherwise, set new filter values - style.filter = ralpha.test( filter ) ? - filter.replace( ralpha, opacity ) : - filter + " " + opacity; - } - }; - } - -// These hooks cannot be added until DOM ready because the support test -// for it is not run until after DOM ready - jQuery(function() { - if ( !jQuery.support.reliableMarginRight ) { - jQuery.cssHooks.marginRight = { - get: function( elem, computed ) { - if ( computed ) { - // WebKit Bug 13343 - getComputedStyle returns wrong value for margin-right - // Work around by temporarily setting element display to inline-block - return jQuery.swap( elem, { "display": "inline-block" }, - curCSS, [ elem, "marginRight" ] ); - } - } - }; - } - - // Webkit bug: https://bugs.webkit.org/show_bug.cgi?id=29084 - // getComputedStyle returns percent when specified for top/left/bottom/right - // rather than make the css module depend on the offset module, we just check for it here - if ( !jQuery.support.pixelPosition && jQuery.fn.position ) { - jQuery.each( [ "top", "left" ], function( i, prop ) { - jQuery.cssHooks[ prop ] = { - get: function( elem, computed ) { - if ( computed ) { - computed = curCSS( elem, prop ); - // if curCSS returns percentage, fallback to offset - return rnumnonpx.test( computed ) ? 
- jQuery( elem ).position()[ prop ] + "px" : - computed; - } - } - }; - }); - } - - }); - - if ( jQuery.expr && jQuery.expr.filters ) { - jQuery.expr.filters.hidden = function( elem ) { - // Support: Opera <= 12.12 - // Opera reports offsetWidths and offsetHeights less than zero on some elements - return elem.offsetWidth <= 0 && elem.offsetHeight <= 0 || - (!jQuery.support.reliableHiddenOffsets && ((elem.style && elem.style.display) || jQuery.css( elem, "display" )) === "none"); - }; - - jQuery.expr.filters.visible = function( elem ) { - return !jQuery.expr.filters.hidden( elem ); - }; - } - -// These hooks are used by animate to expand properties - jQuery.each({ - margin: "", - padding: "", - border: "Width" - }, function( prefix, suffix ) { - jQuery.cssHooks[ prefix + suffix ] = { - expand: function( value ) { - var i = 0, - expanded = {}, - - // assumes a single number if not a string - parts = typeof value === "string" ? value.split(" ") : [ value ]; - - for ( ; i < 4; i++ ) { - expanded[ prefix + cssExpand[ i ] + suffix ] = - parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; - } - - return expanded; - } - }; - - if ( !rmargin.test( prefix ) ) { - jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; - } - }); - var r20 = /%20/g, - rbracket = /[]$/, - rCRLF = /\r?\n/g, - rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, - rsubmittable = /^(?:input|select|textarea|keygen)/i; - - jQuery.fn.extend({ - serialize: function() { - return jQuery.param( this.serializeArray() ); - }, - serializeArray: function() { - return this.map(function(){ - // Can add propHook for "elements" to filter or add form elements - var elements = jQuery.prop( this, "elements" ); - return elements ? 
jQuery.makeArray( elements ) : this; - }) - .filter(function(){ - var type = this.type; - // Use .is(":disabled") so that fieldset[disabled] works - return this.name && !jQuery( this ).is( ":disabled" ) && - rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && - ( this.checked || !manipulation_rcheckableType.test( type ) ); - }) - .map(function( i, elem ){ - var val = jQuery( this ).val(); - - return val == null ? - null : - jQuery.isArray( val ) ? - jQuery.map( val, function( val ){ - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - }) : - { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - }).get(); - } - }); - -//Serialize an array of form elements or a set of -//key/values into a query string - jQuery.param = function( a, traditional ) { - var prefix, - s = [], - add = function( key, value ) { - // If value is a function, invoke it and return its value - value = jQuery.isFunction( value ) ? value() : ( value == null ? "" : value ); - s[ s.length ] = encodeURIComponent( key ) + "=" + encodeURIComponent( value ); - }; - - // Set traditional to true for jQuery <= 1.3.2 behavior. - if ( traditional === undefined ) { - traditional = jQuery.ajaxSettings && jQuery.ajaxSettings.traditional; - } - - // If an array was passed in, assume that it is an array of form elements. - if ( jQuery.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { - // Serialize the form elements - jQuery.each( a, function() { - add( this.name, this.value ); - }); - - } else { - // If traditional, encode the "old" way (the way 1.3.2 or older - // did it), otherwise encode params recursively. - for ( prefix in a ) { - buildParams( prefix, a[ prefix ], traditional, add ); - } - } - - // Return the resulting serialization - return s.join( "&" ).replace( r20, "+" ); - }; - - function buildParams( prefix, obj, traditional, add ) { - var name; - - if ( jQuery.isArray( obj ) ) { - // Serialize array item. 
- jQuery.each( obj, function( i, v ) { - if ( traditional || rbracket.test( prefix ) ) { - // Treat each array item as a scalar. - add( prefix, v ); - - } else { - // Item is non-scalar (array or object), encode its numeric index. - buildParams( prefix + "[" + ( typeof v === "object" ? i : "" ) + "]", v, traditional, add ); - } - }); - - } else if ( !traditional && jQuery.type( obj ) === "object" ) { - // Serialize object item. - for ( name in obj ) { - buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); - } - - } else { - // Serialize scalar item. - add( prefix, obj ); - } - } - jQuery.each( ("blur focus focusin focusout load resize scroll unload click dblclick " + - "mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave " + - "change select submit keydown keypress keyup error contextmenu").split(" "), function( i, name ) { - - // Handle event binding - jQuery.fn[ name ] = function( data, fn ) { - return arguments.length > 0 ? - this.on( name, null, data, fn ) : - this.trigger( name ); - }; - }); - - jQuery.fn.hover = function( fnOver, fnOut ) { - return this.mouseenter( fnOver ).mouseleave( fnOut || fnOver ); - }; - var - // Document location - ajaxLocParts, - ajaxLocation, - ajax_nonce = jQuery.now(), - - ajax_rquery = /?/, - rhash = /#.*$/, - rts = /([?&])_=[^&]*/, - rheaders = /^(.*?):[ \t]*([^\r\n]*)\r?$/mg, // IE leaves an \r character at EOL - // #7653, #8125, #8152: local protocol detection - rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, - rnoContent = /^(?:GET|HEAD)$/, - rprotocol = /^///, - rurl = /^([\w.+-]+:)(?://([^/?#:]*)(?::(\d+)|)|)/, - - // Keep a copy of the old load method - _load = jQuery.fn.load, - - /* Prefilters - * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) - * 2) These are called: - * - BEFORE asking for a transport - * - AFTER param serialization (s.data is a string if s.processData is true) - * 3) key is the dataType - * 4) the 
catchall symbol "*" can be used - * 5) execution will start with transport dataType and THEN continue down to "*" if needed - */ - prefilters = {}, - - /* Transports bindings - * 1) key is the dataType - * 2) the catchall symbol "*" can be used - * 3) selection will start with transport dataType and THEN go to "*" if needed - */ - transports = {}, - - // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression - allTypes = "*/".concat("*"); - -// #8138, IE may throw an exception when accessing -// a field from window.location if document.domain has been set - try { - ajaxLocation = location.href; - } catch( e ) { - // Use the href attribute of an A element - // since IE will modify it given document.location - ajaxLocation = document.createElement( "a" ); - ajaxLocation.href = ""; - ajaxLocation = ajaxLocation.href; - } - -// Segment location into parts - ajaxLocParts = rurl.exec( ajaxLocation.toLowerCase() ) || []; - -// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport - function addToPrefiltersOrTransports( structure ) { - - // dataTypeExpression is optional and defaults to "*" - return function( dataTypeExpression, func ) { - - if ( typeof dataTypeExpression !== "string" ) { - func = dataTypeExpression; - dataTypeExpression = "*"; - } - - var dataType, - i = 0, - dataTypes = dataTypeExpression.toLowerCase().match( core_rnotwhite ) || []; - - if ( jQuery.isFunction( func ) ) { - // For each dataType in the dataTypeExpression - while ( (dataType = dataTypes[i++]) ) { - // Prepend if requested - if ( dataType[0] === "+" ) { - dataType = dataType.slice( 1 ) || "*"; - (structure[ dataType ] = structure[ dataType ] || []).unshift( func ); - - // Otherwise append - } else { - (structure[ dataType ] = structure[ dataType ] || []).push( func ); - } - } - } - }; - } - -// Base inspection function for prefilters and transports - function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { - - var 
inspected = {}, - seekingTransport = ( structure === transports ); - - function inspect( dataType ) { - var selected; - inspected[ dataType ] = true; - jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { - var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); - if( typeof dataTypeOrTransport === "string" && !seekingTransport && !inspected[ dataTypeOrTransport ] ) { - options.dataTypes.unshift( dataTypeOrTransport ); - inspect( dataTypeOrTransport ); - return false; - } else if ( seekingTransport ) { - return !( selected = dataTypeOrTransport ); - } - }); - return selected; - } - - return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); - } - -// A special extend for ajax options -// that takes "flat" options (not to be deep extended) -// Fixes #9887 - function ajaxExtend( target, src ) { - var deep, key, - flatOptions = jQuery.ajaxSettings.flatOptions || {}; - - for ( key in src ) { - if ( src[ key ] !== undefined ) { - ( flatOptions[ key ] ? 
target : ( deep || (deep = {}) ) )[ key ] = src[ key ]; - } - } - if ( deep ) { - jQuery.extend( true, target, deep ); - } - - return target; - } - - jQuery.fn.load = function( url, params, callback ) { - if ( typeof url !== "string" && _load ) { - return _load.apply( this, arguments ); - } - - var selector, response, type, - self = this, - off = url.indexOf(" "); - - if ( off >= 0 ) { - selector = url.slice( off, url.length ); - url = url.slice( 0, off ); - } - - // If it's a function - if ( jQuery.isFunction( params ) ) { - - // We assume that it's the callback - callback = params; - params = undefined; - - // Otherwise, build a param string - } else if ( params && typeof params === "object" ) { - type = "POST"; - } - - // If we have elements to modify, make the request - if ( self.length > 0 ) { - jQuery.ajax({ - url: url, - - // if "type" variable is undefined, then "GET" method will be used - type: type, - dataType: "html", - data: params - }).done(function( responseText ) { - - // Save response for use in complete callback - response = arguments; - - self.html( selector ? 
- - // If a selector was specified, locate the right elements in a dummy div - // Exclude scripts to avoid IE 'Permission Denied' errors - jQuery("<div>").append( jQuery.parseHTML( responseText ) ).find( selector ) : - - // Otherwise use the full result - responseText ); - - }).complete( callback && function( jqXHR, status ) { - self.each( callback, response || [ jqXHR.responseText, status, jqXHR ] ); - }); - } - - return this; - }; - -// Attach a bunch of functions for handling common AJAX events - jQuery.each( [ "ajaxStart", "ajaxStop", "ajaxComplete", "ajaxError", "ajaxSuccess", "ajaxSend" ], function( i, type ){ - jQuery.fn[ type ] = function( fn ){ - return this.on( type, fn ); - }; - }); - - jQuery.each( [ "get", "post" ], function( i, method ) { - jQuery[ method ] = function( url, data, callback, type ) { - // shift arguments if data argument was omitted - if ( jQuery.isFunction( data ) ) { - type = type || callback; - callback = data; - data = undefined; - } - - return jQuery.ajax({ - url: url, - type: method, - dataType: type, - data: data, - success: callback - }); - }; - }); - - jQuery.extend({ - - // Counter for holding the number of active queries - active: 0, - - // Last-Modified header cache for next request - lastModified: {}, - etag: {}, - - ajaxSettings: { - url: ajaxLocation, - type: "GET", - isLocal: rlocalProtocol.test( ajaxLocParts[ 1 ] ), - global: true, - processData: true, - async: true, - contentType: "application/x-www-form-urlencoded; charset=UTF-8", - /* - timeout: 0, - data: null, - dataType: null, - username: null, - password: null, - cache: null, - throws: false, - traditional: false, - headers: {}, - */ - - accepts: { - "*": allTypes, - text: "text/plain", - html: "text/html", - xml: "application/xml, text/xml", - json: "application/json, text/javascript" - }, - - contents: { - xml: /xml/, - html: /html/, - json: /json/ - }, - - responseFields: { - xml: "responseXML", - text: "responseText" - }, - - // Data converters - // Keys 
separate source (or catchall "*") and destination types with a single space - converters: { - - // Convert anything to text - "* text": window.String, - - // Text to html (true = no transformation) - "text html": true, - - // Evaluate text as a json expression - "text json": jQuery.parseJSON, - - // Parse text as xml - "text xml": jQuery.parseXML - }, - - // For options that shouldn't be deep extended: - // you can add your own custom options here if - // and when you create one that shouldn't be - // deep extended (see ajaxExtend) - flatOptions: { - url: true, - context: true - } - }, - - // Creates a full fledged settings object into target - // with both ajaxSettings and settings fields. - // If target is omitted, writes into ajaxSettings. - ajaxSetup: function( target, settings ) { - return settings ? - - // Building a settings object - ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : - - // Extending ajaxSettings - ajaxExtend( jQuery.ajaxSettings, target ); - }, - - ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), - ajaxTransport: addToPrefiltersOrTransports( transports ), - - // Main method - ajax: function( url, options ) { - - // If url is an object, simulate pre-1.5 signature - if ( typeof url === "object" ) { - options = url; - url = undefined; - } - - // Force options to be an object - options = options || {}; - - var // Cross-domain detection vars - parts, - // Loop variable - i, - // URL without anti-cache param - cacheURL, - // Response headers as string - responseHeadersString, - // timeout handle - timeoutTimer, - - // To know if global events are to be dispatched - fireGlobals, - - transport, - // Response headers - responseHeaders, - // Create the final options object - s = jQuery.ajaxSetup( {}, options ), - // Callbacks context - callbackContext = s.context || s, - // Context for global events is callbackContext if it is a DOM node or jQuery collection - globalEventContext = s.context && ( callbackContext.nodeType || 
callbackContext.jquery ) ? - jQuery( callbackContext ) : - jQuery.event, - // Deferreds - deferred = jQuery.Deferred(), - completeDeferred = jQuery.Callbacks("once memory"), - // Status-dependent callbacks - statusCode = s.statusCode || {}, - // Headers (they are sent all at once) - requestHeaders = {}, - requestHeadersNames = {}, - // The jqXHR state - state = 0, - // Default abort message - strAbort = "canceled", - // Fake xhr - jqXHR = { - readyState: 0, - - // Builds headers hashtable if needed - getResponseHeader: function( key ) { - var match; - if ( state === 2 ) { - if ( !responseHeaders ) { - responseHeaders = {}; - while ( (match = rheaders.exec( responseHeadersString )) ) { - responseHeaders[ match[1].toLowerCase() ] = match[ 2 ]; - } - } - match = responseHeaders[ key.toLowerCase() ]; - } - return match == null ? null : match; - }, - - // Raw string - getAllResponseHeaders: function() { - return state === 2 ? responseHeadersString : null; - }, - - // Caches the header - setRequestHeader: function( name, value ) { - var lname = name.toLowerCase(); - if ( !state ) { - name = requestHeadersNames[ lname ] = requestHeadersNames[ lname ] || name; - requestHeaders[ name ] = value; - } - return this; - }, - - // Overrides response content-type header - overrideMimeType: function( type ) { - if ( !state ) { - s.mimeType = type; - } - return this; - }, - - // Status-dependent callbacks - statusCode: function( map ) { - var code; - if ( map ) { - if ( state < 2 ) { - for ( code in map ) { - // Lazy-add the new callback in a way that preserves old ones - statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; - } - } else { - // Execute the appropriate callbacks - jqXHR.always( map[ jqXHR.status ] ); - } - } - return this; - }, - - // Cancel the request - abort: function( statusText ) { - var finalText = statusText || strAbort; - if ( transport ) { - transport.abort( finalText ); - } - done( 0, finalText ); - return this; - } - }; - - // Attach deferreds - 
deferred.promise( jqXHR ).complete = completeDeferred.add; - jqXHR.success = jqXHR.done; - jqXHR.error = jqXHR.fail; - - // Remove hash character (#7531: and string promotion) - // Add protocol if not provided (#5866: IE7 issue with protocol-less urls) - // Handle falsy url in the settings object (#10093: consistency with old signature) - // We also use the url parameter if available - s.url = ( ( url || s.url || ajaxLocation ) + "" ).replace( rhash, "" ).replace( rprotocol, ajaxLocParts[ 1 ] + "//" ); - - // Alias method option to type as per ticket #12004 - s.type = options.method || options.type || s.method || s.type; - - // Extract dataTypes list - s.dataTypes = jQuery.trim( s.dataType || "*" ).toLowerCase().match( core_rnotwhite ) || [""]; - - // A cross-domain request is in order when we have a protocol:host:port mismatch - if ( s.crossDomain == null ) { - parts = rurl.exec( s.url.toLowerCase() ); - s.crossDomain = !!( parts && - ( parts[ 1 ] !== ajaxLocParts[ 1 ] || parts[ 2 ] !== ajaxLocParts[ 2 ] || - ( parts[ 3 ] || ( parts[ 1 ] === "http:" ? 80 : 443 ) ) != - ( ajaxLocParts[ 3 ] || ( ajaxLocParts[ 1 ] === "http:" ? 
80 : 443 ) ) ) - ); - } - - // Convert data if not already a string - if ( s.data && s.processData && typeof s.data !== "string" ) { - s.data = jQuery.param( s.data, s.traditional ); - } - - // Apply prefilters - inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); - - // If request was aborted inside a prefilter, stop there - if ( state === 2 ) { - return jqXHR; - } - - // We can fire global events as of now if asked to - fireGlobals = s.global; - - // Watch for a new set of requests - if ( fireGlobals && jQuery.active++ === 0 ) { - jQuery.event.trigger("ajaxStart"); - } - - // Uppercase the type - s.type = s.type.toUpperCase(); - - // Determine if request has content - s.hasContent = !rnoContent.test( s.type ); - - // Save the URL in case we're toying with the If-Modified-Since - // and/or If-None-Match header later on - cacheURL = s.url; - - // More options handling for requests with no content - if ( !s.hasContent ) { - - // If data is available, append data to url - if ( s.data ) { - cacheURL = ( s.url += ( ajax_rquery.test( cacheURL ) ? "&" : "?" ) + s.data ); - // #9682: remove data so that it's not used in an eventual retry - delete s.data; - } - - // Add anti-cache in url if needed - if ( s.cache === false ) { - s.url = rts.test( cacheURL ) ? - - // If there is already a '_' parameter, set its value - cacheURL.replace( rts, "$1_=" + ajax_nonce++ ) : - - // Otherwise add one to the end - cacheURL + ( ajax_rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ajax_nonce++; - } - } - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
- if ( s.ifModified ) { - if ( jQuery.lastModified[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); - } - if ( jQuery.etag[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); - } - } - - // Set the correct header, if data is being sent - if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { - jqXHR.setRequestHeader( "Content-Type", s.contentType ); - } - - // Set the Accepts header for the server, depending on the dataType - jqXHR.setRequestHeader( - "Accept", - s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[0] ] ? - s.accepts[ s.dataTypes[0] ] + ( s.dataTypes[ 0 ] !== "*" ? ", " + allTypes + "; q=0.01" : "" ) : - s.accepts[ "*" ] - ); - - // Check for headers option - for ( i in s.headers ) { - jqXHR.setRequestHeader( i, s.headers[ i ] ); - } - - // Allow custom headers/mimetypes and early abort - if ( s.beforeSend && ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || state === 2 ) ) { - // Abort if not done already and return - return jqXHR.abort(); - } - - // aborting is no longer a cancellation - strAbort = "abort"; - - // Install callbacks on deferreds - for ( i in { success: 1, error: 1, complete: 1 } ) { - jqXHR[ i ]( s[ i ] ); - } - - // Get transport - transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); - - // If no transport, we auto-abort - if ( !transport ) { - done( -1, "No Transport" ); - } else { - jqXHR.readyState = 1; - - // Send global event - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); - } - // Timeout - if ( s.async && s.timeout > 0 ) { - timeoutTimer = setTimeout(function() { - jqXHR.abort("timeout"); - }, s.timeout ); - } - - try { - state = 1; - transport.send( requestHeaders, done ); - } catch ( e ) { - // Propagate exception as error if not done - if ( state < 2 ) { - done( -1, e ); - // Simply rethrow otherwise - } else { - throw e; - } - } - } - - // Callback for 
when everything is done - function done( status, nativeStatusText, responses, headers ) { - var isSuccess, success, error, response, modified, - statusText = nativeStatusText; - - // Called once - if ( state === 2 ) { - return; - } - - // State is "done" now - state = 2; - - // Clear timeout if it exists - if ( timeoutTimer ) { - clearTimeout( timeoutTimer ); - } - - // Dereference transport for early garbage collection - // (no matter how long the jqXHR object will be used) - transport = undefined; - - // Cache response headers - responseHeadersString = headers || ""; - - // Set readyState - jqXHR.readyState = status > 0 ? 4 : 0; - - // Get response data - if ( responses ) { - response = ajaxHandleResponses( s, jqXHR, responses ); - } - - // If successful, handle type chaining - if ( status >= 200 && status < 300 || status === 304 ) { - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. - if ( s.ifModified ) { - modified = jqXHR.getResponseHeader("Last-Modified"); - if ( modified ) { - jQuery.lastModified[ cacheURL ] = modified; - } - modified = jqXHR.getResponseHeader("etag"); - if ( modified ) { - jQuery.etag[ cacheURL ] = modified; - } - } - - // if no content - if ( status === 204 ) { - isSuccess = true; - statusText = "nocontent"; - - // if not modified - } else if ( status === 304 ) { - isSuccess = true; - statusText = "notmodified"; - - // If we have data, let's convert it - } else { - isSuccess = ajaxConvert( s, response ); - statusText = isSuccess.state; - success = isSuccess.data; - error = isSuccess.error; - isSuccess = !error; - } - } else { - // We extract error from statusText - // then normalize statusText and status for non-aborts - error = statusText; - if ( status || !statusText ) { - statusText = "error"; - if ( status < 0 ) { - status = 0; - } - } - } - - // Set data for the fake xhr object - jqXHR.status = status; - jqXHR.statusText = ( nativeStatusText || statusText ) + ""; - - // Success/Error - if ( isSuccess 
) { - deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); - } else { - deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); - } - - // Status-dependent callbacks - jqXHR.statusCode( statusCode ); - statusCode = undefined; - - if ( fireGlobals ) { - globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", - [ jqXHR, s, isSuccess ? success : error ] ); - } - - // Complete - completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); - - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); - // Handle the global AJAX counter - if ( !( --jQuery.active ) ) { - jQuery.event.trigger("ajaxStop"); - } - } - } - - return jqXHR; - }, - - getScript: function( url, callback ) { - return jQuery.get( url, undefined, callback, "script" ); - }, - - getJSON: function( url, data, callback ) { - return jQuery.get( url, data, callback, "json" ); - } - }); - - /* Handles responses to an ajax request: - * - sets all responseXXX fields accordingly - * - finds the right dataType (mediates between content-type and expected dataType) - * - returns the corresponding response - */ - function ajaxHandleResponses( s, jqXHR, responses ) { - var firstDataType, ct, finalDataType, type, - contents = s.contents, - dataTypes = s.dataTypes, - responseFields = s.responseFields; - - // Fill responseXXX fields - for ( type in responseFields ) { - if ( type in responses ) { - jqXHR[ responseFields[type] ] = responses[ type ]; - } - } - - // Remove auto dataType and get content-type in the process - while( dataTypes[ 0 ] === "*" ) { - dataTypes.shift(); - if ( ct === undefined ) { - ct = s.mimeType || jqXHR.getResponseHeader("Content-Type"); - } - } - - // Check if we're dealing with a known content-type - if ( ct ) { - for ( type in contents ) { - if ( contents[ type ] && contents[ type ].test( ct ) ) { - dataTypes.unshift( type ); - break; - } - } - } - - // Check to see if we have a response for the expected dataType 
- if ( dataTypes[ 0 ] in responses ) { - finalDataType = dataTypes[ 0 ]; - } else { - // Try convertible dataTypes - for ( type in responses ) { - if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[0] ] ) { - finalDataType = type; - break; - } - if ( !firstDataType ) { - firstDataType = type; - } - } - // Or just use first one - finalDataType = finalDataType || firstDataType; - } - - // If we found a dataType - // We add the dataType to the list if needed - // and return the corresponding response - if ( finalDataType ) { - if ( finalDataType !== dataTypes[ 0 ] ) { - dataTypes.unshift( finalDataType ); - } - return responses[ finalDataType ]; - } - } - -// Chain conversions given the request and the original response - function ajaxConvert( s, response ) { - var conv2, current, conv, tmp, - converters = {}, - i = 0, - // Work with a copy of dataTypes in case we need to modify it for conversion - dataTypes = s.dataTypes.slice(), - prev = dataTypes[ 0 ]; - - // Apply the dataFilter if provided - if ( s.dataFilter ) { - response = s.dataFilter( response, s.dataType ); - } - - // Create converters map with lowercased keys - if ( dataTypes[ 1 ] ) { - for ( conv in s.converters ) { - converters[ conv.toLowerCase() ] = s.converters[ conv ]; - } - } - - // Convert to each sequential dataType, tolerating list modification - for ( ; (current = dataTypes[++i]); ) { - - // There's only work to do if current dataType is non-auto - if ( current !== "*" ) { - - // Convert response if prev dataType is non-auto and differs from current - if ( prev !== "*" && prev !== current ) { - - // Seek a direct converter - conv = converters[ prev + " " + current ] || converters[ "* " + current ]; - - // If none found, seek a pair - if ( !conv ) { - for ( conv2 in converters ) { - - // If conv2 outputs current - tmp = conv2.split(" "); - if ( tmp[ 1 ] === current ) { - - // If prev can be converted to accepted input - conv = converters[ prev + " " + tmp[ 0 ] ] || - converters[ "* " + 
tmp[ 0 ] ]; - if ( conv ) { - // Condense equivalence converters - if ( conv === true ) { - conv = converters[ conv2 ]; - - // Otherwise, insert the intermediate dataType - } else if ( converters[ conv2 ] !== true ) { - current = tmp[ 0 ]; - dataTypes.splice( i--, 0, current ); - } - - break; - } - } - } - } - - // Apply converter (if not an equivalence) - if ( conv !== true ) { - - // Unless errors are allowed to bubble, catch and return them - if ( conv && s["throws"] ) { - response = conv( response ); - } else { - try { - response = conv( response ); - } catch ( e ) { - return { state: "parsererror", error: conv ? e : "No conversion from " + prev + " to " + current }; - } - } - } - } - - // Update prev for next iteration - prev = current; - } - } - - return { state: "success", data: response }; - } -// Install script dataType - jQuery.ajaxSetup({ - accepts: { - script: "text/javascript, application/javascript, application/ecmascript, application/x-ecmascript" - }, - contents: { - script: /(?:java|ecma)script/ - }, - converters: { - "text script": function( text ) { - jQuery.globalEval( text ); - return text; - } - } - }); - -// Handle cache's special case and global - jQuery.ajaxPrefilter( "script", function( s ) { - if ( s.cache === undefined ) { - s.cache = false; - } - if ( s.crossDomain ) { - s.type = "GET"; - s.global = false; - } - }); - -// Bind script tag hack transport - jQuery.ajaxTransport( "script", function(s) { - - // This transport only deals with cross domain requests - if ( s.crossDomain ) { - - var script, - head = document.head || jQuery("head")[0] || document.documentElement; - - return { - - send: function( _, callback ) { - - script = document.createElement("script"); - - script.async = true; - - if ( s.scriptCharset ) { - script.charset = s.scriptCharset; - } - - script.src = s.url; - - // Attach handlers for all browsers - script.onload = script.onreadystatechange = function( _, isAbort ) { - - if ( isAbort || !script.readyState || 
/loaded|complete/.test( script.readyState ) ) { - - // Handle memory leak in IE - script.onload = script.onreadystatechange = null; - - // Remove the script - if ( script.parentNode ) { - script.parentNode.removeChild( script ); - } - - // Dereference the script - script = null; - - // Callback if not abort - if ( !isAbort ) { - callback( 200, "success" ); - } - } - }; - - // Circumvent IE6 bugs with base elements (#2709 and #4378) by prepending - // Use native DOM manipulation to avoid our domManip AJAX trickery - head.insertBefore( script, head.firstChild ); - }, - - abort: function() { - if ( script ) { - script.onload( undefined, true ); - } - } - }; - } - }); - var oldCallbacks = [], - rjsonp = /(=)?(?=&|$)|??/; - -// Default jsonp settings - jQuery.ajaxSetup({ - jsonp: "callback", - jsonpCallback: function() { - var callback = oldCallbacks.pop() || ( jQuery.expando + "_" + ( ajax_nonce++ ) ); - this[ callback ] = true; - return callback; - } - }); - -// Detect, normalize options and install callbacks for jsonp requests - jQuery.ajaxPrefilter( "json jsonp", function( s, originalSettings, jqXHR ) { - - var callbackName, overwritten, responseContainer, - jsonProp = s.jsonp !== false && ( rjsonp.test( s.url ) ? - "url" : - typeof s.data === "string" && !( s.contentType || "" ).indexOf("application/x-www-form-urlencoded") && rjsonp.test( s.data ) && "data" - ); - - // Handle iff the expected data type is "jsonp" or we have a parameter to set - if ( jsonProp || s.dataTypes[ 0 ] === "jsonp" ) { - - // Get callback name, remembering preexisting value associated with it - callbackName = s.jsonpCallback = jQuery.isFunction( s.jsonpCallback ) ? - s.jsonpCallback() : - s.jsonpCallback; - - // Insert callback into url or form data - if ( jsonProp ) { - s[ jsonProp ] = s[ jsonProp ].replace( rjsonp, "$1" + callbackName ); - } else if ( s.jsonp !== false ) { - s.url += ( ajax_rquery.test( s.url ) ? "&" : "?" 
) + s.jsonp + "=" + callbackName; - } - - // Use data converter to retrieve json after script execution - s.converters["script json"] = function() { - if ( !responseContainer ) { - jQuery.error( callbackName + " was not called" ); - } - return responseContainer[ 0 ]; - }; - - // force json dataType - s.dataTypes[ 0 ] = "json"; - - // Install callback - overwritten = window[ callbackName ]; - window[ callbackName ] = function() { - responseContainer = arguments; - }; - - // Clean-up function (fires after converters) - jqXHR.always(function() { - // Restore preexisting value - window[ callbackName ] = overwritten; - - // Save back as free - if ( s[ callbackName ] ) { - // make sure that re-using the options doesn't screw things around - s.jsonpCallback = originalSettings.jsonpCallback; - - // save the callback name for future use - oldCallbacks.push( callbackName ); - } - - // Call if it was a function and we have a response - if ( responseContainer && jQuery.isFunction( overwritten ) ) { - overwritten( responseContainer[ 0 ] ); - } - - responseContainer = overwritten = undefined; - }); - - // Delegate to script - return "script"; - } - }); - var xhrCallbacks, xhrSupported, - xhrId = 0, - // #5280: Internet Explorer will keep connections alive if we don't abort on unload - xhrOnUnloadAbort = window.ActiveXObject && function() { - // Abort all pending requests - var key; - for ( key in xhrCallbacks ) { - xhrCallbacks[ key ]( undefined, true ); - } - }; - -// Functions to create xhrs - function createStandardXHR() { - try { - return new window.XMLHttpRequest(); - } catch( e ) {} - } - - function createActiveXHR() { - try { - return new window.ActiveXObject("Microsoft.XMLHTTP"); - } catch( e ) {} - } - -// Create the request object -// (This is still attached to ajaxSettings for backward compatibility) - jQuery.ajaxSettings.xhr = window.ActiveXObject ? 
- /* Microsoft failed to properly - * implement the XMLHttpRequest in IE7 (can't request local files), - * so we use the ActiveXObject when it is available - * Additionally XMLHttpRequest can be disabled in IE7/IE8 so - * we need a fallback. - */ - function() { - return !this.isLocal && createStandardXHR() || createActiveXHR(); - } : - // For all other browsers, use the standard XMLHttpRequest object - createStandardXHR; - -// Determine support properties - xhrSupported = jQuery.ajaxSettings.xhr(); - jQuery.support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); - xhrSupported = jQuery.support.ajax = !!xhrSupported; - -// Create transport if the browser can provide an xhr - if ( xhrSupported ) { - - jQuery.ajaxTransport(function( s ) { - // Cross domain only allowed if supported through XMLHttpRequest - if ( !s.crossDomain || jQuery.support.cors ) { - - var callback; - - return { - send: function( headers, complete ) { - - // Get a new xhr - var handle, i, - xhr = s.xhr(); - - // Open the socket - // Passing null username, generates a login popup on Opera (#2865) - if ( s.username ) { - xhr.open( s.type, s.url, s.async, s.username, s.password ); - } else { - xhr.open( s.type, s.url, s.async ); - } - - // Apply custom fields if provided - if ( s.xhrFields ) { - for ( i in s.xhrFields ) { - xhr[ i ] = s.xhrFields[ i ]; - } - } - - // Override mime type if needed - if ( s.mimeType && xhr.overrideMimeType ) { - xhr.overrideMimeType( s.mimeType ); - } - - // X-Requested-With header - // For cross-domain requests, seeing as conditions for a preflight are - // akin to a jigsaw puzzle, we simply never set it to be sure. - // (it can always be set on a per-request basis or even using ajaxSetup) - // For same-domain requests, won't change header if already provided. 
- if ( !s.crossDomain && !headers["X-Requested-With"] ) { - headers["X-Requested-With"] = "XMLHttpRequest"; - } - - // Need an extra try/catch for cross domain requests in Firefox 3 - try { - for ( i in headers ) { - xhr.setRequestHeader( i, headers[ i ] ); - } - } catch( err ) {} - - // Do send the request - // This may raise an exception which is actually - // handled in jQuery.ajax (so no try/catch here) - xhr.send( ( s.hasContent && s.data ) || null ); - - // Listener - callback = function( _, isAbort ) { - var status, responseHeaders, statusText, responses; - - // Firefox throws exceptions when accessing properties - // of an xhr when a network error occurred - // http://helpful.knobs-dials.com/index.php/Component_returned_failure_code:_0x...) - try { - - // Was never called and is aborted or complete - if ( callback && ( isAbort || xhr.readyState === 4 ) ) { - - // Only called once - callback = undefined; - - // Do not keep as active anymore - if ( handle ) { - xhr.onreadystatechange = jQuery.noop; - if ( xhrOnUnloadAbort ) { - delete xhrCallbacks[ handle ]; - } - } - - // If it's an abort - if ( isAbort ) { - // Abort it manually if needed - if ( xhr.readyState !== 4 ) { - xhr.abort(); - } - } else { - responses = {}; - status = xhr.status; - responseHeaders = xhr.getAllResponseHeaders(); - - // When requesting binary data, IE6-9 will throw an exception - // on any attempt to access responseText (#11426) - if ( typeof xhr.responseText === "string" ) { - responses.text = xhr.responseText; - } - - // Firefox throws an exception when accessing - // statusText for faulty cross-domain requests - try { - statusText = xhr.statusText; - } catch( e ) { - // We normalize with Webkit giving an empty statusText - statusText = ""; - } - - // Filter status for non standard behaviors - - // If the request is local and we have data: assume a success - // (success with no data won't get notified, that's the best we - // can do given current implementations) - if ( !status && 
s.isLocal && !s.crossDomain ) { - status = responses.text ? 200 : 404; - // IE - #1450: sometimes returns 1223 when it should be 204 - } else if ( status === 1223 ) { - status = 204; - } - } - } - } catch( firefoxAccessException ) { - if ( !isAbort ) { - complete( -1, firefoxAccessException ); - } - } - - // Call complete if needed - if ( responses ) { - complete( status, statusText, responses, responseHeaders ); - } - }; - - if ( !s.async ) { - // if we're in sync mode we fire the callback - callback(); - } else if ( xhr.readyState === 4 ) { - // (IE6 & IE7) if it's in cache and has been - // retrieved directly we need to fire the callback - setTimeout( callback ); - } else { - handle = ++xhrId; - if ( xhrOnUnloadAbort ) { - // Create the active xhrs callbacks list if needed - // and attach the unload handler - if ( !xhrCallbacks ) { - xhrCallbacks = {}; - jQuery( window ).unload( xhrOnUnloadAbort ); - } - // Add to list of active xhrs callbacks - xhrCallbacks[ handle ] = callback; - } - xhr.onreadystatechange = callback; - } - }, - - abort: function() { - if ( callback ) { - callback( undefined, true ); - } - } - }; - } - }); - } - var fxNow, timerId, - rfxtypes = /^(?:toggle|show|hide)$/, - rfxnum = new RegExp( "^(?:([+-])=|)(" + core_pnum + ")([a-z%]*)$", "i" ), - rrun = /queueHooks$/, - animationPrefilters = [ defaultPrefilter ], - tweeners = { - "*": [function( prop, value ) { - var end, unit, - tween = this.createTween( prop, value ), - parts = rfxnum.exec( value ), - target = tween.cur(), - start = +target || 0, - scale = 1, - maxIterations = 20; - - if ( parts ) { - end = +parts[2]; - unit = parts[3] || ( jQuery.cssNumber[ prop ] ? 
"" : "px" ); - - // We need to compute starting value - if ( unit !== "px" && start ) { - // Iteratively approximate from a nonzero starting point - // Prefer the current property, because this process will be trivial if it uses the same units - // Fallback to end or a simple constant - start = jQuery.css( tween.elem, prop, true ) || end || 1; - - do { - // If previous iteration zeroed out, double until we get *something* - // Use a string for doubling factor so we don't accidentally see scale as unchanged below - scale = scale || ".5"; - - // Adjust and apply - start = start / scale; - jQuery.style( tween.elem, prop, start + unit ); - - // Update scale, tolerating zero or NaN from tween.cur() - // And breaking the loop if scale is unchanged or perfect, or if we've just had enough - } while ( scale !== (scale = tween.cur() / target) && scale !== 1 && --maxIterations ); - } - - tween.unit = unit; - tween.start = start; - // If a +=/-= token was provided, we're doing a relative animation - tween.end = parts[1] ? 
start + ( parts[1] + 1 ) * end : end; - } - return tween; - }] - }; - -// Animations created synchronously will run synchronously - function createFxNow() { - setTimeout(function() { - fxNow = undefined; - }); - return ( fxNow = jQuery.now() ); - } - - function createTweens( animation, props ) { - jQuery.each( props, function( prop, value ) { - var collection = ( tweeners[ prop ] || [] ).concat( tweeners[ "*" ] ), - index = 0, - length = collection.length; - for ( ; index < length; index++ ) { - if ( collection[ index ].call( animation, prop, value ) ) { - - // we're done with this property - return; - } - } - }); - } - - function Animation( elem, properties, options ) { - var result, - stopped, - index = 0, - length = animationPrefilters.length, - deferred = jQuery.Deferred().always( function() { - // don't match elem in the :animated selector - delete tick.elem; - }), - tick = function() { - if ( stopped ) { - return false; - } - var currentTime = fxNow || createFxNow(), - remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), - // archaic crash bug won't allow us to use 1 - ( 0.5 || 0 ) (#12497) - temp = remaining / animation.duration || 0, - percent = 1 - temp, - index = 0, - length = animation.tweens.length; - - for ( ; index < length ; index++ ) { - animation.tweens[ index ].run( percent ); - } - - deferred.notifyWith( elem, [ animation, percent, remaining ]); - - if ( percent < 1 && length ) { - return remaining; - } else { - deferred.resolveWith( elem, [ animation ] ); - return false; - } - }, - animation = deferred.promise({ - elem: elem, - props: jQuery.extend( {}, properties ), - opts: jQuery.extend( true, { specialEasing: {} }, options ), - originalProperties: properties, - originalOptions: options, - startTime: fxNow || createFxNow(), - duration: options.duration, - tweens: [], - createTween: function( prop, end ) { - var tween = jQuery.Tween( elem, animation.opts, prop, end, - animation.opts.specialEasing[ prop ] || 
animation.opts.easing ); - animation.tweens.push( tween ); - return tween; - }, - stop: function( gotoEnd ) { - var index = 0, - // if we are going to the end, we want to run all the tweens - // otherwise we skip this part - length = gotoEnd ? animation.tweens.length : 0; - if ( stopped ) { - return this; - } - stopped = true; - for ( ; index < length ; index++ ) { - animation.tweens[ index ].run( 1 ); - } - - // resolve when we played the last frame - // otherwise, reject - if ( gotoEnd ) { - deferred.resolveWith( elem, [ animation, gotoEnd ] ); - } else { - deferred.rejectWith( elem, [ animation, gotoEnd ] ); - } - return this; - } - }), - props = animation.props; - - propFilter( props, animation.opts.specialEasing ); - - for ( ; index < length ; index++ ) { - result = animationPrefilters[ index ].call( animation, elem, props, animation.opts ); - if ( result ) { - return result; - } - } - - createTweens( animation, props ); - - if ( jQuery.isFunction( animation.opts.start ) ) { - animation.opts.start.call( elem, animation ); - } - - jQuery.fx.timer( - jQuery.extend( tick, { - elem: elem, - anim: animation, - queue: animation.opts.queue - }) - ); - - // attach callbacks from options - return animation.progress( animation.opts.progress ) - .done( animation.opts.done, animation.opts.complete ) - .fail( animation.opts.fail ) - .always( animation.opts.always ); - } - - function propFilter( props, specialEasing ) { - var value, name, index, easing, hooks; - - // camelCase, specialEasing and expand cssHook pass - for ( index in props ) { - name = jQuery.camelCase( index ); - easing = specialEasing[ name ]; - value = props[ index ]; - if ( jQuery.isArray( value ) ) { - easing = value[ 1 ]; - value = props[ index ] = value[ 0 ]; - } - - if ( index !== name ) { - props[ name ] = value; - delete props[ index ]; - } - - hooks = jQuery.cssHooks[ name ]; - if ( hooks && "expand" in hooks ) { - value = hooks.expand( value ); - delete props[ name ]; - - // not quite $.extend, 
this wont overwrite keys already present. - // also - reusing 'index' from above because we have the correct "name" - for ( index in value ) { - if ( !( index in props ) ) { - props[ index ] = value[ index ]; - specialEasing[ index ] = easing; - } - } - } else { - specialEasing[ name ] = easing; - } - } - } - - jQuery.Animation = jQuery.extend( Animation, { - - tweener: function( props, callback ) { - if ( jQuery.isFunction( props ) ) { - callback = props; - props = [ "*" ]; - } else { - props = props.split(" "); - } - - var prop, - index = 0, - length = props.length; - - for ( ; index < length ; index++ ) { - prop = props[ index ]; - tweeners[ prop ] = tweeners[ prop ] || []; - tweeners[ prop ].unshift( callback ); - } - }, - - prefilter: function( callback, prepend ) { - if ( prepend ) { - animationPrefilters.unshift( callback ); - } else { - animationPrefilters.push( callback ); - } - } - }); - - function defaultPrefilter( elem, props, opts ) { - /*jshint validthis:true */ - var prop, index, length, - value, dataShow, toggle, - tween, hooks, oldfire, - anim = this, - style = elem.style, - orig = {}, - handled = [], - hidden = elem.nodeType && isHidden( elem ); - - // handle queue: false promises - if ( !opts.queue ) { - hooks = jQuery._queueHooks( elem, "fx" ); - if ( hooks.unqueued == null ) { - hooks.unqueued = 0; - oldfire = hooks.empty.fire; - hooks.empty.fire = function() { - if ( !hooks.unqueued ) { - oldfire(); - } - }; - } - hooks.unqueued++; - - anim.always(function() { - // doing this makes sure that the complete handler will be called - // before this completes - anim.always(function() { - hooks.unqueued--; - if ( !jQuery.queue( elem, "fx" ).length ) { - hooks.empty.fire(); - } - }); - }); - } - - // height/width overflow pass - if ( elem.nodeType === 1 && ( "height" in props || "width" in props ) ) { - // Make sure that nothing sneaks out - // Record all 3 overflow attributes because IE does not - // change the overflow attribute when overflowX and - 
// overflowY are set to the same value - opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; - - // Set display property to inline-block for height/width - // animations on inline elements that are having width/height animated - if ( jQuery.css( elem, "display" ) === "inline" && - jQuery.css( elem, "float" ) === "none" ) { - - // inline-level elements accept inline-block; - // block-level elements need to be inline with layout - if ( !jQuery.support.inlineBlockNeedsLayout || css_defaultDisplay( elem.nodeName ) === "inline" ) { - style.display = "inline-block"; - - } else { - style.zoom = 1; - } - } - } - - if ( opts.overflow ) { - style.overflow = "hidden"; - if ( !jQuery.support.shrinkWrapBlocks ) { - anim.always(function() { - style.overflow = opts.overflow[ 0 ]; - style.overflowX = opts.overflow[ 1 ]; - style.overflowY = opts.overflow[ 2 ]; - }); - } - } - - - // show/hide pass - for ( index in props ) { - value = props[ index ]; - if ( rfxtypes.exec( value ) ) { - delete props[ index ]; - toggle = toggle || value === "toggle"; - if ( value === ( hidden ? "hide" : "show" ) ) { - continue; - } - handled.push( index ); - } - } - - length = handled.length; - if ( length ) { - dataShow = jQuery._data( elem, "fxshow" ) || jQuery._data( elem, "fxshow", {} ); - if ( "hidden" in dataShow ) { - hidden = dataShow.hidden; - } - - // store state if its toggle - enables .stop().toggle() to "reverse" - if ( toggle ) { - dataShow.hidden = !hidden; - } - if ( hidden ) { - jQuery( elem ).show(); - } else { - anim.done(function() { - jQuery( elem ).hide(); - }); - } - anim.done(function() { - var prop; - jQuery._removeData( elem, "fxshow" ); - for ( prop in orig ) { - jQuery.style( elem, prop, orig[ prop ] ); - } - }); - for ( index = 0 ; index < length ; index++ ) { - prop = handled[ index ]; - tween = anim.createTween( prop, hidden ? 
dataShow[ prop ] : 0 ); - orig[ prop ] = dataShow[ prop ] || jQuery.style( elem, prop ); - - if ( !( prop in dataShow ) ) { - dataShow[ prop ] = tween.start; - if ( hidden ) { - tween.end = tween.start; - tween.start = prop === "width" || prop === "height" ? 1 : 0; - } - } - } - } - } - - function Tween( elem, options, prop, end, easing ) { - return new Tween.prototype.init( elem, options, prop, end, easing ); - } - jQuery.Tween = Tween; - - Tween.prototype = { - constructor: Tween, - init: function( elem, options, prop, end, easing, unit ) { - this.elem = elem; - this.prop = prop; - this.easing = easing || "swing"; - this.options = options; - this.start = this.now = this.cur(); - this.end = end; - this.unit = unit || ( jQuery.cssNumber[ prop ] ? "" : "px" ); - }, - cur: function() { - var hooks = Tween.propHooks[ this.prop ]; - - return hooks && hooks.get ? - hooks.get( this ) : - Tween.propHooks._default.get( this ); - }, - run: function( percent ) { - var eased, - hooks = Tween.propHooks[ this.prop ]; - - if ( this.options.duration ) { - this.pos = eased = jQuery.easing[ this.easing ]( - percent, this.options.duration * percent, 0, 1, this.options.duration - ); - } else { - this.pos = eased = percent; - } - this.now = ( this.end - this.start ) * eased + this.start; - - if ( this.options.step ) { - this.options.step.call( this.elem, this.now, this ); - } - - if ( hooks && hooks.set ) { - hooks.set( this ); - } else { - Tween.propHooks._default.set( this ); - } - return this; - } - }; - - Tween.prototype.init.prototype = Tween.prototype; - - Tween.propHooks = { - _default: { - get: function( tween ) { - var result; - - if ( tween.elem[ tween.prop ] != null && - (!tween.elem.style || tween.elem.style[ tween.prop ] == null) ) { - return tween.elem[ tween.prop ]; - } - - // passing an empty string as a 3rd parameter to .css will automatically - // attempt a parseFloat and fallback to a string if the parse fails - // so, simple values such as "10px" are parsed to 
Float. - // complex values such as "rotate(1rad)" are returned as is. - result = jQuery.css( tween.elem, tween.prop, "" ); - // Empty strings, null, undefined and "auto" are converted to 0. - return !result || result === "auto" ? 0 : result; - }, - set: function( tween ) { - // use step hook for back compat - use cssHook if its there - use .style if its - // available and use plain properties where available - if ( jQuery.fx.step[ tween.prop ] ) { - jQuery.fx.step[ tween.prop ]( tween ); - } else if ( tween.elem.style && ( tween.elem.style[ jQuery.cssProps[ tween.prop ] ] != null || jQuery.cssHooks[ tween.prop ] ) ) { - jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); - } else { - tween.elem[ tween.prop ] = tween.now; - } - } - } - }; - -// Remove in 2.0 - this supports IE8's panic based approach -// to setting things on disconnected nodes - - Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { - set: function( tween ) { - if ( tween.elem.nodeType && tween.elem.parentNode ) { - tween.elem[ tween.prop ] = tween.now; - } - } - }; - - jQuery.each([ "toggle", "show", "hide" ], function( i, name ) { - var cssFn = jQuery.fn[ name ]; - jQuery.fn[ name ] = function( speed, easing, callback ) { - return speed == null || typeof speed === "boolean" ? 
- cssFn.apply( this, arguments ) : - this.animate( genFx( name, true ), speed, easing, callback ); - }; - }); - - jQuery.fn.extend({ - fadeTo: function( speed, to, easing, callback ) { - - // show any hidden elements after setting opacity to 0 - return this.filter( isHidden ).css( "opacity", 0 ).show() - - // animate to the value specified - .end().animate({ opacity: to }, speed, easing, callback ); - }, - animate: function( prop, speed, easing, callback ) { - var empty = jQuery.isEmptyObject( prop ), - optall = jQuery.speed( speed, easing, callback ), - doAnimation = function() { - // Operate on a copy of prop so per-property easing won't be lost - var anim = Animation( this, jQuery.extend( {}, prop ), optall ); - doAnimation.finish = function() { - anim.stop( true ); - }; - // Empty animations, or finishing resolves immediately - if ( empty || jQuery._data( this, "finish" ) ) { - anim.stop( true ); - } - }; - doAnimation.finish = doAnimation; - - return empty || optall.queue === false ? 
- this.each( doAnimation ) : - this.queue( optall.queue, doAnimation ); - }, - stop: function( type, clearQueue, gotoEnd ) { - var stopQueue = function( hooks ) { - var stop = hooks.stop; - delete hooks.stop; - stop( gotoEnd ); - }; - - if ( typeof type !== "string" ) { - gotoEnd = clearQueue; - clearQueue = type; - type = undefined; - } - if ( clearQueue && type !== false ) { - this.queue( type || "fx", [] ); - } - - return this.each(function() { - var dequeue = true, - index = type != null && type + "queueHooks", - timers = jQuery.timers, - data = jQuery._data( this ); - - if ( index ) { - if ( data[ index ] && data[ index ].stop ) { - stopQueue( data[ index ] ); - } - } else { - for ( index in data ) { - if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { - stopQueue( data[ index ] ); - } - } - } - - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && (type == null || timers[ index ].queue === type) ) { - timers[ index ].anim.stop( gotoEnd ); - dequeue = false; - timers.splice( index, 1 ); - } - } - - // start the next in the queue if the last step wasn't forced - // timers currently will call their complete callbacks, which will dequeue - // but only if they were gotoEnd - if ( dequeue || !gotoEnd ) { - jQuery.dequeue( this, type ); - } - }); - }, - finish: function( type ) { - if ( type !== false ) { - type = type || "fx"; - } - return this.each(function() { - var index, - data = jQuery._data( this ), - queue = data[ type + "queue" ], - hooks = data[ type + "queueHooks" ], - timers = jQuery.timers, - length = queue ? 
queue.length : 0; - - // enable finishing flag on private data - data.finish = true; - - // empty the queue first - jQuery.queue( this, type, [] ); - - if ( hooks && hooks.cur && hooks.cur.finish ) { - hooks.cur.finish.call( this ); - } - - // look for any active animations, and finish them - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && timers[ index ].queue === type ) { - timers[ index ].anim.stop( true ); - timers.splice( index, 1 ); - } - } - - // look for any animations in the old queue and finish them - for ( index = 0; index < length; index++ ) { - if ( queue[ index ] && queue[ index ].finish ) { - queue[ index ].finish.call( this ); - } - } - - // turn off finishing flag - delete data.finish; - }); - } - }); - -// Generate parameters to create a standard animation - function genFx( type, includeWidth ) { - var which, - attrs = { height: type }, - i = 0; - - // if we include width, step value is 1 to do all cssExpand values, - // if we don't include width, step value is 2 to skip over Left and Right - includeWidth = includeWidth? 1 : 0; - for( ; i < 4 ; i += 2 - includeWidth ) { - which = cssExpand[ i ]; - attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; - } - - if ( includeWidth ) { - attrs.opacity = attrs.width = type; - } - - return attrs; - } - -// Generate shortcuts for custom animations - jQuery.each({ - slideDown: genFx("show"), - slideUp: genFx("hide"), - slideToggle: genFx("toggle"), - fadeIn: { opacity: "show" }, - fadeOut: { opacity: "hide" }, - fadeToggle: { opacity: "toggle" } - }, function( name, props ) { - jQuery.fn[ name ] = function( speed, easing, callback ) { - return this.animate( props, speed, easing, callback ); - }; - }); - - jQuery.speed = function( speed, easing, fn ) { - var opt = speed && typeof speed === "object" ? 
jQuery.extend( {}, speed ) : { - complete: fn || !fn && easing || - jQuery.isFunction( speed ) && speed, - duration: speed, - easing: fn && easing || easing && !jQuery.isFunction( easing ) && easing - }; - - opt.duration = jQuery.fx.off ? 0 : typeof opt.duration === "number" ? opt.duration : - opt.duration in jQuery.fx.speeds ? jQuery.fx.speeds[ opt.duration ] : jQuery.fx.speeds._default; - - // normalize opt.queue - true/undefined/null -> "fx" - if ( opt.queue == null || opt.queue === true ) { - opt.queue = "fx"; - } - - // Queueing - opt.old = opt.complete; - - opt.complete = function() { - if ( jQuery.isFunction( opt.old ) ) { - opt.old.call( this ); - } - - if ( opt.queue ) { - jQuery.dequeue( this, opt.queue ); - } - }; - - return opt; - }; - - jQuery.easing = { - linear: function( p ) { - return p; - }, - swing: function( p ) { - return 0.5 - Math.cos( p*Math.PI ) / 2; - } - }; - - jQuery.timers = []; - jQuery.fx = Tween.prototype.init; - jQuery.fx.tick = function() { - var timer, - timers = jQuery.timers, - i = 0; - - fxNow = jQuery.now(); - - for ( ; i < timers.length; i++ ) { - timer = timers[ i ]; - // Checks the timer has not already been removed - if ( !timer() && timers[ i ] === timer ) { - timers.splice( i--, 1 ); - } - } - - if ( !timers.length ) { - jQuery.fx.stop(); - } - fxNow = undefined; - }; - - jQuery.fx.timer = function( timer ) { - if ( timer() && jQuery.timers.push( timer ) ) { - jQuery.fx.start(); - } - }; - - jQuery.fx.interval = 13; - - jQuery.fx.start = function() { - if ( !timerId ) { - timerId = setInterval( jQuery.fx.tick, jQuery.fx.interval ); - } - }; - - jQuery.fx.stop = function() { - clearInterval( timerId ); - timerId = null; - }; - - jQuery.fx.speeds = { - slow: 600, - fast: 200, - // Default speed - _default: 400 - }; - -// Back Compat <1.8 extension point - jQuery.fx.step = {}; - - if ( jQuery.expr && jQuery.expr.filters ) { - jQuery.expr.filters.animated = function( elem ) { - return jQuery.grep(jQuery.timers, function( fn 
) { - return elem === fn.elem; - }).length; - }; - } - jQuery.fn.offset = function( options ) { - if ( arguments.length ) { - return options === undefined ? - this : - this.each(function( i ) { - jQuery.offset.setOffset( this, options, i ); - }); - } - - var docElem, win, - box = { top: 0, left: 0 }, - elem = this[ 0 ], - doc = elem && elem.ownerDocument; - - if ( !doc ) { - return; - } - - docElem = doc.documentElement; - - // Make sure it's not a disconnected DOM node - if ( !jQuery.contains( docElem, elem ) ) { - return box; - } - - // If we don't have gBCR, just use 0,0 rather than error - // BlackBerry 5, iOS 3 (original iPhone) - if ( typeof elem.getBoundingClientRect !== core_strundefined ) { - box = elem.getBoundingClientRect(); - } - win = getWindow( doc ); - return { - top: box.top + ( win.pageYOffset || docElem.scrollTop ) - ( docElem.clientTop || 0 ), - left: box.left + ( win.pageXOffset || docElem.scrollLeft ) - ( docElem.clientLeft || 0 ) - }; - }; - - jQuery.offset = { - - setOffset: function( elem, options, i ) { - var position = jQuery.css( elem, "position" ); - - // set position first, in-case top/left are set even on static elem - if ( position === "static" ) { - elem.style.position = "relative"; - } - - var curElem = jQuery( elem ), - curOffset = curElem.offset(), - curCSSTop = jQuery.css( elem, "top" ), - curCSSLeft = jQuery.css( elem, "left" ), - calculatePosition = ( position === "absolute" || position === "fixed" ) && jQuery.inArray("auto", [curCSSTop, curCSSLeft]) > -1, - props = {}, curPosition = {}, curTop, curLeft; - - // need to be able to calculate position if either top or left is auto and position is either absolute or fixed - if ( calculatePosition ) { - curPosition = curElem.position(); - curTop = curPosition.top; - curLeft = curPosition.left; - } else { - curTop = parseFloat( curCSSTop ) || 0; - curLeft = parseFloat( curCSSLeft ) || 0; - } - - if ( jQuery.isFunction( options ) ) { - options = options.call( elem, i, curOffset ); - 
} - - if ( options.top != null ) { - props.top = ( options.top - curOffset.top ) + curTop; - } - if ( options.left != null ) { - props.left = ( options.left - curOffset.left ) + curLeft; - } - - if ( "using" in options ) { - options.using.call( elem, props ); - } else { - curElem.css( props ); - } - } - }; - - - jQuery.fn.extend({ - - position: function() { - if ( !this[ 0 ] ) { - return; - } - - var offsetParent, offset, - parentOffset = { top: 0, left: 0 }, - elem = this[ 0 ]; - - // fixed elements are offset from window (parentOffset = {top:0, left: 0}, because it is it's only offset parent - if ( jQuery.css( elem, "position" ) === "fixed" ) { - // we assume that getBoundingClientRect is available when computed position is fixed - offset = elem.getBoundingClientRect(); - } else { - // Get *real* offsetParent - offsetParent = this.offsetParent(); - - // Get correct offsets - offset = this.offset(); - if ( !jQuery.nodeName( offsetParent[ 0 ], "html" ) ) { - parentOffset = offsetParent.offset(); - } - - // Add offsetParent borders - parentOffset.top += jQuery.css( offsetParent[ 0 ], "borderTopWidth", true ); - parentOffset.left += jQuery.css( offsetParent[ 0 ], "borderLeftWidth", true ); - } - - // Subtract parent offsets and element margins - // note: when an element has margin: auto the offsetLeft and marginLeft - // are the same in Safari causing offset.left to incorrectly be 0 - return { - top: offset.top - parentOffset.top - jQuery.css( elem, "marginTop", true ), - left: offset.left - parentOffset.left - jQuery.css( elem, "marginLeft", true) - }; - }, - - offsetParent: function() { - return this.map(function() { - var offsetParent = this.offsetParent || document.documentElement; - while ( offsetParent && ( !jQuery.nodeName( offsetParent, "html" ) && jQuery.css( offsetParent, "position") === "static" ) ) { - offsetParent = offsetParent.offsetParent; - } - return offsetParent || document.documentElement; - }); - } - }); - - -// Create scrollLeft and scrollTop 
methods - jQuery.each( {scrollLeft: "pageXOffset", scrollTop: "pageYOffset"}, function( method, prop ) { - var top = /Y/.test( prop ); - - jQuery.fn[ method ] = function( val ) { - return jQuery.access( this, function( elem, method, val ) { - var win = getWindow( elem ); - - if ( val === undefined ) { - return win ? (prop in win) ? win[ prop ] : - win.document.documentElement[ method ] : - elem[ method ]; - } - - if ( win ) { - win.scrollTo( - !top ? val : jQuery( win ).scrollLeft(), - top ? val : jQuery( win ).scrollTop() - ); - - } else { - elem[ method ] = val; - } - }, method, val, arguments.length, null ); - }; - }); - - function getWindow( elem ) { - return jQuery.isWindow( elem ) ? - elem : - elem.nodeType === 9 ? - elem.defaultView || elem.parentWindow : - false; - } -// Create innerHeight, innerWidth, height, width, outerHeight and outerWidth methods - jQuery.each( { Height: "height", Width: "width" }, function( name, type ) { - jQuery.each( { padding: "inner" + name, content: type, "": "outer" + name }, function( defaultExtra, funcName ) { - // margin is only for outerHeight, outerWidth - jQuery.fn[ funcName ] = function( margin, value ) { - var chainable = arguments.length && ( defaultExtra || typeof margin !== "boolean" ), - extra = defaultExtra || ( margin === true || value === true ? "margin" : "border" ); - - return jQuery.access( this, function( elem, type, value ) { - var doc; - - if ( jQuery.isWindow( elem ) ) { - // As of 5/8/2012 this will yield incorrect results for Mobile Safari, but there - // isn't a whole lot we can do. 
See pull request at this URL for discussion: - // https://github.com/jquery/jquery/pull/764 - return elem.document.documentElement[ "client" + name ]; - } - - // Get document width or height - if ( elem.nodeType === 9 ) { - doc = elem.documentElement; - - // Either scroll[Width/Height] or offset[Width/Height] or client[Width/Height], whichever is greatest - // unfortunately, this causes bug #3838 in IE6/8 only, but there is currently no good, small way to fix it. - return Math.max( - elem.body[ "scroll" + name ], doc[ "scroll" + name ], - elem.body[ "offset" + name ], doc[ "offset" + name ], - doc[ "client" + name ] - ); - } - - return value === undefined ? - // Get width or height on the element, requesting but not forcing parseFloat - jQuery.css( elem, type, extra ) : - - // Set width or height on the element - jQuery.style( elem, type, value, extra ); - }, type, chainable ? margin : undefined, chainable, null ); - }; - }); - }); -// Limit scope pollution from any deprecated API -// (function() { - -// })(); -// Expose jQuery to the global object - window.jQuery = window.$ = jQuery; - -// Expose jQuery as an AMD module, but only for AMD loaders that -// understand the issues with loading multiple versions of jQuery -// in a page that all might call define(). The loader will indicate -// they have special allowances for multiple jQuery versions by -// specifying define.amd.jQuery = true. Register as a named module, -// since jQuery can be concatenated with other files that may use define, -// but not use a proper concatenation script that understands anonymous -// AMD modules. A named AMD is safest and most robust way to register. -// Lowercase jquery is used because AMD module names are derived from -// file names, and jQuery is normally delivered in a lowercase file name. -// Do this after creating the global so that if an AMD module wants to call -// noConflict to hide this version of jQuery, it will work. 
- if ( typeof define === "function" && define.amd && define.amd.jQuery ) { - define( "jquery", [], function () { return jQuery; } ); - } - -})( window ); \ No newline at end of file diff --git a/modules/enterprise/gui/coregui/src/main/webapp/js/jquery-1.9.1.min.js b/modules/enterprise/gui/coregui/src/main/webapp/js/jquery-1.9.1.min.js deleted file mode 100644 index 5d9ee08..0000000 --- a/modules/enterprise/gui/coregui/src/main/webapp/js/jquery-1.9.1.min.js +++ /dev/null @@ -1,5 +0,0 @@ -/*! jQuery v1.9.1 | (c) 2005, 2012 jQuery Foundation, Inc. | jquery.org/license - //@ sourceMappingURL=jquery.min.map - */(function(e,t){var n,r,i=typeof t,o=e.document,a=e.location,s=e.jQuery,u=e.$,l={},c=[],p="1.9.1",f=c.concat,d=c.push,h=c.slice,g=c.indexOf,m=l.toString,y=l.hasOwnProperty,v=p.trim,b=function(e,t){return new b.fn.init(e,t,r)},x=/[+-]?(?:\d*.|)\d+(?:[eE][+-]?\d+|)/.source,w=/\S+/g,T=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,N=/^(?:(<[\w\W]+>)[^>]*|#([\w-]*))$/,C=/^<(\w+)\s*/?>(?:</\1>|)$/,k=/^[],:{}\s]*$/,E=/(?:^|:|,)(?:\s*[)+/g,S=/\(?:["\/bfnrt]|u[\da-fA-F]{4})/g,A=/"[^"\\r\n]*"|true|false|null|-?(?:\d+.|)\d+(?:[eE][+-]?\d+|)/g,j=/^-ms-/,D=/-([\da-z])/gi,L=function(e,t){return t.toUpperCase()},H=function(e){(o.addEventListener||"load"===e.type||"complete"===o.readyState)&&(q(),b.ready())},q=function(){o.addEventListener?(o.removeEventListener("DOMContentLoaded",H,!1),e.removeEventListener("load",H,!1)):(o.detachEvent("onreadystatechange",H),e.detachEvent("onload",H))};b.fn=b.prototype={jquery:p,constructor:b,init:function(e,n,r){var i,a;if(!e)return this;if("string"== typeof e){if(i="<"===e.charAt(0)&&">"===e.charAt(e.length-1)&&e.length>=3?[null,e,null]:N.exec(e),!i||!i[1]&&n)return!n||n.jquery?(n||r).find(e):this.constructor(n).find(e);if(i[1]){if(n=n instanceof b?n[0]:n,b.merge(this,b.parseHTML(i[1],n&&n.nodeType?n.ownerDocument||n:o,!0)),C.test(i[1])&&b.isPlainObject(n))for(i in n)b.isFunction(this[i])?this[i](n[i]):this.attr(i,n[i]);return 
this}if(a=o.getElementById(i[2]),a&&a.parentNode){if(a.id!==i[2])return r.find(e);this.length=1,this[0]=a}return this.context=o,this.selector=e,this}return e.nodeType?(this.context=this[0]=e,this.length=1,this):b.isFunction(e)?r.ready(e):(e.selector!==t&&(this.selector=e.selector,this.context=e.context),b.makeArray(e,this))},selector:"",length:0,size:function(){return this.length},toArray:function(){return h.call(this)},get:function(e){return null==e?this.toArray():0>e?this[this.length+e]:this[e]},pushStack:function(e){var t=b.merge(this.constructor(),e);return t.prevObject=this,t.context=this.cont ext,t},each:function(e,t){return b.each(this,e,t)},ready:function(e){return b.ready.promise().done(e),this},slice:function(){return this.pushStack(h.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(e){var t=this.length,n=+e+(0>e?t:0);return this.pushStack(n>=0&&t>n?[this[n]]:[])},map:function(e){return this.pushStack(b.map(this,function(t,n){return e.call(t,n,t)}))},end:function(){return this.prevObject||this.constructor(null)},push:d,sort:[].sort,splice:[].splice},b.fn.init.prototype=b.fn,b.extend=b.fn.extend=function(){var e,n,r,i,o,a,s=arguments[0]||{},u=1,l=arguments.length,c=!1;for("boolean"==typeof s&&(c=s,s=arguments[1]||{},u=2),"object"==typeof s||b.isFunction(s)||(s={}),l===u&&(s=this,--u);l>u;u++)if(null!=(o=arguments[u]))for(i in o)e=s[i],r=o[i],s!==r&&(c&&r&&(b.isPlainObject(r)||(n=b.isArray(r)))?(n?(n=!1,a=e&&b.isArray(e)?e:[]):a=e&&b.isPlainObject(e)?e:{},s[i]=b.extend(c,a,r)):r!==t&&(s[i]=r));return s} ,b.extend({noConflict:function(t){return e.$===b&&(e.$=u),t&&e.jQuery===b&&(e.jQuery=s),b},isReady:!1,readyWait:1,holdReady:function(e){e?b.readyWait++:b.ready(!0)},ready:function(e){if(e===!0?!--b.readyWait:!b.isReady){if(!o.body)return 
setTimeout(b.ready);b.isReady=!0,e!==!0&&--b.readyWait>0||(n.resolveWith(o,[b]),b.fn.trigger&&b(o).trigger("ready").off("ready"))}},isFunction:function(e){return"function"===b.type(e)},isArray:Array.isArray||function(e){return"array"===b.type(e)},isWindow:function(e){return null!=e&&e==e.window},isNumeric:function(e){return!isNaN(parseFloat(e))&&isFinite(e)},type:function(e){return null==e?e+"":"object"==typeof e||"function"==typeof e?l[m.call(e)]||"object":typeof e},isPlainObject:function(e){if(!e||"object"!==b.type(e)||e.nodeType||b.isWindow(e))return!1;try{if(e.constructor&&!y.call(e,"constructor")&&!y.call(e.constructor.prototype,"isPrototypeOf"))return!1}catch(n){return!1}var r;for(r in e);return r===t||y.call(e,r)},isEmptyObject:functi on(e){var t;for(t in e)return!1;return!0},error:function(e){throw Error(e)},parseHTML:function(e,t,n){if(!e||"string"!=typeof e)return null;"boolean"==typeof t&&(n=t,t=!1),t=t||o;var r=C.exec(e),i=!n&&[];return r?[t.createElement(r[1])]:(r=b.buildFragment([e],t,i),i&&b(i).remove(),b.merge([],r.childNodes))},parseJSON:function(n){return e.JSON&&e.JSON.parse?e.JSON.parse(n):null===n?n:"string"==typeof n&&(n=b.trim(n),n&&k.test(n.replace(S,"@").replace(A,"]").replace(E,"")))?Function("return "+n)():(b.error("Invalid JSON: "+n),t)},parseXML:function(n){var r,i;if(!n||"string"!=typeof n)return null;try{e.DOMParser?(i=new DOMParser,r=i.parseFromString(n,"text/xml")):(r=new ActiveXObject("Microsoft.XMLDOM"),r.async="false",r.loadXML(n))}catch(o){r=t}return r&&r.documentElement&&!r.getElementsByTagName("parsererror").length||b.error("Invalid XML: "+n),r},noop:function(){},globalEval:function(t){t&&b.trim(t)&&(e.execScript||function(t){e.eval.call(e,t)})(t)},camelCase:function(e){ret urn e.replace(j,"ms-").replace(D,L)},nodeName:function(e,t){return e.nodeName&&e.nodeName.toLowerCase()===t.toLowerCase()},each:function(e,t,n){var r,i=0,o=e.length,a=M(e);if(n){if(a){for(;o>i;i++)if(r=t.apply(e[i],n),r===!1)break}else for(i in 
e)if(r=t.apply(e[i],n),r===!1)break}else if(a){for(;o>i;i++)if(r=t.call(e[i],i,e[i]),r===!1)break}else for(i in e)if(r=t.call(e[i],i,e[i]),r===!1)break;return e},trim:v&&!v.call("\ufeff\u00a0")?function(e){return null==e?"":v.call(e)}:function(e){return null==e?"":(e+"").replace(T,"")},makeArray:function(e,t){var n=t||[];return null!=e&&(M(Object(e))?b.merge(n,"string"==typeof e?[e]:e):d.call(n,e)),n},inArray:function(e,t,n){var r;if(t){if(g)return g.call(t,e,n);for(r=t.length,n=n?0>n?Math.max(0,r+n):n:0;r>n;n++)if(n in t&&t[n]===e)return n}return-1},merge:function(e,n){var r=n.length,i=e.length,o=0;if("number"==typeof r)for(;r>o;o++)e[i++]=n[o];else while(n[o]!==t)e[i++]=n[o++];return e.length=i,e},grep:function(e,t,n){var r,i=[],o=0 ,a=e.length;for(n=!!n;a>o;o++)r=!!t(e[o],o),n!==r&&i.push(e[o]);return i},map:function(e,t,n){var r,i=0,o=e.length,a=M(e),s=[];if(a)for(;o>i;i++)r=t(e[i],i,n),null!=r&&(s[s.length]=r);else for(i in e)r=t(e[i],i,n),null!=r&&(s[s.length]=r);return f.apply([],s)},guid:1,proxy:function(e,n){var r,i,o;return"string"==typeof n&&(o=e[n],n=e,e=o),b.isFunction(e)?(r=h.call(arguments,2),i=function(){return e.apply(n||this,r.concat(h.call(arguments)))},i.guid=e.guid=e.guid||b.guid++,i):t},access:function(e,n,r,i,o,a,s){var u=0,l=e.length,c=null==r;if("object"===b.type(r)){o=!0;for(u in r)b.access(e,n,u,r[u],!0,a,s)}else if(i!==t&&(o=!0,b.isFunction(i)||(s=!0),c&&(s?(n.call(e,i),n=null):(c=n,n=function(e,t,n){return c.call(b(e),n)})),n))for(;l>u;u++)n(e[u],r,s?i:i.call(e[u],u,n(e[u],r)));return o?e:c?n.call(e):l?n(e[0],r):a},now:function(){return(new Date).getTime()}}),b.ready.promise=function(t){if(!n)if(n=b.Deferred(),"complete"===o.readyState)setTimeout(b.ready);else if(o.addEventLis tener)o.addEventListener("DOMContentLoaded",H,!1),e.addEventListener("load",H,!1);else{o.attachEvent("onreadystatechange",H),e.attachEvent("onload",H);var r=!1;try{r=null==e.frameElement&&o.documentElement}catch(i){}r&&r.doScroll&&function 
a(){if(!b.isReady){try{r.doScroll("left")}catch(e){return setTimeout(a,50)}q(),b.ready()}}()}return n.promise(t)},b.each("Boolean Number String Function Array Date RegExp Object Error".split(" "),function(e,t){l["[object "+t+"]"]=t.toLowerCase()});function M(e){var t=e.length,n=b.type(e);return b.isWindow(e)?!1:1===e.nodeType&&t?!0:"array"===n||"function"!==n&&(0===t||"number"==typeof t&&t>0&&t-1 in e)}r=b(o);var _={};function F(e){var t=_[e]={};return b.each(e.match(w)||[],function(e,n){t[n]=!0}),t}b.Callbacks=function(e){e="string"==typeof e?_[e]||F(e):b.extend({},e);var n,r,i,o,a,s,u=[],l=!e.once&&[],c=function(t){for(r=e.memory&&t,i=!0,a=s||0,s=0,o=u.length,n=!0;u&&o>a;a++)if(u[a].apply(t[0],t[1])===!1&&e.stopOnFalse){r=!1;break}n=!1,u &&(l?l.length&&c(l.shift()):r?u=[]:p.disable())},p={add:function(){if(u){var t=u.length;(function i(t){b.each(t,function(t,n){var r=b.type(n);"function"===r?e.unique&&p.has(n)||u.push(n):n&&n.length&&"string"!==r&&i(n)})})(arguments),n?o=u.length:r&&(s=t,c(r))}return this},remove:function(){return u&&b.each(arguments,function(e,t){var r;while((r=b.inArray(t,u,r))>-1)u.splice(r,1),n&&(o>=r&&o--,a>=r&&a--)}),this},has:function(e){return e?b.inArray(e,u)>-1:!(!u||!u.length)},empty:function(){return u=[],this},disable:function(){return u=l=r=t,this},disabled:function(){return!u},lock:function(){return l=t,r||p.disable(),this},locked:function(){return!l},fireWith:function(e,t){return t=t||[],t=[e,t.slice?t.slice():t],!u||i&&!l||(n?l.push(t):c(t)),this},fire:function(){return p.fireWith(this,arguments),this},fired:function(){return!!i}};return p},b.extend({Deferred:function(e){var t=[["resolve","done",b.Callbacks("once memory"),"resolved"],["reject","fail",b.Callbacks("once memory "),"rejected"],["notify","progress",b.Callbacks("memory")]],n="pending",r={state:function(){return n},always:function(){return i.done(arguments).fail(arguments),this},then:function(){var e=arguments;return b.Deferred(function(n){b.each(t,function(t,o){var 
a=o[0],s=b.isFunction(e[t])&&e[t];i[o[1]](function(){var e=s&&s.apply(this,arguments);e&&b.isFunction(e.promise)?e.promise().done(n.resolve).fail(n.reject).progress(n.notify):n[a+"With"](this===r?n.promise():this,s?[e]:arguments)})}),e=null}).promise()},promise:function(e){return null!=e?b.extend(e,r):r}},i={};return r.pipe=r.then,b.each(t,function(e,o){var a=o[2],s=o[3];r[o[1]]=a.add,s&&a.add(function(){n=s},t[1^e][2].disable,t[2][2].lock),i[o[0]]=function(){return i[o[0]+"With"](this===i?r:this,arguments),this},i[o[0]+"With"]=a.fireWith}),r.promise(i),e&&e.call(i,i),i},when:function(e){var t=0,n=h.call(arguments),r=n.length,i=1!==r||e&&b.isFunction(e.promise)?r:0,o=1===i?e:b.Deferred(),a=function(e,t,n){return function(r) {t[e]=this,n[e]=arguments.length>1?h.call(arguments):r,n===s?o.notifyWith(t,n):--i||o.resolveWith(t,n)}},s,u,l;if(r>1)for(s=Array(r),u=Array(r),l=Array(r);r>t;t++)n[t]&&b.isFunction(n[t].promise)?n[t].promise().done(a(t,l,n)).fail(o.reject).progress(a(t,u,s)):--i;return i||o.resolveWith(l,n),o.promise()}}),b.support=function(){var t,n,r,a,s,u,l,c,p,f,d=o.createElement("div");if(d.setAttribute("className","t"),d.innerHTML=" <link/><table></table><a href='/a'>a</a><input type='checkbox'/>",n=d.getElementsByTagName("*"),r=d.getElementsByTagName("a")[0],!n||!r||!n.length)return{};s=o.createElement("select"),l=s.appendChild(o.createElement("option")),a=d.getElementsByTagName("input")[0],r.style.cssText="top:1px;float:left;opacity:.5",t={getSetAttribute:"t"!==d.className,leadingWhitespace:3===d.firstChild.nodeType,tbody:!d.getElementsByTagName("tbody").length,htmlSerialize:!!d.getElementsByTagName("link").length,style:/top/.test(r.getAttribute("style")),hrefNormalized:"/a"===r.ge 
tAttribute("href"),opacity:/^0.5/.test(r.style.opacity),cssFloat:!!r.style.cssFloat,checkOn:!!a.value,optSelected:l.selected,enctype:!!o.createElement("form").enctype,html5Clone:"<:nav></:nav>"!==o.createElement("nav").cloneNode(!0).outerHTML,boxModel:"CSS1Compat"===o.compatMode,deleteExpando:!0,noCloneEvent:!0,inlineBlockNeedsLayout:!1,shrinkWrapBlocks:!1,reliableMarginRight:!0,boxSizingReliable:!0,pixelPosition:!1},a.checked=!0,t.noCloneChecked=a.cloneNode(!0).checked,s.disabled=!0,t.optDisabled=!l.disabled;try{delete d.test}catch(h){t.deleteExpando=!1}a=o.createElement("input"),a.setAttribute("value",""),t.input=""===a.getAttribute("value"),a.value="t",a.setAttribute("type","radio"),t.radioValue="t"===a.value,a.setAttribute("checked","t"),a.setAttribute("name","t"),u=o.createDocumentFragment(),u.appendChild(a),t.appendChecked=a.checked,t.checkClone=u.cloneNode(!0).cloneNode(!0).lastChild.checked,d.attachEvent&&(d.attachEvent("onclick",function(){t.noCloneEvent=!1}),d.clon eNode(!0).click());for(f in{submit:!0,change:!0,focusin:!0})d.setAttribute(c="on"+f,"t"),t[f+"Bubbles"]=c in e||d.attributes[c].expando===!1;return d.style.backgroundClip="content-box",d.cloneNode(!0).style.backgroundClip="",t.clearCloneStyle="content-box"===d.style.backgroundClip,b(function(){var n,r,a,s="padding:0;margin:0;border:0;display:block;box-sizing:content-box;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;",u=o.getElementsByTagName("body")[0];u&&(n=o.createElement("div"),n.style.cssText="border:0;width:0;height:0;position:absolute;top:0;left:-9999px;margin-top:1px",u.appendChild(n).appendChild(d),d.innerHTML="<table><tr><td></td><td>t</td></tr></table>",a=d.getElementsByTagName("td"),a[0].style.cssText="padding:0;margin:0;border:0;display:none",p=0===a[0].offsetHeight,a[0].style.display="",a[1].style.display="none",t.reliableHiddenOffsets=p&&0===a[0].offsetHeight,d.innerHTML="",d.style.cssText="box-sizing:border-box;-moz-box-sizing:border-box;-webkit-b 
ox-sizing:border-box;padding:1px;border:1px;display:block;width:4px;margin-top:1%;position:absolute;top:1%;",t.boxSizing=4===d.offsetWidth,t.doesNotIncludeMarginInBodyOffset=1!==u.offsetTop,e.getComputedStyle&&(t.pixelPosition="1%"!==(e.getComputedStyle(d,null)||{}).top,t.boxSizingReliable="4px"===(e.getComputedStyle(d,null)||{width:"4px"}).width,r=d.appendChild(o.createElement("div")),r.style.cssText=d.style.cssText=s,r.style.marginRight=r.style.width="0",d.style.width="1px",t.reliableMarginRight=!parseFloat((e.getComputedStyle(r,null)||{}).marginRight)),typeof d.style.zoom!==i&&(d.innerHTML="",d.style.cssText=s+"width:1px;padding:1px;display:inline;zoom:1",t.inlineBlockNeedsLayout=3===d.offsetWidth,d.style.display="block",d.innerHTML="<div></div>",d.firstChild.style.width="5px",t.shrinkWrapBlocks=3!==d.offsetWidth,t.inlineBlockNeedsLayout&&(u.style.zoom=1)),u.removeChild(n),n=d=a=r=null)}),n=s=u=l=r=a=null,t}();var O=/(?:{[\s\S]*}|[[\s\S]*])$/,B=/([A-Z])/g;function P(e ,n,r,i){if(b.acceptData(e)){var o,a,s=b.expando,u="string"==typeof n,l=e.nodeType,p=l?b.cache:e,f=l?e[s]:e[s]&&s;if(f&&p[f]&&(i||p[f].data)||!u||r!==t)return f||(l?e[s]=f=c.pop()||b.guid++:f=s),p[f]||(p[f]={},l||(p[f].toJSON=b.noop)),("object"==typeof n||"function"==typeof n)&&(i?p[f]=b.extend(p[f],n):p[f].data=b.extend(p[f].data,n)),o=p[f],i||(o.data||(o.data={}),o=o.data),r!==t&&(o[b.camelCase(n)]=r),u?(a=o[n],null==a&&(a=o[b.camelCase(n)])):a=o,a}}function R(e,t,n){if(b.acceptData(e)){var r,i,o,a=e.nodeType,s=a?b.cache:e,u=a?e[b.expando]:b.expando;if(s[u]){if(t&&(o=n?s[u]:s[u].data)){b.isArray(t)?t=t.concat(b.map(t,b.camelCase)):t in o?t=[t]:(t=b.camelCase(t),t=t in o?[t]:t.split(" "));for(r=0,i=t.length;i>r;r++)delete o[t[r]];if(!(n?$:b.isEmptyObject)(o))return}(n||(delete s[u].data,$(s[u])))&&(a?b.cleanData([e],!0):b.support.deleteExpando||s!=s.window?delete s[u]:s[u]=null)}}}b.extend({cache:{},expando:"jQuery"+(p+Math.random()).replace(/\D/g,""),noData:{embed:!0,object 
:"clsid:D27CDB6E-AE6D-11cf-96B8-444553540000",applet:!0},hasData:function(e){return e=e.nodeType?b.cache[e[b.expando]]:e[b.expando],!!e&&!$(e)},data:function(e,t,n){return P(e,t,n)},removeData:function(e,t){return R(e,t)},_data:function(e,t,n){return P(e,t,n,!0)},_removeData:function(e,t){return R(e,t,!0)},acceptData:function(e){if(e.nodeType&&1!==e.nodeType&&9!==e.nodeType)return!1;var t=e.nodeName&&b.noData[e.nodeName.toLowerCase()];return!t||t!==!0&&e.getAttribute("classid")===t}}),b.fn.extend({data:function(e,n){var r,i,o=this[0],a=0,s=null;if(e===t){if(this.length&&(s=b.data(o),1===o.nodeType&&!b._data(o,"parsedAttrs"))){for(r=o.attributes;r.length>a;a++)i=r[a].name,i.indexOf("data-")||(i=b.camelCase(i.slice(5)),W(o,i,s[i]));b._data(o,"parsedAttrs",!0)}return s}return"object"==typeof e?this.each(function(){b.data(this,e)}):b.access(this,function(n){return n===t?o?W(o,e,b.data(o,e)):null:(this.each(function(){b.data(this,e,n)}),t)},null,n,arguments.length>1,null,!0)},rem oveData:function(e){return this.each(function(){b.removeData(this,e)})}});function W(e,n,r){if(r===t&&1===e.nodeType){var i="data-"+n.replace(B,"-$1").toLowerCase();if(r=e.getAttribute(i),"string"==typeof r){try{r="true"===r?!0:"false"===r?!1:"null"===r?null:+r+""===r?+r:O.test(r)?b.parseJSON(r):r}catch(o){}b.data(e,n,r)}else r=t}return r}function $(e){var t;for(t in e)if(("data"!==t||!b.isEmptyObject(e[t]))&&"toJSON"!==t)return!1;return!0}b.extend({queue:function(e,n,r){var i;return e?(n=(n||"fx")+"queue",i=b._data(e,n),r&&(!i||b.isArray(r)?i=b._data(e,n,b.makeArray(r)):i.push(r)),i||[]):t},dequeue:function(e,t){t=t||"fx";var n=b.queue(e,t),r=n.length,i=n.shift(),o=b._queueHooks(e,t),a=function(){b.dequeue(e,t)};"inprogress"===i&&(i=n.shift(),r--),o.cur=i,i&&("fx"===t&&n.unshift("inprogress"),delete o.stop,i.call(e,a,o)),!r&&o&&o.empty.fire()},_queueHooks:function(e,t){var n=t+"queueHooks";return b._data(e,n)||b._data(e,n,{empty:b.Callbacks("once memory").add(function(){b._ 
removeData(e,t+"queue"),b._removeData(e,n)})})}}),b.fn.extend({queue:function(e,n){var r=2;return"string"!=typeof e&&(n=e,e="fx",r--),r>arguments.length?b.queue(this[0],e):n===t?this:this.each(function(){var t=b.queue(this,e,n);b._queueHooks(this,e),"fx"===e&&"inprogress"!==t[0]&&b.dequeue(this,e)})},dequeue:function(e){return this.each(function(){b.dequeue(this,e)})},delay:function(e,t){return e=b.fx?b.fx.speeds[e]||e:e,t=t||"fx",this.queue(t,function(t,n){var r=setTimeout(t,e);n.stop=function(){clearTimeout(r)}})},clearQueue:function(e){return this.queue(e||"fx",[])},promise:function(e,n){var r,i=1,o=b.Deferred(),a=this,s=this.length,u=function(){--i||o.resolveWith(a,[a])};"string"!=typeof e&&(n=e,e=t),e=e||"fx";while(s--)r=b._data(a[s],e+"queueHooks"),r&&r.empty&&(i++,r.empty.add(u));return u(),o.promise(n)}});var I,z,X=/[\t\r\n]/g,U=/\r/g,V=/^(?:input|select|textarea|button|object)$/i,Y=/^(?:a|area)$/i,J=/^(?:checked|selected|autofocus|autoplay|async|controls|defer|disab led|hidden|loop|multiple|open|readonly|required|scoped)$/i,G=/^(?:checked|selected)$/i,Q=b.support.getSetAttribute,K=b.support.input;b.fn.extend({attr:function(e,t){return b.access(this,b.attr,e,t,arguments.length>1)},removeAttr:function(e){return this.each(function(){b.removeAttr(this,e)})},prop:function(e,t){return b.access(this,b.prop,e,t,arguments.length>1)},removeProp:function(e){return e=b.propFix[e]||e,this.each(function(){try{this[e]=t,delete this[e]}catch(n){}})},addClass:function(e){var t,n,r,i,o,a=0,s=this.length,u="string"==typeof e&&e;if(b.isFunction(e))return this.each(function(t){b(this).addClass(e.call(this,t,this.className))});if(u)for(t=(e||"").match(w)||[];s>a;a++)if(n=this[a],r=1===n.nodeType&&(n.className?(" "+n.className+" ").replace(X," "):" ")){o=0;while(i=t[o++])0>r.indexOf(" "+i+" ")&&(r+=i+" ");n.className=b.trim(r)}return this},removeClass:function(e){var t,n,r,i,o,a=0,s=this.length,u=0===arguments.length||"string"==typeof e&&e;if(b.isFunction(e)) return 
this.each(function(t){b(this).removeClass(e.call(this,t,this.className))});if(u)for(t=(e||"").match(w)||[];s>a;a++)if(n=this[a],r=1===n.nodeType&&(n.className?(" "+n.className+" ").replace(X," "):"")){o=0;while(i=t[o++])while(r.indexOf(" "+i+" ")>=0)r=r.replace(" "+i+" "," ");n.className=e?b.trim(r):""}return this},toggleClass:function(e,t){var n=typeof e,r="boolean"==typeof t;return b.isFunction(e)?this.each(function(n){b(this).toggleClass(e.call(this,n,this.className,t),t)}):this.each(function(){if("string"===n){var o,a=0,s=b(this),u=t,l=e.match(w)||[];while(o=l[a++])u=r?u:!s.hasClass(o),s[u?"addClass":"removeClass"](o)}else(n===i||"boolean"===n)&&(this.className&&b._data(this,"__className__",this.className),this.className=this.className||e===!1?"":b._data(this,"__className__")||"")})},hasClass:function(e){var t=" "+e+" ",n=0,r=this.length;for(;r>n;n++)if(1===this[n].nodeType&&(" "+this[n].className+" ").replace(X," ").indexOf(t)>=0)return!0;return!1},val:function(e ){var n,r,i,o=this[0];{if(arguments.length)return i=b.isFunction(e),this.each(function(n){var o,a=b(this);1===this.nodeType&&(o=i?e.call(this,n,a.val()):e,null==o?o="":"number"==typeof o?o+="":b.isArray(o)&&(o=b.map(o,function(e){return null==e?"":e+""})),r=b.valHooks[this.type]||b.valHooks[this.nodeName.toLowerCase()],r&&"set"in r&&r.set(this,o,"value")!==t||(this.value=o))});if(o)return r=b.valHooks[o.type]||b.valHooks[o.nodeName.toLowerCase()],r&&"get"in r&&(n=r.get(o,"value"))!==t?n:(n=o.value,"string"==typeof n?n.replace(U,""):null==n?"":n)}}}),b.extend({valHooks:{option:{get:function(e){var t=e.attributes.value;return!t||t.specified?e.value:e.text}},select:{get:function(e){var t,n,r=e.options,i=e.selectedIndex,o="select-one"===e.type||0>i,a=o?null:[],s=o?i+1:r.length,u=0>i?s:o?i:0;for(;s>u;u++)if(n=r[u],!(!n.selected&&u!==i||(b.support.optDisabled?n.disabled:null!==n.getAttribute("disabled"))||n.parentNode.disabled&&b.nodeName(n.parentNode,"optgroup"))){if(t=b(n).val() ,o)return 
t;a.push(t)}return a},set:function(e,t){var n=b.makeArray(t);return b(e).find("option").each(function(){this.selected=b.inArray(b(this).val(),n)>=0}),n.length||(e.selectedIndex=-1),n}}},attr:function(e,n,r){var o,a,s,u=e.nodeType;if(e&&3!==u&&8!==u&&2!==u)return typeof e.getAttribute===i?b.prop(e,n,r):(a=1!==u||!b.isXMLDoc(e),a&&(n=n.toLowerCase(),o=b.attrHooks[n]||(J.test(n)?z:I)),r===t?o&&a&&"get"in o&&null!==(s=o.get(e,n))?s:(typeof e.getAttribute!==i&&(s=e.getAttribute(n)),null==s?t:s):null!==r?o&&a&&"set"in o&&(s=o.set(e,r,n))!==t?s:(e.setAttribute(n,r+""),r):(b.removeAttr(e,n),t))},removeAttr:function(e,t){var n,r,i=0,o=t&&t.match(w);if(o&&1===e.nodeType)while(n=o[i++])r=b.propFix[n]||n,J.test(n)?!Q&&G.test(n)?e[b.camelCase("default-"+n)]=e[r]=!1:e[r]=!1:b.attr(e,n,""),e.removeAttribute(Q?n:r)},attrHooks:{type:{set:function(e,t){if(!b.support.radioValue&&"radio"===t&&b.nodeName(e,"input")){var n=e.value;return e.setAttribute("type",t),n&&(e.value=n),t}}}},pro pFix:{tabindex:"tabIndex",readonly:"readOnly","for":"htmlFor","class":"className",maxlength:"maxLength",cellspacing:"cellSpacing",cellpadding:"cellPadding",rowspan:"rowSpan",colspan:"colSpan",usemap:"useMap",frameborder:"frameBorder",contenteditable:"contentEditable"},prop:function(e,n,r){var i,o,a,s=e.nodeType;if(e&&3!==s&&8!==s&&2!==s)return a=1!==s||!b.isXMLDoc(e),a&&(n=b.propFix[n]||n,o=b.propHooks[n]),r!==t?o&&"set"in o&&(i=o.set(e,r,n))!==t?i:e[n]=r:o&&"get"in o&&null!==(i=o.get(e,n))?i:e[n]},propHooks:{tabIndex:{get:function(e){var n=e.getAttributeNode("tabindex");return n&&n.specified?parseInt(n.value,10):V.test(e.nodeName)||Y.test(e.nodeName)&&e.href?0:t}}}}),z={get:function(e,n){var r=b.prop(e,n),i="boolean"==typeof r&&e.getAttribute(n),o="boolean"==typeof r?K&&Q?null!=i:G.test(n)?e[b.camelCase("default-"+n)]:!!i:e.getAttributeNode(n);return o&&o.value!==!1?n.toLowerCase():t},set:function(e,t,n){return t===!1?b.removeAttr(e,n):K&&Q||!G.test(n)?e.setAttribute(!Q&&b. 
propFix[n]||n,n):e[b.camelCase("default-"+n)]=e[n]=!0,n}},K&&Q||(b.attrHooks.value={get:function(e,n){var r=e.getAttributeNode(n);return b.nodeName(e,"input")?e.defaultValue:r&&r.specified?r.value:t},set:function(e,n,r){return b.nodeName(e,"input")?(e.defaultValue=n,t):I&&I.set(e,n,r)}}),Q||(I=b.valHooks.button={get:function(e,n){var r=e.getAttributeNode(n);return r&&("id"===n||"name"===n||"coords"===n?""!==r.value:r.specified)?r.value:t},set:function(e,n,r){var i=e.getAttributeNode(r);return i||e.setAttributeNode(i=e.ownerDocument.createAttribute(r)),i.value=n+="","value"===r||n===e.getAttribute(r)?n:t}},b.attrHooks.contenteditable={get:I.get,set:function(e,t,n){I.set(e,""===t?!1:t,n)}},b.each(["width","height"],function(e,n){b.attrHooks[n]=b.extend(b.attrHooks[n],{set:function(e,r){return""===r?(e.setAttribute(n,"auto"),r):t}})})),b.support.hrefNormalized||(b.each(["href","src","width","height"],function(e,n){b.attrHooks[n]=b.extend(b.attrHooks[n],{get:function(e){var r=e. getAttribute(n,2);return null==r?t:r}})}),b.each(["href","src"],function(e,t){b.propHooks[t]={get:function(e){return e.getAttribute(t,4)}}})),b.support.style||(b.attrHooks.style={get:function(e){return e.style.cssText||t},set:function(e,t){return e.style.cssText=t+""}}),b.support.optSelected||(b.propHooks.selected=b.extend(b.propHooks.selected,{get:function(e){var t=e.parentNode;return t&&(t.selectedIndex,t.parentNode&&t.parentNode.selectedIndex),null}})),b.support.enctype||(b.propFix.enctype="encoding"),b.support.checkOn||b.each(["radio","checkbox"],function(){b.valHooks[this]={get:function(e){return null===e.getAttribute("value")?"on":e.value}}}),b.each(["radio","checkbox"],function(){b.valHooks[this]=b.extend(b.valHooks[this],{set:function(e,n){return b.isArray(n)?e.checked=b.inArray(b(e).val(),n)>=0:t}})});var Z=/^(?:input|select|textarea)$/i,et=/^key/,tt=/^(?:mouse|contextmenu)|click/,nt=/^(?:focusinfocus|focusoutblur)$/,rt=/^([^.]*)(?:.(.+)|)$/;function it(){return!0} function 
ot(){return!1}b.event={global:{},add:function(e,n,r,o,a){var s,u,l,c,p,f,d,h,g,m,y,v=b._data(e);if(v){r.handler&&(c=r,r=c.handler,a=c.selector),r.guid||(r.guid=b.guid++),(u=v.events)||(u=v.events={}),(f=v.handle)||(f=v.handle=function(e){return typeof b===i||e&&b.event.triggered===e.type?t:b.event.dispatch.apply(f.elem,arguments)},f.elem=e),n=(n||"").match(w)||[""],l=n.length;while(l--)s=rt.exec(n[l])||[],g=y=s[1],m=(s[2]||"").split(".").sort(),p=b.event.special[g]||{},g=(a?p.delegateType:p.bindType)||g,p=b.event.special[g]||{},d=b.extend({type:g,origType:y,data:o,handler:r,guid:r.guid,selector:a,needsContext:a&&b.expr.match.needsContext.test(a),namespace:m.join(".")},c),(h=u[g])||(h=u[g]=[],h.delegateCount=0,p.setup&&p.setup.call(e,o,m,f)!==!1||(e.addEventListener?e.addEventListener(g,f,!1):e.attachEvent&&e.attachEvent("on"+g,f))),p.add&&(p.add.call(e,d),d.handler.guid||(d.handler.guid=r.guid)),a?h.splice(h.delegateCount++,0,d):h.push(d),b.event.global[g]=!0;e=null }},remove:function(e,t,n,r,i){var o,a,s,u,l,c,p,f,d,h,g,m=b.hasData(e)&&b._data(e);if(m&&(c=m.events)){t=(t||"").match(w)||[""],l=t.length;while(l--)if(s=rt.exec(t[l])||[],d=g=s[1],h=(s[2]||"").split(".").sort(),d){p=b.event.special[d]||{},d=(r?p.delegateType:p.bindType)||d,f=c[d]||[],s=s[2]&&RegExp("(^|\.)"+h.join("\.(?:.*\.|)")+"(\.|$)"),u=o=f.length;while(o--)a=f[o],!i&&g!==a.origType||n&&n.guid!==a.guid||s&&!s.test(a.namespace)||r&&r!==a.selector&&("**"!==r||!a.selector)||(f.splice(o,1),a.selector&&f.delegateCount--,p.remove&&p.remove.call(e,a));u&&!f.length&&(p.teardown&&p.teardown.call(e,h,m.handle)!==!1||b.removeEvent(e,d,m.handle),delete c[d])}else for(d in c)b.event.remove(e,d+t[l],n,r,!0);b.isEmptyObject(c)&&(delete m.handle,b._removeData(e,"events"))}},trigger:function(n,r,i,a){var s,u,l,c,p,f,d,h=[i||o],g=y.call(n,"type")?n.type:n,m=y.call(n,"namespace")?n.namespace.split("."):[];if(l=f=i=i||o,3!==i.nodeType&&8!==i.nodeType&&!nt.test(g+b.event.triggered)&&(g. 
indexOf(".")>=0&&(m=g.split("."),g=m.shift(),m.sort()),u=0>g.indexOf(":")&&"on"+g,n=n[b.expando]?n:new b.Event(g,"object"==typeof n&&n),n.isTrigger=!0,n.namespace=m.join("."),n.namespace_re=n.namespace?RegExp("(^|\.)"+m.join("\.(?:.*\.|)")+"(\.|$)"):null,n.result=t,n.target||(n.target=i),r=null==r?[n]:b.makeArray(r,[n]),p=b.event.special[g]||{},a||!p.trigger||p.trigger.apply(i,r)!==!1)){if(!a&&!p.noBubble&&!b.isWindow(i)){for(c=p.delegateType||g,nt.test(c+g)||(l=l.parentNode);l;l=l.parentNode)h.push(l),f=l;f===(i.ownerDocument||o)&&h.push(f.defaultView||f.parentWindow||e)}d=0;while((l=h[d++])&&!n.isPropagationStopped())n.type=d>1?c:p.bindType||g,s=(b._data(l,"events")||{})[n.type]&&b._data(l,"handle"),s&&s.apply(l,r),s=u&&l[u],s&&b.acceptData(l)&&s.apply&&s.apply(l,r)===!1&&n.preventDefault();if(n.type=g,!(a||n.isDefaultPrevented()||p._default&&p._default.apply(i.ownerDocument,r)!==!1||"click"===g&&b.nodeName(i,"a")||!b.acceptData(i)||!u||!i[g]||b.isWindow(i))){f=i[u],f& &(i[u]=null),b.event.triggered=g;try{i[g]()}catch(v){}b.event.triggered=t,f&&(i[u]=f)}return n.result}},dispatch:function(e){e=b.event.fix(e);var n,r,i,o,a,s=[],u=h.call(arguments),l=(b._data(this,"events")||{})[e.type]||[],c=b.event.special[e.type]||{};if(u[0]=e,e.delegateTarget=this,!c.preDispatch||c.preDispatch.call(this,e)!==!1){s=b.event.handlers.call(this,e,l),n=0;while((o=s[n++])&&!e.isPropagationStopped()){e.currentTarget=o.elem,a=0;while((i=o.handlers[a++])&&!e.isImmediatePropagationStopped())(!e.namespace_re||e.namespace_re.test(i.namespace))&&(e.handleObj=i,e.data=i.data,r=((b.event.special[i.origType]||{}).handle||i.handler).apply(o.elem,u),r!==t&&(e.result=r)===!1&&(e.preventDefault(),e.stopPropagation()))}return c.postDispatch&&c.postDispatch.call(this,e),e.result}},handlers:function(e,n){var r,i,o,a,s=[],u=n.delegateCount,l=e.target;if(u&&l.nodeType&&(!e.button||"click"!==e.type))for(;l!=this;l=l.parentNode||this)if(1===l.nodeType&&(l.disabled!==!0||"click"!== 
e.type)){for(o=[],a=0;u>a;a++)i=n[a],r=i.selector+" ",o[r]===t&&(o[r]=i.needsContext?b(r,this).index(l)>=0:b.find(r,this,null,[l]).length),o[r]&&o.push(i);o.length&&s.push({elem:l,handlers:o})}return n.length>u&&s.push({elem:this,handlers:n.slice(u)}),s},fix:function(e){if(e[b.expando])return e;var t,n,r,i=e.type,a=e,s=this.fixHooks[i];s||(this.fixHooks[i]=s=tt.test(i)?this.mouseHooks:et.test(i)?this.keyHooks:{}),r=s.props?this.props.concat(s.props):this.props,e=new b.Event(a),t=r.length;while(t--)n=r[t],e[n]=a[n];return e.target||(e.target=a.srcElement||o),3===e.target.nodeType&&(e.target=e.target.parentNode),e.metaKey=!!e.metaKey,s.filter?s.filter(e,a):e},props:"altKey bubbles cancelable ctrlKey currentTarget eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "),fixHooks:{},keyHooks:{props:"char charCode key keyCode".split(" "),filter:function(e,t){return null==e.which&&(e.which=null!=t.charCode?t.charCode:t.keyCode),e}},mouseHooks:{props:"butto n buttons clientX clientY fromElement offsetX offsetY pageX pageY screenX screenY toElement".split(" "),filter:function(e,n){var r,i,a,s=n.button,u=n.fromElement;return null==e.pageX&&null!=n.clientX&&(i=e.target.ownerDocument||o,a=i.documentElement,r=i.body,e.pageX=n.clientX+(a&&a.scrollLeft||r&&r.scrollLeft||0)-(a&&a.clientLeft||r&&r.clientLeft||0),e.pageY=n.clientY+(a&&a.scrollTop||r&&r.scrollTop||0)-(a&&a.clientTop||r&&r.clientTop||0)),!e.relatedTarget&&u&&(e.relatedTarget=u===e.target?n.toElement:u),e.which||s===t||(e.which=1&s?1:2&s?3:4&s?2:0),e}},special:{load:{noBubble:!0},click:{trigger:function(){return b.nodeName(this,"input")&&"checkbox"===this.type&&this.click?(this.click(),!1):t}},focus:{trigger:function(){if(this!==o.activeElement&&this.focus)try{return this.focus(),!1}catch(e){}},delegateType:"focusin"},blur:{trigger:function(){return this===o.activeElement&&this.blur?(this.blur(),!1):t},delegateType:"focusout"},beforeunload:{postDispatch:function(e){e.result 
!==t&&(e.originalEvent.returnValue=e.result)}}},simulate:function(e,t,n,r){var i=b.extend(new b.Event,n,{type:e,isSimulated:!0,originalEvent:{}});r?b.event.trigger(i,null,t):b.event.dispatch.call(t,i),i.isDefaultPrevented()&&n.preventDefault()}},b.removeEvent=o.removeEventListener?function(e,t,n){e.removeEventListener&&e.removeEventListener(t,n,!1)}:function(e,t,n){var r="on"+t;e.detachEvent&&(typeof e[r]===i&&(e[r]=null),e.detachEvent(r,n))},b.Event=function(e,n){return this instanceof b.Event?(e&&e.type?(this.originalEvent=e,this.type=e.type,this.isDefaultPrevented=e.defaultPrevented||e.returnValue===!1||e.getPreventDefault&&e.getPreventDefault()?it:ot):this.type=e,n&&b.extend(this,n),this.timeStamp=e&&e.timeStamp||b.now(),this[b.expando]=!0,t):new b.Event(e,n)},b.Event.prototype={isDefaultPrevented:ot,isPropagationStopped:ot,isImmediatePropagationStopped:ot,preventDefault:function(){var e=this.originalEvent;this.isDefaultPrevented=it,e&&(e.preventDefault?e.preventDefault( ):e.returnValue=!1)},stopPropagation:function(){var e=this.originalEvent;this.isPropagationStopped=it,e&&(e.stopPropagation&&e.stopPropagation(),e.cancelBubble=!0)},stopImmediatePropagation:function(){this.isImmediatePropagationStopped=it,this.stopPropagation()}},b.each({mouseenter:"mouseover",mouseleave:"mouseout"},function(e,t){b.event.special[e]={delegateType:t,bindType:t,handle:function(e){var n,r=this,i=e.relatedTarget,o=e.handleObj; - return(!i||i!==r&&!b.contains(r,i))&&(e.type=o.origType,n=o.handler.apply(this,arguments),e.type=t),n}}}),b.support.submitBubbles||(b.event.special.submit={setup:function(){return b.nodeName(this,"form")?!1:(b.event.add(this,"click._submit keypress._submit",function(e){var n=e.target,r=b.nodeName(n,"input")||b.nodeName(n,"button")?n.form:t;r&&!b._data(r,"submitBubbles")&&(b.event.add(r,"submit._submit",function(e){e._submit_bubble=!0}),b._data(r,"submitBubbles",!0))}),t)},postDispatch:function(e){e._submit_bubble&&(delete 
e._submit_bubble,this.parentNode&&!e.isTrigger&&b.event.simulate("submit",this.parentNode,e,!0))},teardown:function(){return b.nodeName(this,"form")?!1:(b.event.remove(this,"._submit"),t)}}),b.support.changeBubbles||(b.event.special.change={setup:function(){return Z.test(this.nodeName)?(("checkbox"===this.type||"radio"===this.type)&&(b.event.add(this,"propertychange._change",function(e){"checked"===e.originalEvent.propertyName&&(this._just_changed=!0)}) ,b.event.add(this,"click._change",function(e){this._just_changed&&!e.isTrigger&&(this._just_changed=!1),b.event.simulate("change",this,e,!0)})),!1):(b.event.add(this,"beforeactivate._change",function(e){var t=e.target;Z.test(t.nodeName)&&!b._data(t,"changeBubbles")&&(b.event.add(t,"change._change",function(e){!this.parentNode||e.isSimulated||e.isTrigger||b.event.simulate("change",this.parentNode,e,!0)}),b._data(t,"changeBubbles",!0))}),t)},handle:function(e){var n=e.target;return this!==n||e.isSimulated||e.isTrigger||"radio"!==n.type&&"checkbox"!==n.type?e.handleObj.handler.apply(this,arguments):t},teardown:function(){return b.event.remove(this,"._change"),!Z.test(this.nodeName)}}),b.support.focusinBubbles||b.each({focus:"focusin",blur:"focusout"},function(e,t){var n=0,r=function(e){b.event.simulate(t,e.target,b.event.fix(e),!0)};b.event.special[t]={setup:function(){0===n++&&o.addEventListener(e,r,!0)},teardown:function(){0===--n&&o.removeEventListener(e,r,!0)}}}),b.fn.exten d({on:function(e,n,r,i,o){var a,s;if("object"==typeof e){"string"!=typeof n&&(r=r||n,n=t);for(a in e)this.on(a,n,r,e[a],o);return this}if(null==r&&null==i?(i=n,r=n=t):null==i&&("string"==typeof n?(i=r,r=t):(i=r,r=n,n=t)),i===!1)i=ot;else if(!i)return this;return 1===o&&(s=i,i=function(e){return b().off(e),s.apply(this,arguments)},i.guid=s.guid||(s.guid=b.guid++)),this.each(function(){b.event.add(this,e,i,r,n)})},one:function(e,t,n,r){return this.on(e,t,n,r,1)},off:function(e,n,r){var i,o;if(e&&e.preventDefault&&e.handleObj)return 
i=e.handleObj,b(e.delegateTarget).off(i.namespace?i.origType+"."+i.namespace:i.origType,i.selector,i.handler),this;if("object"==typeof e){for(o in e)this.off(o,n,e[o]);return this}return(n===!1||"function"==typeof n)&&(r=n,n=t),r===!1&&(r=ot),this.each(function(){b.event.remove(this,e,r,n)})},bind:function(e,t,n){return this.on(e,null,t,n)},unbind:function(e,t){return this.off(e,null,t)},delegate:function(e,t,n,r){return this.on(t,e,n,r)},undelegate :function(e,t,n){return 1===arguments.length?this.off(e,"**"):this.off(t,e||"**",n)},trigger:function(e,t){return this.each(function(){b.event.trigger(e,t,this)})},triggerHandler:function(e,n){var r=this[0];return r?b.event.trigger(e,n,r,!0):t}}),function(e,t){var n,r,i,o,a,s,u,l,c,p,f,d,h,g,m,y,v,x="sizzle"+-new Date,w=e.document,T={},N=0,C=0,k=it(),E=it(),S=it(),A=typeof t,j=1<<31,D=[],L=D.pop,H=D.push,q=D.slice,M=D.indexOf||function(e){var t=0,n=this.length;for(;n>t;t++)if(this[t]===e)return t;return-1},_="[\x20\t\r\n\f]",F="(?:\\.|[\w-]|[^\x00-\xa0])+",O=F.replace("w","w#"),B="([*^$|!~]?=)",P="\["+_+"*("+F+")"+_+"*(?:"+B+_+"*(?:(['"])((?:\\.|[^\\])*?)\3|("+O+")|)|)"+_+"*\]",R=":("+F+")(?:\(((['"])((?:\\.|[^\\])*?)\3|((?:\\.|[^\\()[\]]|"+P.replace(3,8)+")*)|.*)\)|)",W=RegExp("^"+_+"+|((?:^|[^\\])(?:\\.)*)"+_+"+$","g"),$=RegExp("^"+_+"*,"+_+"*"),I=RegExp("^"+_+"*([\x20\t\r\n\f>+~])"+_+"*"),z=RegExp(R),X=RegExp("^"+O+"$"),U={ID:RegExp ("^#("+F+")"),CLASS:RegExp("^\.("+F+")"),NAME:RegExp("^\[name=['"]?("+F+")['"]?\]"),TAG:RegExp("^("+F.replace("w","w*")+")"),ATTR:RegExp("^"+P),PSEUDO:RegExp("^"+R),CHILD:RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\("+_+"*(even|odd|(([+-]|)(\d*)n|)"+_+"*(?:([+-]|)"+_+"*(\d+)|))"+_+"*\)|)","i"),needsContext:RegExp("^"+_+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\("+_+"*((?:-\d)?\d*)"+_+"*\)|)(?=[^-]|$)","i")},V=/[\x20\t\r\n\f]*[+~]/,Y=/^[^{]+{\s*[native 
code/,J=/^(?:#([\w-]+)|(\w+)|.([\w-]+))$/,G=/^(?:input|select|textarea|button)$/i,Q=/^h\d$/i,K=/'|\/g,Z=/=[\x20\t\r\n\f]*([^'"]]*)[\x20\t\r\n\f]*]/g,et=/\([\da-fA-F]{1,6}[\x20\t\r\n\f]?|.)/g,tt=function(e,t){var n="0x"+t-65536;return n!==n?t:0>n?String.fromCharCode(n+65536):String.fromCharCode(55296|n>>10,56320|1023&n)};try{q.call(w.documentElement.childNodes,0)[0].nodeType}catch(nt){q=function(e){var t,n=[];while(t=this[e++])n.push(t);return n}}function rt(e){return Y.test(e+"")}functi on it(){var e,t=[];return e=function(n,r){return t.push(n+=" ")>i.cacheLength&&delete e[t.shift()],e[n]=r}}function ot(e){return e[x]=!0,e}function at(e){var t=p.createElement("div");try{return e(t)}catch(n){return!1}finally{t=null}}function st(e,t,n,r){var i,o,a,s,u,l,f,g,m,v;if((t?t.ownerDocument||t:w)!==p&&c(t),t=t||p,n=n||[],!e||"string"!=typeof e)return n;if(1!==(s=t.nodeType)&&9!==s)return[];if(!d&&!r){if(i=J.exec(e))if(a=i[1]){if(9===s){if(o=t.getElementById(a),!o||!o.parentNode)return n;if(o.id===a)return n.push(o),n}else if(t.ownerDocument&&(o=t.ownerDocument.getElementById(a))&&y(t,o)&&o.id===a)return n.push(o),n}else{if(i[2])return H.apply(n,q.call(t.getElementsByTagName(e),0)),n;if((a=i[3])&&T.getByClassName&&t.getElementsByClassName)return H.apply(n,q.call(t.getElementsByClassName(a),0)),n}if(T.qsa&&!h.test(e)){if(f=!0,g=x,m=t,v=9===s&&e,1===s&&"object"!==t.nodeName.toLowerCase()){l=ft(e),(f=t.getAttribute("id"))?g=f.replace(K,"\$&"):t.setAttribute("id",g),g="[ id='"+g+"'] ",u=l.length;while(u--)l[u]=g+dt(l[u]);m=V.test(e)&&t.parentNode||t,v=l.join(",")}if(v)try{return H.apply(n,q.call(m.querySelectorAll(v),0)),n}catch(b){}finally{f||t.removeAttribute("id")}}}return wt(e.replace(W,"$1"),t,n,r)}a=st.isXML=function(e){var t=e&&(e.ownerDocument||e).documentElement;return t?"HTML"!==t.nodeName:!1},c=st.setDocument=function(e){var n=e?e.ownerDocument||e:w;return n!==p&&9===n.nodeType&&n.documentElement?(p=n,f=n.documentElement,d=a(n),T.tagNameNoComments=at(function(e){return 
e.appendChild(n.createComment("")),!e.getElementsByTagName("*").length}),T.attributes=at(function(e){e.innerHTML="<select></select>";var t=typeof e.lastChild.getAttribute("multiple");return"boolean"!==t&&"string"!==t}),T.getByClassName=at(function(e){return e.innerHTML="<div class='hidden e'></div><div class='hidden'></div>",e.getElementsByClassName&&e.getElementsByClassName("e").length?(e.lastChild.className="e",2===e.getElementsByClassName("e").length):!1}),T.getB yName=at(function(e){e.id=x+0,e.innerHTML="<a name='"+x+"'></a><div name='"+x+"'></div>",f.insertBefore(e,f.firstChild);var t=n.getElementsByName&&n.getElementsByName(x).length===2+n.getElementsByName(x+0).length;return T.getIdNotName=!n.getElementById(x),f.removeChild(e),t}),i.attrHandle=at(function(e){return e.innerHTML="<a href='#'></a>",e.firstChild&&typeof e.firstChild.getAttribute!==A&&"#"===e.firstChild.getAttribute("href")})?{}:{href:function(e){return e.getAttribute("href",2)},type:function(e){return e.getAttribute("type")}},T.getIdNotName?(i.find.ID=function(e,t){if(typeof t.getElementById!==A&&!d){var n=t.getElementById(e);return n&&n.parentNode?[n]:[]}},i.filter.ID=function(e){var t=e.replace(et,tt);return function(e){return e.getAttribute("id")===t}}):(i.find.ID=function(e,n){if(typeof n.getElementById!==A&&!d){var r=n.getElementById(e);return r?r.id===e||typeof r.getAttributeNode!==A&&r.getAttributeNode("id").value===e?[r]:t:[]}},i.filter.ID=function(e){var t=e .replace(et,tt);return function(e){var n=typeof e.getAttributeNode!==A&&e.getAttributeNode("id");return n&&n.value===t}}),i.find.TAG=T.tagNameNoComments?function(e,n){return typeof n.getElementsByTagName!==A?n.getElementsByTagName(e):t}:function(e,t){var n,r=[],i=0,o=t.getElementsByTagName(e);if("*"===e){while(n=o[i++])1===n.nodeType&&r.push(n);return r}return o},i.find.NAME=T.getByName&&function(e,n){return typeof n.getElementsByName!==A?n.getElementsByName(name):t},i.find.CLASS=T.getByClassName&&function(e,n){return typeof 
n.getElementsByClassName===A||d?t:n.getElementsByClassName(e)},g=[],h=[":focus"],(T.qsa=rt(n.querySelectorAll))&&(at(function(e){e.innerHTML="<select><option selected=''></option></select>",e.querySelectorAll("[selected]").length||h.push("\["+_+"*(?:checked|disabled|ismap|multiple|readonly|selected|value)"),e.querySelectorAll(":checked").length||h.push(":checked")}),at(function(e){e.innerHTML="<input type='hidden' i=''/>",e.querySelectorAll("[i^='']").l ength&&h.push("[*^$]="+_+"*(?:""|'')"),e.querySelectorAll(":enabled").length||h.push(":enabled",":disabled"),e.querySelectorAll("*,:x"),h.push(",.*:")})),(T.matchesSelector=rt(m=f.matchesSelector||f.mozMatchesSelector||f.webkitMatchesSelector||f.oMatchesSelector||f.msMatchesSelector))&&at(function(e){T.disconnectedMatch=m.call(e,"div"),m.call(e,"[s!='']:x"),g.push("!=",R)}),h=RegExp(h.join("|")),g=RegExp(g.join("|")),y=rt(f.contains)||f.compareDocumentPosition?function(e,t){var n=9===e.nodeType?e.documentElement:e,r=t&&t.parentNode;return e===r||!(!r||1!==r.nodeType||!(n.contains?n.contains(r):e.compareDocumentPosition&&16&e.compareDocumentPosition(r)))}:function(e,t){if(t)while(t=t.parentNode)if(t===e)return!0;return!1},v=f.compareDocumentPosition?function(e,t){var r;return e===t?(u=!0,0):(r=t.compareDocumentPosition&&e.compareDocumentPosition&&e.compareDocumentPosition(t))?1&r||e.parentNode&&11===e.parentNode.nodeType?e===n||y(w,e)?-1:t===n||y(w,t)?1:0:4&r?-1:1:e.compare DocumentPosition?-1:1}:function(e,t){var r,i=0,o=e.parentNode,a=t.parentNode,s=[e],l=[t];if(e===t)return u=!0,0;if(!o||!a)return e===n?-1:t===n?1:o?-1:a?1:0;if(o===a)return ut(e,t);r=e;while(r=r.parentNode)s.unshift(r);r=t;while(r=r.parentNode)l.unshift(r);while(s[i]===l[i])i++;return i?ut(s[i],l[i]):s[i]===w?-1:l[i]===w?1:0},u=!1,[0,0].sort(v),T.detectDuplicates=u,p):p},st.matches=function(e,t){return 
st(e,null,null,t)},st.matchesSelector=function(e,t){if((e.ownerDocument||e)!==p&&c(e),t=t.replace(Z,"='$1']"),!(!T.matchesSelector||d||g&&g.test(t)||h.test(t)))try{var n=m.call(e,t);if(n||T.disconnectedMatch||e.document&&11!==e.document.nodeType)return n}catch(r){}return st(t,p,null,[e]).length>0},st.contains=function(e,t){return(e.ownerDocument||e)!==p&&c(e),y(e,t)},st.attr=function(e,t){var n;return(e.ownerDocument||e)!==p&&c(e),d||(t=t.toLowerCase()),(n=i.attrHandle[t])?n(e):d||T.attributes?e.getAttribute(t):((n=e.getAttributeNode(t))||e.getAttribute(t))&&e[t]===!0?t:n&&n.s pecified?n.value:null},st.error=function(e){throw Error("Syntax error, unrecognized expression: "+e)},st.uniqueSort=function(e){var t,n=[],r=1,i=0;if(u=!T.detectDuplicates,e.sort(v),u){for(;t=e[r];r++)t===e[r-1]&&(i=n.push(r));while(i--)e.splice(n[i],1)}return e};function ut(e,t){var n=t&&e,r=n&&(~t.sourceIndex||j)-(~e.sourceIndex||j);if(r)return r;if(n)while(n=n.nextSibling)if(n===t)return-1;return e?1:-1}function lt(e){return function(t){var n=t.nodeName.toLowerCase();return"input"===n&&t.type===e}}function ct(e){return function(t){var n=t.nodeName.toLowerCase();return("input"===n||"button"===n)&&t.type===e}}function pt(e){return ot(function(t){return t=+t,ot(function(n,r){var i,o=e([],n.length,t),a=o.length;while(a--)n[i=o[a]]&&(n[i]=!(r[i]=n[i]))})})}o=st.getText=function(e){var t,n="",r=0,i=e.nodeType;if(i){if(1===i||9===i||11===i){if("string"==typeof e.textContent)return e.textContent;for(e=e.firstChild;e;e=e.nextSibling)n+=o(e)}else if(3===i||4===i)return e.nodeValue} else for(;t=e[r];r++)n+=o(t);return n},i=st.selectors={cacheLength:50,createPseudo:ot,match:U,find:{},relative:{">":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(et,tt),e[3]=(e[4]||e[5]||"").replace(et,tt),"~="===e[2]&&(e[3]=" "+e[3]+" "),e.slice(0,4)},CHILD:function(e){return 
e[1]=e[1].toLowerCase(),"nth"===e[1].slice(0,3)?(e[3]||st.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*("even"===e[3]||"odd"===e[3])),e[5]=+(e[7]+e[8]||"odd"===e[3])):e[3]&&st.error(e[0]),e},PSEUDO:function(e){var t,n=!e[5]&&e[2];return U.CHILD.test(e[0])?null:(e[4]?e[2]=e[4]:n&&z.test(n)&&(t=ft(n,!0))&&(t=n.indexOf(")",n.length-t)-n.length)&&(e[0]=e[0].slice(0,t),e[2]=n.slice(0,t)),e.slice(0,3))}},filter:{TAG:function(e){return"*"===e?function(){return!0}:(e=e.replace(et,tt).toLowerCase(),function(t){return t.nodeName&&t.nodeName.toLowerCase()===e})},CLASS:function(e){var t=k[e+" "]; return t||(t=RegExp("(^|"+_+")"+e+"("+_+"|$)"))&&k(e,function(e){return t.test(e.className||typeof e.getAttribute!==A&&e.getAttribute("class")||"")})},ATTR:function(e,t,n){return function(r){var i=st.attr(r,e);return null==i?"!="===t:t?(i+="","="===t?i===n:"!="===t?i!==n:"^="===t?n&&0===i.indexOf(n):"*="===t?n&&i.indexOf(n)>-1:"$="===t?n&&i.slice(-n.length)===n:"~="===t?(" "+i+" ").indexOf(n)>-1:"|="===t?i===n||i.slice(0,n.length+1)===n+"-":!1):!0}},CHILD:function(e,t,n,r,i){var o="nth"!==e.slice(0,3),a="last"!==e.slice(-4),s="of-type"===t;return 1===r&&0===i?function(e){return!!e.parentNode}:function(t,n,u){var l,c,p,f,d,h,g=o!==a?"nextSibling":"previousSibling",m=t.parentNode,y=s&&t.nodeName.toLowerCase(),v=!u&&!s;if(m){if(o){while(g){p=t;while(p=p[g])if(s?p.nodeName.toLowerCase()===y:1===p.nodeType)return!1;h=g="only"===e&&!h&&"nextSibling"}return!0}if(h=[a?m.firstChild:m.lastChild],a&&v){c=m[x]||(m[x]={}),l=c[e]||[],d=l[0]===N&&l[1],f=l[0]===N&&l[2],p=d&&m.childNodes[d]; while(p=++d&&p&&p[g]||(f=d=0)||h.pop())if(1===p.nodeType&&++f&&p===t){c[e]=[N,d,f];break}}else if(v&&(l=(t[x]||(t[x]={}))[e])&&l[0]===N)f=l[1];else while(p=++d&&p&&p[g]||(f=d=0)||h.pop())if((s?p.nodeName.toLowerCase()===y:1===p.nodeType)&&++f&&(v&&((p[x]||(p[x]={}))[e]=[N,f]),p===t))break;return f-=i,f===r||0===f%r&&f/r>=0}}},PSEUDO:function(e,t){var 
n,r=i.pseudos[e]||i.setFilters[e.toLowerCase()]||st.error("unsupported pseudo: "+e);return r[x]?r(t):r.length>1?(n=[e,e,"",t],i.setFilters.hasOwnProperty(e.toLowerCase())?ot(function(e,n){var i,o=r(e,t),a=o.length;while(a--)i=M.call(e,o[a]),e[i]=!(n[i]=o[a])}):function(e){return r(e,0,n)}):r}},pseudos:{not:ot(function(e){var t=[],n=[],r=s(e.replace(W,"$1"));return r[x]?ot(function(e,t,n,i){var o,a=r(e,null,i,[]),s=e.length;while(s--)(o=a[s])&&(e[s]=!(t[s]=o))}):function(e,i,o){return t[0]=e,r(t,null,o,n),!n.pop()}}),has:ot(function(e){return function(t){return st(e,t).length>0}}),contains:ot(function(e){return function(t){return (t.textContent||t.innerText||o(t)).indexOf(e)>-1}}),lang:ot(function(e){return X.test(e||"")||st.error("unsupported lang: "+e),e=e.replace(et,tt).toLowerCase(),function(t){var n;do if(n=d?t.getAttribute("xml:lang")||t.getAttribute("lang"):t.lang)return n=n.toLowerCase(),n===e||0===n.indexOf(e+"-");while((t=t.parentNode)&&1===t.nodeType);return!1}}),target:function(t){var n=e.location&&e.location.hash;return n&&n.slice(1)===t.id},root:function(e){return e===f},focus:function(e){return e===p.activeElement&&(!p.hasFocus||p.hasFocus())&&!!(e.type||e.href||~e.tabIndex)},enabled:function(e){return e.disabled===!1},disabled:function(e){return e.disabled===!0},checked:function(e){var t=e.nodeName.toLowerCase();return"input"===t&&!!e.checked||"option"===t&&!!e.selected},selected:function(e){return e.parentNode&&e.parentNode.selectedIndex,e.selected===!0},empty:function(e){for(e=e.firstChild;e;e=e.nextSibling)if(e.nodeName>"@"||3===e.nodeType||4===e.nodeType)return!1;return!0},parent: function(e){return!i.pseudos.empty(e)},header:function(e){return Q.test(e.nodeName)},input:function(e){return G.test(e.nodeName)},button:function(e){var t=e.nodeName.toLowerCase();return"input"===t&&"button"===e.type||"button"===t},text:function(e){var 
t;return"input"===e.nodeName.toLowerCase()&&"text"===e.type&&(null==(t=e.getAttribute("type"))||t.toLowerCase()===e.type)},first:pt(function(){return[0]}),last:pt(function(e,t){return[t-1]}),eq:pt(function(e,t,n){return[0>n?n+t:n]}),even:pt(function(e,t){var n=0;for(;t>n;n+=2)e.push(n);return e}),odd:pt(function(e,t){var n=1;for(;t>n;n+=2)e.push(n);return e}),lt:pt(function(e,t,n){var r=0>n?n+t:n;for(;--r>=0;)e.push(r);return e}),gt:pt(function(e,t,n){var r=0>n?n+t:n;for(;t>++r;)e.push(r);return e})}};for(n in{radio:!0,checkbox:!0,file:!0,password:!0,image:!0})i.pseudos[n]=lt(n);for(n in{submit:!0,reset:!0})i.pseudos[n]=ct(n);function ft(e,t){var n,r,o,a,s,u,l,c=E[e+" "];if(c)return t?0:c.slice(0);s=e,u=[],l=i.preFilter;while( s){(!n||(r=$.exec(s)))&&(r&&(s=s.slice(r[0].length)||s),u.push(o=[])),n=!1,(r=I.exec(s))&&(n=r.shift(),o.push({value:n,type:r[0].replace(W," ")}),s=s.slice(n.length));for(a in i.filter)!(r=U[a].exec(s))||l[a]&&!(r=l[a](r))||(n=r.shift(),o.push({value:n,type:a,matches:r}),s=s.slice(n.length));if(!n)break}return t?s.length:s?st.error(e):E(e,u).slice(0)}function dt(e){var t=0,n=e.length,r="";for(;n>t;t++)r+=e[t].value;return r}function ht(e,t,n){var i=t.dir,o=n&&"parentNode"===i,a=C++;return t.first?function(t,n,r){while(t=t[i])if(1===t.nodeType||o)return e(t,n,r)}:function(t,n,s){var u,l,c,p=N+" "+a;if(s){while(t=t[i])if((1===t.nodeType||o)&&e(t,n,s))return!0}else while(t=t[i])if(1===t.nodeType||o)if(c=t[x]||(t[x]={}),(l=c[i])&&l[0]===p){if((u=l[1])===!0||u===r)return u===!0}else if(l=c[i]=[p],l[1]=e(t,n,s)||r,l[1]===!0)return!0}}function gt(e){return e.length>1?function(t,n,r){var i=e.length;while(i--)if(!e[i](t,n,r))return!1;return!0}:e[0]}function mt(e,t,n,r,i){var o,a=[],s =0,u=e.length,l=null!=t;for(;u>s;s++)(o=e[s])&&(!n||n(o,r,i))&&(a.push(o),l&&t.push(s));return a}function yt(e,t,n,r,i,o){return r&&!r[x]&&(r=yt(r)),i&&!i[x]&&(i=yt(i,o)),ot(function(o,a,s,u){var 
l,c,p,f=[],d=[],h=a.length,g=o||xt(t||"*",s.nodeType?[s]:s,[]),m=!e||!o&&t?g:mt(g,f,e,s,u),y=n?i||(o?e:h||r)?[]:a:m;if(n&&n(m,y,s,u),r){l=mt(y,d),r(l,[],s,u),c=l.length;while(c--)(p=l[c])&&(y[d[c]]=!(m[d[c]]=p))}if(o){if(i||e){if(i){l=[],c=y.length;while(c--)(p=y[c])&&l.push(m[c]=p);i(null,y=[],l,u)}c=y.length;while(c--)(p=y[c])&&(l=i?M.call(o,p):f[c])>-1&&(o[l]=!(a[l]=p))}}else y=mt(y===a?y.splice(h,y.length):y),i?i(null,a,y,u):H.apply(a,y)})}function vt(e){var t,n,r,o=e.length,a=i.relative[e[0].type],s=a||i.relative[" "],u=a?1:0,c=ht(function(e){return e===t},s,!0),p=ht(function(e){return M.call(t,e)>-1},s,!0),f=[function(e,n,r){return!a&&(r||n!==l)||((t=n).nodeType?c(e,n,r):p(e,n,r))}];for(;o>u;u++)if(n=i.relative[e[u].type])f=[ht(gt(f),n)];else{if(n=i.filter[e[u].type].apply(nul l,e[u].matches),n[x]){for(r=++u;o>r;r++)if(i.relative[e[r].type])break;return yt(u>1&>(f),u>1&&dt(e.slice(0,u-1)).replace(W,"$1"),n,r>u&&vt(e.slice(u,r)),o>r&&vt(e=e.slice(r)),o>r&&dt(e))}f.push(n)}return gt(f)}function bt(e,t){var n=0,o=t.length>0,a=e.length>0,s=function(s,u,c,f,d){var h,g,m,y=[],v=0,b="0",x=s&&[],w=null!=d,T=l,C=s||a&&i.find.TAG("*",d&&u.parentNode||u),k=N+=null==T?1:Math.random()||.1;for(w&&(l=u!==p&&u,r=n);null!=(h=C[b]);b++){if(a&&h){g=0;while(m=e[g++])if(m(h,u,c)){f.push(h);break}w&&(N=k,r=++n)}o&&((h=!m&&h)&&v--,s&&x.push(h))}if(v+=b,o&&b!==v){g=0;while(m=t[g++])m(x,y,u,c);if(s){if(v>0)while(b--)x[b]||y[b]||(y[b]=L.call(f));y=mt(y)}H.apply(f,y),w&&!s&&y.length>0&&v+t.length>1&&st.uniqueSort(f)}return w&&(N=k,l=T),x};return o?ot(s):s}s=st.compile=function(e,t){var n,r=[],i=[],o=S[e+" "];if(!o){t||(t=ft(e)),n=t.length;while(n--)o=vt(t[n]),o[x]?r.push(o):i.push(o);o=S(e,bt(i,r))}return o};function xt(e,t,n){var r=0,i=t.length;for(;i>r;r++)st(e,t[r],n); return n}function wt(e,t,n,r){var 
o,a,u,l,c,p=ft(e);if(!r&&1===p.length){if(a=p[0]=p[0].slice(0),a.length>2&&"ID"===(u=a[0]).type&&9===t.nodeType&&!d&&i.relative[a[1].type]){if(t=i.find.ID(u.matches[0].replace(et,tt),t)[0],!t)return n;e=e.slice(a.shift().value.length)}o=U.needsContext.test(e)?0:a.length;while(o--){if(u=a[o],i.relative[l=u.type])break;if((c=i.find[l])&&(r=c(u.matches[0].replace(et,tt),V.test(a[0].type)&&t.parentNode||t))){if(a.splice(o,1),e=r.length&&dt(a),!e)return H.apply(n,q.call(r,0)),n;break}}}return s(e,p)(r,t,d,n,V.test(e)),n}i.pseudos.nth=i.pseudos.eq;function Tt(){}i.filters=Tt.prototype=i.pseudos,i.setFilters=new Tt,c(),st.attr=b.attr,b.find=st,b.expr=st.selectors,b.expr[":"]=b.expr.pseudos,b.unique=st.uniqueSort,b.text=st.getText,b.isXMLDoc=st.isXML,b.contains=st.contains}(e);var at=/Until$/,st=/^(?:parents|prev(?:Until|All))/,ut=/^.[^:#[.,]*$/,lt=b.expr.match.needsContext,ct={children:!0,contents:!0,next:!0,prev:!0};b.fn.extend({find:function(e) {var t,n,r,i=this.length;if("string"!=typeof e)return r=this,this.pushStack(b(e).filter(function(){for(t=0;i>t;t++)if(b.contains(r[t],this))return!0}));for(n=[],t=0;i>t;t++)b.find(e,this[t],n);return n=this.pushStack(i>1?b.unique(n):n),n.selector=(this.selector?this.selector+" ":"")+e,n},has:function(e){var t,n=b(e,this),r=n.length;return this.filter(function(){for(t=0;r>t;t++)if(b.contains(this,n[t]))return!0})},not:function(e){return this.pushStack(ft(this,e,!1))},filter:function(e){return this.pushStack(ft(this,e,!0))},is:function(e){return!!e&&("string"==typeof e?lt.test(e)?b(e,this.context).index(this[0])>=0:b.filter(e,this).length>0:this.filter(e).length>0)},closest:function(e,t){var n,r=0,i=this.length,o=[],a=lt.test(e)||"string"!=typeof e?b(e,t||this.context):0;for(;i>r;r++){n=this[r];while(n&&n.ownerDocument&&n!==t&&11!==n.nodeType){if(a?a.index(n)>-1:b.find.matchesSelector(n,e)){o.push(n);break}n=n.parentNode}}return this.pushStack(o.length>1?b.unique(o):o)},index: function(e){return e?"string"==typeof 
e?b.inArray(this[0],b(e)):b.inArray(e.jquery?e[0]:e,this):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(e,t){var n="string"==typeof e?b(e,t):b.makeArray(e&&e.nodeType?[e]:e),r=b.merge(this.get(),n);return this.pushStack(b.unique(r))},addBack:function(e){return this.add(null==e?this.prevObject:this.prevObject.filter(e))}}),b.fn.andSelf=b.fn.addBack;function pt(e,t){do e=e[t];while(e&&1!==e.nodeType);return e}b.each({parent:function(e){var t=e.parentNode;return t&&11!==t.nodeType?t:null},parents:function(e){return b.dir(e,"parentNode")},parentsUntil:function(e,t,n){return b.dir(e,"parentNode",n)},next:function(e){return pt(e,"nextSibling")},prev:function(e){return pt(e,"previousSibling")},nextAll:function(e){return b.dir(e,"nextSibling")},prevAll:function(e){return b.dir(e,"previousSibling")},nextUntil:function(e,t,n){return b.dir(e,"nextSibling",n)},prevUntil:function(e,t,n){return b.dir(e,"previousSibling",n) },siblings:function(e){return b.sibling((e.parentNode||{}).firstChild,e)},children:function(e){return b.sibling(e.firstChild)},contents:function(e){return b.nodeName(e,"iframe")?e.contentDocument||e.contentWindow.document:b.merge([],e.childNodes)}},function(e,t){b.fn[e]=function(n,r){var i=b.map(this,t,n);return at.test(e)||(r=n),r&&"string"==typeof r&&(i=b.filter(r,i)),i=this.length>1&&!ct[e]?b.unique(i):i,this.length>1&&st.test(e)&&(i=i.reverse()),this.pushStack(i)}}),b.extend({filter:function(e,t,n){return n&&(e=":not("+e+")"),1===t.length?b.find.matchesSelector(t[0],e)?[t[0]]:[]:b.find.matches(e,t)},dir:function(e,n,r){var i=[],o=e[n];while(o&&9!==o.nodeType&&(r===t||1!==o.nodeType||!b(o).is(r)))1===o.nodeType&&i.push(o),o=o[n];return i},sibling:function(e,t){var n=[];for(;e;e=e.nextSibling)1===e.nodeType&&e!==t&&n.push(e);return n}});function ft(e,t,n){if(t=t||0,b.isFunction(t))return b.grep(e,function(e,r){var i=!!t.call(e,r,e);return i===n});if(t.nodeType)return b.gre p(e,function(e){return e===t===n});if("string"==typeof 
t){var r=b.grep(e,function(e){return 1===e.nodeType});if(ut.test(t))return b.filter(t,r,!n);t=b.filter(t,r)}return b.grep(e,function(e){return b.inArray(e,t)>=0===n})}function dt(e){var t=ht.split("|"),n=e.createDocumentFragment();if(n.createElement)while(t.length)n.createElement(t.pop());return n}var ht="abbr|article|aside|audio|bdi|canvas|data|datalist|details|figcaption|figure|footer|header|hgroup|mark|meter|nav|output|progress|section|summary|time|video",gt=/ jQuery\d+="(?:null|\d+)"/g,mt=RegExp("<(?:"+ht+")[\s/>]","i"),yt=/^\s+/,vt=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)/>/gi,bt=/<([\w:]+)/,xt=/<tbody/i,wt=/<|&#?\w+;/,Tt=/<(?:script|style|link)/i,Nt=/^(?:checkbox|radio)$/i,Ct=/checked\s*(?:[^=]|=\s*.checked.)/i,kt=/^$|/(?:java|ecma)script/i,Et=/^true/(.*)/,St=/^\s*<!(?:[CDATA[|--)|(?:]]|--)>\s*$/g,At={option:[1,"<select multiple='multiple'>","</select>"],legend:[1,"<fieldset>","</ fieldset>"],area:[1,"<map>","</map>"],param:[1,"<object>","</object>"],thead:[1,"<table>","</table>"],tr:[2,"<table><tbody>","</tbody></table>"],col:[2,"<table><tbody></tbody><colgroup>","</colgroup></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],_default:b.support.htmlSerialize?[0,"",""]:[1,"X<div>","</div>"]},jt=dt(o),Dt=jt.appendChild(o.createElement("div"));At.optgroup=At.option,At.tbody=At.tfoot=At.colgroup=At.caption=At.thead,At.th=At.td,b.fn.extend({text:function(e){return b.access(this,function(e){return e===t?b.text(this):this.empty().append((this[0]&&this[0].ownerDocument||o).createTextNode(e))},null,e,arguments.length)},wrapAll:function(e){if(b.isFunction(e))return this.each(function(t){b(this).wrapAll(e.call(this,t))});if(this[0]){var t=b(e,this[0].ownerDocument).eq(0).clone(!0);this[0].parentNode&&t.insertBefore(this[0]),t.map(function(){var e=this;while(e.firstChild&&1===e.firstChild.nodeType)e=e.firstChild;return e}).append(this)}return this},wr apInner:function(e){return 
b.isFunction(e)?this.each(function(t){b(this).wrapInner(e.call(this,t))}):this.each(function(){var t=b(this),n=t.contents();n.length?n.wrapAll(e):t.append(e)})},wrap:function(e){var t=b.isFunction(e);return this.each(function(n){b(this).wrapAll(t?e.call(this,n):e)})},unwrap:function(){return this.parent().each(function(){b.nodeName(this,"body")||b(this).replaceWith(this.childNodes)}).end()},append:function(){return this.domManip(arguments,!0,function(e){(1===this.nodeType||11===this.nodeType||9===this.nodeType)&&this.appendChild(e)})},prepend:function(){return this.domManip(arguments,!0,function(e){(1===this.nodeType||11===this.nodeType||9===this.nodeType)&&this.insertBefore(e,this.firstChild)})},before:function(){return this.domManip(arguments,!1,function(e){this.parentNode&&this.parentNode.insertBefore(e,this)})},after:function(){return this.domManip(arguments,!1,function(e){this.parentNode&&this.parentNode.insertBefore(e,this.nextSibling)})},rem ove:function(e,t){var n,r=0;for(;null!=(n=this[r]);r++)(!e||b.filter(e,[n]).length>0)&&(t||1!==n.nodeType||b.cleanData(Ot(n)),n.parentNode&&(t&&b.contains(n.ownerDocument,n)&&Mt(Ot(n,"script")),n.parentNode.removeChild(n)));return this},empty:function(){var e,t=0;for(;null!=(e=this[t]);t++){1===e.nodeType&&b.cleanData(Ot(e,!1));while(e.firstChild)e.removeChild(e.firstChild);e.options&&b.nodeName(e,"select")&&(e.options.length=0)}return this},clone:function(e,t){return e=null==e?!1:e,t=null==t?e:t,this.map(function(){return b.clone(this,e,t)})},html:function(e){return b.access(this,function(e){var n=this[0]||{},r=0,i=this.length;if(e===t)return 1===n.nodeType?n.innerHTML.replace(gt,""):t;if(!("string"!=typeof e||Tt.test(e)||!b.support.htmlSerialize&&mt.test(e)||!b.support.leadingWhitespace&&yt.test(e)||At[(bt.exec(e)||["",""])[1].toLowerCase()])){e=e.replace(vt,"<$1></$2>");try{for(;i>r;r++)n=this[r]||{},1===n.nodeType&&(b.cleanData(Ot(n,!1)),n.innerHTML=e);n=0}catch(o){}}n&& 
this.empty().append(e)},null,e,arguments.length)},replaceWith:function(e){var t=b.isFunction(e);return t||"string"==typeof e||(e=b(e).not(this).detach()),this.domManip([e],!0,function(e){var t=this.nextSibling,n=this.parentNode;n&&(b(this).remove(),n.insertBefore(e,t))})},detach:function(e){return this.remove(e,!0)},domManip:function(e,n,r){e=f.apply([],e);var i,o,a,s,u,l,c=0,p=this.length,d=this,h=p-1,g=e[0],m=b.isFunction(g);if(m||!(1>=p||"string"!=typeof g||b.support.checkClone)&&Ct.test(g))return this.each(function(i){var o=d.eq(i);m&&(e[0]=g.call(this,i,n?o.html():t)),o.domManip(e,n,r)});if(p&&(l=b.buildFragment(e,this[0].ownerDocument,!1,this),i=l.firstChild,1===l.childNodes.length&&(l=i),i)){for(n=n&&b.nodeName(i,"tr"),s=b.map(Ot(l,"script"),Ht),a=s.length;p>c;c++)o=l,c!==h&&(o=b.clone(o,!0,!0),a&&b.merge(s,Ot(o,"script"))),r.call(n&&b.nodeName(this[c],"table")?Lt(this[c],"tbody"):this[c],o,c);if(a)for(u=s[s.length-1].ownerDocument,b.map(s,qt),c=0;a>c;c++)o=s[c],kt.te st(o.type||"")&&!b._data(o,"globalEval")&&b.contains(u,o)&&(o.src?b.ajax({url:o.src,type:"GET",dataType:"script",async:!1,global:!1,"throws":!0}):b.globalEval((o.text||o.textContent||o.innerHTML||"").replace(St,"")));l=i=null}return this}});function Lt(e,t){return e.getElementsByTagName(t)[0]||e.appendChild(e.ownerDocument.createElement(t))}function Ht(e){var t=e.getAttributeNode("type");return e.type=(t&&t.specified)+"/"+e.type,e}function qt(e){var t=Et.exec(e.type);return t?e.type=t[1]:e.removeAttribute("type"),e}function Mt(e,t){var n,r=0;for(;null!=(n=e[r]);r++)b._data(n,"globalEval",!t||b._data(t[r],"globalEval"))}function _t(e,t){if(1===t.nodeType&&b.hasData(e)){var n,r,i,o=b._data(e),a=b._data(t,o),s=o.events;if(s){delete a.handle,a.events={};for(n in s)for(r=0,i=s[n].length;i>r;r++)b.event.add(t,n,s[n][r])}a.data&&(a.data=b.extend({},a.data))}}function Ft(e,t){var n,r,i;if(1===t.nodeType){if(n=t.nodeName.toLowerCase(),!b.support.noCloneEvent&&t[b.expando]){i=b._data( t);for(r in 
i.events)b.removeEvent(t,r,i.handle);t.removeAttribute(b.expando)}"script"===n&&t.text!==e.text?(Ht(t).text=e.text,qt(t)):"object"===n?(t.parentNode&&(t.outerHTML=e.outerHTML),b.support.html5Clone&&e.innerHTML&&!b.trim(t.innerHTML)&&(t.innerHTML=e.innerHTML)):"input"===n&&Nt.test(e.type)?(t.defaultChecked=t.checked=e.checked,t.value!==e.value&&(t.value=e.value)):"option"===n?t.defaultSelected=t.selected=e.defaultSelected:("input"===n||"textarea"===n)&&(t.defaultValue=e.defaultValue)}}b.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(e,t){b.fn[e]=function(e){var n,r=0,i=[],o=b(e),a=o.length-1;for(;a>=r;r++)n=r===a?this:this.clone(!0),b(o[r])[t](n),d.apply(i,n.get());return this.pushStack(i)}});function Ot(e,n){var r,o,a=0,s=typeof e.getElementsByTagName!==i?e.getElementsByTagName(n||"*"):typeof e.querySelectorAll!==i?e.querySelectorAll(n||"*"):t;if(!s)for(s=[],r=e.childNodes||e;null!=(o=r[a]) ;a++)!n||b.nodeName(o,n)?s.push(o):b.merge(s,Ot(o,n));return n===t||n&&b.nodeName(e,n)?b.merge([e],s):s}function Bt(e){Nt.test(e.type)&&(e.defaultChecked=e.checked)}b.extend({clone:function(e,t,n){var r,i,o,a,s,u=b.contains(e.ownerDocument,e);if(b.support.html5Clone||b.isXMLDoc(e)||!mt.test("<"+e.nodeName+">")?o=e.cloneNode(!0):(Dt.innerHTML=e.outerHTML,Dt.removeChild(o=Dt.firstChild)),!(b.support.noCloneEvent&&b.support.noCloneChecked||1!==e.nodeType&&11!==e.nodeType||b.isXMLDoc(e)))for(r=Ot(o),s=Ot(e),a=0;null!=(i=s[a]);++a)r[a]&&Ft(i,r[a]);if(t)if(n)for(s=s||Ot(e),r=r||Ot(o),a=0;null!=(i=s[a]);a++)_t(i,r[a]);else _t(e,o);return r=Ot(o,"script"),r.length>0&&Mt(r,!u&&Ot(e,"script")),r=s=i=null,o},buildFragment:function(e,t,n,r){var i,o,a,s,u,l,c,p=e.length,f=dt(t),d=[],h=0;for(;p>h;h++)if(o=e[h],o||0===o)if("object"===b.type(o))b.merge(d,o.nodeType?[o]:o);else if(wt.test(o)){s=s||f.appendChild(t.createElement("div")),u=(bt.exec(o)||["",""])[1].toLowerCase(),c=At[u]||At._def 
ault,s.innerHTML=c[1]+o.replace(vt,"<$1></$2>")+c[2],i=c[0];while(i--)s=s.lastChild;if(!b.support.leadingWhitespace&&yt.test(o)&&d.push(t.createTextNode(yt.exec(o)[0])),!b.support.tbody){o="table"!==u||xt.test(o)?"<table>"!==c[1]||xt.test(o)?0:s:s.firstChild,i=o&&o.childNodes.length;while(i--)b.nodeName(l=o.childNodes[i],"tbody")&&!l.childNodes.length&&o.removeChild(l) -}b.merge(d,s.childNodes),s.textContent="";while(s.firstChild)s.removeChild(s.firstChild);s=f.lastChild}else d.push(t.createTextNode(o));s&&f.removeChild(s),b.support.appendChecked||b.grep(Ot(d,"input"),Bt),h=0;while(o=d[h++])if((!r||-1===b.inArray(o,r))&&(a=b.contains(o.ownerDocument,o),s=Ot(f.appendChild(o),"script"),a&&Mt(s),n)){i=0;while(o=s[i++])kt.test(o.type||"")&&n.push(o)}return s=null,f},cleanData:function(e,t){var n,r,o,a,s=0,u=b.expando,l=b.cache,p=b.support.deleteExpando,f=b.event.special;for(;null!=(n=e[s]);s++)if((t||b.acceptData(n))&&(o=n[u],a=o&&l[o])){if(a.events)for(r in a.events)f[r]?b.event.remove(n,r):b.removeEvent(n,r,a.handle);l[o]&&(delete l[o],p?delete n[u]:typeof n.removeAttribute!==i?n.removeAttribute(u):n[u]=null,c.push(o))}}});var Pt,Rt,Wt,$t=/alpha([^)]*)/i,It=/opacity\s*=\s*([^)]*)/,zt=/^(top|right|bottom|left)$/,Xt=/^(none|table(?!-c[ea]).+)/,Ut=/^margin/,Vt=RegExp("^("+x+")(.*)$","i"),Yt=RegExp("^("+x+")(?!px)[a-z%]+$","i"),Jt=RegExp("^([+- ])=("+x+")","i"),Gt={BODY:"block"},Qt={position:"absolute",visibility:"hidden",display:"block"},Kt={letterSpacing:0,fontWeight:400},Zt=["Top","Right","Bottom","Left"],en=["Webkit","O","Moz","ms"];function tn(e,t){if(t in e)return t;var n=t.charAt(0).toUpperCase()+t.slice(1),r=t,i=en.length;while(i--)if(t=en[i]+n,t in e)return t;return r}function nn(e,t){return e=t||e,"none"===b.css(e,"display")||!b.contains(e.ownerDocument,e)}function rn(e,t){var 
n,r,i,o=[],a=0,s=e.length;for(;s>a;a++)r=e[a],r.style&&(o[a]=b._data(r,"olddisplay"),n=r.style.display,t?(o[a]||"none"!==n||(r.style.display=""),""===r.style.display&&nn(r)&&(o[a]=b._data(r,"olddisplay",un(r.nodeName)))):o[a]||(i=nn(r),(n&&"none"!==n||!i)&&b._data(r,"olddisplay",i?n:b.css(r,"display"))));for(a=0;s>a;a++)r=e[a],r.style&&(t&&"none"!==r.style.display&&""!==r.style.display||(r.style.display=t?o[a]||"":"none"));return e}b.fn.extend({css:function(e,n){return b.access(this,function(e,n,r){var i,o,a={},s=0;if(b.isArray(n)){ for(o=Rt(e),i=n.length;i>s;s++)a[n[s]]=b.css(e,n[s],!1,o);return a}return r!==t?b.style(e,n,r):b.css(e,n)},e,n,arguments.length>1)},show:function(){return rn(this,!0)},hide:function(){return rn(this)},toggle:function(e){var t="boolean"==typeof e;return this.each(function(){(t?e:nn(this))?b(this).show():b(this).hide()})}}),b.extend({cssHooks:{opacity:{get:function(e,t){if(t){var n=Wt(e,"opacity");return""===n?"1":n}}}},cssNumber:{columnCount:!0,fillOpacity:!0,fontWeight:!0,lineHeight:!0,opacity:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{"float":b.support.cssFloat?"cssFloat":"styleFloat"},style:function(e,n,r,i){if(e&&3!==e.nodeType&&8!==e.nodeType&&e.style){var o,a,s,u=b.camelCase(n),l=e.style;if(n=b.cssProps[u]||(b.cssProps[u]=tn(l,u)),s=b.cssHooks[n]||b.cssHooks[u],r===t)return s&&"get"in s&&(o=s.get(e,!1,i))!==t?o:l[n];if(a=typeof r,"string"===a&&(o=Jt.exec(r))&&(r=(o[1]+1)*o[2]+parseFloat(b.css(e,n)),a="number"),!(null==r||"number"===a&&isNaN(r)||("number"!==a|| b.cssNumber[u]||(r+="px"),b.support.clearCloneStyle||""!==r||0!==n.indexOf("background")||(l[n]="inherit"),s&&"set"in s&&(r=s.set(e,r,i))===t)))try{l[n]=r}catch(c){}}},css:function(e,n,r,i){var o,a,s,u=b.camelCase(n);return n=b.cssProps[u]||(b.cssProps[u]=tn(e.style,u)),s=b.cssHooks[n]||b.cssHooks[u],s&&"get"in s&&(a=s.get(e,!0,r)),a===t&&(a=Wt(e,n,i)),"normal"===a&&n in 
Kt&&(a=Kt[n]),""===r||r?(o=parseFloat(a),r===!0||b.isNumeric(o)?o||0:a):a},swap:function(e,t,n,r){var i,o,a={};for(o in t)a[o]=e.style[o],e.style[o]=t[o];i=n.apply(e,r||[]);for(o in t)e.style[o]=a[o];return i}}),e.getComputedStyle?(Rt=function(t){return e.getComputedStyle(t,null)},Wt=function(e,n,r){var i,o,a,s=r||Rt(e),u=s?s.getPropertyValue(n)||s[n]:t,l=e.style;return s&&(""!==u||b.contains(e.ownerDocument,e)||(u=b.style(e,n)),Yt.test(u)&&Ut.test(n)&&(i=l.width,o=l.minWidth,a=l.maxWidth,l.minWidth=l.maxWidth=l.width=u,u=s.width,l.width=i,l.minWidth=o,l.maxWidth=a)),u}):o.documentElement.currentStyle&&(Rt=f unction(e){return e.currentStyle},Wt=function(e,n,r){var i,o,a,s=r||Rt(e),u=s?s[n]:t,l=e.style;return null==u&&l&&l[n]&&(u=l[n]),Yt.test(u)&&!zt.test(n)&&(i=l.left,o=e.runtimeStyle,a=o&&o.left,a&&(o.left=e.currentStyle.left),l.left="fontSize"===n?"1em":u,u=l.pixelLeft+"px",l.left=i,a&&(o.left=a)),""===u?"auto":u});function on(e,t,n){var r=Vt.exec(t);return r?Math.max(0,r[1]-(n||0))+(r[2]||"px"):t}function an(e,t,n,r,i){var o=n===(r?"border":"content")?4:"width"===t?1:0,a=0;for(;4>o;o+=2)"margin"===n&&(a+=b.css(e,n+Zt[o],!0,i)),r?("content"===n&&(a-=b.css(e,"padding"+Zt[o],!0,i)),"margin"!==n&&(a-=b.css(e,"border"+Zt[o]+"Width",!0,i))):(a+=b.css(e,"padding"+Zt[o],!0,i),"padding"!==n&&(a+=b.css(e,"border"+Zt[o]+"Width",!0,i)));return a}function sn(e,t,n){var r=!0,i="width"===t?e.offsetWidth:e.offsetHeight,o=Rt(e),a=b.support.boxSizing&&"border-box"===b.css(e,"boxSizing",!1,o);if(0>=i||null==i){if(i=Wt(e,t,o),(0>i||null==i)&&(i=e.style[t]),Yt.test(i))return i;r=a&&(b.support.bo xSizingReliable||i===e.style[t]),i=parseFloat(i)||0}return i+an(e,t,n||(a?"border":"content"),r,o)+"px"}function un(e){var t=o,n=Gt[e];return n||(n=ln(e,t),"none"!==n&&n||(Pt=(Pt||b("<iframe frameborder='0' width='0' height='0'/>").css("cssText","display:block !important")).appendTo(t.documentElement),t=(Pt[0].contentWindow||Pt[0].contentDocument).document,t.write("<!doctype 
html><html><body>"),t.close(),n=ln(e,t),Pt.detach()),Gt[e]=n),n}function ln(e,t){var n=b(t.createElement(e)).appendTo(t.body),r=b.css(n[0],"display");return n.remove(),r}b.each(["height","width"],function(e,n){b.cssHooks[n]={get:function(e,r,i){return r?0===e.offsetWidth&&Xt.test(b.css(e,"display"))?b.swap(e,Qt,function(){return sn(e,n,i)}):sn(e,n,i):t},set:function(e,t,r){var i=r&&Rt(e);return on(e,t,r?an(e,n,r,b.support.boxSizing&&"border-box"===b.css(e,"boxSizing",!1,i),i):0)}}}),b.support.opacity||(b.cssHooks.opacity={get:function(e,t){return It.test((t&&e.currentStyle?e.currentStyle.filter:e.style.f ilter)||"")?.01*parseFloat(RegExp.$1)+"":t?"1":""},set:function(e,t){var n=e.style,r=e.currentStyle,i=b.isNumeric(t)?"alpha(opacity="+100*t+")":"",o=r&&r.filter||n.filter||"";n.zoom=1,(t>=1||""===t)&&""===b.trim(o.replace($t,""))&&n.removeAttribute&&(n.removeAttribute("filter"),""===t||r&&!r.filter)||(n.filter=$t.test(o)?o.replace($t,i):o+" "+i)}}),b(function(){b.support.reliableMarginRight||(b.cssHooks.marginRight={get:function(e,n){return n?b.swap(e,{display:"inline-block"},Wt,[e,"marginRight"]):t}}),!b.support.pixelPosition&&b.fn.position&&b.each(["top","left"],function(e,n){b.cssHooks[n]={get:function(e,r){return r?(r=Wt(e,n),Yt.test(r)?b(e).position()[n]+"px":r):t}}})}),b.expr&&b.expr.filters&&(b.expr.filters.hidden=function(e){return 0>=e.offsetWidth&&0>=e.offsetHeight||!b.support.reliableHiddenOffsets&&"none"===(e.style&&e.style.display||b.css(e,"display"))},b.expr.filters.visible=function(e){return!b.expr.filters.hidden(e)}),b.each({margin:"",padding:"",border:"Width "},function(e,t){b.cssHooks[e+t]={expand:function(n){var r=0,i={},o="string"==typeof n?n.split(" "):[n];for(;4>r;r++)i[e+Zt[r]+t]=o[r]||o[r-2]||o[0];return i}},Ut.test(e)||(b.cssHooks[e+t].set=on)});var cn=/%20/g,pn=/[]$/,fn=/\r?\n/g,dn=/^(?:submit|button|image|reset|file)$/i,hn=/^(?:input|select|textarea|keygen)/i;b.fn.extend({serialize:function(){return 
b.param(this.serializeArray())},serializeArray:function(){return this.map(function(){var e=b.prop(this,"elements");return e?b.makeArray(e):this}).filter(function(){var e=this.type;return this.name&&!b(this).is(":disabled")&&hn.test(this.nodeName)&&!dn.test(e)&&(this.checked||!Nt.test(e))}).map(function(e,t){var n=b(this).val();return null==n?null:b.isArray(n)?b.map(n,function(e){return{name:t.name,value:e.replace(fn,"\r\n")}}):{name:t.name,value:n.replace(fn,"\r\n")}}).get()}}),b.param=function(e,n){var r,i=[],o=function(e,t){t=b.isFunction(t)?t():null==t?"":t,i[i.length]=encodeURIComponent(e)+"="+encodeURIComponent(t)};i f(n===t&&(n=b.ajaxSettings&&b.ajaxSettings.traditional),b.isArray(e)||e.jquery&&!b.isPlainObject(e))b.each(e,function(){o(this.name,this.value)});else for(r in e)gn(r,e[r],n,o);return i.join("&").replace(cn,"+")};function gn(e,t,n,r){var i;if(b.isArray(t))b.each(t,function(t,i){n||pn.test(e)?r(e,i):gn(e+"["+("object"==typeof i?t:"")+"]",i,n,r)});else if(n||"object"!==b.type(t))r(e,t);else for(i in t)gn(e+"["+i+"]",t[i],n,r)}b.each("blur focus focusin focusout load resize scroll unload click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup error contextmenu".split(" "),function(e,t){b.fn[t]=function(e,n){return arguments.length>0?this.on(t,null,e,n):this.trigger(t)}}),b.fn.hover=function(e,t){return this.mouseenter(e).mouseleave(t||e)};var mn,yn,vn=b.now(),bn=/?/,xn=/#.*$/,wn=/([?&])_=[^&]*/,Tn=/^(.*?):[ \t]*([^\r\n]*)\r?$/gm,Nn=/^(?:about|app|app-storage|.+-extension|file|res|widget):$/,Cn=/^(?:GET|HEA D)$/,kn=/^///,En=/^([\w.+-]+:)(?://([^/?#:]*)(?::(\d+)|)|)/,Sn=b.fn.load,An={},jn={},Dn="*/".concat("*");try{yn=a.href}catch(Ln){yn=o.createElement("a"),yn.href="",yn=yn.href}mn=En.exec(yn.toLowerCase())||[];function Hn(e){return function(t,n){"string"!=typeof t&&(n=t,t="*");var 
r,i=0,o=t.toLowerCase().match(w)||[];if(b.isFunction(n))while(r=o[i++])"+"===r[0]?(r=r.slice(1)||"*",(e[r]=e[r]||[]).unshift(n)):(e[r]=e[r]||[]).push(n)}}function qn(e,n,r,i){var o={},a=e===jn;function s(u){var l;return o[u]=!0,b.each(e[u]||[],function(e,u){var c=u(n,r,i);return"string"!=typeof c||a||o[c]?a?!(l=c):t:(n.dataTypes.unshift(c),s(c),!1)}),l}return s(n.dataTypes[0])||!o["*"]&&s("*")}function Mn(e,n){var r,i,o=b.ajaxSettings.flatOptions||{};for(i in n)n[i]!==t&&((o[i]?e:r||(r={}))[i]=n[i]);return r&&b.extend(!0,e,r),e}b.fn.load=function(e,n,r){if("string"!=typeof e&&Sn)return Sn.apply(this,arguments);var i,o,a,s=this,u=e.indexOf(" ");return u>=0&&(i=e.slice(u,e.length),e=e.slice(0,u)), b.isFunction(n)?(r=n,n=t):n&&"object"==typeof n&&(a="POST"),s.length>0&&b.ajax({url:e,type:a,dataType:"html",data:n}).done(function(e){o=arguments,s.html(i?b("<div>").append(b.parseHTML(e)).find(i):e)}).complete(r&&function(e,t){s.each(r,o||[e.responseText,t,e])}),this},b.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(e,t){b.fn[t]=function(e){return this.on(t,e)}}),b.each(["get","post"],function(e,n){b[n]=function(e,r,i,o){return b.isFunction(r)&&(o=o||i,i=r,r=t),b.ajax({url:e,type:n,dataType:o,data:r,success:i})}}),b.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:yn,type:"GET",isLocal:Nn.test(mn[1]),global:!0,processData:!0,async:!0,contentType:"application/x-www-form-urlencoded; charset=UTF-8",accepts:{"*":Dn,text:"text/plain",html:"text/html",xml:"application/xml, text/xml",json:"application/json, text/javascript"},contents:{xml:/xml/,html:/html/,json:/json/},responseFields:{xml:"responseXML",text:"responseText"},con verters:{"* text":e.String,"text html":!0,"text json":b.parseJSON,"text xml":b.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(e,t){return t?Mn(Mn(e,b.ajaxSettings),t):Mn(b.ajaxSettings,e)},ajaxPrefilter:Hn(An),ajaxTransport:Hn(jn),ajax:function(e,n){"object"==typeof 
e&&(n=e,e=t),n=n||{};var r,i,o,a,s,u,l,c,p=b.ajaxSetup({},n),f=p.context||p,d=p.context&&(f.nodeType||f.jquery)?b(f):b.event,h=b.Deferred(),g=b.Callbacks("once memory"),m=p.statusCode||{},y={},v={},x=0,T="canceled",N={readyState:0,getResponseHeader:function(e){var t;if(2===x){if(!c){c={};while(t=Tn.exec(a))c[t[1].toLowerCase()]=t[2]}t=c[e.toLowerCase()]}return null==t?null:t},getAllResponseHeaders:function(){return 2===x?a:null},setRequestHeader:function(e,t){var n=e.toLowerCase();return x||(e=v[n]=v[n]||e,y[e]=t),this},overrideMimeType:function(e){return x||(p.mimeType=e),this},statusCode:function(e){var t;if(e)if(2>x)for(t in e)m[t]=[m[t],e[t]];else N.always(e[N.status]);return this},abort:functi on(e){var t=e||T;return l&&l.abort(t),k(0,t),this}};if(h.promise(N).complete=g.add,N.success=N.done,N.error=N.fail,p.url=((e||p.url||yn)+"").replace(xn,"").replace(kn,mn[1]+"//"),p.type=n.method||n.type||p.method||p.type,p.dataTypes=b.trim(p.dataType||"*").toLowerCase().match(w)||[""],null==p.crossDomain&&(r=En.exec(p.url.toLowerCase()),p.crossDomain=!(!r||r[1]===mn[1]&&r[2]===mn[2]&&(r[3]||("http:"===r[1]?80:443))==(mn[3]||("http:"===mn[1]?80:443)))),p.data&&p.processData&&"string"!=typeof p.data&&(p.data=b.param(p.data,p.traditional)),qn(An,p,n,N),2===x)return N;u=p.global,u&&0===b.active++&&b.event.trigger("ajaxStart"),p.type=p.type.toUpperCase(),p.hasContent=!Cn.test(p.type),o=p.url,p.hasContent||(p.data&&(o=p.url+=(bn.test(o)?"&":"?")+p.data,delete p.data),p.cache===!1&&(p.url=wn.test(o)?o.replace(wn,"$1_="+vn++):o+(bn.test(o)?"&":"?")+"_="+vn++)),p.ifModified&&(b.lastModified[o]&&N.setRequestHeader("If-Modified-Since",b.lastModified[o]),b.etag[o]&&N.setRequestHeader("I f-None-Match",b.etag[o])),(p.data&&p.hasContent&&p.contentType!==!1||n.contentType)&&N.setRequestHeader("Content-Type",p.contentType),N.setRequestHeader("Accept",p.dataTypes[0]&&p.accepts[p.dataTypes[0]]?p.accepts[p.dataTypes[0]]+("*"!==p.dataTypes[0]?", "+Dn+"; q=0.01":""):p.accepts["*"]);for(i in 
p.headers)N.setRequestHeader(i,p.headers[i]);if(p.beforeSend&&(p.beforeSend.call(f,N,p)===!1||2===x))return N.abort();T="abort";for(i in{success:1,error:1,complete:1})N[i](p[i]);if(l=qn(jn,p,n,N)){N.readyState=1,u&&d.trigger("ajaxSend",[N,p]),p.async&&p.timeout>0&&(s=setTimeout(function(){N.abort("timeout")},p.timeout));try{x=1,l.send(y,k)}catch(C){if(!(2>x))throw C;k(-1,C)}}else k(-1,"No Transport");function k(e,n,r,i){var c,y,v,w,T,C=n;2!==x&&(x=2,s&&clearTimeout(s),l=t,a=i||"",N.readyState=e>0?4:0,r&&(w=_n(p,N,r)),e>=200&&300>e||304===e?(p.ifModified&&(T=N.getResponseHeader("Last-Modified"),T&&(b.lastModified[o]=T),T=N.getResponseHeader("etag"),T&&(b.etag[o]=T)),204===e?(c=!0,C ="nocontent"):304===e?(c=!0,C="notmodified"):(c=Fn(p,w),C=c.state,y=c.data,v=c.error,c=!v)):(v=C,(e||!C)&&(C="error",0>e&&(e=0))),N.status=e,N.statusText=(n||C)+"",c?h.resolveWith(f,[y,C,N]):h.rejectWith(f,[N,C,v]),N.statusCode(m),m=t,u&&d.trigger(c?"ajaxSuccess":"ajaxError",[N,p,c?y:v]),g.fireWith(f,[N,C]),u&&(d.trigger("ajaxComplete",[N,p]),--b.active||b.event.trigger("ajaxStop")))}return N},getScript:function(e,n){return b.get(e,t,n,"script")},getJSON:function(e,t,n){return b.get(e,t,n,"json")}});function _n(e,n,r){var i,o,a,s,u=e.contents,l=e.dataTypes,c=e.responseFields;for(s in c)s in r&&(n[c[s]]=r[s]);while("*"===l[0])l.shift(),o===t&&(o=e.mimeType||n.getResponseHeader("Content-Type"));if(o)for(s in u)if(u[s]&&u[s].test(o)){l.unshift(s);break}if(l[0]in r)a=l[0];else{for(s in r){if(!l[0]||e.converters[s+" "+l[0]]){a=s;break}i||(i=s)}a=a||i}return a?(a!==l[0]&&l.unshift(a),r[a]):t}function Fn(e,t){var n,r,i,o,a={},s=0,u=e.dataTypes.slice(),l=u[0];if(e.dataFilter&&(t=e.d ataFilter(t,e.dataType)),u[1])for(i in e.converters)a[i.toLowerCase()]=e.converters[i];for(;r=u[++s];)if("*"!==r){if("*"!==l&&l!==r){if(i=a[l+" "+r]||a["* "+r],!i)for(n in a)if(o=n.split(" "),o[1]===r&&(i=a[l+" "+o[0]]||a["* 
"+o[0]])){i===!0?i=a[n]:a[n]!==!0&&(r=o[0],u.splice(s--,0,r));break}if(i!==!0)if(i&&e["throws"])t=i(t);else try{t=i(t)}catch(c){return{state:"parsererror",error:i?c:"No conversion from "+l+" to "+r}}}l=r}return{state:"success",data:t}}b.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/(?:java|ecma)script/},converters:{"text script":function(e){return b.globalEval(e),e}}}),b.ajaxPrefilter("script",function(e){e.cache===t&&(e.cache=!1),e.crossDomain&&(e.type="GET",e.global=!1)}),b.ajaxTransport("script",function(e){if(e.crossDomain){var n,r=o.head||b("head")[0]||o.documentElement;return{send:function(t,i){n=o.createElement("script"),n.async=!0,e.scriptCharset&&(n.chars et=e.scriptCharset),n.src=e.url,n.onload=n.onreadystatechange=function(e,t){(t||!n.readyState||/loaded|complete/.test(n.readyState))&&(n.onload=n.onreadystatechange=null,n.parentNode&&n.parentNode.removeChild(n),n=null,t||i(200,"success"))},r.insertBefore(n,r.firstChild)},abort:function(){n&&n.onload(t,!0)}}}});var On=[],Bn=/(=)?(?=&|$)|??/;b.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var e=On.pop()||b.expando+"_"+vn++;return this[e]=!0,e}}),b.ajaxPrefilter("json jsonp",function(n,r,i){var o,a,s,u=n.jsonp!==!1&&(Bn.test(n.url)?"url":"string"==typeof n.data&&!(n.contentType||"").indexOf("application/x-www-form-urlencoded")&&Bn.test(n.data)&&"data");return u||"jsonp"===n.dataTypes[0]?(o=n.jsonpCallback=b.isFunction(n.jsonpCallback)?n.jsonpCallback():n.jsonpCallback,u?n[u]=n[u].replace(Bn,"$1"+o):n.jsonp!==!1&&(n.url+=(bn.test(n.url)?"&":"?")+n.jsonp+"="+o),n.converters["script json"]=function(){return s||b.error(o+" was not called"),s[0]},n.dataTypes[0]="json",a=e [o],e[o]=function(){s=arguments},i.always(function(){e[o]=a,n[o]&&(n.jsonpCallback=r.jsonpCallback,On.push(o)),s&&b.isFunction(a)&&a(s[0]),s=a=t}),"script"):t});var Pn,Rn,Wn=0,$n=e.ActiveXObject&&function(){var e;for(e in Pn)Pn[e](t,!0)};function 
In(){try{return new e.XMLHttpRequest}catch(t){}}function zn(){try{return new e.ActiveXObject("Microsoft.XMLHTTP")}catch(t){}}b.ajaxSettings.xhr=e.ActiveXObject?function(){return!this.isLocal&&In()||zn()}:In,Rn=b.ajaxSettings.xhr(),b.support.cors=!!Rn&&"withCredentials"in Rn,Rn=b.support.ajax=!!Rn,Rn&&b.ajaxTransport(function(n){if(!n.crossDomain||b.support.cors){var r;return{send:function(i,o){var a,s,u=n.xhr();if(n.username?u.open(n.type,n.url,n.async,n.username,n.password):u.open(n.type,n.url,n.async),n.xhrFields)for(s in n.xhrFields)u[s]=n.xhrFields[s];n.mimeType&&u.overrideMimeType&&u.overrideMimeType(n.mimeType),n.crossDomain||i["X-Requested-With"]||(i["X-Requested-With"]="XMLHttpRequest");try{for(s in i)u.setRequestHeader(s,i[ s])}catch(l){}u.send(n.hasContent&&n.data||null),r=function(e,i){var s,l,c,p;try{if(r&&(i||4===u.readyState))if(r=t,a&&(u.onreadystatechange=b.noop,$n&&delete Pn[a]),i)4!==u.readyState&&u.abort();else{p={},s=u.status,l=u.getAllResponseHeaders(),"string"==typeof u.responseText&&(p.text=u.responseText);try{c=u.statusText}catch(f){c=""}s||!n.isLocal||n.crossDomain?1223===s&&(s=204):s=p.text?200:404}}catch(d){i||o(-1,d)}p&&o(s,c,p,l)},n.async?4===u.readyState?setTimeout(r):(a=++Wn,$n&&(Pn||(Pn={},b(e).unload($n)),Pn[a]=r),u.onreadystatechange=r):r()},abort:function(){r&&r(t,!0)}}}});var Xn,Un,Vn=/^(?:toggle|show|hide)$/,Yn=RegExp("^(?:([+-])=|)("+x+")([a-z%]*)$","i"),Jn=/queueHooks$/,Gn=[nr],Qn={"*":[function(e,t){var n,r,i=this.createTween(e,t),o=Yn.exec(t),a=i.cur(),s=+a||0,u=1,l=20;if(o){if(n=+o[2],r=o[3]||(b.cssNumber[e]?"":"px"),"px"!==r&&s){s=b.css(i.elem,e,!0)||n||1;do u=u||".5",s/=u,b.style(i.elem,e,s+r);while(u!==(u=i.cur()/a)&&1!==u&&--l)}i.unit=r,i.start=s,i.end=o[1]? 
s+(o[1]+1)*n:n}return i}]};function Kn(){return setTimeout(function(){Xn=t}),Xn=b.now()}function Zn(e,t){b.each(t,function(t,n){var r=(Qn[t]||[]).concat(Qn["*"]),i=0,o=r.length;for(;o>i;i++)if(r[i].call(e,t,n))return})}function er(e,t,n){var r,i,o=0,a=Gn.length,s=b.Deferred().always(function(){delete u.elem}),u=function(){if(i)return!1;var t=Xn||Kn(),n=Math.max(0,l.startTime+l.duration-t),r=n/l.duration||0,o=1-r,a=0,u=l.tweens.length;for(;u>a;a++)l.tweens[a].run(o);return s.notifyWith(e,[l,o,n]),1>o&&u?n:(s.resolveWith(e,[l]),!1)},l=s.promise({elem:e,props:b.extend({},t),opts:b.extend(!0,{specialEasing:{}},n),originalProperties:t,originalOptions:n,startTime:Xn||Kn(),duration:n.duration,tweens:[],createTween:function(t,n){var r=b.Tween(e,l.opts,t,n,l.opts.specialEasing[t]||l.opts.easing);return l.tweens.push(r),r},stop:function(t){var n=0,r=t?l.tweens.length:0;if(i)return this;for(i=!0;r>n;n++)l.tweens[n].run(1);return t?s.resolveWith(e,[l,t]):s.rejectWith(e,[l,t]),this}}),c= l.props;for(tr(c,l.opts.specialEasing);a>o;o++)if(r=Gn[o].call(l,e,c,l.opts))return r;return Zn(l,c),b.isFunction(l.opts.start)&&l.opts.start.call(e,l),b.fx.timer(b.extend(u,{elem:e,anim:l,queue:l.opts.queue})),l.progress(l.opts.progress).done(l.opts.done,l.opts.complete).fail(l.opts.fail).always(l.opts.always)}function tr(e,t){var n,r,i,o,a;for(i in e)if(r=b.camelCase(i),o=t[r],n=e[i],b.isArray(n)&&(o=n[1],n=e[i]=n[0]),i!==r&&(e[r]=n,delete e[i]),a=b.cssHooks[r],a&&"expand"in a){n=a.expand(n),delete e[r];for(i in n)i in e||(e[i]=n[i],t[i]=o)}else t[r]=o}b.Animation=b.extend(er,{tweener:function(e,t){b.isFunction(e)?(t=e,e=["*"]):e=e.split(" ");var n,r=0,i=e.length;for(;i>r;r++)n=e[r],Qn[n]=Qn[n]||[],Qn[n].unshift(t)},prefilter:function(e,t){t?Gn.unshift(e):Gn.push(e)}});function nr(e,t,n){var r,i,o,a,s,u,l,c,p,f=this,d=e.style,h={},g=[],m=e.nodeType&&nn(e);n.queue||(c=b._queueHooks(e,"fx"),null==c.unqueued&&(c.unqueued=0,p=c.empty.fire,c.empty.fire=function(){c.unqueued||p( 
)}),c.unqueued++,f.always(function(){f.always(function(){c.unqueued--,b.queue(e,"fx").length||c.empty.fire()})})),1===e.nodeType&&("height"in t||"width"in t)&&(n.overflow=[d.overflow,d.overflowX,d.overflowY],"inline"===b.css(e,"display")&&"none"===b.css(e,"float")&&(b.support.inlineBlockNeedsLayout&&"inline"!==un(e.nodeName)?d.zoom=1:d.display="inline-block")),n.overflow&&(d.overflow="hidden",b.support.shrinkWrapBlocks||f.always(function(){d.overflow=n.overflow[0],d.overflowX=n.overflow[1],d.overflowY=n.overflow[2]}));for(i in t)if(a=t[i],Vn.exec(a)){if(delete t[i],u=u||"toggle"===a,a===(m?"hide":"show"))continue;g.push(i)}if(o=g.length){s=b._data(e,"fxshow")||b._data(e,"fxshow",{}),"hidden"in s&&(m=s.hidden),u&&(s.hidden=!m),m?b(e).show():f.done(function(){b(e).hide()}),f.done(function(){var t;b._removeData(e,"fxshow");for(t in h)b.style(e,t,h[t])});for(i=0;o>i;i++)r=g[i],l=f.createTween(r,m?s[r]:0),h[r]=s[r]||b.style(e,r),r in s||(s[r]=l.start,m&&(l.end=l.start,l.start="wi dth"===r||"height"===r?1:0))}}function rr(e,t,n,r,i){return new rr.prototype.init(e,t,n,r,i)}b.Tween=rr,rr.prototype={constructor:rr,init:function(e,t,n,r,i,o){this.elem=e,this.prop=n,this.easing=i||"swing",this.options=t,this.start=this.now=this.cur(),this.end=r,this.unit=o||(b.cssNumber[n]?"":"px")},cur:function(){var e=rr.propHooks[this.prop];return e&&e.get?e.get(this):rr.propHooks._default.get(this)},run:function(e){var t,n=rr.propHooks[this.prop];return this.pos=t=this.options.duration?b.easing[this.easing](e,this.options.duration*e,0,1,this.options.duration):e,this.now=(this.end-this.start)*t+this.start,this.options.step&&this.options.step.call(this.elem,this.now,this),n&&n.set?n.set(this):rr.propHooks._default.set(this),this}},rr.prototype.init.prototype=rr.prototype,rr.propHooks={_default:{get:function(e){var t;return null==e.elem[e.prop]||e.elem.style&&null!=e.elem.style[e.prop]?(t=b.css(e.elem,e.prop,""),t&&"auto"!==t?t:0):e.elem[e.prop]},set:function(e){b.fx.step 
[e.prop]?b.fx.step[e.prop](e):e.elem.style&&(null!=e.elem.style[b.cssProps[e.prop]]||b.cssHooks[e.prop])?b.style(e.elem,e.prop,e.now+e.unit):e.elem[e.prop]=e.now}}},rr.propHooks.scrollTop=rr.propHooks.scrollLeft={set:function(e){e.elem.nodeType&&e.elem.parentNode&&(e.elem[e.prop]=e.now)}},b.each(["toggle","show","hide"],function(e,t){var n=b.fn[t];b.fn[t]=function(e,r,i){return null==e||"boolean"==typeof e?n.apply(this,arguments):this.animate(ir(t,!0),e,r,i)}}),b.fn.extend({fadeTo:function(e,t,n,r){return this.filter(nn).css("opacity",0).show().end().animate({opacity:t},e,n,r)},animate:function(e,t,n,r){var i=b.isEmptyObject(e),o=b.speed(t,n,r),a=function(){var t=er(this,b.extend({},e),o);a.finish=function(){t.stop(!0)},(i||b._data(this,"finish"))&&t.stop(!0)};return a.finish=a,i||o.queue===!1?this.each(a):this.queue(o.queue,a)},stop:function(e,n,r){var i=function(e){var t=e.stop;delete e.stop,t(r)};return"string"!=typeof e&&(r=n,n=e,e=t),n&&e!==!1&&this.queue(e||"fx",[]),th is.each(function(){var t=!0,n=null!=e&&e+"queueHooks",o=b.timers,a=b._data(this);if(n)a[n]&&a[n].stop&&i(a[n]);else for(n in a)a[n]&&a[n].stop&&Jn.test(n)&&i(a[n]);for(n=o.length;n--;)o[n].elem!==this||null!=e&&o[n].queue!==e||(o[n].anim.stop(r),t=!1,o.splice(n,1));(t||!r)&&b.dequeue(this,e)})},finish:function(e){return e!==!1&&(e=e||"fx"),this.each(function(){var t,n=b._data(this),r=n[e+"queue"],i=n[e+"queueHooks"],o=b.timers,a=r?r.length:0;for(n.finish=!0,b.queue(this,e,[]),i&&i.cur&&i.cur.finish&&i.cur.finish.call(this),t=o.length;t--;)o[t].elem===this&&o[t].queue===e&&(o[t].anim.stop(!0),o.splice(t,1));for(t=0;a>t;t++)r[t]&&r[t].finish&&r[t].finish.call(this);delete n.finish})}});function ir(e,t){var n,r={height:e},i=0;for(t=t?1:0;4>i;i+=2-t)n=Zt[i],r["margin"+n]=r["padding"+n]=e;return t&&(r.opacity=r.width=e),r}b.each({slideDown:ir("show"),slideUp:ir("hide"),slideToggle:ir("toggle"),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},functio 
n(e,t){b.fn[e]=function(e,n,r){return this.animate(t,e,n,r)}}),b.speed=function(e,t,n){var r=e&&"object"==typeof e?b.extend({},e):{complete:n||!n&&t||b.isFunction(e)&&e,duration:e,easing:n&&t||t&&!b.isFunction(t)&&t};return r.duration=b.fx.off?0:"number"==typeof r.duration?r.duration:r.duration in b.fx.speeds?b.fx.speeds[r.duration]:b.fx.speeds._default,(null==r.queue||r.queue===!0)&&(r.queue="fx"),r.old=r.complete,r.complete=function(){b.isFunction(r.old)&&r.old.call(this),r.queue&&b.dequeue(this,r.queue)},r},b.easing={linear:function(e){return e},swing:function(e){return.5-Math.cos(e*Math.PI)/2}},b.timers=[],b.fx=rr.prototype.init,b.fx.tick=function(){var e,n=b.timers,r=0;for(Xn=b.now();n.length>r;r++)e=n[r],e()||n[r]!==e||n.splice(r--,1);n.length||b.fx.stop(),Xn=t},b.fx.timer=function(e){e()&&b.timers.push(e)&&b.fx.start()},b.fx.interval=13,b.fx.start=function(){Un||(Un=setInterval(b.fx.tick,b.fx.interval))},b.fx.stop=function(){clearInterval(Un),Un=null},b.fx.speeds={slo w:600,fast:200,_default:400},b.fx.step={},b.expr&&b.expr.filters&&(b.expr.filters.animated=function(e){return b.grep(b.timers,function(t){return e===t.elem}).length}),b.fn.offset=function(e){if(arguments.length)return e===t?this:this.each(function(t){b.offset.setOffset(this,e,t)});var n,r,o={top:0,left:0},a=this[0],s=a&&a.ownerDocument;if(s)return n=s.documentElement,b.contains(n,a)?(typeof a.getBoundingClientRect!==i&&(o=a.getBoundingClientRect()),r=or(s),{top:o.top+(r.pageYOffset||n.scrollTop)-(n.clientTop||0),left:o.left+(r.pageXOffset||n.scrollLeft)-(n.clientLeft||0)}):o},b.offset={setOffset:function(e,t,n){var r=b.css(e,"position");"static"===r&&(e.style.position="relative");var i=b(e),o=i.offset(),a=b.css(e,"top"),s=b.css(e,"left"),u=("absolute"===r||"fixed"===r)&&b.inArray("auto",[a,s])>-1,l={},c={},p,f;u?(c=i.position(),p=c.top,f=c.left):(p=parseFloat(a)||0,f=parseFloat(s)||0),b.isFunction(t)&&(t=t.call(e,n,o)),null!=t.top&&(l.top=t.top-o.top+p),null!=t.left&&(l.left 
=t.left-o.left+f),"using"in t?t.using.call(e,l):i.css(l)}},b.fn.extend({position:function(){if(this[0]){var e,t,n={top:0,left:0},r=this[0];return"fixed"===b.css(r,"position")?t=r.getBoundingClientRect():(e=this.offsetParent(),t=this.offset(),b.nodeName(e[0],"html")||(n=e.offset()),n.top+=b.css(e[0],"borderTopWidth",!0),n.left+=b.css(e[0],"borderLeftWidth",!0)),{top:t.top-n.top-b.css(r,"marginTop",!0),left:t.left-n.left-b.css(r,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var e=this.offsetParent||o.documentElement;while(e&&!b.nodeName(e,"html")&&"static"===b.css(e,"position"))e=e.offsetParent;return e||o.documentElement})}}),b.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(e,n){var r=/Y/.test(n);b.fn[e]=function(i){return b.access(this,function(e,i,o){var a=or(e);return o===t?a?n in a?a[n]:a.document.documentElement[i]:e[i]:(a?a.scrollTo(r?b(a).scrollLeft():o,r?o:b(a).scrollTop()):e[i]=o,t)},e,i,arguments.length,null)}});function or (e){return b.isWindow(e)?e:9===e.nodeType?e.defaultView||e.parentWindow:!1}b.each({Height:"height",Width:"width"},function(e,n){b.each({padding:"inner"+e,content:n,"":"outer"+e},function(r,i){b.fn[i]=function(i,o){var a=arguments.length&&(r||"boolean"!=typeof i),s=r||(i===!0||o===!0?"margin":"border");return b.access(this,function(n,r,i){var o;return b.isWindow(n)?n.document.documentElement["client"+e]:9===n.nodeType?(o=n.documentElement,Math.max(n.body["scroll"+e],o["scroll"+e],n.body["offset"+e],o["offset"+e],o["client"+e])):i===t?b.css(n,r,s):b.style(n,r,i,s)},n,a?i:t,a,null)}})}),e.jQuery=e.$=b,"function"==typeof define&&define.amd&&define.amd.jQuery&&define("jquery",[],function(){return b})})(window); \ No newline at end of file diff --git a/modules/enterprise/gui/coregui/src/main/webapp/js/rhq.js b/modules/enterprise/gui/coregui/src/main/webapp/js/rhq.js index ef65e69..7e89bb1 100644 --- a/modules/enterprise/gui/coregui/src/main/webapp/js/rhq.js +++ 
b/modules/enterprise/gui/coregui/src/main/webapp/js/rhq.js @@ -1,12 +1,7 @@ /** - * Charting Javascript Functions. + * RHQ Charting Javascript Functions. */
-// Handle browsers not supporting console object -if (!window.console) window.console = {}; -if (!window.console.log) window.console.log = function () { -}; - /** * ChartContext Constructor Object * Contains all of the data required to render a chart. @@ -38,13 +33,13 @@ if (!window.console.log) window.console.log = function () { * @param singleValueLabel * @param chartXaxisTimeFormatHours * @param chartXaxisTimeFormatHoursMinutes - * @param showLegend + * @param hideLegend * @constructor */ var ChartContext = function (chartId, chartHeight, metricsData, xAxisLabel, chartTitle, yAxisUnits, minChartTitle, avgChartTitle, peakChartTitle, dateLabel, timeLabel, downLabel, unknownLabel, noDataLabel, hoverStartLabel, hoverEndLabel, hoverPeriodLabel, hoverBarLabel, chartHoverTimeFormat, chartHoverDateFormat, isPortalGraph, portalId, buttonBarDateTimeFormat, singleValueLabel, chartXaxisTimeFormatHours, chartXaxisTimeFormatHoursMinutes, hideLegend) { "use strict"; if (!(this instanceof ChartContext)) { - throw new Error("ChartContext function cannot be called as a function.") + throw new Error("ChartContext function cannot be called as a function."); } this.chartId = chartId; this.chartHeight = chartHeight; @@ -103,7 +98,7 @@ var ChartContext = function (chartId, chartHeight, metricsData, xAxisLabel, char AvailChartContext = function (chartId, availData, dateLabel, timeLabel, hoverStartLabel, hoverBarLabel, availabilityLabel, chartHoverTimeFormat, chartHoverDateFormat, chartTitle, chartUpLabel, chartDownLabel, chartXaxisTimeFormatHours, chartXaxisTimeFormatHoursMinutes) { "use strict"; if (!(this instanceof AvailChartContext)) { - throw new Error("AvailChartContext function cannot be called as a function.") + throw new Error("AvailChartContext function cannot be called as a function."); } this.chartId = chartId; this.chartHandle = "#availChart-" + this.chartId; @@ -133,12 +128,13 @@ var ChartContext = function (chartId, chartHeight, metricsData, xAxisLabel, char 
GraphDateContext = function (startDate, endDate) { "use strict"; if (!(this instanceof GraphDateContext)) { - throw new Error("GraphDateContext function cannot be called as a function.") + throw new Error("GraphDateContext function cannot be called as a function."); } this.startDate = startDate; this.endDate = endDate; }, rhqCommon = (function () { + "use strict";
var timeFormat = function (formats) { @@ -146,7 +142,7 @@ var ChartContext = function (chartId, chartHeight, metricsData, xAxisLabel, char var i = formats.length - 1, f = formats[i]; while (!f[1](date)) f = formats[--i]; return f[0](date); - } + }; };
return { @@ -179,7 +175,7 @@ var ChartContext = function (chartId, chartHeight, metricsData, xAxisLabel, char ]); }
- } + }; })();
commit abc2065d98ddcfc1dde13f116190c63db70d9122 Author: Thomas Segismont tsegismo@redhat.com Date: Thu Aug 8 17:02:15 2013 +0200
Bug 994640 - Postfix Server's Banner basic configuration property "default" value is empty
Banner property is no longer required
Additionally, Postfix server resource type now has a process scan (component and discovery component updated). This allows Postfix resources: * to be discovered only if a server is running * to give a real availability value (always answered UP before)
diff --git a/modules/core/plugin-api/src/main/java/org/rhq/core/pluginapi/inventory/DiscoveredResourceDetails.java b/modules/core/plugin-api/src/main/java/org/rhq/core/pluginapi/inventory/DiscoveredResourceDetails.java index f04c757..25d4d1f 100644 --- a/modules/core/plugin-api/src/main/java/org/rhq/core/pluginapi/inventory/DiscoveredResourceDetails.java +++ b/modules/core/plugin-api/src/main/java/org/rhq/core/pluginapi/inventory/DiscoveredResourceDetails.java @@ -1,29 +1,26 @@ /* * RHQ Management Platform - * Copyright (C) 2005-2012 Red Hat, Inc. + * Copyright (C) 2005-2013 Red Hat, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation, and/or the GNU Lesser - * General Public License, version 2.1, also as published by the Free - * Software Foundation. + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License and the GNU Lesser General Public License - * for more details. + * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * and the GNU Lesser General Public License along with this program; - * if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ package org.rhq.core.pluginapi.inventory;
import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; + import org.rhq.core.domain.configuration.Configuration; import org.rhq.core.domain.configuration.definition.ConfigurationDefinition; import org.rhq.core.domain.configuration.definition.ConfigurationTemplate; @@ -276,6 +273,10 @@ public class DiscoveredResourceDetails { return processInfo; }
+ public void setProcessInfo(ProcessInfo processInfo) { + this.processInfo = processInfo; + } + /** * Defines the discovered resource's plugin configuration. You normally call {@link #getPluginConfiguration()} first * to get a copy of the resource's default plugin configuration, and then modify that default configuration with @@ -359,4 +360,4 @@ public class DiscoveredResourceDetails { // There is no default plugin config template defined - return an empty one. return new Configuration(); } -} \ No newline at end of file +} diff --git a/modules/plugins/postfix/src/main/java/org/rhq/plugins/postfix/PostfixServerComponent.java b/modules/plugins/postfix/src/main/java/org/rhq/plugins/postfix/PostfixServerComponent.java index 7a91c24..484b0cd 100644 --- a/modules/plugins/postfix/src/main/java/org/rhq/plugins/postfix/PostfixServerComponent.java +++ b/modules/plugins/postfix/src/main/java/org/rhq/plugins/postfix/PostfixServerComponent.java @@ -1,6 +1,6 @@ /* * RHQ Management Platform - * Copyright (C) 2005-2008 Red Hat, Inc. + * Copyright (C) 2005-2013 Red Hat, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify @@ -13,11 +13,14 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ package org.rhq.plugins.postfix;
+import static org.rhq.core.domain.measurement.AvailabilityType.DOWN; +import static org.rhq.core.domain.measurement.AvailabilityType.UP; + import net.augeas.Augeas;
import org.rhq.core.domain.configuration.Configuration; @@ -27,24 +30,38 @@ import org.rhq.core.domain.configuration.definition.PropertySimpleType; import org.rhq.core.domain.measurement.AvailabilityType; import org.rhq.core.pluginapi.configuration.ConfigurationUpdateReport; import org.rhq.core.pluginapi.inventory.ResourceContext; +import org.rhq.core.system.ProcessInfo; +import org.rhq.core.system.ProcessInfo.ProcessInfoSnapshot; import org.rhq.plugins.augeas.AugeasConfigurationComponent; import org.rhq.plugins.augeas.helper.AugeasNode;
-/** - * TODO - */ public class PostfixServerComponent extends AugeasConfigurationComponent {
+ private ProcessInfo processInfo; + public void start(ResourceContext resourceContext) throws Exception { super.start(resourceContext); + processInfo = resourceContext.getNativeProcess(); }
public void stop() { + processInfo = null; super.stop(); }
public AvailabilityType getAvailability() { - return super.getAvailability(); + ProcessInfoSnapshot processInfoSnapshot = getProcessInfoSnapshot(); + return (processInfoSnapshot != null && processInfoSnapshot.isRunning()) ? UP : DOWN; + } + + private ProcessInfoSnapshot getProcessInfoSnapshot() { + ProcessInfoSnapshot processInfoSnapshot = (processInfo == null) ? null : processInfo.freshSnapshot(); + if (processInfoSnapshot == null || !processInfoSnapshot.isRunning()) { + processInfo = getResourceContext().getNativeProcess(); + // Safe to get prior snapshot here, we've just recreated the process info instance + processInfoSnapshot = (processInfo == null) ? null : processInfo.priorSnaphot(); + } + return processInfoSnapshot; }
public Configuration loadResourceConfiguration() throws Exception { @@ -74,34 +91,4 @@ public class PostfixServerComponent extends AugeasConfigurationComponent { return super.toNodeValue(augeas, node, propDefSimple, propSimple); }
- /* - - @Override - public CreateResourceReport createResource(CreateResourceReport reportIn) { - CreateResourceReport report = reportIn; - Configuration config = report.getResourceConfiguration(); - String name = config.getSimple(SambaShareComponent.NAME_RESOURCE_CONFIG_PROP).getStringValue(); - report.setResourceKey(name); - report.setResourceName(name); - return super.createResource(report); - } - @Override - protected String getChildResourceConfigurationRootPath(ResourceType resourceType, Configuration resourceConfig) { - if (resourceType.getName().equals(SambaShareComponent.RESOURCE_TYPE_NAME)) { - String targetName = resourceConfig.getSimple(SambaShareComponent.NAME_RESOURCE_CONFIG_PROP).getStringValue(); - return "/files/etc/samba/smb.conf/target[.='" + targetName + "']"; - } else { - throw new IllegalArgumentException("Unsupported child Resource type: " + resourceType); - } - } - @Override - protected String getChildResourceConfigurationRootLabel(ResourceType resourceType, Configuration resourceConfig) { - if (resourceType.getName().equals(SambaShareComponent.RESOURCE_TYPE_NAME)) { - return resourceConfig.getSimple(SambaShareComponent.NAME_RESOURCE_CONFIG_PROP).getStringValue(); - } else { - throw new IllegalArgumentException("Unsupported child Resource type: " + resourceType); - } - } - */ - } diff --git a/modules/plugins/postfix/src/main/java/org/rhq/plugins/postfix/PostfixServerDiscoveryComponent.java b/modules/plugins/postfix/src/main/java/org/rhq/plugins/postfix/PostfixServerDiscoveryComponent.java index 91490c0..3f42da5 100644 --- a/modules/plugins/postfix/src/main/java/org/rhq/plugins/postfix/PostfixServerDiscoveryComponent.java +++ b/modules/plugins/postfix/src/main/java/org/rhq/plugins/postfix/PostfixServerDiscoveryComponent.java @@ -1,6 +1,6 @@ /* * RHQ Management Platform - * Copyright (C) 2005-2008 Red Hat, Inc. + * Copyright (C) 2005-2013 Red Hat, Inc. * All rights reserved. 
* * This program is free software; you can redistribute it and/or modify @@ -13,34 +13,50 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ package org.rhq.plugins.postfix;
+import java.io.BufferedReader; +import java.io.File; +import java.io.FileInputStream; +import java.io.InputStreamReader; +import java.util.Collections; +import java.util.List; +import java.util.Set; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + import org.rhq.core.domain.configuration.Configuration; import org.rhq.core.domain.configuration.PropertySimple; import org.rhq.core.pluginapi.inventory.DiscoveredResourceDetails; import org.rhq.core.pluginapi.inventory.InvalidPluginConfigurationException; +import org.rhq.core.pluginapi.inventory.ProcessScanResult; import org.rhq.core.pluginapi.inventory.ResourceDiscoveryContext; import org.rhq.core.util.stream.StreamUtil; import org.rhq.plugins.augeas.AugeasConfigurationComponent; import org.rhq.plugins.augeas.AugeasConfigurationDiscoveryComponent;
-import java.io.BufferedReader; -import java.io.File; -import java.io.FileInputStream; -import java.io.InputStreamReader; -import java.util.Set; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - public class PostfixServerDiscoveryComponent extends AugeasConfigurationDiscoveryComponent {
+ private static final Log LOG = LogFactory.getLog(PostfixServerDiscoveryComponent.class); + private static final Pattern HOSTNAME_PATTERN = Pattern.compile("[\s]*myhostname[\s]*=[\s]*([^$].*)[\s]*");
public Set discoverResources(ResourceDiscoveryContext resourceDiscoveryContext) - throws InvalidPluginConfigurationException, Exception { + throws InvalidPluginConfigurationException, Exception { + List discoveredProcesses = resourceDiscoveryContext.getAutoDiscoveredProcesses(); + if (discoveredProcesses.isEmpty()) { + return Collections.emptySet(); + } + if (discoveredProcesses.size() != 1) { + LOG.warn("Found more than one Postfix process running"); + return Collections.emptySet(); + } Set<DiscoveredResourceDetails> resources = super.discoverResources(resourceDiscoveryContext); for (DiscoveredResourceDetails detail : resources) { Configuration config = detail.getPluginConfiguration(); @@ -54,6 +70,7 @@ public class PostfixServerDiscoveryComponent extends AugeasConfigurationDiscover resourceName = resourceDiscoveryContext.getSystemInformation().getHostname(); } detail.setResourceName(resourceName); + detail.setProcessInfo(((ProcessScanResult) discoveredProcesses.get(0)).getProcessInfo()); } return resources; } diff --git a/modules/plugins/postfix/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/postfix/src/main/resources/META-INF/rhq-plugin.xml index 00366ff..3831fa8 100644 --- a/modules/plugins/postfix/src/main/resources/META-INF/rhq-plugin.xml +++ b/modules/plugins/postfix/src/main/resources/META-INF/rhq-plugin.xml @@ -28,6 +28,8 @@ default="/files/etc/postfix/main.cf"/> </plugin-configuration>
+ <process-scan query="process|name|match=.*postfix/master.*" /> + <resource-configuration> <c:group name="Basic Configurations"> <c:simple-property displayName="Hostname" name="myhostname" description="Server's fully qualified domain name" required="false"/> @@ -35,7 +37,7 @@ <c:simple-property displayName="Networks" name="mynetworks" description="List of clients that can relay mail" required="false"/> <c:simple-property displayName="Destinations" name="mydestination" description="Space delimited list of domains for which server will accept delivered. Postfix must be restarted." required="false"/> <c:simple-property displayName="Interfaces" name="inet_interfaces" description="Network interfaces that Postfix will bind to" required="false"/> - <c:simple-property displayName="Banner" name="smtpd_banner" description="SMTP Banner (RFC requires hostname and ESMTP prompt)" default="$myhostname ESMTP" required="true"/> + <c:simple-property displayName="Banner" name="smtpd_banner" description="SMTP Banner (RFC requires hostname and ESMTP prompt)" default="$myhostname ESMTP" required="false"/> </c:group> <c:group name="Security"> <c:simple-property displayName="Suppress VRFY" name="disable_vrfy_command" type="boolean" default="true" required="false" description="Supress Response to SMTP VRFY requests by default"/> diff --git a/modules/plugins/postfix/src/test/java/org/rhq/plugins/postfix/PostfixComponentTest.java b/modules/plugins/postfix/src/test/java/org/rhq/plugins/postfix/PostfixComponentTest.java index 6724673..da271b2 100644 --- a/modules/plugins/postfix/src/test/java/org/rhq/plugins/postfix/PostfixComponentTest.java +++ b/modules/plugins/postfix/src/test/java/org/rhq/plugins/postfix/PostfixComponentTest.java @@ -1,6 +1,6 @@ /* * RHQ Management Platform - * Copyright (C) 2005-2008 Red Hat, Inc. + * Copyright (C) 2005-2013 Red Hat, Inc. * All rights reserved. 
* * This program is free software; you can redistribute it and/or modify @@ -13,8 +13,8 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ package org.rhq.plugins.postfix;
@@ -23,7 +23,7 @@ import org.rhq.core.domain.configuration.PropertySimple; import org.rhq.plugins.augeas.test.AbstractAugeasConfigurationComponentTest;
/** - * An integration test for {@link PostfixComponent}. + * An integration test for {@link PostfixServerComponent}. */ public class PostfixComponentTest extends AbstractAugeasConfigurationComponentTest { @Override
commit 261efe8c0dd989e2dfda61a496d37d6ae23acbab Author: Stefan Negrea snegrea@redhat.com Date: Thu Aug 8 00:48:47 2013 -0500
Fix rebase issues due to git merge tree.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 423218a..ba94bb7 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -28,8 +28,8 @@ import java.net.InetAddress; import java.net.UnknownHostException; import java.util.ArrayList; import java.util.Arrays; -import java.util.Date; import java.util.HashMap; +import java.util.LinkedHashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; @@ -46,15 +46,11 @@ import javax.persistence.TypedQuery;
import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.quartz.JobDataMap; -import org.quartz.SimpleTrigger; -import org.quartz.Trigger;
import org.rhq.cassandra.schema.SchemaManager; import org.rhq.core.domain.alert.Alert; import org.rhq.core.domain.auth.Subject; import org.rhq.core.domain.authz.Permission; -import org.rhq.core.domain.cloud.Server; import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.cloud.StorageNode.OperationMode; import org.rhq.core.domain.cloud.StorageNodeConfigurationComposite; @@ -69,6 +65,7 @@ import org.rhq.core.domain.criteria.ResourceOperationHistoryCriteria; import org.rhq.core.domain.criteria.StorageNodeCriteria; import org.rhq.core.domain.measurement.MeasurementAggregate; import org.rhq.core.domain.measurement.MeasurementUnits; +import org.rhq.core.domain.measurement.composite.MeasurementDataNumericHighLowComposite; import org.rhq.core.domain.operation.OperationRequestStatus; import org.rhq.core.domain.operation.ResourceOperationHistory; import org.rhq.core.domain.operation.bean.GroupOperationSchedule; @@ -85,7 +82,6 @@ import org.rhq.enterprise.server.alert.AlertManagerLocal; import org.rhq.enterprise.server.auth.SubjectManagerLocal; import org.rhq.enterprise.server.authz.RequiredPermission; import org.rhq.enterprise.server.authz.RequiredPermissions; -import org.rhq.enterprise.server.cloud.instance.ServerManagerLocal; import org.rhq.enterprise.server.configuration.ConfigurationManagerLocal; import org.rhq.enterprise.server.measurement.MeasurementDataManagerLocal; import org.rhq.enterprise.server.operation.OperationManagerLocal; @@ -94,8 +90,8 @@ import org.rhq.enterprise.server.resource.ResourceTypeManagerLocal; import org.rhq.enterprise.server.resource.group.ResourceGroupManagerLocal; import org.rhq.enterprise.server.rest.reporting.MeasurementConverter; import org.rhq.enterprise.server.scheduler.SchedulerLocal; -import org.rhq.enterprise.server.scheduler.jobs.StorageNodeMaintenanceJob; -import org.rhq.enterprise.server.storage.StorageConfigurationException; +import org.rhq.enterprise.server.storage.StorageClusterSettings; +import 
org.rhq.enterprise.server.storage.StorageClusterSettingsManagerBean; import org.rhq.enterprise.server.util.CriteriaQueryGenerator; import org.rhq.enterprise.server.util.CriteriaQueryRunner; import org.rhq.enterprise.server.util.LookupUtil; @@ -118,6 +114,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN private final static String SEEDS_LIST = "seedsList";
private static final String RHQ_STORAGE_CQL_PORT_PROPERTY = "nativeTransportPort"; + private static final String RHQ_STORAGE_GOSSIP_PORT_PROPERTY = "storagePort"; private static final String RHQ_STORAGE_JMX_PORT_PROPERTY = "jmxPort"; private static final String RHQ_STORAGE_ADDRESS_PROPERTY = "host";
@@ -126,6 +123,18 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN private static final String UPDATE_CONFIGURATION_OPERATION = "updateConfiguration"; private static final String RESTART_OPERATION = "restart";
+ // metric names on Storage Service resource + private static final String METRIC_TOKENS = "Tokens", METRIC_OWNERSHIP = "Ownership"; + private static final String METRIC_DATA_DISK_USED_PERCENTAGE = "Calculated.DataDiskUsedPercentage"; + private static final String METRIC_TOTAL_DISK_USED_PERCENTAGE = "Calculated.TotalDiskUsedPercentage"; + private static final String METRIC_FREE_DISK_TO_DATA_RATIO = "Calculated.FreeDiskToDataSizeRatio"; + private static final String METRIC_LOAD = "Load", METRIC_KEY_CACHE_SIZE = "KeyCacheSize", + METRIC_ROW_CACHE_SIZE = "RowCacheSize", METRIC_TOTAL_COMMIT_LOG_SIZE = "TotalCommitlogSize"; + + //metric names on Memory Subsystem resource + private static final String METRIC_HEAP_COMMITED = "{HeapMemoryUsage.committed}", + METRIC_HEAP_USED = "{HeapMemoryUsage.used}", METRIC_HEAP_USED_PERCENTAGE = "Calculated.HeapUsagePercentage"; + @PersistenceContext(unitName = RHQConstants.PERSISTENCE_UNIT_NAME) private EntityManager entityManager;
@@ -159,10 +168,13 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN @EJB private ResourceManagerLocal resourceManager;
+ @EJB + private StorageClusterSettingsManagerBean storageClusterSettingsManager; + @Override public void linkResource(Resource resource) { - Configuration resourceConfig = resource.getPluginConfiguration(); - String address = resourceConfig.getSimpleValue(RHQ_STORAGE_ADDRESS_PROPERTY); + Configuration pluginConfig = resource.getPluginConfiguration(); + String address = pluginConfig.getSimpleValue(RHQ_STORAGE_ADDRESS_PROPERTY);
if (log.isInfoEnabled()) { log.info("Linking " + resource + " to storage node at " + address); @@ -176,12 +188,13 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN } storageNode.setResource(resource); storageNode.setOperationMode(OperationMode.NORMAL); + initClusterSettingsIfNecessary(pluginConfig); addStorageNodeToGroup(resource); } else { storageNode = new StorageNode(); storageNode.setAddress(address); - storageNode.setCqlPort(Integer.parseInt(resourceConfig.getSimpleValue(RHQ_STORAGE_CQL_PORT_PROPERTY))); - storageNode.setJmxPort(Integer.parseInt(resourceConfig.getSimpleValue(RHQ_STORAGE_JMX_PORT_PROPERTY))); + storageNode.setCqlPort(Integer.parseInt(pluginConfig.getSimpleValue(RHQ_STORAGE_CQL_PORT_PROPERTY))); + storageNode.setJmxPort(Integer.parseInt(pluginConfig.getSimpleValue(RHQ_STORAGE_JMX_PORT_PROPERTY))); storageNode.setResource(resource); storageNode.setOperationMode(OperationMode.INSTALLED);
@@ -200,6 +213,31 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN } }
+ private void initClusterSettingsIfNecessary(Configuration pluginConfig) { + // TODO Need to handle non-repeatable reads here (probably a post 4.9 task) + // + // If a user deploys two storage nodes prior to installing the RHQ server, then we + // could end up in this method concurrently for both storage nodes. The settings + // would be committed for each node with the second commit winning. The problem is + // that is the cluster settings differ for the two nodes, it will be silently + // ignored. This scenario will happen infrequently so it should be sufficient to + // resolve it with optimistic locking. The second writer should fail with an + // OptimisticLockException. + + log.info("Initializing storage cluster settings"); + + StorageClusterSettings clusterSettings = storageClusterSettingsManager.getClusterSettings(subjectManager + .getOverlord()); + if (clusterSettings != null) { + log.info("Cluster settings have already been set. Skipping initialization."); + return; + } + clusterSettings = new StorageClusterSettings(); + clusterSettings.setCqlPort(Integer.parseInt(pluginConfig.getSimpleValue(RHQ_STORAGE_CQL_PORT_PROPERTY))); + clusterSettings.setGossipPort(Integer.parseInt(pluginConfig.getSimpleValue(RHQ_STORAGE_GOSSIP_PORT_PROPERTY))); + storageClusterSettingsManager.setClusterSettings(subjectManager.getOverlord(), clusterSettings); + } + private void announceNewNode(StorageNode newStorageNode) { if (log.isInfoEnabled()) { log.info("Announcing " + newStorageNode + " to storage node cluster."); @@ -321,106 +359,90 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN int resourceId = getResourceIdFromStorageNode(node); Map<String, Integer> scheduleIdsMap = new HashMap<String, Integer>();
- // get the schedule ids for Storage Service resource - final String tokensMetric = "Tokens", ownershipMetric = "Ownership"; - final String dataDiskUsedPercentageMetric = "Calculated.DataDiskUsedPercentage"; - final String totalDiskUsedPercentageMetric = "Calculated.TotalDiskUsedPercentage"; - final String freeDiskToDataRatioMetric = "Calculated.FreeDiskToDataSizeRatio"; - final String loadMetric = "Load", keyCacheSize = "KeyCacheSize", rowCacheSize = "RowCacheSize", totalCommitLogSize = "TotalCommitlogSize"; - TypedQuery<Object[]> query = entityManager.<Object[]> createNamedQuery( - StorageNode.QUERY_FIND_SCHEDULE_IDS_BY_PARENT_RESOURCE_ID_AND_MEASUREMENT_DEFINITION_NAMES, Object[].class); - query.setParameter("parrentId", resourceId).setParameter("metricNames", - Arrays.asList(tokensMetric, ownershipMetric, loadMetric, keyCacheSize, rowCacheSize, totalCommitLogSize, - dataDiskUsedPercentageMetric, totalDiskUsedPercentageMetric, freeDiskToDataRatioMetric)); - for (Object[] pair : query.getResultList()) { - scheduleIdsMap.put((String) pair[0], (Integer) pair[1]); + for (Object[] tupple : getStorageServiceScheduleIds(resourceId)) { + String definitionName = (String) tupple[0]; + Integer scheduleId = (Integer) tupple[2]; + scheduleIdsMap.put(definitionName, scheduleId); } - - // get the schedule ids for Memory Subsystem resource - final String heapCommittedMetric = "{HeapMemoryUsage.committed}", heapUsedMetric = "{HeapMemoryUsage.used}", heapUsedPercentageMetric = "Calculated.HeapUsagePercentage"; - query = entityManager.<Object[]> createNamedQuery( - StorageNode.QUERY_FIND_SCHEDULE_IDS_BY_GRANDPARENT_RESOURCE_ID_AND_MEASUREMENT_DEFINITION_NAMES, - Object[].class); - query.setParameter("grandparrentId", resourceId).setParameter("metricNames", - Arrays.asList(heapCommittedMetric, heapUsedMetric, heapUsedPercentageMetric)); - for (Object[] pair : query.getResultList()) { - scheduleIdsMap.put((String) pair[0], (Integer) pair[1]); + for (Object[] tupple : 
getMemorySubsystemScheduleIds(resourceId)) { + String definitionName = (String) tupple[0]; + Integer scheduleId = (Integer) tupple[2]; + scheduleIdsMap.put(definitionName, scheduleId); }
- StorageNodeLoadComposite result = new StorageNodeLoadComposite(node, beginTime, endTime); - MeasurementAggregate totalDiskUsedaggregate = new MeasurementAggregate(0d, 0d, 0d); + MeasurementAggregate totalDiskUsedAggregate = new MeasurementAggregate(0d, 0d, 0d); Integer scheduleId = null;
// find the aggregates and enrich the result instance if (!scheduleIdsMap.isEmpty()) { - if ((scheduleId = scheduleIdsMap.get(tokensMetric)) != null) { + if ((scheduleId = scheduleIdsMap.get(METRIC_TOKENS)) != null) { MeasurementAggregate tokensAggregate = measurementManager.getAggregate(subject, scheduleId, beginTime, endTime); result.setTokens(tokensAggregate); } - if ((scheduleId = scheduleIdsMap.get(ownershipMetric)) != null) { + if ((scheduleId = scheduleIdsMap.get(METRIC_OWNERSHIP)) != null) { StorageNodeLoadComposite.MeasurementAggregateWithUnits ownershipAggregateWithUnits = getMeasurementAggregateWithUnits( subject, scheduleId, MeasurementUnits.PERCENTAGE, beginTime, endTime); result.setActuallyOwns(ownershipAggregateWithUnits); }
//calculated disk space related metrics - if ((scheduleId = scheduleIdsMap.get(dataDiskUsedPercentageMetric)) != null) { + if ((scheduleId = scheduleIdsMap.get(METRIC_DATA_DISK_USED_PERCENTAGE)) != null) { StorageNodeLoadComposite.MeasurementAggregateWithUnits dataDiskUsedPercentageAggregateWithUnits = getMeasurementAggregateWithUnits( subject, scheduleId, MeasurementUnits.PERCENTAGE, beginTime, endTime); result.setDataDiskUsedPercentage(dataDiskUsedPercentageAggregateWithUnits); } - if ((scheduleId = scheduleIdsMap.get(totalDiskUsedPercentageMetric)) != null) { + if ((scheduleId = scheduleIdsMap.get(METRIC_TOTAL_DISK_USED_PERCENTAGE)) != null) { StorageNodeLoadComposite.MeasurementAggregateWithUnits totalDiskUsedPercentageAggregateWithUnits = getMeasurementAggregateWithUnits( subject, scheduleId, MeasurementUnits.PERCENTAGE, beginTime, endTime); result.setTotalDiskUsedPercentage(totalDiskUsedPercentageAggregateWithUnits); } - if ((scheduleId = scheduleIdsMap.get(freeDiskToDataRatioMetric)) != null) { + if ((scheduleId = scheduleIdsMap.get(METRIC_FREE_DISK_TO_DATA_RATIO)) != null) { MeasurementAggregate freeDiskToDataRatioAggregate = measurementManager.getAggregate(subject, scheduleId, beginTime, endTime); result.setFreeDiskToDataSizeRatio(freeDiskToDataRatioAggregate); }
- if ((scheduleId = scheduleIdsMap.get(loadMetric)) != null) { + if ((scheduleId = scheduleIdsMap.get(METRIC_LOAD)) != null) { StorageNodeLoadComposite.MeasurementAggregateWithUnits loadAggregateWithUnits = getMeasurementAggregateWithUnits( subject, scheduleId, MeasurementUnits.BYTES, beginTime, endTime); result.setLoad(loadAggregateWithUnits);
- updateAggregateTotal(totalDiskUsedaggregate, loadAggregateWithUnits.getAggregate()); - } - if ((scheduleId = scheduleIdsMap.get(keyCacheSize)) != null) { - updateAggregateTotal(totalDiskUsedaggregate, - measurementManager.getAggregate(subject, scheduleId, beginTime, endTime)); - } - if ((scheduleId = scheduleIdsMap.get(rowCacheSize)) != null) { - updateAggregateTotal(totalDiskUsedaggregate, - measurementManager.getAggregate(subject, scheduleId, beginTime, endTime)); + updateAggregateTotal(totalDiskUsedAggregate, loadAggregateWithUnits.getAggregate()); } - if ((scheduleId = scheduleIdsMap.get(totalCommitLogSize)) != null) { - updateAggregateTotal(totalDiskUsedaggregate, - measurementManager.getAggregate(subject, scheduleId, beginTime, endTime)); - } - - if (totalDiskUsedaggregate.getMax() > 0) { + // if ((scheduleId = scheduleIdsMap.get(METRIC_KEY_CACHE_SIZE)) != null) { + // updateAggregateTotal(totalDiskUsedAggregate, + // measurementManager.getAggregate(subject, scheduleId, beginTime, endTime)); + // } + // if ((scheduleId = scheduleIdsMap.get(METRIC_ROW_CACHE_SIZE)) != null) { + // updateAggregateTotal(totalDiskUsedAggregate, + // measurementManager.getAggregate(subject, scheduleId, beginTime, endTime)); + // } + // if ((scheduleId = scheduleIdsMap.get(METRIC_TOTAL_COMMIT_LOG_SIZE)) != null) { + // updateAggregateTotal(totalDiskUsedAggregate, + // measurementManager.getAggregate(subject, scheduleId, beginTime, endTime)); + // } + + if (totalDiskUsedAggregate.getMax() > 0) { StorageNodeLoadComposite.MeasurementAggregateWithUnits totalDiskUsedAggregateWithUnits = new StorageNodeLoadComposite.MeasurementAggregateWithUnits( - totalDiskUsedaggregate, MeasurementUnits.BYTES); - totalDiskUsedAggregateWithUnits.setFormattedValue(getSummaryString(totalDiskUsedaggregate, + totalDiskUsedAggregate, MeasurementUnits.BYTES); + totalDiskUsedAggregateWithUnits.setFormattedValue(getSummaryString(totalDiskUsedAggregate, MeasurementUnits.BYTES)); 
result.setDataDiskUsed(totalDiskUsedAggregateWithUnits); }
- if ((scheduleId = scheduleIdsMap.get(heapCommittedMetric)) != null) { + if ((scheduleId = scheduleIdsMap.get(METRIC_HEAP_COMMITED)) != null) { StorageNodeLoadComposite.MeasurementAggregateWithUnits heapCommittedAggregateWithUnits = getMeasurementAggregateWithUnits( subject, scheduleId, MeasurementUnits.BYTES, beginTime, endTime); result.setHeapCommitted(heapCommittedAggregateWithUnits); } - if ((scheduleId = scheduleIdsMap.get(heapUsedMetric)) != null) { + if ((scheduleId = scheduleIdsMap.get(METRIC_HEAP_USED)) != null) { StorageNodeLoadComposite.MeasurementAggregateWithUnits heapUsedAggregateWithUnits = getMeasurementAggregateWithUnits( subject, scheduleId, MeasurementUnits.BYTES, beginTime, endTime); result.setHeapUsed(heapUsedAggregateWithUnits); } - if ((scheduleId = scheduleIdsMap.get(heapUsedPercentageMetric)) != null) { + if ((scheduleId = scheduleIdsMap.get(METRIC_HEAP_USED_PERCENTAGE)) != null) { StorageNodeLoadComposite.MeasurementAggregateWithUnits heapUsedPercentageAggregateWithUnits = getMeasurementAggregateWithUnits( subject, scheduleId, MeasurementUnits.PERCENTAGE, beginTime, endTime); @@ -431,6 +453,28 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN return result; }
+ private List<Object[]> getStorageServiceScheduleIds(int storageNodeResourceId) { + // get the schedule ids for Storage Service resource + TypedQuery<Object[]> query = entityManager.<Object[]> createNamedQuery( + StorageNode.QUERY_FIND_SCHEDULE_IDS_BY_PARENT_RESOURCE_ID_AND_MEASUREMENT_DEFINITION_NAMES, Object[].class); + query.setParameter("parrentId", storageNodeResourceId).setParameter( + "metricNames", + Arrays.asList(METRIC_TOKENS, METRIC_OWNERSHIP, + METRIC_LOAD/*, METRIC_KEY_CACHE_SIZE, METRIC_ROW_CACHE_SIZE, METRIC_TOTAL_COMMIT_LOG_SIZE*/, + METRIC_DATA_DISK_USED_PERCENTAGE, METRIC_TOTAL_DISK_USED_PERCENTAGE, METRIC_FREE_DISK_TO_DATA_RATIO)); + return query.getResultList(); + } + + private List<Object[]> getMemorySubsystemScheduleIds(int storageNodeResourceId) { + // get the schedule ids for Memory Subsystem resource + TypedQuery<Object[]> query = entityManager.<Object[]> createNamedQuery( + StorageNode.QUERY_FIND_SCHEDULE_IDS_BY_GRANDPARENT_RESOURCE_ID_AND_MEASUREMENT_DEFINITION_NAMES, + Object[].class); + query.setParameter("grandparrentId", storageNodeResourceId).setParameter("metricNames", + Arrays.asList(METRIC_HEAP_COMMITED, METRIC_HEAP_USED, METRIC_HEAP_USED_PERCENTAGE)); + return query.getResultList(); + } + /** * @param accumulator * @param input @@ -453,6 +497,21 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN return query.getResultList(); }
+ @Override + public PageList<StorageNodeLoadComposite> getStorageNodeComposites() { + List<StorageNode> nodes = getStorageNodes(); + PageList<StorageNodeLoadComposite> result = new PageList<StorageNodeLoadComposite>(); + long endTime = System.currentTimeMillis(); + long beginTime = endTime - (8 * 60 * 60 * 1000); + for (StorageNode node : nodes) { + StorageNodeLoadComposite composite = getLoad(subjectManager.getOverlord(), node, beginTime, endTime); + int unackAlerts = findNotAcknowledgedStorageNodeAlerts(subjectManager.getOverlord(), node).size(); + composite.setUnackAlerts(unackAlerts); + result.add(composite); + } + return result; + } + private List<StorageNode> getClusteredStorageNodes() { return entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, StorageNode.class) .setParameter("operationMode", OperationMode.NORMAL).getResultList(); @@ -485,12 +544,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN @RequiredPermission(Permission.MANAGE_INVENTORY) }) public void prepareNodeForUpgrade(Subject subject, StorageNode storageNode) { int storageNodeResourceId = getResourceIdFromStorageNode(storageNode); - TopologyManagerLocal topologyManager = LookupUtil.getTopologyManager(); - ServerManagerLocal serverManager = LookupUtil.getServerManager(); OperationManagerLocal operationManager = LookupUtil.getOperationManager(); - Server server = serverManager.getServer(); - // setting the server mode to maintenance - topologyManager.updateServerMode(subject, new Integer[] { server.getId() }, Server.OperationMode.MAINTENANCE);
Configuration parameters = new Configuration(); parameters.setSimpleValue("snapshotName", String.valueOf(System.currentTimeMillis())); @@ -617,12 +671,8 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN if (storageNode == null) { initialStorageNodes = getStorageNodes(); } else { - int index = initialStorageNodes.indexOf(storageNode); - if (index >= 0) { - initialStorageNodes = Arrays.asList(initialStorageNodes.get(index)); - } else { - initialStorageNodes = new ArrayList<StorageNode>(); - } + initialStorageNodes = Arrays.asList(storageNode.getResource() == null ? entityManager.find( + StorageNode.class, storageNode.getId()) : storageNode); }
Queue<Resource> unvisitedResources = new LinkedList<Resource>(); @@ -740,6 +790,55 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN operationManager.scheduleResourceOperation(subject, schedule); }
+ @Override + @RequiredPermissions({ @RequiredPermission(Permission.MANAGE_SETTINGS), + @RequiredPermission(Permission.MANAGE_INVENTORY) }) + public Map<String, List<MeasurementDataNumericHighLowComposite>> findStorageNodeLoadDataForLast(Subject subject, + StorageNode node, long beginTime, long endTime, int numPoints) { + int storageNodeResourceId = getResourceIdFromStorageNode(node); + Map<String, List<MeasurementDataNumericHighLowComposite>> result = new LinkedHashMap<String, List<MeasurementDataNumericHighLowComposite>>(); + + List<Object[]> tupples = getStorageServiceScheduleIds(storageNodeResourceId); + List<String> defNames = new ArrayList<String>(); + int[] definitionIds = new int[tupples.size()]; + int resId = -1; + int index = 0; + for (Object[] tupple : tupples) { + String defName = (String) tupple[0]; + int definitionId = (Integer) tupple[1]; + resId = (Integer) tupple[3]; + defNames.add(defName); + definitionIds[index++] = definitionId; + } + List<List<MeasurementDataNumericHighLowComposite>> storageServiceData = measurementManager.findDataForResource( + subject, resId, definitionIds, beginTime, endTime, numPoints); + for (int i = 0; i < storageServiceData.size(); i++) { + List<MeasurementDataNumericHighLowComposite> oneRecord = storageServiceData.get(i); + result.put(defNames.get(i), oneRecord); + } + + tupples = getMemorySubsystemScheduleIds(storageNodeResourceId); + defNames = new ArrayList<String>(); + definitionIds = new int[tupples.size()]; + resId = -1; + index = 0; + for (Object[] tupple : tupples) { + String defName = (String) tupple[0]; + int definitionId = (Integer) tupple[1]; + resId = (Integer) tupple[3]; + defNames.add(defName); + definitionIds[index++] = definitionId; + } + List<List<MeasurementDataNumericHighLowComposite>> memorySubsystemData = measurementManager + .findDataForResource(subject, resId, definitionIds, beginTime, endTime, numPoints); + for (int i = 0; i < memorySubsystemData.size(); i++) { + 
List<MeasurementDataNumericHighLowComposite> oneRecord = memorySubsystemData.get(i); + result.put(defNames.get(i), oneRecord); + } + + return result; + } + private boolean runOperationAndWaitForResult(Subject subject, Resource storageNodeResource, String operationToRun, Configuration parameters) {
@@ -807,17 +906,17 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN log.info("Preparing to bootstrap " + storageNode + " into cluster..."); }
- List<StorageNode> existingStorageNodes = getClusteredStorageNodes(); - ResourceOperationSchedule schedule = new ResourceOperationSchedule(); schedule.setResource(storageNode.getResource()); schedule.setJobTrigger(JobTrigger.createNowTrigger()); schedule.setSubject(subjectManager.getOverlord()); schedule.setOperationName("prepareForBootstrap");
+ StorageClusterSettings clusterSettings = storageClusterSettingsManager.getClusterSettings(subjectManager + .getOverlord()); Configuration parameters = new Configuration(); - parameters.put(new PropertySimple("cqlPort", existingStorageNodes.get(0).getCqlPort())); - parameters.put(new PropertySimple("gossipPort", getGossipPort(storageNode, existingStorageNodes))); + parameters.put(new PropertySimple("cqlPort", clusterSettings.getCqlPort())); + parameters.put(new PropertySimple("gossipPort", clusterSettings.getGossipPort())); parameters.put(createPropertyListOfAddresses("storageNodeIPAddresses", getClusteredStorageNodes()));
schedule.setParameters(parameters); @@ -825,44 +924,6 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN operationManager.scheduleResourceOperation(subjectManager.getOverlord(), schedule); }
- private Integer getGossipPort(StorageNode newStorageNode, List<StorageNode> storageNodes) { - if (log.isInfoEnabled()) { - log.info("Looking up gossip port for new storage node " + newStorageNode); - } - try { - StorageNode node = null; - Configuration resourceConfig = null; - for (StorageNode storageNode : storageNodes) { - resourceConfig = configurationManager.getLiveResourceConfiguration(subjectManager.getOverlord(), - storageNode.getResource().getId(), false); - if (resourceConfig == null) { - log.warn("Failed to load resource configuration for storage node " + newStorageNode.getResource()); - } else { - node = storageNode; - break; - } - } - if (resourceConfig == null) { - log.error("Failed to obtain gossip port from existing storage nodes"); - throw new StorageConfigurationException("Failed to obtain gossip port from existing storage nodes"); - } - - PropertySimple property = resourceConfig.getSimple("gossipPort"); - if (property == null) { - throw new StorageConfigurationException("The resource configuration for " + node.getResource() + - "did not include the required property [gossipPort]"); - } - Integer port = property.getIntegerValue(); - log.info("Found gossip port set to " + port); - return property.getIntegerValue(); - } catch (Exception e) { - if (e instanceof StorageConfigurationException) { - throw (StorageConfigurationException) e; - } - throw new RuntimeException("An error occurred while trying to obtain the gossip port", e); - } - } - @Override public void runAddNodeMaintenance() { log.info("Preparing to schedule addNodeMaintenance on the storage cluster..."); diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java index 83ef02c..ce3a6bd 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java +++ 
b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java @@ -41,11 +41,9 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory;
import org.rhq.cassandra.util.ClusterBuilder; -import org.rhq.core.domain.cloud.Server; import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.util.StringUtil; import org.rhq.enterprise.server.cloud.StorageNodeManagerLocal; -import org.rhq.enterprise.server.cloud.instance.ServerManagerLocal; import org.rhq.server.metrics.DateTimeService; import org.rhq.server.metrics.MetricsConfiguration; import org.rhq.server.metrics.MetricsDAO; @@ -66,9 +64,6 @@ public class StorageClientManagerBean { private static final String RHQ_KEYSPACE = "rhq";
@EJB - private ServerManagerLocal serverManager; - - @EJB private StorageNodeManagerLocal storageNodeManager;
private StorageSession session; @@ -149,6 +144,10 @@ public class StorageClientManagerBean { return this.metricsConfiguration; }
+ public boolean isClusterAvailable() { + return storageClusterMonitor != null && storageClusterMonitor.isClusterAvailable(); + } + private Session createSession(String username, String password, List<StorageNode> storageNodes) { if (log.isDebugEnabled()) { log.debug("Initializing session to connect to storage node cluster"); diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java index 98d40cb..190bfe0 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java @@ -8,13 +8,6 @@ import com.datastax.driver.core.exceptions.NoHostAvailableException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory;
-import org.rhq.core.domain.cloud.Server; -import org.rhq.core.domain.cloud.StorageNode; -import org.rhq.enterprise.server.auth.SubjectManagerLocal; -import org.rhq.enterprise.server.cloud.StorageNodeManagerLocal; -import org.rhq.enterprise.server.cloud.TopologyManagerLocal; -import org.rhq.enterprise.server.cloud.instance.ServerManagerLocal; -import org.rhq.enterprise.server.util.LookupUtil; import org.rhq.server.metrics.StorageStateListener;
/** @@ -24,10 +17,10 @@ public class StorageClusterMonitor implements StorageStateListener {
private Log log = LogFactory.getLog(StorageClusterMonitor.class);
- private AtomicBoolean isClusterDown = new AtomicBoolean(false); + private AtomicBoolean isClusterAvailable = new AtomicBoolean(false);
- public boolean isClusterDown() { - return isClusterDown.get(); + public boolean isClusterAvailable() { + return isClusterAvailable.get(); }
@Override @@ -46,7 +39,7 @@ public class StorageClusterMonitor implements StorageStateListener { log.info("Adding " + newClusterNode + " to storage cluster and scheduling cluster maintenance..."); storageNodeManager.addToStorageNodeGroup(newClusterNode); storageNodeManager.runAddNodeMaintenance(); - } + }*/ }
@Override
commit a4b78eb6e1adeffaa519115a9ef07b3f00025168 Author: Stefan Negrea snegrea@redhat.com Date: Wed Aug 7 23:51:04 2013 -0500
Update the operation mode for servers to correctly use storage node cluster information: 1) The maintenance mode set by the user (via UI or properties file) is now stored in the server status bitmask 2) The transition between Maintenance and Normal operation modes is done automatically. 3) The transition between Maintenance and Normal operation modes depends on storage node cluster availability and set user maintenance mode status. 4) Cleanup the lifecycles and state transitions for the storage node client manager, server manager bean, and startup bean. 5) Updated the UI to make use newly added methods that control only the user set maintenance mode status.
Also included various other updates....
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/Server.java b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/Server.java index e755597..c43a234 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/Server.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/Server.java @@ -235,20 +235,26 @@ public class Server implements Serializable {
public enum OperationMode {
- DOWN("This server is down member of the HA server cloud"), // - INSTALLED("This server is newly installed but not yet fully operating"), // - MAINTENANCE("This server is a Maintenance Mode member of the HA server cloud"), // - NORMAL("This server is a Normal Member of the HA server cloud"); + DOWN("This server is down member of the HA server cloud", true), // + INSTALLED("This server is newly installed but not yet fully operating", true), // + MAINTENANCE("This server is a Maintenance Mode member of the HA server cloud", false), // + NORMAL("This server is a Normal Member of the HA server cloud", true);
public final String message; + private final boolean configurable;
- private OperationMode(String message) { + private OperationMode(String message, boolean configurable) { this.message = message; + this.configurable = configurable; }
public String getMessage() { return message; } + + public boolean isConfigurable() { + return configurable; + } }
public List<Agent> getAgents() { @@ -279,13 +285,22 @@ public class Server implements Serializable { }
/** - * If this status was non-zero, some scheduled job would have had to come along to perform - * some work on behalf of this server. After that work is complete, the status can be reset - * (set to 0) signifying that no further work needs to be done on this server (as long as the - * status remains 0). + * Verifies if bitmask status is set. + * + * @param queryStatus status + * @return true if status set, false otherwise + */ + public boolean hasStatus(Status queryStatus) { + return (this.status & queryStatus.mask) == queryStatus.mask; + } + + /** + * Clears the specified bitmask status. + * + * @param removeStatus status to be removed */ - public void clearStatus() { - status = 0; + public void clearStatus(Status removeStatus) { + this.status &= ~removeStatus.mask; }
/** @@ -298,6 +313,9 @@ public class Server implements Serializable { this.status |= newStatus.mask; }
+ /** + * @return list all messages for the status mask + */ public List<String> getStatusMessages() { return Status.getMessages(status); } @@ -305,7 +323,8 @@ public class Server implements Serializable { public enum Status {
RESOURCE_HIERARCHY_UPDATED(1, "The resource hierarchy has been updated"), // - ALERT_DEFINITION(2, "Some alert definition with a global condition category was updated"); + ALERT_DEFINITION(2, "Some alert definition with a global condition category was updated"), + MANUAL_MAINTENANCE_MODE(4,"Manual Maintenance mode setup by the user either via UI or properties file.");
public final int mask; public final String message; diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/topology/ServerTableView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/topology/ServerTableView.java index c46c925..77aee3d 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/topology/ServerTableView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/topology/ServerTableView.java @@ -153,11 +153,6 @@ public class ServerTableView extends return new ServerDetailView(id); }
- // @Override - // public abstract void showDetails(ID id) { - // - // } - private void showCommonActions() { addChangeOperationModeAction(OperationMode.NORMAL, MSG.view_adminTopology_server_setNormal()); addChangeOperationModeAction(OperationMode.MAINTENANCE, MSG.view_adminTopology_server_setMaintenance()); @@ -204,7 +199,8 @@ public class ServerTableView extends public void execute(Boolean confirmed) { if (confirmed) { int[] selectedIds = getSelectedIds(selections); - GWTServiceLookup.getTopologyService().updateServerMode(selectedIds, mode, + boolean manualMaintenance = mode == OperationMode.MAINTENANCE; + GWTServiceLookup.getTopologyService().updateServerManualMaintenance(selectedIds, manualMaintenance, new AsyncCallback<Void>() { public void onSuccess(Void result) { Message msg = new Message(MSG.view_adminTopology_message_setMode( diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/TopologyGWTService.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/TopologyGWTService.java index 8fa364d..ef193da 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/TopologyGWTService.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/TopologyGWTService.java @@ -72,7 +72,7 @@ public interface TopologyGWTService extends RemoteService { * @param mode the new operation mode * @throws RuntimeException */ - void updateServerMode(int[] serverIds, Server.OperationMode mode) throws RuntimeException; + void updateServerManualMaintenance(int[] serverIds, boolean manualMaintenance) throws RuntimeException;
/** * Updates the server. diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/TopologyGWTServiceImpl.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/TopologyGWTServiceImpl.java index 06108a0..34d42f9 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/TopologyGWTServiceImpl.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/TopologyGWTServiceImpl.java @@ -32,7 +32,6 @@ import org.rhq.core.domain.cloud.PartitionEvent; import org.rhq.core.domain.cloud.PartitionEventDetails; import org.rhq.core.domain.cloud.PartitionEventType; import org.rhq.core.domain.cloud.Server; -import org.rhq.core.domain.cloud.Server.OperationMode; import org.rhq.core.domain.cloud.composite.AffinityGroupCountComposite; import org.rhq.core.domain.cloud.composite.ServerWithAgentCountComposite; import org.rhq.core.domain.criteria.AgentCriteria; @@ -84,9 +83,10 @@ public class TopologyGWTServiceImpl extends AbstractGWTServiceImpl implements To }
@Override - public void updateServerMode(int[] serverIds, OperationMode mode) throws RuntimeException { + public void updateServerManualMaintenance(int[] serverIds, boolean manualMaintenance) throws RuntimeException { try { - topologyManager.updateServerMode(getSessionSubject(), ArrayUtils.toObject(serverIds), mode); + topologyManager.updateServerManualMaintenance(getSessionSubject(), ArrayUtils.toObject(serverIds), + manualMaintenance); } catch (Throwable t) { throw getExceptionToThrowToClient(t); } diff --git a/modules/enterprise/gui/portal-war/src/main/java/org/rhq/enterprise/gui/ha/ListServersUIBean.java b/modules/enterprise/gui/portal-war/src/main/java/org/rhq/enterprise/gui/ha/ListServersUIBean.java index cc85b4d..9f5801a 100644 --- a/modules/enterprise/gui/portal-war/src/main/java/org/rhq/enterprise/gui/ha/ListServersUIBean.java +++ b/modules/enterprise/gui/portal-war/src/main/java/org/rhq/enterprise/gui/ha/ListServersUIBean.java @@ -22,7 +22,6 @@ import javax.faces.application.FacesMessage; import javax.faces.model.DataModel;
import org.rhq.core.domain.auth.Subject; -import org.rhq.core.domain.cloud.Server; import org.rhq.core.domain.cloud.composite.ServerWithAgentCountComposite; import org.rhq.core.domain.util.PageControl; import org.rhq.core.domain.util.PageList; @@ -63,17 +62,24 @@ public class ListServersUIBean extends PagedDataTableUIBean { return "success"; }
- public String setSelectedServersMode(Server.OperationMode mode) { + public String updateServerManualMaintenance(boolean manualMaintenance) { // Subject subject = EnterpriseFacesContextUtility.getSubject(); String[] selected = getSelectedServers(); Integer[] ids = getIntegerArray(selected);
if (ids.length > 0) { try { - topologyManager.updateServerMode(EnterpriseFacesContextUtility.getSubject(), ids, mode); + topologyManager.updateServerManualMaintenance(EnterpriseFacesContextUtility.getSubject(), ids, + manualMaintenance); + + if (manualMaintenance) { + FacesContextUtility.addMessage(FacesMessage.SEVERITY_INFO, "Set [" + ids.length + + "] servers' manual maintenance status."); + } else { + FacesContextUtility.addMessage(FacesMessage.SEVERITY_INFO, "Removed [" + ids.length + + "] servers' manual maintenance status."); + }
- FacesContextUtility.addMessage(FacesMessage.SEVERITY_INFO, "Set [" + ids.length + "] servers to mode " - + mode); } catch (Exception e) { FacesContextUtility.addMessage(FacesMessage.SEVERITY_ERROR, "Failed to set selected server modes", e); } @@ -83,11 +89,11 @@ public class ListServersUIBean extends PagedDataTableUIBean { }
public String setSelectedServersModeMaintenance() { - return setSelectedServersMode(Server.OperationMode.MAINTENANCE); + return updateServerManualMaintenance(true); }
public String setSelectedServersModeNormal() { - return setSelectedServersMode(Server.OperationMode.NORMAL); + return updateServerManualMaintenance(false); }
@Override diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java index 8010c2b..af01136 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java @@ -524,7 +524,7 @@ public abstract class AbstractEJB3Test extends Arquillian { } } } - storageClientManager.init(); + storageClientManager.init(System.currentTimeMillis() - 100000); beforeMethod(); beforeMethod(method);
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index ae1e69a..423218a 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -28,8 +28,8 @@ import java.net.InetAddress; import java.net.UnknownHostException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Date; import java.util.HashMap; -import java.util.LinkedHashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; @@ -44,9 +44,11 @@ import javax.persistence.EntityManager; import javax.persistence.PersistenceContext; import javax.persistence.TypedQuery;
-import org.apache.commons.collections.map.LinkedMap; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.quartz.JobDataMap; +import org.quartz.SimpleTrigger; +import org.quartz.Trigger;
import org.rhq.cassandra.schema.SchemaManager; import org.rhq.core.domain.alert.Alert; @@ -67,7 +69,6 @@ import org.rhq.core.domain.criteria.ResourceOperationHistoryCriteria; import org.rhq.core.domain.criteria.StorageNodeCriteria; import org.rhq.core.domain.measurement.MeasurementAggregate; import org.rhq.core.domain.measurement.MeasurementUnits; -import org.rhq.core.domain.measurement.composite.MeasurementDataNumericHighLowComposite; import org.rhq.core.domain.operation.OperationRequestStatus; import org.rhq.core.domain.operation.ResourceOperationHistory; import org.rhq.core.domain.operation.bean.GroupOperationSchedule; @@ -93,8 +94,8 @@ import org.rhq.enterprise.server.resource.ResourceTypeManagerLocal; import org.rhq.enterprise.server.resource.group.ResourceGroupManagerLocal; import org.rhq.enterprise.server.rest.reporting.MeasurementConverter; import org.rhq.enterprise.server.scheduler.SchedulerLocal; -import org.rhq.enterprise.server.storage.StorageClusterSettings; -import org.rhq.enterprise.server.storage.StorageClusterSettingsManagerBean; +import org.rhq.enterprise.server.scheduler.jobs.StorageNodeMaintenanceJob; +import org.rhq.enterprise.server.storage.StorageConfigurationException; import org.rhq.enterprise.server.util.CriteriaQueryGenerator; import org.rhq.enterprise.server.util.CriteriaQueryRunner; import org.rhq.enterprise.server.util.LookupUtil; @@ -117,7 +118,6 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN private final static String SEEDS_LIST = "seedsList";
private static final String RHQ_STORAGE_CQL_PORT_PROPERTY = "nativeTransportPort"; - private static final String RHQ_STORAGE_GOSSIP_PORT_PROPERTY = "storagePort"; private static final String RHQ_STORAGE_JMX_PORT_PROPERTY = "jmxPort"; private static final String RHQ_STORAGE_ADDRESS_PROPERTY = "host";
@@ -125,19 +125,6 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN private static final int MAX_ITERATIONS = 10; private static final String UPDATE_CONFIGURATION_OPERATION = "updateConfiguration"; private static final String RESTART_OPERATION = "restart"; - - // metric names on Storage Service resource - private static final String METRIC_TOKENS = "Tokens", METRIC_OWNERSHIP = "Ownership"; - private static final String METRIC_DATA_DISK_USED_PERCENTAGE = "Calculated.DataDiskUsedPercentage"; - private static final String METRIC_TOTAL_DISK_USED_PERCENTAGE = "Calculated.TotalDiskUsedPercentage"; - private static final String METRIC_FREE_DISK_TO_DATA_RATIO = "Calculated.FreeDiskToDataSizeRatio"; - private static final String METRIC_LOAD = "Load", METRIC_KEY_CACHE_SIZE = "KeyCacheSize", - METRIC_ROW_CACHE_SIZE = "RowCacheSize", METRIC_TOTAL_COMMIT_LOG_SIZE = "TotalCommitlogSize"; - - //metric names on Memory Subsystem resource - private static final String METRIC_HEAP_COMMITED = "{HeapMemoryUsage.committed}", - METRIC_HEAP_USED = "{HeapMemoryUsage.used}", METRIC_HEAP_USED_PERCENTAGE = "Calculated.HeapUsagePercentage"; -
@PersistenceContext(unitName = RHQConstants.PERSISTENCE_UNIT_NAME) private EntityManager entityManager; @@ -172,13 +159,10 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN @EJB private ResourceManagerLocal resourceManager;
- @EJB - private StorageClusterSettingsManagerBean storageClusterSettingsManager; - @Override public void linkResource(Resource resource) { - Configuration pluginConfig = resource.getPluginConfiguration(); - String address = pluginConfig.getSimpleValue(RHQ_STORAGE_ADDRESS_PROPERTY); + Configuration resourceConfig = resource.getPluginConfiguration(); + String address = resourceConfig.getSimpleValue(RHQ_STORAGE_ADDRESS_PROPERTY);
if (log.isInfoEnabled()) { log.info("Linking " + resource + " to storage node at " + address); @@ -192,13 +176,12 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN } storageNode.setResource(resource); storageNode.setOperationMode(OperationMode.NORMAL); - initClusterSettingsIfNecessary(pluginConfig); addStorageNodeToGroup(resource); } else { storageNode = new StorageNode(); storageNode.setAddress(address); - storageNode.setCqlPort(Integer.parseInt(pluginConfig.getSimpleValue(RHQ_STORAGE_CQL_PORT_PROPERTY))); - storageNode.setJmxPort(Integer.parseInt(pluginConfig.getSimpleValue(RHQ_STORAGE_JMX_PORT_PROPERTY))); + storageNode.setCqlPort(Integer.parseInt(resourceConfig.getSimpleValue(RHQ_STORAGE_CQL_PORT_PROPERTY))); + storageNode.setJmxPort(Integer.parseInt(resourceConfig.getSimpleValue(RHQ_STORAGE_JMX_PORT_PROPERTY))); storageNode.setResource(resource); storageNode.setOperationMode(OperationMode.INSTALLED);
@@ -217,31 +200,6 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN } }
- private void initClusterSettingsIfNecessary(Configuration pluginConfig) { - // TODO Need to handle non-repeatable reads here (probably a post 4.9 task) - // - // If a user deploys two storage nodes prior to installing the RHQ server, then we - // could end up in this method concurrently for both storage nodes. The settings - // would be committed for each node with the second commit winning. The problem is - // that is the cluster settings differ for the two nodes, it will be silently - // ignored. This scenario will happen infrequently so it should be sufficient to - // resolve it with optimistic locking. The second writer should fail with an - // OptimisticLockException. - - log.info("Initializing storage cluster settings"); - - StorageClusterSettings clusterSettings = storageClusterSettingsManager.getClusterSettings( - subjectManager.getOverlord()); - if (clusterSettings != null) { - log.info("Cluster settings have already been set. Skipping initialization."); - return; - } - clusterSettings = new StorageClusterSettings(); - clusterSettings.setCqlPort(Integer.parseInt(pluginConfig.getSimpleValue(RHQ_STORAGE_CQL_PORT_PROPERTY))); - clusterSettings.setGossipPort(Integer.parseInt(pluginConfig.getSimpleValue(RHQ_STORAGE_GOSSIP_PORT_PROPERTY))); - storageClusterSettingsManager.setClusterSettings(subjectManager.getOverlord(), clusterSettings); - } - private void announceNewNode(StorageNode newStorageNode) { if (log.isInfoEnabled()) { log.info("Announcing " + newStorageNode + " to storage node cluster."); @@ -363,90 +321,106 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN int resourceId = getResourceIdFromStorageNode(node); Map<String, Integer> scheduleIdsMap = new HashMap<String, Integer>();
- for (Object[] tupple : getStorageServiceScheduleIds(resourceId)) { - String definitionName = (String) tupple[0]; - Integer scheduleId = (Integer) tupple[2]; - scheduleIdsMap.put(definitionName, scheduleId); + // get the schedule ids for Storage Service resource + final String tokensMetric = "Tokens", ownershipMetric = "Ownership"; + final String dataDiskUsedPercentageMetric = "Calculated.DataDiskUsedPercentage"; + final String totalDiskUsedPercentageMetric = "Calculated.TotalDiskUsedPercentage"; + final String freeDiskToDataRatioMetric = "Calculated.FreeDiskToDataSizeRatio"; + final String loadMetric = "Load", keyCacheSize = "KeyCacheSize", rowCacheSize = "RowCacheSize", totalCommitLogSize = "TotalCommitlogSize"; + TypedQuery<Object[]> query = entityManager.<Object[]> createNamedQuery( + StorageNode.QUERY_FIND_SCHEDULE_IDS_BY_PARENT_RESOURCE_ID_AND_MEASUREMENT_DEFINITION_NAMES, Object[].class); + query.setParameter("parrentId", resourceId).setParameter("metricNames", + Arrays.asList(tokensMetric, ownershipMetric, loadMetric, keyCacheSize, rowCacheSize, totalCommitLogSize, + dataDiskUsedPercentageMetric, totalDiskUsedPercentageMetric, freeDiskToDataRatioMetric)); + for (Object[] pair : query.getResultList()) { + scheduleIdsMap.put((String) pair[0], (Integer) pair[1]); } - for (Object[] tupple : getMemorySubsystemScheduleIds(resourceId)) { - String definitionName = (String) tupple[0]; - Integer scheduleId = (Integer) tupple[2]; - scheduleIdsMap.put(definitionName, scheduleId); + + // get the schedule ids for Memory Subsystem resource + final String heapCommittedMetric = "{HeapMemoryUsage.committed}", heapUsedMetric = "{HeapMemoryUsage.used}", heapUsedPercentageMetric = "Calculated.HeapUsagePercentage"; + query = entityManager.<Object[]> createNamedQuery( + StorageNode.QUERY_FIND_SCHEDULE_IDS_BY_GRANDPARENT_RESOURCE_ID_AND_MEASUREMENT_DEFINITION_NAMES, + Object[].class); + query.setParameter("grandparrentId", resourceId).setParameter("metricNames", + 
Arrays.asList(heapCommittedMetric, heapUsedMetric, heapUsedPercentageMetric)); + for (Object[] pair : query.getResultList()) { + scheduleIdsMap.put((String) pair[0], (Integer) pair[1]); }
+ StorageNodeLoadComposite result = new StorageNodeLoadComposite(node, beginTime, endTime); - MeasurementAggregate totalDiskUsedAggregate = new MeasurementAggregate(0d, 0d, 0d); + MeasurementAggregate totalDiskUsedaggregate = new MeasurementAggregate(0d, 0d, 0d); Integer scheduleId = null;
// find the aggregates and enrich the result instance if (!scheduleIdsMap.isEmpty()) { - if ((scheduleId = scheduleIdsMap.get(METRIC_TOKENS)) != null) { + if ((scheduleId = scheduleIdsMap.get(tokensMetric)) != null) { MeasurementAggregate tokensAggregate = measurementManager.getAggregate(subject, scheduleId, beginTime, endTime); result.setTokens(tokensAggregate); } - if ((scheduleId = scheduleIdsMap.get(METRIC_OWNERSHIP)) != null) { + if ((scheduleId = scheduleIdsMap.get(ownershipMetric)) != null) { StorageNodeLoadComposite.MeasurementAggregateWithUnits ownershipAggregateWithUnits = getMeasurementAggregateWithUnits( subject, scheduleId, MeasurementUnits.PERCENTAGE, beginTime, endTime); result.setActuallyOwns(ownershipAggregateWithUnits); }
//calculated disk space related metrics - if ((scheduleId = scheduleIdsMap.get(METRIC_DATA_DISK_USED_PERCENTAGE)) != null) { + if ((scheduleId = scheduleIdsMap.get(dataDiskUsedPercentageMetric)) != null) { StorageNodeLoadComposite.MeasurementAggregateWithUnits dataDiskUsedPercentageAggregateWithUnits = getMeasurementAggregateWithUnits( subject, scheduleId, MeasurementUnits.PERCENTAGE, beginTime, endTime); result.setDataDiskUsedPercentage(dataDiskUsedPercentageAggregateWithUnits); } - if ((scheduleId = scheduleIdsMap.get(METRIC_TOTAL_DISK_USED_PERCENTAGE)) != null) { + if ((scheduleId = scheduleIdsMap.get(totalDiskUsedPercentageMetric)) != null) { StorageNodeLoadComposite.MeasurementAggregateWithUnits totalDiskUsedPercentageAggregateWithUnits = getMeasurementAggregateWithUnits( subject, scheduleId, MeasurementUnits.PERCENTAGE, beginTime, endTime); result.setTotalDiskUsedPercentage(totalDiskUsedPercentageAggregateWithUnits); } - if ((scheduleId = scheduleIdsMap.get(METRIC_FREE_DISK_TO_DATA_RATIO)) != null) { + if ((scheduleId = scheduleIdsMap.get(freeDiskToDataRatioMetric)) != null) { MeasurementAggregate freeDiskToDataRatioAggregate = measurementManager.getAggregate(subject, scheduleId, beginTime, endTime); result.setFreeDiskToDataSizeRatio(freeDiskToDataRatioAggregate); }
- if ((scheduleId = scheduleIdsMap.get(METRIC_LOAD)) != null) { + if ((scheduleId = scheduleIdsMap.get(loadMetric)) != null) { StorageNodeLoadComposite.MeasurementAggregateWithUnits loadAggregateWithUnits = getMeasurementAggregateWithUnits( subject, scheduleId, MeasurementUnits.BYTES, beginTime, endTime); result.setLoad(loadAggregateWithUnits);
- updateAggregateTotal(totalDiskUsedAggregate, loadAggregateWithUnits.getAggregate()); + updateAggregateTotal(totalDiskUsedaggregate, loadAggregateWithUnits.getAggregate()); + } + if ((scheduleId = scheduleIdsMap.get(keyCacheSize)) != null) { + updateAggregateTotal(totalDiskUsedaggregate, + measurementManager.getAggregate(subject, scheduleId, beginTime, endTime)); + } + if ((scheduleId = scheduleIdsMap.get(rowCacheSize)) != null) { + updateAggregateTotal(totalDiskUsedaggregate, + measurementManager.getAggregate(subject, scheduleId, beginTime, endTime)); + } + if ((scheduleId = scheduleIdsMap.get(totalCommitLogSize)) != null) { + updateAggregateTotal(totalDiskUsedaggregate, + measurementManager.getAggregate(subject, scheduleId, beginTime, endTime)); } -// if ((scheduleId = scheduleIdsMap.get(METRIC_KEY_CACHE_SIZE)) != null) { -// updateAggregateTotal(totalDiskUsedAggregate, -// measurementManager.getAggregate(subject, scheduleId, beginTime, endTime)); -// } -// if ((scheduleId = scheduleIdsMap.get(METRIC_ROW_CACHE_SIZE)) != null) { -// updateAggregateTotal(totalDiskUsedAggregate, -// measurementManager.getAggregate(subject, scheduleId, beginTime, endTime)); -// } -// if ((scheduleId = scheduleIdsMap.get(METRIC_TOTAL_COMMIT_LOG_SIZE)) != null) { -// updateAggregateTotal(totalDiskUsedAggregate, -// measurementManager.getAggregate(subject, scheduleId, beginTime, endTime)); -// } - - if (totalDiskUsedAggregate.getMax() > 0) { + + if (totalDiskUsedaggregate.getMax() > 0) { StorageNodeLoadComposite.MeasurementAggregateWithUnits totalDiskUsedAggregateWithUnits = new StorageNodeLoadComposite.MeasurementAggregateWithUnits( - totalDiskUsedAggregate, MeasurementUnits.BYTES); - totalDiskUsedAggregateWithUnits.setFormattedValue(getSummaryString(totalDiskUsedAggregate, + totalDiskUsedaggregate, MeasurementUnits.BYTES); + totalDiskUsedAggregateWithUnits.setFormattedValue(getSummaryString(totalDiskUsedaggregate, MeasurementUnits.BYTES)); 
result.setDataDiskUsed(totalDiskUsedAggregateWithUnits); }
- if ((scheduleId = scheduleIdsMap.get(METRIC_HEAP_COMMITED)) != null) { + if ((scheduleId = scheduleIdsMap.get(heapCommittedMetric)) != null) { StorageNodeLoadComposite.MeasurementAggregateWithUnits heapCommittedAggregateWithUnits = getMeasurementAggregateWithUnits( subject, scheduleId, MeasurementUnits.BYTES, beginTime, endTime); result.setHeapCommitted(heapCommittedAggregateWithUnits); } - if ((scheduleId = scheduleIdsMap.get(METRIC_HEAP_USED)) != null) { + if ((scheduleId = scheduleIdsMap.get(heapUsedMetric)) != null) { StorageNodeLoadComposite.MeasurementAggregateWithUnits heapUsedAggregateWithUnits = getMeasurementAggregateWithUnits( subject, scheduleId, MeasurementUnits.BYTES, beginTime, endTime); result.setHeapUsed(heapUsedAggregateWithUnits); } - if ((scheduleId = scheduleIdsMap.get(METRIC_HEAP_USED_PERCENTAGE)) != null) { + if ((scheduleId = scheduleIdsMap.get(heapUsedPercentageMetric)) != null) { StorageNodeLoadComposite.MeasurementAggregateWithUnits heapUsedPercentageAggregateWithUnits = getMeasurementAggregateWithUnits( subject, scheduleId, MeasurementUnits.PERCENTAGE, beginTime, endTime); @@ -456,26 +430,6 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
return result; } - - private List<Object[]> getStorageServiceScheduleIds(int storageNodeResourceId) { - // get the schedule ids for Storage Service resource - TypedQuery<Object[]> query = entityManager.<Object[]> createNamedQuery( - StorageNode.QUERY_FIND_SCHEDULE_IDS_BY_PARENT_RESOURCE_ID_AND_MEASUREMENT_DEFINITION_NAMES, Object[].class); - query.setParameter("parrentId", storageNodeResourceId).setParameter("metricNames", - Arrays.asList(METRIC_TOKENS, METRIC_OWNERSHIP, METRIC_LOAD/*, METRIC_KEY_CACHE_SIZE, METRIC_ROW_CACHE_SIZE, METRIC_TOTAL_COMMIT_LOG_SIZE*/, - METRIC_DATA_DISK_USED_PERCENTAGE, METRIC_TOTAL_DISK_USED_PERCENTAGE, METRIC_FREE_DISK_TO_DATA_RATIO)); - return query.getResultList(); - } - - private List<Object[]> getMemorySubsystemScheduleIds(int storageNodeResourceId) { - // get the schedule ids for Memory Subsystem resource - TypedQuery<Object[]> query = entityManager.<Object[]> createNamedQuery( - StorageNode.QUERY_FIND_SCHEDULE_IDS_BY_GRANDPARENT_RESOURCE_ID_AND_MEASUREMENT_DEFINITION_NAMES, - Object[].class); - query.setParameter("grandparrentId", storageNodeResourceId).setParameter("metricNames", - Arrays.asList(METRIC_HEAP_COMMITED, METRIC_HEAP_USED, METRIC_HEAP_USED_PERCENTAGE)); - return query.getResultList(); - }
/** * @param accumulator @@ -498,21 +452,6 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN StorageNode.class); return query.getResultList(); } - - @Override - public PageList<StorageNodeLoadComposite> getStorageNodeComposites() { - List<StorageNode> nodes = getStorageNodes(); - PageList<StorageNodeLoadComposite> result = new PageList<StorageNodeLoadComposite>(); - long endTime = System.currentTimeMillis(); - long beginTime = endTime - (8 * 60 * 60 * 1000); - for (StorageNode node : nodes) { - StorageNodeLoadComposite composite = getLoad(subjectManager.getOverlord(), node, beginTime, endTime); - int unackAlerts = findNotAcknowledgedStorageNodeAlerts(subjectManager.getOverlord(), node).size(); - composite.setUnackAlerts(unackAlerts); - result.add(composite); - } - return result; - }
private List<StorageNode> getClusteredStorageNodes() { return entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, StorageNode.class) @@ -678,10 +617,14 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN if (storageNode == null) { initialStorageNodes = getStorageNodes(); } else { - initialStorageNodes = Arrays.asList(storageNode.getResource() == null ? entityManager.find( - StorageNode.class, storageNode.getId()) : storageNode); + int index = initialStorageNodes.indexOf(storageNode); + if (index >= 0) { + initialStorageNodes = Arrays.asList(initialStorageNodes.get(index)); + } else { + initialStorageNodes = new ArrayList<StorageNode>(); + } } - + Queue<Resource> unvisitedResources = new LinkedList<Resource>(); for (StorageNode initialStorageNode : initialStorageNodes) { if (initialStorageNode.getResource() != null) { @@ -796,55 +739,6 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN public void scheduleOperationInNewTransaction(Subject subject, ResourceOperationSchedule schedule) { operationManager.scheduleResourceOperation(subject, schedule); } - - @Override - @RequiredPermissions({ @RequiredPermission(Permission.MANAGE_SETTINGS), - @RequiredPermission(Permission.MANAGE_INVENTORY) }) - public Map<String, List<MeasurementDataNumericHighLowComposite>> findStorageNodeLoadDataForLast(Subject subject, - StorageNode node, long beginTime, long endTime, int numPoints) { - int storageNodeResourceId = getResourceIdFromStorageNode(node); - Map<String, List<MeasurementDataNumericHighLowComposite>> result = new LinkedHashMap<String, List<MeasurementDataNumericHighLowComposite>>(); - - List<Object[]> tupples = getStorageServiceScheduleIds(storageNodeResourceId); - List<String> defNames = new ArrayList<String>(); - int[] definitionIds = new int[tupples.size()]; - int resId = -1; - int index = 0; - for (Object[] tupple : tupples) { - String defName = (String) tupple[0]; - int definitionId = 
(Integer) tupple[1]; - resId = (Integer) tupple[3]; - defNames.add(defName); - definitionIds[index++] = definitionId; - } - List<List<MeasurementDataNumericHighLowComposite>> storageServiceData = measurementManager.findDataForResource( - subject, resId, definitionIds, beginTime, endTime, numPoints); - for (int i = 0; i < storageServiceData.size(); i ++) { - List<MeasurementDataNumericHighLowComposite> oneRecord = storageServiceData.get(i); - result.put(defNames.get(i), oneRecord); - } - - tupples = getMemorySubsystemScheduleIds(storageNodeResourceId); - defNames = new ArrayList<String>(); - definitionIds = new int[tupples.size()]; - resId = -1; - index = 0; - for (Object[] tupple : tupples) { - String defName = (String) tupple[0]; - int definitionId = (Integer) tupple[1]; - resId = (Integer) tupple[3]; - defNames.add(defName); - definitionIds[index++] = definitionId; - } - List<List<MeasurementDataNumericHighLowComposite>> memorySubsystemData = measurementManager.findDataForResource( - subject, resId, definitionIds, beginTime, endTime, numPoints); - for (int i = 0; i < memorySubsystemData.size(); i ++) { - List<MeasurementDataNumericHighLowComposite> oneRecord = memorySubsystemData.get(i); - result.put(defNames.get(i), oneRecord); - } - - return result; - }
private boolean runOperationAndWaitForResult(Subject subject, Resource storageNodeResource, String operationToRun, Configuration parameters) { @@ -913,17 +807,17 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN log.info("Preparing to bootstrap " + storageNode + " into cluster..."); }
+ List<StorageNode> existingStorageNodes = getClusteredStorageNodes(); + ResourceOperationSchedule schedule = new ResourceOperationSchedule(); schedule.setResource(storageNode.getResource()); schedule.setJobTrigger(JobTrigger.createNowTrigger()); schedule.setSubject(subjectManager.getOverlord()); schedule.setOperationName("prepareForBootstrap");
- StorageClusterSettings clusterSettings = storageClusterSettingsManager.getClusterSettings( - subjectManager.getOverlord()); Configuration parameters = new Configuration(); - parameters.put(new PropertySimple("cqlPort", clusterSettings.getCqlPort())); - parameters.put(new PropertySimple("gossipPort", clusterSettings.getGossipPort())); + parameters.put(new PropertySimple("cqlPort", existingStorageNodes.get(0).getCqlPort())); + parameters.put(new PropertySimple("gossipPort", getGossipPort(storageNode, existingStorageNodes))); parameters.put(createPropertyListOfAddresses("storageNodeIPAddresses", getClusteredStorageNodes()));
schedule.setParameters(parameters); @@ -931,6 +825,44 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN operationManager.scheduleResourceOperation(subjectManager.getOverlord(), schedule); }
+ private Integer getGossipPort(StorageNode newStorageNode, List<StorageNode> storageNodes) { + if (log.isInfoEnabled()) { + log.info("Looking up gossip port for new storage node " + newStorageNode); + } + try { + StorageNode node = null; + Configuration resourceConfig = null; + for (StorageNode storageNode : storageNodes) { + resourceConfig = configurationManager.getLiveResourceConfiguration(subjectManager.getOverlord(), + storageNode.getResource().getId(), false); + if (resourceConfig == null) { + log.warn("Failed to load resource configuration for storage node " + newStorageNode.getResource()); + } else { + node = storageNode; + break; + } + } + if (resourceConfig == null) { + log.error("Failed to obtain gossip port from existing storage nodes"); + throw new StorageConfigurationException("Failed to obtain gossip port from existing storage nodes"); + } + + PropertySimple property = resourceConfig.getSimple("gossipPort"); + if (property == null) { + throw new StorageConfigurationException("The resource configuration for " + node.getResource() + + "did not include the required property [gossipPort]"); + } + Integer port = property.getIntegerValue(); + log.info("Found gossip port set to " + port); + return property.getIntegerValue(); + } catch (Exception e) { + if (e instanceof StorageConfigurationException) { + throw (StorageConfigurationException) e; + } + throw new RuntimeException("An error occurred while trying to obtain the gossip port", e); + } + } + @Override public void runAddNodeMaintenance() { log.info("Preparing to schedule addNodeMaintenance on the storage cluster..."); @@ -1018,4 +950,4 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN return value; }
-} +} \ No newline at end of file diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/TopologyManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/TopologyManagerBean.java index 98aa9e6..4db1697 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/TopologyManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/TopologyManagerBean.java @@ -35,10 +35,8 @@ import org.rhq.core.domain.authz.Permission; import org.rhq.core.domain.cloud.FailoverListDetails; import org.rhq.core.domain.cloud.PartitionEventType; import org.rhq.core.domain.cloud.Server; -import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.cloud.composite.ServerWithAgentCountComposite; import org.rhq.core.domain.criteria.ServerCriteria; -import org.rhq.core.domain.criteria.StorageNodeCriteria; import org.rhq.core.domain.resource.Agent; import org.rhq.core.domain.server.PersistenceUtility; import org.rhq.core.domain.util.PageControl; @@ -216,13 +214,36 @@ public class TopologyManagerBean implements TopologyManagerLocal {
@RequiredPermissions({ @RequiredPermission(Permission.MANAGE_SETTINGS), @RequiredPermission(Permission.MANAGE_INVENTORY) }) + public void updateServerManualMaintenance(Subject subject, Integer[] serverIds, boolean manualMaintenance) { + if (serverIds.length > 0) { + try { + for (Integer id : serverIds) { + Server server = entityManager.find(Server.class, id); + if (manualMaintenance) { + server.addStatus(Server.Status.MANUAL_MAINTENANCE_MODE); + } else { + server.clearStatus(Server.Status.MANUAL_MAINTENANCE_MODE); + } + } + } catch (Exception e) { + log.debug("Failed to update HA server modes: " + e); + } + } + } + + @RequiredPermissions({ @RequiredPermission(Permission.MANAGE_SETTINGS), + @RequiredPermission(Permission.MANAGE_INVENTORY) }) public void updateServerMode(Subject subject, Integer[] serverIds, Server.OperationMode mode) { if (serverIds == null) { return; }
if (mode == null) { - throw new IllegalArgumentException("mode can not be null"); + throw new IllegalArgumentException("Mode cannot be null."); + } + if (!mode.isConfigurable()) { + throw new IllegalArgumentException("Cannot directly set a mode that is not configurable. Mode " + + mode.name() + " is not configurable."); }
if (serverIds.length > 0) { diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/TopologyManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/TopologyManagerLocal.java index 7d5f76a..11b9f95 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/TopologyManagerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/TopologyManagerLocal.java @@ -25,10 +25,8 @@ import javax.ejb.Local; import org.rhq.core.domain.auth.Subject; import org.rhq.core.domain.cloud.FailoverListDetails; import org.rhq.core.domain.cloud.Server; -import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.cloud.composite.ServerWithAgentCountComposite; import org.rhq.core.domain.criteria.ServerCriteria; -import org.rhq.core.domain.criteria.StorageNodeCriteria; import org.rhq.core.domain.resource.Agent; import org.rhq.core.domain.util.PageControl; import org.rhq.core.domain.util.PageList; @@ -121,6 +119,17 @@ public interface TopologyManagerLocal { void updateServerMode(Subject subject, Integer[] serverIds, Server.OperationMode mode);
/** + * Updates the manual maintenance flag for multiple servers. + * + * the subject needs to have MANAGE_INVENTORY and MANAGE_SETTINGS permissions. + * + * @param subject the caller + * @param serverIds + * @param manualMaintenance manual maintenance + */ + void updateServerManualMaintenance(Subject subject, Integer[] serverIds, boolean manualMainatenance); + + /** * Updates the server. * * the subject needs to have MANAGE_INVENTORY and MANAGE_SETTINGS permissions. diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/instance/ServerManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/instance/ServerManagerBean.java index 4300313..44bd557 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/instance/ServerManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/instance/ServerManagerBean.java @@ -41,6 +41,8 @@ import org.apache.commons.logging.LogFactory;
import org.rhq.core.domain.cloud.PartitionEventType; import org.rhq.core.domain.cloud.Server; +import org.rhq.core.domain.cloud.Server.OperationMode; +import org.rhq.core.domain.cloud.Server.Status; import org.rhq.core.domain.resource.Agent; import org.rhq.core.util.exception.ThrowableUtil; import org.rhq.enterprise.communications.GlobalSuspendCommandListener; @@ -50,6 +52,7 @@ import org.rhq.enterprise.server.cloud.PartitionEventManagerLocal; import org.rhq.enterprise.server.cloud.StatusManagerLocal; import org.rhq.enterprise.server.cloud.TopologyManagerLocal; import org.rhq.enterprise.server.core.comm.ServerCommunicationsServiceUtil; +import org.rhq.enterprise.server.storage.StorageClientManagerBean;
/** * If you want to manipulate or report on the {@link Server} instance that @@ -82,6 +85,9 @@ public class ServerManagerBean implements ServerManagerLocal { private TopologyManagerLocal topologyManager;
@EJB + private StorageClientManagerBean storageClientManager; + + @EJB private StatusManagerLocal agentStatusManager;
@EJB @@ -156,8 +162,10 @@ public class ServerManagerBean implements ServerManagerLocal { if (server == null) { return false; // don't reload caches if we don't know who we are } - boolean hadStatus = (server.getStatus() != 0); - server.clearStatus(); + boolean hadStatus = (server.hasStatus(Status.ALERT_DEFINITION) || server + .hasStatus(Status.RESOURCE_HIERARCHY_UPDATED)); + server.clearStatus(Status.ALERT_DEFINITION); + server.clearStatus(Status.RESOURCE_HIERARCHY_UPDATED); return hadStatus; }
@@ -182,7 +190,9 @@ public class ServerManagerBean implements ServerManagerLocal {
public void establishCurrentServerMode() { Server server = getServer(); - Server.OperationMode serverMode = server.getOperationMode(); + Server.OperationMode serverMode = determineServerOperationMode( + server.hasStatus(Server.Status.MANUAL_MAINTENANCE_MODE), storageClientManager.isClusterAvailable(), + server.getOperationMode());
// no state change means no work if (serverMode == lastEstablishedServerMode) @@ -221,31 +231,25 @@ public class ServerManagerBean implements ServerManagerLocal {
log.info("Notified communication layer of server operation mode " + serverMode);
- } else if (Server.OperationMode.INSTALLED == serverMode) { - + } else if (Server.OperationMode.INSTALLED == serverMode // The server must have just been installed and must be coming for the first time - // up as of this call. So, update the mode to NORMAL and update mtime as an initial heart beat. + // up as of this call. So, attempt to update the mode to NORMAL. // This will prevent a running CloudManagerJob from resetting to DOWN before the real // ServerManagerJob starts updating the heart beat regularly. - lastEstablishedServerMode = serverMode; - serverMode = Server.OperationMode.NORMAL; - server.setOperationMode(serverMode); - server.setMtime(System.currentTimeMillis()); - - } else if (Server.OperationMode.DOWN == serverMode) {
+ || Server.OperationMode.DOWN == serverMode) { // The server can't be DOWN if this code is executing, it means the server must be coming - // up as of this call. So, update the mode to NORMAL and update mtime as an initial heart beat. + // up as of this call. So, attempt to update the mode to NORMAL. // This will prevent a running CloudManagerJob from resetting to DOWN before the real // ServerManagerJob starts updating the heart beat regularly. + lastEstablishedServerMode = serverMode; - serverMode = Server.OperationMode.NORMAL; - server.setOperationMode(serverMode); - server.setMtime(System.currentTimeMillis()); + serverMode = determineServerOperationMode(server.hasStatus(Server.Status.MANUAL_MAINTENANCE_MODE), + storageClientManager.isClusterAvailable(), OperationMode.NORMAL); }
- // If this server just transitioned from INSTALLED to NORMAL operation mode then it - // has just been added to the cloud. Changing the number of servers in the cloud requires agent + // If this server just transitioned from INSTALLED to NORMAL operation mode then it + // has just been added to the cloud. Changing the number of servers in the cloud requires agent // distribution work, even if this is a 1-Server cloud. Generate a request for a repartitioning // of agent load, it will be executed on the next invocation of the cluster manager job. // Otherwise, audit the operation mode change as a partition event of interest. @@ -264,7 +268,8 @@ public class ServerManagerBean implements ServerManagerLocal { }
lastEstablishedServerMode = serverMode; - + server.setOperationMode(lastEstablishedServerMode); + server.setMtime(System.currentTimeMillis()); } catch (Exception e) { log.error("Unable to change HA Server Mode from " + lastEstablishedServerMode + " to " + serverMode + ": " + e); @@ -310,4 +315,28 @@ public class ServerManagerBean implements ServerManagerLocal { establishCurrentServerMode(); }
+ /** + * @param manualMaintenance + * @param storageNodeUp + * @param currentOperationMode + */ + private Server.OperationMode determineServerOperationMode(boolean isManualMaintenance, + boolean isStorageClusterAvailable, Server.OperationMode requestedOperationMode) { + + if (Server.OperationMode.DOWN == requestedOperationMode + || Server.OperationMode.INSTALLED == requestedOperationMode) { + return requestedOperationMode; + } + + if (Server.OperationMode.NORMAL == requestedOperationMode + || Server.OperationMode.MAINTENANCE == requestedOperationMode) { + if (!isManualMaintenance && isStorageClusterAvailable) { + return OperationMode.NORMAL; + } else { + return OperationMode.MAINTENANCE; + } + } + + throw new RuntimeException("Unable to determine new server operation mode."); + } } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/StartupBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/StartupBean.java index fa88263..646a9fd 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/StartupBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/StartupBean.java @@ -53,7 +53,6 @@ import org.quartz.SchedulerException; import org.rhq.core.db.DatabaseTypeFactory; import org.rhq.core.domain.auth.Subject; import org.rhq.core.domain.cloud.Server; -import org.rhq.core.domain.cloud.Server.OperationMode; import org.rhq.core.domain.common.ProductInfo; import org.rhq.core.domain.resource.Agent; import org.rhq.core.util.ObjectNameFactory; @@ -91,7 +90,6 @@ import org.rhq.enterprise.server.scheduler.jobs.SavedSearchResultCountRecalculat import org.rhq.enterprise.server.scheduler.jobs.StorageClusterReadRepairJob; import org.rhq.enterprise.server.scheduler.jobs.StorageNodeMaintenanceJob; import org.rhq.enterprise.server.storage.StorageClientManagerBean; -import org.rhq.enterprise.server.storage.StorageClusterHeartBeatJob; import 
org.rhq.enterprise.server.system.SystemManagerLocal; import org.rhq.enterprise.server.util.LookupUtil; import org.rhq.enterprise.server.util.concurrent.AlertSerializer; @@ -144,9 +142,6 @@ public class StartupBean implements StartupLocal { @EJB private StorageClientManagerBean storageClientManager;
- @EJB - private StorageClusterHeartBeatJob storageClusterHeartBeatJob; - @Resource private TimerService timerService; // needed to schedule our plugin scanner
@@ -275,8 +270,7 @@ public class StartupBean implements StartupLocal { log.info("Server is configured to start up in MAINTENANCE mode."); Server server = serverManager.getServer(); Integer[] serverId = new Integer[] { server.getId() }; - topologyManager.updateServerMode(LookupUtil.getSubjectManager().getOverlord(), serverId, - OperationMode.MAINTENANCE); + topologyManager.updateServerManualMaintenance(LookupUtil.getSubjectManager().getOverlord(), serverId, true); }
// Establish the current server mode for the server. This will move the server to NORMAL @@ -449,7 +443,8 @@ public class StartupBean implements StartupLocal { log.error("Cannot create storage node maintenance job.", e); }
- storageClientManager.init(); + storageClientManager.init(serverManager.getServer().getCtime()); + serverManager.establishCurrentServerMode(); }
/** diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java index 3f1943d..83ef02c 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java @@ -76,8 +76,9 @@ public class StorageClientManagerBean { private MetricsDAO metricsDAO; private MetricsServer metricsServer; private boolean initialized; + private StorageClusterMonitor storageClusterMonitor;
- public synchronized void init() { + public synchronized void init(long ctime) { if (initialized) { if (log.isDebugEnabled()) { log.debug("Storage client subsystem is already initialized. Skipping initialization."); @@ -103,11 +104,15 @@ public class StorageClientManagerBean { + "result of running dbsetup or deleting rows from rhq_storage_node table. Please re-install the " + "storage node to fix this issue."); } - session = createSession(username, password, storageNodes); + Session wrappedSession = createSession(username, password, storageNodeManager.getStorageNodes()); + session = new StorageSession(wrappedSession); + + storageClusterMonitor = new StorageClusterMonitor(); + session.addStorageStateListener(storageClusterMonitor); + metricsDAO = new MetricsDAO(session, metricsConfiguration);
- Server server = serverManager.getServer(); - initMetricsServer(isNewServerInstall, server.getCtime()); + initMetricsServer(isNewServerInstall, ctime);
initialized = true; log.info("Storage client subsystem is now initialized"); diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterHeartBeatJob.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterHeartBeatJob.java deleted file mode 100644 index 6f58a06..0000000 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterHeartBeatJob.java +++ /dev/null @@ -1,126 +0,0 @@ -package org.rhq.enterprise.server.storage; - -import static org.rhq.core.domain.cloud.Server.OperationMode.MAINTENANCE; -import static org.rhq.core.domain.cloud.Server.OperationMode.NORMAL; - -import java.util.List; - -import javax.annotation.Resource; -import javax.ejb.EJB; -import javax.ejb.Singleton; -import javax.ejb.Timeout; -import javax.ejb.TimerConfig; -import javax.ejb.TimerService; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; - -import org.rhq.cassandra.ClusterInitService; -import org.rhq.core.domain.cloud.Server; -import org.rhq.core.domain.cloud.StorageNode; -import org.rhq.enterprise.server.auth.SubjectManagerLocal; -import org.rhq.enterprise.server.cloud.StorageNodeManagerLocal; -import org.rhq.enterprise.server.cloud.TopologyManagerLocal; -import org.rhq.enterprise.server.cloud.instance.ServerManagerLocal; - -/** - * This job runs periodically to verify that the server can connect to the storage cluster. - * If the server cannot connect to any nodes in the cluster, the server will be put into - * maintenance mode until it is able to connect to the cluster. See - * {@link #checkClusterHeartBeat()} for more details. 
- * - * @author John Sanda - */ -@Singleton -public class StorageClusterHeartBeatJob { - - private final Log log = LogFactory.getLog(StorageClusterHeartBeatJob.class); - - @EJB - private ServerManagerLocal serverManager; - - @EJB - private StorageNodeManagerLocal storageNodeManager; - - @EJB - private TopologyManagerLocal topologyManager; - - @EJB - private SubjectManagerLocal subjectManager; - - @EJB - private StorageClientManagerBean storageClientManager; - - @Resource - private TimerService timerService; - - public void scheduleJob() { - long initialDelay = 3000; - long interval = 1000 * 60; - - timerService.createIntervalTimer(initialDelay, interval, new TimerConfig(null, false)); - } - - /** - * <p> - * Runs periodically to verify that the server can connect to the storage cluster. If - * the server cannot connect to any nodes in the cluster, the server will be put into - * maintenance mode until a connection can be made to the cluster. - * </p> - * <p> - * While client requests are made using CQL commands going over the native transport - * layer introduced in Cassandra 1.2, connectivity is checked via JMX. The JMX call - * is made to determine whether or not the native transport is running. Trying to do - * the check using the CQL driver gets complicated and introduces some non-trivial - * overhead due to the fact that the driver is async. - * </p> - */ - @Timeout - public void checkClusterHeartBeat() { - ClusterInitService clusterInitService = new ClusterInitService(); - Server server = serverManager.getServer(); - List<StorageNode> storageNodes = storageNodeManager.getStorageNodes(); - - if (storageNodes.isEmpty()) { - log.error("No storage nodes were found in the RHQ database. If this is your only RHQ server make sure " + - "that the rhq.cassandra.seeds property in <rhq-server-basedir>/bin/rhq-server.properties is " + - "properly configured. If you edit this property, you will have to restart the server for the change " + - "to take effect. 
The server will now go into maintenance mode since connectivity to storage " + - "nodes cannot be verified."); - putServerInMaintenanceMode(server); - } else { - boolean pingable = clusterInitService.ping(storageNodes, 1); - if (pingable) { - if (server.getOperationMode() != NORMAL) { - changeServerMode(server, NORMAL); - log.info("Restarting storage client subsystem..."); - storageClientManager.init(); - } - return; - } - - if (log.isWarnEnabled()) { - log.warn(server + " is unable to connect to any Cassandra node. Server will go into maintenance mode."); - } - putServerInMaintenanceMode(server); - } - } - - private void putServerInMaintenanceMode(Server rhqServer) { - changeServerMode(rhqServer, MAINTENANCE); - log.info("Preparing to shut down storage client subsystem"); - storageClientManager.shutdown(); - } - - private void changeServerMode(Server rhqServer, Server.OperationMode mode) { - if (rhqServer.getOperationMode() == mode) { - return; - } - - if (log.isInfoEnabled()) { - log.info("Moving " + rhqServer + " from " + rhqServer.getOperationMode() + " to " + mode); - } - topologyManager.updateServerMode(subjectManager.getOverlord(), new Integer[] {rhqServer.getId()}, mode); - } - -} diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java index 543036e..98d40cb 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java @@ -33,13 +33,10 @@ public class StorageClusterMonitor implements StorageStateListener { @Override public void onStorageNodeUp(InetAddress address) { log.info("Storage node at " + address.getHostAddress() + " is up"); + isClusterAvailable.set(true);
- if (isClusterDown.compareAndSet(true, false)) { - log.info("Taking server out of maintenance mode"); - updateServerMode(Server.OperationMode.NORMAL); - } - - StorageNodeManagerLocal storageNodeManager = LookupUtil.getStorageNodeManager(); + //TODO: Add these back at a later time + /*StorageNodeManagerLocal storageNodeManager = LookupUtil.getStorageNodeManager(); StorageNode newClusterNode = storageNodeManager.findStorageNodeByAddress(address);
if (newClusterNode == null) { @@ -64,19 +61,6 @@ public class StorageClusterMonitor implements StorageStateListener {
@Override public void onStorageClusterDown(NoHostAvailableException e) { - if (isClusterDown.compareAndSet(false, true)) { - log.error("The server cannot connect to any storage nodes. The server will now go into maintenance mode."); - updateServerMode(Server.OperationMode.MAINTENANCE); - } - } - - private void updateServerMode(Server.OperationMode mode) { - ServerManagerLocal serverManager = LookupUtil.getServerManager(); - TopologyManagerLocal topologyManager = LookupUtil.getTopologyManager(); - SubjectManagerLocal subjectManager = LookupUtil.getSubjectManager(); - - Server server = serverManager.getServer(); - - topologyManager.updateServerMode(subjectManager.getOverlord(), new Integer[] {server.getId()}, mode); + isClusterAvailable.set(false); } } diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/StorageSession.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/StorageSession.java index 3f7af3f..c8f44d9 100644 --- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/StorageSession.java +++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/StorageSession.java @@ -29,6 +29,14 @@ public class StorageSession implements Host.StateListener {
public void addStorageStateListener(StorageStateListener listener) { listeners.add(listener); + + for (Host host : wrappedSession.getCluster().getMetadata().getAllHosts()) { + if(host.getMonitor().isUp()){ + listener.onStorageNodeUp(host.getAddress()); + } else { + listener.onStorageNodeDown(host.getAddress()); + } + } }
public ResultSet execute(String query) {
commit 70318f445dde4c57ce7f9aacc0fdd22dbc336c2a Author: Stefan Negrea snegrea@redhat.com Date: Mon Aug 5 19:02:16 2013 -0500
Add restart to the set of rhqctl commands.
diff --git a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/Commands.java b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/Commands.java index ddee088..60dc67c 100644 --- a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/Commands.java +++ b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/Commands.java @@ -36,6 +36,7 @@ import org.apache.commons.cli.Options; import org.rhq.server.control.command.Console; import org.rhq.server.control.command.Install; import org.rhq.server.control.command.Remove; +import org.rhq.server.control.command.Restart; import org.rhq.server.control.command.Start; import org.rhq.server.control.command.Status; import org.rhq.server.control.command.Stop; @@ -57,6 +58,7 @@ public class Commands { registerCommand(new Install()); registerCommand(new Start()); registerCommand(new Stop()); + registerCommand(new Restart()); registerCommand(new Status()); registerCommand(new Console()); // Add the service removal command only on windows diff --git a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Restart.java b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Restart.java new file mode 100644 index 0000000..dbf53c5 --- /dev/null +++ b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Restart.java @@ -0,0 +1,69 @@ +/* + * + * * RHQ Management Platform + * * Copyright (C) 2005-2013 Red Hat, Inc. + * * All rights reserved. + * * + * * This program is free software; you can redistribute it and/or modify + * * it under the terms of the GNU General Public License, version 2, as + * * published by the Free Software Foundation, and/or the GNU Lesser + * * General Public License, version 2.1, also as published by the Free + * * Software Foundation. 
+ * * + * * This program is distributed in the hope that it will be useful, + * * but WITHOUT ANY WARRANTY; without even the implied warranty of + * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * * GNU General Public License and the GNU Lesser General Public License + * * for more details. + * * + * * You should have received a copy of the GNU General Public License + * * and the GNU Lesser General Public License along with this program; + * * if not, write to the Free Software Foundation, Inc., + * * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +package org.rhq.server.control.command; + +import org.apache.commons.cli.CommandLine; +import org.apache.commons.cli.Options; + +import org.rhq.server.control.ControlCommand; + +/** + * @author Stefan Negrea + */ +public class Restart extends ControlCommand { + + private Options options; + + public Restart() { + options = new Options().addOption(null, STORAGE_OPTION, false, "Restart RHQ storage node") + .addOption(null, SERVER_OPTION, false, "Restart RHQ server") + .addOption(null, AGENT_OPTION, false, "Restart RHQ agent"); + } + + @Override + public String getName() { + return "restart"; + } + + @Override + public String getDescription() { + return "Restarts RHQ services."; + } + + @Override + public Options getOptions() { + return options; + } + + @Override + protected void exec(CommandLine commandLine) { + Stop stop = new Stop(); + stop.exec(commandLine); + + Start start = new Start(); + start.exec(commandLine); + } +}
commit 0d897752fbe25a2aa946aea419392917b87b2075 Author: Jay Shaughnessy jshaughn@redhat.com Date: Wed Aug 7 17:31:11 2013 -0400
- Update MANAGE_BUNDLE_GROUPS perm handling to grant VIEW_BUNDLES and to allow assign/unassign of bundles to bundle groups, as per design - add testcode - Change MANAGE_BUNDLE to grant only the new global bundle perms, which should be sufficient for all bundle tasks - update dbsetup and dbupgrade appropriately - Fix Rolemanager.updateRole to apply permission grants for MANAGE_BUNDLE and MANAGE_BUNDLE_GROUPS - Review and update bundle perm I18N descriptions - Add new bundlegroup icons - make a few changes to permissions editor in gui as suggested by UX - fix some jdoc formatting
diff --git a/modules/core/dbutils/src/main/scripts/dbsetup/authz-data.xml b/modules/core/dbutils/src/main/scripts/dbsetup/authz-data.xml index ecc7d4a..75c3042 100644 --- a/modules/core/dbutils/src/main/scripts/dbsetup/authz-data.xml +++ b/modules/core/dbutils/src/main/scripts/dbsetup/authz-data.xml @@ -44,7 +44,7 @@ <table name="RHQ_PERMISSION"> <!-- give super-user-role all permissions (explicitly add resource perms, even though they are implied by MANAGE_INVENTORY) - (explicitly add bundle perms, even though they are implied by MANAGE_BUNDLE) --> + (explicitly add global bundle perms, even though they are implied by MANAGE_BUNDLE) --> <data ROLE_ID="1" OPERATION="0"/> <!-- Permission.MANAGE_SECURITY --> <data ROLE_ID="1" OPERATION="1"/> <!-- Permission.MANAGE_INVENTORY --> <data ROLE_ID="1" OPERATION="2"/> <!-- Permission.MANAGE_SETTINGS --> @@ -69,18 +69,14 @@ <data ROLE_ID="1" OPERATION="13"/> <!-- Permission.CONFIGURE_READ --> <data ROLE_ID="1" OPERATION="14"/> <!-- Permission.MANAGE_EVENTS --> <data ROLE_ID="1" OPERATION="16"/> <!-- Permission.MANAGE_DRIFT --> - <!-- bundle permissions start here--> - <data ROLE_ID="1" OPERATION="23"/> <!-- Permission.ASSIGN_BUNDLES_TO_GROUP --> - <data ROLE_ID="1" OPERATION="24"/> <!-- Permission.UNASSIGN_BUNDLES_FROM_GROUP --> - <data ROLE_ID="1" OPERATION="25"/> <!-- Permission.CREATE_BUNDLES_IN_GROUP --> - <data ROLE_ID="1" OPERATION="26"/> <!-- Permission.DELETE_BUNDLES_FROM_GROUP --> - <data ROLE_ID="1" OPERATION="27"/> <!-- Permission.VIEW_BUNDLES_IN_GROUP -->
- <!-- give all-resources-role MANAGE_INVENTORY and all bundle permissions other than MANAGE_BUNDLE_GROUPS - (explicitly add resource perms as well, even though they are implied by MANAGE_INVENTORY) --> + <!-- give all-resources-role MANAGE_INVENTORY + (explicitly add resource perms as well, even though they are implied by MANAGE_INVENTORY) + (explicitly add global bundle perms, even though they are implied by MANAGE_BUNDLE) --> <data ROLE_ID="2" OPERATION="1"/> <!-- Permission.MANAGE_INVENTORY --> <data ROLE_ID="2" OPERATION="12"/> <!-- Permission.MANAGE_BUNDLE --> <data ROLE_ID="2" OPERATION="17"/> <!-- Permission.VIEW_USERS --> + <data ROLE_ID="2" OPERATION="18"/> <!-- Permission.MANAGE_BUNDLE_GROUPS --> <data ROLE_ID="2" OPERATION="19"/> <!-- Permission.CREATE_BUNDLES --> <data ROLE_ID="2" OPERATION="20"/> <!-- Permission.DELETE_BUNDLES --> <data ROLE_ID="2" OPERATION="21"/> <!-- Permission.DEPLOY_BUNDLES --> @@ -97,13 +93,7 @@ <data ROLE_ID="2" OPERATION="11"/> <!-- Permission.CONFIGURE_WRITE --> <data ROLE_ID="2" OPERATION="13"/> <!-- Permission.CONFIGURE_READ --> <data ROLE_ID="2" OPERATION="14"/> <!-- Permission.MANAGE_EVENTS --> - <data ROLE_ID="2" OPERATION="16"/> <!-- Permission.MANAGE_DRIFT --> - <!-- bundle permissions start here--> - <data ROLE_ID="2" OPERATION="23"/> <!-- Permission.ASSIGN_BUNDLES_TO_GROUP --> - <data ROLE_ID="2" OPERATION="24"/> <!-- Permission.UNASSIGN_BUNDLES_FROM_GROUP --> - <data ROLE_ID="2" OPERATION="25"/> <!-- Permission.CREATE_BUNDLES_IN_GROUP --> - <data ROLE_ID="2" OPERATION="26"/> <!-- Permission.DELETE_BUNDLES_FROM_GROUP --> - <data ROLE_ID="2" OPERATION="27"/> <!-- Permission.VIEW_BUNDLES_IN_GROUP --> + <data ROLE_ID="2" OPERATION="16"/> <!-- Permission.MANAGE_DRIFT --> </table>
</dbsetup> diff --git a/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml b/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml index 9f8d018..341f0b2 100644 --- a/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml +++ b/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml @@ -2084,10 +2084,9 @@
<!-- Add new perms to superuser/all-resources roles --> <schema-directSQL> - <statement desc="Inserting MANAGE_BUNDLE_GROUPS permission for 'Super User' role only"> - INSERT INTO RHQ_PERMISSION (ROLE_ID, OPERATION) VALUES (1, 18) - </statement> - + <statement desc="Inserting MANAGE_BUNDLE_GROUPS permission for all MANAGE_BUNDLE (12) roles"> + INSERT INTO rhq_permission (role_id, operation) SELECT role_id, 18 FROM rhq_permission p where p.operation = 12 + </statement> <statement desc="Inserting CREATE_BUNDLES permission for all MANAGE_BUNDLE (12) roles"> INSERT INTO rhq_permission (role_id, operation) SELECT role_id, 19 FROM rhq_permission p where p.operation = 12 </statement> @@ -2100,21 +2099,6 @@ <statement desc="Inserting VIEW_BUNDLES permission for all MANAGE_BUNDLE roles"> INSERT INTO rhq_permission (role_id, operation) SELECT role_id, 22 FROM rhq_permission p where p.operation = 12 </statement> - <statement desc="Inserting ASSIGN_BUNDLES_TO_GROUP permission for all MANAGE_BUNDLE roles"> - INSERT INTO rhq_permission (role_id, operation) SELECT role_id, 23 FROM rhq_permission p where p.operation = 12 - </statement> - <statement desc="Inserting UNASSIGN_BUNDLES_FROM_GROUP permission for all MANAGE_BUNDLE roles"> - INSERT INTO rhq_permission (role_id, operation) SELECT role_id, 24 FROM rhq_permission p where p.operation = 12 - </statement> - <statement desc="Inserting CREATE_BUNDLES_IN_GROUP permission for all MANAGE_BUNDLE roles"> - INSERT INTO rhq_permission (role_id, operation) SELECT role_id, 25 FROM rhq_permission p where p.operation = 12 - </statement> - <statement desc="Inserting DELETE_BUNDLES_FROM_GROUP permission for all MANAGE_BUNDLE roles"> - INSERT INTO rhq_permission (role_id, operation) SELECT role_id, 26 FROM rhq_permission p where p.operation = 12 - </statement> - <statement desc="Inserting VIEW_BUNDLES_IN_GROUP permission for all MANAGE_BUNDLE roles"> - INSERT INTO rhq_permission (role_id, operation) SELECT role_id, 27 FROM rhq_permission p 
where p.operation = 12 - </statement> </schema-directSQL>
<!-- RHQ_BUNDLE_GROUP --> diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/authz/Permission.java b/modules/core/domain/src/main/java/org/rhq/core/domain/authz/Permission.java index 77f66fc..373c369 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/authz/Permission.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/authz/Permission.java @@ -242,8 +242,8 @@ public enum Permission { case RESOURCE: RESOURCE_ALL.add(permission); break; - case BUNDLE: - BUNDLE_ALL.add(permission); + default: + // bundle level perms do not need any aggregation break; } } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/ImageManager.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/ImageManager.java index 76ee6ae..cf2655c 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/ImageManager.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/ImageManager.java @@ -579,9 +579,12 @@ public class ImageManager {
public static String getBundleIcon() { - return "subsystems/content/Content_16.png"; + return "subsystems/bundle/Bundle_16.png"; }
+ public static String getBundleGroupIcon() { + return "subsystems/bundle/BundleGroup_16.png"; + }
public static String getConfigureIcon() { return "subsystems/configure/Configure_16.png"; diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/PermissionsEditor.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/PermissionsEditor.java index 77beefb..d270844 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/PermissionsEditor.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/PermissionsEditor.java @@ -101,8 +101,10 @@ public class PermissionsEditor extends EnhancedVStack { this.resourcePermissionsGrid = createResourcePermissionsGrid(); addMember(this.resourcePermissionsGrid);
- Label bundleGroupPermissionsHeader = new Label("<h4>" - + MSG.view_adminRoles_permissions_bundleGroupPermissions() + "</h4>"); + addMember(spacer); + + Label bundleGroupPermissionsHeader = new Label("<h4>" + MSG.view_adminRoles_permissions_bundlePermissions() + + "</h4>"); bundleGroupPermissionsHeader.setHeight(17); addMember(bundleGroupPermissionsHeader);
@@ -204,30 +206,10 @@ public class PermissionsEditor extends EnhancedVStack { records.add(record);
record = createPermissionRecord(MSG.view_adminRoles_permissions_perm_manageBundleGroups(), - "subsystems/content/Content", Permission.MANAGE_BUNDLE_GROUPS, + "subsystems/bundle/BundleGroup", Permission.MANAGE_BUNDLE_GROUPS, MSG.view_adminRoles_permissions_permDesc_manageBundleGroups()); records.add(record);
- record = createPermissionRecord(MSG.view_adminRoles_permissions_perm_createBundles(), - "subsystems/content/Content", Permission.CREATE_BUNDLES, - MSG.view_adminRoles_permissions_permDesc_createBundles()); - records.add(record); - - record = createPermissionRecord(MSG.view_adminRoles_permissions_perm_deleteBundles(), - "subsystems/content/Content", Permission.DELETE_BUNDLES, - MSG.view_adminRoles_permissions_permDesc_deleteBundles()); - records.add(record); - - record = createPermissionRecord(MSG.view_adminRoles_permissions_perm_viewBundles(), - "subsystems/content/Content", Permission.VIEW_BUNDLES, - MSG.view_adminRoles_permissions_permDesc_viewBundles()); - records.add(record); - - record = createPermissionRecord(MSG.view_adminRoles_permissions_perm_deployBundles(), - "subsystems/content/Content", Permission.DEPLOY_BUNDLES, - MSG.view_adminRoles_permissions_permDesc_deployBundles()); - records.add(record); - record = createPermissionRecord(MSG.view_adminRoles_permissions_perm_viewUsers(), "global/User", Permission.VIEW_USERS, MSG.view_adminRoles_permissions_permDesc_viewUsers()); records.add(record); @@ -340,28 +322,48 @@ public class PermissionsEditor extends EnhancedVStack {
List<ListGridRecord> records = new ArrayList<ListGridRecord>();
- ListGridRecord record = createPermissionRecord(MSG.view_adminRoles_permissions_perm_assignBundlesToGroup(), - "subsystems/bundle/Bundle", Permission.ASSIGN_BUNDLES_TO_GROUP, + ListGridRecord record = createPermissionRecord(MSG.view_adminRoles_permissions_perm_createBundles(), + "subsystems/content/Content", Permission.CREATE_BUNDLES, + MSG.view_adminRoles_permissions_permDesc_createBundles()); + records.add(record); + + record = createPermissionRecord(MSG.view_adminRoles_permissions_perm_deleteBundles(), + "subsystems/content/Content", Permission.DELETE_BUNDLES, + MSG.view_adminRoles_permissions_permDesc_deleteBundles()); + records.add(record); + + record = createPermissionRecord(MSG.view_adminRoles_permissions_perm_viewBundles(), + "subsystems/content/Content", Permission.VIEW_BUNDLES, + MSG.view_adminRoles_permissions_permDesc_viewBundles()); + records.add(record); + + record = createPermissionRecord(MSG.view_adminRoles_permissions_perm_deployBundles(), + "subsystems/content/Content", Permission.DEPLOY_BUNDLES, + MSG.view_adminRoles_permissions_permDesc_deployBundles()); + records.add(record); + + record = createPermissionRecord(MSG.view_adminRoles_permissions_perm_assignBundlesToGroup(), + "subsystems/bundle/BundleGroup", Permission.ASSIGN_BUNDLES_TO_GROUP, MSG.view_adminRoles_permissions_permDesc_assignBundlesToGroup()); records.add(record);
record = createPermissionRecord(MSG.view_adminRoles_permissions_perm_unassignBundlesFromGroup(), - "subsystems/bundle/Bundle", Permission.UNASSIGN_BUNDLES_FROM_GROUP, + "subsystems/bundle/BundleGroup", Permission.UNASSIGN_BUNDLES_FROM_GROUP, MSG.view_adminRoles_permissions_permDesc_unassignBundlesFromGroup()); records.add(record);
record = createPermissionRecord(MSG.view_adminRoles_permissions_perm_createBundlesInGroup(), - "subsystems/bundle/Bundle", Permission.CREATE_BUNDLES_IN_GROUP, + "subsystems/bundle/BundleGroup", Permission.CREATE_BUNDLES_IN_GROUP, MSG.view_adminRoles_permissions_permDesc_createBundlesInGroup()); records.add(record);
record = createPermissionRecord(MSG.view_adminRoles_permissions_perm_deleteBundlesFromGroup(), - "subsystems/bundle/Bundle", Permission.DELETE_BUNDLES_FROM_GROUP, + "subsystems/bundle/BundleGroup", Permission.DELETE_BUNDLES_FROM_GROUP, MSG.view_adminRoles_permissions_permDesc_deleteBundlesFromGroup()); records.add(record);
record = createPermissionRecord(MSG.view_adminRoles_permissions_perm_viewBundlesInGroup(), - "subsystems/bundle/Bundle", Permission.VIEW_BUNDLES_IN_GROUP, + "subsystems/bundle/BundleGroup", Permission.VIEW_BUNDLES_IN_GROUP, MSG.view_adminRoles_permissions_permDesc_viewBundlesInGroup()); records.add(record);
@@ -451,10 +453,15 @@ public class PermissionsEditor extends EnhancedVStack { .view_adminRoles_permissions_illegalDeselectionDueToCorrespondingWritePermSelection(permissionDisplayName); handleIllegalPermissionSelection(event, messageString); } else if (!authorized && selectedPermissions.contains(Permission.MANAGE_BUNDLE) - && Permission.BUNDLE_ALL.contains(permission)) { + && permission != Permission.MANAGE_BUNDLE && Permission.BUNDLE_ALL.contains(permission)) { String messageString = MSG .view_adminRoles_permissions_illegalDeselectionDueToManageBundleSelection(permissionDisplayName); handleIllegalPermissionSelection(event, messageString); + } else if (!authorized && selectedPermissions.contains(Permission.MANAGE_BUNDLE_GROUPS) + && permission == Permission.VIEW_BUNDLES) { + String messageString = MSG + .view_adminRoles_permissions_illegalDeselectionDueToManageBundleGroupsSelection(permissionDisplayName); + handleIllegalPermissionSelection(event, messageString); } else { updatePermissions(authorized, permission);
@@ -504,6 +511,12 @@ public class PermissionsEditor extends EnhancedVStack { messageString = MSG.view_adminRoles_permissions_autoselecting_manageBundle_implied(); redrawRequired = true; } + } else if (permission == Permission.MANAGE_BUNDLE_GROUPS) { + // MANAGE_BUNDLE_GROUPS implies VIEW_BUNDLES + if (this.selectedPermissions.add(Permission.VIEW_BUNDLES)) { + messageString = MSG.view_adminRoles_permissions_autoselecting_manageBundleGroups_implied(); + redrawRequired = true; + } } } else { this.selectedPermissions.remove(permission); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleEditView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleEditView.java index 9c2dc96..ee2073b 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleEditView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleEditView.java @@ -197,7 +197,7 @@ public class RoleEditView extends AbstractRecordEditor<RolesDataSource> implemen }
private Tab buildBundleGroupsTab(TabSet tabSet) { - Tab tab = new Tab(MSG.common_title_bundleGroups(), ImageManager.getBundleIcon()); + Tab tab = new Tab(MSG.common_title_bundleGroups(), ImageManager.getBundleGroupIcon()); // NOTE: We will set the tab content to the bundle group selector later once the Role has been fetched.
return tab; diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties index 8555ca2..67ec087 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties @@ -20,11 +20,11 @@ chart_hover_period_label = Period chart_hover_start_label = Start chart_hover_time_format = %I:%M:%S %p chart_ie_not_supported = Charting is not available for this browser. -chart_metrics= Metrics -chart_metrics_add_to_dashboard_label= Dashboards -chart_metrics_collapse_tooltip= Click here to see additional tabular availability data. -chart_metrics_expand_tooltip= Click here to collapse additional availability detail. -chart_metrics_sparkline_header= Chart +chart_metrics = Metrics +chart_metrics_add_to_dashboard_label = Dashboards +chart_metrics_collapse_tooltip = Click here to see additional tabular availability data. +chart_metrics_expand_tooltip = Click here to collapse additional availability detail. +chart_metrics_sparkline_header = Chart chart_no_data_label = No Data chart_single_value_label = Value chart_slider_button_bar_day = Day @@ -512,12 +512,14 @@ view_adminRoles_ldapGroupsReadOnly = LDAP group data is read only view_adminRoles_noLdap = The LDAP security integration is not configured. To configure LDAP, go to <a {0}>{1}</a>. view_adminRoles_permissions_autoselecting_configureRead_implied = Autodeselected CONFIGURE_WRITE permission, since lack of CONFIGURE_READ implies lack of it... view_adminRoles_permissions_autoselecting_configureWrite_implied = Autoselected CONFIGURE_READ permission, since CONFIGURE_WRITE implies it... 
-view_adminRoles_permissions_autoselecting_manageBundle_implied = Autoselected unselected permissions, since MANAGE_BUNDLE implies all other bundle permissions... +view_adminRoles_permissions_autoselecting_manageBundleGroups_implied = Autoselected View Bundles, which is granted by Manage Bundle Groups... +view_adminRoles_permissions_autoselecting_manageBundle_implied = Autoselected unselected permissions, since Manage Bundle permission grants Manage Bundle Groups, Create Bundles, Delete Bundles, View Bundles and Deploy_Bundles permissions... view_adminRoles_permissions_autoselecting_manageInventory_implied = Autoselected unselected Resource permissions, since MANAGE_INVENTORY implies all Resource permissions... view_adminRoles_permissions_autoselecting_manageSecurity_implied = Autoselected unselected permissions, since MANAGE_SECURITY implies all other permissions... -view_adminRoles_permissions_bundleGroupPermissions = Bundle Group Permissions +view_adminRoles_permissions_bundlePermissions = Bundle Permissions view_adminRoles_permissions_globalPermissions = Global Permissions view_adminRoles_permissions_illegalDeselectionDueToCorrespondingWritePermSelection = {0} read permission cannot be deselected, unless the {0} write permission, which implies the read permission, is deselected first. +view_adminRoles_permissions_illegalDeselectionDueToManageBundleGroupsSelection = {0} permission cannot be deselected, unless Manage Bundle Groups, which implies {0} permission, is deselected first. view_adminRoles_permissions_illegalDeselectionDueToManageBundleSelection = {0} permission cannot be deselected, unless Manage Bundle, which implies all Bundle permissions, is deselected first. view_adminRoles_permissions_illegalDeselectionDueToManageInventorySelection = {0} permission cannot be deselected, unless Manage Inventory, which implies all Resource permissions, is deselected first. 
view_adminRoles_permissions_illegalDeselectionDueToManageSecuritySelection = {0} permission cannot be deselected, unless the Manage Security permission, which implies all other permissions, is deselected first. @@ -525,20 +527,20 @@ view_adminRoles_permissions_isAuthorized = Authorized? view_adminRoles_permissions_isRead = Read? view_adminRoles_permissions_isWrite = Write? view_adminRoles_permissions_permDesc_assignBundlesToGroup = can copy a viewable bundle to the bundle group -view_adminRoles_permissions_permDesc_createBundles = can create new bundle [version]s. can copy existing bundles between viewable groups +view_adminRoles_permissions_permDesc_createBundles = can create new bundle [version]s. can assign viewable bundles to viewable groups view_adminRoles_permissions_permDesc_createBundlesInGroup = can create new bundle [version]s for the bundle group. can copy a viewable bundle to the bundle group. view_adminRoles_permissions_permDesc_deleteBundles = can delete or unassign viewable bundle [version]s view_adminRoles_permissions_permDesc_deleteBundlesFromGroup = can delete bundle [version]s from the bundle group (implicitly deleting then from other assigned groups) view_adminRoles_permissions_permDesc_deployBundles = can deploy any viewable bundle version to any viewable, deployable, compatible, resource group -view_adminRoles_permissions_permDesc_manageBundleGroups = can create, update, or delete bundle groups -view_adminRoles_permissions_permDesc_manageBundles = can create, update, or delete provisioning bundles (viewing is implied for everyone) +view_adminRoles_permissions_permDesc_manageBundleGroups = can create and delete bundle groups. can assign bundles to bundle groups. grants View Bundles permissions +view_adminRoles_permissions_permDesc_manageBundles = can perform any bundle task. a convenience permission that grants Manage Bundle Groups, Create Bundles, Delete Bundles, Deploy Bundles and View Bundles permissions. 
view_adminRoles_permissions_permDesc_manageInventory = has all Resource permissions, as described below, for all Resources; can create, update, and delete groups; and can import auto-discovered or manually discovered Resources view_adminRoles_permissions_permDesc_manageRepositories = can create, update, or delete repositories of any user (everyone can create their own repositories), can associate content sources to repositories. view_adminRoles_permissions_permDesc_manageSecurity = can create, update, or delete users and roles - implies all other permissions view_adminRoles_permissions_permDesc_manageSettings = can modify the {0} Server configuration and perform any Server-related functionality view_adminRoles_permissions_permDesc_unassignBundlesFromGroup = can unassign (not delete) a bundle from the bundle group -view_adminRoles_permissions_permDesc_viewBundles = can view any bundle including global bundles (those not assigned to any bundle group) -view_adminRoles_permissions_permDesc_viewBundlesInGroup = can view any bundle in the group +view_adminRoles_permissions_permDesc_viewBundles = can view bundle details, deployments, etc for any bundle, including unassigned bundles (those not assigned to any bundle group) +view_adminRoles_permissions_permDesc_viewBundlesInGroup = (IMPLIED) can view bundle details, deployments, etc for any bundle in bundle groups associated with the relevant roles. 
view_adminRoles_permissions_permDesc_viewUsers = can view other users, with the exception of their assigned roles view_adminRoles_permissions_permReadDesc_configure = view Resource configuration and Resource configuration revision history view_adminRoles_permissions_permReadDesc_control = (IMPLIED) view available operations and operation execution history @@ -2255,8 +2257,8 @@ widget_resourceFactoryWizard_importWizardTitle = Import Resource of Type [{0}] widget_resourceFactoryWizard_importWizardWindowTitle = Resource Import Wizard widget_resourceFactoryWizard_infoStepName = Resource Information widget_resourceFactoryWizard_infoStep_loadFail = Failed to get available Architectures -widget_resourceFactoryWizard_namePrompt = New Resource Name widget_resourceFactoryWizard_nameComment = Not all management plug-ins or their managed resources allow the agent to set the name for a new resource. This value will only be used by agent plug-ins that support the capability. For plug-ins that do not support the capability, the resource may receive a generic or different name when it is discovered. +widget_resourceFactoryWizard_namePrompt = New Resource Name widget_resourceFactoryWizard_templatePrompt = Connection Settings Template widget_resourceFactoryWizard_timeoutFailure = Timed out widget_resourceFactoryWizard_timeoutHelp = A timeout duration that if specified will override the default timeout for child resource creation (on the {0} Agent). The default timeout is set to 60 seconds. A higher value may be useful for particularly long create actions, like deployment of a large application. Usually used if a previous attempt suffered a timeout failure. Note that if there is a timeout failure, it is still possible that the resource deployment succeeded. In the event of a timeout you may want to execute a discovery scan before attempting to redeploy the resource. 
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties index c43817d..2c97c84 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties @@ -48,11 +48,11 @@ chart_hover_date_format = %d.%m.%y ##chart_hover_start_label = Start chart_hover_time_format = %H:%M:%S ##chart_ie_not_supported = Charting is not available for this browser. -##chart_metrics= Metrics -##chart_metrics_add_to_dashboard_label= Dashboards -##chart_metrics_collapse_tooltip= Click here to see additional tabular availability data. -##chart_metrics_expand_tooltip= Click here to collapse additional availability detail. -##chart_metrics_sparkline_header= Chart +##chart_metrics = Metrics +##chart_metrics_add_to_dashboard_label = Dashboards +##chart_metrics_collapse_tooltip = Click here to see additional tabular availability data. +##chart_metrics_expand_tooltip = Click here to collapse additional availability detail. +##chart_metrics_sparkline_header = Chart ##chart_no_data_label = No Data ##chart_single_value_label = Value ##chart_slider_button_bar_day = Day @@ -531,12 +531,14 @@ view_adminRoles_ldapGroupsReadOnly = data LDAP skupiny jsou jen pro Ätenà view_adminRoles_noLdap = Integrace LDAP nenà nakonfigurována. K nastavenà ÅÃzenà bezpeÄnosti pÅes LDAP prosÃm navÅ¡tivte <a {0}>{1}</a>. view_adminRoles_permissions_autoselecting_configureRead_implied = Automaticky odoznaÄeno CONFIGURE_WRITE povolenÃ, protoÅŸe absence CONFIGURE_READ to implikuje... view_adminRoles_permissions_autoselecting_configureWrite_implied = Automaticky oznaÄeno CONFIGURE_READ povolenÃ, protoÅŸe CONFIGURE_WRITE jej implikuje... 
-##view_adminRoles_permissions_autoselecting_manageBundle_implied = Autoselected unselected permissions, since MANAGE_BUNDLE implies all other bundle permissions... +##view_adminRoles_permissions_autoselecting_manageBundleGroups_implied = Autoselected View Bundles, which is granted by Manage Bundle Groups... +##view_adminRoles_permissions_autoselecting_manageBundle_implied = Autoselected unselected permissions, since Manage Bundle permission grants Manage Bundle Groups, Create Bundles, Delete Bundles, View Bundles and Deploy_Bundles permissions... view_adminRoles_permissions_autoselecting_manageInventory_implied = Automaticky oznaÄeny neoznaÄené zdroje, protoÅŸe MANAGE_INVENTORY implikuje povolenà na vÅ¡ech zdrojÃch... view_adminRoles_permissions_autoselecting_manageSecurity_implied = Automaticky oznaÄeny neoznaÄená povolenÃ, protoÅŸe MANAGE_SECURITY implikuje povolenà na vÅ¡ech práv... -view_adminRoles_permissions_bundleGroupPermissions = Bundle Group Permissions +##view_adminRoles_permissions_bundlePermissions = Bundle Permissions view_adminRoles_permissions_globalPermissions = Globálnà povolenà ##view_adminRoles_permissions_illegalDeselectionDueToCorrespondingWritePermSelection = {0} read permission cannot be deselected, unless the {0} write permission, which implies the read permission, is deselected first. +##view_adminRoles_permissions_illegalDeselectionDueToManageBundleGroupsSelection = {0} permission cannot be deselected, unless Manage Bundle Groups, which implies {0} permission, is deselected first. ##view_adminRoles_permissions_illegalDeselectionDueToManageBundleSelection = {0} permission cannot be deselected, unless Manage Bundle, which implies all Bundle permissions, is deselected first. ##view_adminRoles_permissions_illegalDeselectionDueToManageInventorySelection = {0} permission cannot be deselected, unless Manage Inventory, which implies all Resource permissions, is deselected first. 
##view_adminRoles_permissions_illegalDeselectionDueToManageSecuritySelection = {0} permission cannot be deselected, unless the Manage Security permission, which implies all other permissions, is deselected first. @@ -544,20 +546,20 @@ view_adminRoles_permissions_isAuthorized = Autorizován? view_adminRoles_permissions_isRead = ÄtenÃ? view_adminRoles_permissions_isWrite = Zápis? ##view_adminRoles_permissions_permDesc_assignBundlesToGroup = can copy a viewable bundle to the bundle group -##view_adminRoles_permissions_permDesc_createBundles = can create new bundle [version]s. can copy existing bundles between viewable groups +##view_adminRoles_permissions_permDesc_createBundles = can create new bundle [version]s. can assign viewable bundles to viewable groups ##view_adminRoles_permissions_permDesc_createBundlesInGroup = can create new bundle [version]s for the bundle group. can copy a viewable bundle to the bundle group. ##view_adminRoles_permissions_permDesc_deleteBundles = can delete or unassign viewable bundle [version]s ##view_adminRoles_permissions_permDesc_deleteBundlesFromGroup = can delete bundle [version]s from the bundle group (implicitly deleting then from other assigned groups) ##view_adminRoles_permissions_permDesc_deployBundles = can deploy any viewable bundle version to any viewable, deployable, compatible, resource group -##view_adminRoles_permissions_permDesc_manageBundleGroups = can create, update, or delete bundle groups -view_adminRoles_permissions_permDesc_manageBundles = můşe vytváÅet, mÄnit, mazat balÃky (zobrazovat můşe kdokoli) +##view_adminRoles_permissions_permDesc_manageBundleGroups = can create and delete bundle groups. can assign bundles to bundle groups. grants View Bundles permissions +##view_adminRoles_permissions_permDesc_manageBundles = can perform any bundle task. a convenience permission that grants Manage Bundle Groups, Create Bundles, Delete Bundles, Deploy Bundles and View Bundles permissions. 
view_adminRoles_permissions_permDesc_manageInventory = má vÅ¡echna práva zdroje: můşe vytváÅet, mÄnit, mazat skupiny, můşe importovat automaticky nebo manuálnÄ nalezené zdroje view_adminRoles_permissions_permDesc_manageRepositories = můşe vytváÅet, mÄnit, mazat repozitáÅe jakÃœchkoli uÅŸivatelů (kaÅŸdÃœ můşe vytváÅet své repozitáÅe), můşe asociovat zdroje obsahů s repozitáÅi view_adminRoles_permissions_permDesc_manageSecurity = můşe vytváÅet, mÄnit, mazat uÅŸivatele a role - implikuje vÅ¡echna ostatnà povolenà view_adminRoles_permissions_permDesc_manageSettings = můşe modifikovat {0} serverovou konfiguraci a provozovat jakékoli operace souvisejÃcà se serverem ##view_adminRoles_permissions_permDesc_unassignBundlesFromGroup = can unassign (not delete) a bundle from the bundle group -##view_adminRoles_permissions_permDesc_viewBundles = can view any bundle including global bundles (those not assigned to any bundle group) -##view_adminRoles_permissions_permDesc_viewBundlesInGroup = can view any bundle in the group +##view_adminRoles_permissions_permDesc_viewBundles = can view bundle details, deployments, etc for any bundle, including unassigned bundles (those not assigned to any bundle group) +##view_adminRoles_permissions_permDesc_viewBundlesInGroup = (IMPLIED) can view bundle details, deployments, etc for any bundle in bundle groups associated with the relevant roles. 
view_adminRoles_permissions_permDesc_viewUsers = můşe zobrazovat ostatnà uÅŸivatele s vÃœjimkou jejich pÅiÅazenÃœch rolà view_adminRoles_permissions_permReadDesc_configure = zobrazà konfiguraci zdroje a historii konfigurace zdroje view_adminRoles_permissions_permReadDesc_control = (VÃCHOZÃ) zobrazà dostupné operace a historii spouÅ¡tÄnà operacà diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties index 7159f03..d4f053b 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties @@ -23,11 +23,11 @@ chart_hover_period_label = Zeitraum chart_hover_start_label = Start chart_hover_time_format = %H:%M:%S chart_ie_not_supported = Charting ist bei diesem Browser nicht unterstÃŒtzt -##chart_metrics= Metrics -##chart_metrics_collapse_tooltip= Click here to see additional tabular availability data. -##chart_metrics_expand_tooltip= Click here to collapse additional availability detail. -##chart_metrics_sparkline_header= Chart -##chart_metrics_add_to_dashboard_label= Dashboards +##chart_metrics = Metrics +##chart_metrics_add_to_dashboard_label = Dashboards +##chart_metrics_collapse_tooltip = Click here to see additional tabular availability data. +##chart_metrics_expand_tooltip = Click here to collapse additional availability detail. +##chart_metrics_sparkline_header = Chart chart_no_data_label = Keine Daten vorhanden ##chart_single_value_label = Value chart_slider_button_bar_day = Tag @@ -510,12 +510,14 @@ view_adminRoles_ldapGroupsReadOnly = LDAP Gruppendaten können nur gelesen werde view_adminRoles_noLdap = Die LDAP-Integration ist nicht konfiguriert. Um LDAP zu konfigurieren, wechseln sie zu <a {0}>{1}</a>. 
##view_adminRoles_permissions_autoselecting_configureRead_implied = Autodeselected CONFIGURE_WRITE permission, since lack of CONFIGURE_READ implies lack of it... ##view_adminRoles_permissions_autoselecting_configureWrite_implied = Autoselected CONFIGURE_READ permission, since CONFIGURE_WRITE implies it... -##view_adminRoles_permissions_autoselecting_manageBundle_implied = Autoselected unselected permissions, since MANAGE_BUNDLE implies all other bundle permissions... +##view_adminRoles_permissions_autoselecting_manageBundleGroups_implied = Autoselected View Bundles, which is granted by Manage Bundle Groups... +##view_adminRoles_permissions_autoselecting_manageBundle_implied = Autoselected unselected permissions, since Manage Bundle permission grants Manage Bundle Groups, Create Bundles, Delete Bundles, View Bundles and Deploy_Bundles permissions... ##view_adminRoles_permissions_autoselecting_manageInventory_implied = Autoselected unselected Resource permissions, since MANAGE_INVENTORY implies all Resource permissions... ##view_adminRoles_permissions_autoselecting_manageSecurity_implied = Autoselected unselected permissions, since MANAGE_SECURITY implies all other permissions... -view_adminRoles_permissions_bundleGroupPermissions = Bundle Group Permissions +##view_adminRoles_permissions_bundlePermissions = Bundle Permissions view_adminRoles_permissions_globalPermissions = Globale Rechte ##view_adminRoles_permissions_illegalDeselectionDueToCorrespondingWritePermSelection = {0} read permission cannot be deselected, unless the {0} write permission, which implies the read permission, is deselected first. +##view_adminRoles_permissions_illegalDeselectionDueToManageBundleGroupsSelection = {0} permission cannot be deselected, unless Manage Bundle Groups, which implies {0} permission, is deselected first. 
##view_adminRoles_permissions_illegalDeselectionDueToManageBundleSelection = {0} permission cannot be deselected, unless Manage Bundle, which implies all Bundle permissions, is deselected first. ##view_adminRoles_permissions_illegalDeselectionDueToManageInventorySelection = {0} permission cannot be deselected, unless Manage Inventory, which implies all Resource permissions, is deselected first. ##view_adminRoles_permissions_illegalDeselectionDueToManageSecuritySelection = {0} permission cannot be deselected, unless the Manage Security permission, which implies all other permissions, is deselected first. @@ -523,20 +525,20 @@ view_adminRoles_permissions_isAuthorized = Berechtigt? view_adminRoles_permissions_isRead = Lesen? view_adminRoles_permissions_isWrite = Schreiben? ##view_adminRoles_permissions_permDesc_assignBundlesToGroup = can copy a viewable bundle to the bundle group -##view_adminRoles_permissions_permDesc_createBundles = can create new bundle [version]s. can copy existing bundles between viewable groups +##view_adminRoles_permissions_permDesc_createBundles = can create new bundle [version]s. can assign viewable bundles to viewable groups ##view_adminRoles_permissions_permDesc_createBundlesInGroup = can create new bundle [version]s for the bundle group. can copy a viewable bundle to the bundle group. 
##view_adminRoles_permissions_permDesc_deleteBundles = can delete or unassign viewable bundle [version]s ##view_adminRoles_permissions_permDesc_deleteBundlesFromGroup = can delete bundle [version]s from the bundle group (implicitly deleting then from other assigned groups) ##view_adminRoles_permissions_permDesc_deployBundles = can deploy any viewable bundle version to any viewable, deployable, compatible, resource group -##view_adminRoles_permissions_permDesc_manageBundleGroups = can create, update, or delete bundle groups -view_adminRoles_permissions_permDesc_manageBundles = Kann Bundles anlegen, aktualisieren und löschen (Ansehen ist fÃŒr alle implizit erlaubt). +##view_adminRoles_permissions_permDesc_manageBundleGroups = can create and delete bundle groups. can assign bundles to bundle groups. grants View Bundles permissions +##view_adminRoles_permissions_permDesc_manageBundles = can perform any bundle task. a convenience permission that grants Manage Bundle Groups, Create Bundles, Delete Bundles, Deploy Bundles and View Bundles permissions. view_adminRoles_permissions_permDesc_manageInventory = Hat alle Rechte auf alle Ressourcen, wie unten beschrieben. Kann Gruppen anlegen, aktualisieren und löschen. Kann Ressourcen in das Inventar aufnehmen. ##view_adminRoles_permissions_permDesc_manageRepositories = can create, update, or delete repositories of any user (everyone can create their own repositories), can associate content sources to repositories. view_adminRoles_permissions_permDesc_manageSecurity = Kann Benutzer und Rollen anlegen, aktualisieren oder löschen (Anschauen ist fÃŒr alle implizit erlaubt) view_adminRoles_permissions_permDesc_manageSettings = Kann die Konfiguration des {0}-Servers Àndern und jegliche Server-bezogene FunktionalitÀt ausfÃŒhren. 
##view_adminRoles_permissions_permDesc_unassignBundlesFromGroup = can unassign (not delete) a bundle from the bundle group -##view_adminRoles_permissions_permDesc_viewBundles = can view any bundle including global bundles (those not assigned to any bundle group) -##view_adminRoles_permissions_permDesc_viewBundlesInGroup = can view any bundle in the group +##view_adminRoles_permissions_permDesc_viewBundles = can view bundle details, deployments, etc for any bundle, including unassigned bundles (those not assigned to any bundle group) +##view_adminRoles_permissions_permDesc_viewBundlesInGroup = (IMPLIED) can view bundle details, deployments, etc for any bundle in bundle groups associated with the relevant roles. ##view_adminRoles_permissions_permDesc_viewUsers = can view other users, with the exception of their assigned roles view_adminRoles_permissions_permReadDesc_configure = Ansehen der Ressourcen-Konfiguration und des Verlaufs derselben. view_adminRoles_permissions_permReadDesc_control = (IMPLIZIT) Ansehen der verfÃŒgbaren Operationen und des Verlaufs der ausgefÃŒhren Operationen diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties index 10dd2d1..c7c1f0a 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties @@ -509,12 +509,14 @@ view_adminRoles_ldapGroupsReadOnly = LDAPã°ã«ãŒãããŒã¿ã¯èªã¿åºãå° view_adminRoles_noLdap = LDAPã»ãã¥ãªãã£ã®çµ±åã¯æ§æãããŠããŸãããLDAPãæ§æããã«ã¯ã <a {0}>{1}</a>ã«è¡ã£ãŠãã ããã view_adminRoles_permissions_autoselecting_configureRead_implied = CONFIGURE_WRITEæš©éãèªåçã«éžæãããŸããããªããªããCONFIGURE_READãç¡ãããšããããæ瀺ããŠããããã§ãã 
view_adminRoles_permissions_autoselecting_configureWrite_implied = CONFIGURE_READæš©éãèªåçã«éžæãããŸããããªããªããCONFIGURE_WRITEããããæ瀺ããŠããããã§ãã -##view_adminRoles_permissions_autoselecting_manageBundle_implied = Autoselected unselected permissions, since MANAGE_BUNDLE implies all other bundle permissions... +##view_adminRoles_permissions_autoselecting_manageBundleGroups_implied = Autoselected View Bundles, which is granted by Manage Bundle Groups... +##view_adminRoles_permissions_autoselecting_manageBundle_implied = Autoselected unselected permissions, since Manage Bundle permission grants Manage Bundle Groups, Create Bundles, Delete Bundles, View Bundles and Deploy_Bundles permissions... view_adminRoles_permissions_autoselecting_manageInventory_implied = æªéžæã®ãªãœãŒã¹æš©éãèªåçã«éžæãããŸããããªããªããMANAGE_INVENTORYã¯ãã¹ãŠã®ãªãœãŒã¹æš©éãæ瀺ããŠããããã§ãã view_adminRoles_permissions_autoselecting_manageSecurity_implied = æªéžæã®æš©éãèªåçã«éžæãããŸããããªããªããMANAGE_SECURITYã¯ä»ã®ãã¹ãŠã®æš©éãæ瀺ããŠããããã§ãã -view_adminRoles_permissions_bundleGroupPermissions = Bundle Group Permissions +##view_adminRoles_permissions_bundlePermissions = Bundle Permissions view_adminRoles_permissions_globalPermissions = ã°ããŒãã«æš©é view_adminRoles_permissions_illegalDeselectionDueToCorrespondingWritePermSelection = {0} èªã¿åãæš©éã¯éžæ解é€ã§ããŸããã§ãããèªã¿åãæš©éãæ瀺ãã {0} æžã蟌ã¿æš©éãæåã«éžæ解é€ãããªããã°ããã¯ã§ããŸããã +##view_adminRoles_permissions_illegalDeselectionDueToManageBundleGroupsSelection = {0} permission cannot be deselected, unless Manage Bundle Groups, which implies {0} permission, is deselected first. ##view_adminRoles_permissions_illegalDeselectionDueToManageBundleSelection = {0} permission cannot be deselected, unless Manage Bundle, which implies all Bundle permissions, is deselected first. 
view_adminRoles_permissions_illegalDeselectionDueToManageInventorySelection = {0} æš©éã¯éžæ解é€ã§ããŸããã§ãããä»ã®ãã¹ãŠã®ãªãœãŒã¹ãæ瀺ãã管çã€ã³ãã³ããªãæåã«éžæ解é€ãããªããã°ããã¯ã§ããŸããã view_adminRoles_permissions_illegalDeselectionDueToManageSecuritySelection = {0} æš©éã¯éžæ解é€ã§ããŸããã§ãããä»ã®ãã¹ãŠã®æš©éãæ瀺ãã管çã»ãã¥ãªãã£æš©éãæåã«éžæ解é€ãããªããã°ããã¯ã§ããŸããã @@ -522,20 +524,20 @@ view_adminRoles_permissions_isAuthorized = æš©éãããã? view_adminRoles_permissions_isRead = èªã¿åºãã§ãã? view_adminRoles_permissions_isWrite = æžã蟌ã¿ã§ãã? ##view_adminRoles_permissions_permDesc_assignBundlesToGroup = can copy a viewable bundle to the bundle group -##view_adminRoles_permissions_permDesc_createBundles = can create new bundle [version]s. can copy existing bundles between viewable groups +##view_adminRoles_permissions_permDesc_createBundles = can create new bundle [version]s. can assign viewable bundles to viewable groups ##view_adminRoles_permissions_permDesc_createBundlesInGroup = can create new bundle [version]s for the bundle group. can copy a viewable bundle to the bundle group. ##view_adminRoles_permissions_permDesc_deleteBundles = can delete or unassign viewable bundle [version]s ##view_adminRoles_permissions_permDesc_deleteBundlesFromGroup = can delete bundle [version]s from the bundle group (implicitly deleting then from other assigned groups) ##view_adminRoles_permissions_permDesc_deployBundles = can deploy any viewable bundle version to any viewable, deployable, compatible, resource group -##view_adminRoles_permissions_permDesc_manageBundleGroups = can create, update, or delete bundle groups -view_adminRoles_permissions_permDesc_manageBundles = ããããžã§ãã³ã°ãã³ãã«ã®äœæãæŽæ°ãåé€ãå¯èœã§ã(誰ã§ãæé»çã«é²èŠ§å¯èœã§ã) +##view_adminRoles_permissions_permDesc_manageBundleGroups = can create and delete bundle groups. can assign bundles to bundle groups. grants View Bundles permissions +##view_adminRoles_permissions_permDesc_manageBundles = can perform any bundle task. 
a convenience permission that grants Manage Bundle Groups, Create Bundles, Delete Bundles, Deploy Bundles and View Bundles permissions. view_adminRoles_permissions_permDesc_manageInventory = ãã¹ãŠã®ãªãœãŒã¹ã«ã€ããŠãã¹ãŠã®ãªãœãŒã¹æš©éãæã¡ãŸããã€ãŸããã°ã«ãŒãã®äœæãæŽæ°ãåé€ããããŠèªåæ€åºãããããŸãã¯æåã§æ€åºããããªãœãŒã¹ã®ã€ã³ããŒããå¯èœã§ã view_adminRoles_permissions_permDesc_manageRepositories = ãŠãŒã¶ãŒã®ãªããžããªã®äœæãæŽæ°ãåé€ãå¯èœ(誰ã§ããªããžããªãäœæå¯èœ)ã§ãã³ã³ãã³ããœãŒã¹ãšãªããžããªãé¢é£ã¥ããã§ããŸãã view_adminRoles_permissions_permDesc_manageSecurity = ãŠãŒã¶ãŒãšããŒã«ã®äœæãæŽæ°ãåé€ããã®ä»ãã¹ãŠã®æš©éãå¯èœã§ã view_adminRoles_permissions_permDesc_manageSettings = {0}ãµãŒããŒã®ä¿®æ£ãšä»»æã®ãµãŒããŒé¢é£ã®æ©èœã®å®è¡ãã§ããŸã ##view_adminRoles_permissions_permDesc_unassignBundlesFromGroup = can unassign (not delete) a bundle from the bundle group -##view_adminRoles_permissions_permDesc_viewBundles = can view any bundle including global bundles (those not assigned to any bundle group) -##view_adminRoles_permissions_permDesc_viewBundlesInGroup = can view any bundle in the group +##view_adminRoles_permissions_permDesc_viewBundles = can view bundle details, deployments, etc for any bundle, including unassigned bundles (those not assigned to any bundle group) +##view_adminRoles_permissions_permDesc_viewBundlesInGroup = (IMPLIED) can view bundle details, deployments, etc for any bundle in bundle groups associated with the relevant roles. 
view_adminRoles_permissions_permDesc_viewUsers = ä»ã®ãŠãŒã¶ãŒãé²èŠ§ã§ããŸãããããããããã«å²ãåœãŠãããããŒã«ã¯èŠããŸããã view_adminRoles_permissions_permReadDesc_configure = ãªãœãŒã¹æ§æãšãªãœãŒã¹æ§æãªããžã§ã³å±¥æŽã®é²èŠ§ view_adminRoles_permissions_permReadDesc_control = å©çšå¯èœãªãã¬ãŒã·ã§ã³; ãªãã¬ãŒã·ã§ã³å®è¡å±¥æŽã®(æé»çãª)é²èŠ§ diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties index 9e4f7b2..0213540 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties @@ -460,12 +460,14 @@ view_adminRoles_ldapGroups = LDAP 귞룹 view_adminRoles_noLdap = LDAP 볎ì íµí©ìŽ ì€ì ëì§ ìììµëë€. LDAPì 구ì±íë €ë©Ž <a {0}>{1}</a>ë¡ ê°ììì. ##view_adminRoles_permissions_autoselecting_configureRead_implied = Autodeselected CONFIGURE_WRITE permission, since lack of CONFIGURE_READ implies lack of it... ##view_adminRoles_permissions_autoselecting_configureWrite_implied = Autoselected CONFIGURE_READ permission, since CONFIGURE_WRITE implies it... -##view_adminRoles_permissions_autoselecting_manageBundle_implied = Autoselected unselected permissions, since MANAGE_BUNDLE implies all other bundle permissions... +##view_adminRoles_permissions_autoselecting_manageBundleGroups_implied = Autoselected View Bundles, which is granted by Manage Bundle Groups... +##view_adminRoles_permissions_autoselecting_manageBundle_implied = Autoselected unselected permissions, since Manage Bundle permission grants Manage Bundle Groups, Create Bundles, Delete Bundles, View Bundles and Deploy_Bundles permissions... 
##view_adminRoles_permissions_autoselecting_manageInventory_implied = Autoselected unselected Resource permissions, since MANAGE_INVENTORY implies all Resource permissions... ##view_adminRoles_permissions_autoselecting_manageSecurity_implied = Autoselected unselected permissions, since MANAGE_SECURITY implies all other permissions... -view_adminRoles_permissions_bundleGroupPermissions = Bundle Group Permissions +##view_adminRoles_permissions_bundlePermissions = Bundle Permissions view_adminRoles_permissions_globalPermissions = êžë¡ë² ê¶í view_adminRoles_permissions_illegalDeselectionDueToCorrespondingWritePermSelection = {0} ìœêž° ê¶íì ì í íŽì í ì ììµëë€. ìœêž° ê¶íì ììíë {0} ì°êž° ê¶íìŽ ëšŒì ì í íŽì ëìŽìŒ ê·žê²ì ìíí ì ììµëë€. +##view_adminRoles_permissions_illegalDeselectionDueToManageBundleGroupsSelection = {0} permission cannot be deselected, unless Manage Bundle Groups, which implies {0} permission, is deselected first. ##view_adminRoles_permissions_illegalDeselectionDueToManageBundleSelection = {0} permission cannot be deselected, unless Manage Bundle, which implies all Bundle permissions, is deselected first. view_adminRoles_permissions_illegalDeselectionDueToManageInventorySelection = {0} ê¶íì ì í íŽì í ì ììµëë€. ë€ë¥ž 몚ë ììì ììíë êŽëŠ¬ ìžë²€í ëŠ¬ê° ëšŒì ì í íŽì ëìŽìŒ ê·žê²ì ìíí ì ììµëë€. view_adminRoles_permissions_illegalDeselectionDueToManageSecuritySelection = {0} ê¶íì ì í íŽì í ì ììµëë€. ë€ë¥ž 몚ë ê¶íì ììíë êŽëŠ¬ 볎ì ê¶íìŽ ëšŒì ì í íŽì ëìŽìŒ ê·žê²ì ìíí ì ììµëë€. @@ -473,20 +475,20 @@ view_adminRoles_permissions_isAuthorized = ê¶íìŽ ììµëê¹? ##view_adminRoles_permissions_isRead = Read? ##view_adminRoles_permissions_isWrite = Write? ##view_adminRoles_permissions_permDesc_assignBundlesToGroup = can copy a viewable bundle to the bundle group -##view_adminRoles_permissions_permDesc_createBundles = can create new bundle [version]s. 
can copy existing bundles between viewable groups +##view_adminRoles_permissions_permDesc_createBundles = can create new bundle [version]s. can assign viewable bundles to viewable groups ##view_adminRoles_permissions_permDesc_createBundlesInGroup = can create new bundle [version]s for the bundle group. can copy a viewable bundle to the bundle group. ##view_adminRoles_permissions_permDesc_deleteBundles = can delete or unassign viewable bundle [version]s ##view_adminRoles_permissions_permDesc_deleteBundlesFromGroup = can delete bundle [version]s from the bundle group (implicitly deleting then from other assigned groups) ##view_adminRoles_permissions_permDesc_deployBundles = can deploy any viewable bundle version to any viewable, deployable, compatible, resource group -##view_adminRoles_permissions_permDesc_manageBundleGroups = can create, update, or delete bundle groups -##view_adminRoles_permissions_permDesc_manageBundles = can create, update, or delete provisioning bundles (viewing is implied for everyone) +##view_adminRoles_permissions_permDesc_manageBundleGroups = can create and delete bundle groups. can assign bundles to bundle groups. grants View Bundles permissions +##view_adminRoles_permissions_permDesc_manageBundles = can perform any bundle task. a convenience permission that grants Manage Bundle Groups, Create Bundles, Delete Bundles, Deploy Bundles and View Bundles permissions. view_adminRoles_permissions_permDesc_manageInventory = 몚ë ììì 몚ë ìì ê¶íì ê°ìµëë€. ìŠ, 귞룹ì ìì±, ìì , ìëê°ì§ ëë ìëìŒë¡ ê²ì¶ë ììì ê°ì žì¬ì ììµëë€. ##view_adminRoles_permissions_permDesc_manageRepositories = can create, update, or delete repositories of any user (everyone can create their own repositories), can associate content sources to repositories. 
##view_adminRoles_permissions_permDesc_manageSecurity = can create, update, or delete users and roles - implies all other permissions view_adminRoles_permissions_permDesc_manageSettings = {0} ìë²ì ìì ë° ëªšë ìë² êŽë š êž°ë¥ì ìíí ì ììµëë€. ##view_adminRoles_permissions_permDesc_unassignBundlesFromGroup = can unassign (not delete) a bundle from the bundle group -##view_adminRoles_permissions_permDesc_viewBundles = can view any bundle including global bundles (those not assigned to any bundle group) -##view_adminRoles_permissions_permDesc_viewBundlesInGroup = can view any bundle in the group +##view_adminRoles_permissions_permDesc_viewBundles = can view bundle details, deployments, etc for any bundle, including unassigned bundles (those not assigned to any bundle group) +##view_adminRoles_permissions_permDesc_viewBundlesInGroup = (IMPLIED) can view bundle details, deployments, etc for any bundle in bundle groups associated with the relevant roles. view_adminRoles_permissions_permDesc_viewUsers = ë€ë¥ž ì¬ì©ì륌 볌 ì ììµëë€. ê·žë¬ë ê·žë€ìê² í ë¹ë ìí ì 볎ìŽì§ ììµëë€. view_adminRoles_permissions_permReadDesc_configure = ìì ì€ì ë° ìì ì€ì ìì êž°ë¡ ë³Žêž° ##view_adminRoles_permissions_permReadDesc_control = (IMPLIED) view available operations and operation execution history diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties index 2369cf4..952b01f 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties @@ -514,12 +514,14 @@ view_adminRoles_ldapGroupsReadOnly = Informa\u00E7\u00F5es do grupo LDAP com per view_adminRoles_noLdap = A integra\u00E7\u00E3o com o LDAP ainda n\u00E3o foi configurada. 
Para configurar o LDAP acesse <a {0}>{1}</a>. view_adminRoles_permissions_autoselecting_configureRead_implied = Permiss\u00E3o CONFIGURE_WRITE desmarcada automaticamente devida a aus\u00EAncia da permiss\u00E3o CONFIGURE_READ... view_adminRoles_permissions_autoselecting_configureWrite_implied = Permiss\u00E3o CONFIGURE_READ marcada automaticamente devida a marca\u00E7\u00E3o de CONFIGURE_WRITE... -##view_adminRoles_permissions_autoselecting_manageBundle_implied = Autoselected unselected permissions, since MANAGE_BUNDLE implies all other bundle permissions... +##view_adminRoles_permissions_autoselecting_manageBundleGroups_implied = Autoselected View Bundles, which is granted by Manage Bundle Groups... +##view_adminRoles_permissions_autoselecting_manageBundle_implied = Autoselected unselected permissions, since Manage Bundle permission grants Manage Bundle Groups, Create Bundles, Delete Bundles, View Bundles and Deploy_Bundles permissions... view_adminRoles_permissions_autoselecting_manageInventory_implied = Autoselected unselected Resource permissions, since MANAGE_INVENTORY implies all Resource permissions... view_adminRoles_permissions_autoselecting_manageSecurity_implied = Autoselected unselected permissions, since MANAGE_SECURITY implies all other permissions... -view_adminRoles_permissions_bundleGroupPermissions = Bundle Group Permissions +##view_adminRoles_permissions_bundlePermissions = Bundle Permissions view_adminRoles_permissions_globalPermissions = Permiss\u00F5es Globais view_adminRoles_permissions_illegalDeselectionDueToCorrespondingWritePermSelection = {0} permiss\u00E3o de leitura n\u00E3 pode ser desmarcada, a menos que {0} permiss\u00E3o de escrita, que implica na permiss\u00E3o de leitura, seja desmarcada primeiro. +##view_adminRoles_permissions_illegalDeselectionDueToManageBundleGroupsSelection = {0} permission cannot be deselected, unless Manage Bundle Groups, which implies {0} permission, is deselected first. 
##view_adminRoles_permissions_illegalDeselectionDueToManageBundleSelection = {0} permission cannot be deselected, unless Manage Bundle, which implies all Bundle permissions, is deselected first. view_adminRoles_permissions_illegalDeselectionDueToManageInventorySelection = {0} permiss\u00E3o n\u00E3o pode ser desmarcada, a menos que Gerenciar Invent\u00E1rio, que implica todas as permiss\u00F5es de Recurso, seja desmarcada primeiro. view_adminRoles_permissions_illegalDeselectionDueToManageSecuritySelection = {0} permiss\u00E3o n\u00E3o pode ser desmarcada, a menos que a permiss\u00E3 Gerenciar SeguranÃa, que implica em todas outras permissıes, seja desmarcada primeiro. @@ -527,20 +529,20 @@ view_adminRoles_permissions_isAuthorized = Autorizado? view_adminRoles_permissions_isRead = Leitura? view_adminRoles_permissions_isWrite = Escrita? ##view_adminRoles_permissions_permDesc_assignBundlesToGroup = can copy a viewable bundle to the bundle group -##view_adminRoles_permissions_permDesc_createBundles = can create new bundle [version]s. can copy existing bundles between viewable groups +##view_adminRoles_permissions_permDesc_createBundles = can create new bundle [version]s. can assign viewable bundles to viewable groups ##view_adminRoles_permissions_permDesc_createBundlesInGroup = can create new bundle [version]s for the bundle group. can copy a viewable bundle to the bundle group. 
##view_adminRoles_permissions_permDesc_deleteBundles = can delete or unassign viewable bundle [version]s ##view_adminRoles_permissions_permDesc_deleteBundlesFromGroup = can delete bundle [version]s from the bundle group (implicitly deleting then from other assigned groups) ##view_adminRoles_permissions_permDesc_deployBundles = can deploy any viewable bundle version to any viewable, deployable, compatible, resource group -##view_adminRoles_permissions_permDesc_manageBundleGroups = can create, update, or delete bundle groups -view_adminRoles_permissions_permDesc_manageBundles = pode criar, atualizar, ou excluir o provisionamento de bundles (a visualiza\u00E7\u00E3 \u00E9 implicita para todos) +##view_adminRoles_permissions_permDesc_manageBundleGroups = can create and delete bundle groups. can assign bundles to bundle groups. grants View Bundles permissions +##view_adminRoles_permissions_permDesc_manageBundles = can perform any bundle task. a convenience permission that grants Manage Bundle Groups, Create Bundles, Delete Bundles, Deploy Bundles and View Bundles permissions. view_adminRoles_permissions_permDesc_manageInventory = possui todas as permiss\u00F5es de Recurso, como descrito abaixo, pode criar, atualizar, excluir grupos e importar Recursos descobertos automaticamente ou manualmente. view_adminRoles_permissions_permDesc_manageRepositories = pode criar, atualizar, ou excluir reposit\u00F3rios de qualquer usu\u00E1rio (todos podem criar seus pr\u00F3prios reposit\u00F3rios), pode associar fontes de conte\u00FAdos a reposit\u00F3rios. 
view_adminRoles_permissions_permDesc_manageSecurity = pode criar, atualizar, ou excluir usu\u00E1rios e perfis (visualiza\u00E7\u00E3o \u00E9 padr\u00E3o para todos) ##view_adminRoles_permissions_permDesc_manageSettings = pode modificar a configura\u00E7\u00E3o do RHQ Server e utilizar qualquer funcionalidade relacionada ao Servidor ##view_adminRoles_permissions_permDesc_unassignBundlesFromGroup = can unassign (not delete) a bundle from the bundle group -##view_adminRoles_permissions_permDesc_viewBundles = can view any bundle including global bundles (those not assigned to any bundle group) -##view_adminRoles_permissions_permDesc_viewBundlesInGroup = can view any bundle in the group +##view_adminRoles_permissions_permDesc_viewBundles = can view bundle details, deployments, etc for any bundle, including unassigned bundles (those not assigned to any bundle group) +##view_adminRoles_permissions_permDesc_viewBundlesInGroup = (IMPLIED) can view bundle details, deployments, etc for any bundle in bundle groups associated with the relevant roles. 
##view_adminRoles_permissions_permDesc_viewUsers = can view other users, with the exception of their assigned roles view_adminRoles_permissions_permReadDesc_configure = view Resource configuration and Resource configuration revision history view_adminRoles_permissions_permReadDesc_control = (IMPL\u00CDCITO) visualizar opera\u00E7\u00F5es dispon\u00EDveis e o hist\u00F3rico da execu\u00E7\u00E3o de opera\u00E7\u00F5es diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties index 795892a..4fbef12 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties @@ -2596,12 +2596,14 @@ view_adminRoles_ldapGroupsReadOnly = ЎаММÑе LDAP гÑÑпп ЎПÑÑÑпМ ##view_adminRoles_noLdap = The LDAP security integration is not configured. To configure LDAP, go to <a {0}>{1}</a>. view_adminRoles_permissions_autoselecting_configureRead_implied = ÐвÑПЌаÑОÑеÑкО ПÑклÑÑеМП CONFIGURE_WRITE пПлМПЌПÑОе, пПÑкПлÑÐºÑ ÐŸÑÑÑÑÑÑвÑÐµÑ CONFIGURE_READ... view_adminRoles_permissions_autoselecting_configureWrite_implied = ÐвÑПЌаÑОÑеÑкО вÑбÑаМП CONFIGURE_READ пПлМПЌПÑОе, пПÑкПлÑÐºÑ CONFIGURE_WRITE пПЎÑазÑЌеваеÑ, ÑÑП ... -##view_adminRoles_permissions_autoselecting_manageBundle_implied = Autoselected unselected permissions, since MANAGE_BUNDLE implies all other bundle permissions... +##view_adminRoles_permissions_autoselecting_manageBundleGroups_implied = Autoselected View Bundles, which is granted by Manage Bundle Groups... 
+##view_adminRoles_permissions_autoselecting_manageBundle_implied = Autoselected unselected permissions, since Manage Bundle permission grants Manage Bundle Groups, Create Bundles, Delete Bundles, View Bundles and Deploy_Bundles permissions... view_adminRoles_permissions_autoselecting_manageInventory_implied = ÐвÑПЌаÑОÑеÑкО ÑÑÑÐ°ÐœÐŸÐ²Ð»ÐµÐœÑ ÐœÐµ вÑбÑаММÑе пПлМПЌПÑÐžÑ ÑеÑÑÑÑÑПв, пПÑкПлÑÐºÑ MANAGE_INVENTORY пÑÐµÐŽÐ¿ÐŸÐ»Ð°Ð³Ð°ÐµÑ Ð²Ñе пПлМПЌПÑÐžÑ ÑеÑÑÑÑа... view_adminRoles_permissions_autoselecting_manageSecurity_implied = ÐвÑПЌаÑОÑеÑкО ÑÑÑÐ°ÐœÐŸÐ²Ð»ÐµÐœÑ ÐœÐµÐ²ÑбÑаММÑе пПлМПЌПÑОÑ, пПÑкПлÑÐºÑ MANAGE_SECURITY вклÑÑÐ°ÐµÑ Ð²Ñе ÐŽÑÑгОе пПлМПЌПÑОÑ... -view_adminRoles_permissions_bundleGroupPermissions = Bundle Group Permissions +##view_adminRoles_permissions_bundlePermissions = Bundle Permissions view_adminRoles_permissions_globalPermissions = ÐлПбалÑÐœÑе пПлМПЌÑÐžÑ view_adminRoles_permissions_illegalDeselectionDueToCorrespondingWritePermSelection = {0} пПлМПЌПÑÐžÑ ÐœÐ° ÑÑеМОе Ме ЌПгÑÑ Ð±ÑÑÑ ÐŸÑклÑÑеМÑ, пПка пÑеЎваÑОÑелÑМП {0} пПлМПЌПÑÐžÑ Ð·Ð°Ð¿ÐžÑО, кПÑПÑÑе вклÑÑаÑÑ Ð¿ÐŸÐ»ÐœÐŸÐŒÐŸÑÐžÑ ÐœÐ° ÑÑеМОе, Ме бÑÐŽÑÑ ÐŸÑклÑÑеМÑ. +##view_adminRoles_permissions_illegalDeselectionDueToManageBundleGroupsSelection = {0} permission cannot be deselected, unless Manage Bundle Groups, which implies {0} permission, is deselected first. ##view_adminRoles_permissions_illegalDeselectionDueToManageBundleSelection = {0} permission cannot be deselected, unless Manage Bundle, which implies all Bundle permissions, is deselected first. view_adminRoles_permissions_illegalDeselectionDueToManageInventorySelection = {0} пПлМПЌПÑÐžÑ ÐœÐµ ЌПгÑÑ Ð±ÑÑÑ ÐŸÑклÑÑеМÑ, пПка пÑеЎваÑОÑелÑМП Manage Inventory, кПÑПÑÐ°Ñ Ð²ÐºÐ»ÑÑÐ°ÐµÑ Ð²Ñе пПлМПЌПÑÐžÑ ÑеÑÑÑÑа, Ме бÑÐŽÐµÑ ÐŸÑклÑÑеМП. view_adminRoles_permissions_illegalDeselectionDueToManageSecuritySelection = {0} пПлМПЌПÑÐžÑ ÐœÐµ ЌПгÑÑ Ð±ÑÑÑ ÐŸÑклÑÑеМÑ, пПка пÑеЎваÑОлÑМП Manage Security пПлМПЌПÑОе, кПÑПÑПе вклÑÑÐ°ÐµÑ Ð²Ñе ÐŽÑÑгОе пПлМПЌПÑОÑ, Ме бÑÐŽÐµÑ ÐŸÑклÑÑеМП. 
@@ -2609,20 +2611,20 @@ view_adminRoles_permissions_isAuthorized = ÐвÑПÑОзПваМÑ? view_adminRoles_permissions_isRead = ЧОÑаÑÑ? view_adminRoles_permissions_isWrite = ÐапОÑÑ? ##view_adminRoles_permissions_permDesc_assignBundlesToGroup = can copy a viewable bundle to the bundle group -##view_adminRoles_permissions_permDesc_createBundles = can create new bundle [version]s. can copy existing bundles between viewable groups +##view_adminRoles_permissions_permDesc_createBundles = can create new bundle [version]s. can assign viewable bundles to viewable groups ##view_adminRoles_permissions_permDesc_createBundlesInGroup = can create new bundle [version]s for the bundle group. can copy a viewable bundle to the bundle group. ##view_adminRoles_permissions_permDesc_deleteBundles = can delete or unassign viewable bundle [version]s ##view_adminRoles_permissions_permDesc_deleteBundlesFromGroup = can delete bundle [version]s from the bundle group (implicitly deleting then from other assigned groups) ##view_adminRoles_permissions_permDesc_deployBundles = can deploy any viewable bundle version to any viewable, deployable, compatible, resource group -##view_adminRoles_permissions_permDesc_manageBundleGroups = can create, update, or delete bundle groups -view_adminRoles_permissions_permDesc_manageBundles = ÐПжМП ÑПзЎаваÑÑ, ПбМПвлÑÑÑ Ðž ÑЎалÑÑÑ ÑÐ·Ð»Ñ Ð¿ÑеЎПÑÑÐ°Ð²Ð»ÐµÐœÐžÑ (пÑПÑЌПÑÑ Ð¿ÑеЎПÑÑавлÑеÑÑÑ Ð²ÑеЌ) +##view_adminRoles_permissions_permDesc_manageBundleGroups = can create and delete bundle groups. can assign bundles to bundle groups. grants View Bundles permissions +##view_adminRoles_permissions_permDesc_manageBundles = can perform any bundle task. a convenience permission that grants Manage Bundle Groups, Create Bundles, Delete Bundles, Deploy Bundles and View Bundles permissions. 
##view_adminRoles_permissions_permDesc_manageInventory = has all Resource permissions, as described below, for all Resources; can create, update, and delete groups; and can import auto-discovered or manually discovered Resources view_adminRoles_permissions_permDesc_manageRepositories = ЌПжМП ÑПзЎаваÑÑ, ПбМПвлÑÑÑ ÐžÐ»Ðž ÑЎалÑÑÑ ÑепПзОÑПÑОО лÑбПгП пПлÑзПваÑÐµÐ»Ñ (кажЎÑй ÐŒÐŸÐ¶ÐµÑ ÑПзЎаваÑÑ ÑвПО ÑПбÑÑвеММÑе ÑепПзОÑПÑОО), ЌПжМП аÑÑПÑООÑПваÑÑ ÐžÑÑПÑМОкО кПМÑеМÑа Ñ ÑезпПзОÑПÑОÑЌО. view_adminRoles_permissions_permDesc_manageSecurity = ЌПжМП ÑПзЎаваÑÑ, ПбМПвлÑÑÑ ÐžÐ»Ðž ÑЎалÑÑÑ Ð¿ÐŸÐ»ÑзПваÑелей О ÑПлО - вклÑÑÐ°ÐµÑ Ð²Ñе ÐŽÑÑгОе пПлМПЌПÑÐžÑ ##view_adminRoles_permissions_permDesc_manageSettings = can modify the {0} Server configuration and perform any Server-related functionality ##view_adminRoles_permissions_permDesc_unassignBundlesFromGroup = can unassign (not delete) a bundle from the bundle group -##view_adminRoles_permissions_permDesc_viewBundles = can view any bundle including global bundles (those not assigned to any bundle group) -##view_adminRoles_permissions_permDesc_viewBundlesInGroup = can view any bundle in the group +##view_adminRoles_permissions_permDesc_viewBundles = can view bundle details, deployments, etc for any bundle, including unassigned bundles (those not assigned to any bundle group) +##view_adminRoles_permissions_permDesc_viewBundlesInGroup = (IMPLIED) can view bundle details, deployments, etc for any bundle in bundle groups associated with the relevant roles. 
##view_adminRoles_permissions_permDesc_viewUsers = can view other users, with the exception of their assigned roles view_adminRoles_permissions_permReadDesc_configure = пÑПÑЌПÑÑ ÐºÐŸÐœÑОгÑÑаÑОО ÑеÑÑÑÑа О ОÑÑПÑÐžÑ Ð²ÐµÑÑОй кПМÑОгÑÑаÑОО ÑеÑÑÑÑа ##view_adminRoles_permissions_permReadDesc_control = (IMPLIED) view available operations and operation execution history diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties index 0d0f267..24ea681 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties @@ -503,12 +503,14 @@ view_adminRoles_ldapGroupsReadOnly = LDAP\u7ec4\u6570\u636e\u4e3a\u53ea\u8bfb view_adminRoles_noLdap = \u6ca1\u6709\u96c6\u6210LDAP\u5b89\u5168, \u5230<a {0}>{1}</a>. ##view_adminRoles_permissions_autoselecting_configureRead_implied = Autodeselected CONFIGURE_WRITE permission, since lack of CONFIGURE_READ implies lack of it... ##view_adminRoles_permissions_autoselecting_configureWrite_implied = Autoselected CONFIGURE_READ permission, since CONFIGURE_WRITE implies it... -##view_adminRoles_permissions_autoselecting_manageBundle_implied = Autoselected unselected permissions, since MANAGE_BUNDLE implies all other bundle permissions... +##view_adminRoles_permissions_autoselecting_manageBundleGroups_implied = Autoselected View Bundles, which is granted by Manage Bundle Groups... +##view_adminRoles_permissions_autoselecting_manageBundle_implied = Autoselected unselected permissions, since Manage Bundle permission grants Manage Bundle Groups, Create Bundles, Delete Bundles, View Bundles and Deploy_Bundles permissions... 
##view_adminRoles_permissions_autoselecting_manageInventory_implied = Autoselected unselected Resource permissions, since MANAGE_INVENTORY implies all Resource permissions... ##view_adminRoles_permissions_autoselecting_manageSecurity_implied = Autoselected unselected permissions, since MANAGE_SECURITY implies all other permissions... -view_adminRoles_permissions_bundleGroupPermissions = Bundle Group Permissions +##view_adminRoles_permissions_bundlePermissions = Bundle Permissions view_adminRoles_permissions_globalPermissions = \u5168\u5c40\u6388\u6743 ##view_adminRoles_permissions_illegalDeselectionDueToCorrespondingWritePermSelection = {0} read permission cannot be deselected, unless the {0} write permission, which implies the read permission, is deselected first. +##view_adminRoles_permissions_illegalDeselectionDueToManageBundleGroupsSelection = {0} permission cannot be deselected, unless Manage Bundle Groups, which implies {0} permission, is deselected first. ##view_adminRoles_permissions_illegalDeselectionDueToManageBundleSelection = {0} permission cannot be deselected, unless Manage Bundle, which implies all Bundle permissions, is deselected first. ##view_adminRoles_permissions_illegalDeselectionDueToManageInventorySelection = {0} permission cannot be deselected, unless Manage Inventory, which implies all Resource permissions, is deselected first. ##view_adminRoles_permissions_illegalDeselectionDueToManageSecuritySelection = {0} permission cannot be deselected, unless the Manage Security permission, which implies all other permissions, is deselected first. @@ -516,20 +518,20 @@ view_adminRoles_permissions_isAuthorized = \u6388\u6743? view_adminRoles_permissions_isRead = \u8bfb? view_adminRoles_permissions_isWrite = \u5199? ##view_adminRoles_permissions_permDesc_assignBundlesToGroup = can copy a viewable bundle to the bundle group -##view_adminRoles_permissions_permDesc_createBundles = can create new bundle [version]s. 
can copy existing bundles between viewable groups +##view_adminRoles_permissions_permDesc_createBundles = can create new bundle [version]s. can assign viewable bundles to viewable groups ##view_adminRoles_permissions_permDesc_createBundlesInGroup = can create new bundle [version]s for the bundle group. can copy a viewable bundle to the bundle group. ##view_adminRoles_permissions_permDesc_deleteBundles = can delete or unassign viewable bundle [version]s ##view_adminRoles_permissions_permDesc_deleteBundlesFromGroup = can delete bundle [version]s from the bundle group (implicitly deleting then from other assigned groups) ##view_adminRoles_permissions_permDesc_deployBundles = can deploy any viewable bundle version to any viewable, deployable, compatible, resource group -##view_adminRoles_permissions_permDesc_manageBundleGroups = can create, update, or delete bundle groups -view_adminRoles_permissions_permDesc_manageBundles = \u80fd\u521b\u5efa,\u66f4\u65b0\u6216\u8005\u5220\u9664\u63d0\u4f9b\u7684bundles(\u4efb\u4f55\u4eba\u90fd\u80fd\u67e5\u770b) +##view_adminRoles_permissions_permDesc_manageBundleGroups = can create and delete bundle groups. can assign bundles to bundle groups. grants View Bundles permissions +##view_adminRoles_permissions_permDesc_manageBundles = can perform any bundle task. a convenience permission that grants Manage Bundle Groups, Create Bundles, Delete Bundles, Deploy Bundles and View Bundles permissions. view_adminRoles_permissions_permDesc_manageInventory = \u62e5\u6709\u6240\u6709\u8d44\u6e90\u6743\u9650, \u5982\u4e0b\u6240\u8ff0, \u5bf9\u6240\u6709\u8d44\u6e90; \u5177\u6709\u521b\u5efa, \u66f4\u65b0, \u5220\u9664\u7ec4; \u80fd\u5bfc\u5165\u81ea\u52a8\u53d1\u73b0\u6216\u624b\u52a8\u53d1\u73b0\u7684\u8d44\u6e90 view_adminRoles_permissions_permDesc_manageRepositories = can create, update, or delete repositories of any user (everyone can create their own repositories), can associate content sources to repositories. 
view_adminRoles_permissions_permDesc_manageSecurity = \u80fd\u521b\u5efa,\u66f4\u65b0,\u6216\u5220\u9664\u7528\u6237\u548c\u89d2\u8272 (\u4efb\u4f55\u4eba\u90fd\u6709\u67e5\u770b\u6743\u9650) ##view_adminRoles_permissions_permDesc_manageSettings = \u80fd\u4fee\u6539RHQ\u670d\u52a1\u5668\u914d\u7f6e\u800c\u4e14\u80fd\u64cd\u4f5c\u4efb\u4f55\u76f8\u5173\u7684\u670d\u52a1\u5668\u529f\u80fd ##view_adminRoles_permissions_permDesc_unassignBundlesFromGroup = can unassign (not delete) a bundle from the bundle group -##view_adminRoles_permissions_permDesc_viewBundles = can view any bundle including global bundles (those not assigned to any bundle group) -##view_adminRoles_permissions_permDesc_viewBundlesInGroup = can view any bundle in the group +##view_adminRoles_permissions_permDesc_viewBundles = can view bundle details, deployments, etc for any bundle, including unassigned bundles (those not assigned to any bundle group) +##view_adminRoles_permissions_permDesc_viewBundlesInGroup = (IMPLIED) can view bundle details, deployments, etc for any bundle in bundle groups associated with the relevant roles. 
##view_adminRoles_permissions_permDesc_viewUsers = can view other users, with the exception of their assigned roles view_adminRoles_permissions_permReadDesc_configure = \u67e5\u770b\u8d44\u6e90\u914d\u7f6e\u548c\u8d44\u6e90\u914d\u7f6e\u4fee\u8ba2\u5386\u53f2 view_adminRoles_permissions_permReadDesc_control = (IMPLIED) \u67e5\u770b\u53ef\u7528\u64cd\u4f5c\u548c\u64cd\u4f5c\u6267\u884c\u5386\u53f2 diff --git a/modules/enterprise/gui/coregui/src/main/webapp/images/subsystems/bundle/BundleGroup_16.png b/modules/enterprise/gui/coregui/src/main/webapp/images/subsystems/bundle/BundleGroup_16.png new file mode 100644 index 0000000..4a73421 Binary files /dev/null and b/modules/enterprise/gui/coregui/src/main/webapp/images/subsystems/bundle/BundleGroup_16.png differ diff --git a/modules/enterprise/gui/coregui/src/main/webapp/images/subsystems/bundle/BundleGroup_24.png b/modules/enterprise/gui/coregui/src/main/webapp/images/subsystems/bundle/BundleGroup_24.png new file mode 100644 index 0000000..59faacf Binary files /dev/null and b/modules/enterprise/gui/coregui/src/main/webapp/images/subsystems/bundle/BundleGroup_24.png differ diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/BundleManagerBeanTest.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/BundleManagerBeanTest.java index 55fce08..2085a4d 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/BundleManagerBeanTest.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/BundleManagerBeanTest.java @@ -1363,6 +1363,7 @@ public class BundleManagerBeanTest extends AbstractEJB3Test {
// allow bundle group delete bundleManager.deleteBundleGroups(subject, new int[] { bundleGroup.getId() }); + removeRolePermissions(role, Permission.MANAGE_BUNDLE_GROUPS);
// deny unassigned bundle create (no global create or view) try { @@ -1409,7 +1410,10 @@ public class BundleManagerBeanTest extends AbstractEJB3Test { assertEquals("Should be able to see unassigned bundle", 1, bundles.size());
// deny global perm bundle assign + addRolePermissions(role, Permission.MANAGE_BUNDLE_GROUPS); bundleGroup = bundleManager.createBundleGroup(subject, TEST_BUNDLE_GROUP_NAME, "test"); + removeRolePermissions(role, Permission.MANAGE_BUNDLE_GROUPS); + try { bundleManager.assignBundlesToBundleGroup(subject, bundleGroup.getId(), new int[] { bundle.getId() }); fail("Should have thrown PermissionException"); @@ -1417,12 +1421,47 @@ public class BundleManagerBeanTest extends AbstractEJB3Test { // expected }
- // allow global perm bundle assign + // allow bundle assign via global manage_bundle_groups + addRolePermissions(role, Permission.MANAGE_BUNDLE_GROUPS); + bundleManager.assignBundlesToBundleGroup(subject, bundleGroup.getId(), new int[] { bundle.getId() }); + + // allow bundle unassign via global manage_bundle_groups + bundleManager.unassignBundlesFromBundleGroup(subject, bundleGroup.getId(), new int[] { bundle.getId() }); + + // allow bundle assign via global create + removeRolePermissions(role, Permission.MANAGE_BUNDLE_GROUPS); addRolePermissions(role, Permission.CREATE_BUNDLES); bundleManager.assignBundlesToBundleGroup(subject, bundleGroup.getId(), new int[] { bundle.getId() });
+ // deny bundle unassign via global create + try { + bundleManager.unassignBundlesFromBundleGroup(subject, bundleGroup.getId(), new int[] { bundle.getId() }); + fail("Should have thrown PermissionException"); + } catch (PermissionException e) { + // expected + } + + // allow bundle unassign via global delete + addRolePermissions(role, Permission.DELETE_BUNDLES); + bundleManager.unassignBundlesFromBundleGroup(subject, bundleGroup.getId(), new int[] { bundle.getId() }); + removeRolePermissions(role, Permission.DELETE_BUNDLES); + + // deny bundle assign with global create but no view + removeRolePermissions(role, Permission.VIEW_BUNDLES); + try { + bundleManager.assignBundlesToBundleGroup(subject, bundleGroup.getId(), new int[] { bundle.getId() }); + fail("Should have thrown PermissionException"); + } catch (PermissionException e) { + // expected + } + + // go back and again assign via global create and view + addRolePermissions(role, Permission.VIEW_BUNDLES); + bundleManager.assignBundlesToBundleGroup(subject, bundleGroup.getId(), new int[] { bundle.getId() }); + // deny assigned, unassociated-bundle-group bundle view - removeRolePermissions(role, Permission.CREATE_BUNDLES, Permission.VIEW_BUNDLES); + removeRolePermissions(role, Permission.MANAGE_BUNDLE_GROUPS); + removeRolePermissions(role, Permission.VIEW_BUNDLES); bundles = bundleManager.findBundlesByCriteria(subject, bCriteria); assertNotNull(bundles); assert bundles.isEmpty() : "Should not be able to see assigned bundle"; @@ -1498,6 +1537,7 @@ public class BundleManagerBeanTest extends AbstractEJB3Test { // create bundle group addRolePermissions(role, Permission.MANAGE_BUNDLE_GROUPS); BundleGroup bundleGroup1 = bundleManager.createBundleGroup(subject, TEST_BUNDLE_GROUP_NAME + "_1", "bg-1"); + removeRolePermissions(role, Permission.MANAGE_BUNDLE_GROUPS);
// add bg1 to the role, but no perms addRoleBundleGroup(role, bundleGroup1); @@ -1535,7 +1575,9 @@ public class BundleManagerBeanTest extends AbstractEJB3Test { addRolePermissions(role2, Permission.CREATE_BUNDLES_IN_GROUP);
// create second bundle group + addRolePermissions(role, Permission.MANAGE_BUNDLE_GROUPS); BundleGroup bundleGroup2 = bundleManager.createBundleGroup(subject, TEST_BUNDLE_GROUP_NAME + "_2", "bg-2"); + removeRolePermissions(role, Permission.MANAGE_BUNDLE_GROUPS);
// deny bundle create in bg2 (not associated with role) try { @@ -1633,6 +1675,7 @@ public class BundleManagerBeanTest extends AbstractEJB3Test { // create bundle group addRolePermissions(role, Permission.MANAGE_BUNDLE_GROUPS); BundleGroup bundleGroup1 = bundleManager.createBundleGroup(subject, TEST_BUNDLE_GROUP_NAME + "_1", "bg-1"); + removeRolePermissions(role, Permission.MANAGE_BUNDLE_GROUPS);
// add bg1 to the role with group create addRoleBundleGroup(role, bundleGroup1); @@ -1675,6 +1718,7 @@ public class BundleManagerBeanTest extends AbstractEJB3Test { // create bundle group addRolePermissions(role, Permission.MANAGE_BUNDLE_GROUPS); BundleGroup bundleGroup = bundleManager.createBundleGroup(subject, TEST_BUNDLE_GROUP_NAME, "bg"); + removeRolePermissions(role, Permission.MANAGE_BUNDLE_GROUPS);
// add bg to the role with group create addRoleBundleGroup(role, bundleGroup); @@ -1765,6 +1809,7 @@ public class BundleManagerBeanTest extends AbstractEJB3Test { // create bundle group addRolePermissions(role, Permission.MANAGE_BUNDLE_GROUPS); BundleGroup bundleGroup = bundleManager.createBundleGroup(subject, TEST_BUNDLE_GROUP_NAME, "bg"); + removeRolePermissions(role, Permission.MANAGE_BUNDLE_GROUPS);
// add bg to the role with group create addRoleBundleGroup(role, bundleGroup); diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/RoleManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/RoleManagerBean.java index 96d44de..c2f6048 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/RoleManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/RoleManagerBean.java @@ -758,6 +758,21 @@ public class RoleManagerBean implements RoleManagerLocal, RoleManagerRemote { if (!role.getPermissions().contains(Permission.CONFIGURE_READ)) { role.getPermissions().remove(Permission.CONFIGURE_WRITE); } + + /* + * and MANAGE_BUNDLE implies all Bundle perms + */ + if (role.getPermissions().contains(Permission.MANAGE_BUNDLE)) { + role.getPermissions().addAll(Permission.BUNDLE_ALL); + } + + /* + * and MANAGE_BUNDLE_GROUPS implies global bundle view + */ + if (role.getPermissions().contains(Permission.MANAGE_BUNDLE_GROUPS)) { + role.getPermissions().add(Permission.VIEW_BUNDLES); + } + }
@Override diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerBean.java index 621a54c..986b080 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerBean.java @@ -2058,11 +2058,12 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
/** + * <pre> * Required Permissions: Either: * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG - * + * </pre> * @param subject * @param bundleGroupId null or 0 for unassigned initial bundle version creation * @throws PermissionException @@ -2101,11 +2102,12 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
/** + * <pre> * Required Permissions: Either: * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG - * + * </pre> * @param subject * @param bundleId required, bundleId of bundle in which bundle version is being created/updated * @throws PermissionException @@ -2142,10 +2144,13 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
/** - * Requires VIEW permission for the relevant bundle and either: + * <pre> + * Requires VIEW permission for the relevant bundle and one of: + * - Global.MANAGE_BUNDLE_GROUPS * - Global.CREATE_BUNDLE - * - BundleGroup.CREATE_BUNDLES_IN_GROUP or BundleGroup.ASSIGN_BUNDLES_TO_GROUP for the relevant bundle group - * + * - BundleGroup.ASSIGN_BUNDLES_TO_GROUP for the relevant bundle group + * - BundleGroup.CREATE_BUNDLES_IN_GROUP for the relevant bundle group + * </pre> * @param subject * @param bundleGroupId an existing bundle group * @param bundleIds existing bundles @@ -2155,24 +2160,25 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot throws PermissionException {
Set<Permission> globalPerms = authorizationManager.getExplicitGlobalPermissions(subject); + boolean hasGlobalManageBundleGroups = globalPerms.contains(Permission.MANAGE_BUNDLE_GROUPS); boolean hasGlobalCreateBundles = globalPerms.contains(Permission.CREATE_BUNDLES); boolean hasGlobalViewBundles = globalPerms.contains(Permission.VIEW_BUNDLES);
- if (hasGlobalCreateBundles && hasGlobalViewBundles) { + if ((hasGlobalManageBundleGroups || hasGlobalCreateBundles) && hasGlobalViewBundles) { return; }
- boolean hasBundleGroupCreate = hasGlobalCreateBundles + boolean canAssign = hasGlobalManageBundleGroups + || hasGlobalCreateBundles || authorizationManager - .hasBundleGroupPermission(subject, Permission.CREATE_BUNDLES_IN_GROUP, bundleGroupId); - boolean hasBundleGroupAssign = hasBundleGroupCreate + .hasBundleGroupPermission(subject, Permission.CREATE_BUNDLES_IN_GROUP, bundleGroupId) || authorizationManager .hasBundleGroupPermission(subject, Permission.ASSIGN_BUNDLES_TO_GROUP, bundleGroupId);
- if (!hasBundleGroupAssign) { + if (!canAssign) { String msg = "Subject [" + subject.getName() - + "] requires one of Global.CREATE_BUNDLES, BundleGroup.CREATE_BUNDLES_IN_GROUP, or BundleGroup.ASSIGN_BUNDLES_TO_GROUP to assign a bundle to undle group [" + + "] requires one of Global.MANAGE_BUNDLE_GROUPS, Global.CREATE_BUNDLES, BundleGroup.CREATE_BUNDLES_IN_GROUP, or BundleGroup.ASSIGN_BUNDLES_TO_GROUP to assign a bundle to bundle group [" + bundleGroupId + "]."; throw new PermissionException(msg); } @@ -2194,9 +2200,13 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
/** - * Requires VIEW permission for the relevant bundles and either: + * <pre> + * Requires VIEW permission for the relevant bundles and one of: + * - Global.MANAGE_BUNDLE_GROUPS * - Global.DELETE_BUNDLE - * - BundleGroup.DELETE_BUNDLES_FROM_GROUP or BundleGroup.UNASSIGN_BUNDLES_FROM_GROUP for the relevant bundle group + * - BundleGroup.UNASSIGN_BUNDLES_FROM_GROUP for the relevant bundle group + * - BundleGroup.DELETE_BUNDLES_FROM_GROUP for the relevant bundle group + * </pre> * * @param subject * @param bundleGroupId an existing bundle group @@ -2207,24 +2217,25 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot throws PermissionException {
Set<Permission> globalPerms = authorizationManager.getExplicitGlobalPermissions(subject); + boolean hasGlobalManageBundleGroups = globalPerms.contains(Permission.MANAGE_BUNDLE_GROUPS); boolean hasGlobalDeleteBundles = globalPerms.contains(Permission.DELETE_BUNDLES); boolean hasGlobalViewBundles = globalPerms.contains(Permission.VIEW_BUNDLES);
- if (hasGlobalDeleteBundles && hasGlobalViewBundles) { + if ((hasGlobalManageBundleGroups || hasGlobalDeleteBundles) && hasGlobalViewBundles) { return; }
- boolean hasBundleGroupDelete = hasGlobalDeleteBundles + boolean canUnassign = hasGlobalManageBundleGroups + || hasGlobalDeleteBundles || authorizationManager.hasBundleGroupPermission(subject, Permission.DELETE_BUNDLES_FROM_GROUP, - bundleGroupId); - boolean hasBundleGroupUnassign = hasBundleGroupDelete + bundleGroupId) || authorizationManager.hasBundleGroupPermission(subject, Permission.UNASSIGN_BUNDLES_FROM_GROUP, bundleGroupId);
- if (!hasBundleGroupUnassign) { + if (!canUnassign) { String msg = "Subject [" + subject.getName() - + "] requires one of Global.DELETE_BUNDLES, BundleGroup.DELETE_BUNDLES_FROM_GROUP, or BundleGroup.UNASSIGN_BUNDLES_FROM_GROUP to unassign a bundle to undle group [" + + "] requires one of Global.MANAGE_BUNDLE_GROUPS, Global.DELETE_BUNDLES, BundleGroup.DELETE_BUNDLES_FROM_GROUP, or BundleGroup.UNASSIGN_BUNDLES_FROM_GROUP to unassign a bundle from bundle group [" + bundleGroupId + "]."; throw new PermissionException(msg); } @@ -2237,7 +2248,7 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot if (!authorizationManager.canViewBundle(subject, bundleId)) { String msg = "Subject [" + subject.getName() + "] requires either Global.VIEW_BUNDLES or BundleGroup.VIEW_BUNDLES_IN_GROUP to unassign bundle [" - + bundleId + "] to bundle group [" + bundleGroupId + "]"; + + bundleId + "] from bundle group [" + bundleGroupId + "]"; throw new PermissionException(msg); } } @@ -2246,9 +2257,11 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
/** + * <pre> * Required Permissions: Either: * - Global.DEPLOY_BUNDLES and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) * - Resource.DEPLOY_BUNDLES_TO_GROUP and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) + * </pre> */ private void checkDeployBundleAuthz(Subject subject, int bundleId, int resourceGroupId) throws PermissionException {
@@ -2283,11 +2296,12 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
/** + * <pre> * Required Permissions: Either: * - Global.DELETE_BUNDLES and Global.VIEW_BUNDLES * - Global.DELETE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * - BundleGroup.DELETE_BUNDLES_FROM_GROUP for bundle group BG and the relevant bundle is assigned to BG - * + * </pre> * @param subject * @param bundleId required, bundleId of bundle, or the bundle version, being deleted * @throws PermissionException diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerRemote.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerRemote.java index c1b9eef..ee023e8 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerRemote.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerRemote.java @@ -78,7 +78,6 @@ public interface BundleManagerRemote { * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * </pre> - * * @param subject user that must have proper permissions * @param bundleVersionId id of the BundleVersion incorporating this BundleFile * @param name name of the BundleFile (and the resulting Package) @@ -101,7 +100,6 @@ public interface BundleManagerRemote { * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * </pre> - * * @see {@link #addBundleFile(Subject, int, String, String, Architecture, InputStream)} */ BundleFile addBundleFileViaByteArray(Subject subject, int bundleVersionId, String name, String version, @@ -115,7 +113,6 @@ public interface BundleManagerRemote { * - 
Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * </pre> - * * @see #addBundleFile(Subject, int, String, String, Architecture, InputStream) */ BundleFile addBundleFileViaURL(Subject subject, int bundleVersionId, String name, String version, @@ -130,7 +127,6 @@ public interface BundleManagerRemote { * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * </pre> - * * @see #addBundleFileViaURL(Subject, int, String, String, Architecture, String) */ BundleFile addBundleFileViaURL(Subject subject, int bundleVersionId, String name, String version, @@ -144,7 +140,6 @@ public interface BundleManagerRemote { * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * </pre> - * * @see {@link #addBundleFile(Subject, int, String, String, Architecture, InputStream)} */ BundleFile addBundleFileViaPackageVersion(Subject subject, int bundleVersionId, String name, int packageVersionId) @@ -153,11 +148,12 @@ public interface BundleManagerRemote { /** * Assign the specified bundles to the specified bundle group. 
* <pre> - * Requires VIEW permission for the relevant bundle and either: + * Requires VIEW permission for the relevant bundle and one of: + * - Global.MANAGE_BUNDLE_GROUPS * - Global.CREATE_BUNDLE - * - BundleGroup.CREATE_BUNDLES_IN_GROUP or BundleGroup.ASSIGN_BUNDLES_TO_GROUP for the relevant bundle group + * - BundleGroup.ASSIGN_BUNDLES_TO_GROUP for the relevant bundle group + * - BundleGroup.CREATE_BUNDLES_IN_GROUP for the relevant bundle group * </pre> - * * @param subject * @param bundleGroupId * @param bundleIds @@ -174,7 +170,6 @@ public interface BundleManagerRemote { * - Global.DEPLOY_BUNDLES and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) * - Resource.DEPLOY_BUNDLES_TO_GROUP and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) * </pre> - * * @param subject user that must have proper permissions * @param bundleVersionId the BundleVersion being deployed by this deployment * @param bundleDestinationId the BundleDestination for the deployment @@ -194,7 +189,6 @@ public interface BundleManagerRemote { * - Global.DEPLOY_BUNDLES and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) * - Resource.DEPLOY_BUNDLES_TO_GROUP and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) * </pre> - * * @param subject user must have MANAGE_INVENTORY permission * @param bundleId the Bundle to be deployed to this Destination * @param name a name for this destination. not null or empty @@ -213,10 +207,10 @@ public interface BundleManagerRemote {
/** * Create a new bundle group. - * <p/> + * <pre> * Require Permissions: * - Global.MANAGE_BUNDLE_GROUPS - * + * </pre> * @param subject user that must have proper permissions * @param name the unique bundle group name * @param description an optional description @@ -239,7 +233,6 @@ public interface BundleManagerRemote { * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * </pre> - * * @param subject user that must have proper permissions * @param recipe the recipe that defines the bundle version to be created * @return the persisted BundleVersion with alot of the internal relationships filled in to help the caller @@ -259,7 +252,6 @@ public interface BundleManagerRemote { * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG * </pre> - * * @param subject user that must have proper permissions * @param bundleGroupId identifies the bundle group that the new bundle will be associated with; 0 if no group * @param recipe the recipe that defines the bundle version to be created @@ -284,7 +276,6 @@ public interface BundleManagerRemote { * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * </pre> - * * @param subject user that must have proper permissions * @param distributionFile a local Bundle Distribution file. It must be read accessible by the RHQ server process. 
* @return the persisted BundleVersion with alot of the internal relationships filled in to help the caller @@ -304,7 +295,6 @@ public interface BundleManagerRemote { * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG * </pre> - * * @param subject user that must have proper permissions * @param bundleGroupId identifies the bundle group that the new bundle will be associated with; 0 if no group * @param distributionFile a local Bundle Distribution file. It must be read accessible by the RHQ server process. @@ -330,7 +320,6 @@ public interface BundleManagerRemote { * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * </pre> - * * @param subject user that must have proper permissions * @param fileBytes the file bits that make up the entire bundle distribution file * @return the persisted BundleVersion with a lot of the internal relationships filled in to help the caller @@ -349,8 +338,7 @@ public interface BundleManagerRemote { * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG - * </pre> - * + * </pre> * @param subject user that must have proper permissions * @param bundleGroupId identifies the bundle group that the new bundle will be associated with; 0 if no group * @param fileBytes the file bits that make up the entire bundle distribution file @@ -378,7 +366,6 @@ public interface BundleManagerRemote { * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * </pre> - * * @param subject user 
that must have proper permissions * @param distributionFileUrl a URL String to the Bundle Distribution file. It must be live, resolvable and read accessible * by the RHQ server process. @@ -400,7 +387,6 @@ public interface BundleManagerRemote { * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG * </pre> - * * @param subject user that must have proper permissions * @param bundleGroupId identifies the bundle group that the new bundle will be associated with; 0 if no group * @param distributionFileUrl a URL String to the Bundle Distribution file. It must be live, resolvable and read accessible @@ -424,7 +410,6 @@ public interface BundleManagerRemote { * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * </pre> - * * @see #createBundleVersionViaURL(org.rhq.core.domain.auth.Subject, String) */ BundleVersion createBundleVersionViaURL(Subject subject, String distributionFileUrl, String username, @@ -442,7 +427,6 @@ public interface BundleManagerRemote { * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG * </pre> - * * @see #createBundleVersionViaURL(org.rhq.core.domain.auth.Subject, String) */ BundleVersion createInitialBundleVersionViaURL(Subject subject, int bundleGroupId, String distributionFileUrl, @@ -459,7 +443,6 @@ public interface BundleManagerRemote { * - Global.DELETE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * - BundleGroup.DELETE_BUNDLES_FROM_GROUP for bundle group BG and the relevant bundle is assigned to BG * </pre> - * * @param subject user that must have proper permissions * @param bundleIds IDs of all bundles to be deleted * @throws 
Exception if any part of the removal fails. @@ -477,7 +460,6 @@ public interface BundleManagerRemote { * - Global.DELETE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * - BundleGroup.DELETE_BUNDLES_FROM_GROUP for bundle group BG and the relevant bundle is assigned to BG * </pre> - * * @param subject user that must have proper permissions * @param bundleId the id of the bundle to remove * @throws Exception if any part of the removal fails. @@ -486,10 +468,10 @@ public interface BundleManagerRemote {
/** * Delete a bundle group. Any currently assigned bundles will be removed but are not deleted. - * <p/> + * <pre> * Required Permissions: * - Global.MANAGE_BUNDLE_GROUPS - * + * </pre> * @param subject user that must have proper permissions * @param ids the bundle group id * @throws Exception @@ -507,7 +489,6 @@ public interface BundleManagerRemote { * - Global.DELETE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * - BundleGroup.DELETE_BUNDLES_FROM_GROUP for bundle group BG and the relevant bundle is assigned to BG * </pre> - * * @param subject user that must have proper permissions * @param bundleVersionId the id of the bundle version to remove * @param deleteBundleIfEmpty if <code>true</code> and if this method deletes the last bundle version for its @@ -573,8 +554,7 @@ public interface BundleManagerRemote { * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG - * </pre> - * + * </pre> * @param subject user that must have proper permissions * @param bundleVersionId the BundleVersion being queried * @param withoutBundleFileOnly if true omit any filenames that already have a corresponding BundleFile for @@ -587,11 +567,11 @@ public interface BundleManagerRemote {
/** * Purges the destination's live deployment content from the remote platforms. - * </pre> + * <pre> * Required Permissions: Either: * - Global.DEPLOY_BUNDLES and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) * - Resource.DEPLOY_BUNDLES_TO_GROUP and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) - * + * </pre> * @param subject user that must have proper permissions * @param bundleDestinationId the ID of the destination that is to be purged of bundle content */ @@ -603,11 +583,11 @@ public interface BundleManagerRemote { * complete. The returned BundleDeployment can be used to track the history of the individual deployments. * <br/><br/> * TODO: Add the scheduling capability, currently it's Immediate. - * </pre> + * <pre> * Required Permissions: Either: * - Global.DEPLOY_BUNDLES and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) * - Resource.DEPLOY_BUNDLES_TO_GROUP and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) - * + * </pre> * @param subject user that must have proper permissions * @param bundleDeploymentId the BundleDeployment being used to guide the deployments * @param isCleanDeployment if true perform a wipe of the deploy directory prior to the deployment. If false @@ -645,11 +625,13 @@ public interface BundleManagerRemote {
/** * Unassign the specified bundles from the specified bundle group. - * </pre> - * Requires VIEW permission for the relevant bundles and either: + * <pre> + * Requires VIEW permission for the relevant bundles and one of: + * - Global.MANAGE_BUNDLE_GROUPS * - Global.DELETE_BUNDLE - * - BundleGroup.DELETE_BUNDLES_FROM_GROUP or BundleGroup.UNASSIGN_BUNDLES_FROM_GROUP for the relevant bundle group - * + * - BundleGroup.UNASSIGN_BUNDLES_FROM_GROUP for the relevant bundle group + * - BundleGroup.DELETE_BUNDLES_FROM_GROUP for the relevant bundle group + * </pre> * @param subject * @param bundleGroupId * @param bundleIds
commit 9bed24f209aca673c69f62ee2a4fa33771df6943 Author: Jirka Kremser jkremser@redhat.com Date: Mon Aug 5 16:00:35 2013 +0200
squashed commit: Added method in the GWT impl class returning the data for sparkline graph for storage nodes. Another iteration of Storage Node UI: sparkline small graphs support. Fetching also definition ids. It is needed in order to get the data for sparkline graphs.
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java index f4d3934..a1010b5 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java @@ -63,7 +63,7 @@ import org.rhq.core.domain.resource.Resource; @NamedQuery(name = StorageNode.QUERY_DELETE_BY_ID, query = "" // + "DELETE FROM StorageNode s WHERE s.id = :storageNodeId "), @NamedQuery(name = StorageNode.QUERY_FIND_SCHEDULE_IDS_BY_PARENT_RESOURCE_ID_AND_MEASUREMENT_DEFINITION_NAMES, query = "" // - + " SELECT def.name, ms.id FROM MeasurementSchedule ms " // + + " SELECT def.name, def.id, ms.id, res.id FROM MeasurementSchedule ms " // + " JOIN ms.definition def " // + " JOIN ms.resource res " // + " WHERE ms.definition = def " // @@ -72,7 +72,7 @@ import org.rhq.core.domain.resource.Resource; + " AND def.name IN (:metricNames)"), //
@NamedQuery(name = StorageNode.QUERY_FIND_SCHEDULE_IDS_BY_GRANDPARENT_RESOURCE_ID_AND_MEASUREMENT_DEFINITION_NAMES, query = "" // - + " SELECT def.name, ms.id FROM MeasurementSchedule ms " // + + " SELECT def.name, def.id, ms.id, res.id FROM MeasurementSchedule ms " // + " JOIN ms.definition def " // + " JOIN ms.resource res " // + " WHERE ms.definition = def " // diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java index 7cc2ef1..5538db5 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java @@ -44,6 +44,7 @@ import org.rhq.enterprise.gui.coregui.client.components.view.ViewName; import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; import org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.configuration.GroupResourceConfigurationEditView; import org.rhq.enterprise.gui.coregui.client.inventory.resource.type.ResourceTypeRepository; +import org.rhq.enterprise.gui.coregui.client.util.Log; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; import org.rhq.enterprise.gui.coregui.client.util.message.Message;
@@ -190,8 +191,9 @@ public class StorageNodeAdminView extends EnhancedVLayout implements/* HasViewNa GWTServiceLookup.getStorageService().findNotAcknowledgedStorageNodeAlertsCount(new AsyncCallback<Integer>() { @Override public void onSuccess(Integer result) { + Log.info("Running the job fetching the number of ALL unack alerts..."); alerts.setTitle(StorageNodeAdminView.getAlertsString(alerts.getTitle(), result)); - schedule(5 * 1000); + schedule(15 * 1000); }
@Override diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java index a7058d7..7a777b1 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java @@ -358,7 +358,7 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNodeLoadComposit recordsList.add(makeListGridRecord(loadComposite.getHeapPercentageUsed(), "Heap Percent Used", "This value is calculated by dividing Heap Used by Heap Maximum.", HEAP_PERCENTAGE_KEY));
// disk related metrics - recordsList.add(makeListGridRecord(loadComposite.getDataDiskUsed(), "Total Disk Space Used", "Total space used on disk by all data files, commit logs, and saved caches.", "totaldisk")); + recordsList.add(makeListGridRecord(loadComposite.getDataDiskUsed(), "Disk Space Used by Storage Node", "Total space used on disk by all data files, commit logs, and saved caches.", "totaldisk")); recordsList.add(makeListGridRecord(loadComposite.getTotalDiskUsedPercentage(),"Total Disk Space Percent Used", "Percentage of total disk space used (system and Storage Node) on the partitions that contain the data files. If multiple data locations are specified then the aggregate accross all the partitions that contain data files is reported.", TOTAL_DISK_SPACE_PERCENTAGE_KEY)); recordsList.add(makeListGridRecord(loadComposite.getDataDiskUsedPercentage(), "Data Disk Space Percent Used","Percentage of disk space used by data files on the partitions that contain the data files. If multiple data locations are specified then the aggregate accross all the partitions that contain data files is reported.", DATA_DISK_SPACE_PERCENTAGE_KEY));
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java index db0aeee..7d0b3a9 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java @@ -29,16 +29,27 @@ import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDat
import java.util.ArrayList; import java.util.Arrays; +import java.util.EnumSet; +import java.util.HashMap; import java.util.List; +import java.util.Map; +import java.util.Set;
import com.google.gwt.user.client.Timer; import com.google.gwt.user.client.rpc.AsyncCallback; +import com.smartgwt.client.types.Alignment; +import com.smartgwt.client.types.ContentsType; import com.smartgwt.client.types.Overflow; +import com.smartgwt.client.types.VerticalAlignment; import com.smartgwt.client.types.VisibilityMode; import com.smartgwt.client.widgets.HTMLFlow; import com.smartgwt.client.widgets.form.DynamicForm; +import com.smartgwt.client.widgets.form.fields.CanvasItem; import com.smartgwt.client.widgets.form.fields.FormItem; +import com.smartgwt.client.widgets.form.fields.LinkItem; import com.smartgwt.client.widgets.form.fields.StaticTextItem; +import com.smartgwt.client.widgets.form.fields.events.ClickEvent; +import com.smartgwt.client.widgets.form.fields.events.ClickHandler; import com.smartgwt.client.widgets.layout.LayoutSpacer; import com.smartgwt.client.widgets.layout.SectionStack; import com.smartgwt.client.widgets.layout.SectionStackSection; @@ -46,7 +57,10 @@ import com.smartgwt.client.widgets.layout.SectionStackSection; import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.criteria.ResourceCriteria; import org.rhq.core.domain.criteria.StorageNodeCriteria; +import org.rhq.core.domain.measurement.MeasurementDefinition; +import org.rhq.core.domain.measurement.composite.MeasurementDataNumericHighLowComposite; import org.rhq.core.domain.resource.Resource; +import org.rhq.core.domain.resource.ResourceType; import org.rhq.core.domain.resource.composite.ResourceComposite; import org.rhq.core.domain.util.PageList; import org.rhq.enterprise.gui.coregui.client.BookmarkableView; @@ -56,9 +70,15 @@ import org.rhq.enterprise.gui.coregui.client.ViewPath; import org.rhq.enterprise.gui.coregui.client.components.table.TimestampCellFormatter; import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; import org.rhq.enterprise.gui.coregui.client.inventory.InventoryView; +import 
org.rhq.enterprise.gui.coregui.client.inventory.common.detail.summary.AbstractActivityView; +import org.rhq.enterprise.gui.coregui.client.inventory.common.detail.summary.AbstractActivityView.ChartViewWindow; import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.configuration.ResourceConfigurationEditView; +import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.D3GraphListView; import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.operation.history.ResourceOperationHistoryListView; +import org.rhq.enterprise.gui.coregui.client.inventory.resource.type.ResourceTypeRepository; +import org.rhq.enterprise.gui.coregui.client.util.BrowserUtility; import org.rhq.enterprise.gui.coregui.client.util.Log; +import org.rhq.enterprise.gui.coregui.client.util.MeasurementUtility; import org.rhq.enterprise.gui.coregui.client.util.StringUtility; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedHLayout; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; @@ -84,6 +104,8 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab private StaticTextItem alertsItem; private int expandedSection = -1; private HTMLFlow header; + private ChartViewWindow window; + private D3GraphListView graphView;
private volatile int initSectionCount = 0; private int unackAlerts = -1; @@ -134,7 +156,8 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab initSectionCount++; } prepareDetailsSection(sectionStack, node); - prepareLoadSection(sectionStack, node); + fetchSparkLineDataForLoadComponent(sectionStack, node); + }
public void onFailure(Throwable caught) { @@ -168,12 +191,28 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab final ResourceComposite resourceComposite = result.get(0); // prepareOperationHistory(resourceComposite); prepareResourceConfigEditor(resourceComposite); - } } }); }
+ private void fetchSparkLineDataForLoadComponent(final SectionStack stack, final StorageNode storageNode) { + + GWTServiceLookup.getStorageService().findStorageNodeLoadDataForLast(storageNode, 8, MeasurementUtility.UNIT_HOURS, + 60, new AsyncCallback<Map<String, List<MeasurementDataNumericHighLowComposite>>>() { + @Override + public void onFailure(Throwable caught) { + + } + + @Override + public void onSuccess(Map<String, List<MeasurementDataNumericHighLowComposite>> result) { + prepareLoadSection(sectionStack, storageNode, result); + } + + }); + } + private void fetchUnackAlerts(final int storageNodeId) { GWTServiceLookup.getStorageService().findNotAcknowledgedStorageNodeAlertsCounts(Arrays.asList(storageNodeId), new AsyncCallback<List<Integer>>() { @@ -318,8 +357,9 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab initSectionCount++; }
- private void prepareLoadSection(SectionStack stack, final StorageNode storageNode) { - StorageNodeLoadComponent loadDataComponent = new StorageNodeLoadComponent(storageNode.getId()); + private void prepareLoadSection(SectionStack stack, final StorageNode storageNode, + final Map<String, List<MeasurementDataNumericHighLowComposite>> sparkLineData) { + StorageNodeLoadComponent loadDataComponent = new StorageNodeLoadComponent(storageNode.getId(), sparkLineData); loadDataComponent.setExtraSpace(5); loadLayout = new EnhancedVLayout(); loadLayout.setWidth100(); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeLoadComponent.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeLoadComponent.java index db54108..7cfb278 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeLoadComponent.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeLoadComponent.java @@ -21,22 +21,46 @@ package org.rhq.enterprise.gui.coregui.client.admin.storage; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasource.DONT_MISS_ME_COLOR; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasource.OK_COLOR; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasource.WARN_COLOR; +import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_ALERTS;
+import java.util.ArrayList; +import java.util.Iterator; import java.util.List; +import java.util.Map; +import java.util.Map.Entry;
+import com.google.gwt.user.client.Timer; +import com.google.gwt.user.client.rpc.AsyncCallback; +import com.smartgwt.client.types.Alignment; import com.smartgwt.client.types.Autofit; +import com.smartgwt.client.types.ContentsType; +import com.smartgwt.client.types.Overflow; +import com.smartgwt.client.types.VerticalAlignment; +import com.smartgwt.client.widgets.HTMLFlow; import com.smartgwt.client.widgets.IButton; import com.smartgwt.client.widgets.events.ClickEvent; import com.smartgwt.client.widgets.events.ClickHandler; +import com.smartgwt.client.widgets.form.DynamicForm; +import com.smartgwt.client.widgets.form.fields.CanvasItem; +import com.smartgwt.client.widgets.form.fields.LinkItem; +import com.smartgwt.client.widgets.form.fields.StaticTextItem; import com.smartgwt.client.widgets.grid.ListGrid; import com.smartgwt.client.widgets.grid.ListGridField; import com.smartgwt.client.widgets.grid.ListGridRecord; +import com.smartgwt.client.widgets.grid.events.DataArrivedEvent; +import com.smartgwt.client.widgets.grid.events.DataArrivedHandler; import com.smartgwt.client.widgets.toolbar.ToolStrip;
+import org.rhq.core.domain.measurement.MeasurementDefinition; +import org.rhq.core.domain.measurement.composite.MeasurementDataNumericHighLowComposite; import org.rhq.enterprise.gui.coregui.client.CoreGUI; +import org.rhq.enterprise.gui.coregui.client.LinkManager; import org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasource.StorageNodeLoadCompositeDatasource; import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; -import org.rhq.enterprise.gui.coregui.client.util.MeasurementUtility; +import org.rhq.enterprise.gui.coregui.client.inventory.common.detail.summary.AbstractActivityView; +import org.rhq.enterprise.gui.coregui.client.inventory.common.detail.summary.AbstractActivityView.ChartViewWindow; +import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.D3GraphListView; +import org.rhq.enterprise.gui.coregui.client.util.BrowserUtility; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout;
/** @@ -46,15 +70,15 @@ import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; */ public class StorageNodeLoadComponent extends EnhancedVLayout { private final ListGrid loadGrid; + private Map<String, List<MeasurementDataNumericHighLowComposite>> sparkLineData;
- public StorageNodeLoadComponent(int storageNodeId) { - this(storageNodeId, null, null); - } - - public StorageNodeLoadComponent(final int storageNodeId, final ListGrid parentGrid, final ListGridRecord record) { + public StorageNodeLoadComponent(final int storageNodeId, + Map<String, List<MeasurementDataNumericHighLowComposite>> sparkLineData) { super(5); setPadding(5); setBackgroundColor("#ffffff"); + this.sparkLineData = sparkLineData; + final boolean showSparkLine = sparkLineData != null && !sparkLineData.isEmpty(); loadGrid = new ListGrid() { @Override protected String getCellCSSText(ListGridRecord record, int rowNum, int colNum) { @@ -90,11 +114,13 @@ public class StorageNodeLoadComponent extends EnhancedVLayout { loadGrid.setAutoFitData(Autofit.VERTICAL); StorageNodeLoadCompositeDatasource datasource = StorageNodeLoadCompositeDatasource.getInstance(storageNodeId); List<ListGridField> fields = datasource.getListGridFields(); + if (showSparkLine) { + fields.add(new ListGridField("sparkline", 90)); + } loadGrid.setFields(fields.toArray(new ListGridField[fields.size()])); loadGrid.setAutoFetchData(true); loadGrid.setHoverWidth(300);
- ToolStrip toolStrip = new ToolStrip(); IButton settingsButton = new IButton("Settings"); settingsButton.addClickHandler(new ClickHandler() { @@ -113,10 +139,194 @@ public class StorageNodeLoadComponent extends EnhancedVLayout { }); refreshButton.setExtraSpace(5); toolStrip.addMember(refreshButton); - loadGrid.setDataSource(datasource); + if (showSparkLine) { + loadGrid.addDataArrivedHandler(new DataArrivedHandler() { + @Override + public void onDataArrived(DataArrivedEvent event) { + showSparkLineGraphs(); + } + }); + } addMember(loadGrid); - + }
+ private void showSparkLineGraphs() { + ListGridRecord[] records = loadGrid.getRecords(); + int i = 0; + for (Entry<String, List<MeasurementDataNumericHighLowComposite>> entry : sparkLineData.entrySet()) { + boolean someChartedData = false; + List<MeasurementDataNumericHighLowComposite> data = entry.getValue(); + //locate last and minimum values. + double lastValue = -1; + double minValue = Double.MAX_VALUE;// + for (MeasurementDataNumericHighLowComposite d : data) { + if ((!Double.isNaN(d.getValue())) + && (!String.valueOf(d.getValue()).contains("NaN"))) { + if (d.getValue() < minValue) { + minValue = d.getValue(); + } + lastValue = d.getValue(); + } + } + + //collapse the data into comma delimited list for consumption by third party javascript library(jquery.sparkline) + String commaDelimitedList = ""; + for (MeasurementDataNumericHighLowComposite d : data) { + if ((!Double.isNaN(d.getValue())) + && (!String.valueOf(d.getValue()).contains("NaN"))) { + commaDelimitedList += d.getValue() + ","; + } + } + + //if graph content returned + someChartedData = lastValue != -1; + + if (someChartedData && records.length > i) { + String contents = "<span id='sparkline_" + entry.getKey() + "' class='dynamicsparkline' width='0' " + + "values='" + commaDelimitedList + "'>...</span>"; + records[i].setAttribute("sparkline", contents); + } + i++; + } + loadGrid.setData(records); + + + + + +// +// +// +// +// +// if (!results.isEmpty()) { +// +// //iterate over the retrieved charting data +// for (int index = 0; index < displayOrder.length; index++) { +// //retrieve the correct measurement definition +// final MeasurementDefinition md = measurementDefMap +// .get(displayOrder[index]); +// +// //load the data results for the given metric definition +// List<MeasurementDataNumericHighLowComposite> data = results +// .get(index); +// +// //locate last and minimum values. 
+// double lastValue = -1; +// double minValue = Double.MAX_VALUE;// +// for (MeasurementDataNumericHighLowComposite d : data) { +// if ((!Double.isNaN(d.getValue())) +// && (!String.valueOf(d.getValue()).contains("NaN"))) { +// if (d.getValue() < minValue) { +// minValue = d.getValue(); +// } +// lastValue = d.getValue(); +// } +// } +// +// //collapse the data into comma delimited list for consumption by third party javascript library(jquery.sparkline) +// String commaDelimitedList = ""; +// +// for (MeasurementDataNumericHighLowComposite d : data) { +// if ((!Double.isNaN(d.getValue())) +// && (!String.valueOf(d.getValue()).contains("NaN"))) { +// commaDelimitedList += d.getValue() + ","; +// } +// } +// DynamicForm row = new DynamicForm(); +// row.setNumCols(3); +// row.setColWidths(65, "*", 100); +// row.setWidth100(); +// row.setAutoHeight(); +// row.setOverflow(Overflow.VISIBLE); +// HTMLFlow sparklineGraph = new HTMLFlow(); +// String contents = "<span id='sparkline_" + index +// + "' class='dynamicsparkline' width='0' " + "values='" +// + commaDelimitedList + "'>...</span>"; +// sparklineGraph.setContents(contents); +// sparklineGraph.setContentsType(ContentsType.PAGE); +// //disable scrollbars on span +// sparklineGraph.setScrollbarSize(0); +// +// CanvasItem sparklineContainer = new CanvasItem(); +// sparklineContainer.setShowTitle(false); +// sparklineContainer.setHeight(16); +// sparklineContainer.setWidth(60); +// sparklineContainer.setCanvas(sparklineGraph); +// +// //Link/title element +// final String title = md.getDisplayName(); +// LinkItem link = AbstractActivityView.newLinkItem(title, null); +// link.setTooltip(title); +// link.setTitleVAlign(VerticalAlignment.TOP); +// link.setAlign(Alignment.LEFT); +// link.setClipValue(true); +// link.setWrap(true); +// link.setHeight(26); +// link.setWidth("100%"); +// if (!BrowserUtility.isBrowserPreIE9()){ +// link.addClickHandler(new ClickHandler() { +// @Override +// public void onClick(ClickEvent 
event) { +// window = new ChartViewWindow(title); +// +// graphView = D3GraphListView +// .createSingleGraph(resourceComposite.getResource(), +// md.getId(), true); +// +// window.addItem(graphView); +// window.show(); +// } +// }); +// } else{ +// link.disable(); +// } +// +// +// //Value +// String convertedValue; +// convertedValue = AbstractActivityView.convertLastValueForDisplay( +// lastValue, md); +// StaticTextItem value = AbstractActivityView +// .newTextItem(convertedValue); +// value.setVAlign(VerticalAlignment.TOP); +// value.setAlign(Alignment.RIGHT); +// +// row.setItems(sparklineContainer, link, value); +// row.setWidth100(); +// +// //if graph content returned +// if ((!md.getName().trim().contains("Trait.")) && (lastValue != -1)) { +// column.addMember(row); +// someChartedData = true; +// } +// } +// if (!someChartedData) {// when there are results but no chartable entries. +// DynamicForm row = AbstractActivityView.createEmptyDisplayRow( +// +// AbstractActivityView.RECENT_MEASUREMENTS_NONE); +// column.addMember(row); +// } else { +// //insert see more link +// DynamicForm row = new DynamicForm(); +// String link = LinkManager +// .getResourceMonitoringGraphsLink(resourceId); +// AbstractActivityView.addSeeMoreLink(row, link, column); +// } +// //call out to 3rd party javascript lib +// new Timer(){ +// @Override +// public void run() { +// BrowserUtility.graphSparkLines(); +// } +// }.schedule(200); +// } else { +// DynamicForm row = AbstractActivityView +// .createEmptyDisplayRow(AbstractActivityView.RECENT_MEASUREMENTS_NONE); +// column.addMember(row); +// } +// setRefreshing(false); + } } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java index bb90929..328545a 100644 --- 
a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java @@ -27,7 +27,7 @@ import java.util.Arrays; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; - +import org.rhq.enterprise.gui.coregui.client.util.Log; import com.google.gwt.user.client.Timer; import com.google.gwt.user.client.rpc.AsyncCallback; import com.smartgwt.client.data.Criteria; @@ -140,6 +140,7 @@ public class StorageNodeTableView extends TableSection<StorageNodeDatasource> { private void scheduleUnacknowledgedAlertsPollingJob(final ListGrid listGrid) { new Timer() { public void run() { + Log.info("Running the job fetching the number of unack alerts for particular storage nodes..."); final ListGridRecord[] records = listGrid.getRecords(); List<Integer> storageNodeIds = new ArrayList<Integer>(records.length); for (ListGridRecord record : records) { @@ -156,16 +157,19 @@ public class StorageNodeTableView extends TableSection<StorageNodeDatasource> { StorageNodeAdminView.getAlertsString("New Alerts", value)); listGrid.setData(records); } - schedule(10 * 1000); + schedule(15 * 1000); }
@Override public void onFailure(Throwable caught) { schedule(60 * 1000); + // todo: + SC.say("fooo"); } }); } - }.schedule(5 * 1000); + }.schedule(15 * 1000); + Log.info("Polling job fetching the number of unack alerts for particular storage nodes has been scheduled"); }
@Override @@ -174,7 +178,7 @@ public class StorageNodeTableView extends TableSection<StorageNodeDatasource> { @Override protected Canvas getExpansionComponent(final ListGridRecord record) { int id = record.getAttributeAsInt(FIELD_ID); - return new StorageNodeLoadComponent(id, this, record); + return new StorageNodeLoadComponent(id, null); } }; listGrid.setCanExpandRecords(true); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java index 69c875e..1e3376c 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java @@ -23,12 +23,14 @@ package org.rhq.enterprise.gui.coregui.client.gwt;
import java.util.List; +import java.util.Map;
import com.google.gwt.user.client.rpc.RemoteService;
import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.cloud.StorageNodeLoadComposite; import org.rhq.core.domain.criteria.StorageNodeCriteria; +import org.rhq.core.domain.measurement.composite.MeasurementDataNumericHighLowComposite; import org.rhq.core.domain.util.PageList;
/** @@ -76,4 +78,6 @@ public interface StorageGWTService extends RemoteService { int findNotAcknowledgedStorageNodeAlertsCount() throws RuntimeException;
List<Integer> findNotAcknowledgedStorageNodeAlertsCounts(List<Integer> storageNodeIds) throws RuntimeException; + + Map<String, List<MeasurementDataNumericHighLowComposite>> findStorageNodeLoadDataForLast(StorageNode node, int lastN, int unit, int numPoints) throws RuntimeException; } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java index 624953c..7f3093b 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java @@ -24,11 +24,13 @@ package org.rhq.enterprise.gui.coregui.server.gwt;
import java.util.ArrayList; import java.util.List; +import java.util.Map;
import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.cloud.StorageNodeLoadComposite; import org.rhq.core.domain.criteria.ResourceCriteria; import org.rhq.core.domain.criteria.StorageNodeCriteria; +import org.rhq.core.domain.measurement.composite.MeasurementDataNumericHighLowComposite; import org.rhq.core.domain.resource.Resource; import org.rhq.core.domain.util.PageList; import org.rhq.enterprise.gui.coregui.client.gwt.StorageGWTService; @@ -137,4 +139,15 @@ public class StorageGWTServiceImpl extends AbstractGWTServiceImpl implements Sto throw getExceptionToThrowToClient(t); } } + + @Override + public Map<String, List<MeasurementDataNumericHighLowComposite>> findStorageNodeLoadDataForLast(StorageNode node, int lastN, int unit, int numPoints) throws RuntimeException { + try { + List<Long> beginEnd = MeasurementUtils.calculateTimeFrame(lastN, unit); + return SerialUtility.prepare(storageNodeManager.findStorageNodeLoadDataForLast(getSessionSubject(), node, + beginEnd.get(0), beginEnd.get(1), numPoints), "StorageGWTServiceImpl.findStorageNodeLoadDataForLast"); + } catch (Throwable t) { + throw getExceptionToThrowToClient(t); + } + } }
commit 6fe092021521e4ea752f27a0578b864938bf13b7 Author: Jirka Kremser jkremser@redhat.com Date: Mon Aug 5 15:58:54 2013 +0200
New method on server jar for sparkline graphs for storage node.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 0cf45e4..ae1e69a 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -29,6 +29,7 @@ import java.net.UnknownHostException; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; +import java.util.LinkedHashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; @@ -43,6 +44,7 @@ import javax.persistence.EntityManager; import javax.persistence.PersistenceContext; import javax.persistence.TypedQuery;
+import org.apache.commons.collections.map.LinkedMap; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory;
@@ -65,6 +67,7 @@ import org.rhq.core.domain.criteria.ResourceOperationHistoryCriteria; import org.rhq.core.domain.criteria.StorageNodeCriteria; import org.rhq.core.domain.measurement.MeasurementAggregate; import org.rhq.core.domain.measurement.MeasurementUnits; +import org.rhq.core.domain.measurement.composite.MeasurementDataNumericHighLowComposite; import org.rhq.core.domain.operation.OperationRequestStatus; import org.rhq.core.domain.operation.ResourceOperationHistory; import org.rhq.core.domain.operation.bean.GroupOperationSchedule; @@ -122,6 +125,19 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN private static final int MAX_ITERATIONS = 10; private static final String UPDATE_CONFIGURATION_OPERATION = "updateConfiguration"; private static final String RESTART_OPERATION = "restart"; + + // metric names on Storage Service resource + private static final String METRIC_TOKENS = "Tokens", METRIC_OWNERSHIP = "Ownership"; + private static final String METRIC_DATA_DISK_USED_PERCENTAGE = "Calculated.DataDiskUsedPercentage"; + private static final String METRIC_TOTAL_DISK_USED_PERCENTAGE = "Calculated.TotalDiskUsedPercentage"; + private static final String METRIC_FREE_DISK_TO_DATA_RATIO = "Calculated.FreeDiskToDataSizeRatio"; + private static final String METRIC_LOAD = "Load", METRIC_KEY_CACHE_SIZE = "KeyCacheSize", + METRIC_ROW_CACHE_SIZE = "RowCacheSize", METRIC_TOTAL_COMMIT_LOG_SIZE = "TotalCommitlogSize"; + + //metric names on Memory Subsystem resource + private static final String METRIC_HEAP_COMMITED = "{HeapMemoryUsage.committed}", + METRIC_HEAP_USED = "{HeapMemoryUsage.used}", METRIC_HEAP_USED_PERCENTAGE = "Calculated.HeapUsagePercentage"; +
@PersistenceContext(unitName = RHQConstants.PERSISTENCE_UNIT_NAME) private EntityManager entityManager; @@ -347,106 +363,90 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN int resourceId = getResourceIdFromStorageNode(node); Map<String, Integer> scheduleIdsMap = new HashMap<String, Integer>();
- // get the schedule ids for Storage Service resource - final String tokensMetric = "Tokens", ownershipMetric = "Ownership"; - final String dataDiskUsedPercentageMetric = "Calculated.DataDiskUsedPercentage"; - final String totalDiskUsedPercentageMetric = "Calculated.TotalDiskUsedPercentage"; - final String freeDiskToDataRatioMetric = "Calculated.FreeDiskToDataSizeRatio"; - final String loadMetric = "Load", keyCacheSize = "KeyCacheSize", rowCacheSize = "RowCacheSize", totalCommitLogSize = "TotalCommitlogSize"; - TypedQuery<Object[]> query = entityManager.<Object[]> createNamedQuery( - StorageNode.QUERY_FIND_SCHEDULE_IDS_BY_PARENT_RESOURCE_ID_AND_MEASUREMENT_DEFINITION_NAMES, Object[].class); - query.setParameter("parrentId", resourceId).setParameter("metricNames", - Arrays.asList(tokensMetric, ownershipMetric, loadMetric, keyCacheSize, rowCacheSize, totalCommitLogSize, - dataDiskUsedPercentageMetric, totalDiskUsedPercentageMetric, freeDiskToDataRatioMetric)); - for (Object[] pair : query.getResultList()) { - scheduleIdsMap.put((String) pair[0], (Integer) pair[1]); + for (Object[] tupple : getStorageServiceScheduleIds(resourceId)) { + String definitionName = (String) tupple[0]; + Integer scheduleId = (Integer) tupple[2]; + scheduleIdsMap.put(definitionName, scheduleId); } - - // get the schedule ids for Memory Subsystem resource - final String heapCommittedMetric = "{HeapMemoryUsage.committed}", heapUsedMetric = "{HeapMemoryUsage.used}", heapUsedPercentageMetric = "Calculated.HeapUsagePercentage"; - query = entityManager.<Object[]> createNamedQuery( - StorageNode.QUERY_FIND_SCHEDULE_IDS_BY_GRANDPARENT_RESOURCE_ID_AND_MEASUREMENT_DEFINITION_NAMES, - Object[].class); - query.setParameter("grandparrentId", resourceId).setParameter("metricNames", - Arrays.asList(heapCommittedMetric, heapUsedMetric, heapUsedPercentageMetric)); - for (Object[] pair : query.getResultList()) { - scheduleIdsMap.put((String) pair[0], (Integer) pair[1]); + for (Object[] tupple : 
getMemorySubsystemScheduleIds(resourceId)) { + String definitionName = (String) tupple[0]; + Integer scheduleId = (Integer) tupple[2]; + scheduleIdsMap.put(definitionName, scheduleId); }
- StorageNodeLoadComposite result = new StorageNodeLoadComposite(node, beginTime, endTime); - MeasurementAggregate totalDiskUsedaggregate = new MeasurementAggregate(0d, 0d, 0d); + MeasurementAggregate totalDiskUsedAggregate = new MeasurementAggregate(0d, 0d, 0d); Integer scheduleId = null;
// find the aggregates and enrich the result instance if (!scheduleIdsMap.isEmpty()) { - if ((scheduleId = scheduleIdsMap.get(tokensMetric)) != null) { + if ((scheduleId = scheduleIdsMap.get(METRIC_TOKENS)) != null) { MeasurementAggregate tokensAggregate = measurementManager.getAggregate(subject, scheduleId, beginTime, endTime); result.setTokens(tokensAggregate); } - if ((scheduleId = scheduleIdsMap.get(ownershipMetric)) != null) { + if ((scheduleId = scheduleIdsMap.get(METRIC_OWNERSHIP)) != null) { StorageNodeLoadComposite.MeasurementAggregateWithUnits ownershipAggregateWithUnits = getMeasurementAggregateWithUnits( subject, scheduleId, MeasurementUnits.PERCENTAGE, beginTime, endTime); result.setActuallyOwns(ownershipAggregateWithUnits); }
//calculated disk space related metrics - if ((scheduleId = scheduleIdsMap.get(dataDiskUsedPercentageMetric)) != null) { + if ((scheduleId = scheduleIdsMap.get(METRIC_DATA_DISK_USED_PERCENTAGE)) != null) { StorageNodeLoadComposite.MeasurementAggregateWithUnits dataDiskUsedPercentageAggregateWithUnits = getMeasurementAggregateWithUnits( subject, scheduleId, MeasurementUnits.PERCENTAGE, beginTime, endTime); result.setDataDiskUsedPercentage(dataDiskUsedPercentageAggregateWithUnits); } - if ((scheduleId = scheduleIdsMap.get(totalDiskUsedPercentageMetric)) != null) { + if ((scheduleId = scheduleIdsMap.get(METRIC_TOTAL_DISK_USED_PERCENTAGE)) != null) { StorageNodeLoadComposite.MeasurementAggregateWithUnits totalDiskUsedPercentageAggregateWithUnits = getMeasurementAggregateWithUnits( subject, scheduleId, MeasurementUnits.PERCENTAGE, beginTime, endTime); result.setTotalDiskUsedPercentage(totalDiskUsedPercentageAggregateWithUnits); } - if ((scheduleId = scheduleIdsMap.get(freeDiskToDataRatioMetric)) != null) { + if ((scheduleId = scheduleIdsMap.get(METRIC_FREE_DISK_TO_DATA_RATIO)) != null) { MeasurementAggregate freeDiskToDataRatioAggregate = measurementManager.getAggregate(subject, scheduleId, beginTime, endTime); result.setFreeDiskToDataSizeRatio(freeDiskToDataRatioAggregate); }
- if ((scheduleId = scheduleIdsMap.get(loadMetric)) != null) { + if ((scheduleId = scheduleIdsMap.get(METRIC_LOAD)) != null) { StorageNodeLoadComposite.MeasurementAggregateWithUnits loadAggregateWithUnits = getMeasurementAggregateWithUnits( subject, scheduleId, MeasurementUnits.BYTES, beginTime, endTime); result.setLoad(loadAggregateWithUnits);
- updateAggregateTotal(totalDiskUsedaggregate, loadAggregateWithUnits.getAggregate()); - } - if ((scheduleId = scheduleIdsMap.get(keyCacheSize)) != null) { - updateAggregateTotal(totalDiskUsedaggregate, - measurementManager.getAggregate(subject, scheduleId, beginTime, endTime)); - } - if ((scheduleId = scheduleIdsMap.get(rowCacheSize)) != null) { - updateAggregateTotal(totalDiskUsedaggregate, - measurementManager.getAggregate(subject, scheduleId, beginTime, endTime)); + updateAggregateTotal(totalDiskUsedAggregate, loadAggregateWithUnits.getAggregate()); } - if ((scheduleId = scheduleIdsMap.get(totalCommitLogSize)) != null) { - updateAggregateTotal(totalDiskUsedaggregate, - measurementManager.getAggregate(subject, scheduleId, beginTime, endTime)); - } - - if (totalDiskUsedaggregate.getMax() > 0) { +// if ((scheduleId = scheduleIdsMap.get(METRIC_KEY_CACHE_SIZE)) != null) { +// updateAggregateTotal(totalDiskUsedAggregate, +// measurementManager.getAggregate(subject, scheduleId, beginTime, endTime)); +// } +// if ((scheduleId = scheduleIdsMap.get(METRIC_ROW_CACHE_SIZE)) != null) { +// updateAggregateTotal(totalDiskUsedAggregate, +// measurementManager.getAggregate(subject, scheduleId, beginTime, endTime)); +// } +// if ((scheduleId = scheduleIdsMap.get(METRIC_TOTAL_COMMIT_LOG_SIZE)) != null) { +// updateAggregateTotal(totalDiskUsedAggregate, +// measurementManager.getAggregate(subject, scheduleId, beginTime, endTime)); +// } + + if (totalDiskUsedAggregate.getMax() > 0) { StorageNodeLoadComposite.MeasurementAggregateWithUnits totalDiskUsedAggregateWithUnits = new StorageNodeLoadComposite.MeasurementAggregateWithUnits( - totalDiskUsedaggregate, MeasurementUnits.BYTES); - totalDiskUsedAggregateWithUnits.setFormattedValue(getSummaryString(totalDiskUsedaggregate, + totalDiskUsedAggregate, MeasurementUnits.BYTES); + totalDiskUsedAggregateWithUnits.setFormattedValue(getSummaryString(totalDiskUsedAggregate, MeasurementUnits.BYTES)); 
result.setDataDiskUsed(totalDiskUsedAggregateWithUnits); }
- if ((scheduleId = scheduleIdsMap.get(heapCommittedMetric)) != null) { + if ((scheduleId = scheduleIdsMap.get(METRIC_HEAP_COMMITED)) != null) { StorageNodeLoadComposite.MeasurementAggregateWithUnits heapCommittedAggregateWithUnits = getMeasurementAggregateWithUnits( subject, scheduleId, MeasurementUnits.BYTES, beginTime, endTime); result.setHeapCommitted(heapCommittedAggregateWithUnits); } - if ((scheduleId = scheduleIdsMap.get(heapUsedMetric)) != null) { + if ((scheduleId = scheduleIdsMap.get(METRIC_HEAP_USED)) != null) { StorageNodeLoadComposite.MeasurementAggregateWithUnits heapUsedAggregateWithUnits = getMeasurementAggregateWithUnits( subject, scheduleId, MeasurementUnits.BYTES, beginTime, endTime); result.setHeapUsed(heapUsedAggregateWithUnits); } - if ((scheduleId = scheduleIdsMap.get(heapUsedPercentageMetric)) != null) { + if ((scheduleId = scheduleIdsMap.get(METRIC_HEAP_USED_PERCENTAGE)) != null) { StorageNodeLoadComposite.MeasurementAggregateWithUnits heapUsedPercentageAggregateWithUnits = getMeasurementAggregateWithUnits( subject, scheduleId, MeasurementUnits.PERCENTAGE, beginTime, endTime); @@ -456,6 +456,26 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
return result; } + + private List<Object[]> getStorageServiceScheduleIds(int storageNodeResourceId) { + // get the schedule ids for Storage Service resource + TypedQuery<Object[]> query = entityManager.<Object[]> createNamedQuery( + StorageNode.QUERY_FIND_SCHEDULE_IDS_BY_PARENT_RESOURCE_ID_AND_MEASUREMENT_DEFINITION_NAMES, Object[].class); + query.setParameter("parrentId", storageNodeResourceId).setParameter("metricNames", + Arrays.asList(METRIC_TOKENS, METRIC_OWNERSHIP, METRIC_LOAD/*, METRIC_KEY_CACHE_SIZE, METRIC_ROW_CACHE_SIZE, METRIC_TOTAL_COMMIT_LOG_SIZE*/, + METRIC_DATA_DISK_USED_PERCENTAGE, METRIC_TOTAL_DISK_USED_PERCENTAGE, METRIC_FREE_DISK_TO_DATA_RATIO)); + return query.getResultList(); + } + + private List<Object[]> getMemorySubsystemScheduleIds(int storageNodeResourceId) { + // get the schedule ids for Memory Subsystem resource + TypedQuery<Object[]> query = entityManager.<Object[]> createNamedQuery( + StorageNode.QUERY_FIND_SCHEDULE_IDS_BY_GRANDPARENT_RESOURCE_ID_AND_MEASUREMENT_DEFINITION_NAMES, + Object[].class); + query.setParameter("grandparrentId", storageNodeResourceId).setParameter("metricNames", + Arrays.asList(METRIC_HEAP_COMMITED, METRIC_HEAP_USED, METRIC_HEAP_USED_PERCENTAGE)); + return query.getResultList(); + }
/** * @param accumulator @@ -776,6 +796,55 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN public void scheduleOperationInNewTransaction(Subject subject, ResourceOperationSchedule schedule) { operationManager.scheduleResourceOperation(subject, schedule); } + + @Override + @RequiredPermissions({ @RequiredPermission(Permission.MANAGE_SETTINGS), + @RequiredPermission(Permission.MANAGE_INVENTORY) }) + public Map<String, List<MeasurementDataNumericHighLowComposite>> findStorageNodeLoadDataForLast(Subject subject, + StorageNode node, long beginTime, long endTime, int numPoints) { + int storageNodeResourceId = getResourceIdFromStorageNode(node); + Map<String, List<MeasurementDataNumericHighLowComposite>> result = new LinkedHashMap<String, List<MeasurementDataNumericHighLowComposite>>(); + + List<Object[]> tupples = getStorageServiceScheduleIds(storageNodeResourceId); + List<String> defNames = new ArrayList<String>(); + int[] definitionIds = new int[tupples.size()]; + int resId = -1; + int index = 0; + for (Object[] tupple : tupples) { + String defName = (String) tupple[0]; + int definitionId = (Integer) tupple[1]; + resId = (Integer) tupple[3]; + defNames.add(defName); + definitionIds[index++] = definitionId; + } + List<List<MeasurementDataNumericHighLowComposite>> storageServiceData = measurementManager.findDataForResource( + subject, resId, definitionIds, beginTime, endTime, numPoints); + for (int i = 0; i < storageServiceData.size(); i ++) { + List<MeasurementDataNumericHighLowComposite> oneRecord = storageServiceData.get(i); + result.put(defNames.get(i), oneRecord); + } + + tupples = getMemorySubsystemScheduleIds(storageNodeResourceId); + defNames = new ArrayList<String>(); + definitionIds = new int[tupples.size()]; + resId = -1; + index = 0; + for (Object[] tupple : tupples) { + String defName = (String) tupple[0]; + int definitionId = (Integer) tupple[1]; + resId = (Integer) tupple[3]; + defNames.add(defName); + 
definitionIds[index++] = definitionId; + } + List<List<MeasurementDataNumericHighLowComposite>> memorySubsystemData = measurementManager.findDataForResource( + subject, resId, definitionIds, beginTime, endTime, numPoints); + for (int i = 0; i < memorySubsystemData.size(); i ++) { + List<MeasurementDataNumericHighLowComposite> oneRecord = memorySubsystemData.get(i); + result.put(defNames.get(i), oneRecord); + } + + return result; + }
private boolean runOperationAndWaitForResult(Subject subject, Resource storageNodeResource, String operationToRun, Configuration parameters) { diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java index 63e8e3e..0c86152 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java @@ -20,6 +20,7 @@ package org.rhq.enterprise.server.cloud;
import java.net.InetAddress; import java.util.List; +import java.util.Map;
import javax.ejb.Local;
@@ -30,6 +31,7 @@ import org.rhq.core.domain.cloud.StorageNodeConfigurationComposite; import org.rhq.core.domain.cloud.StorageNodeLoadComposite; import org.rhq.core.domain.criteria.StorageNodeCriteria; import org.rhq.core.domain.operation.bean.ResourceOperationSchedule; +import org.rhq.core.domain.measurement.composite.MeasurementDataNumericHighLowComposite; import org.rhq.core.domain.resource.Resource; import org.rhq.core.domain.resource.group.ResourceGroup; import org.rhq.core.domain.util.PageList; @@ -185,6 +187,7 @@ public interface StorageNodeManagerLocal { * @throws IllegalStateException if the group is not found or does not exist. */ ResourceGroup getStorageNodeGroup(); +
void scheduleOperationInNewTransaction(Subject subject, ResourceOperationSchedule schedule);
@@ -192,4 +195,5 @@ public interface StorageNodeManagerLocal {
void runAddNodeMaintenance();
+ Map<String, List<MeasurementDataNumericHighLowComposite>> findStorageNodeLoadDataForLast(Subject subject, StorageNode node, long beginTime, long endTime, int numPoints); } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/measurement/MeasurementDataManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/measurement/MeasurementDataManagerBean.java index 90ed917..09133ae 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/measurement/MeasurementDataManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/measurement/MeasurementDataManagerBean.java @@ -778,7 +778,7 @@ public class MeasurementDataManagerBean implements MeasurementDataManagerLocal, false);
List<MeasurementDataNumericHighLowComposite> tempList = new ArrayList<MeasurementDataNumericHighLowComposite>(); - for(MeasurementDataNumericHighLowComposite object :metricsManager.findDataForResource(schedule.getId(), beginTime, endTime,numDataPoints) ){ + for(MeasurementDataNumericHighLowComposite object : metricsManager.findDataForResource(schedule.getId(), beginTime, endTime,numDataPoints) ){ tempList.add(object); }
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/measurement/MeasurementDataManagerRemote.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/measurement/MeasurementDataManagerRemote.java index fb50541..116c82a 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/measurement/MeasurementDataManagerRemote.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/measurement/MeasurementDataManagerRemote.java @@ -112,8 +112,7 @@ public interface MeasurementDataManagerRemote { * * @param subject * @param resourceId - * @param definitionIds measurement definition id for numeric metric associated with the given compatible - * group + * @param definitionIds measurement definition id for numeric metric associated with the given resource * @param beginTime * @param endTime * @param numPoints diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java index ae80e50..3f1943d 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java @@ -96,12 +96,14 @@ public class StorageClientManagerBean { String password = getRequiredStorageProperty(PASSWORD_PROP);
metricsConfiguration = new MetricsConfiguration(); - - Session wrappedSession = createSession(username, password, storageNodeManager.getStorageNodes()); - session = new StorageSession(wrappedSession); - - session.addStorageStateListener(new StorageClusterMonitor()); - + List<StorageNode> storageNodes = storageNodeManager.getStorageNodes(); + if (storageNodes.isEmpty()) { + throw new IllegalStateException( + "There is no storage node metadata stored in the relational database. This may have happened as a " + + "result of running dbsetup or deleting rows from rhq_storage_node table. Please re-install the " + + "storage node to fix this issue."); + } + session = createSession(username, password, storageNodes); metricsDAO = new MetricsDAO(session, metricsConfiguration);
Server server = serverManager.getServer();
commit 979ebb45e7b478f83d3cfe3a46c25b0db291a36c Author: Jirka Kremser jkremser@redhat.com Date: Fri Aug 2 13:04:23 2013 +0200
Adding the header next to the "Back to List" clickable arrow. This required changing AbstractTableSection to support an additional canvas to save some space on the screen.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java index b98697b..a7058d7 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java @@ -133,11 +133,11 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNodeLoadComposit }); fields.add(field);
- fields.add(FIELD_JMX_PORT.getListGridField("90")); +// fields.add(FIELD_JMX_PORT.getListGridField("90")); // ListGridField cqlField = FIELD_CQL_PORT.getListGridField("90"); // cqlField.setHidden(true); // fields.add(cqlField); -// fields.add(FIELD_OPERATION_MODE.getListGridField("90")); + fields.add(FIELD_OPERATION_MODE.getListGridField("90"));
ListGridField createdTimeField = FIELD_CTIME.getListGridField("120"); TimestampCellFormatter.prepareDateField(createdTimeField); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java index 942a184..db0aeee 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java @@ -83,13 +83,15 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab private SectionStackSection detailsAndLoadSection; private StaticTextItem alertsItem; private int expandedSection = -1; + private HTMLFlow header;
private volatile int initSectionCount = 0; private int unackAlerts = -1;
- public StorageNodeDetailView(int storageNodeId) { + public StorageNodeDetailView(int storageNodeId, HTMLFlow header) { super(); this.storageNodeId = storageNodeId; + this.header = header; setHeight100(); setWidth100(); setOverflow(Overflow.AUTO); @@ -102,10 +104,10 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab // sectionStack.setOverflow(Overflow.VISIBLE); }
- public StorageNodeDetailView(int storageNodeId, int expandedSection) { - this(storageNodeId); - this.expandedSection = expandedSection; - } +// public StorageNodeDetailView(int storageNodeId, int expandedSection) { +// this(storageNodeId); +// this.expandedSection = expandedSection; +// }
@Override protected void onInit() { @@ -122,6 +124,8 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab initSectionCount = SECTION_COUNT; } final StorageNode node = storageNodes.get(0); + header.setContents("<div style='text-align: center; font-weight: bold; font-size: medium;'> Storage Node (" + + node.getAddress() + ")</div>"); Resource res = node.getResource(); if (res != null) { fetchResourceComposite(res.getId()); @@ -254,6 +258,9 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab final StaticTextItem nameItem = new StaticTextItem(FIELD_ADDRESS.propertyName(), FIELD_ADDRESS.title()); nameItem.setValue("<b>" + storageNode.getAddress() + "</b>");
+ final StaticTextItem cqlPortItem = new StaticTextItem(FIELD_CQL_PORT.propertyName(), FIELD_CQL_PORT.title()); + cqlPortItem.setValue(storageNode.getCqlPort()); + final StaticTextItem jmxPortItem = new StaticTextItem(FIELD_JMX_PORT.propertyName(), FIELD_JMX_PORT.title()); jmxPortItem.setValue(storageNode.getJmxPort());
@@ -261,9 +268,6 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab // MSG.view_adminTopology_storageNode_jmxConnectionUrl()); // jmxConnectionUrlItem.setValue(storageNode.getJMXConnectionURL());
- final StaticTextItem cqlPortItem = new StaticTextItem(FIELD_CQL_PORT.propertyName(), FIELD_CQL_PORT.title()); - cqlPortItem.setValue(storageNode.getCqlPort()); - final StaticTextItem operationModeItem = new StaticTextItem(FIELD_OPERATION_MODE.propertyName(), MSG.view_adminTopology_serverDetail_operationMode()); operationModeItem.setValue(storageNode.getOperationMode());
@@ -300,7 +304,7 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab diskStatusItem.setValue("No action needed");
List<FormItem> formItems = new ArrayList<FormItem>(6); - formItems.addAll(Arrays.asList(nameItem, resourceItem, jmxPortItem, cqlPortItem/*, jmxConnectionUrlItem*/)); + formItems.addAll(Arrays.asList(nameItem, resourceItem,cqlPortItem, jmxPortItem/*, jmxConnectionUrlItem*/)); if (!CoreGUI.isDebugMode()) formItems.add(operationModeItem); // debug mode fails if this item is added formItems.addAll(Arrays.asList(installationDateItem, lastUpdateItem, alertsItem, memoryStatusItem, diskStatusItem)); form.setItems(formItems.toArray(new FormItem[]{})); @@ -355,8 +359,6 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab initSectionCount++; }
- - @Override public void renderView(ViewPath viewPath) { if (viewPath.toString().endsWith("/Config")) { diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeLoadComponent.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeLoadComponent.java index 556e591..db54108 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeLoadComponent.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeLoadComponent.java @@ -35,6 +35,8 @@ import com.smartgwt.client.widgets.toolbar.ToolStrip;
import org.rhq.enterprise.gui.coregui.client.CoreGUI; import org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasource.StorageNodeLoadCompositeDatasource; +import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; +import org.rhq.enterprise.gui.coregui.client.util.MeasurementUtility; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout;
/** @@ -112,20 +114,9 @@ public class StorageNodeLoadComponent extends EnhancedVLayout { refreshButton.setExtraSpace(5); toolStrip.addMember(refreshButton);
- if (parentGrid != null && record != null) { - IButton closeButton = new IButton(MSG.common_button_close()); - closeButton.addClickHandler(new ClickHandler() { - public void onClick(ClickEvent event) { - parentGrid.collapseRecord(record); - } - }); - toolStrip.addMember(closeButton); - - - } loadGrid.setDataSource(datasource); addMember(loadGrid); -// addMember(toolStrip);
} + } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java index c0fdd85..bb90929 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java @@ -35,6 +35,7 @@ import com.smartgwt.client.types.SortDirection; import com.smartgwt.client.util.BooleanCallback; import com.smartgwt.client.util.SC; import com.smartgwt.client.widgets.Canvas; +import com.smartgwt.client.widgets.HTMLFlow; import com.smartgwt.client.widgets.grid.CellFormatter; import com.smartgwt.client.widgets.grid.ListGrid; import com.smartgwt.client.widgets.grid.ListGridField; @@ -184,7 +185,9 @@ public class StorageNodeTableView extends TableSection<StorageNodeDatasource> {
@Override public Canvas getDetailsView(Integer id) { - return new StorageNodeDetailView(id); + HTMLFlow header = new HTMLFlow("id = " + id); + setHeader(header); + return new StorageNodeDetailView(id, header); }
private void showCommonActions() { diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/table/AbstractTableSection.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/table/AbstractTableSection.java index a8befa7..cfd097f 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/table/AbstractTableSection.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/table/AbstractTableSection.java @@ -34,6 +34,8 @@ import com.smartgwt.client.widgets.grid.CellFormatter; import com.smartgwt.client.widgets.grid.ListGrid; import com.smartgwt.client.widgets.grid.ListGridField; import com.smartgwt.client.widgets.grid.ListGridRecord; +import com.smartgwt.client.widgets.layout.HLayout; +import com.smartgwt.client.widgets.layout.LayoutSpacer; import com.smartgwt.client.widgets.layout.VLayout;
import org.rhq.enterprise.gui.coregui.client.BookmarkableView; @@ -46,8 +48,9 @@ import org.rhq.enterprise.gui.coregui.client.components.buttons.BackButton; import org.rhq.enterprise.gui.coregui.client.util.Log; import org.rhq.enterprise.gui.coregui.client.util.RPCDataSource; import org.rhq.enterprise.gui.coregui.client.util.StringUtility; -import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; +import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedHLayout; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedUtility; +import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout;
/** * Provides the typical table view with the additional ability of traversing to a "details" view @@ -65,6 +68,7 @@ public abstract class AbstractTableSection<DS extends RPCDataSource, ID> extends
private VLayout detailsHolder; private Canvas detailsView; + private Canvas header; private String basePath; private boolean escapeHtmlInDetailsLinkColumn; private boolean initialDisplay; @@ -388,8 +392,15 @@ public abstract class AbstractTableSection<DS extends RPCDataSource, ID> extends // Only add the "Back to List" button if the details are definitely not editable, because if they are // editable, a Cancel button should already be provided by the details view. BackButton backButton = new BackButton(MSG.view_tableSection_backButton(), basePath); - detailsHolder.addMember(backButton); - VLayout verticalSpacer = new EnhancedVLayout(); + HLayout hlayout = new EnhancedHLayout(); + hlayout.addMember(backButton); + if (header != null) { + header.setWidth100(); + header.setAlign(com.smartgwt.client.types.Alignment.CENTER); + hlayout.addMember(header); + } + detailsHolder.addMember(hlayout); + LayoutSpacer verticalSpacer = new LayoutSpacer(); verticalSpacer.setHeight(8); detailsHolder.addMember(verticalSpacer); } @@ -431,5 +442,9 @@ public abstract class AbstractTableSection<DS extends RPCDataSource, ID> extends } } } + + public void setHeader(Canvas header) { + this.header = header; + }
}
commit 37c0b0dccd4574851c193074f9f7517a6286ec78 Author: Jirka Kremser jkremser@redhat.com Date: Thu Jul 25 14:57:37 2013 +0200
squashed commit: UI work
* Another iteration of Storage Node UI: added a polling mechanism to fetch the number of unacknowledged alerts; a new metric (FreeDiskToDataSizeRatio) was added.
* Another iteration of Storage Node UI: changes to the storage node detail page; the load table and the details section are next to each other to save some space on the screen; also removed the top label/toolstrip.
* Another iteration of Storage Node UI: colors and discretization of the FreeDiskToDataSizeRatio metric into human-readable form.
* Removed unused code, formatting.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java index bfdbcc9..7cc2ef1 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java @@ -52,7 +52,7 @@ import org.rhq.enterprise.gui.coregui.client.util.message.Message; * * @author Jirka Kremser */ -public class StorageNodeAdminView extends EnhancedVLayout implements HasViewName, BookmarkableView { +public class StorageNodeAdminView extends EnhancedVLayout implements/* HasViewName,*/ BookmarkableView {
public static final ViewName VIEW_ID = new ViewName("StorageNodes", MSG.view_adminTopology_storageNodes(), IconEnum.STORAGE_NODE); @@ -190,9 +190,7 @@ public class StorageNodeAdminView extends EnhancedVLayout implements HasViewName GWTServiceLookup.getStorageService().findNotAcknowledgedStorageNodeAlertsCount(new AsyncCallback<Integer>() { @Override public void onSuccess(Integer result) { - alerts.setTitle(alerts.getTitle() - + (result != 0 ? " <font color='#CC0000;'>(" + result + ")</font>" : " (" + result - + ")")); + alerts.setTitle(StorageNodeAdminView.getAlertsString(alerts.getTitle(), result)); schedule(5 * 1000); }
@@ -205,9 +203,15 @@ public class StorageNodeAdminView extends EnhancedVLayout implements HasViewName }.run(); }
- @Override - public ViewName getViewName() { - return VIEW_ID; +// @Override +// public ViewName getViewName() { +// return VIEW_ID; +// } + + public static String getAlertsString(String prefix, int numOfUnackAlerts) { + return prefix + + (numOfUnackAlerts != 0 ? " <font color='#CC0000;'>(" + numOfUnackAlerts + ")</font>" : " (" + + numOfUnackAlerts + ")"); }
private static final class TabInfo { diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java index 542afb5..b98697b 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java @@ -31,7 +31,6 @@ import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDat import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_RESOURCE_ID;
import java.util.ArrayList; -import java.util.Arrays; import java.util.List;
import com.google.gwt.i18n.client.NumberFormat; @@ -70,6 +69,10 @@ import org.rhq.enterprise.server.measurement.util.MeasurementUtils; * @author Jirka Kremser */ public class StorageNodeDatasource extends RPCDataSource<StorageNodeLoadComposite, StorageNodeCriteria> { + public static final String OK_COLOR = "color: #26aa26;"; + public static final String WARN_COLOR = "color: #ed9b26;"; + public static final String DONT_MISS_ME_COLOR = "font-weight: bold; color: #d64949;"; + // filters public static final String FILTER_ADDRESS = FIELD_ADDRESS.propertyName(); public static final String FILTER_OPERATION_MODE = FIELD_OPERATION_MODE.propertyName(); @@ -213,38 +216,23 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNodeLoadComposit } int value = from.getUnackAlerts(); record.setAttribute(FIELD_ALERTS.propertyName(), "New Alerts" - + (value != 0 ? " <font color='#CC0000;'>(" + value + ")</font>" : " (" + value + ")")); + + (value != 0 ? " <span style='color: #CC0000;'>(" + value + ")</span>" : " (" + value + ")")); String memory = null; if (from.getHeapPercentageUsed() != null && from.getHeapPercentageUsed().getAggregate().getAvg() != null) memory = MeasurementConverterClient.format(from.getHeapPercentageUsed().getAggregate().getAvg(), from .getHeapPercentageUsed().getUnits(), true); record.setAttribute(FIELD_MEMORY.propertyName(), memory); - String disk = from.getFreeDiskToDataSizeRatio() != null ? 
NumberFormat.getFormat("0.0").format( - from.getFreeDiskToDataSizeRatio().getAvg()) : MSG.view_measure_nan(); - record.setAttribute(FIELD_DISK.propertyName(), disk); - return record; - } - - - private ListGridRecord makeListGridRecord(MeasurementAggregateWithUnits aggregateWithUnits, String name, - String hover, String id) { - ListGridRecord record = new ListGridRecord(); - record.setAttribute("id", id); - record.setAttribute(StorageNodeLoadCompositeDatasourceField.FIELD_NAME.propertyName(), name); - record.setAttribute( - StorageNodeLoadCompositeDatasourceField.FIELD_MIN.propertyName(), - MeasurementConverterClient.format(aggregateWithUnits.getAggregate().getMin(), - aggregateWithUnits.getUnits(), true)); - record.setAttribute("avgFloat", aggregateWithUnits.getAggregate().getAvg()); - record.setAttribute( - StorageNodeLoadCompositeDatasourceField.FIELD_AVG.propertyName(), - MeasurementConverterClient.format(aggregateWithUnits.getAggregate().getAvg(), - aggregateWithUnits.getUnits(), true)); - record.setAttribute( - StorageNodeLoadCompositeDatasourceField.FIELD_MAX.propertyName(), - MeasurementConverterClient.format(aggregateWithUnits.getAggregate().getMax(), - aggregateWithUnits.getUnits(), true)); - record.setAttribute("hover", hover); + if (from.getFreeDiskToDataSizeRatio() != null) { + if (from.getFreeDiskToDataSizeRatio().getMax() < 0.7) { + record.setAttribute(FIELD_DISK.propertyName(), + "<span style='" + DONT_MISS_ME_COLOR + "'>Insufficient</span>"); + } else if (from.getFreeDiskToDataSizeRatio().getMax() < 1.5) { + record.setAttribute(FIELD_DISK.propertyName(), "<span style='" + WARN_COLOR + "'>Warning</span>"); + } else { + record.setAttribute(FIELD_DISK.propertyName(), + "<span style='" + OK_COLOR + "'>Sufficient</span>"); + } + } return record; }
@@ -349,54 +337,31 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNodeLoadComposit }
private static void executeFetch(final StorageNode node, final AsyncCallback<StorageNodeLoadComposite> callback) { - GWTServiceLookup.getStorageService().getLoad(node, 1, MeasurementUtils.UNIT_HOURS, callback); + GWTServiceLookup.getStorageService().getLoad(node, 8, MeasurementUtils.UNIT_HOURS, callback); }
private ListGridRecord[] makeListGridRecords(StorageNodeLoadComposite loadComposite) { - List<ListGridRecord> recordsList = new ArrayList<ListGridRecord>(6); - List<List<Object>> loadFields = Arrays - .<List<Object>> asList( - Arrays.<Object> asList(loadComposite.getHeapCommitted(), "Heap Maximum", - "The limit the RHQ storage node was started with. This corresponds with the -Xmx JVM option.", - "heapMax"), - Arrays.<Object> asList(loadComposite.getHeapUsed(), "Heap Used", - "Amount of memory actually used by the RHQ storage node", "heapUsed"), - Arrays.<Object> asList(loadComposite.getHeapPercentageUsed(), "Heap Percent Used", - "This value is calculated by dividing Heap Used by Heap Maximum.", HEAP_PERCENTAGE_KEY), - Arrays.<Object> asList(loadComposite.getLoad(), "Load", "Data stored on the node", "load"), - Arrays.<Object> asList( - loadComposite.getDataDiskUsedPercentage(), - "Data Disk Space Percent Used", - "Percentage of disk space used by data files on the partitions that contain the data files. If multiple data locations are specified then the aggregate accross all the partitions that contain data files is reported.", - DATA_DISK_SPACE_PERCENTAGE_KEY), - Arrays.<Object> asList( - loadComposite.getTotalDiskUsedPercentage(), - "Total Disk Space Percent Used", - "Percentage of total disk space used (system and Storage Node) on the partitions that contain the data files. 
If multiple data locations are specified then the aggregate accross all the partitions that contain data files is reported.", - TOTAL_DISK_SPACE_PERCENTAGE_KEY), Arrays.<Object> asList(loadComposite.getDataDiskUsed(), - "Total Disk Space Used", - "Total space used on disk by all data files, commit logs, and saved caches.", "totaldisk"), - Arrays.<Object> asList(loadComposite.getActuallyOwns(), "Ownership", - "Refers to the percentage of keys that a node owns.", "ownership")); - for (List<Object> aggregateWithUnitsList : loadFields) { - if (aggregateWithUnitsList.get(0) != null) { - recordsList.add(makeListGridRecord((MeasurementAggregateWithUnits) aggregateWithUnitsList.get(0), - (String) aggregateWithUnitsList.get(1), (String) aggregateWithUnitsList.get(2), - (String) aggregateWithUnitsList.get(3))); - } - } - if (loadComposite.getTokens() != null) { - ListGridRecord tokens = new ListGridRecord(); - tokens.setAttribute("id", "tokens"); - tokens.setAttribute("name", "Number of Tokens"); - tokens.setAttribute("hover", "Number of partitions of the ring that a node owns."); - tokens.setAttribute("min", loadComposite.getTokens().getMin()); - tokens.setAttribute("avg", loadComposite.getTokens().getAvg()); - tokens.setAttribute("max", loadComposite.getTokens().getMax()); - recordsList.add(tokens); - } - + List<ListGridRecord> recordsList = new ArrayList<ListGridRecord>(6) { + private static final long serialVersionUID = 1L;
+ @Override + public boolean add(ListGridRecord record) { + if (record != null) + return super.add(record); + return false; + } + }; + + // heap related metrics +// recordsList.add(makeListGridRecord(loadComposite.getHeapCommitted(), "Heap Maximum", "The limit the RHQ storage node was started with. This corresponds with the -Xmx JVM option.", "heapMax")); + recordsList.add(makeListGridRecord(loadComposite.getHeapUsed(), "Heap Used", "Amount of memory actually used by the RHQ storage node", "heapUsed")); + recordsList.add(makeListGridRecord(loadComposite.getHeapPercentageUsed(), "Heap Percent Used", "This value is calculated by dividing Heap Used by Heap Maximum.", HEAP_PERCENTAGE_KEY)); + + // disk related metrics + recordsList.add(makeListGridRecord(loadComposite.getDataDiskUsed(), "Total Disk Space Used", "Total space used on disk by all data files, commit logs, and saved caches.", "totaldisk")); + recordsList.add(makeListGridRecord(loadComposite.getTotalDiskUsedPercentage(),"Total Disk Space Percent Used", "Percentage of total disk space used (system and Storage Node) on the partitions that contain the data files. If multiple data locations are specified then the aggregate accross all the partitions that contain data files is reported.", TOTAL_DISK_SPACE_PERCENTAGE_KEY)); + recordsList.add(makeListGridRecord(loadComposite.getDataDiskUsedPercentage(), "Data Disk Space Percent Used","Percentage of disk space used by data files on the partitions that contain the data files. 
If multiple data locations are specified then the aggregate accross all the partitions that contain data files is reported.", DATA_DISK_SPACE_PERCENTAGE_KEY)); + if (loadComposite.getFreeDiskToDataSizeRatio() != null){ MeasurementAggregate aggregate = loadComposite.getFreeDiskToDataSizeRatio(); NumberFormat nf = NumberFormat.getFormat("0.0"); @@ -408,9 +373,22 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNodeLoadComposit record.setAttribute("avg", nf.format(aggregate.getAvg())); record.setAttribute("avgFloat", aggregate.getAvg()); record.setAttribute("max", nf.format(aggregate.getMax())); - recordsList.add(record); } +// recordsList.add(makeListGridRecord(loadComposite.getLoad(), "Load", "Data stored on the node", "load")); + + // other metrics + recordsList.add(makeListGridRecord(loadComposite.getActuallyOwns(), "Ownership", "Refers to the percentage of keys that a node owns.", "ownership")); + if (loadComposite.getTokens() != null) { + ListGridRecord tokens = new ListGridRecord(); + tokens.setAttribute("id", "tokens"); + tokens.setAttribute("name", "Number of Tokens"); + tokens.setAttribute("hover", "Number of partitions of the ring that a node owns."); + tokens.setAttribute("min", loadComposite.getTokens().getMin()); + tokens.setAttribute("avg", loadComposite.getTokens().getAvg()); + tokens.setAttribute("max", loadComposite.getTokens().getMax()); + recordsList.add(tokens); + }
ListGridRecord[] records = recordsList.toArray(new ListGridRecord[recordsList.size()]); return records; @@ -418,6 +396,7 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNodeLoadComposit
private ListGridRecord makeListGridRecord(MeasurementAggregateWithUnits aggregateWithUnits, String name, String hover, String id) { + if (aggregateWithUnits == null) return null; ListGridRecord record = new ListGridRecord(); record.setAttribute("id", id); record.setAttribute(StorageNodeLoadCompositeDatasourceField.FIELD_NAME.propertyName(), name); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java index f6f08b4..942a184 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java @@ -20,6 +20,7 @@ package org.rhq.enterprise.gui.coregui.client.admin.storage;
import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_ADDRESS; +import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_ALERTS; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_CQL_PORT; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_CTIME; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_JMX_PORT; @@ -34,18 +35,15 @@ import com.google.gwt.user.client.Timer; import com.google.gwt.user.client.rpc.AsyncCallback; import com.smartgwt.client.types.Overflow; import com.smartgwt.client.types.VisibilityMode; -import com.smartgwt.client.util.SC; -import com.smartgwt.client.widgets.IButton; -import com.smartgwt.client.widgets.events.ClickEvent; -import com.smartgwt.client.widgets.events.ClickHandler; +import com.smartgwt.client.widgets.HTMLFlow; import com.smartgwt.client.widgets.form.DynamicForm; import com.smartgwt.client.widgets.form.fields.FormItem; import com.smartgwt.client.widgets.form.fields.StaticTextItem; +import com.smartgwt.client.widgets.layout.LayoutSpacer; import com.smartgwt.client.widgets.layout.SectionStack; import com.smartgwt.client.widgets.layout.SectionStackSection;
import org.rhq.core.domain.cloud.StorageNode; -import org.rhq.core.domain.cloud.StorageNode.OperationMode; import org.rhq.core.domain.criteria.ResourceCriteria; import org.rhq.core.domain.criteria.StorageNodeCriteria; import org.rhq.core.domain.resource.Resource; @@ -62,7 +60,7 @@ import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.configura import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.operation.history.ResourceOperationHistoryListView; import org.rhq.enterprise.gui.coregui.client.util.Log; import org.rhq.enterprise.gui.coregui.client.util.StringUtility; -import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedToolStrip; +import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedHLayout; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; import org.rhq.enterprise.gui.coregui.client.util.message.Message;
@@ -77,12 +75,17 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab
private static final int SECTION_COUNT = 3; private final SectionStack sectionStack; - private SectionStackSection detailsSection; - private SectionStackSection loadSection; - private SectionStackSection historySection; + private EnhancedVLayout detailsLayout; + private EnhancedHLayout detailsAndLoadLayout; + private EnhancedVLayout loadLayout; + private SectionStackSection configurationSection; + private SectionStackSection operationSection; + private SectionStackSection detailsAndLoadSection; + private StaticTextItem alertsItem; private int expandedSection = -1;
private volatile int initSectionCount = 0; + private int unackAlerts = -1;
public StorageNodeDetailView(int storageNodeId) { super(); @@ -95,8 +98,8 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab sectionStack.setVisibilityMode(VisibilityMode.MULTIPLE); sectionStack.setWidth100(); sectionStack.setHeight100(); - sectionStack.setMargin(5); - sectionStack.setOverflow(Overflow.VISIBLE); +// sectionStack.setMargin(5); +// sectionStack.setOverflow(Overflow.VISIBLE); }
public StorageNodeDetailView(int storageNodeId, int expandedSection) { @@ -121,7 +124,7 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab final StorageNode node = storageNodes.get(0); Resource res = node.getResource(); if (res != null) { - fetchResourceComposite(node.getResource().getId()); + fetchResourceComposite(res.getId()); } else { // skip this if the resource id is not there initSectionCount++; @@ -137,6 +140,7 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab initSectionCount = SECTION_COUNT; } }); + fetchUnackAlerts(storageNodeId); }
private void fetchResourceComposite(final int resourceId) { @@ -156,7 +160,6 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab public void onSuccess(PageList<ResourceComposite> result) { if (result.isEmpty()) { onFailure(new Exception("Resource with id [" + resourceId + "] does not exist.")); - initSectionCount = SECTION_COUNT; } else { final ResourceComposite resourceComposite = result.get(0); // prepareOperationHistory(resourceComposite); @@ -166,6 +169,31 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab } }); } + + private void fetchUnackAlerts(final int storageNodeId) { + GWTServiceLookup.getStorageService().findNotAcknowledgedStorageNodeAlertsCounts(Arrays.asList(storageNodeId), + new AsyncCallback<List<Integer>>() { + @Override + public void onFailure(Throwable caught) { + Message message = new Message(MSG.view_inventory_resource_loadFailed(String.valueOf(storageNodeId)), + Message.Severity.Warning); + CoreGUI.goToView(InventoryView.VIEW_ID.getName(), message); + initSectionCount = SECTION_COUNT; + } + + @Override + public void onSuccess(List<Integer> result) { + if (result.isEmpty()) { + onFailure(new Exception("Resource with id [" + storageNodeId + "] does not exist.")); + } else { + unackAlerts = result.get(0); + if (alertsItem != null) { + alertsItem.setValue(StorageNodeAdminView.getAlertsString("New Alerts", unackAlerts)); + } + } + } + }); + }
public boolean isInitialized() { return initSectionCount >= SECTION_COUNT; @@ -182,14 +210,18 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab
public void run() { if (isInitialized()) { - if (null != detailsSection) { - sectionStack.addSection(detailsSection); - } - if (null != loadSection) { - sectionStack.addSection(loadSection); + if (null != detailsAndLoadLayout) { + LayoutSpacer spacer = new LayoutSpacer(); + spacer.setWidth(30); + detailsAndLoadLayout.setMembers(detailsLayout, spacer, loadLayout); + detailsAndLoadLayout.setHeight(220); + detailsAndLoadSection = new SectionStackSection("Storage Node Information"); + detailsAndLoadSection.setExpanded(true); + detailsAndLoadSection.setItems(detailsAndLoadLayout); + sectionStack.addSection(detailsAndLoadSection); } - if (null != historySection) { - sectionStack.addSection(historySection); + if (null != configurationSection) { + sectionStack.addSection(configurationSection); } // if (expandedSection != -1) { // for (int i = 1; i < SECTION_COUNT; i++) { @@ -222,13 +254,12 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab final StaticTextItem nameItem = new StaticTextItem(FIELD_ADDRESS.propertyName(), FIELD_ADDRESS.title()); nameItem.setValue("<b>" + storageNode.getAddress() + "</b>");
-// final TextItem jmxPortItem = new TextItem(FIELD_JMX_PORT.propertyName(), FIELD_JMX_PORT.title()); final StaticTextItem jmxPortItem = new StaticTextItem(FIELD_JMX_PORT.propertyName(), FIELD_JMX_PORT.title()); jmxPortItem.setValue(storageNode.getJmxPort());
- final StaticTextItem jmxConnectionUrlItem = new StaticTextItem("jmxConnectionUrl", - MSG.view_adminTopology_storageNode_jmxConnectionUrl()); - jmxConnectionUrlItem.setValue(storageNode.getJMXConnectionURL()); +// final StaticTextItem jmxConnectionUrlItem = new StaticTextItem("jmxConnectionUrl", +// MSG.view_adminTopology_storageNode_jmxConnectionUrl()); +// jmxConnectionUrlItem.setValue(storageNode.getJMXConnectionURL());
final StaticTextItem cqlPortItem = new StaticTextItem(FIELD_CQL_PORT.propertyName(), FIELD_CQL_PORT.title()); cqlPortItem.setValue(storageNode.getCqlPort()); @@ -256,47 +287,50 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab StaticTextItem lastUpdateItem = new StaticTextItem(FIELD_MTIME.propertyName(), FIELD_MTIME.title()); lastUpdateItem.setValue(TimestampCellFormatter.format(Long.valueOf(storageNode.getMtime()), TimestampCellFormatter.DATE_TIME_FORMAT_LONG)); - - IButton saveButton = new IButton(); - saveButton.setOverflow(Overflow.VISIBLE); - saveButton.setTitle(MSG.common_button_save()); - saveButton.addClickHandler(new ClickHandler() { - public void onClick(ClickEvent event) { - if (form.validate()) { -// storageNode.setOperationMode(OperationMode.valueOf(operationModeItem.getValueAsString())); - storageNode.setOperationMode(OperationMode.valueOf((String) operationModeItem.getValue())); - SC.say(storageNode.toString()); - // TODO: logic - } - } - }); - List<FormItem> formItems = new ArrayList<FormItem>(8); - formItems.addAll(Arrays.asList(nameItem, jmxPortItem, cqlPortItem, jmxConnectionUrlItem)); + + alertsItem = new StaticTextItem(FIELD_ALERTS.propertyName(), FIELD_ALERTS.title()); + if (unackAlerts != -1) { + alertsItem.setValue(StorageNodeAdminView.getAlertsString("New Alerts", unackAlerts)); + } + + StaticTextItem memoryStatusItem = new StaticTextItem("memoryStatus", "Memory"); + memoryStatusItem.setValue("No action needed"); + + StaticTextItem diskStatusItem = new StaticTextItem("mdiskStatus", "Disk"); + diskStatusItem.setValue("No action needed"); + + List<FormItem> formItems = new ArrayList<FormItem>(6); + formItems.addAll(Arrays.asList(nameItem, resourceItem, jmxPortItem, cqlPortItem/*, jmxConnectionUrlItem*/)); if (!CoreGUI.isDebugMode()) formItems.add(operationModeItem); // debug mode fails if this item is added - formItems.addAll(Arrays.asList(resourceItem, installationDateItem, lastUpdateItem)); + 
formItems.addAll(Arrays.asList(installationDateItem, lastUpdateItem, alertsItem, memoryStatusItem, diskStatusItem)); form.setItems(formItems.toArray(new FormItem[]{})); - - EnhancedToolStrip footer = new EnhancedToolStrip(); - footer.setPadding(5); - footer.setWidth100(); - footer.setMembersMargin(15); - footer.addMember(saveButton); - - SectionStackSection section = new SectionStackSection(MSG.common_title_details()); - section.setExpanded(expandedSection != -1 ? expandedSection == 0 : true);
- section.setItems(form); - detailsSection = section; + detailsLayout = new EnhancedVLayout(); + detailsLayout.setWidth(450); + detailsLayout.addMember(form); + if (detailsAndLoadLayout == null) { + detailsAndLoadLayout = new EnhancedHLayout(0); + } initSectionCount++; }
private void prepareLoadSection(SectionStack stack, final StorageNode storageNode) { StorageNodeLoadComponent loadDataComponent = new StorageNodeLoadComponent(storageNode.getId()); - SectionStackSection section = new SectionStackSection("Load"); - section.setItems(loadDataComponent); - section.setExpanded(expandedSection != -1 ? expandedSection == 1 : true); - - loadSection = section; + loadDataComponent.setExtraSpace(5); + loadLayout = new EnhancedVLayout(); + loadLayout.setWidth100(); + LayoutSpacer spacer = new LayoutSpacer(); + spacer.setHeight(10); +// HTMLFlow loadLabel = new HTMLFlow("<span style='font-weight:bold'>Status</span>"); + HTMLFlow loadLabel = new HTMLFlow("Status"); + loadLabel.addStyleName("formTitle"); + loadLabel.setTooltip("Contains selected metrics collected for last 8 hours."); + loadLabel.setHoverWidth(300); + loadLayout.setMembers(spacer, loadLabel, loadDataComponent); + + if (detailsAndLoadLayout == null) { + detailsAndLoadLayout = new EnhancedHLayout(); + } initSectionCount++; }
@@ -306,7 +340,7 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab section.setItems(historyView); section.setExpanded(false);
- historySection = section; + operationSection = section; initSectionCount++; }
@@ -314,9 +348,10 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab ResourceConfigurationEditView editorView = new ResourceConfigurationEditView(resourceComposite); SectionStackSection section = new SectionStackSection("Configuration"); section.setItems(editorView); - section.setExpanded(expandedSection != -1 && expandedSection == 2); + section.setExpanded(true); + section.setCanCollapse(false);
- historySection = section; + configurationSection = section; initSectionCount++; }
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeLoadComponent.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeLoadComponent.java index eddca2f..556e591 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeLoadComponent.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeLoadComponent.java @@ -18,8 +18,13 @@ */ package org.rhq.enterprise.gui.coregui.client.admin.storage;
+import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasource.DONT_MISS_ME_COLOR; +import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasource.OK_COLOR; +import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasource.WARN_COLOR; + import java.util.List;
+import com.smartgwt.client.types.Autofit; import com.smartgwt.client.widgets.IButton; import com.smartgwt.client.widgets.events.ClickEvent; import com.smartgwt.client.widgets.events.ClickHandler; @@ -39,9 +44,6 @@ import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; */ public class StorageNodeLoadComponent extends EnhancedVLayout { private final ListGrid loadGrid; - private static final String OK_COLOR = "color:#26aa26;"; - private static final String WARN_COLOR = "color:#ed9b26;"; - private static final String DONT_MISS_ME_COLOR = "font-weight:bold; color:#d64949;";
public StorageNodeLoadComponent(int storageNodeId) { this(storageNodeId, null, null); @@ -65,10 +67,10 @@ public class StorageNodeLoadComponent extends EnhancedVLayout { } else { return OK_COLOR; } - } else if ("avg".equals(getFieldName(colNum)) + } else if ("max".equals(getFieldName(colNum)) && StorageNodeLoadCompositeDatasource.FREE_DISK_TO_DATA_SIZE_RATIO_KEY.equals(record .getAttribute("id"))) { - if (record.getAttributeAsFloat("avgFloat") < 1) { + if (record.getAttributeAsFloat("avgFloat") < .7) { return DONT_MISS_ME_COLOR; } else if (record.getAttributeAsFloat("avgFloat") < 1.5) { return WARN_COLOR; @@ -82,7 +84,8 @@ public class StorageNodeLoadComponent extends EnhancedVLayout { } }; loadGrid.setWidth100(); - loadGrid.setHeight(230); + loadGrid.setHeight(200); + loadGrid.setAutoFitData(Autofit.VERTICAL); StorageNodeLoadCompositeDatasource datasource = StorageNodeLoadCompositeDatasource.getInstance(storageNodeId); List<ListGridField> fields = datasource.getListGridFields(); loadGrid.setFields(fields.toArray(new ListGridField[fields.size()])); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java index 3370c87..c0fdd85 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java @@ -151,9 +151,8 @@ public class StorageNodeTableView extends TableSection<StorageNodeDatasource> { public void onSuccess(List<Integer> result) { for (int i = 0; i < records.length; i++) { int value = result.get(i); - records[i].setAttribute(FIELD_ALERTS.propertyName(), "New Alerts" - + (value != 0 ? 
" <font color='#CC0000;'>(" + value + ")</font>" : " (" + value - + ")")); + records[i].setAttribute(FIELD_ALERTS.propertyName(), + StorageNodeAdminView.getAlertsString("New Alerts", value)); listGrid.setData(records); } schedule(10 * 1000);
commit 495c35d7681ba12df56c7500bb110caf19fc4aff Author: Jirka Kremser jkremser@redhat.com Date: Mon Jul 29 14:38:14 2013 +0200
squashed commit: If there is just one group definition in ConfigurationEditor, normal form is used instead SectionStack component. If there are more than 3 items the combo-box component is used (instead 5).
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/configuration/ConfigurationEditor.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/configuration/ConfigurationEditor.java index 02d25eb..4423623 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/configuration/ConfigurationEditor.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/configuration/ConfigurationEditor.java @@ -427,8 +427,8 @@ public class ConfigurationEditor extends EnhancedVLayout { layout.addMember(form); } if (groupDefinitions.size() == 1) { - propertyDefinitions.addAll(configurationDefinition.getPropertiesInGroup(groupDefinitions.get(0) - .getName())); + propertyDefinitions = new ArrayList<PropertyDefinition>( + configurationDefinition.getPropertiesInGroup(groupDefinitions.get(0).getName())); DynamicForm groupForm = buildPropertiesForm(propertyDefinitions, configuration); groupForm.setIsGroup(true); groupForm.setGroupTitle(groupDefinitions.get(0).getDisplayName()); @@ -454,10 +454,10 @@ public class ConfigurationEditor extends EnhancedVLayout { sectionStack.addSection(buildGroupSection(definition)); }
- if (groupDefinitions.size() > 1) { +// if (groupDefinitions.size() > 1) { this.toolStrip = buildToolStrip(layout, sectionStack); layout.addMember(toolStrip); - } +// } layout.addMember(sectionStack); }
@@ -1331,7 +1331,7 @@ public class ConfigurationEditor extends EnhancedVLayout { valueItem = new ComboBoxItem(); ((ComboBoxItem) valueItem).setAddUnknownValues(true); } else { - if (valueOptions.size() > 5) { + if (valueOptions.size() > 3) { valueItem = new SelectItem(); } else { valueItem = new RadioGroupItem();
commit 527a00bdbf6851edea11e547d30945985144ac3e Author: Jirka Kremser jkremser@redhat.com Date: Thu Jul 25 14:57:37 2013 +0200
Another iteration of Storage Node UI: added polling mechanism to fetch the number of unack alerts; new metric (FreeDiskToDataSizeRatio) was added.
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeLoadComposite.java b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeLoadComposite.java index 2c0b8f8..e00a25c 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeLoadComposite.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeLoadComposite.java @@ -36,6 +36,7 @@ public class StorageNodeLoadComposite implements Serializable { private StorageNode storageNode; private long beginTime; private long endTime; + private int unackAlerts;
private MeasurementAggregateWithUnits heapCommitted; private MeasurementAggregateWithUnits heapUsed; @@ -84,6 +85,14 @@ public class StorageNodeLoadComposite implements Serializable { this.endTime = endTime; }
+ public int getUnackAlerts() { + return unackAlerts; + } + + public void setUnackAlerts(int unackAlerts) { + this.unackAlerts = unackAlerts; + } + /** * @return heapCommitted A computed metric for the amount of memory that is committed for the JVM to use. */ @@ -219,14 +228,12 @@ public class StorageNodeLoadComposite implements Serializable { this.actuallyOwns = actuallyOwns; }
- /* (non-Javadoc) - * @see java.lang.Object#toString() - */ public String toString() { StringBuilder builder = new StringBuilder(); builder.append("storageNode.addresss=").append(storageNode.getAddress()).append(", "); builder.append("beginTime=").append(beginTime).append(", "); - builder.append("heapCommitted=").append(heapCommitted).append(", "); + builder.append("beginTime=").append(beginTime).append(", "); + builder.append("unackAlerts=").append(unackAlerts).append(", "); builder.append("heapUsed=").append(heapUsed).append(", "); builder.append("heapPercentageUsed=").append(heapPercentageUsed).append(", "); builder.append("load=").append(load).append(", "); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java index 31378f1..bfdbcc9 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java @@ -106,7 +106,7 @@ public class StorageNodeAdminView extends EnhancedVLayout implements HasViewName } });
- tabset.setTabs(table, settings, alerts, backup); + tabset.setTabs(table, settings, alerts/*, backup*/); addMember(tabset); }
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java index e9e81f3..542afb5 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java @@ -19,6 +19,7 @@ package org.rhq.enterprise.gui.coregui.client.admin.storage;
import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_ADDRESS; +import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_ALERTS; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_CQL_PORT; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_CTIME; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_DISK; @@ -33,6 +34,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List;
+import com.google.gwt.i18n.client.NumberFormat; import com.google.gwt.user.client.rpc.AsyncCallback; import com.smartgwt.client.data.DSRequest; import com.smartgwt.client.data.DSResponse; @@ -72,7 +74,7 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNodeLoadComposit public static final String FILTER_ADDRESS = FIELD_ADDRESS.propertyName(); public static final String FILTER_OPERATION_MODE = FIELD_OPERATION_MODE.propertyName(); private static StorageNodeDatasource instance; - + private StorageNodeDatasource() { super(); setID("storageNode"); @@ -105,33 +107,38 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNodeLoadComposit fields.add(idField);
fields.add(FIELD_ADDRESS.getListGridField("*")); - fields.add(FIELD_JMX_PORT.getListGridField("90")); - ListGridField cqlField = FIELD_CQL_PORT.getListGridField("90"); - cqlField.setHidden(true); - fields.add(cqlField); - fields.add(FIELD_OPERATION_MODE.getListGridField("90")); + fields.add(FIELD_ALERTS.getListGridField("120"));
- ListGridField createdTimeField = FIELD_CTIME.getListGridField("120"); - TimestampCellFormatter.prepareDateField(createdTimeField); - fields.add(createdTimeField); - - ListGridField field = FIELD_MEMORY.getListGridField("90"); + ListGridField field = FIELD_MEMORY.getListGridField("120"); field.setShowHover(true); field.setHoverCustomizer(new HoverCustomizer() { public String hoverHTML(Object value, ListGridRecord record, int rowNum, int colNum) { - return "Average memory taken for last one hour."; + return "Average memory taken for last 8 hours."; } }); fields.add(field); - - field = FIELD_DISK.getListGridField("90"); + + field = FIELD_DISK.getListGridField("120"); field.setShowHover(true); field.setHoverCustomizer(new HoverCustomizer() { public String hoverHTML(Object value, ListGridRecord record, int rowNum, int colNum) { - return "Average disk space taken for last one hour."; + return "Average disk Ratio of (Free Disk)/(Data File Size) for last 8 hours. A value below 1 is not " + + "recommended since a compaction or repair process could double the amount of disk " + + "space used by data files. If multiple data locations are specified then the " + + "aggregate accross all the partitions that contain data files is reported."; } }); fields.add(field); + + fields.add(FIELD_JMX_PORT.getListGridField("90")); +// ListGridField cqlField = FIELD_CQL_PORT.getListGridField("90"); +// cqlField.setHidden(true); +// fields.add(cqlField); +// fields.add(FIELD_OPERATION_MODE.getListGridField("90")); + + ListGridField createdTimeField = FIELD_CTIME.getListGridField("120"); + TimestampCellFormatter.prepareDateField(createdTimeField); + fields.add(createdTimeField);
ListGridField resourceIdField = FIELD_RESOURCE_ID.getListGridField("120"); // resourceIdField.setHidden(true); @@ -204,11 +211,16 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNodeLoadComposit record.setAttribute(FIELD_RESOURCE_ID.propertyName(), node.getResource().getId()); } } - String memory = MeasurementConverterClient.format(from.getHeapPercentageUsed().getAggregate().getAvg(), - from.getHeapPercentageUsed().getUnits(), true); + int value = from.getUnackAlerts(); + record.setAttribute(FIELD_ALERTS.propertyName(), "New Alerts" + + (value != 0 ? " <font color='#CC0000;'>(" + value + ")</font>" : " (" + value + ")")); + String memory = null; + if (from.getHeapPercentageUsed() != null && from.getHeapPercentageUsed().getAggregate().getAvg() != null) + memory = MeasurementConverterClient.format(from.getHeapPercentageUsed().getAggregate().getAvg(), from + .getHeapPercentageUsed().getUnits(), true); record.setAttribute(FIELD_MEMORY.propertyName(), memory); - String disk = MeasurementConverterClient.format(from.getPartitionDiskUsedPercentage().getAggregate().getAvg(), - from.getPartitionDiskUsedPercentage().getUnits(), true); + String disk = from.getFreeDiskToDataSizeRatio() != null ? NumberFormat.getFormat("0.0").format( + from.getFreeDiskToDataSizeRatio().getAvg()) : MSG.view_measure_nan(); record.setAttribute(FIELD_DISK.propertyName(), disk); return record; } @@ -258,6 +270,8 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNodeLoadComposit public static final String HEAP_PERCENTAGE_KEY = "heapPercentage"; public static final String DATA_DISK_SPACE_PERCENTAGE_KEY = "dataDiskSpacePercentage"; public static final String TOTAL_DISK_SPACE_PERCENTAGE_KEY = "totalDiskSpacePercentage"; + public static final String FREE_DISK_TO_DATA_SIZE_RATIO_KEY = "freeDiskToDataSizeRatio"; + private int id;
public static StorageNodeLoadCompositeDatasource getInstance(int id) { @@ -359,14 +373,11 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNodeLoadComposit loadComposite.getTotalDiskUsedPercentage(), "Total Disk Space Percent Used", "Percentage of total disk space used (system and Storage Node) on the partitions that contain the data files. If multiple data locations are specified then the aggregate accross all the partitions that contain data files is reported.", - TOTAL_DISK_SPACE_PERCENTAGE_KEY), - Arrays.<Object> asList( - loadComposite.getDataDiskUsed(), + TOTAL_DISK_SPACE_PERCENTAGE_KEY), Arrays.<Object> asList(loadComposite.getDataDiskUsed(), "Total Disk Space Used", - "Total space used on disk by all data files, commit logs, and saved caches.", - "totaldisk"), - Arrays.<Object> asList(loadComposite.getActuallyOwns(), - "Ownership", "Refers to the percentage of keys that a node owns.", "ownership")); + "Total space used on disk by all data files, commit logs, and saved caches.", "totaldisk"), + Arrays.<Object> asList(loadComposite.getActuallyOwns(), "Ownership", + "Refers to the percentage of keys that a node owns.", "ownership")); for (List<Object> aggregateWithUnitsList : loadFields) { if (aggregateWithUnitsList.get(0) != null) { recordsList.add(makeListGridRecord((MeasurementAggregateWithUnits) aggregateWithUnitsList.get(0), @@ -388,14 +399,15 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNodeLoadComposit
if (loadComposite.getFreeDiskToDataSizeRatio() != null){ MeasurementAggregate aggregate = loadComposite.getFreeDiskToDataSizeRatio(); - + NumberFormat nf = NumberFormat.getFormat("0.0"); ListGridRecord record = new ListGridRecord(); - record.setAttribute("id", "freeDiskToDataSizeRatio"); + record.setAttribute("id", FREE_DISK_TO_DATA_SIZE_RATIO_KEY); record.setAttribute("name", "Free Disk To Data Size Ratio"); record.setAttribute("hover", "Ratio of (Free Disk)/(Data File Size). A value below 1 is not recommended since a compaction or repair process could double the amount of disk space used by data files. If multiple data locations are specified then the aggregate accross all the partitions that contain data files is reported."); - record.setAttribute("min", aggregate.getMin()); - record.setAttribute("avg", aggregate.getAvg()); - record.setAttribute("max", aggregate.getMax()); + record.setAttribute("min", nf.format(aggregate.getMin())); + record.setAttribute("avg", nf.format(aggregate.getAvg())); + record.setAttribute("avgFloat", aggregate.getAvg()); + record.setAttribute("max", nf.format(aggregate.getMax()));
recordsList.add(record); } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasourceField.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasourceField.java index ca69076..19ba5ee 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasourceField.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasourceField.java @@ -34,6 +34,8 @@ public enum StorageNodeDatasourceField {
FIELD_JMX_PORT("jmxPort", "JMX Port"),
+ FIELD_ALERTS("alerts", "Alerts"), + FIELD_CQL_PORT("cqlPort", "CQL Port"),
FIELD_OPERATION_MODE("operationMode", CoreGUI.getMessages().view_adminTopology_server_mode()), diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeLoadComponent.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeLoadComponent.java index b0522b1..eddca2f 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeLoadComponent.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeLoadComponent.java @@ -39,6 +39,9 @@ import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; */ public class StorageNodeLoadComponent extends EnhancedVLayout { private final ListGrid loadGrid; + private static final String OK_COLOR = "color:#26aa26;"; + private static final String WARN_COLOR = "color:#ed9b26;"; + private static final String DONT_MISS_ME_COLOR = "font-weight:bold; color:#d64949;";
public StorageNodeLoadComponent(int storageNodeId) { this(storageNodeId, null, null); @@ -51,28 +54,40 @@ public class StorageNodeLoadComponent extends EnhancedVLayout { loadGrid = new ListGrid() { @Override protected String getCellCSSText(ListGridRecord record, int rowNum, int colNum) { - if ("avg".equals(getFieldName(colNum)) + if ("avg".equals(getFieldName(colNum)) && (StorageNodeLoadCompositeDatasource.HEAP_PERCENTAGE_KEY.equals(record.getAttribute("id")) || StorageNodeLoadCompositeDatasource.DATA_DISK_SPACE_PERCENTAGE_KEY.equals(record.getAttribute("id")) || StorageNodeLoadCompositeDatasource.TOTAL_DISK_SPACE_PERCENTAGE_KEY.equals(record.getAttribute("id")))) { if (record.getAttributeAsFloat("avgFloat") > .85) { - return "font-weight:bold; color:#d64949;"; + return DONT_MISS_ME_COLOR; } else if (record.getAttributeAsFloat("avgFloat") > .7) { - return "color:#ed9b26;"; + return WARN_COLOR; } else { - return "color:#26aa26;"; + return OK_COLOR; } - } else { + } else if ("avg".equals(getFieldName(colNum)) + && StorageNodeLoadCompositeDatasource.FREE_DISK_TO_DATA_SIZE_RATIO_KEY.equals(record + .getAttribute("id"))) { + if (record.getAttributeAsFloat("avgFloat") < 1) { + return DONT_MISS_ME_COLOR; + } else if (record.getAttributeAsFloat("avgFloat") < 1.5) { + return WARN_COLOR; + } else { + return OK_COLOR; + } + } + else { return super.getCellCSSText(record, rowNum, colNum); } } }; loadGrid.setWidth100(); - loadGrid.setHeight(200); + loadGrid.setHeight(230); StorageNodeLoadCompositeDatasource datasource = StorageNodeLoadCompositeDatasource.getInstance(storageNodeId); List<ListGridField> fields = datasource.getListGridFields(); loadGrid.setFields(fields.toArray(new ListGridField[fields.size()])); loadGrid.setAutoFetchData(true); + loadGrid.setHoverWidth(300);
ToolStrip toolStrip = new ToolStrip(); @@ -107,7 +122,7 @@ public class StorageNodeLoadComponent extends EnhancedVLayout { } loadGrid.setDataSource(datasource); addMember(loadGrid); - addMember(toolStrip); +// addMember(toolStrip);
} } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java index 46dd734..3370c87 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java @@ -19,6 +19,7 @@ package org.rhq.enterprise.gui.coregui.client.admin.storage;
import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_ADDRESS; +import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_ALERTS; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_RESOURCE_ID;
import java.util.ArrayList; @@ -27,6 +28,7 @@ import java.util.LinkedHashMap; import java.util.List; import java.util.Map;
+import com.google.gwt.user.client.Timer; import com.google.gwt.user.client.rpc.AsyncCallback; import com.smartgwt.client.data.Criteria; import com.smartgwt.client.types.SortDirection; @@ -78,12 +80,20 @@ public class StorageNodeTableView extends TableSection<StorageNodeDatasource> { }
@Override + protected void doOnDraw() { + super.doOnDraw(); + scheduleUnacknowledgedAlertsPollingJob(getListGrid()); + } + + @Override protected void configureTable() { super.configureTable(); List<ListGridField> fields = getDataSource().getListGridFields(); ListGrid listGrid = getListGrid(); + listGrid.setAutoSaveEdits(false); listGrid.setFields(fields.toArray(new ListGridField[fields.size()])); listGrid.sort(FIELD_ADDRESS.propertyName(), SortDirection.ASCENDING); + listGrid.setHoverWidth(200); showCommonActions();
for (ListGridField field : fields) { @@ -126,6 +136,38 @@ public class StorageNodeTableView extends TableSection<StorageNodeDatasource> { } }
+ private void scheduleUnacknowledgedAlertsPollingJob(final ListGrid listGrid) { + new Timer() { + public void run() { + final ListGridRecord[] records = listGrid.getRecords(); + List<Integer> storageNodeIds = new ArrayList<Integer>(records.length); + for (ListGridRecord record : records) { + // todo: get the resource ids and create a method on SLSB that accepts resource ids to make it faster + storageNodeIds.add(record.getAttributeAsInt(FIELD_ID)); + } + GWTServiceLookup.getStorageService().findNotAcknowledgedStorageNodeAlertsCounts(storageNodeIds, + new AsyncCallback<List<Integer>>() { + @Override + public void onSuccess(List<Integer> result) { + for (int i = 0; i < records.length; i++) { + int value = result.get(i); + records[i].setAttribute(FIELD_ALERTS.propertyName(), "New Alerts" + + (value != 0 ? " <font color='#CC0000;'>(" + value + ")</font>" : " (" + value + + ")")); + listGrid.setData(records); + } + schedule(10 * 1000); + } + + @Override + public void onFailure(Throwable caught) { + schedule(60 * 1000); + } + }); + } + }.schedule(5 * 1000); + } + @Override protected ListGrid createListGrid() { ListGrid listGrid = new ListGrid() { @@ -136,7 +178,7 @@ public class StorageNodeTableView extends TableSection<StorageNodeDatasource> { } }; listGrid.setCanExpandRecords(true); -// listGrid.setAutoFetchData(true); + // listGrid.setAutoFetchData(true);
return listGrid; } @@ -148,38 +190,6 @@ public class StorageNodeTableView extends TableSection<StorageNodeDatasource> {
private void showCommonActions() { addInvokeOperationsAction(); - - // addTableAction(MSG.view_adminTopology_server_removeSelected(), null, new AuthorizedTableAction(this, - // TableActionEnablement.ANY, Permission.MANAGE_SETTINGS) { - // public void executeAction(final ListGridRecord[] selections, Object actionValue) { - // final List<String> selectedAddresses = getSelectedAddresses(selections); - // String message = MSG.view_adminTopology_message_removeServerConfirm(selectedAddresses.toString()); - // SC.ask(message, new BooleanCallback() { - // public void execute(Boolean confirmed) { - // if (confirmed) { - // SC.say("You've selected:\n\n" + selectedAddresses); - //// int[] selectedIds = getSelectedIds(selections); - //// GWTServiceLookup.getTopologyService().deleteServers(selectedIds, new AsyncCallback<Void>() { - //// public void onSuccess(Void arg0) { - //// Message msg = new Message(MSG.view_adminTopology_message_removedServer(String - //// .valueOf(selections.length)), Message.Severity.Info); - //// CoreGUI.getMessageCenter().notify(msg); - //// refresh(); - //// } - //// - //// public void onFailure(Throwable caught) { - //// CoreGUI.getErrorHandler().handleError( - //// MSG.view_adminTopology_message_removeServerFail(String - //// .valueOf(selections.length)) + " " + caught.getMessage(), caught); - //// refreshTableInfo(); - //// } - //// - //// }); - // } - // } - // }); - // } - // }); }
private void addInvokeOperationsAction() { @@ -263,26 +273,6 @@ public class StorageNodeTableView extends TableSection<StorageNodeDatasource> { }); } } - - // int[] selectedIds = getSelectedIds(selections); - // GWTServiceLookup.getTopologyService().updateServerMode(selectedIds, mode, - // new AsyncCallback<Void>() { - // public void onSuccess(Void result) { - // Message msg = new Message(MSG.view_adminTopology_message_setMode( - // String.valueOf(selections.length), mode.name()), Message.Severity.Info); - // CoreGUI.getMessageCenter().notify(msg); - // refresh(); - // } - // - // public void onFailure(Throwable caught) { - // CoreGUI.getErrorHandler().handleError( - // MSG.view_adminTopology_message_setModeFail( - // String.valueOf(selections.length), mode.name()) - // + " " + caught.getMessage(), caught); - // refreshTableInfo(); - // } - // - // }); } else { refreshTableInfo(); }
commit 24fa639a78ca9a9f66b93080087a50b95b0bff40 Author: Jirka Kremser jkremser@redhat.com Date: Thu Jul 25 14:53:22 2013 +0200
Hiding the "Jump to Section" navigation if there is only one section in the ConfigurationEditor component.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/configuration/ConfigurationEditor.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/configuration/ConfigurationEditor.java index 418300f..02d25eb 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/configuration/ConfigurationEditor.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/configuration/ConfigurationEditor.java @@ -453,8 +453,11 @@ public class ConfigurationEditor extends EnhancedVLayout { // com.allen_sauer.gwt.log.client.Log.info("building: " + definition.getDisplayName()); sectionStack.addSection(buildGroupSection(definition)); } - this.toolStrip = buildToolStrip(layout, sectionStack); - layout.addMember(toolStrip); + + if (groupDefinitions.size() > 1) { + this.toolStrip = buildToolStrip(layout, sectionStack); + layout.addMember(toolStrip); + } layout.addMember(sectionStack); }
commit ad9620a5be6b6d3264969d7b6ba612b11173df5c Author: Jirka Kremser jkremser@redhat.com Date: Thu Jul 25 14:51:04 2013 +0200
Fetching all the fields of StorageNode entity if only instance with id is passed to StorageNodeManagerBean.findResourcesWithAlertDefinitions() method.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 9afe497..0cf45e4 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -658,12 +658,8 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN if (storageNode == null) { initialStorageNodes = getStorageNodes(); } else { - int index = initialStorageNodes.indexOf(storageNode); - if (index >= 0) { - initialStorageNodes = Arrays.asList(initialStorageNodes.get(index)); - } else { - initialStorageNodes = new ArrayList<StorageNode>(); - } + initialStorageNodes = Arrays.asList(storageNode.getResource() == null ? entityManager.find( + StorageNode.class, storageNode.getId()) : storageNode); }
Queue<Resource> unvisitedResources = new LinkedList<Resource>();
commit e29314881365fe1c7147eb58bd3111840968a973 Author: Jirka Kremser jkremser@redhat.com Date: Wed Jul 24 12:44:44 2013 +0200
GWT service impl layer method for finding the unacked alerts, also fetching the # of unack alerts in getStorageNodeComposites() and making 8 hours the default time value for aggregated metrics.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java index dd7f009..69c875e 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java @@ -22,6 +22,8 @@ */ package org.rhq.enterprise.gui.coregui.client.gwt;
+import java.util.List; + import com.google.gwt.user.client.rpc.RemoteService;
import org.rhq.core.domain.cloud.StorageNode; @@ -72,4 +74,6 @@ public interface StorageGWTService extends RemoteService { Integer[] findResourcesWithAlertDefinitions() throws RuntimeException;
int findNotAcknowledgedStorageNodeAlertsCount() throws RuntimeException; + + List<Integer> findNotAcknowledgedStorageNodeAlertsCounts(List<Integer> storageNodeIds) throws RuntimeException; } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java index 74837b3..624953c 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java @@ -22,16 +22,15 @@ */ package org.rhq.enterprise.gui.coregui.server.gwt;
+import java.util.ArrayList; import java.util.List;
-import org.rhq.core.clientapi.util.ArrayUtil; import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.cloud.StorageNodeLoadComposite; import org.rhq.core.domain.criteria.ResourceCriteria; import org.rhq.core.domain.criteria.StorageNodeCriteria; import org.rhq.core.domain.resource.Resource; import org.rhq.core.domain.util.PageList; -import org.rhq.core.domain.util.collection.ArrayUtils; import org.rhq.enterprise.gui.coregui.client.gwt.StorageGWTService; import org.rhq.enterprise.gui.coregui.server.util.SerialUtility; import org.rhq.enterprise.server.cloud.StorageNodeManagerLocal; @@ -121,4 +120,21 @@ public class StorageGWTServiceImpl extends AbstractGWTServiceImpl implements Sto throw getExceptionToThrowToClient(t); } } + + @Override + public List<Integer> findNotAcknowledgedStorageNodeAlertsCounts(List<Integer> storageNodeIds) throws RuntimeException { + try { + List<Integer> unackAlertCounts = new ArrayList<Integer>(storageNodeIds.size()); + for (int storageNodeId : storageNodeIds) { + StorageNode node = new StorageNode(); + node.setId(storageNodeId); + int num = storageNodeManager.findNotAcknowledgedStorageNodeAlerts(getSessionSubject(), node).size(); + unackAlertCounts.add(num); + } + assert storageNodeIds.size() == unackAlertCounts.size(); + return unackAlertCounts; + } catch (Throwable t) { + throw getExceptionToThrowToClient(t); + } + } } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 74a4713..9afe497 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -484,9 +484,12 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN List<StorageNode> nodes = 
getStorageNodes(); PageList<StorageNodeLoadComposite> result = new PageList<StorageNodeLoadComposite>(); long endTime = System.currentTimeMillis(); - long beginTime = endTime - (1 * 60 * 60 * 1000); + long beginTime = endTime - (8 * 60 * 60 * 1000); for (StorageNode node : nodes) { - result.add(getLoad(subjectManager.getOverlord(), node, beginTime, endTime)); + StorageNodeLoadComposite composite = getLoad(subjectManager.getOverlord(), node, beginTime, endTime); + int unackAlerts = findNotAcknowledgedStorageNodeAlerts(subjectManager.getOverlord(), node).size(); + composite.setUnackAlerts(unackAlerts); + result.add(composite); } return result; } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java index a22666e..63e8e3e 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java @@ -185,8 +185,6 @@ public interface StorageNodeManagerLocal { * @throws IllegalStateException if the group is not found or does not exist. */ ResourceGroup getStorageNodeGroup(); - - Integer[] findResourcesWithAlertDefinitions();
void scheduleOperationInNewTransaction(Subject subject, ResourceOperationSchedule schedule);
commit 168e6c071c841789caae5b96a8ca15537058ba5a Author: Jirka Kremser jkremser@redhat.com Date: Wed Jul 24 12:40:01 2013 +0200
Considering FILTER_RESOURCE_IDS in the AlertDatasource.getFetchCriteria().
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/AlertDataSource.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/AlertDataSource.java index d66baea..90ac315 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/AlertDataSource.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/AlertDataSource.java @@ -83,6 +83,7 @@ public class AlertDataSource extends RPCDataSource<Alert, AlertCriteria> { public static final String PRIORITY_ICON_LOW = ImageManager.getAlertIcon(AlertPriority.LOW);
public static final String FILTER_PRIORITIES = "priorities"; + public static final String FILTER_RESOURCE_IDS = "resourceIds";
private AlertGWTServiceAsync alertService = GWTServiceLookup.getAlertService();
@@ -360,6 +361,7 @@ public class AlertDataSource extends RPCDataSource<Alert, AlertCriteria> { Date endOfDay = DateFilterItem.adjustTimeToEndOfDay(endDateFilter); criteria.addFilterEndTime(endOfDay.getTime()); } + criteria.addFilterResourceIds(getArrayFilter(request, FILTER_RESOURCE_IDS, Integer.class)); criteria.addFilterEntityContext(entityContext); criteria.fetchConditionLogs(true); // criteria.fetchGroupAlertDefinition(true);
commit f34eb24c2ebf04f9cdb0a7517554bb2be51e4941 Author: Jirka Kremser jkremser@redhat.com Date: Tue Jul 23 13:02:37 2013 +0200
Number of unack alerts is now displayed on the alert tab title.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java index b9d2335..31378f1 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java @@ -20,6 +20,7 @@ package org.rhq.enterprise.gui.coregui.client.admin.storage;
import java.util.EnumSet;
+import com.google.gwt.user.client.Timer; import com.google.gwt.user.client.rpc.AsyncCallback; import com.smartgwt.client.widgets.Label; import com.smartgwt.client.widgets.tab.events.TabSelectedEvent; @@ -60,12 +61,11 @@ public class StorageNodeAdminView extends EnhancedVLayout implements HasViewName + AdministrationView.SECTION_TOPOLOGY_VIEW_ID + "/" + VIEW_ID;
private static final String GROUP_NAME = "RHQ Storage Nodes"; -// private static final String GROUP_NAME = "storage services";
private final NamedTabSet tabset; private TabInfo tableTabInfo = new TabInfo(0, new ViewName("Nodes")); - private TabInfo settingsTabInfo = new TabInfo(1, new ViewName("Settings", "Global Settings")); - private TabInfo alertsTabInfo = new TabInfo(2, new ViewName("Alerts", "Global Alerts")); + private TabInfo settingsTabInfo = new TabInfo(1, new ViewName("Settings", "Cluster Settings")); + private TabInfo alertsTabInfo = new TabInfo(2, new ViewName("Alerts", "Cluster Alerts")); private TabInfo backupTabInfo = new TabInfo(3, new ViewName("Backup")); private StorageNodeTableView table;
@@ -97,6 +97,7 @@ public class StorageNodeAdminView extends EnhancedVLayout implements HasViewName CoreGUI.goToView(VIEW_PATH + "/" + alertsTabInfo.name); } }); + scheduleUnacknowledgedAlertsPollingJob(alerts);
final NamedTab backup = new NamedTab(backupTabInfo.name); backup.addTabSelectedHandler(new TabSelectedHandler() { @@ -182,6 +183,27 @@ public class StorageNodeAdminView extends EnhancedVLayout implements HasViewName } }); } + + private void scheduleUnacknowledgedAlertsPollingJob(final NamedTab alerts) { + new Timer() { + public void run() { + GWTServiceLookup.getStorageService().findNotAcknowledgedStorageNodeAlertsCount(new AsyncCallback<Integer>() { + @Override + public void onSuccess(Integer result) { + alerts.setTitle(alerts.getTitle() + + (result != 0 ? " <font color='#CC0000;'>(" + result + ")</font>" : " (" + result + + ")")); + schedule(5 * 1000); + } + + @Override + public void onFailure(Throwable caught) { + schedule(60 * 1000); + } + }); + } + }.run(); + }
@Override public ViewName getViewName() { diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java index df0e4cf..dd7f009 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java @@ -70,4 +70,6 @@ public interface StorageGWTService extends RemoteService { PageList<StorageNodeLoadComposite> getStorageNodeComposites() throws RuntimeException;
Integer[] findResourcesWithAlertDefinitions() throws RuntimeException; + + int findNotAcknowledgedStorageNodeAlertsCount() throws RuntimeException; } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java index 45508c7..74837b3 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java @@ -112,4 +112,13 @@ public class StorageGWTServiceImpl extends AbstractGWTServiceImpl implements Sto throw getExceptionToThrowToClient(t); } } + + @Override + public int findNotAcknowledgedStorageNodeAlertsCount() throws RuntimeException { + try { + return storageNodeManager.findNotAcknowledgedStorageNodeAlerts(getSessionSubject()).size(); + } catch (Throwable t) { + throw getExceptionToThrowToClient(t); + } + } }
commit f0c55dc98a3ed17f6b28344ba73e6bf3efc2d23d Author: Jirka Kremser jkremser@redhat.com Date: Wed Jul 17 14:14:45 2013 +0200
Making things more consistent - now, one hour aggregate are used for both the load sub-table and for the Memory and Disk columns.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java index 65314f4..e9e81f3 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java @@ -119,7 +119,7 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNodeLoadComposit field.setShowHover(true); field.setHoverCustomizer(new HoverCustomizer() { public String hoverHTML(Object value, ListGridRecord record, int rowNum, int colNum) { - return "Average disk space taken for last one hour."; + return "Average memory taken for last one hour."; } }); fields.add(field); @@ -128,7 +128,7 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNodeLoadComposit field.setShowHover(true); field.setHoverCustomizer(new HoverCustomizer() { public String hoverHTML(Object value, ListGridRecord record, int rowNum, int colNum) { - return "Average memory taken for last one hour."; + return "Average disk space taken for last one hour."; } }); fields.add(field); @@ -335,7 +335,7 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNodeLoadComposit }
private static void executeFetch(final StorageNode node, final AsyncCallback<StorageNodeLoadComposite> callback) { - GWTServiceLookup.getStorageService().getLoad(node, 8, MeasurementUtils.UNIT_HOURS, callback); + GWTServiceLookup.getStorageService().getLoad(node, 1, MeasurementUtils.UNIT_HOURS, callback); }
private ListGridRecord[] makeListGridRecords(StorageNodeLoadComposite loadComposite) {
commit 1c5d4ff8f5c03bb1c86c9a4af0c9099d80097a62 Author: Jirka Kremser jkremser@redhat.com Date: Wed Jul 17 14:10:12 2013 +0200
New storage node ui, containing Alerts, Configuration and the table with storage nodes.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/AdministrationView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/AdministrationView.java index da01888..9a7e75f 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/AdministrationView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/AdministrationView.java @@ -32,7 +32,7 @@ import org.rhq.enterprise.gui.coregui.client.CoreGUI; import org.rhq.enterprise.gui.coregui.client.IconEnum; import org.rhq.enterprise.gui.coregui.client.admin.agent.install.RemoteAgentInstallView; import org.rhq.enterprise.gui.coregui.client.admin.roles.RolesView; -import org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeTableView; +import org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeAdminView; import org.rhq.enterprise.gui.coregui.client.admin.templates.AlertDefinitionTemplateTypeView; import org.rhq.enterprise.gui.coregui.client.admin.templates.DriftDefinitionTemplateTypeView; import org.rhq.enterprise.gui.coregui.client.admin.templates.IgnoreResourceTypesView; @@ -145,9 +145,9 @@ public class AdministrationView extends AbstractSectionedLeftNavigationView { } }, getGlobalPermissions().contains(Permission.MANAGE_SETTINGS));
- NavigationItem storageNodesItem = new NavigationItem(StorageNodeTableView.VIEW_ID, new ViewFactory() { + NavigationItem storageNodesItem = new NavigationItem(StorageNodeAdminView.VIEW_ID, new ViewFactory() { public Canvas createView() { - return new StorageNodeTableView(); + return new StorageNodeAdminView(); } }, getGlobalPermissions().contains(Permission.MANAGE_SETTINGS));
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java index c9bdc0d..b9d2335 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java @@ -18,328 +18,224 @@ */ package org.rhq.enterprise.gui.coregui.client.admin.storage;
-import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_ADDRESS; -import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_RESOURCE_ID; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; +import java.util.EnumSet;
import com.google.gwt.user.client.rpc.AsyncCallback; -import com.smartgwt.client.data.Criteria; -import com.smartgwt.client.types.SortDirection; -import com.smartgwt.client.util.BooleanCallback; -import com.smartgwt.client.util.SC; -import com.smartgwt.client.widgets.Canvas; -import com.smartgwt.client.widgets.grid.CellFormatter; -import com.smartgwt.client.widgets.grid.ListGrid; -import com.smartgwt.client.widgets.grid.ListGridField; -import com.smartgwt.client.widgets.grid.ListGridRecord; - -import org.rhq.core.domain.authz.Permission; -import org.rhq.core.domain.cloud.StorageNode.OperationMode; +import com.smartgwt.client.widgets.Label; +import com.smartgwt.client.widgets.tab.events.TabSelectedEvent; +import com.smartgwt.client.widgets.tab.events.TabSelectedHandler; + +import org.rhq.core.domain.criteria.ResourceGroupCriteria; +import org.rhq.core.domain.resource.ResourceType; +import org.rhq.core.domain.resource.group.composite.ResourceGroupComposite; +import org.rhq.core.domain.util.PageList; +import org.rhq.core.domain.util.collection.ArrayUtils; +import org.rhq.enterprise.gui.coregui.client.BookmarkableView; import org.rhq.enterprise.gui.coregui.client.CoreGUI; import org.rhq.enterprise.gui.coregui.client.IconEnum; -import org.rhq.enterprise.gui.coregui.client.LinkManager; +import org.rhq.enterprise.gui.coregui.client.ViewPath; import org.rhq.enterprise.gui.coregui.client.admin.AdministrationView; -import org.rhq.enterprise.gui.coregui.client.components.table.AuthorizedTableAction; -import org.rhq.enterprise.gui.coregui.client.components.table.TableActionEnablement; -import org.rhq.enterprise.gui.coregui.client.components.table.TableSection; +import org.rhq.enterprise.gui.coregui.client.alert.AlertHistoryView; +import org.rhq.enterprise.gui.coregui.client.components.tab.NamedTab; +import org.rhq.enterprise.gui.coregui.client.components.tab.NamedTabSet; import org.rhq.enterprise.gui.coregui.client.components.view.HasViewName; import 
org.rhq.enterprise.gui.coregui.client.components.view.ViewName; import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; -import org.rhq.enterprise.gui.coregui.client.util.StringUtility; -import org.rhq.enterprise.gui.coregui.client.util.async.Command; -import org.rhq.enterprise.gui.coregui.client.util.async.CountDownLatch; +import org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.configuration.GroupResourceConfigurationEditView; +import org.rhq.enterprise.gui.coregui.client.inventory.resource.type.ResourceTypeRepository; +import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; import org.rhq.enterprise.gui.coregui.client.util.message.Message;
/** - * Shows the table of all storage nodes. + * The main view for managing storage nodes. * * @author Jirka Kremser */ -public class StorageNodeAdminView extends TableSection<StorageNodeDatasource> implements HasViewName { +public class StorageNodeAdminView extends EnhancedVLayout implements HasViewName, BookmarkableView {
public static final ViewName VIEW_ID = new ViewName("StorageNodes", MSG.view_adminTopology_storageNodes(), IconEnum.STORAGE_NODE);
public static final String VIEW_PATH = AdministrationView.VIEW_ID + "/" + AdministrationView.SECTION_TOPOLOGY_VIEW_ID + "/" + VIEW_ID; + + private static final String GROUP_NAME = "RHQ Storage Nodes"; +// private static final String GROUP_NAME = "storage services"; + + private final NamedTabSet tabset; + private TabInfo tableTabInfo = new TabInfo(0, new ViewName("Nodes")); + private TabInfo settingsTabInfo = new TabInfo(1, new ViewName("Settings", "Global Settings")); + private TabInfo alertsTabInfo = new TabInfo(2, new ViewName("Alerts", "Global Alerts")); + private TabInfo backupTabInfo = new TabInfo(3, new ViewName("Backup")); + private StorageNodeTableView table; + + private int[] resIds;
public StorageNodeAdminView() { - super(null); + super(); setHeight100(); setWidth100(); - Criteria criteria = new Criteria(); - String[] modes = new String[OperationMode.values().length]; - int i = 0; - for (OperationMode value : OperationMode.values()) { - modes[i++] = value.name(); - } - criteria.addCriteria(StorageNodeDatasource.FILTER_OPERATION_MODE, modes); - setInitialCriteria(criteria); - setDataSource(new StorageNodeDatasource()); + setLayoutTopMargin(8); + tabset = new NamedTabSet(); + NamedTab table = new NamedTab(tableTabInfo.name); + table.addTabSelectedHandler(new TabSelectedHandler() { + public void onTabSelected(TabSelectedEvent event) { + CoreGUI.goToView(VIEW_PATH); + } + }); + + NamedTab settings = new NamedTab(settingsTabInfo.name); + settings.addTabSelectedHandler(new TabSelectedHandler() { + public void onTabSelected(TabSelectedEvent event) { + CoreGUI.goToView(VIEW_PATH + "/" + settingsTabInfo.name); + } + }); + + final NamedTab alerts = new NamedTab(alertsTabInfo.name); + alerts.addTabSelectedHandler(new TabSelectedHandler() { + public void onTabSelected(TabSelectedEvent event) { + CoreGUI.goToView(VIEW_PATH + "/" + alertsTabInfo.name); + } + }); + + final NamedTab backup = new NamedTab(backupTabInfo.name); + backup.addTabSelectedHandler(new TabSelectedHandler() { + public void onTabSelected(TabSelectedEvent event) { + CoreGUI.goToView(VIEW_PATH + "/" + backupTabInfo.name); + } + }); + + tabset.setTabs(table, settings, alerts, backup); + addMember(tabset); }
- @Override - protected void configureTable() { - super.configureTable(); - List<ListGridField> fields = getDataSource().getListGridFields(); - ListGrid listGrid = getListGrid(); - listGrid.setFields(fields.toArray(new ListGridField[fields.size()])); - listGrid.sort(FIELD_ADDRESS.propertyName(), SortDirection.ASCENDING); - showCommonActions(); + private void showTab(final TabInfo tabInfo) { + if (tabInfo.equals(tableTabInfo)) { + table = new StorageNodeTableView(); + tabset.getTabByName(tabInfo.name.getName()).setPane(table); + tabset.selectTab(tabInfo.index); + } else if (tabInfo.equals(backupTabInfo)) { + tabset.getTabByName(tabInfo.name.getName()).setPane(new Label("in progress..")); + } else if (tabInfo.equals(alertsTabInfo)) { + if (resIds != null) { + tabset.getTabByName(tabInfo.name.getName()).setPane(new AlertHistoryView("storageNodesAlerts", resIds)); + } else { + GWTServiceLookup.getStorageService().findResourcesWithAlertDefinitions(new AsyncCallback<Integer[]>() { + @Override + public void onFailure(Throwable caught) { + Message message = new Message("foobar", + Message.Severity.Warning); + CoreGUI.goToView(VIEW_ID.getName(), message); + }
- for (ListGridField field : fields) { - // adding the cell formatter for name field (clickable link) - if (field.getName() == FIELD_ADDRESS.propertyName()) { - field.setCellFormatter(new CellFormatter() { @Override - public String format(Object value, ListGridRecord record, int rowNum, int colNum) { - if (value == null) { - return ""; + public void onSuccess(Integer[] result) { + if (result == null || result.length == 0) { + onFailure(new Exception("foobaz")); + } else { + resIds = ArrayUtils.unwrapArray(result); + tabset.getTabByName(tabInfo.name.getName()).setPane( + new AlertHistoryView("storageNodesAlerts", resIds)); + tabset.selectTab(tabInfo.index); } - String detailsUrl = "#" + VIEW_PATH + "/" + getId(record); - String formattedValue = StringUtility.escapeHtml(value.toString()); - return LinkManager.getHref(detailsUrl, formattedValue); - } }); - } else if (field.getName() == FIELD_RESOURCE_ID.propertyName()) { - // adding the cell formatter for resource id field (clickable link) - field.setCellFormatter(new CellFormatter() { + } + } else if (tabInfo.equals(settingsTabInfo)) { + ResourceGroupCriteria criteria = new ResourceGroupCriteria(); + criteria.addFilterName(GROUP_NAME); + criteria.setStrict(true); + GWTServiceLookup.getResourceGroupService().findResourceGroupCompositesByCriteria(criteria, + new AsyncCallback<PageList<ResourceGroupComposite>>() { @Override - public String format(Object value, ListGridRecord record, int rowNum, int colNum) { - if (value == null || value.toString().isEmpty()) { - return ""; - } - String rawUrl = null; - try { - rawUrl = LinkManager.getResourceLink(record.getAttributeAsInt(FIELD_RESOURCE_ID - .propertyName())); - } catch (NumberFormatException nfe) { - rawUrl = MSG.common_label_none(); - } + public void onFailure(Throwable caught) { + Message message = new Message(MSG.view_group_detail_failLoadComp(String.valueOf(GROUP_NAME)), + Message.Severity.Warning); + CoreGUI.goToView(VIEW_ID.getName(), message); + }
- String formattedValue = StringUtility.escapeHtml(rawUrl); - String label = StringUtility.escapeHtml("Link to Resource"); - return LinkManager.getHref(formattedValue, label); + @Override + public void onSuccess(PageList<ResourceGroupComposite> result) { + if (result.isEmpty()) { + onFailure(new Exception("Group with name [" + GROUP_NAME + "] does not exist.")); + } else { + ResourceGroupComposite groupComposite = result.get(0); + loadResourceType(groupComposite.getResourceGroup().getResourceType().getId()); + tabset.getTabByName(tabInfo.name.getName()).setPane( + new GroupResourceConfigurationEditView(groupComposite)); + tabset.selectTab(tabInfo.index); + } } }); - } } } - - @Override - protected ListGrid createListGrid() { - ListGrid listGrid = new ListGrid() { - @Override - protected Canvas getExpansionComponent(final ListGridRecord record) { - int id = record.getAttributeAsInt(FIELD_ID); - return new StorageNodeLoadComponent(id, this, record); - } - }; - listGrid.setCanExpandRecords(true); -// listGrid.setAutoFetchData(true); - - return listGrid; + + private void loadResourceType(int resourceTypeId) { + ResourceTypeRepository.Cache.getInstance().getResourceTypes( + resourceTypeId, + EnumSet.of(ResourceTypeRepository.MetadataType.content, ResourceTypeRepository.MetadataType.operations, + ResourceTypeRepository.MetadataType.measurements, ResourceTypeRepository.MetadataType.events, + ResourceTypeRepository.MetadataType.resourceConfigurationDefinition), + new ResourceTypeRepository.TypeLoadedCallback() { + public void onTypesLoaded(ResourceType type) { + + } + }); }
@Override - public Canvas getDetailsView(Integer id) { - return new StorageNodeDetailView(id); - } - - private void showCommonActions() { - addInvokeOperationsAction(); - - // addTableAction(MSG.view_adminTopology_server_removeSelected(), null, new AuthorizedTableAction(this, - // TableActionEnablement.ANY, Permission.MANAGE_SETTINGS) { - // public void executeAction(final ListGridRecord[] selections, Object actionValue) { - // final List<String> selectedAddresses = getSelectedAddresses(selections); - // String message = MSG.view_adminTopology_message_removeServerConfirm(selectedAddresses.toString()); - // SC.ask(message, new BooleanCallback() { - // public void execute(Boolean confirmed) { - // if (confirmed) { - // SC.say("You've selected:\n\n" + selectedAddresses); - //// int[] selectedIds = getSelectedIds(selections); - //// GWTServiceLookup.getTopologyService().deleteServers(selectedIds, new AsyncCallback<Void>() { - //// public void onSuccess(Void arg0) { - //// Message msg = new Message(MSG.view_adminTopology_message_removedServer(String - //// .valueOf(selections.length)), Message.Severity.Info); - //// CoreGUI.getMessageCenter().notify(msg); - //// refresh(); - //// } - //// - //// public void onFailure(Throwable caught) { - //// CoreGUI.getErrorHandler().handleError( - //// MSG.view_adminTopology_message_removeServerFail(String - //// .valueOf(selections.length)) + " " + caught.getMessage(), caught); - //// refreshTableInfo(); - //// } - //// - //// }); - // } - // } - // }); - // } - // }); - } - - private void addInvokeOperationsAction() { - Map<String, Object> operationsMap = new LinkedHashMap<String, Object>(); - operationsMap.put("Start", "start"); - operationsMap.put("Shutdown", "shutdown"); - operationsMap.put("Restart", "restart"); - operationsMap.put("Disable Debug Mode", "stopRPCServer"); - operationsMap.put("Enable Debug Mode", "startRPCServer"); - // operationsMap.put("Decommission", "decommission"); - - 
addTableAction(MSG.common_title_operation(), null, operationsMap, new AuthorizedTableAction(this, - TableActionEnablement.ANY, Permission.MANAGE_SETTINGS) { - - @Override - public boolean isEnabled(ListGridRecord[] selection) { - return StorageNodeAdminView.this.isEnabled(super.isEnabled(selection), selection); - }; - - @Override - public void executeAction(final ListGridRecord[] selections, Object actionValue) { - final String operationName = (String) actionValue; - final List<String> selectedAddresses = getSelectedAddresses(selections); - // String message = MSG.view_adminTopology_message_setModeConfirm(selectedAddresses.toString(), mode.name()); - SC.ask("Are you sure, you want to run operation " + operationName + "?", new BooleanCallback() { - public void execute(Boolean confirmed) { - if (confirmed) { - final CountDownLatch latch = CountDownLatch.create(selections.length, new Command() { - @Override - public void execute() { - // Message msg = new Message(MSG.view_adminTopology_message_setMode( - // String.valueOf(selections.length), mode.name()), Message.Severity.Info); - Message msg = new Message("Operation" + operationName - + " was successfully scheduled for resources with ids" - + Arrays.asList(getSelectedIds(selections)), Message.Severity.Info); - CoreGUI.getMessageCenter().notify(msg); - refreshTableInfo(); - } - }); - boolean isStopStartOrRestart = Arrays.asList("start", "shutdown", "restart").contains( - operationName); - for (ListGridRecord storageNodeRecord : selections) { - // NFE should never happen, because of the condition for table action enablement - int resourceId = storageNodeRecord.getAttributeAsInt(FIELD_RESOURCE_ID.propertyName()); - if (isStopStartOrRestart) { - // start, stop or restart the storage node - GWTServiceLookup.getOperationService().scheduleResourceOperation(resourceId, - operationName, null, "Run by Storage Node Administrations UI", 0, - new AsyncCallback<Void>() { - public void onSuccess(Void result) { - latch.countDown(); 
- } - - public void onFailure(Throwable caught) { - CoreGUI.getErrorHandler().handleError( - "Scheduling operation " + operationName - + " failed for resources with ids" - + Arrays.asList(getSelectedIds(selections)) + " " - + caught.getMessage(), caught); - latch.countDown(); - refreshTableInfo(); - } - }); - } else { - // invoke the operation on the storage service resource - GWTServiceLookup.getStorageService().invokeOperationOnStorageService(resourceId, - operationName, new AsyncCallback<Void>() { - public void onSuccess(Void result) { - latch.countDown(); - } - - public void onFailure(Throwable caught) { - CoreGUI.getErrorHandler().handleError( - "Scheduling operation " + operationName - + " failed for resources with ids" - + Arrays.asList(getSelectedIds(selections)) + " " - + caught.getMessage(), caught); - latch.countDown(); - refreshTableInfo(); - } - }); - } - } - - // int[] selectedIds = getSelectedIds(selections); - // GWTServiceLookup.getTopologyService().updateServerMode(selectedIds, mode, - // new AsyncCallback<Void>() { - // public void onSuccess(Void result) { - // Message msg = new Message(MSG.view_adminTopology_message_setMode( - // String.valueOf(selections.length), mode.name()), Message.Severity.Info); - // CoreGUI.getMessageCenter().notify(msg); - // refresh(); - // } - // - // public void onFailure(Throwable caught) { - // CoreGUI.getErrorHandler().handleError( - // MSG.view_adminTopology_message_setModeFail( - // String.valueOf(selections.length), mode.name()) - // + " " + caught.getMessage(), caught); - // refreshTableInfo(); - // } - // - // }); - } else { - refreshTableInfo(); - } - } - }); - } - }); + public ViewName getViewName() { + return VIEW_ID; } - - private int[] getSelectedIds(ListGridRecord[] selections) { - if (selections == null) { - return new int[0]; - } - int[] ids = new int[selections.length]; - int i = 0; - for (ListGridRecord selection : selections) { - ids[i++] = selection.getAttributeAsInt(FIELD_ID); + + private static 
final class TabInfo { + private int index; + private ViewName name; + + private TabInfo(int index, ViewName name) { + this.index = index; + this.name = name; } - return ids; - }
- private List<String> getSelectedAddresses(ListGridRecord[] selections) { - if (selections == null) { - return new ArrayList<String>(0); - } - List<String> ids = new ArrayList<String>(selections.length); - for (ListGridRecord selection : selections) { - ids.add(selection.getAttributeAsString(FIELD_ADDRESS.propertyName())); + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + index; + return result; } - return ids; - }
- private boolean isEnabled(boolean parentsOpinion, ListGridRecord[] selection) { - if (!parentsOpinion) { - return false; - } - for (ListGridRecord storageNodeRecord : selection) { - if (storageNodeRecord.getAttribute(FIELD_RESOURCE_ID.propertyName()) == null) { + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) return false; - } + if (getClass() != obj.getClass()) + return false; + TabInfo other = (TabInfo) obj; + if (index != other.index) + return false; + return true; } - return true; - } - - @Override - public ViewName getViewName() { - return VIEW_ID; }
@Override - protected String getBasePath() { - return VIEW_PATH; + public void renderView(ViewPath viewPath) { + if (viewPath.getViewPath().size() == 3) { + showTab(tableTabInfo); + } else { + String viewId = viewPath.getCurrent().getPath(); + if (settingsTabInfo.name.getName().equals(viewId)) { + showTab(settingsTabInfo); + } else if (alertsTabInfo.name.getName().equals(viewId)) { + showTab(alertsTabInfo); + } else if (backupTabInfo.name.getName().equals(viewId)) { + showTab(backupTabInfo); + } else { + showTab(tableTabInfo); + table.renderView(viewPath); + } + } } } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java index 3fd90c7..f6f08b4 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java @@ -58,6 +58,7 @@ import org.rhq.enterprise.gui.coregui.client.ViewPath; import org.rhq.enterprise.gui.coregui.client.components.table.TimestampCellFormatter; import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; import org.rhq.enterprise.gui.coregui.client.inventory.InventoryView; +import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.configuration.ResourceConfigurationEditView; import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.operation.history.ResourceOperationHistoryListView; import org.rhq.enterprise.gui.coregui.client.util.Log; import org.rhq.enterprise.gui.coregui.client.util.StringUtility; @@ -79,6 +80,7 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab private SectionStackSection detailsSection; private SectionStackSection loadSection; private SectionStackSection 
historySection; + private int expandedSection = -1;
private volatile int initSectionCount = 0;
@@ -96,6 +98,11 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab sectionStack.setMargin(5); sectionStack.setOverflow(Overflow.VISIBLE); } + + public StorageNodeDetailView(int storageNodeId, int expandedSection) { + this(storageNodeId); + this.expandedSection = expandedSection; + }
@Override protected void onInit() { @@ -152,7 +159,9 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab initSectionCount = SECTION_COUNT; } else { final ResourceComposite resourceComposite = result.get(0); - prepareOperationHistory(resourceComposite); +// prepareOperationHistory(resourceComposite); + prepareResourceConfigEditor(resourceComposite); + } } }); @@ -182,6 +191,12 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab if (null != historySection) { sectionStack.addSection(historySection); } +// if (expandedSection != -1) { +// for (int i = 1; i < SECTION_COUNT; i++) { +// sectionStack.collapseSection(i); +// } +// sectionStack.expandSection(expandedSection); +// } addMember(sectionStack); markForRedraw();
@@ -268,7 +283,7 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab footer.addMember(saveButton);
SectionStackSection section = new SectionStackSection(MSG.common_title_details()); - section.setExpanded(true); + section.setExpanded(expandedSection != -1 ? expandedSection == 0 : true);
section.setItems(form); detailsSection = section; @@ -279,7 +294,7 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab StorageNodeLoadComponent loadDataComponent = new StorageNodeLoadComponent(storageNode.getId()); SectionStackSection section = new SectionStackSection("Load"); section.setItems(loadDataComponent); - section.setExpanded(true); + section.setExpanded(expandedSection != -1 ? expandedSection == 1 : true);
loadSection = section; initSectionCount++; @@ -294,9 +309,31 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab historySection = section; initSectionCount++; } + + private void prepareResourceConfigEditor(ResourceComposite resourceComposite) { + ResourceConfigurationEditView editorView = new ResourceConfigurationEditView(resourceComposite); + SectionStackSection section = new SectionStackSection("Configuration"); + section.setItems(editorView); + section.setExpanded(expandedSection != -1 && expandedSection == 2); + + historySection = section; + initSectionCount++; + } + +
@Override public void renderView(ViewPath viewPath) { + if (viewPath.toString().endsWith("/Config")) { +// for (int i = 1; i < SECTION_COUNT; i++) { +// sectionStack.collapseSection(i); +// } + expandedSection = 2; +// sectionStack.expandSection(expandedSection); +// detailsSection.setExpanded(false); +// loadSection.setExpanded(false); +// historySection.setExpanded(true); + } Log.debug("StorageNodeDetailView: " + viewPath); } } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeLoadComponent.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeLoadComponent.java index e044e4e..b0522b1 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeLoadComponent.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeLoadComponent.java @@ -28,6 +28,7 @@ import com.smartgwt.client.widgets.grid.ListGridField; import com.smartgwt.client.widgets.grid.ListGridRecord; import com.smartgwt.client.widgets.toolbar.ToolStrip;
+import org.rhq.enterprise.gui.coregui.client.CoreGUI; import org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasource.StorageNodeLoadCompositeDatasource; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout;
@@ -43,7 +44,7 @@ public class StorageNodeLoadComponent extends EnhancedVLayout { this(storageNodeId, null, null); }
- public StorageNodeLoadComponent(int storageNodeId, final ListGrid parentGrid, final ListGridRecord record) { + public StorageNodeLoadComponent(final int storageNodeId, final ListGrid parentGrid, final ListGridRecord record) { super(5); setPadding(5); setBackgroundColor("#ffffff"); @@ -73,13 +74,24 @@ public class StorageNodeLoadComponent extends EnhancedVLayout { loadGrid.setFields(fields.toArray(new ListGridField[fields.size()])); loadGrid.setAutoFetchData(true);
+ + ToolStrip toolStrip = new ToolStrip(); + IButton settingsButton = new IButton("Settings"); + settingsButton.addClickHandler(new ClickHandler() { + public void onClick(ClickEvent event) { + CoreGUI.goToView(StorageNodeAdminView.VIEW_PATH + "/" + storageNodeId + "/Config"); + } + }); + settingsButton.setExtraSpace(5); + toolStrip.addMember(settingsButton); + IButton refreshButton = new IButton(MSG.common_button_refresh()); refreshButton.addClickHandler(new ClickHandler() { public void onClick(ClickEvent event) { loadGrid.fetchData(); } }); - ToolStrip toolStrip = new ToolStrip(); + refreshButton.setExtraSpace(5); toolStrip.addMember(refreshButton);
if (parentGrid != null && record != null) { @@ -90,6 +102,8 @@ public class StorageNodeLoadComponent extends EnhancedVLayout { } }); toolStrip.addMember(closeButton); + + } loadGrid.setDataSource(datasource); addMember(loadGrid); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java index 9b21970..46dd734 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java @@ -41,14 +41,11 @@ import com.smartgwt.client.widgets.grid.ListGridRecord; import org.rhq.core.domain.authz.Permission; import org.rhq.core.domain.cloud.StorageNode.OperationMode; import org.rhq.enterprise.gui.coregui.client.CoreGUI; -import org.rhq.enterprise.gui.coregui.client.IconEnum; import org.rhq.enterprise.gui.coregui.client.LinkManager; import org.rhq.enterprise.gui.coregui.client.admin.AdministrationView; import org.rhq.enterprise.gui.coregui.client.components.table.AuthorizedTableAction; import org.rhq.enterprise.gui.coregui.client.components.table.TableActionEnablement; import org.rhq.enterprise.gui.coregui.client.components.table.TableSection; -import org.rhq.enterprise.gui.coregui.client.components.view.HasViewName; -import org.rhq.enterprise.gui.coregui.client.components.view.ViewName; import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; import org.rhq.enterprise.gui.coregui.client.util.StringUtility; import org.rhq.enterprise.gui.coregui.client.util.async.Command; @@ -60,13 +57,10 @@ import org.rhq.enterprise.gui.coregui.client.util.message.Message; * * @author Jirka Kremser */ -public class StorageNodeTableView extends TableSection<StorageNodeDatasource> implements 
HasViewName { - - public static final ViewName VIEW_ID = new ViewName("StorageNodes", MSG.view_adminTopology_storageNodes(), - IconEnum.STORAGE_NODE); +public class StorageNodeTableView extends TableSection<StorageNodeDatasource> {
public static final String VIEW_PATH = AdministrationView.VIEW_ID + "/" - + AdministrationView.SECTION_TOPOLOGY_VIEW_ID + "/" + VIEW_ID; + + AdministrationView.SECTION_TOPOLOGY_VIEW_ID + "/" + StorageNodeAdminView.VIEW_ID;
public StorageNodeTableView() { super(null); @@ -80,7 +74,7 @@ public class StorageNodeTableView extends TableSection<StorageNodeDatasource> im } criteria.addCriteria(StorageNodeDatasource.FILTER_OPERATION_MODE, modes); setInitialCriteria(criteria); - setDataSource(new StorageNodeDatasource()); + setDataSource(StorageNodeDatasource.instance()); }
@Override @@ -334,11 +328,6 @@ public class StorageNodeTableView extends TableSection<StorageNodeDatasource> im }
@Override - public ViewName getViewName() { - return VIEW_ID; - } - - @Override protected String getBasePath() { return VIEW_PATH; }
commit 9edd541f204058f645f81c176e84b93a316f75c5 Author: Jirka Kremser jkremser@redhat.com Date: Wed Jul 17 14:08:53 2013 +0200
Adding the ".externalToolBuilders" dir to .gitignore
diff --git a/.gitignore b/.gitignore index 359687a..83bbeec 100644 --- a/.gitignore +++ b/.gitignore @@ -11,6 +11,8 @@ dev-agent/ dev-cli/ modules/enterprise/gui/portal-war/build/classes/ modules/enterprise/gui/coregui/build/classes/ +#eclipse specific +modules/enterprise/gui/coregui/.externalToolBuilders/ modules/enterprise/gui/content_http-war/build/classes/
#created by rhq-enterprise-server's unit tests
commit 300f6aac60f0e2cf60d3cc3a836e3369873ca5b3 Author: Jirka Kremser jkremser@redhat.com Date: Wed Jul 17 13:45:39 2013 +0200
Adding two new methods to StorageGWTService.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java index e746d41..df0e4cf 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java @@ -66,4 +66,8 @@ public interface StorageGWTService extends RemoteService { * @return instance of {@link StorageNodeLoadComposite} with the aggregate measurement data of selected metrics */ StorageNodeLoadComposite getLoad(StorageNode node, int lastN, int unit) throws RuntimeException; + + PageList<StorageNodeLoadComposite> getStorageNodeComposites() throws RuntimeException; + + Integer[] findResourcesWithAlertDefinitions() throws RuntimeException; } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java index c6dc16c..45508c7 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java @@ -24,12 +24,14 @@ package org.rhq.enterprise.gui.coregui.server.gwt;
import java.util.List;
+import org.rhq.core.clientapi.util.ArrayUtil; import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.cloud.StorageNodeLoadComposite; import org.rhq.core.domain.criteria.ResourceCriteria; import org.rhq.core.domain.criteria.StorageNodeCriteria; import org.rhq.core.domain.resource.Resource; import org.rhq.core.domain.util.PageList; +import org.rhq.core.domain.util.collection.ArrayUtils; import org.rhq.enterprise.gui.coregui.client.gwt.StorageGWTService; import org.rhq.enterprise.gui.coregui.server.util.SerialUtility; import org.rhq.enterprise.server.cloud.StorageNodeManagerLocal; @@ -91,4 +93,23 @@ public class StorageGWTServiceImpl extends AbstractGWTServiceImpl implements Sto throw getExceptionToThrowToClient(t); } } + + @Override + public PageList<StorageNodeLoadComposite> getStorageNodeComposites() throws RuntimeException { + try { + return SerialUtility.prepare(storageNodeManager.getStorageNodeComposites(), + "StorageGWTServiceImpl.getStorageNodeComposites"); + } catch (Throwable t) { + throw getExceptionToThrowToClient(t); + } + } + + @Override + public Integer[] findResourcesWithAlertDefinitions() throws RuntimeException { + try { + return storageNodeManager.findResourcesWithAlertDefinitions(); + } catch (Throwable t) { + throw getExceptionToThrowToClient(t); + } + } }
commit 14a1802c30ef6b0287fa7e5ed4f3533b2e8986cb Author: Jirka Kremser jkremser@redhat.com Date: Wed Jul 17 13:44:16 2013 +0200
Adding a new method for fetching all instances of the StorageNodeLoadComposite class.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 1f4220b..74a4713 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -478,6 +478,18 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN StorageNode.class); return query.getResultList(); } + + @Override + public PageList<StorageNodeLoadComposite> getStorageNodeComposites() { + List<StorageNode> nodes = getStorageNodes(); + PageList<StorageNodeLoadComposite> result = new PageList<StorageNodeLoadComposite>(); + long endTime = System.currentTimeMillis(); + long beginTime = endTime - (1 * 60 * 60 * 1000); + for (StorageNode node : nodes) { + result.add(getLoad(subjectManager.getOverlord(), node, beginTime, endTime)); + } + return result; + }
private List<StorageNode> getClusteredStorageNodes() { return entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, StorageNode.class) diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java index 238dd32..a22666e 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java @@ -43,6 +43,8 @@ public interface StorageNodeManagerLocal { String STORAGE_NODE_PLUGIN_NAME = "RHQStorage";
List<StorageNode> getStorageNodes(); + + PageList<StorageNodeLoadComposite> getStorageNodeComposites();
void linkResource(Resource resource);
commit 4e19b0a2cab798a0f10d5165abdc2ed8a26ede51 Author: Jirka Kremser jkremser@redhat.com Date: Wed Jul 17 13:43:13 2013 +0200
Exposing (making it public) the method findResourcesWithAlertDefinitions() on StorageNodeManager.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 327f064..1f4220b 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -650,7 +650,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN initialStorageNodes = new ArrayList<StorageNode>(); } } - + Queue<Resource> unvisitedResources = new LinkedList<Resource>(); for (StorageNode initialStorageNode : initialStorageNodes) { if (initialStorageNode.getResource() != null) { @@ -938,4 +938,4 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN return value; }
-} \ No newline at end of file +} diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java index 00ba9e7..238dd32 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java @@ -183,6 +183,8 @@ public interface StorageNodeManagerLocal { * @throws IllegalStateException if the group is not found or does not exist. */ ResourceGroup getStorageNodeGroup(); + + Integer[] findResourcesWithAlertDefinitions();
void scheduleOperationInNewTransaction(Subject subject, ResourceOperationSchedule schedule);
commit 44caa82fe1515aeb1bb1dec49aefa36714bced53 Author: Jirka Kremser jkremser@redhat.com Date: Wed Jul 17 13:18:10 2013 +0200
StorageNodeDatasource now also fetches the avg heap usage and avg disk space usage, because we show these aggregated metrics in the storage node table.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java index 07064b7..65314f4 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java @@ -21,8 +21,10 @@ package org.rhq.enterprise.gui.coregui.client.admin.storage; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_ADDRESS; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_CQL_PORT; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_CTIME; +import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_DISK; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_ID; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_JMX_PORT; +import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_MEMORY; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_MTIME; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_OPERATION_MODE; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_RESOURCE_ID; @@ -61,22 +63,29 @@ import org.rhq.enterprise.gui.coregui.client.util.RPCDataSource; import org.rhq.enterprise.server.measurement.util.MeasurementUtils;
/** - * Datasource for @see StorageNode. + * Datasource for @see StorageNodeDatasource + heap and disk usage. * * @author Jirka Kremser */ -public class StorageNodeDatasource extends RPCDataSource<StorageNode, StorageNodeCriteria> { - +public class StorageNodeDatasource extends RPCDataSource<StorageNodeLoadComposite, StorageNodeCriteria> { // filters public static final String FILTER_ADDRESS = FIELD_ADDRESS.propertyName(); public static final String FILTER_OPERATION_MODE = FIELD_OPERATION_MODE.propertyName(); - - public StorageNodeDatasource() { + private static StorageNodeDatasource instance; + + private StorageNodeDatasource() { super(); setID("storageNode"); List<DataSourceField> fields = addDataSourceFields(); addFields(fields); } + + public static StorageNodeDatasource instance() { + if (instance == null) { + instance = new StorageNodeDatasource(); + } + return instance; + }
@Override protected List<DataSourceField> addDataSourceFields() { @@ -105,10 +114,24 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNode, StorageNod ListGridField createdTimeField = FIELD_CTIME.getListGridField("120"); TimestampCellFormatter.prepareDateField(createdTimeField); fields.add(createdTimeField); - - ListGridField lastUpdateTimeField = FIELD_MTIME.getListGridField("120"); - TimestampCellFormatter.prepareDateField(lastUpdateTimeField); - fields.add(lastUpdateTimeField); + + ListGridField field = FIELD_MEMORY.getListGridField("90"); + field.setShowHover(true); + field.setHoverCustomizer(new HoverCustomizer() { + public String hoverHTML(Object value, ListGridRecord record, int rowNum, int colNum) { + return "Average disk space taken for last one hour."; + } + }); + fields.add(field); + + field = FIELD_DISK.getListGridField("90"); + field.setShowHover(true); + field.setHoverCustomizer(new HoverCustomizer() { + public String hoverHTML(Object value, ListGridRecord record, int rowNum, int colNum) { + return "Average memory taken for last one hour."; + } + }); + fields.add(field);
ListGridField resourceIdField = FIELD_RESOURCE_ID.getListGridField("120"); // resourceIdField.setHidden(true); @@ -119,8 +142,8 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNode, StorageNod
@Override protected void executeFetch(final DSRequest request, final DSResponse response, StorageNodeCriteria criteria) { - GWTServiceLookup.getStorageService().findStorageNodesByCriteria(criteria, new AsyncCallback<PageList<StorageNode>>() { - public void onSuccess(PageList<StorageNode> result) { + GWTServiceLookup.getStorageService().getStorageNodeComposites(new AsyncCallback<PageList<StorageNodeLoadComposite>>() { + public void onSuccess(PageList<StorageNodeLoadComposite> result) { response.setData(buildRecords(result)); response.setTotalRows(result.size()); processResponse(request.getRequestId(), response); @@ -161,23 +184,55 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNode, StorageNod }
@Override - public StorageNode copyValues(Record from) { + public StorageNodeLoadComposite copyValues(Record from) { throw new UnsupportedOperationException("StorageNodeDatasource.copyValues(Record from)"); }
@Override - public ListGridRecord copyValues(StorageNode from) { + public ListGridRecord copyValues(StorageNodeLoadComposite from) { ListGridRecord record = new ListGridRecord(); - record.setAttribute(FIELD_ID.propertyName(), from.getId()); - record.setAttribute(FIELD_ADDRESS.propertyName(), from.getAddress()); - record.setAttribute(FIELD_JMX_PORT.propertyName(), from.getJmxPort()); - record.setAttribute(FIELD_CQL_PORT.propertyName(), from.getCqlPort()); - record.setAttribute(FIELD_OPERATION_MODE.propertyName(), from.getOperationMode()); - record.setAttribute(FIELD_CTIME.propertyName(), from.getCtime()); - record.setAttribute(FIELD_MTIME.propertyName(), from.getMtime()); - if (from.getResource() != null) { - record.setAttribute(FIELD_RESOURCE_ID.propertyName(), from.getResource().getId()); + StorageNode node = from.getStorageNode(); + if (node != null) { + record.setAttribute(FIELD_ID.propertyName(), node.getId()); + record.setAttribute(FIELD_ADDRESS.propertyName(), node.getAddress()); + record.setAttribute(FIELD_JMX_PORT.propertyName(), node.getJmxPort()); + record.setAttribute(FIELD_CQL_PORT.propertyName(), node.getCqlPort()); + record.setAttribute(FIELD_OPERATION_MODE.propertyName(), node.getOperationMode()); + record.setAttribute(FIELD_CTIME.propertyName(), node.getCtime()); + record.setAttribute(FIELD_MTIME.propertyName(), node.getMtime()); + if (node.getResource() != null) { + record.setAttribute(FIELD_RESOURCE_ID.propertyName(), node.getResource().getId()); + } } + String memory = MeasurementConverterClient.format(from.getHeapPercentageUsed().getAggregate().getAvg(), + from.getHeapPercentageUsed().getUnits(), true); + record.setAttribute(FIELD_MEMORY.propertyName(), memory); + String disk = MeasurementConverterClient.format(from.getPartitionDiskUsedPercentage().getAggregate().getAvg(), + from.getPartitionDiskUsedPercentage().getUnits(), true); + record.setAttribute(FIELD_DISK.propertyName(), disk); + return record; + } + + + private ListGridRecord 
makeListGridRecord(MeasurementAggregateWithUnits aggregateWithUnits, String name, + String hover, String id) { + ListGridRecord record = new ListGridRecord(); + record.setAttribute("id", id); + record.setAttribute(StorageNodeLoadCompositeDatasourceField.FIELD_NAME.propertyName(), name); + record.setAttribute( + StorageNodeLoadCompositeDatasourceField.FIELD_MIN.propertyName(), + MeasurementConverterClient.format(aggregateWithUnits.getAggregate().getMin(), + aggregateWithUnits.getUnits(), true)); + record.setAttribute("avgFloat", aggregateWithUnits.getAggregate().getAvg()); + record.setAttribute( + StorageNodeLoadCompositeDatasourceField.FIELD_AVG.propertyName(), + MeasurementConverterClient.format(aggregateWithUnits.getAggregate().getAvg(), + aggregateWithUnits.getUnits(), true)); + record.setAttribute( + StorageNodeLoadCompositeDatasourceField.FIELD_MAX.propertyName(), + MeasurementConverterClient.format(aggregateWithUnits.getAggregate().getMax(), + aggregateWithUnits.getUnits(), true)); + record.setAttribute("hover", hover); return record; }
@@ -206,11 +261,7 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNode, StorageNod private int id;
public static StorageNodeLoadCompositeDatasource getInstance(int id) { -// if (instance == null) { - // instance = - return new StorageNodeLoadCompositeDatasource(id); -// } -// return instance; + return new StorageNodeLoadCompositeDatasource(id); }
public StorageNodeLoadCompositeDatasource(int id) { @@ -264,11 +315,9 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNode, StorageNod
@Override protected void executeFetch(final DSRequest request, final DSResponse response, StorageNodeCriteria criteria) { -// Integer id = getFilter(request, FIELD_ID.propertyName(), Integer.class); final StorageNode node = new StorageNode(); node.setId(id); - GWTServiceLookup.getStorageService().getLoad(node, 8, MeasurementUtils.UNIT_HOURS, - new AsyncCallback<StorageNodeLoadComposite>() { + executeFetch(node, new AsyncCallback<StorageNodeLoadComposite>() { public void onSuccess(final StorageNodeLoadComposite loadComposite) { ListGridRecord[] records = makeListGridRecords(loadComposite); response.setData(records); @@ -285,6 +334,10 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNode, StorageNod }); }
+ private static void executeFetch(final StorageNode node, final AsyncCallback<StorageNodeLoadComposite> callback) { + GWTServiceLookup.getStorageService().getLoad(node, 8, MeasurementUtils.UNIT_HOURS, callback); + } + private ListGridRecord[] makeListGridRecords(StorageNodeLoadComposite loadComposite) { List<ListGridRecord> recordsList = new ArrayList<ListGridRecord>(6); List<List<Object>> loadFields = Arrays diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasourceField.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasourceField.java index 0382f13..ca69076 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasourceField.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasourceField.java @@ -37,6 +37,10 @@ public enum StorageNodeDatasourceField { FIELD_CQL_PORT("cqlPort", "CQL Port"),
FIELD_OPERATION_MODE("operationMode", CoreGUI.getMessages().view_adminTopology_server_mode()), + + FIELD_MEMORY("memory", "Memory"), + + FIELD_DISK("disk", "Disk"),
FIELD_CTIME("ctime", CoreGUI.getMessages().view_adminTopology_serverDetail_installationDate()),
commit b228fcd4ddc2a1bb032e4ba9e4096f23922fe22c Author: Jirka Kremser jkremser@redhat.com Date: Wed Jul 17 13:16:09 2013 +0200
Modifying the AlertHistoryView to allow displaying alerts of various resources (not necessarily forming a group).
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/AlertHistoryView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/AlertHistoryView.java index 4c4c41f..bc74a83 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/AlertHistoryView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/AlertHistoryView.java @@ -114,6 +114,26 @@ public class AlertHistoryView extends TableSection<AlertDataSource> implements H setInitialCriteriaFixed(false); setDataSource(new AlertDataSource(context)); } + + public AlertHistoryView(String tableTitle, int[] resourceIds) { + super(tableTitle, new SortSpecifier[] { DEFAULT_SORT_SPECIFIER }); + + Criteria initialCriteria = new Criteria(); + AlertPriority[] priorityValues = AlertPriority.values(); + String[] priorityNames = new String[priorityValues.length]; + for (int i = 0, priorityValuesLength = priorityValues.length; i < priorityValuesLength; i++) { + priorityNames[i] = priorityValues[i].name(); + } + initialCriteria.addCriteria(AlertDataSource.FILTER_PRIORITIES, priorityNames); + initialCriteria.setAttribute("resourceIds", resourceIds); + setInitialCriteria(initialCriteria); + + this.context = new EntityContext(); + this.context.type = EntityContext.Type.SubsystemView; + this.hasWriteAccess = false; + + setDataSource(new AlertDataSource(context)); + }
@Override protected void configureTableFilters() {
commit 089d80191b85565760a5097e2cc5336fe4e9e6bd Author: Thomas Segismont tsegismo@redhat.com Date: Wed Aug 7 18:53:44 2013 +0200
Bug 980639 - JBoss Web connectors are not discovered if host name contains dash/hyphen (-)
Simplified the regular expression matching connector components.
Side fix: servers started with -b [hostname] instead of -b [ip address] have connectors with composed names: (myserver.com%2F127.0.0.1). It was not taken into account when trying to find the thread pool associated with it (in order to collect thread pool metrics).
Tested with EAP 5.1.2 and 5.2.0
diff --git a/modules/plugins/jboss-as-5/src/main/java/org/rhq/plugins/jbossas5/ConnectorComponent.java b/modules/plugins/jboss-as-5/src/main/java/org/rhq/plugins/jbossas5/ConnectorComponent.java index 6b7a68f..1bb0aa9 100644 --- a/modules/plugins/jboss-as-5/src/main/java/org/rhq/plugins/jbossas5/ConnectorComponent.java +++ b/modules/plugins/jboss-as-5/src/main/java/org/rhq/plugins/jbossas5/ConnectorComponent.java @@ -1,24 +1,20 @@ /* - * Jopr Management Platform - * Copyright (C) 2005-2009 Red Hat, Inc. + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation, and/or the GNU Lesser - * General Public License, version 2.1, also as published by the Free - * Software Foundation. + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License and the GNU Lesser General Public License - * for more details. + * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * and the GNU Lesser General Public License along with this program; - * if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
package org.rhq.plugins.jbossas5; @@ -28,16 +24,16 @@ import java.util.Set;
import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.rhq.core.domain.configuration.Configuration; -import org.rhq.core.domain.measurement.MeasurementReport; -import org.rhq.core.domain.measurement.MeasurementScheduleRequest; -import org.rhq.plugins.jbossas5.helper.MoreKnownComponentTypes; -import org.rhq.plugins.jbossas5.util.ResourceComponentUtils;
import org.jboss.deployers.spi.management.ManagementView; import org.jboss.managed.api.ComponentType; import org.jboss.managed.api.ManagedComponent;
+import org.rhq.core.domain.configuration.Configuration; +import org.rhq.core.domain.measurement.MeasurementReport; +import org.rhq.core.domain.measurement.MeasurementScheduleRequest; +import org.rhq.plugins.jbossas5.helper.MoreKnownComponentTypes; + /** * A ResourceComponent for managing a JBoss Web connector. * @@ -45,29 +41,21 @@ import org.jboss.managed.api.ManagedComponent; */ public class ConnectorComponent extends ManagedComponentComponent { - static final String PROTOCOL_PROPERTY = "protocol"; - static final String ADDRESS_PROPERTY = "address"; - static final String PORT_PROPERTY = "port"; + private static final Log LOG = LogFactory.getLog(ConnectorComponent.class);
private static final String THREAD_POOL_METRIC_PREFIX = "ThreadPool" + PREFIX_DELIMITER;
- // A regex for the name of a particular MBean:WebThreadPool component, - // e.g. "jboss.web:name=http-127.0.0.1-8080,type=ThreadPool" - private static final String WEB_THREAD_POOL_COMPONENT_NAME_TEMPLATE = - "jboss.web:name=%" + PROTOCOL_PROPERTY + "%-%" + ADDRESS_PROPERTY + "%-%" + PORT_PROPERTY + "%," - + "type=ThreadPool"; - - private final Log log = LogFactory.getLog(this.getClass()); + static final String PROTOCOL_PROPERTY = "protocol"; + static final String HOST_PROPERTY = "host"; + static final String ADDRESS_PROPERTY = "address"; + static final String PORT_PROPERTY = "port";
@Override public void getValues(MeasurementReport report, Set<MeasurementScheduleRequest> requests) throws Exception { Set<MeasurementScheduleRequest> remainingRequests = new LinkedHashSet(); - Configuration pluginConfig = getResourceContext().getPluginConfiguration(); - String webThreadPoolComponentName = - ResourceComponentUtils.replacePropertyExpressionsInTemplate(WEB_THREAD_POOL_COMPONENT_NAME_TEMPLATE, - pluginConfig); + String webThreadPoolComponentName = getWebThreadPoolComponentName(getResourceContext().getPluginConfiguration()); ComponentType webThreadPoolComponentType = MoreKnownComponentTypes.MBean.WebThreadPool.getType(); ManagementView managementView = getConnection().getManagementView(); ManagedComponent webThreadPoolComponent = managementView.getComponent(webThreadPoolComponentName, @@ -90,10 +78,25 @@ public class ConnectorComponent extends ManagedComponentComponent catch (Exception e) { // Don't let one bad apple spoil the barrel. - log.error("Failed to collect metric '" + metricName + "' for " + getResourceContext().getResourceType() + LOG.error("Failed to collect metric '" + metricName + "' for " + getResourceContext().getResourceType() + " Resource with key " + getResourceContext().getResourceKey() + ".", e); } } super.getValues(report, remainingRequests); } -} \ No newline at end of file + + private String getWebThreadPoolComponentName(Configuration pluginConfig) { + StringBuilder webThreadPoolComponentNameBuilder = new StringBuilder("jboss.web:name=") // + .append(pluginConfig.getSimpleValue(PROTOCOL_PROPERTY)) // + .append("-"); + if (pluginConfig.getSimpleValue(HOST_PROPERTY) != null) { + webThreadPoolComponentNameBuilder.append(pluginConfig.getSimpleValue(HOST_PROPERTY)) // + .append("%2F"); + } + webThreadPoolComponentNameBuilder.append(pluginConfig.getSimpleValue(ADDRESS_PROPERTY)) // + .append("-") // + .append(pluginConfig.getSimpleValue(PORT_PROPERTY)) // + .append(",type=ThreadPool"); + return 
webThreadPoolComponentNameBuilder.toString(); + } +} diff --git a/modules/plugins/jboss-as-5/src/main/java/org/rhq/plugins/jbossas5/ConnectorDiscoveryComponent.java b/modules/plugins/jboss-as-5/src/main/java/org/rhq/plugins/jbossas5/ConnectorDiscoveryComponent.java index ce63947..165e490 100644 --- a/modules/plugins/jboss-as-5/src/main/java/org/rhq/plugins/jbossas5/ConnectorDiscoveryComponent.java +++ b/modules/plugins/jboss-as-5/src/main/java/org/rhq/plugins/jbossas5/ConnectorDiscoveryComponent.java @@ -1,28 +1,30 @@ /* - * Jopr Management Platform - * Copyright (C) 2005-2009 Red Hat, Inc. + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation, and/or the GNU Lesser - * General Public License, version 2.1, also as published by the Free - * Software Foundation. + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License and the GNU Lesser General Public License - * for more details. + * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * and the GNU Lesser General Public License along with this program; - * if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
package org.rhq.plugins.jbossas5;
+import static org.rhq.plugins.jbossas5.ConnectorComponent.ADDRESS_PROPERTY; +import static org.rhq.plugins.jbossas5.ConnectorComponent.HOST_PROPERTY; +import static org.rhq.plugins.jbossas5.ConnectorComponent.PORT_PROPERTY; +import static org.rhq.plugins.jbossas5.ConnectorComponent.PROTOCOL_PROPERTY; +import static org.rhq.plugins.jbossas5.ManagedComponentComponent.Config.COMPONENT_NAME; + import java.util.LinkedHashSet; import java.util.Set; import java.util.regex.Matcher; @@ -30,6 +32,11 @@ import java.util.regex.Pattern;
import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; + +import org.jboss.deployers.spi.management.ManagementView; +import org.jboss.managed.api.ComponentType; +import org.jboss.managed.api.ManagedComponent; + import org.rhq.core.domain.configuration.Configuration; import org.rhq.core.domain.configuration.PropertySimple; import org.rhq.core.domain.resource.ResourceType; @@ -37,12 +44,8 @@ import org.rhq.core.pluginapi.inventory.DiscoveredResourceDetails; import org.rhq.core.pluginapi.inventory.ResourceDiscoveryComponent; import org.rhq.core.pluginapi.inventory.ResourceDiscoveryContext; import org.rhq.plugins.jbossas5.helper.MoreKnownComponentTypes; -import org.rhq.plugins.jbossas5.util.RegularExpressionNameMatcher; import org.rhq.plugins.jbossas5.util.ManagedComponentUtils; - -import org.jboss.deployers.spi.management.ManagementView; -import org.jboss.managed.api.ComponentType; -import org.jboss.managed.api.ManagedComponent; +import org.rhq.plugins.jbossas5.util.RegularExpressionNameMatcher;
/** * A component for discovering JBoss Web connectors. @@ -52,18 +55,24 @@ import org.jboss.managed.api.ManagedComponent; public class ConnectorDiscoveryComponent implements ResourceDiscoveryComponent<JBossWebComponent> { + private static final Log LOG = LogFactory.getLog(ConnectorDiscoveryComponent.class); + // A regex for the names of all MBean:WebRequestProcessor components, // e.g. "jboss.web:name=http-127.0.0.1-8080,type=GlobalRequestProcessor" private static final String WEB_REQUEST_PROCESSOR_COMPONENT_NAMES_REGEX = - "jboss.web:name=([^\\-]+)-([^\\-]+)-([0-9]+),type=GlobalRequestProcessor"; + "jboss.web:name=([^\\-]+)-(.+)-([0-9]+),type=GlobalRequestProcessor";
- private final Log log = LogFactory.getLog(this.getClass()); + private static final Pattern WEB_REQUEST_PROCESSOR_COMPONENT_NAMES_PATTERN = Pattern + .compile(WEB_REQUEST_PROCESSOR_COMPONENT_NAMES_REGEX); + + private static final Pattern HOSTADRESS_PATTERN = Pattern + .compile("(.*)(%2F)(\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})");
public Set<DiscoveredResourceDetails> discoverResources( ResourceDiscoveryContext<JBossWebComponent> discoveryContext) throws Exception { ResourceType resourceType = discoveryContext.getResourceType(); - log.trace("Discovering " + resourceType.getName() + " Resources..."); + LOG.trace("Discovering " + resourceType.getName() + " Resources...");
JBossWebComponent jbossWebComponent = discoveryContext.getParentResourceComponent(); ManagementView managementView = jbossWebComponent.getConnection().getManagementView(); @@ -78,16 +87,28 @@ public class ConnectorDiscoveryComponent { // Parse the component name, e.g. "jboss.web:name=http-127.0.0.1-8080,type=GlobalRequestProcessor", to // figure out the protocol, address, and port. - Pattern pattern = Pattern.compile(WEB_REQUEST_PROCESSOR_COMPONENT_NAMES_REGEX); - Matcher matcher = pattern.matcher(webRequestProcessorComponent.getName()); + Matcher matcher = WEB_REQUEST_PROCESSOR_COMPONENT_NAMES_PATTERN.matcher(webRequestProcessorComponent + .getName()); if (!matcher.matches()) { - log.error("Component name '" + webRequestProcessorComponent.getName() + "' does not match regex '" - + pattern + "'."); + LOG.error("Component name '" + webRequestProcessorComponent.getName() + "' does not match regex '" + + WEB_REQUEST_PROCESSOR_COMPONENT_NAMES_PATTERN + "'."); continue; } String protocol = matcher.group(1); - String address = matcher.group(2); + String host = null; + String address = null; + String hostAddress = matcher.group(2); + Matcher hostAddressMatcher = HOSTADRESS_PATTERN.matcher(hostAddress); + if (hostAddressMatcher.matches()) { + // We have a composed host address string: my-server.com%2F127.0.0.98 + host = hostAddressMatcher.group(1); + address = hostAddressMatcher.group(3); + } else { + // We only have an IP address + address = hostAddress; + } + int port = Integer.valueOf(matcher.group(3));
String resourceKey = protocol + "://" + address + ":" + port; @@ -96,11 +117,11 @@ public class ConnectorDiscoveryComponent String resourceVersion = null;
Configuration pluginConfig = discoveryContext.getDefaultPluginConfiguration(); - pluginConfig.put(new PropertySimple(ManagedComponentComponent.Config.COMPONENT_NAME, - webRequestProcessorComponent.getName())); - pluginConfig.put(new PropertySimple(ConnectorComponent.PROTOCOL_PROPERTY, protocol)); - pluginConfig.put(new PropertySimple(ConnectorComponent.ADDRESS_PROPERTY, address)); - pluginConfig.put(new PropertySimple(ConnectorComponent.PORT_PROPERTY, port)); + pluginConfig.put(new PropertySimple(COMPONENT_NAME, webRequestProcessorComponent.getName())); + pluginConfig.put(new PropertySimple(PROTOCOL_PROPERTY, protocol)); + pluginConfig.put(new PropertySimple(HOST_PROPERTY, host)); + pluginConfig.put(new PropertySimple(ADDRESS_PROPERTY, address)); + pluginConfig.put(new PropertySimple(PORT_PROPERTY, port));
DiscoveredResourceDetails resource = new DiscoveredResourceDetails(resourceType, @@ -114,7 +135,7 @@ public class ConnectorDiscoveryComponent discoveredResources.add(resource); }
- log.trace("Discovered " + discoveredResources.size() + " " + resourceType.getName() + " Resources."); + LOG.trace("Discovered " + discoveredResources.size() + " " + resourceType.getName() + " Resources."); return discoveredResources; }
@@ -122,9 +143,7 @@ public class ConnectorDiscoveryComponent throws Exception { ComponentType webRequestProcessorComponentType = MoreKnownComponentTypes.MBean.WebRequestProcessor.getType(); - //return managementView.getMatchingComponents(WEB_REQUEST_PROCESSOR_COMPONENT_NAMES_REGEX, - // webRequestProcessorComponentType, new RegularExpressionNameMatcher()); return ManagedComponentUtils.getManagedComponents(managementView, webRequestProcessorComponentType, WEB_REQUEST_PROCESSOR_COMPONENT_NAMES_REGEX, new RegularExpressionNameMatcher()); } -} \ No newline at end of file +}
commit b4cf20f66b562fb7cc911ea83e68387683468adc Author: John Sanda jsanda@redhat.com Date: Wed Aug 7 11:31:52 2013 -0400
store storage cluster settings in the system settings
The cluster settings are stored as read-only properties in the system settings because they should be updated through the storage subsystem.
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/common/composite/SystemSetting.java b/modules/core/domain/src/main/java/org/rhq/core/domain/common/composite/SystemSetting.java index 1c2f5b9..0f2f27c 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/common/composite/SystemSetting.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/common/composite/SystemSetting.java @@ -110,6 +110,14 @@ public enum SystemSetting { /** The length of CoreGUI inactivity (no call to UserSessionManager.refresh()) before a CoreGUI session timeout, Default: 1 hour */ RHQ_SESSION_TIMEOUT("RHQ_SESSION_TIMEOUT", PropertySimpleType.LONG, false, true),
+ /** + * The STORAGE settings are all read-only and deal with shared, cluster-wide settings + * among storage nodes. They are read-only because they should only be updated through + * the storage subsystem. + */ + STORAGE_CQL_PORT("STORAGE_CQL_PORT", PropertySimpleType.INTEGER, true, true), + STORAGE_GOSSIP_PORT("STORAGE_GOSSIP_PORT", PropertySimpleType.INTEGER, true, true), + //these seem to be unused yet still present in the database... @Deprecated HELP_USER("CAM_HELP_USER", PropertySimpleType.STRING, true, false), @@ -162,6 +170,12 @@ public enum SystemSetting { return isBoolean(value); case LONG: return isLong(value); + case INTEGER: + return isInteger(value); + case FLOAT: + return isFloat(value); + case DOUBLE: + return isDouble(value); default: throw new IllegalStateException("A system property '" + internalName + "' doesn't know how to validate its value which should have type '" + type + "'."); @@ -199,6 +213,42 @@ public enum SystemSetting { } }
+ private static boolean isInteger(String value) { + if (value == null) { + return true; + } + try { + Integer.parseInt(value); + return true; + } catch (NumberFormatException e) { + return false; + } + } + + private static boolean isFloat(String value) { + if (value == null) { + return true; + } + try { + Float.parseFloat(value); + return true; + } catch (NumberFormatException e) { + return false; + } + } + + private static boolean isDouble(String value) { + if (value == null) { + return true; + } + try { + Double.parseDouble(value); + return true; + } catch (NumberFormatException e) { + return false; + } + } + private static boolean isBoolean(String value) { //be more strict about the values than Boolean.valueOf or Boolean.parseBoolean return value == null || Boolean.toString(true).equalsIgnoreCase(value) diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index f17f006..327f064 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -28,7 +28,6 @@ import java.net.InetAddress; import java.net.UnknownHostException; import java.util.ArrayList; import java.util.Arrays; -import java.util.Date; import java.util.HashMap; import java.util.LinkedList; import java.util.List; @@ -46,9 +45,6 @@ import javax.persistence.TypedQuery;
import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.quartz.JobDataMap; -import org.quartz.SimpleTrigger; -import org.quartz.Trigger;
import org.rhq.cassandra.schema.SchemaManager; import org.rhq.core.domain.alert.Alert; @@ -94,8 +90,8 @@ import org.rhq.enterprise.server.resource.ResourceTypeManagerLocal; import org.rhq.enterprise.server.resource.group.ResourceGroupManagerLocal; import org.rhq.enterprise.server.rest.reporting.MeasurementConverter; import org.rhq.enterprise.server.scheduler.SchedulerLocal; -import org.rhq.enterprise.server.scheduler.jobs.StorageNodeMaintenanceJob; -import org.rhq.enterprise.server.storage.StorageConfigurationException; +import org.rhq.enterprise.server.storage.StorageClusterSettings; +import org.rhq.enterprise.server.storage.StorageClusterSettingsManagerBean; import org.rhq.enterprise.server.util.CriteriaQueryGenerator; import org.rhq.enterprise.server.util.CriteriaQueryRunner; import org.rhq.enterprise.server.util.LookupUtil; @@ -118,6 +114,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN private final static String SEEDS_LIST = "seedsList";
private static final String RHQ_STORAGE_CQL_PORT_PROPERTY = "nativeTransportPort"; + private static final String RHQ_STORAGE_GOSSIP_PORT_PROPERTY = "storagePort"; private static final String RHQ_STORAGE_JMX_PORT_PROPERTY = "jmxPort"; private static final String RHQ_STORAGE_ADDRESS_PROPERTY = "host";
@@ -159,10 +156,13 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN @EJB private ResourceManagerLocal resourceManager;
+ @EJB + private StorageClusterSettingsManagerBean storageClusterSettingsManager; + @Override public void linkResource(Resource resource) { - Configuration resourceConfig = resource.getPluginConfiguration(); - String address = resourceConfig.getSimpleValue(RHQ_STORAGE_ADDRESS_PROPERTY); + Configuration pluginConfig = resource.getPluginConfiguration(); + String address = pluginConfig.getSimpleValue(RHQ_STORAGE_ADDRESS_PROPERTY);
if (log.isInfoEnabled()) { log.info("Linking " + resource + " to storage node at " + address); @@ -176,12 +176,13 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN } storageNode.setResource(resource); storageNode.setOperationMode(OperationMode.NORMAL); + initClusterSettingsIfNecessary(pluginConfig); addStorageNodeToGroup(resource); } else { storageNode = new StorageNode(); storageNode.setAddress(address); - storageNode.setCqlPort(Integer.parseInt(resourceConfig.getSimpleValue(RHQ_STORAGE_CQL_PORT_PROPERTY))); - storageNode.setJmxPort(Integer.parseInt(resourceConfig.getSimpleValue(RHQ_STORAGE_JMX_PORT_PROPERTY))); + storageNode.setCqlPort(Integer.parseInt(pluginConfig.getSimpleValue(RHQ_STORAGE_CQL_PORT_PROPERTY))); + storageNode.setJmxPort(Integer.parseInt(pluginConfig.getSimpleValue(RHQ_STORAGE_JMX_PORT_PROPERTY))); storageNode.setResource(resource); storageNode.setOperationMode(OperationMode.INSTALLED);
@@ -200,6 +201,31 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN } }
+ private void initClusterSettingsIfNecessary(Configuration pluginConfig) { + // TODO Need to handle non-repeatable reads here (probably a post 4.9 task) + // + // If a user deploys two storage nodes prior to installing the RHQ server, then we + // could end up in this method concurrently for both storage nodes. The settings + // would be committed for each node with the second commit winning. The problem is + // that is the cluster settings differ for the two nodes, it will be silently + // ignored. This scenario will happen infrequently so it should be sufficient to + // resolve it with optimistic locking. The second writer should fail with an + // OptimisticLockException. + + log.info("Initializing storage cluster settings"); + + StorageClusterSettings clusterSettings = storageClusterSettingsManager.getClusterSettings( + subjectManager.getOverlord()); + if (clusterSettings != null) { + log.info("Cluster settings have already been set. Skipping initialization."); + return; + } + clusterSettings = new StorageClusterSettings(); + clusterSettings.setCqlPort(Integer.parseInt(pluginConfig.getSimpleValue(RHQ_STORAGE_CQL_PORT_PROPERTY))); + clusterSettings.setGossipPort(Integer.parseInt(pluginConfig.getSimpleValue(RHQ_STORAGE_GOSSIP_PORT_PROPERTY))); + storageClusterSettingsManager.setClusterSettings(subjectManager.getOverlord(), clusterSettings); + } + private void announceNewNode(StorageNode newStorageNode) { if (log.isInfoEnabled()) { log.info("Announcing " + newStorageNode + " to storage node cluster."); @@ -506,54 +532,6 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN return formattedValue; }
- private List<StorageNode> parseSeedsProperty(String seedsProperty) { - String[] seeds = seedsProperty.split(","); - List<StorageNode> storageNodes = new ArrayList<StorageNode>(); - for (String seed : seeds) { - StorageNode node = new StorageNode(); - node.setOperationMode(OperationMode.INSTALLED); - node.parseNodeInformation(seed); - storageNodes.add(node); - } - return storageNodes; - } - - private void scheduleQuartzJob(int clusterSize) { - String jobName = StorageNodeMaintenanceJob.class.getName(); - String jobGroupName = StorageNodeMaintenanceJob.class.getName(); - String triggerName = StorageNodeMaintenanceJob.class.getName(); - Date jobTime = new Date(System.currentTimeMillis() + 30000); - - Trigger trigger = new SimpleTrigger(triggerName, jobGroupName, jobTime); - trigger.setJobName(jobName); - trigger.setJobGroup(jobGroupName); - try { - JobDataMap jobDataMap = new JobDataMap(); - jobDataMap.put(StorageNodeMaintenanceJob.JOB_DATA_PROPERTY_CLUSTER_SIZE, Integer.toString(clusterSize)); - trigger.setJobDataMap(jobDataMap); - - quartzScheduler.scheduleJob(trigger); - } catch (Throwable t) { - log.warn("Unable to schedule storage node maintenance job", t); - } - } - - private void updateStorageNodes(Map<String, StorageNode> storageNodeMap) { - for (Map.Entry<String, StorageNode> storageNodeEntry : storageNodeMap.entrySet()) { - TypedQuery<StorageNode> query = entityManager.<StorageNode> createNamedQuery( - StorageNode.QUERY_FIND_BY_ADDRESS, StorageNode.class); - query.setParameter("address", storageNodeEntry.getKey()); - List<StorageNode> result = query.getResultList(); - if (!result.isEmpty()) { - storageNodeEntry.getValue().setId(result.get(0).getId()); - entityManager.merge(storageNodeEntry.getValue()); - } else { - entityManager.persist(storageNodeEntry.getValue()); - } - } - entityManager.flush(); - } - private StorageNodeLoadComposite.MeasurementAggregateWithUnits getMeasurementAggregateWithUnits(Subject subject, int schedId, MeasurementUnits units, long 
beginTime, long endTime) { MeasurementAggregate measurementAggregate = measurementManager.getAggregate(subject, schedId, beginTime, @@ -855,17 +833,17 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN log.info("Preparing to bootstrap " + storageNode + " into cluster..."); }
- List<StorageNode> existingStorageNodes = getClusteredStorageNodes(); - ResourceOperationSchedule schedule = new ResourceOperationSchedule(); schedule.setResource(storageNode.getResource()); schedule.setJobTrigger(JobTrigger.createNowTrigger()); schedule.setSubject(subjectManager.getOverlord()); schedule.setOperationName("prepareForBootstrap");
+ StorageClusterSettings clusterSettings = storageClusterSettingsManager.getClusterSettings( + subjectManager.getOverlord()); Configuration parameters = new Configuration(); - parameters.put(new PropertySimple("cqlPort", existingStorageNodes.get(0).getCqlPort())); - parameters.put(new PropertySimple("gossipPort", getGossipPort(storageNode, existingStorageNodes))); + parameters.put(new PropertySimple("cqlPort", clusterSettings.getCqlPort())); + parameters.put(new PropertySimple("gossipPort", clusterSettings.getGossipPort())); parameters.put(createPropertyListOfAddresses("storageNodeIPAddresses", getClusteredStorageNodes()));
schedule.setParameters(parameters); @@ -873,44 +851,6 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN operationManager.scheduleResourceOperation(subjectManager.getOverlord(), schedule); }
- private Integer getGossipPort(StorageNode newStorageNode, List<StorageNode> storageNodes) { - if (log.isInfoEnabled()) { - log.info("Looking up gossip port for new storage node " + newStorageNode); - } - try { - StorageNode node = null; - Configuration resourceConfig = null; - for (StorageNode storageNode : storageNodes) { - resourceConfig = configurationManager.getLiveResourceConfiguration(subjectManager.getOverlord(), - storageNode.getResource().getId(), false); - if (resourceConfig == null) { - log.warn("Failed to load resource configuration for storage node " + newStorageNode.getResource()); - } else { - node = storageNode; - break; - } - } - if (resourceConfig == null) { - log.error("Failed to obtain gossip port from existing storage nodes"); - throw new StorageConfigurationException("Failed to obtain gossip port from existing storage nodes"); - } - - PropertySimple property = resourceConfig.getSimple("gossipPort"); - if (property == null) { - throw new StorageConfigurationException("The resource configuration for " + node.getResource() + - "did not include the required property [gossipPort]"); - } - Integer port = property.getIntegerValue(); - log.info("Found gossip port set to " + port); - return property.getIntegerValue(); - } catch (Exception e) { - if (e instanceof StorageConfigurationException) { - throw (StorageConfigurationException) e; - } - throw new RuntimeException("An error occurred while trying to obtain the gossip port", e); - } - } - @Override public void runAddNodeMaintenance() { log.info("Preparing to schedule addNodeMaintenance on the storage cluster..."); diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettings.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettings.java new file mode 100644 index 0000000..2098acd --- /dev/null +++ 
b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettings.java @@ -0,0 +1,56 @@ +package org.rhq.enterprise.server.storage; + +import java.io.Serializable; + +/** + * @author John Sanda + */ +public class StorageClusterSettings implements Serializable { + + private static final long serialVersionUID = 1; + + private int cqlPort; + + private int gossipPort; + + public int getCqlPort() { + return cqlPort; + } + + public void setCqlPort(int cqlPort) { + this.cqlPort = cqlPort; + } + + public int getGossipPort() { + return gossipPort; + } + + public void setGossipPort(int gossipPort) { + this.gossipPort = gossipPort; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + StorageClusterSettings that = (StorageClusterSettings) o; + + if (cqlPort != that.cqlPort) return false; + if (gossipPort != that.gossipPort) return false; + + return true; + } + + @Override + public int hashCode() { + int result = cqlPort; + result = 29 * result + gossipPort; + return result; + } + + @Override + public String toString() { + return "StorageClusterSettings[cqlPort=" + cqlPort + ", gossipPort=" + gossipPort + "]"; + } +} diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerBean.java new file mode 100644 index 0000000..e6b1fb7 --- /dev/null +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerBean.java @@ -0,0 +1,49 @@ +package org.rhq.enterprise.server.storage; + +import java.util.Map; + +import javax.ejb.EJB; +import javax.ejb.Singleton; + +import org.rhq.core.domain.auth.Subject; +import org.rhq.core.domain.common.composite.SystemSetting; +import org.rhq.core.domain.common.composite.SystemSettings; +import 
org.rhq.enterprise.server.system.SystemManagerLocal; + +/** + * @author John Sanda + */ +@Singleton +public class StorageClusterSettingsManagerBean { + + @EJB + private SystemManagerLocal systemManager; + + public StorageClusterSettings getClusterSettings(Subject subject) { + SystemSettings settings = systemManager.getSystemSettings(subject); + Map<String, String> settingsMap = settings.toMap(); + StorageClusterSettings clusterSettings = new StorageClusterSettings(); + + if (!settingsMap.containsKey(SystemSetting.STORAGE_CQL_PORT)) { + return null; + } else { + clusterSettings.setCqlPort(Integer.parseInt(settingsMap.get(SystemSetting.STORAGE_CQL_PORT))); + } + + if (!settingsMap.containsKey(SystemSetting.STORAGE_GOSSIP_PORT)) { + return null; + } else { + clusterSettings.setGossipPort(Integer.parseInt(settingsMap.get(SystemSetting.STORAGE_GOSSIP_PORT))); + } + + return clusterSettings; + } + + public void setClusterSettings(Subject subject, StorageClusterSettings clusterSettings) { + SystemSettings settings = new SystemSettings(); + settings.put(SystemSetting.STORAGE_CQL_PORT, Integer.toString(clusterSettings.getCqlPort())); + settings.put(SystemSetting.STORAGE_GOSSIP_PORT, Integer.toString(clusterSettings.getGossipPort())); + systemManager.setStorageClusterSettings(subject, settings); + } + +} diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/system/SystemManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/system/SystemManagerBean.java index dcc81d5..aabd62b 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/system/SystemManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/system/SystemManagerBean.java @@ -200,10 +200,22 @@ public class SystemManagerBean implements SystemManagerLocal, SystemManagerRemot
@RequiredPermission(Permission.MANAGE_SETTINGS) public void setSystemSettings(Subject subject, SystemSettings settings) { - setSystemSettings(settings, false); + setSystemSettings(settings, false, false); }
- private void setSystemSettings(SystemSettings settings, boolean skipValidation) { + @RequiredPermission(Permission.MANAGE_SETTINGS) + @Override + public void setStorageClusterSettings(Subject subject, SystemSettings settings) { + for (SystemSetting setting : settings.keySet()) { + if (!isStorageSetting(setting)) { + throw new IllegalArgumentException(setting + " cannot be updated through this method. This method " + + "only allows updating of storage cluster settings."); + } + } + setSystemSettings(settings, false, true); + } + + private void setSystemSettings(SystemSettings settings, boolean skipValidation, boolean updateStorageSettings) { // first, we need to get the current settings so we'll know if we need to persist or merge the new ones @SuppressWarnings("unchecked") List<SystemConfiguration> configs = entityManager.createNamedQuery(SystemConfiguration.QUERY_FIND_ALL) @@ -246,8 +258,9 @@ public class SystemManagerBean implements SystemManagerLocal, SystemManagerRemot if ((existingValue == null && value != null) || !existingValue.equals(value)) { //SystemSetting#isReadOnly should be a superset of the "fReadOnly" field in the database //but let's just be super paranoid here... - if (prop.isReadOnly() - || (existingConfig.getFreadOnly() != null && existingConfig.getFreadOnly().booleanValue())) { + if ((prop.isReadOnly() + || (existingConfig.getFreadOnly() != null && existingConfig.getFreadOnly().booleanValue())) && + !(isStorageSetting(prop) || updateStorageSettings)) { throw new IllegalArgumentException("The setting [" + prop.getInternalName() + "] is read-only - you cannot change its current value! Current value is [" + existingConfig.getPropertyValue() + "] while the new value was [" + value + "]."); @@ -269,6 +282,14 @@ public class SystemManagerBean implements SystemManagerLocal, SystemManagerRemot cachedSystemSettings = null; }
+ private boolean isStorageSetting(SystemSetting setting) { + switch (setting) { + case STORAGE_CQL_PORT: return true; + case STORAGE_GOSSIP_PORT: return true; + default: return false; + } + } + private Map<String, String> toMap(Properties props) { HashMap<String, String> map = new HashMap<String, String>(props.size()); for (Map.Entry<Object, Object> entry : props.entrySet()) { @@ -367,7 +388,7 @@ public class SystemManagerBean implements SystemManagerLocal, SystemManagerRemot
SystemSettings settings = SystemSettings.fromMap(map);
- setSystemSettings(settings, skipValidation); + setSystemSettings(settings, skipValidation, false); }
@RequiredPermission(Permission.MANAGE_SETTINGS) diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/system/SystemManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/system/SystemManagerLocal.java index 1c5dc83..9f48985 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/system/SystemManagerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/system/SystemManagerLocal.java @@ -24,6 +24,7 @@ import javax.ejb.Local;
import org.rhq.core.db.DatabaseType; import org.rhq.core.domain.auth.Subject; +import org.rhq.core.domain.common.composite.SystemSettings;
/** * Provides access to the server cloud's system configuration as well as some methods @@ -141,4 +142,14 @@ public interface SystemManagerLocal extends SystemManagerRemote {
void dumpSystemInfo(Subject subject);
+ /** + * The storage cluster settings are stored as read-only system settings. They should be updated through the storage + * subsystem. This API is provided for use ONLY by the storage subsystem. + * + * @param subject The user who wants to change the settings + * @param settings The new storage cluster settings + * @throws IllegalArgumentException If the settings contain anything other than storage cluster settings. + */ + void setStorageClusterSettings(Subject subject, SystemSettings settings); + } \ No newline at end of file
commit 675faf81a5c8876cea7808d5caf95eacde7dfe60 Merge: 2957d5e a26cfb7 Author: Jay Shaughnessy jshaughn@redhat.com Date: Wed Aug 7 08:53:32 2013 -0400
Merge branch 'feature/bundle-group'
commit 2957d5edc3d6e2cf35fa7078af88548c8f738203 Author: Thomas Segismont tsegismo@redhat.com Date: Wed Aug 7 14:29:27 2013 +0200
Bug 948076 - EAP Host controller resource reports: The original product type discovered for this AS7 server was JBoss EAP 6, but the server is now reporting its product type is [JBoss AS 7]
When validating resource configuration, "product-name" attribute must be read on the "host" node address (root node on standalone servers, host=[hostname] on host controllers)
diff --git a/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/BaseServerComponent.java b/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/BaseServerComponent.java index 0b5dbf5..9c2a799 100644 --- a/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/BaseServerComponent.java +++ b/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/BaseServerComponent.java @@ -207,7 +207,7 @@ public abstract class BaseServerComponent<T extends ResourceComponent<?>> extend // Validate the product type (e.g. AS or EAP). JBossProductType runtimeType; try { - String runtimeTypeString = readAttribute("product-name"); + String runtimeTypeString = readAttribute(getHostAddress(), "product-name"); runtimeType = (runtimeTypeString != null && !runtimeTypeString.isEmpty()) ? JBossProductType .getValueByProductName(runtimeTypeString) : JBossProductType.AS; } catch (Exception e) {
commit a26cfb77ab7e444a9cd6dab5e3d2f867296a3a54 Author: Jay Shaughnessy jshaughn@redhat.com Date: Tue Aug 6 21:34:35 2013 -0400
- fix bug in bundleversion filename naming that caused a failure - change strategy when creating test bundletype to avoid a lot of Exception generation/logging.
diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/BundleManagerBeanTest.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/BundleManagerBeanTest.java index 470f69b..55fce08 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/BundleManagerBeanTest.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/BundleManagerBeanTest.java @@ -781,9 +781,9 @@ public class BundleManagerBeanTest extends AbstractEJB3Test { assertNotNull(b1); BundleVersion bv1 = createBundleVersion(b1.getName(), "1.0", b1); assertNotNull(bv1); - BundleFile bf1 = bundleManager.addBundleFileViaByteArray(overlord, bv1.getId(), TEST_PREFIX + "-bundlefile-1", + BundleFile bf1 = bundleManager.addBundleFileViaByteArray(overlord, bv1.getId(), "bundletest-bundlefile-1", "1.0", null, "Test Bundle File # 1".getBytes()); - BundleFile bf2 = bundleManager.addBundleFileViaByteArray(overlord, bv1.getId(), TEST_PREFIX + "-bundlefile-2", + BundleFile bf2 = bundleManager.addBundleFileViaByteArray(overlord, bv1.getId(), "bundletest-bundlefile-2", "1.0", null, "Test Bundle File # 2".getBytes()); }
@@ -928,12 +928,12 @@ public class BundleManagerBeanTest extends AbstractEJB3Test { Set<String> filenames = bundleManager.getBundleVersionFilenames(overlord, bv1.getId(), true); assertNotNull(filenames); assertEquals(DEFAULT_CRITERIA_PAGE_SIZE + 2, filenames.size()); - BundleFile bf1 = bundleManager.addBundleFileViaByteArray(overlord, bv1.getId(), TEST_PREFIX + "-bundlefile-1", + BundleFile bf1 = bundleManager.addBundleFileViaByteArray(overlord, bv1.getId(), "bundletest-bundlefile-1", "1.0", null, "Test Bundle File # 1".getBytes()); filenames = bundleManager.getBundleVersionFilenames(overlord, bv1.getId(), true); assertNotNull(filenames); assertEquals(DEFAULT_CRITERIA_PAGE_SIZE + 1, filenames.size()); - BundleFile bf2 = bundleManager.addBundleFileViaByteArray(overlord, bv1.getId(), TEST_PREFIX + "-bundlefile-2", + BundleFile bf2 = bundleManager.addBundleFileViaByteArray(overlord, bv1.getId(), "bundletest-bundlefile-2", "1.0", null, "Test Bundle File # 2".getBytes()); filenames = bundleManager.getBundleVersionFilenames(overlord, bv1.getId(), true); assertNotNull(filenames); @@ -1991,16 +1991,24 @@ public class BundleManagerBeanTest extends AbstractEJB3Test { private BundleType createBundleType(String name) throws Exception { final String fullName = TEST_PREFIX + "-type-" + name; BundleType bt = null; + + getTransactionManager().begin(); try { - bt = bundleManager.getBundleType(overlord, fullName); + Query q = em.createQuery("SELECT bt FROM BundleType bt WHERE bt.name = '" + fullName + "'"); + bt = (BundleType) q.getSingleResult(); } catch (Throwable t) { + // nothing + } finally { + getTransactionManager().commit(); + } + + if (null == bt) { ResourceType rt = createResourceTypeForBundleType(name); bt = bundleManager.createBundleType(overlord, fullName, rt.getId()); - - assert bt.getId() > 0; - assert bt.getName().endsWith(fullName); }
+ assert bt.getId() > 0; + assert bt.getName().endsWith(fullName); return bt; }
commit 10ba82769aa6f5e7ce7c43461ffef821f062d36b Merge: 93dd494 ee56897 Author: Jay Shaughnessy jshaughn@redhat.com Date: Tue Aug 6 16:27:10 2013 -0400
Merge branch 'master' into feature/bundle-group
Conflicts: .classpath modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/D3GroupGraphListView.java modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/D3GraphListView.java modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/AddToDashboardComponent.java modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableDataSource.java modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/ResourceHandlerBean.java modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java
commit 93dd49487c2c288c0d1fc8dee2532a752a38038b Author: Jay Shaughnessy jshaughn@redhat.com Date: Tue Aug 6 16:09:54 2013 -0400
fix up the cassandra handling for the -Ddbsetup[-upgrade] and -Ddbreset handling to match master (Stefan's fixes).
diff --git a/modules/core/dbutils/pom.xml b/modules/core/dbutils/pom.xml index 360fdbc..a3c82c2 100644 --- a/modules/core/dbutils/pom.xml +++ b/modules/core/dbutils/pom.xml @@ -276,23 +276,22 @@ <script language="groovy"> import org.rhq.cassandra.schema.SchemaManager
- if (project.getProperty('dbsetup-upgrade') || project.getProperty('dbsetup')) { + if (project.getProperty('dbsetup-upgrade') || project.getProperty('dbreset') || project.getProperty('dbsetup')) { if (project.getProperty('storage-schema')) { - if (project.getProperty('db') == 'dev') { self.log('PERFORMING STORAGE NODE SETUP TO LATEST SCHEMA') - username = project.getProperty('rhq.dev.cassandra.username') ?: "rhqadmin" - password = project.getProperty('rhq.dev.cassandra.password') ?: "rhqadmin" - seeds = project.getProperty('rhq.dev.cassandra.seeds') ?: "127.0.0.1|7299|9142" + username = project.getProperty('rhq.cassandra.username') ?: "rhqadmin" + password = project.getProperty('rhq.cassandra.password') ?: "rhqadmin" + seeds = project.getProperty('rhq.cassandra.seeds') ?: "127.0.0.1|7299|9142"
schemaManager = new SchemaManager(username, password, seeds)
+ if (project.getProperty('dbreset') == 'true') { + self.log('Dropping schema') + schemaManager.drop() + } + self.log('Install schema') schemaManager.install() - } else { - self.log('SKIPPING SCHEMA UPDATES FOR STORAGE NODE') - self.log('Schema updates will only be applied to the dev-container storage node') - self.log('Run to with -Ddb=dev or -Pdev for to activate storage node schema changes') - } } } </script>
commit d538f8d86a17ba62c75a37c819251d5145e7fbf9 Author: Jay Shaughnessy jshaughn@redhat.com Date: Tue Aug 6 16:06:04 2013 -0400
add bundle deploy-related fine-graned perm tests and associated fixes
diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/BundleManagerBeanTest.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/BundleManagerBeanTest.java index 5e94ab1..470f69b 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/BundleManagerBeanTest.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/BundleManagerBeanTest.java @@ -32,6 +32,7 @@ import java.util.Set; import java.util.zip.ZipEntry; import java.util.zip.ZipOutputStream;
+import javax.ejb.EJBException; import javax.persistence.Query; import javax.transaction.TransactionManager;
@@ -1665,6 +1666,230 @@ public class BundleManagerBeanTest extends AbstractEJB3Test { } }
+ @Test(enabled = TESTS_ENABLED) + public void testAuthzBundleDest() throws Exception { + Subject subject = createNewSubject(TEST_USER_NAME); + Role role = createNewRoleForSubject(subject, TEST_ROLE_NAME); + subject = createSession(subject); // start a session so we can use this subject in SLSB calls + + // create bundle group + addRolePermissions(role, Permission.MANAGE_BUNDLE_GROUPS); + BundleGroup bundleGroup = bundleManager.createBundleGroup(subject, TEST_BUNDLE_GROUP_NAME, "bg"); + + // add bg to the role with group create + addRoleBundleGroup(role, bundleGroup); + addRolePermissions(role, Permission.CREATE_BUNDLES_IN_GROUP); + + // allow bundle creation in bg (has create perm) + Bundle b1 = createBundle(subject, "one", bundleGroup.getId()); + assertNotNull(b1); + BundleVersion bv1 = createBundleVersion(subject, b1.getName() + "-1", null, b1); + assertNotNull(bv1); + ResourceGroup platformResourceGroup = createTestResourceGroup(); + assertNotNull(platformResourceGroup); + + // deny destination create (no view of resource group) + try { + BundleDestination dest1 = createDestination(subject, b1, "one", "/test", platformResourceGroup); + fail("Should have thrown IllegalArgumentException"); + } catch (EJBException e) { + assert e.getCause() instanceof IllegalArgumentException + && e.getCause().getMessage().contains("Invalid groupId") : "Should have not had group visibility"; + // expected + } + + // deny destination create (no deploy perm) + LookupUtil.getRoleManager().addResourceGroupsToRole(overlord, role.getId(), + new int[] { platformResourceGroup.getId() }); + try { + BundleDestination dest1 = createDestination(subject, b1, "one", "/test", platformResourceGroup); + fail("Should have thrown PermissionException"); + } catch (PermissionException e) { + // expected + } + + // allow global + addRolePermissions(role, Permission.DEPLOY_BUNDLES); + BundleDestination dest1 = createDestination(subject, b1, "one", "/test", platformResourceGroup); + assertNotNull(dest1); 
+ Configuration config = new Configuration(); + config.put(new PropertySimple("bundletest.property", "bundletest.property value")); + BundleDeployment bd1; + bd1 = createDeployment(subject, "one", bv1, dest1, config); + assertNotNull(bd1); + + // allow group + removeRolePermissions(role, Permission.DEPLOY_BUNDLES); + addRolePermissions(role, Permission.DEPLOY_BUNDLES_TO_GROUP); + BundleDestination dest2 = createDestination(subject, b1, "two", "/test2", platformResourceGroup); + assertNotNull(dest2); + Configuration config2 = new Configuration(); + config2.put(new PropertySimple("bundletest.property", "bundletest.property value")); + BundleDeployment bd2; + bd2 = createDeployment(subject, "two", bv1, dest2, config2); + assertNotNull(bd1); + + // deny delete deployment + removeRolePermissions(role, Permission.DEPLOY_BUNDLES_TO_GROUP); + try { + bundleManager.deleteBundleDeployment(subject, bd2.getId()); + fail("Should have thrown PermissionException"); + } catch (PermissionException e) { + // expected + } + + // allow delete deployment + addRolePermissions(role, Permission.DEPLOY_BUNDLES); + bundleManager.deleteBundleDeployment(subject, bd2.getId()); + + // deny delete destination + removeRolePermissions(role, Permission.DEPLOY_BUNDLES); + try { + bundleManager.deleteBundleDestination(subject, dest2.getId()); + fail("Should have thrown PermissionException"); + } catch (PermissionException e) { + // expected + } + + // allow delete destination + addRolePermissions(role, Permission.DEPLOY_BUNDLES_TO_GROUP); + bundleManager.deleteBundleDestination(subject, dest2.getId()); + } + + @Test(enabled = TESTS_ENABLED) + public void testAuthzBundleDeploy() throws Exception { + Subject subject = createNewSubject(TEST_USER_NAME); + Role role = createNewRoleForSubject(subject, TEST_ROLE_NAME); + subject = createSession(subject); // start a session so we can use this subject in SLSB calls + + // create bundle group + addRolePermissions(role, Permission.MANAGE_BUNDLE_GROUPS); + 
BundleGroup bundleGroup = bundleManager.createBundleGroup(subject, TEST_BUNDLE_GROUP_NAME, "bg"); + + // add bg to the role with group create + addRoleBundleGroup(role, bundleGroup); + addRolePermissions(role, Permission.CREATE_BUNDLES_IN_GROUP); + + // allow bundle creation in bg (has create perm) + Bundle b1 = createBundle(subject, "one", bundleGroup.getId()); + assertNotNull(b1); + BundleVersion bv1 = createBundleVersion(subject, b1.getName() + "-1", null, b1); + assertNotNull(bv1); + ResourceGroup platformResourceGroup = createTestResourceGroup(); + assertNotNull(platformResourceGroup); + LookupUtil.getRoleManager().addResourceGroupsToRole(overlord, role.getId(), + new int[] { platformResourceGroup.getId() }); + + // allow dest/deploy create (global) + addRolePermissions(role, Permission.DEPLOY_BUNDLES); + BundleDestination dest1 = createDestination(subject, b1, "one", "/test", platformResourceGroup); + assertNotNull(dest1); + Configuration config = new Configuration(); + config.put(new PropertySimple("bundletest.property", "bundletest.property value")); + BundleDeployment bd1; + bd1 = createDeployment(subject, "one", bv1, dest1, config); + assertNotNull(bd1); + + // deny schedule + removeRolePermissions(role, Permission.DEPLOY_BUNDLES); + try { + BundleDeployment bd1d = bundleManager.scheduleBundleDeployment(subject, bd1.getId(), false); + fail("Should have thrown PermissionException"); + } catch (PermissionException e) { + // expected + } + + // test with global perm + testAuthzBundleDeployInternal(subject, role, bd1, dest1, platformResourceGroup, Permission.DEPLOY_BUNDLES); + + // test with bundle group perm + testAuthzBundleDeployInternal(subject, role, bd1, dest1, platformResourceGroup, + Permission.DEPLOY_BUNDLES_TO_GROUP); + } + + private void testAuthzBundleDeployInternal(Subject subject, Role role, BundleDeployment bd1, + BundleDestination dest1, ResourceGroup platformResourceGroup, Permission permission) throws Exception { + + // allow + 
addRolePermissions(role, permission); + + BundleDeployment bd1d = bundleManager.scheduleBundleDeployment(subject, bd1.getId(), false); + assertNotNull(bd1d); + assertEquals(bd1.getId(), bd1d.getId()); + + BundleDeploymentCriteria bdc = new BundleDeploymentCriteria(); + bdc.addFilterId(bd1d.getId()); + bdc.fetchBundleVersion(true); + bdc.fetchDestination(true); + bdc.fetchResourceDeployments(true); + bdc.fetchTags(true); + List<BundleDeployment> bds = bundleManager.findBundleDeploymentsByCriteria(subject, bdc); + assertEquals(1, bds.size()); + bd1d = bds.get(0); + + assertEquals(platformResourceGroup, bd1d.getDestination().getGroup()); + assertEquals(dest1.getId(), bd1d.getDestination().getId()); + + BundleResourceDeploymentCriteria c = new BundleResourceDeploymentCriteria(); + c.addFilterBundleDeploymentId(bd1d.getId()); + c.fetchBundleDeployment(true); + c.fetchHistories(true); + c.fetchResource(true); + List<BundleResourceDeployment> brds = bundleManager.findBundleResourceDeploymentsByCriteria(subject, c); + assertEquals(1, brds.size()); + assertEquals(1, bd1d.getResourceDeployments().size()); + assertEquals(bd1d.getResourceDeployments().get(0).getId(), brds.get(0).getId()); + BundleResourceDeployment brd = brds.get(0); + + assertNotNull(brd.getBundleResourceDeploymentHistories()); + int size = brd.getBundleResourceDeploymentHistories().size(); + assertTrue(size > 0); + String auditMessage = "BundleTest-Message"; + bundleManager.addBundleResourceDeploymentHistoryInNewTrans(overlord, brd.getId(), + new BundleResourceDeploymentHistory(overlord.getName(), auditMessage, auditMessage, + BundleResourceDeploymentHistory.Category.DEPLOY_STEP, BundleResourceDeploymentHistory.Status.SUCCESS, + auditMessage, auditMessage)); + + brds = bundleManager.findBundleResourceDeploymentsByCriteria(subject, c); + assertEquals(1, brds.size()); + assertEquals(brd.getId(), brds.get(0).getId()); + brd = brds.get(0); + assertNotNull(brd.getBundleResourceDeploymentHistories()); + 
assertTrue((size + 1) == brd.getBundleResourceDeploymentHistories().size()); + BundleResourceDeploymentHistory newHistory = null; + for (BundleResourceDeploymentHistory h : brd.getBundleResourceDeploymentHistories()) { + if (auditMessage.equals(h.getMessage())) { + newHistory = h; + break; + } + } + assertNotNull(newHistory); + assertEquals(auditMessage, newHistory.getAction()); + assertEquals(BundleResourceDeploymentHistory.Status.SUCCESS, newHistory.getStatus()); + + // deny purge destination + //TransactionManager txMgr = getTransactionManager(); + //txMgr.begin(); + //bd1 = em.find(BundleDeployment.class, bd1.getId()); + //bd1.setLive(true); + //txMgr.commit(); + + removeRolePermissions(role, permission); + try { + bundleManager.purgeBundleDestination(subject, dest1.getId()); + fail("Should have thrown PermissionException"); + } catch (PermissionException e) { + // expected + } + + // allow purge destination + addRolePermissions(role, permission); + bundleManager.purgeBundleDestination(subject, dest1.getId()); + + // leave without the perm being assigned + removeRolePermissions(role, permission); + } + // subject must have create bundle version permission private void deleteBundleVersion(Subject subject, Bundle b1) throws Exception { assertNotNull(b1); @@ -1819,8 +2044,13 @@ public class BundleManagerBeanTest extends AbstractEJB3Test {
private BundleDestination createDestination(Bundle bundle, String name, String deployDir, ResourceGroup group) throws Exception { + return createDestination(overlord, bundle, name, deployDir, group); + } + + private BundleDestination createDestination(Subject subject, Bundle bundle, String name, String deployDir, + ResourceGroup group) throws Exception { final String fullName = TEST_PREFIX + "-bundledestination-" + name; - BundleDestination bd = bundleManager.createBundleDestination(overlord, bundle.getId(), fullName, fullName, + BundleDestination bd = bundleManager.createBundleDestination(subject, bundle.getId(), fullName, fullName, TEST_DESTBASEDIR_NAME, deployDir, group.getId());
assert bd.getId() > 0; @@ -1831,9 +2061,13 @@ public class BundleManagerBeanTest extends AbstractEJB3Test {
private BundleDeployment createDeployment(String name, BundleVersion bv, BundleDestination dest, Configuration config) throws Exception { + return createDeployment(overlord, name, bv, dest, config); + } + + private BundleDeployment createDeployment(Subject subject, String name, BundleVersion bv, BundleDestination dest, + Configuration config) throws Exception { final String fullName = TEST_PREFIX + "-bundledeployment-" + name; - BundleDeployment bd = bundleManager - .createBundleDeployment(overlord, bv.getId(), dest.getId(), fullName, config); + BundleDeployment bd = bundleManager.createBundleDeployment(subject, bv.getId(), dest.getId(), fullName, config);
assert bd.getId() > 0; assert bd.getDescription().endsWith(fullName); @@ -1907,6 +2141,7 @@ public class BundleManagerBeanTest extends AbstractEJB3Test {
resourceGroup = new ResourceGroup(TEST_PREFIX + "-group-" + System.currentTimeMillis()); resourceGroup.addExplicitResource(resource); + resourceGroup.addImplicitResource(resource); resourceGroup.setResourceType(resourceType); // need to tell the group the type it is em.persist(resourceGroup);
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerBean.java index 33c119e..621a54c 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerBean.java @@ -1576,8 +1576,8 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot
// filter by bundles that are viewable if (!authorizationManager.hasGlobalPermission(subject, Permission.VIEW_BUNDLES)) { - generator.setAuthorizationResourceFragment(CriteriaQueryGenerator.AuthorizationTokenType.BUNDLE, null, - subject.getId()); + generator.setAuthorizationBundleFragment(CriteriaQueryGenerator.AuthorizationTokenType.BUNDLE, + subject.getId(), "bundleVersion.bundle"); }
CriteriaQueryRunner<BundleDeployment> queryRunner = new CriteriaQueryRunner<BundleDeployment>(criteria, @@ -1625,7 +1625,7 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot // filter by bundles that are viewable if (!authorizationManager.hasGlobalPermission(subject, Permission.VIEW_BUNDLES)) { generator.setAuthorizationBundleFragment(CriteriaQueryGenerator.AuthorizationTokenType.BUNDLE, - subject.getId()); + subject.getId(), "bundleDeployment.bundleVersion.bundle"); }
CriteriaQueryRunner<BundleResourceDeployment> queryRunner = new CriteriaQueryRunner<BundleResourceDeployment>( @@ -1852,8 +1852,9 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot checkDeployBundleAuthz(subject, doomed.getBundleVersion().getBundle().getId(), doomed.getDestination() .getGroup().getId());
- // only allow deployments to be deleted if they are finished - if (BundleDeploymentStatus.SUCCESS == doomed.getStatus() + // only allow deployments to be deleted if they are not started or finished + if (BundleDeploymentStatus.PENDING == doomed.getStatus() + || BundleDeploymentStatus.SUCCESS == doomed.getStatus() || BundleDeploymentStatus.FAILURE == doomed.getStatus() || BundleDeploymentStatus.MIXED == doomed.getStatus()) { entityManager.remove(doomed); @@ -2251,8 +2252,7 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot */ private void checkDeployBundleAuthz(Subject subject, int bundleId, int resourceGroupId) throws PermissionException {
- boolean hasResourceGroupView = authorizationManager.hasGroupPermission(subject, Permission.VIEW_RESOURCE, - resourceGroupId); + boolean hasResourceGroupView = authorizationManager.canViewGroup(subject, resourceGroupId);
if (!hasResourceGroupView) { String msg = "Subject [" + subject.getName() + "] requires VIEW permission on resource group [" diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerLocal.java index 6df2610..21bafd6 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerLocal.java @@ -209,8 +209,7 @@ public interface BundleManagerLocal extends BundleManagerRemote { boolean isCleanDeployment, boolean isRevert) throws Exception;
/** - * Used by GUI. This is a simple attempt at delete, typically used for removing a poorly defined deployment before it is - * actually scheduled for deployment. The status must be PENDING. It will fail if anything actually refers to it. + * Used by GUI. The deployment must be PENDING or in a completed state. * <pre> * Required Permissions: Either: * - Global.DEPLOY_BUNDLES and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles)
commit 6e6ce234321e77a64618b5fd61aa568f04118917 Author: Jay Shaughnessy jshaughn@redhat.com Date: Tue Aug 6 16:05:05 2013 -0400
complete dbupgrade work for bundle groups and new perms
diff --git a/modules/core/dbutils/src/main/scripts/dbsetup/content-schema.xml b/modules/core/dbutils/src/main/scripts/dbsetup/content-schema.xml index 35d17b6..c6a2e49 100644 --- a/modules/core/dbutils/src/main/scripts/dbsetup/content-schema.xml +++ b/modules/core/dbutils/src/main/scripts/dbsetup/content-schema.xml @@ -744,7 +744,7 @@ <column name="ID" default="sequence-only" initial="10001" primarykey="true" required="true" type="INTEGER"/> <column name="NAME" required="true" size="4000" type="VARCHAR2"/> - <column name="DESCRIPTION" size="100" type="VARCHAR2"/> + <column name="DESCRIPTION" size="512" type="VARCHAR2"/> <column name="CTIME" type="LONG"/> <column name="MTIME" type="LONG"/> </table> diff --git a/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml b/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml index 25cdfec..9f8d018 100644 --- a/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml +++ b/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml @@ -2084,66 +2084,116 @@
<!-- Add new perms to superuser/all-resources roles --> <schema-directSQL> - <statement desc="Inserting MANAGE_BUNDLE_GROUPS permission for 'Super User' role"> + <statement desc="Inserting MANAGE_BUNDLE_GROUPS permission for 'Super User' role only"> INSERT INTO RHQ_PERMISSION (ROLE_ID, OPERATION) VALUES (1, 18) </statement> - <statement desc="Inserting CREATE_BUNDLES permission for 'Super User' role"> - INSERT INTO RHQ_PERMISSION (ROLE_ID, OPERATION) VALUES (1, 19) + + <statement desc="Inserting CREATE_BUNDLES permission for all MANAGE_BUNDLE (12) roles"> + INSERT INTO rhq_permission (role_id, operation) SELECT role_id, 19 FROM rhq_permission p where p.operation = 12 </statement> - <statement desc="Inserting DELETE_BUNDLES permission for 'Super User' role"> - INSERT INTO RHQ_PERMISSION (ROLE_ID, OPERATION) VALUES (1, 20) + <statement desc="Inserting DELETE_BUNDLES permission for all MANAGE_BUNDLE roles"> + INSERT INTO rhq_permission (role_id, operation) SELECT role_id, 20 FROM rhq_permission p where p.operation = 12 </statement> - <statement desc="Inserting DEPLOY_BUNDLES permission for 'Super User' role"> - INSERT INTO RHQ_PERMISSION (ROLE_ID, OPERATION) VALUES (1, 21) + <statement desc="Inserting DEPLOY_BUNDLES permission for all MANAGE_BUNDLE roles"> + INSERT INTO rhq_permission (role_id, operation) SELECT role_id, 21 FROM rhq_permission p where p.operation = 12 </statement> - <statement desc="Inserting VIEW_BUNDLES permission for 'Super User' role"> - INSERT INTO RHQ_PERMISSION (ROLE_ID, OPERATION) VALUES (1, 22) + <statement desc="Inserting VIEW_BUNDLES permission for all MANAGE_BUNDLE roles"> + INSERT INTO rhq_permission (role_id, operation) SELECT role_id, 22 FROM rhq_permission p where p.operation = 12 </statement> - <statement desc="Inserting ASSIGN_BUNDLES_TO_GROUP permission for 'Super User' role"> - INSERT INTO RHQ_PERMISSION (ROLE_ID, OPERATION) VALUES (1, 23) + <statement desc="Inserting ASSIGN_BUNDLES_TO_GROUP permission for all MANAGE_BUNDLE roles"> 
+ INSERT INTO rhq_permission (role_id, operation) SELECT role_id, 23 FROM rhq_permission p where p.operation = 12 </statement> - <statement desc="Inserting UNASSIGN_BUNDLES_FROM_GROUP permission for 'Super User' role"> - INSERT INTO RHQ_PERMISSION (ROLE_ID, OPERATION) VALUES (1, 24) + <statement desc="Inserting UNASSIGN_BUNDLES_FROM_GROUP permission for all MANAGE_BUNDLE roles"> + INSERT INTO rhq_permission (role_id, operation) SELECT role_id, 24 FROM rhq_permission p where p.operation = 12 </statement> - <statement desc="Inserting CREATE_BUNDLES_IN_GROUP permission for 'Super User' role"> - INSERT INTO RHQ_PERMISSION (ROLE_ID, OPERATION) VALUES (1, 25) + <statement desc="Inserting CREATE_BUNDLES_IN_GROUP permission for all MANAGE_BUNDLE roles"> + INSERT INTO rhq_permission (role_id, operation) SELECT role_id, 25 FROM rhq_permission p where p.operation = 12 </statement> - <statement desc="Inserting DELETE_BUNDLES_FROM_GROUP permission for 'Super User' role"> - INSERT INTO RHQ_PERMISSION (ROLE_ID, OPERATION) VALUES (1, 26) + <statement desc="Inserting DELETE_BUNDLES_FROM_GROUP permission for all MANAGE_BUNDLE roles"> + INSERT INTO rhq_permission (role_id, operation) SELECT role_id, 26 FROM rhq_permission p where p.operation = 12 </statement> - <statement desc="Inserting VIEW_BUNDLES_IN_GROUP permission for 'Super User' role"> - INSERT INTO RHQ_PERMISSION (ROLE_ID, OPERATION) VALUES (1, 27) + <statement desc="Inserting VIEW_BUNDLES_IN_GROUP permission for all MANAGE_BUNDLE roles"> + INSERT INTO rhq_permission (role_id, operation) SELECT role_id, 27 FROM rhq_permission p where p.operation = 12 </statement> + </schema-directSQL>
- <!-- all-resources-role does not get MANAGE_BUNDLE_GROUPS --> - <statement desc="Inserting CREATE_BUNDLES permission for 'All Resources' role"> - INSERT INTO RHQ_PERMISSION (ROLE_ID, OPERATION) VALUES (2, 19) + <!-- RHQ_BUNDLE_GROUP --> + <schema-createSequence name="RHQ_BUNDLE_GROUP_ID_SEQ" initial="10001" /> + <schema-directSQL> + <statement desc="Creating table RHQ_BUNDLE_GROUP"> + CREATE TABLE RHQ_BUNDLE_GROUP ( + ID INTEGER PRIMARY KEY) </statement> - <statement desc="Inserting DELETE_BUNDLES permission for 'All Resources' role"> - INSERT INTO RHQ_PERMISSION (ROLE_ID, OPERATION) VALUES (2, 20) + </schema-directSQL> + <schema-addColumn table="RHQ_BUNDLE_GROUP" column="NAME" columnType="VARCHAR2" precision="4000" /> + <schema-alterColumn table="RHQ_BUNDLE_GROUP" column="NAME" nullable="FALSE" /> + <schema-addColumn table="RHQ_BUNDLE_GROUP" column="DESCRIPTION" columnType="VARCHAR2" precision="512" /> + <schema-alterColumn table="RHQ_BUNDLE_GROUP" column="DESCRIPTION" nullable="TRUE" /> + <schema-addColumn table="RHQ_BUNDLE_GROUP" column="CTIME" columnType="LONG" /> + <schema-alterColumn table="RHQ_BUNDLE_GROUP" column="CTIME" nullable="FALSE" /> + <schema-addColumn table="RHQ_BUNDLE_GROUP" column="MTIME" columnType="LONG" /> + <schema-alterColumn table="RHQ_BUNDLE_GROUP" column="MTIME" nullable="FALSE" /> + + <!-- RHQ_ROLE_BUNDLE_GROUP_MAP --> + <schema-directSQL> + <statement desc="Creating table RHQ_ROLE_BUNDLE_GROUP_MAP"> + CREATE TABLE RHQ_ROLE_BUNDLE_GROUP_MAP ( + ROLE_ID INTEGER, + BUNDLE_GROUP_ID INTEGER) </statement> - <statement desc="Inserting DEPLOY_BUNDLES permission for 'All Resources' role"> - INSERT INTO RHQ_PERMISSION (ROLE_ID, OPERATION) VALUES (2, 21) + </schema-directSQL> + <schema-alterColumn table="RHQ_ROLE_BUNDLE_GROUP_MAP" column="ROLE_ID" nullable="FALSE"/> + <schema-alterColumn table="RHQ_ROLE_BUNDLE_GROUP_MAP" column="BUNDLE_GROUP_ID" nullable="FALSE"/> + <schema-directSQL> + <statement> + ALTER TABLE RHQ_ROLE_BUNDLE_GROUP_MAP + ADD 
CONSTRAINT RHQ_ROLE_BUN_GROUP_MAP_KEY + PRIMARY KEY ( ROLE_ID, BUNDLE_GROUP_ID ) </statement> - <statement desc="Inserting VIEW_BUNDLES permission for 'All Resources' role"> - INSERT INTO RHQ_PERMISSION (ROLE_ID, OPERATION) VALUES (2, 22) + <statement desc="Creating RHQ_ROLE_BUNDLE_GROUP_MAP foreign key to RHQ_ROLE"> + ALTER TABLE RHQ_ROLE_BUNDLE_GROUP_MAP + ADD CONSTRAINT RHQ_RBG_ROLE_ID_FK + FOREIGN KEY (ROLE_ID) + REFERENCES RHQ_ROLE (ID) </statement> - <statement desc="Inserting ASSIGN_BUNDLES_TO_GROUP permission for 'All Resources' role"> - INSERT INTO RHQ_PERMISSION (ROLE_ID, OPERATION) VALUES (2, 23) + <statement desc="Creating RHQ_ROLE_BUNDLE_GROUP_MAP foreign key to RHQ_BUNDLE_GROUP"> + ALTER TABLE RHQ_ROLE_BUNDLE_GROUP_MAP + ADD CONSTRAINT RHQ_RBG_BUNDLE_GROUP_ID_FK + FOREIGN KEY (BUNDLE_GROUP_ID) + REFERENCES RHQ_BUNDLE_GROUP (ID) </statement> - <statement desc="Inserting UNASSIGN_BUNDLES_FROM_GROUP permission for 'All Resources' role"> - INSERT INTO RHQ_PERMISSION (ROLE_ID, OPERATION) VALUES (2, 24) + </schema-directSQL> + + <!-- RHQ_BUNDLE_GROUP_BUNDLE_MAP --> + <schema-directSQL> + <statement desc="Creating table RHQ_BUNDLE_GROUP_BUNDLE_MAP"> + CREATE TABLE RHQ_BUNDLE_GROUP_BUNDLE_MAP ( + BUNDLE_ID INTEGER, + BUNDLE_GROUP_ID INTEGER) </statement> - <statement desc="Inserting CREATE_BUNDLES_IN_GROUP permission for 'All Resources' role"> - INSERT INTO RHQ_PERMISSION (ROLE_ID, OPERATION) VALUES (2, 25) + </schema-directSQL> + <schema-alterColumn table="RHQ_BUNDLE_GROUP_BUNDLE_MAP" column="BUNDLE_ID" nullable="FALSE"/> + <schema-alterColumn table="RHQ_BUNDLE_GROUP_BUNDLE_MAP" column="BUNDLE_GROUP_ID" nullable="FALSE"/> + <schema-directSQL> + <statement> + ALTER TABLE RHQ_BUNDLE_GROUP_BUNDLE_MAP + ADD CONSTRAINT RHQ_BUNDLE_GROUP_BUN_MAP_KEY + PRIMARY KEY ( BUNDLE_ID, BUNDLE_GROUP_ID ) </statement> - <statement desc="Inserting DELETE_BUNDLES_FROM_GROUP permission for 'All Resources' role"> - INSERT INTO RHQ_PERMISSION (ROLE_ID, OPERATION) VALUES (2, 26) + 
<statement desc="Creating RHQ_BUNDLE_GROUP_BUNDLE_MAP foreign key to RHQ_BUNDLE"> + ALTER TABLE RHQ_BUNDLE_GROUP_BUNDLE_MAP + ADD CONSTRAINT RHQ_BGB_BUNDLE_ID_FK + FOREIGN KEY (BUNDLE_ID) + REFERENCES RHQ_BUNDLE (ID) </statement> - <statement desc="Inserting VIEW_BUNDLES_IN_GROUP permission for 'All Resources' role"> - INSERT INTO RHQ_PERMISSION (ROLE_ID, OPERATION) VALUES (2, 27) + <statement desc="Creating RHQ_BUNDLE_GROUP_BUNDLE_MAP foreign key to RHQ_BUNDLE_GROUP"> + ALTER TABLE RHQ_BUNDLE_GROUP_BUNDLE_MAP + ADD CONSTRAINT RHQ_BGB_BUNDLE_GROUP_ID_FK + FOREIGN KEY (BUNDLE_GROUP_ID) + REFERENCES RHQ_BUNDLE_GROUP (ID) </statement> - </schema-directSQL> + </schema-directSQL> + </schemaSpec>
</dbupgrade>
commit ee56897a3c2578e55feb85048d80cad2f44d50d4 Author: Heiko W. Rupp hwr@redhat.com Date: Tue Aug 6 18:23:34 2013 +0200
Fix a logic bug that showed when creation was (too) quick.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/ResourceHandlerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/ResourceHandlerBean.java index 7a6fb33..5f9a91a 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/ResourceHandlerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/ResourceHandlerBean.java @@ -861,6 +861,7 @@ public class ResourceHandlerBean extends AbstractRestBean { uriBuilder.path("/resource/creationStatus/{id}"); URI uri = uriBuilder.build(history.getId()); builder = Response.status(302); + builder.entity("Creation is running - please check back later"); builder.location(uri); // redirect to self
} @@ -909,19 +910,24 @@ public class ResourceHandlerBean extends AbstractRestBean { } else { // History says we had success but due to internal timing // the resource is not yet visible, so switch to in_progress - status = CreateResourceStatus.IN_PROGRESS; + UriBuilder uriBuilder = uriInfo.getRequestUriBuilder(); + URI uri = uriBuilder.build(); + builder = Response.status(302); + builder.entity("Creation is still running - please check back later"); + builder.location(uri); // redirect to self } } - if (status==CreateResourceStatus.IN_PROGRESS) { - - + else if (status==CreateResourceStatus.IN_PROGRESS) { + // Creation is still running, so let the user know to check back later UriBuilder uriBuilder = uriInfo.getRequestUriBuilder(); URI uri = uriBuilder.build(); builder = Response.status(302); + builder.entity("Creation is still running - please check back later"); builder.location(uri); // redirect to self } else { builder = Response.serverError(); + builder.entity(status + ": " + history.getErrorMessage()); }
MediaType mediaType = headers.getAcceptableMediaTypes().get(0);
commit ac5c180ffc62769826694a5c376f8fb878b467f3 Author: Heiko W. Rupp hwr@redhat.com Date: Tue Aug 6 17:52:38 2013 +0200
Provide correct group expressions as the test fail otherwise.
diff --git a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/GroupTest.java b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/GroupTest.java index 040d76d..85dfd30 100644 --- a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/GroupTest.java +++ b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/GroupTest.java @@ -777,7 +777,7 @@ public class GroupTest extends AbstractBase { list.add(""); list.add(" "); list.add(null); - list.add("resource.type = 'bla'"); + list.add("resource.availability = DOWN"); gd.setExpression(list);
Response response = @@ -808,8 +808,8 @@ public class GroupTest extends AbstractBase { GroupDef gd = new GroupDef("-x-test-def"); gd.setDescription("Just testing"); List<String> list = new ArrayList<String>(); - list.add("groupby resource"); - list.add("resource.name"); + list.add("groupby resource.type.plugin"); + list.add("groupby resource.type.name"); gd.setExpression(list);
Response response =
commit 8e606386cee4e882ff07a0cac95c3a1be967597d Author: Heiko W. Rupp hwr@redhat.com Date: Tue Aug 6 17:33:58 2013 +0200
BZ 993548 Allow for jsonp to also have json-paging-wrapping
diff --git a/modules/enterprise/gui/rest-war/src/main/java/org/rhq/enterprise/rest/JsonPFilter.java b/modules/enterprise/gui/rest-war/src/main/java/org/rhq/enterprise/rest/JsonPFilter.java index 8d43f03..47d0f65 100644 --- a/modules/enterprise/gui/rest-war/src/main/java/org/rhq/enterprise/rest/JsonPFilter.java +++ b/modules/enterprise/gui/rest-war/src/main/java/org/rhq/enterprise/rest/JsonPFilter.java @@ -51,6 +51,9 @@ import javax.servlet.http.HttpServletResponseWrapper; * @author Heiko W. Rupp */ public class JsonPFilter implements Filter { + private static final String APPLICATION_JSON = "application/json"; + private static final String VND_RHQ_WRAPPED_JSON = "application/vnd.rhq.wrapped+json"; + private static final String ACCEPT = "accept"; private String callbackName;
public void destroy() { @@ -60,7 +63,7 @@ public class JsonPFilter implements Filter { IOException {
if (!(request instanceof HttpServletRequest)) { - throw new ServletException("This filter can " + " only process HttpServletRequest requests"); + throw new ServletException("This filter can only process HttpServletRequest requests"); }
HttpServletRequest httpRequest = (HttpServletRequest) request; @@ -73,8 +76,12 @@ public class JsonPFilter implements Filter { // We need to wrap request and response, as we need to do some re-writing on both // We want to get json data inside, so change the accept header JsonPRequestWrapper requestWrapper = new JsonPRequestWrapper(httpRequest); - requestWrapper.setHeader("accept", "application/json"); - requestWrapper.setContentType("application/json"); + if (requestsJsonWrapping(httpRequest)) { + requestWrapper.setHeader(ACCEPT, VND_RHQ_WRAPPED_JSON); + } else { + requestWrapper.setHeader(ACCEPT, APPLICATION_JSON); + } + requestWrapper.setContentType(APPLICATION_JSON);
JsonPResponseWrapper responseWrapper = new JsonPResponseWrapper(httpResponse);
@@ -91,6 +98,26 @@ public class JsonPFilter implements Filter { } }
+ /** + * Check if the incoming request requests jsonw wrapping and jsonp-wrapping + * @param httpRequest + * @return + */ + private boolean requestsJsonWrapping(HttpServletRequest httpRequest) { + + String mimeType = httpRequest.getHeader(ACCEPT); + if (mimeType.equals(VND_RHQ_WRAPPED_JSON)) { + return true; + } + + String localPart = httpRequest.getContextPath(); + if (localPart.endsWith(".jsonw")) { + return true; + } + + return false; + } + public void init(FilterConfig config) throws ServletException { callbackName = config.getInitParameter("filter.jsonp.callback"); } diff --git a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/ResourcesTest.java b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/ResourcesTest.java index 4c47960..bb7e2c2 100644 --- a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/ResourcesTest.java +++ b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/ResourcesTest.java @@ -41,7 +41,6 @@ import org.apache.http.HttpStatus; import org.junit.Test;
import org.rhq.modules.integrationTests.restApi.d.Availability; -import org.rhq.modules.integrationTests.restApi.d.Link; import org.rhq.modules.integrationTests.restApi.d.Resource;
import static com.jayway.restassured.RestAssured.expect; @@ -127,7 +126,6 @@ public class ResourcesTest extends AbstractBase { .get("/resource/{id}") .jsonPath().getInt("typeId");
- assert typeId!=null; assert typeId>0;
given() @@ -348,6 +346,40 @@ public class ResourcesTest extends AbstractBase { }
@Test + public void testGetResourcesWithPagingAndWrappingAndJsonP() throws Exception { + + Response response = + given() + .header("Accept", "application/vnd.rhq.wrapped+json") + .queryParam("jsonp","jsonp") // Use jsonp-wrapping e.g. for JavaScript access + .with() + .queryParam("page", 1) + .queryParam("ps", 2) // Unusually small to provoke having more than 1 page + .queryParam("category", "service") + .expect() + .statusCode(200) + .log().everything() + .when() + .get("/resource"); + + String mediaType = response.getContentType(); + assert mediaType.startsWith("application/javascript"); + + // check for jsonp wrapping + String bodyString = response.asString(); + assert bodyString.startsWith("jsonp("); + assert bodyString.endsWith(");"); + + // extract the internal json data + String body = bodyString.substring(6,bodyString.length()-2); + + // validate + JsonPath jsonPath = new JsonPath(body); + assert jsonPath.getInt("pageSize") == 2; + assert jsonPath.getInt("currentPage") == 1; + } + + @Test public void testGetResourcesWithPagingAndWrappingByExtension() throws Exception {
given()
commit 74bbb4e2d9da88afb6b529517795451635dba0e1 Author: Thomas Segismont tsegismo@redhat.com Date: Tue Aug 6 17:51:05 2013 +0200
Bug 879040 - Initialization of HostControllerComponent results in ERRORs in agent log due to assuming host controller's host is named "master"
diff --git a/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/HostControllerComponent.java b/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/HostControllerComponent.java index 7ea12b9..b61c009 100644 --- a/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/HostControllerComponent.java +++ b/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/HostControllerComponent.java @@ -1,6 +1,6 @@ /* * RHQ Management Platform - * Copyright (C) 2005-2012 Red Hat, Inc. + * Copyright (C) 2005-2013 Red Hat, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify @@ -13,8 +13,8 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ package org.rhq.modules.plugins.jbossas7;
@@ -48,9 +48,6 @@ public class HostControllerComponent<T extends ResourceComponent<?>> extends Bas private static final String DOMAIN_CONFIG_TRAIT = "domain-config-file"; private static final String HOST_CONFIG_TRAIT = "host-config-file";
- private static final Address ENVIRONMENT_ADDRESS = new Address("host=master,core-service=host-environment"); - private static final Address HOST_ADDRESS = new Address("host=master"); - @Override protected AS7Mode getMode() { return AS7Mode.DOMAIN; @@ -212,14 +209,17 @@ public class HostControllerComponent<T extends ResourceComponent<?>> extends Bas @NotNull @Override protected Address getEnvironmentAddress() { - return ENVIRONMENT_ADDRESS; + return new Address("host=" + getHostName() + ",core-service=host-environment"); }
@NotNull @Override protected Address getHostAddress() { - // TODO is the local controller always on host=master?? AS7-3678 - return HOST_ADDRESS; + return new Address("host=" + getHostName()); + } + + private String getHostName() { + return context.getPluginConfiguration().getSimpleValue("domainHost", "master"); }
@NotNull
commit ab0f6dab517426f9473503752f058830ac5f28b9 Author: Tristan Tarrant ttarrant@redhat.com Date: Tue Aug 6 12:11:06 2013 +0200
Bug 988894 - The AS7 plugin should support the community version of JDG Server (aka Infinispan Server)
diff --git a/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/JBossProductType.java b/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/JBossProductType.java index 48fbf1f..45864cf 100644 --- a/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/JBossProductType.java +++ b/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/JBossProductType.java @@ -1,6 +1,6 @@ /* * RHQ Management Platform - * Copyright (C) 2012 Red Hat, Inc. + * Copyright (C) 2005-2013 Red Hat, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify @@ -13,8 +13,8 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ package org.rhq.modules.plugins.jbossas7;
@@ -32,6 +32,7 @@ public enum JBossProductType {
AS("AS", "JBoss AS 7", "JBoss Application Server 7", "AS"), EAP("EAP", "JBoss EAP 6", "JBoss Enterprise Application Platform 6", "EAP"), + ISPN("ISPN", "Infinispan Server", "Infinispan Server", "ISPN"), JDG("JDG", "JBoss JDG 6", "JBoss Data Grid 6", "Data Grid"), EPP("EPP", "JBoss EAP 6", "JBoss Enterprise Portal Platform 6", "Portal Platform"), JPP("JPP", "JBoss EAP 6", "JBoss Portal Platform 6", "Portal Platform"), @@ -123,6 +124,8 @@ public enum JBossProductType { } if (slot.equals("eap")) { productType = JBossProductType.EAP; + } else if (slot.equals("ispn")) { + productType = JBossProductType.ISPN; } else if (slot.equals("jdg")) { productType = JBossProductType.JDG; } else if (slot.equals("epp")) {//old EPP @@ -150,6 +153,8 @@ public enum JBossProductType { productType = JBossProductType.WILDFLY8; } else if (homeDirName.contains("-eap-")) { productType = JBossProductType.EAP; + } else if (homeDirName.contains("infinispan-server")) { + productType = JBossProductType.ISPN; } else if (homeDirName.contains("-jdg-")||(homeDirName.contains("datagrid-server"))) { productType = JBossProductType.JDG; } else if (homeDirName.contains("-epp-")) { diff --git a/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/SubsystemDiscovery.java b/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/SubsystemDiscovery.java index 18eb274..93210c3 100644 --- a/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/SubsystemDiscovery.java +++ b/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/SubsystemDiscovery.java @@ -1,6 +1,6 @@ /* * RHQ Management Platform - * Copyright (C) 2005-2011 Red Hat, Inc. + * Copyright (C) 2005-2013 Red Hat, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify @@ -13,9 +13,10 @@ * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ + package org.rhq.modules.plugins.jbossas7;
import java.util.ArrayList; @@ -85,7 +86,7 @@ public class SubsystemDiscovery implements ResourceDiscoveryComponent<BaseCompon lookForChildren = true; }
- // check if the parent is a JDG server. In this case ignore the as7 version + // check if the parent is a JDG/Infinispan server. In this case ignore the as7 version // of the type and vice versa if (shouldSkipEntryWrtIspn(context, confPath)) { return details; @@ -192,7 +193,7 @@ public class SubsystemDiscovery implements ResourceDiscoveryComponent<BaseCompon }
/** - * The as7 plugin and the JDG plugin both have a subsystem=infinispan. We need to decide + * The as7 plugin and the JDG/Infinispan Server plugin both have a subsystem=infinispan. We need to decide * which one to 'activate' depending on the type, plugin and the detected parent. * Rules are:<ul> * <li>If the parent is a host controller or such, there is no jdg available</li> @@ -205,30 +206,29 @@ public class SubsystemDiscovery implements ResourceDiscoveryComponent<BaseCompon * @param confPath The subsystem that got fed into discovery. Directly return is not subsystem=infinispan * @return True if this subsystem should be skipped. */ - private boolean shouldSkipEntryWrtIspn(ResourceDiscoveryContext<BaseComponent<?>> context, - String confPath) { - - String jdgPluginType = "JDG"; + private boolean shouldSkipEntryWrtIspn(ResourceDiscoveryContext<BaseComponent<?>> context, String confPath) {
// If this is not subsystem=infinispan, we should not skip it at all if (!"subsystem=infinispan".equals(confPath)) return false;
ResourceType ourType = context.getResourceType(); - boolean ourPluginTypeIsJdg = ourType.getPlugin().equals(jdgPluginType); + boolean ourPluginTypeIsJdg = ourType.getPlugin().equals("JDG");
- String productType = context.getParentResourceComponent().pluginConfiguration.getSimpleValue("productType","AS7"); - boolean isJdgProduct = jdgPluginType.equals(productType); + String productType = context.getParentResourceComponent().pluginConfiguration.getSimpleValue("productType", + "AS7"); + boolean isJdgProduct = "JDG".equals(productType) || "ISPN".equals(productType);
if (ourPluginTypeIsJdg && isJdgProduct) { - if (log.isDebugEnabled()) - log.debug("Ours is JDG and product is JDG"); + log.debug("Ours is JDG and product is JDG/InfinispanServer"); return false; }
if (!ourPluginTypeIsJdg && !isJdgProduct) { - if (log.isDebugEnabled()) - log.debug("Ours is not JDG (" + ourType.toString() + ") and product is not JDG (" + productType + ")"); + if (log.isDebugEnabled()) { + log.debug("Ours is not JDG (" + ourType.toString() + ") and product is not JDG/InfinispanServer (" + + productType + ")"); + } return false; }
commit e7360aab23440337966a1ac5c3b66df9d1baac09 Author: Heiko W. Rupp hwr@redhat.com Date: Tue Aug 6 11:40:00 2013 +0200
Plugin generator is now on GitHub, so remove it here.
diff --git a/modules/helpers/pluginAnnotations/pom.xml b/modules/helpers/pluginAnnotations/pom.xml deleted file mode 100644 index 7ce1859..0000000 --- a/modules/helpers/pluginAnnotations/pom.xml +++ /dev/null @@ -1,48 +0,0 @@ -<?xml version="1.0" encoding="UTF-8"?> - -<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> - - <modelVersion>4.0.0</modelVersion> - - <parent> - <groupId>org.rhq</groupId> - <artifactId>rhq-parent</artifactId> - <version>4.9.0-SNAPSHOT</version> - <relativePath>../../../pom.xml</relativePath> - </parent> - - <groupId>org.rhq.helpers</groupId> - <artifactId>rhq-pluginAnnotations</artifactId> - <packaging>jar</packaging> - <version>4.9.0-SNAPSHOT</version> - - <name>RHQ plugin annotations</name> - <description>Annotations to help generate plugin descriptors</description> - - <build> - - <plugins> - - <plugin> - <artifactId>maven-jar-plugin</artifactId> - <configuration> - <archive> - <manifest> - <packageName>org.rhq.helpers.pluginAnnotations</packageName> - </manifest> - </archive> - </configuration> - </plugin> - - <plugin> - <artifactId>maven-release-plugin</artifactId> - <version>2.1</version> - </plugin> - - </plugins> - - </build> - -</project> - - diff --git a/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/ConfigProperty.java b/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/ConfigProperty.java deleted file mode 100644 index 4b5a3ab..0000000 --- a/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/ConfigProperty.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * RHQ Management Platform - * Copyright (C) 2005-2013 Red Hat, Inc. - * All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA - */ - -package org.rhq.helpers.pluginAnnotations.agent; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * A configuration property for resource or plugin config. - * Currently only property simple are supported. - * @author Heiko W. Rupp - */ -@Retention(RetentionPolicy.RUNTIME) -@Target( { ElementType.FIELD}) -public @interface ConfigProperty { - - public Scope scope() default Scope.PLUGIN; - String property() default ""; - String displayName() default ""; - String description() default ""; - boolean readOnly() default false; - String defaultValue() default ""; - RhqType rhqType() default RhqType.VOID; - - - public enum Scope { - PLUGIN, - RESOURCE; - } -} diff --git a/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/DataType.java b/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/DataType.java deleted file mode 100644 index a416079..0000000 --- a/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/DataType.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * JBoss, Home of Professional Open Source. - * Copyright 2009, Red Hat, Inc. 
and/or its affiliates, and - * individual contributors as indicated by the @author tags. See the - * copyright.txt file in the distribution for a full listing of - * individual contributors. - * - * This is free software; you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as - * published by the Free Software Foundation; either version 2.1 of - * the License, or (at your option) any later version. - * - * This software is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this software; if not, write to the Free - * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA - * 02110-1301 USA, or see the FSF site: http://www.fsf.org. - */ -package org.rhq.helpers.pluginAnnotations.agent; - -/** - * DataType. - * - * @author Galder Zamarreño - * @since 4.0 - */ -public enum DataType { - MEASUREMENT, TRAIT, CALLTIME; - - @Override - public String toString() { - return super.toString().toLowerCase(); - } - -} diff --git a/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/DisplayType.java b/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/DisplayType.java deleted file mode 100644 index 49551ef..0000000 --- a/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/DisplayType.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * JBoss, Home of Professional Open Source. - * Copyright 2009, Red Hat, Inc. and/or its affiliates, and - * individual contributors as indicated by the @author tags. See the - * copyright.txt file in the distribution for a full listing of - * individual contributors. 
- * - * This is free software; you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as - * published by the Free Software Foundation; either version 2.1 of - * the License, or (at your option) any later version. - * - * This software is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this software; if not, write to the Free - * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA - * 02110-1301 USA, or see the FSF site: http://www.fsf.org. - */ -package org.rhq.helpers.pluginAnnotations.agent; - -/** - * DisplayType. - * - * @author Galder Zamarreño - * @since 4.0 - */ -public enum DisplayType { - SUMMARY, DETAIL; - - @Override - public String toString() { - return super.toString().toLowerCase(); - } - -} diff --git a/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/MeasurementType.java b/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/MeasurementType.java deleted file mode 100644 index c797e81..0000000 --- a/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/MeasurementType.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * JBoss, Home of Professional Open Source. - * Copyright 2009, Red Hat, Inc. and/or its affiliates, and - * individual contributors as indicated by the @author tags. See the - * copyright.txt file in the distribution for a full listing of - * individual contributors. 
- * - * This is free software; you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as - * published by the Free Software Foundation; either version 2.1 of - * the License, or (at your option) any later version. - * - * This software is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this software; if not, write to the Free - * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA - * 02110-1301 USA, or see the FSF site: http://www.fsf.org. - */ -package org.rhq.helpers.pluginAnnotations.agent; - -/** - * MeasurementType. - * - * @author Galder Zamarreño - * @since 4.0 - */ -public enum MeasurementType { - DYNAMIC, TRENDSUP, TRENDSDOWN; - - @Override - public String toString() { - return super.toString().toLowerCase(); - } -} diff --git a/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/Metric.java b/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/Metric.java deleted file mode 100644 index c1482f1..0000000 --- a/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/Metric.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * RHQ Management Platform - * Copyright (C) 2005-2009 Red Hat, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ -package org.rhq.helpers.pluginAnnotations.agent; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Annotation to denote a metric that should be measured - * - * @author Heiko W. Rupp - * @author Galder Zamarreo - */ -@Retention(RetentionPolicy.RUNTIME) -@Target( { ElementType.FIELD, ElementType.METHOD }) -public @interface Metric { - String property() default ""; - String displayName() default ""; - String description() default ""; - long defaultInterval() default 120000000L; // 20 min - DisplayType displayType() default DisplayType.DETAIL; - DataType dataType() default DataType.MEASUREMENT; - Units units() default Units.NONE; - MeasurementType measurementType() default MeasurementType.DYNAMIC; - } \ No newline at end of file diff --git a/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/Operation.java b/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/Operation.java deleted file mode 100644 index ba60955..0000000 --- a/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/Operation.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * RHQ Management Platform - * Copyright (C) 2005-2009 Red Hat, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation version 2 of the License. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ -package org.rhq.helpers.pluginAnnotations.agent; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Annotation that denotes an Operation that can be executed - * - * @author Heiko W. Rupp - * @author Galder Zamarreo - */ -@Retention(RetentionPolicy.RUNTIME) -@Target(ElementType.METHOD) -public @interface Operation { - String name() default ""; - String displayName() default ""; - String description() default ""; -} \ No newline at end of file diff --git a/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/Parameter.java b/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/Parameter.java deleted file mode 100644 index 29dcc1e..0000000 --- a/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/Parameter.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * JBoss, Home of Professional Open Source. - * Copyright 2009, Red Hat, Inc. and/or its affiliates, and - * individual contributors as indicated by the @author tags. See the - * copyright.txt file in the distribution for a full listing of - * individual contributors. - * - * This is free software; you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as - * published by the Free Software Foundation; either version 2.1 of - * the License, or (at your option) any later version. 
- * - * This software is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this software; if not, write to the Free - * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA - * 02110-1301 USA, or see the FSF site: http://www.fsf.org. - */ -package org.rhq.helpers.pluginAnnotations.agent; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Parameter. - * - * @author Galder Zamarreño - * @since 4.0 - */ -@Target(ElementType.PARAMETER) -@Retention(RetentionPolicy.RUNTIME) -public @interface Parameter { - String name() ; - String description() default ""; - RhqType type() default RhqType.VOID; -} diff --git a/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/RhqType.java b/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/RhqType.java deleted file mode 100644 index 1487a51..0000000 --- a/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/RhqType.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * RHQ Management Platform - * Copyright (C) 2005-2013 Red Hat, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA - */ - -package org.rhq.helpers.pluginAnnotations.agent; - -import java.io.File; - - -/** - * Base data types from RHQ for properties - * @author Heiko W. Rupp - */ -public enum RhqType { - INTEGER(new Class<?>[]{Integer.class,int.class},Boolean.class), - LONG(new Class<?>[]{Long.class,long.class},Long.class), - DOUBLE(new Class<?>[]{Double.class,double.class},Double.class), - STRING(new Class<?>[]{String.class},String.class), - LONG_STRING(new Class<?>[]{},String.class), - PASSWORD(new Class<?>[]{},String.class), - BOOLEAN(new Class<?>[]{Boolean.class, boolean.class},Boolean.class), - FLOAT(new Class<?>[]{Float.class, float.class},Float.class), - FILE(new Class<?>[]{File.class},File.class), - DIRECTORY(new Class<?>[]{},File.class), - VOID(new Class<?>[]{Void.class,void.class},Void.class) - ; - private Class<?>[] fromClasses; - private Class<?> toClass; - - private RhqType(Class<?>[] fromClasses,Class<?> toClass) { - - this.fromClasses = fromClasses; - this.toClass = toClass; - } - - public Class<?>[] getFromClasses() { - return fromClasses; - } - - public Class<?> getToClass() { - return toClass; - } - - public static RhqType findType(Class<?> clazz) { - for (RhqType type : RhqType.values()) { - for (Class from : type.getFromClasses()) { - if (clazz.equals(from)) { - return type; - } - } - } - return null; - } - - public String getRhqName() { - String name = name().toLowerCase(); - if (name.equals("long_string")) { - name = "longString"; - } - return name; - } -} diff --git a/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/Units.java b/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/Units.java deleted file mode 100644 index 150b9e0..0000000 --- 
a/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/Units.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * JBoss, Home of Professional Open Source. - * Copyright 2009-2013, Red Hat, Inc. and/or its affiliates, and - * individual contributors as indicated by the @author tags. See the - * copyright.txt file in the distribution for a full listing of - * individual contributors. - * - * This is free software; you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as - * published by the Free Software Foundation; either version 2.1 of - * the License, or (at your option) any later version. - * - * This software is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this software; if not, write to the Free - * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA - * 02110-1301 USA, or see the FSF site: http://www.fsf.org. - */ -package org.rhq.helpers.pluginAnnotations.agent; - -/** - * Metric Units. - * - * @author Galder Zamarreño - * @author Heiko W. 
Rupp - * See also org.rhq.core.domain.measurement.MeasurementUnits - * @since 4.0 - */ -@SuppressWarnings("unused") -public enum Units { - NONE, PERCENTAGE, - BYTES, KILOBYTES, MEGABYTES, GIGABYTES, TERABYTES, PETABYTES, - BITS, KILOBITS, MEGABITS, GIGABITS, TERABITS, PETABITS, - EPOCH_MILLISECONDS, EPOCH_SECONDS, - JIFFYS, NANOSECONDS, MICROSECONDS, MILLISECONDS, SECONDS, MINUTES, HOURS, DAYS, - CELSIUS, KELVIN, FAHRENHEIT; - - @Override - public String toString() { - return super.toString().toLowerCase(); - } -} diff --git a/modules/helpers/pluginGen/log4j.properties b/modules/helpers/pluginGen/log4j.properties deleted file mode 100644 index cb52fb3..0000000 --- a/modules/helpers/pluginGen/log4j.properties +++ /dev/null @@ -1,12 +0,0 @@ -log4j.rootCategory=INFO, FILE - -log4j.appender.FILE=org.apache.log4j.DailyRollingFileAppender -log4j.appender.FILE.DatePattern='.'yyyy-MM-dd -log4j.appender.FILE.File=run.log -log4j.appender.FILE.layout=org.apache.log4j.PatternLayout -log4j.appender.FILE.layout.ConversionPattern=%d{ABSOLUTE} %-5p %c %m%n -#log4j.appender.A1.layout.ConversionPattern=%d [%t] %-5p %c - %m%n -log4j.appender.FILE.Append=false - -# turn the following on... 
-log4j.logger.org.rhq.helpers.pluginGen=INFO \ No newline at end of file diff --git a/modules/helpers/pluginGen/pom.xml b/modules/helpers/pluginGen/pom.xml deleted file mode 100644 index e7744e4..0000000 --- a/modules/helpers/pluginGen/pom.xml +++ /dev/null @@ -1,124 +0,0 @@ -<?xml version="1.0" encoding="UTF-8"?> - -<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> - - <modelVersion>4.0.0</modelVersion> - - <parent> - <groupId>org.rhq</groupId> - <artifactId>rhq-parent</artifactId> - <version>4.9.0-SNAPSHOT</version> - <relativePath>../../../pom.xml</relativePath> - </parent> - - <groupId>org.rhq.helpers</groupId> - <artifactId>rhq-pluginGen</artifactId> - <packaging>jar</packaging> - <version>4.9.0-SNAPSHOT</version> - - <name>RHQ plugin generator</name> - <description>Helper to generate plugin skeletons</description> - - <properties> - <!-- we are using JDK 1.7 here, as JavaFX needs this and the generator is standalone anyway --> - <animal.sniffer.java.signature.artifactId>java17</animal.sniffer.java.signature.artifactId> - </properties> - - <build> - <plugins> - - <plugin> - <artifactId>maven-compiler-plugin</artifactId> - <configuration> - <source>1.7</source> - <target>1.7</target> - </configuration> - </plugin> - - <plugin> - <artifactId>maven-jar-plugin</artifactId> - <configuration> - <archive> - <manifest> - <mainClass>org.rhq.helpers.pluginGen.PluginGen</mainClass> - <packageName>org.rhq.helpers.pluginGen</packageName> - </manifest> - </archive> - </configuration> - </plugin> - - <plugin> - <artifactId>maven-assembly-plugin</artifactId> - <configuration> - <descriptorRefs> - <descriptorRef>jar-with-dependencies</descriptorRef> - </descriptorRefs> - <archive> - <manifest> - <mainClass>org.rhq.helpers.pluginGen.PluginGen</mainClass> - </manifest> - </archive> - </configuration> - <executions> - 
<execution> - <id>make-assembly</id> <!-- this is used for inheritance merges --> - <phase>package</phase> <!-- append to the packaging phase. --> - <goals> - <goal>single</goal> <!-- goals == mojos --> - </goals> - </execution> - </executions> - </plugin> - - <plugin> - <artifactId>maven-release-plugin</artifactId> - <version>2.1</version> - </plugin> - - <plugin> - <groupId>org.codehaus.mojo</groupId> - <artifactId>exec-maven-plugin</artifactId> - <version>1.2.1</version> - <executions> - <execution> - <goals> - <goal>java</goal> - </goals> - </execution> - </executions> - <configuration> - <mainClass>org.rhq.helpers.pluginGen.PluginGen</mainClass> - </configuration> - </plugin> - - </plugins> - - </build> - - - <dependencies> - <dependency> - <groupId>commons-logging</groupId> - <artifactId>commons-logging</artifactId> - </dependency> - <dependency> - <groupId>org.freemarker</groupId> - <artifactId>freemarker</artifactId> - <version>2.3.16</version> - </dependency> - <dependency> - <groupId>org.rhq.helpers</groupId> - <artifactId>rhq-pluginAnnotations</artifactId> - <version>4.9.0-SNAPSHOT</version> - </dependency> - <dependency> - <groupId>com.oracle</groupId> - <artifactId>javafx</artifactId> - <version>2.0</version> - <systemPath>${java.home}/lib/jfxrt.jar</systemPath> - <scope>system</scope> - </dependency> - </dependencies> - -</project> - diff --git a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/AnnotationProcessor.java b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/AnnotationProcessor.java deleted file mode 100644 index 16ec437..0000000 --- a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/AnnotationProcessor.java +++ /dev/null @@ -1,162 +0,0 @@ -/* - * RHQ Management Platform - * Copyright (C) 2005-2013 Red Hat, Inc. - * All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA - */ - -package org.rhq.helpers.pluginGen; - -import java.lang.annotation.Annotation; -import java.lang.reflect.Field; -import java.lang.reflect.Method; -import java.util.List; - -import org.rhq.helpers.pluginAnnotations.agent.ConfigProperty; -import org.rhq.helpers.pluginAnnotations.agent.Metric; -import org.rhq.helpers.pluginAnnotations.agent.Operation; -import org.rhq.helpers.pluginAnnotations.agent.Parameter; -import org.rhq.helpers.pluginAnnotations.agent.RhqType; - -/** - * Processor that scans a directory for annotated classes and generates metrics etc. from them. - * @author Heiko W. 
Rupp - */ -public class AnnotationProcessor { - - private final DirectoryClassLoader classLoader; - - public AnnotationProcessor(String baseDirectory) { - classLoader = new DirectoryClassLoader(); - classLoader.setBaseDir(baseDirectory); - } - - public void populate(Props props) { - List<Class> classList = classLoader.findClasses(); - - populateMetrics(props, classList); - populateOperations(props, classList); - populateConfigurations(props, classList); - } - - public void populateMetrics(Props props, List<Class> classes) { - for (Class<?> clazz : classes) { - for (Field field : clazz.getDeclaredFields()) { - Metric metricAnnot = field.getAnnotation(Metric.class); - addMetric(props, metricAnnot, field.getName()); - } - - for (Method method : clazz.getDeclaredMethods()) { - Metric metricAnnot = method.getAnnotation(Metric.class); - addMetric(props, metricAnnot, method.getName()); - } - } - } - - public void populateOperations(Props props, List<Class> classes) { - for (Class<?> clazz : classes) { - for (Method method : clazz.getDeclaredMethods()) { - Operation operationAnnot = method.getAnnotation(Operation.class); - if (operationAnnot != null) { - String property = operationAnnot.name(); - if (property.isEmpty()) { - property = method.getName(); - } - Props.OperationProps op = new Props.OperationProps(property); - op.setDisplayName(operationAnnot.displayName()); - op.setDescription(operationAnnot.description()); - RhqType type = RhqType.findType(method.getReturnType()); - if (type != RhqType.VOID) { - Props.SimpleProperty simpleProperty = new Props.SimpleProperty(type.getRhqName()); - op.setResult(simpleProperty); - } - - Class[] types = method.getParameterTypes(); - int i=0; - for (Annotation[] annotations : method.getParameterAnnotations() ) { - for (Annotation annotation : annotations) { - if (annotation instanceof Parameter) { - Parameter parameter = (Parameter) annotation; - Props.SimpleProperty simpleProperty = new Props.SimpleProperty(parameter.name()); - 
simpleProperty.setDescription(parameter.description()); - Class typeClass = types[i]; - RhqType rhqType = RhqType.findType(typeClass); - if (parameter.type()!=RhqType.VOID){ - rhqType = parameter.type(); - } - simpleProperty.setType(rhqType.getRhqName()); - op.getParams().add(simpleProperty); - } - } - i++; - } - props.getOperations().add(op); - } - - } - } - } - - public void populateConfigurations(Props props, List<Class> classes) { - for (Class<?> clazz : classes) { - for (Field field : clazz.getDeclaredFields()) { - ConfigProperty configProperty = field.getAnnotation(ConfigProperty.class); - if (configProperty!=null) { - String name = configProperty.property(); - if(name.isEmpty()) { - name = field.getName(); - } - Props.SimpleProperty property = new Props.SimpleProperty(name); - property.setDescription(configProperty.description()); - property.setDisplayName(configProperty.displayName()); - Class type = field.getType(); - RhqType rhqType = RhqType.findType(type); - if (configProperty.rhqType()!=RhqType.VOID) { - rhqType = configProperty.rhqType(); - } - property.setType(rhqType.getRhqName()); - - switch (configProperty.scope()){ - case PLUGIN: - props.getPluginConfig().add(property); - break; - case RESOURCE: - props.getResourceConfig().add(property); - break; - default: - throw new IllegalStateException("Unknown scope: " +configProperty.scope().name()); - } - } - } - } - } - - private void addMetric(Props props, Metric metricAnnot, String name) { - if (metricAnnot != null) { - String property = metricAnnot.property(); - if (property.isEmpty()) { - property = name; - } - Props.MetricProps metric = new Props.MetricProps(property); - metric.setDisplayName(metricAnnot.displayName()); - metric.setDisplayType(metricAnnot.displayType()); - metric.setDataType(metricAnnot.dataType()); - metric.setDescription(metricAnnot.description()); - metric.setUnits(metricAnnot.units()); - props.getMetrics().add(metric); - } - } - -} diff --git 
a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/DirectoryClassLoader.java b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/DirectoryClassLoader.java deleted file mode 100644 index b4a729f..0000000 --- a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/DirectoryClassLoader.java +++ /dev/null @@ -1,142 +0,0 @@ -/* - * RHQ Management Platform - * Copyright (C) 2005-2013 Red Hat, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA - */ - -package org.rhq.helpers.pluginGen; - -import java.io.ByteArrayOutputStream; -import java.io.File; -import java.io.FileFilter; -import java.io.FileInputStream; -import java.nio.file.FileVisitor; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.ArrayList; -import java.util.Hashtable; -import java.util.List; - -/** - * Classloader to load from a given directory - * @author Heiko W. 
Rupp - */ -public class DirectoryClassLoader extends ClassLoader { - private Hashtable classes = new Hashtable(); //used to cache already defined classes - private String baseDir; - - @Override - protected Class<?> findClass(String pathName) throws ClassNotFoundException { - - - if (baseDir==null) { - throw new IllegalStateException("Must set baseDir first"); - } - - byte classByte[]; - Class result = null; - - String className = pathName.substring(baseDir.length()+1); // remove base dir - className = className.substring(0,className.length()-6); // remove .class - className = className.replaceAll(File.separator,"."); // change / -> . - - result = (Class) classes.get(className); //checks in cached classes - if (result != null) { - return result; - } - try { - File classFile = new File(pathName); - FileInputStream fis = new FileInputStream(classFile); - - ByteArrayOutputStream byteStream = new ByteArrayOutputStream(); - int nextValue = fis.read(); - while (-1 != nextValue) { - byteStream.write(nextValue); - nextValue = fis.read(); - } - - classByte = byteStream.toByteArray(); - result = defineClass(className, classByte, 0, classByte.length, null); - classes.put(className, result); - return result; - } catch (Exception e) { - return null; - } - } - - public void setBaseDir(String baseDir) { - this.baseDir = baseDir; - } - - public List<Class> findClasses() { - if (baseDir==null) { - throw new IllegalStateException("Must set baseDir first"); - } - - File baseFile = new File(baseDir); - if(!baseFile.isDirectory()) { - throw new IllegalStateException("BaseDir is no directory"); - } - if (!baseFile.canRead()) { - throw new IllegalStateException("BaseDir is not readable"); - } - - List<File> files = walk(baseFile); - - List<Class> classes = new ArrayList<>(); - for (File file : files) { - String fileName = file.getAbsolutePath(); - - Class clazz = null; - try { - clazz = findClass(fileName); - } catch (ClassNotFoundException e) { - e.printStackTrace(); // TODO: Customise 
this generated block - } - classes.add(clazz); - - } - - return classes; - } - - private List<File> walk(File path) { - - List<File> files = new ArrayList<>(); - - File[] list = path.listFiles(); - - if (list == null) { - return files; - } - - for ( File f : list ) { - if ( f.isDirectory() ) { - List<File> newFiles = walk( f ); - System.out.println( "Dir:" + f.getAbsoluteFile() ); - files.addAll(newFiles); - } - else { - System.out.println( "File:" + f.getAbsoluteFile() ); - if (f.getName().endsWith(".class") && !f.getName().contains("$")) { - files.add(f); - } - - } - } - return files; - } -} diff --git a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Generator.java b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Generator.java deleted file mode 100644 index 21d0d4d..0000000 --- a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Generator.java +++ /dev/null @@ -1,357 +0,0 @@ -/* - * RHQ Management Platform - * Copyright (C) 2005-2013 Red Hat, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA - */ - -package org.rhq.helpers.pluginGen; - -import java.io.File; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -import javafx.application.Application; -import javafx.beans.value.ChangeListener; -import javafx.beans.value.ObservableValue; -import javafx.collections.ObservableList; -import javafx.event.ActionEvent; -import javafx.event.EventHandler; -import javafx.geometry.Insets; -import javafx.geometry.Pos; -import javafx.scene.Node; -import javafx.scene.Scene; -import javafx.scene.control.*; -import javafx.scene.layout.BorderPane; -import javafx.scene.layout.GridPane; -import javafx.scene.layout.HBox; -import javafx.scene.layout.VBox; -import javafx.scene.paint.Color; -import javafx.scene.text.Font; -import javafx.scene.text.FontPosture; -import javafx.scene.text.FontWeight; -import javafx.scene.text.Text; -import javafx.scene.text.TextAlignment; -import javafx.stage.DirectoryChooser; -import javafx.stage.Stage; - -/** - * JavaFX version of the plugin generator - * @author Heiko W. 
Rupp - */ -public class Generator extends Application{ - - Props props = new Props(); - private Text errorMessage; - private Stage primaryStage; - - public static void main(String[] args) { - launch(args); - } - - - @Override - public void start(Stage stage) throws Exception { - primaryStage = stage; - - Button createButton = new Button(); - createButton.setText("Create!"); - createButton.setAlignment(Pos.BOTTOM_RIGHT); - createButton.setOnAction(new EventHandler<ActionEvent>() { - @Override - public void handle(ActionEvent actionEvent) { - - PluginGen pluginGen = new PluginGen(); - pluginGen.postprocess(props); - try { - pluginGen.generate(props); - setInfoMessage("Generated!"); - - } - catch (Exception e) { - setErrorMessage("Error during generation: " + e.getMessage()); - } - } - }); - - GridPane pluginLevelPane = new GridPane(); - pluginLevelPane.setPadding(new Insets(10)); - Text pluginLevelDescription = new Text("Plugin level properties"); - pluginLevelDescription.setTextAlignment(TextAlignment.CENTER); - pluginLevelDescription.setFont(Font.font("Arial", FontWeight.BOLD, 14)); - - Text resourceLevelDescription = new Text("ResourceType level properties"); - resourceLevelDescription.setTextAlignment(TextAlignment.CENTER); - resourceLevelDescription.setFont(Font.font("Arial", FontWeight.BOLD, 14)); - - Text descriptionDescription = new Text("Field description"); - descriptionDescription.setTextAlignment(TextAlignment.CENTER); - descriptionDescription.setFont(Font.font("Arial", FontWeight.BOLD, 14)); - - HBox msgBox = getMessagesBox(); - - GridPane resourceLevelPane = new GridPane(); - resourceLevelPane.setPadding(new Insets(10)); - Text descriptionText = new Text(); - descriptionText.setFont(Font.font("Arial", FontPosture.ITALIC,12)); - - VBox innerBox = new VBox(); - innerBox.setAlignment(Pos.CENTER_LEFT); - innerBox.setPadding(new Insets(25, 25, 25, 25)); - innerBox.setSpacing(8); - - addFields(pluginLevelPane, true, descriptionText); - 
addFields(resourceLevelPane, false, descriptionText); - - ObservableList<Node> children = innerBox.getChildren(); - children.add(pluginLevelDescription); - children.add(pluginLevelPane); - children.add(resourceLevelDescription); - children.add(resourceLevelPane); - - - ScrollPane scrollPane = new ScrollPane(); - scrollPane.setContent(innerBox); - - BorderPane outerBox = new BorderPane(); - outerBox.setTop(msgBox); - outerBox.setPadding(new Insets(5)); - outerBox.setCenter(scrollPane); - - VBox descriptionBox = new VBox(); - descriptionBox.getChildren().add(descriptionDescription); - descriptionBox.getChildren().add(descriptionText); - outerBox.setBottom(descriptionBox); - - outerBox.setRight(createButton); - - - stage.setScene(new Scene(outerBox, 600, 550)); - stage.show(); - } - - private HBox getMessagesBox() { - HBox msgBox = new HBox(); - Label label = new Label("Messages:"); - msgBox.getChildren().add(label); - errorMessage = new Text(); - errorMessage.setFont(Font.font("Arial", FontWeight.SEMI_BOLD, 15)); - errorMessage.setId("errorMessage"); - msgBox.getChildren().add(errorMessage); - msgBox.setPadding(new Insets(5)); - msgBox.setSpacing(3); - msgBox.setAlignment(Pos.BASELINE_LEFT); - return msgBox; - } - - private int addFields(final GridPane root, boolean pluginLevel, final Text descriptionField) { - - int row = 0; - for (final Prop prop : Prop.values()) { - - if (!prop.isPluginLevel()==pluginLevel) { - continue; - } - - // Add the label - String name = prop.readableName(); - Label fieldName = new Label(name); - root.add(fieldName,0,row); - - // Now add the field itself - final Class propType = prop.getType(); - if (propType.equals(String.class)) { - addStringField(root, descriptionField, row, prop); - } else if (propType.equals(Boolean.class) || propType.equals(boolean.class)) { - addBooleanField(root, row, prop); - } else if (propType.equals(ResourceCategory.class)) { - addResourceTypeChooser(root, row, prop); - } else if (propType.equals(File.class)) { 
- addDirectoryChooserField(root, row, prop, descriptionField); - - } - - row++; - - } - - return row; - } - - private void addDirectoryChooserField(GridPane root, int row, final Prop prop, final Text descriptionField) { - // Can not add this directly, so add a button to trigger it - final TextField input = new TextField(); - root.add(input,1,row); - Tooltip tooltip = new Tooltip(prop.getDescription()); - Button pickButton = new Button("Pick"); - input.setTooltip(tooltip); - pickButton.setTooltip(tooltip); - pickButton.setOnAction(new EventHandler<ActionEvent>() { - @Override - public void handle(ActionEvent actionEvent) { - DirectoryChooser chooser = new DirectoryChooser(); - chooser.setTitle(prop.getDescription()); - File dir = chooser.showDialog(primaryStage); - if (dir != null) { - String dirName = dir.getAbsolutePath(); - setPropsValue(prop.getVariableName(), dirName, String.class); - clearErrorMessage(); - input.setText(dirName); - } else { - setErrorMessage("No directory selected"); - input.setText("Pick a directory"); - } - } - }); - input.focusedProperty().addListener(new ShowFieldDescriptionHandler(prop,descriptionField,input)); - // Add validation of the input - input.textProperty().addListener(new ChangeListener<String>() { - @Override - public void changed(ObservableValue<? 
extends String> observableValue, String s, String newText) { - File file = new File(newText); - if (!file.isDirectory()) { - setErrorMessage(newText + " is no directory"); - } else if (prop.isDirectoryWriteable() && !file.canWrite()) { - setErrorMessage(newText + " is not writable"); - } else if (!prop.isDirectoryWriteable() && !file.canRead()) { - setErrorMessage(newText + " is not readable"); - } else { - clearErrorMessage(); - } - - } - }); - - - - root.add(pickButton,2,row); - } - - private void addResourceTypeChooser(GridPane root, int row, final Prop prop) { - final ChoiceBox choiceBox = new ChoiceBox(); - for (ResourceCategory cat : ResourceCategory.values()) { - choiceBox.getItems().add(cat.getLowerName()); - } - choiceBox.getSelectionModel().selectLast(); // service is default - choiceBox.getSelectionModel().selectedItemProperty().addListener(new ChangeListener<String>() { - @Override - public void changed(ObservableValue<? extends String> observableValue, String s, String newValue) { - ResourceCategory newCategory = ResourceCategory.valueOf(newValue.toUpperCase()); - setPropsValue(prop.getVariableName(),newCategory,prop.getType()); - } - }); - Tooltip tooltip = new Tooltip(prop.getDescription()); // TODO make this a hover listener - choiceBox.setTooltip(tooltip); - root.add(choiceBox,1,row); - } - - private void addBooleanField(GridPane root, int row, final Prop prop) { - final ChoiceBox choiceBox = new ChoiceBox(); - choiceBox.getItems().addAll("Yes", "No"); - choiceBox.getSelectionModel().selectLast(); // NO is default - choiceBox.getSelectionModel().selectedItemProperty().addListener(new ChangeListener<String>() { - @Override - public void changed(ObservableValue<? 
extends String> observableValue, String s, String newValue) { - setPropsValue(prop.getVariableName(), newValue.equals("Yes"), prop.getType()); - } - }); - Tooltip tooltip = new Tooltip(prop.getDescription()); // TODO make this a hover listener - choiceBox.setTooltip(tooltip); - - root.add(choiceBox, 1, row); - } - - private void addStringField(GridPane root, final Text descriptionField, int row, final Prop prop) { - final Pattern pattern = Pattern.compile(prop.getValidationRegex()); - - final TextField input = new TextField(); - if (prop.getDefaultValue()!=null && !prop.getDefaultValue().isEmpty()) { - input.setText(prop.getDefaultValue()); - setPropsValue(prop.getVariableName(),prop.getDefaultValue(),prop.getType()); - } - // Add field leave event to fill in the props with the result - input.focusedProperty().addListener(new ShowFieldDescriptionHandler(prop,descriptionField,input)); - // Add validation of the input - input.textProperty().addListener(new ChangeListener<String>() { - @Override - public void changed(ObservableValue<? 
extends String> observableValue, String s, String newText) { - Matcher m = pattern.matcher(newText); - if (!m.matches()) { - setErrorMessage("Input does not match " + prop.getValidationRegex()); - } else { - clearErrorMessage(); - } - - } - }); - root.add(input, 1, row); - } - - private void setInfoMessage(String message) { - errorMessage.setText(message); - errorMessage.setFill(Color.DARKGREEN); - } - private void setErrorMessage(String message) { - errorMessage.setText(message); - errorMessage.setFill(Color.RED); - } - - private void clearErrorMessage() { - errorMessage.setText(""); - errorMessage.setFill(Color.WHITE); - } - - private void setPropsValue(String variableName, Object value, Class type) { - - String var = variableName.substring(0,1).toUpperCase() + variableName.substring(1); - String setterName = "set"+ var; - - try { - Method setter = Props.class.getDeclaredMethod(setterName,type); - setter.invoke(props,value); - } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException e) { - e.printStackTrace(); // TODO: Customise this generated block - setErrorMessage(e.getMessage()); - } - } - - - private class ShowFieldDescriptionHandler implements ChangeListener<Boolean> { - - private Prop prop; - private Text descriptionField; - private TextField input; - - private ShowFieldDescriptionHandler(Prop prop,Text descriptionField, TextField input) { - this.prop = prop; - this.descriptionField = descriptionField; - this.input = input; - } - - @Override - public void changed(ObservableValue<? extends Boolean> observableValue, Boolean oldState, - Boolean newState) { - if (newState) { // User entered input field - descriptionField.setText(prop.getDescription()); - } - else { // User left input field - descriptionField.setText(""); - setPropsValue(prop.getVariableName(),input.getText(), String.class); // TODO right place? 
- } - } - } -} diff --git a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/PluginGen.java b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/PluginGen.java deleted file mode 100644 index 8722446..0000000 --- a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/PluginGen.java +++ /dev/null @@ -1,409 +0,0 @@ -/* - * RHQ Management Platform - * Copyright (C) 2005-2008 Red Hat, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ -package org.rhq.helpers.pluginGen; - -import java.io.BufferedReader; -import java.io.BufferedWriter; -import java.io.File; -import java.io.FileWriter; -import java.io.IOException; -import java.io.InputStreamReader; -import java.io.Writer; -import java.lang.reflect.Method; -import java.util.HashMap; -import java.util.List; -import java.util.Locale; -import java.util.Map; - -import freemarker.cache.ClassTemplateLoader; -import freemarker.cache.MultiTemplateLoader; -import freemarker.cache.TemplateLoader; -import freemarker.template.Configuration; -import freemarker.template.Template; -import freemarker.template.TemplateException; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; - -/** - * Main class for the plugin generator - * - * @author Heiko W. 
Rupp - */ -public class PluginGen { - - private final Log log = LogFactory.getLog(PluginGen.class); - - public static void main(String[] arg) throws Exception { - - if (arg.length>0) { - if (arg[0].equals("-ui")) { - Generator.main(arg); - - } - else { - System.out.println("use option -ui to start the UI version"); - } - System.exit(0); - } - - - PluginGen pg = new PluginGen(); - pg.run(); - - } - - public PluginGen() { - } - - public void run() throws Exception { - - Props props = null; - BufferedReader br = new BufferedReader(new InputStreamReader(System.in)); - try { - - props = askQuestions(br, new Props()); - if (props == null) { - // abort by user - return; - } - - boolean done = false; - do { - - System.out.println(); - System.out.print("Do you want to add a child to " + props.getName() + "? (y/N) "); - String answer = br.readLine(); - if (answer==null) { - break; - } - answer = answer.toLowerCase(Locale.getDefault()); - if (answer.startsWith("n") || answer.length() == 0) - done = true; - else { - Props child = askQuestions(br, props); - if (child == null) { - // abort by user - return; - } - props.getChildren().add(child); - } - - } while (!done); - } catch (IOException ioe) { - System.err.println("Internal error happended: " + ioe.getMessage()); - } finally { - br.close(); - } - - if (props!=null) { - log.info("\nYou have chosen:\n" + props.toString()); - postprocess(props); - generate(props); - } - - System.out.println("Don't forget to "); - System.out.println(" - add your plugin to the parent pom.xml if needed"); - System.out.println(" - edit pom.xml of your plugin"); - System.out.println(" - edit rhq-plugin.xml of your plugin"); - } - - /** - * Do some post processing over the input received. - * @param props The properties just recorded from the user input - */ - protected void postprocess(Props props) { - - // Set the package - String pkg = props.getPackagePrefix() + "." 
+ props.getName(); - props.setPkg(pkg); - - String name = props.getName(); // Type name - - if (props.getComponentClass().contains("{name}")) { - props.setComponentClass(props.getComponentClass().replace("{name}",name)); - } - - if (props.getDiscoveryClass().contains("{name}")) { - props.setDiscoveryClass(props.getDiscoveryClass().replace("{name}",name)); - } - - for (Props cProp : props.getChildren()) { - cProp.setPkg(pkg); - } - - if (props.getScanForAnnotations()!=null) { - AnnotationProcessor ap = new AnnotationProcessor(props.getScanForAnnotations()); - ap.populate(props); - } - } - - /** - * Ask the questions by introspecting the {@link Props} class - * @param br BufferedReader to read the users answers from - * @param parentProps Props of the parent - some of them will be copied to the children - * @return an initialized Props object - * @throws Exception if anything goes wrong - * @see org.rhq.helpers.pluginGen.Props - */ - private Props askQuestions(BufferedReader br, Props parentProps) throws Exception { - - Method[] meths = Props.class.getDeclaredMethods(); - Props props = new Props(); - - System.out.print("Please specify the plugin root category "); - List<ResourceCategory> possibleChildren = ResourceCategory.getPossibleChildren(parentProps.getCategory()); - for (ResourceCategory cat : possibleChildren) { - System.out.print(cat + "(" + cat.getAbbrev() + "), "); - } - - String answer = br.readLine(); - answer = answer.toUpperCase(Locale.getDefault()); - ResourceCategory cat = ResourceCategory.getByAbbrv(answer.charAt(0)); - if (cat != null) - props.setCategory(cat); - else { - System.err.println("Bad answer, only use P/S/I"); - System.exit(1); - } - - for (Method m : meths) { - String name = m.getName(); - if (!name.startsWith("get") && !name.startsWith("is")) - continue; - - Class retType = m.getReturnType(); - if (!retType.equals(String.class) && !retType.equals(Boolean.TYPE)) { - continue; - } - - if (name.startsWith("get")) - name = 
name.substring(3); - else - name = name.substring(2); - - if (name.equals("PackagePrefix") && parentProps.getPackagePrefix() != null) { - props.setPackagePrefix(parentProps.getPackagePrefix()); - } else if (name.equals("FileSystemRoot") && parentProps.getFileSystemRoot() != null) { - props.setFileSystemRoot(parentProps.getFileSystemRoot()); - } else if (name.equals("ParentType") && parentProps.getName() != null) { - // Set parent type always when we are in the child - props.setParentType(caps(parentProps.getComponentClass())); - } else if (name.equals("UsesExternalJarsInPlugin") && parentProps.getName() != null) { - // Skip this one on children - } else if (name.equals("UsePluginLifecycleListenerApi") && parentProps.getName() != null) { - // Skip this one on children - } else if (name.equals("DependsOnJmxPlugin") && parentProps.getName() != null) { - // Skip this one on children - } else if (name.equals("RhqVersion") && parentProps.getName() != null) { - // Skip this one on children - } else if (name.equals("Pkg")) { - // Always skip this - we postprocess it - } else { - - System.out.print("Please specify"); - boolean isBool = false; - if (retType.equals(Boolean.TYPE)) { - System.out.print(" if it should support " + name + " (y/N): "); - isBool = true; - } else { - System.out.print(" its " + name + ": "); - } - - answer = br.readLine(); - if (answer == null) { - System.out.println("EOL .. 
aborting"); - return null; - } - String setterName = "set" + caps(name); - - Method setter; - if (isBool) - setter = Props.class.getMethod(setterName, Boolean.TYPE); - else - setter = Props.class.getMethod(setterName, String.class); - - if (isBool) { - if (answer.toLowerCase(Locale.getDefault()).startsWith("y") - || answer.toLowerCase(Locale.getDefault()).startsWith("j")) { - setter.invoke(props, true); - } - } else { - if (!answer.startsWith("\n") && !answer.startsWith("\r") && !(answer.length() == 0)) - setter.invoke(props, answer); - } - } - } - - return props; - } - - /** - * Trigger the generation of the directory hierarchy. - * @param props Parameters to take into account - */ - protected void generate(Props props) { - - log.info("Generating..."); - - if (props.getFileSystemRoot() == null || props.getFileSystemRoot().equals("")) { - log.error("No root directory given, can not continue"); - return; - } - - File baseDir = new File(props.getFileSystemRoot()); - if (!baseDir.isDirectory()) { - log.error("This is no directory: '" + baseDir.getAbsolutePath() +"'"); - return; - } - - boolean success; - File activeDirectory = new File(props.getFileSystemRoot(), props.getPluginName()); - - if (!activeDirectory.exists()) { - success = activeDirectory.mkdir(); - if (!success) { - log.error("Creation of plugin basedir failed"); - return; - } - } - - // write pom.xml - createFile(props, "pom", "pom.xml", activeDirectory.getAbsolutePath()); - - // Create java directory hierarchy - String path = activeDirectory.getAbsolutePath() + File.separator + "src" + File.separator + "main" - + File.separator; - - activeDirectory = new File(path); - if (!activeDirectory.exists()) { - success = activeDirectory.mkdirs(); - if (!success) { - log.error("Creation of main directory failed"); - return; - } - } - File resourceDirs = new File(path + File.separator + "resources" + File.separator + "META-INF"); - if (!resourceDirs.exists()) { - success = resourceDirs.mkdirs(); - if (!success) { - 
log.error("Creation of resources/META-INF failed"); - return; - } - } - // create rhq-plugin.xml below resourceDirs - createFile(props, "descriptor", "rhq-plugin.xml", resourceDirs.getAbsolutePath()); - - File javaDirs = new File(path + File.separator + "java" + File.separator - + toDirPath(props.getPackagePrefix(), File.separator) + props.getName()); - if (!javaDirs.exists()) { - success = javaDirs.mkdirs(); - if (!success) { - log.error("Creation of java package failed"); - return; - } - } - // create Discovery and component classes - createFile(props, "discovery", props.getDiscoveryClass() + ".java", javaDirs.getAbsolutePath()); - createFile(props, "component", props.getComponentClass() + ".java", javaDirs.getAbsolutePath()); - - if (props.isEvents()) { - createFile(props, "eventPoller", caps(props.getName()) + "EventPoller.java", javaDirs.getAbsolutePath()); - } - - // See if there are children and create for them too - if (!props.getChildren().isEmpty()) - log.info("Creating child services"); - - for (Props cProps : props.getChildren()) { - createFile(cProps, "discovery", cProps.getDiscoveryClass() + ".java", javaDirs.getAbsolutePath()); - createFile(cProps, "component", cProps.getComponentClass() + ".java", javaDirs.getAbsolutePath()); - - // create EventPoller - if (cProps.isEvents()) { - createFile(cProps, "eventPoller", caps(cProps.getName()) + "EventPoller.java", javaDirs - .getAbsolutePath()); - } - } - - log.info("Done .."); - - } - - /** - * Translate a package into a filesystem path - * @param pkg Package in standard notation like com.acme.plugins - * @param separator File separator - * @return a path suitable to pass to File - */ - private String toDirPath(String pkg, String separator) { - - String res = pkg.replaceAll("\\.", separator); - if (!pkg.endsWith(".")) - res += separator; - return res; - } - - /** - * Apply a template to generate a file - * @param props The properties used to create the respective file - * @param template The name of the 
template without .ftl suffix - * @param fileName The name of the file to create - * @param directory The name of the directory to create in - */ - public void createFile(Props props, String template, String fileName, String directory) { - - try { - log.info("Trying to generate " + directory + "/" + fileName); - Configuration config = new Configuration(); - - // XXX fall-over to ClassTL after failure in FTL seems not to work - // FileTemplateLoader ftl = new FileTemplateLoader(new File("src/main/resources")); - ClassTemplateLoader ctl = new ClassTemplateLoader(getClass(), "/"); - TemplateLoader[] loaders = new TemplateLoader[] { ctl }; - MultiTemplateLoader mtl = new MultiTemplateLoader(loaders); - - config.setTemplateLoader(mtl); - - Template templ = config.getTemplate(template + ".ftl"); - - Writer out = new BufferedWriter(new FileWriter(new File(directory, fileName))); - try { - Map<String, Props> root = new HashMap<String, Props>(); - root.put("props", props); - templ.process(root, out); - } - finally { - out.close(); - } - } catch (IOException ioe) { - ioe.printStackTrace(); - } catch (TemplateException te) { - te.printStackTrace(); - } - - } - - static String caps(String in) { - if (in == null) - return null; - - return in.substring(0, 1).toUpperCase(Locale.getDefault()) + in.substring(1); - } -} diff --git a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Prop.java b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Prop.java deleted file mode 100644 index 40e729c..0000000 --- a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Prop.java +++ /dev/null @@ -1,135 +0,0 @@ -/* - * RHQ Management Platform - * Copyright (C) 2005-2013 Red Hat, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation version 2 of the License. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA - */ - -package org.rhq.helpers.pluginGen; - -import java.io.File; - -/** - * All the properties that can be configured along with type, description and simple validation rules - * @author Heiko W. Rupp - */ -public enum Prop { - - PLUGIN_NAME("pluginName", String.class,"Name of the plugin", "\\w+" , true ), - PLUGIN_DESCRIPTION("pluginDescription", String.class,"Description of the plugin",".*" , true ), - PACKAGE("packagePrefix", String.class,"Default Package","[a-zA-Z\\.]+",true ), - FILE_ROOT("fileSystemRoot", File.class,"Root directory to put the plugin into",".*",true , true,null), - RHQ_VERSION("rhqVersion",String.class,"RHQ version to use","[0-9][0-9\\.]+",true), - - CATEGORY("category", ResourceCategory.class, "Category of the resource type (platform = host level)",null), - TYPE_NAME("name", String.class, "Name of the resource type", "\\w+"), - DESCRIPTION("description", String.class, "Description of the type", ".*"), - DISCOVERY_CLASS("discoveryClass", String.class, "Name of the Discovery class. '{name}' will be replaced with the type name", "[A-Z][a-zA-Z0-9]*",false,false,"{name}Discovery"), - COMPONENT_CLASS("componentClass", String.class, "Name of the Discovery class. 
'{name}' will be replaced with the type name", "[A-Z][a-zA-Z0-9]*",false,false,"{name}Component"), - IS_SINGLETON("singleton",boolean.class,"Is this type a singleton, which means that" + - " there can only be one resource of that type for the given parent?"), - HAS_METRICS("hasMetrics",boolean.class,"Does this type support taking metrics?"), - HAS_OPERATIONS("hasOperations",boolean.class,"Does this type support operations?"), - HAS_EVENTS("events",boolean.class,"Does this type support events?"), - HAS_SUPPORT_FACET("supportFacet",boolean.class,"Does this type support the support facet?"), - RESOURCE_CONFIGURATION("resourceConfiguration",boolean.class,"Does this type support " + - "configuring the resource?"), - CAN_CREATE_CHILDREN("createChildren",boolean.class,"Can the type create child resources?"), - CAN_DELETE_CHILDREN("deleteChildren",boolean.class,"Can the type delete child resources?"), - USE_EXTENAL_JARS("usesExternalJarsInPlugin",boolean.class,"Will the plugin use external jars in the plugin jar?"), - ALLOW_MANUAL_ADD("manualAddOfResourceType",boolean.class,"Should manually adding resource be supported?"), - USE_LIFECYLE_API("usePluginLifecycleListenerApi",boolean.class,"Should the plugin lifecycle api be supported?"), - DEPENDS_ON_JMX_PLUGIN("dependsOnJmxPlugin",boolean.class,"Does the plugin use JMX and extend the JMX Plugin?"), - DEPENDS_ON_AS7_PLUGIN("dependsOnAs7Plugin",boolean.class,"Does the plugin use DMR and extend the AS7 Plugin?"), - USE_SUPPORT_FACET("supportFacet",boolean.class,"Will the support facet be used?"), - - SCAN_FOR_ANNOTATIONS("scanForAnnotations",File.class,"Directory to scan for plugin annotations to include in type",null, false,false, null) - ; - - private String variableName; - private Class type; - private String description; - private boolean pluginLevel; - private boolean directoryWriteable; - private String defaultValue; - private String validationRegex; - - private Prop(String variableName, Class type, String description, 
String validationRegex, boolean pluginLevel, boolean directoryWriteable, String defaultValue) { - this.variableName = variableName; - this.type = type; - this.description = description; - this.validationRegex = validationRegex; - this.pluginLevel = pluginLevel; - this.directoryWriteable = directoryWriteable; - this.defaultValue = defaultValue; - } - - private Prop(String variableName, Class type, String description, String validationRegex, boolean pluginLevel) { - this(variableName,type,description,validationRegex,pluginLevel,false,null); - } - - private Prop(String variableName, Class type, String description, String validationRegex) { - this(variableName,type,description,validationRegex,false,false,null); - } - - private Prop(String variableName, Class type, String description) { - this(variableName,type,description,null,false,false,null); - } - - public String getVariableName() { - return variableName; - } - - public Class getType() { - return type; - } - - public String getDescription() { - return description; - } - - public boolean isPluginLevel() { - return pluginLevel; - } - - public String getValidationRegex() { - return validationRegex; - } - - public String readableName() { - - String name = name(); - name = name.replaceAll("_", " "); - String[] parts = name.split(" "); - StringBuilder builder = new StringBuilder(); - for (int i = 0; i < parts.length; i++) { - String part = parts[i]; - part = part.substring(0,1).toUpperCase() + part.substring(1).toLowerCase(); - builder.append(part); - if (i < parts.length-1) { - builder.append(" "); - } - } - return builder.toString(); - } - - public boolean isDirectoryWriteable() { - return directoryWriteable; - } - - public String getDefaultValue() { - return defaultValue; - } -} diff --git a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Props.java b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Props.java deleted file mode 100644 index c88fbc0..0000000 --- 
a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Props.java +++ /dev/null @@ -1,617 +0,0 @@ -/* - * RHQ Management Platform - * Copyright (C) 2005-2010 Red Hat, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ -package org.rhq.helpers.pluginGen; - -import java.util.HashSet; -import java.util.LinkedHashSet; -import java.util.Set; - -import org.rhq.helpers.pluginAnnotations.agent.DataType; -import org.rhq.helpers.pluginAnnotations.agent.DisplayType; -import org.rhq.helpers.pluginAnnotations.agent.Units; - -/** - * The properties needed to generate a new plugin skeleton - * - * @author Heiko W. Rupp - */ -@SuppressWarnings("unused") -public class Props { - - /** What category is this ? */ - private ResourceCategory category = ResourceCategory.SERVICE; - /** The name of this item */ - private String name; - /** A description of the plugin */ - private String description; - /** Package name prefix */ - private String packagePrefix; - /** String package */ - private String pkg; - /** The name of the DiscoveryClass */ - private String discoveryClass; - /** The name of the Component class */ - private String componentClass; - /** The type of the parent we run in */ - private String parentType; - /** Filesytem root */ - private String fileSystemRoot; - /** Should this service do monitoring ? 
*/ - private boolean hasMetrics; - /** Should this service do operations ? */ - private boolean hasOperations; - /** Is this service a singleton (e.g. a XYZ subsystem) */ - private boolean singleton; - /** Does the service support configuration ? */ - private boolean resourceConfiguration; - /** Does the service support events */ - private boolean events; - /** Does the service support the support facet? */ - private boolean supportFacet; - /** Can the service create children ? */ - private boolean createChildren; - /** Can the service delete children ? */ - private boolean deleteChildren; - /** Use externals jars in the plugin jar ? */ - private boolean usesExternalJarsInPlugin; - /** Does it support manual add of children ? */ - private boolean manualAddOfResourceType; - /** Does it use the PluginLifecycleListener api ? */ - private boolean usePluginLifecycleListenerApi; - /** Depends on JMX plugin ? */ - private boolean dependsOnJmxPlugin; - /** Depends on AS7 plugin ? */ - private boolean dependsOnAs7Plugin; - /** Directory with java files to scan for plugin annotations */ - private String scanForAnnotations; - /** What version of RHQ should this plugin's pom use ? 
*/ - private String rhqVersion = "4.8.0"; - - /** Embedded children */ - private Set<Props> children = new HashSet<Props>(); - - private Set<SimpleProperty> pluginConfig = new LinkedHashSet<SimpleProperty>(); - private Set<SimpleProperty> resourceConfig = new LinkedHashSet<SimpleProperty>(); - - private Set<Template> templates = new HashSet<Template>(); - - private Set<MetricProps> metrics = new LinkedHashSet<MetricProps>(); - - private Set<OperationProps> operations = new LinkedHashSet<OperationProps>(); - - private Set<TypeKey> runsInsides = new LinkedHashSet<TypeKey>();; - - private String pluginName; - private String pluginDescription; - - public ResourceCategory getCategory() { - return category; - } - - public void setCategory(ResourceCategory category) { - this.category = category; - } - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - public String getDiscoveryClass() { - return discoveryClass; - } - - public void setDiscoveryClass(String discoveryClass) { - this.discoveryClass = discoveryClass; - } - - public String getComponentClass() { - return componentClass; - } - - public void setComponentClass(String componentClass) { - this.componentClass = componentClass; - } - - public String getParentType() { - return parentType; - } - - public void setParentType(String parentType) { - this.parentType = parentType; - } - - public boolean isHasMetrics() { - return hasMetrics; - } - - public void setHasMetrics(boolean hasMetrics) { - this.hasMetrics = hasMetrics; - } - - public boolean isHasOperations() { - return hasOperations; - } - - public void setHasOperations(boolean hasOperations) { - this.hasOperations = hasOperations; - } - - public boolean isSingleton() { - return singleton; - } - - public void setSingleton(boolean singleton) { - this.singleton = singleton; - } - - public boolean isResourceConfiguration() { - return resourceConfiguration; - } - - public void setResourceConfiguration(boolean 
resourceConfiguration) { - this.resourceConfiguration = resourceConfiguration; - } - - public boolean isEvents() { - return events; - } - - public void setEvents(boolean events) { - this.events = events; - } - - public boolean isSupportFacet() { - return supportFacet; - } - - public void setSupportFacet(boolean supportFacet) { - this.supportFacet = supportFacet; - } - - public boolean isCreateChildren() { - return createChildren; - } - - public void setCreateChildren(boolean createChildren) { - this.createChildren = createChildren; - } - - public Set<Props> getChildren() { - return children; - } - - public void setChildren(Set<Props> children) { - this.children = children; - } - - public String getPackagePrefix() { - return packagePrefix; - } - - public void setPackagePrefix(String packagePrefix) { - this.packagePrefix = packagePrefix; - } - - public String getFileSystemRoot() { - return fileSystemRoot; - } - - public void setFileSystemRoot(String fileSystemRoot) { - this.fileSystemRoot = fileSystemRoot; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public boolean isUsesExternalJarsInPlugin() { - return usesExternalJarsInPlugin; - } - - public void setUsesExternalJarsInPlugin(boolean usesExternalJarsInPlugin) { - this.usesExternalJarsInPlugin = usesExternalJarsInPlugin; - } - - public boolean isDeleteChildren() { - return deleteChildren; - } - - public void setDeleteChildren(boolean deleteChildren) { - this.deleteChildren = deleteChildren; - } - - public boolean isManualAddOfResourceType() { - return manualAddOfResourceType; - } - - public void setManualAddOfResourceType(boolean manualAddOfResourceType) { - this.manualAddOfResourceType = manualAddOfResourceType; - } - - public void setPkg(String pkg) { - this.pkg = pkg; - } - - public String getPkg() { - return this.pkg; - } - - public boolean isUsePluginLifecycleListenerApi() { - return 
usePluginLifecycleListenerApi; - } - - public void setUsePluginLifecycleListenerApi(boolean usePluginLifecycleListenerApi) { - this.usePluginLifecycleListenerApi = usePluginLifecycleListenerApi; - } - - public boolean isDependsOnJmxPlugin() { - return dependsOnJmxPlugin; - } - - public void setDependsOnJmxPlugin(boolean dependsOnJmxPlugin) { - this.dependsOnJmxPlugin = dependsOnJmxPlugin; - } - - public String getRhqVersion() { - return rhqVersion; - } - - public void setRhqVersion(String rhqVersion) { - this.rhqVersion = rhqVersion; - } - - public Set<SimpleProperty> getPluginConfig() { - return pluginConfig; - } - - public void setPluginConfig(Set<SimpleProperty> pluginConfig) { - this.pluginConfig = pluginConfig; - } - - public Set<Template> getTemplates() { - return templates; - } - - public void setTemplates(Set<Template> templates) { - this.templates = templates; - } - - public Set<SimpleProperty> getResourceConfig() { - return resourceConfig; - } - - public void setResourceConfig(Set<SimpleProperty> resourceConfig) { - this.resourceConfig = resourceConfig; - } - - public Set<MetricProps> getMetrics() { - return metrics; - } - - public void setMetrics(Set<MetricProps> metricProps) { - this.metrics = metricProps; - } - - public Set<OperationProps> getOperations() { - return operations; - } - - public void setOperations(Set<OperationProps> opProps) { - this.operations = opProps; - } - - public String getPluginName() { - return pluginName; - } - - public void setPluginName(String pluginName) { - this.pluginName = pluginName; - } - - public String getPluginDescription() { - return pluginDescription; - } - - public void setPluginDescription(String pluginDescription) { - this.pluginDescription = pluginDescription; - } - - public Set<TypeKey> getRunsInsides() { - return runsInsides; - } - - public void setRunsInsides(Set<TypeKey> runsInsides) { - this.runsInsides = runsInsides; - } - - public boolean isDependsOnAs7Plugin() { - return dependsOnAs7Plugin; - } - - 
public void setDependsOnAs7Plugin(boolean dependsOnAs7Plugin) { - this.dependsOnAs7Plugin = dependsOnAs7Plugin; - } - - public String getScanForAnnotations() { - return scanForAnnotations; - } - - public void setScanForAnnotations(String scanForAnnotations) { - this.scanForAnnotations = scanForAnnotations; - } - - - @Override - public String toString() { - final StringBuilder sb = new StringBuilder(); - sb.append("Props"); - sb.append("{category=").append(category); - sb.append(", name='").append(name).append('\''); - sb.append(", description='").append(description).append('\''); - sb.append(", packagePrefix='").append(packagePrefix).append('\''); - sb.append(", pkg='").append(pkg).append('\''); - sb.append(", discoveryClass='").append(discoveryClass).append('\''); - sb.append(", componentClass='").append(componentClass).append('\''); - sb.append(", parentType='").append(parentType).append('\''); - sb.append(", fileSystemRoot='").append(fileSystemRoot).append('\''); - sb.append(", monitoring=").append(hasMetrics); - sb.append(", operations=").append(hasOperations); - sb.append(", metricProps=").append(metrics); - sb.append(", operationProps=").append(operations); - sb.append(", singleton=").append(singleton); - sb.append(", resourceConfiguration=").append(resourceConfiguration); - sb.append(", events=").append(events); - sb.append(", supportFacet=").append(supportFacet); - sb.append(", createChildren=").append(createChildren); - sb.append(", deleteChildren=").append(deleteChildren); - sb.append(", usesExternalJarsInPlugin=").append(usesExternalJarsInPlugin); - sb.append(", manualAddOfResourceType=").append(manualAddOfResourceType); - sb.append(", usePluginLifecycleListenerApi=").append(usePluginLifecycleListenerApi); - sb.append(", dependsOnJmxPlugin=").append(dependsOnJmxPlugin); - sb.append(", rhqVersion='").append(rhqVersion).append('\''); - sb.append(", children=").append(children); - sb.append(", simpleProps=").append(pluginConfig); - sb.append(", 
templates=").append(templates); - sb.append(", runsInsides=").append(runsInsides); - sb.append('}'); - return sb.toString(); - } - - public static class TypeKey { - private String name; - private String pluginName; - - public TypeKey(String name, String pluginName) { - this.name = name; - this.pluginName = pluginName; - } - - public String getPluginName() { - return pluginName; - } - - public String getName() { - return name; - } - - @Override - public String toString() { - return "TypeKey{" + - "name='" + name + '\'' + - ", pluginName='" + pluginName + '\'' + - '}'; - } - } - - public static class SimpleProperty { - private final String name; - private String description; - private String type; - private boolean readOnly; - private String displayName; - private String defaultValue; - - public SimpleProperty(String name) { - this.name = name; - } - - public String getName() { - return name; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public String getType() { - return type; - } - - public void setType(String type) { - this.type = type; - } - - public boolean isReadOnly() { - return readOnly; - } - - public void setReadOnly(boolean readOnly) { - this.readOnly = readOnly; - } - - public String getDisplayName() { - return displayName; - } - - public void setDisplayName(String displayName) { - this.displayName = displayName; - } - - public String getDefaultValue() { - return defaultValue; - } - - public void setDefaultValue(String defaultValue) { - this.defaultValue = defaultValue; - } - } - - public static class Template { - private final String name; - private String description; - private Set<SimpleProperty> simpleProps = new HashSet<SimpleProperty>(); - - public Template(String name) { - this.name = name; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = 
description; - } - - public Set<SimpleProperty> getSimpleProps() { - return simpleProps; - } - - public void setSimpleProps(Set<SimpleProperty> simpleProps) { - this.simpleProps = simpleProps; - } - - public String getName() { - return name; - } - } - - public static class MetricProps { - private final String property; - private String displayName; - private String description; - private DisplayType displayType; - private DataType dataType; - private Units units; - - public MetricProps(String property) { - this.property = property; - } - - public DataType getDataType() { - return dataType; - } - - public void setDataType(DataType dataType) { - this.dataType = dataType; - } - - public String getDisplayName() { - return displayName; - } - - public void setDisplayName(String displayName) { - this.displayName = displayName; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public DisplayType getDisplayType() { - return displayType; - } - - public void setDisplayType(DisplayType displayType) { - this.displayType = displayType; - } - - public Units getUnits() { - return units; - } - - public void setUnits(Units units) { - this.units = units; - } - - public String getProperty() { - return property; - } - } - - public static class OperationProps { - private final String name; - private String displayName; - private String description; - private Set<SimpleProperty> params = new LinkedHashSet<SimpleProperty>(); - private SimpleProperty result; - - public OperationProps(String name) { - this.name = name; - } - - public String getDisplayName() { - return displayName; - } - - public void setDisplayName(String displayName) { - this.displayName = displayName; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public String getName() { - return name; - } - - 
public Set<SimpleProperty> getParams() { - return params; - } - - public SimpleProperty getResult() { - return result; - } - - public void setResult(SimpleProperty result) { - this.result = result; - } - } -} diff --git a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/ResourceCategory.java b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/ResourceCategory.java deleted file mode 100644 index 911ed41..0000000 --- a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/ResourceCategory.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * RHQ Management Platform - * Copyright (C) 2005-2008 Red Hat, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ -package org.rhq.helpers.pluginGen; - -import java.util.Arrays; -import java.util.EnumMap; -import java.util.EnumSet; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.ArrayList; - -/** - * Possible categories - * @author Heiko W. 
Rupp - */ -public enum ResourceCategory { - /** Possible categories */ - PLATFORM('P'), SERVER('S'), SERVICE('I'); - - char abbrevLetter; - - private ResourceCategory(char abbrev) { - abbrevLetter = abbrev; - } - - private static Map<ResourceCategory,List<ResourceCategory>> enumMap = - new EnumMap<ResourceCategory, List<ResourceCategory>>(ResourceCategory.class); - - static { - for (ResourceCategory cat : ResourceCategory.values()) { - List<ResourceCategory> catList = new ArrayList<ResourceCategory>(); - switch (cat) { - case PLATFORM: - catList.addAll(Arrays.asList(PLATFORM, SERVER, SERVICE)); - break; - case SERVER: - catList.addAll(Arrays.asList(SERVER, SERVICE)); - break; - case SERVICE: - catList.addAll(Arrays.asList(SERVICE)); - break; - } - enumMap.put(cat,catList); - } - } - - public static List<ResourceCategory> getPossibleChildren(ResourceCategory parent) { - - if (parent == null) - return enumMap.get(PLATFORM); - else - return enumMap.get(parent); - - } - - public char getAbbrev() { - return abbrevLetter; - } - - public static ResourceCategory getByAbbrv(char abbrev) { - EnumSet<ResourceCategory> set = EnumSet.allOf(ResourceCategory.class); - for (ResourceCategory cat : set) { - if (cat.abbrevLetter ==abbrev) - return cat; - } - return null; - } - - public String getLowerName() { - return toString().toLowerCase(Locale.getDefault()); - } -} diff --git a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Test.java b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Test.java deleted file mode 100644 index 3c4c48e..0000000 --- a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Test.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * RHQ Management Platform - * Copyright (C) 2005-2008 Red Hat, Inc. - * All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ -package org.rhq.helpers.pluginGen; - -import java.io.File; - -/** - * Simple test class to easily trigger creation of output - * - * @author Heiko W. Rupp - */ -public class Test { - - public static void main(String[] args) throws Exception { - - File f = new File("."); - System.out.println("here: " + f.getAbsolutePath()); - - Props p = new Props(); - p.setName("foo"); - p.setPluginName("fooPlugin"); - p.setPluginDescription("fooDescription"); - p.setPkg("a.package"); - p.setCategory(ResourceCategory.SERVER); - p.setPackagePrefix("com.acme.plugin"); - p.setDiscoveryClass("FooDiscovery"); - p.setComponentClass("FooComponent"); - p.setResourceConfiguration(true); - p.setHasMetrics(true); - p.setHasOperations(true); - p.setEvents(true); - p.setCreateChildren(true); - p.setSingleton(true); - - Props child = new Props(); - child.setName("FooChild1"); - child.setPkg("child.package1"); - child.setCategory(ResourceCategory.SERVICE); - child.setPackagePrefix(p.getPackagePrefix()); - child.setDiscoveryClass("ChildDiscovery1"); - child.setComponentClass("ChildComponent1"); - child.setParentType("FooComponent"); - child.setEvents(true); - - p.getChildren().add(child); - - child = new Props(); - child.setName("FooChild2"); - child.setPkg("child.package2"); - child.setCategory(ResourceCategory.SERVICE); - 
child.setParentType("FooComponent"); - child.setPackagePrefix(p.getPackagePrefix()); - child.setDiscoveryClass("ChildDiscovery2"); - child.setComponentClass("ChildComponent2"); - child.setEvents(true); - child.setHasMetrics(true); - child.setHasOperations(true); - - p.getChildren().add(child); - - - PluginGen pg = new PluginGen(); - pg.createFile(p,"descriptor","rhq-plugin.xml","/tmp"); - pg.createFile(p,"component", "FooComponent.java", "/tmp"); - pg.createFile(p,"discovery", "FooDiscovery.java", "/tmp"); - pg.createFile(p,"pom", "pom.xml", "/tmp"); - pg.createFile(p,"eventPoller", "FooEventPoller.java", "/tmp"); - pg.createFile(p.getChildren().iterator().next(),"component", "ChildComponent1.java", "/tmp"); - - } -} diff --git a/modules/helpers/pluginGen/src/main/resources/component.ftl b/modules/helpers/pluginGen/src/main/resources/component.ftl deleted file mode 100644 index b445008..0000000 --- a/modules/helpers/pluginGen/src/main/resources/component.ftl +++ /dev/null @@ -1,284 +0,0 @@ -<#-- -/* - * RHQ Management Platform - * Copyright (C) 2005-2008 Red Hat, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- */ ---> -<#-- @ftlvariable name="props" type="org.rhq.helpers.pluginGen.Props" --> - -package ${props.pkg}; - -import java.util.HashSet; -import java.util.Set; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; - -import org.rhq.core.domain.configuration.Configuration; -import org.rhq.core.pluginapi.inventory.InvalidPluginConfigurationException; -import org.rhq.core.domain.measurement.AvailabilityType; -<#if props.metrics??> -import org.rhq.core.domain.measurement.MeasurementDataNumeric; -import org.rhq.core.domain.measurement.MeasurementReport; -import org.rhq.core.domain.measurement.MeasurementScheduleRequest; -</#if> -<#if props.resourceConfiguration> -import org.rhq.core.pluginapi.configuration.ConfigurationFacet; -import org.rhq.core.pluginapi.configuration.ConfigurationUpdateReport; -</#if> -<#if props.createChildren> -import org.rhq.core.pluginapi.inventory.CreateChildResourceFacet; -import org.rhq.core.pluginapi.inventory.CreateResourceReport; -</#if> -<#if props.deleteChildren> -import org.rhq.core.pluginapi.inventory.DeleteResourceFacet; -</#if> -import org.rhq.core.pluginapi.inventory.ResourceComponent; -import org.rhq.core.pluginapi.inventory.ResourceContext; -<#if props.metrics??> -import org.rhq.core.pluginapi.measurement.MeasurementFacet; -</#if> -<#if props.events> -import org.rhq.core.pluginapi.event.EventContext; -</#if> -<#if props.operations??> -import org.rhq.core.pluginapi.operation.OperationContext; -import org.rhq.core.pluginapi.operation.OperationFacet; -import org.rhq.core.pluginapi.operation.OperationResult; -</#if> -<#if props.usePluginLifecycleListenerApi> -import org.rhq.core.pluginapi.plugin.PluginContext; -import org.rhq.core.pluginapi.plugin.PluginLifecycleListener; -</#if> -<#if props.supportFacet> -import org.rhq.core.pluginapi.support.SnapshotReportRequest; -import org.rhq.core.pluginapi.support.SnapshotReportResults; -import org.rhq.core.pluginapi.support.SupportFacet; -</#if> - - -public 
class ${props.componentClass} implements ResourceComponent<#if props.parentType??><${props.parentType}></#if> -<#if props.metrics??> -, MeasurementFacet -</#if> -<#if props.operations??> -, OperationFacet -</#if> -<#if props.resourceConfiguration> -, ConfigurationFacet -</#if> -<#if props.createChildren> -, CreateChildResourceFacet -</#if> -<#if props.deleteChildren> -, DeleteResourceFacet -</#if> -<#if props.usePluginLifecycleListenerApi> -, PluginLifecycleListener -</#if> -<#if props.supportFacet> -, SupportFacet -</#if> -{ - private final Log log = LogFactory.getLog(this.getClass()); - - private static final int CHANGEME = 1; // TODO remove or change this - - private ResourceContext<#if props.parentType??><${props.parentType}></#if> context; - - - <#if props.events> - public static final String DUMMY_EVENT = "${props.name}DummyEvent"; // Same as in Plugin-Descriptor - - EventContext eventContext; - </#if> - - <#if props.usePluginLifecycleListenerApi> - /** - * Callback when the plugin is created - * @see org.rhq.core.pluginapi.plugin.PluginLifecycleListener#initialize(PluginContext) - */ - public void initialize(PluginContext context) throws Exception - { - } - - /** - * Callback when the plugin is unloaded - * @see org.rhq.core.pluginapi.plugin.PluginLifecycleListener#shutdown() - */ - public void shutdown() - { - } - </#if> - - /** - * Return availability of this resource - * @see org.rhq.core.pluginapi.inventory.ResourceComponent#getAvailability() - */ - public AvailabilityType getAvailability() { - // TODO supply real implementation - return AvailabilityType.UP; - } - - - /** - * Start the resource connection - * @see org.rhq.core.pluginapi.inventory.ResourceComponent#start(org.rhq.core.pluginapi.inventory.ResourceContext) - */ - public void start(ResourceContext<#if props.parentType??><${props.parentType}></#if> context) throws InvalidPluginConfigurationException, Exception { - - this.context = context; - Configuration conf = 
context.getPluginConfiguration(); - // TODO add code to start the resource / connection to it - - <#if props.events> - eventContext = context.getEventContext(); - ${props.name?cap_first}EventPoller eventPoller = new ${props.name?cap_first}EventPoller(); - eventContext.registerEventPoller(eventPoller, 60); - </#if> - - } - - - /** - * Tear down the resource connection - * @see org.rhq.core.pluginapi.inventory.ResourceComponent#stop() - */ - public void stop() { - - - <#if props.events> - eventContext.unregisterEventPoller(DUMMY_EVENT); - </#if> - } - - -<#if props.metrics??> - - /** - * Gather measurement data - * @see org.rhq.core.pluginapi.measurement.MeasurementFacet#getValues(org.rhq.core.domain.measurement.MeasurementReport, java.util.Set) - */ - public void getValues(MeasurementReport report, Set<MeasurementScheduleRequest> metrics) throws Exception { - - for (MeasurementScheduleRequest req : metrics) { - if (req.getName().equals("dummyMetric")) { - MeasurementDataNumeric res = new MeasurementDataNumeric(req, Double.valueOf(CHANGEME)); - report.addData(res); - } - // TODO add more metrics here - } - } -</#if> - -<#if props.operations??> - - public void startOperationFacet(OperationContext context) { - - } - - - /** - * Invokes the passed operation on the managed resource - * @param name Name of the operation - * @param params The method parameters - * @return An operation result - * @see org.rhq.core.pluginapi.operation.OperationFacet - */ - public OperationResult invokeOperation(String name, Configuration params) throws Exception { - - OperationResult res = new OperationResult(); -<#if props.operations?has_content> - <#list props.operations as operation> - if (name.equals("${operation.name}") { - // TODO implement me - } - </#list> -<#else> - if ("dummyOperation".equals(name)) { - // TODO implement me - - } - return res; - } -</#if> -</#if> - - -<#if props.resourceConfiguration> - /** - * Load the configuration from a resource into the configuration - * 
@return The configuration of the resource - * @see org.rhq.core.pluginapi.configuration.ConfigurationFacet - */ - public Configuration loadResourceConfiguration() - { - // TODO supply code to load the configuration from the resource into the plugin - return null; - } - - /** - * Write down the passed configuration into the resource - * @param report The configuration updated by the server - * @see org.rhq.core.pluginapi.configuration.ConfigurationFacet - */ - public void updateResourceConfiguration(ConfigurationUpdateReport report) - { - // TODO supply code to update the passed report into the resource - } -</#if> - -<#if props.createChildren> - /** - * Create a child resource - * @see org.rhq.core.pluginapi.inventory.CreateChildResourceFacet - */ - public CreateResourceReport createResource(CreateResourceReport report) - { - // TODO supply code to create a child resource - - return null; // TODO change this - } -</#if> - -<#if props.deleteChildren> - /** - * Delete a child resource - * @see org.rhq.core.pluginapi.inventory.DeleteResourceFacet - */ - public void deleteResource() throws Exception - { - // TODO supply code to delete a child resource - } -</#if> - -<#if props.supportFacet> - /** - * Takes a snapshot and returns the snapshot report content in the given stream. A facet implementation - * can support different kinds of snapshots, the given name determines which kind of snapshot to take. 
- * - * @param request identifies the type of snapshot to take - * @return snapshot results, including a stream containing the contents of the snapshot report - * @throws Exception if failed to generate the snapshot report - */ - public SnapshotReportResults getSnapshotReport(SnapshotReportRequest request) throws Exception - { - // TODO - return null; - } -</#if> -} diff --git a/modules/helpers/pluginGen/src/main/resources/descriptor.ftl b/modules/helpers/pluginGen/src/main/resources/descriptor.ftl deleted file mode 100644 index 1fe2a6b..0000000 --- a/modules/helpers/pluginGen/src/main/resources/descriptor.ftl +++ /dev/null @@ -1,54 +0,0 @@ -<#-- -/* - * RHQ Management Platform - * Copyright (C) 2005-2013 Red Hat, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- */ ---> -<#-- @ftlvariable name="props" type="org.rhq.helpers.pluginGen.Props" --> -<?xml version="1.0"?> -<plugin name="${props.pluginName}" - displayName="${props.pluginName}Plugin" -<#if props.pluginDescription??> - description="${props.pluginDescription}" -<#else> - description="TODO provide a description for the plugin" -</#if> -<#if props.usePluginLifecycleListenerApi> - pluginLifecycleListener="${props.componentClass}" -</#if> - package="${props.pkg}" - xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" - xmlns="urn:xmlns:rhq-plugin" - xmlns:c="urn:xmlns:rhq-configuration"> - -<#if props.dependsOnJmxPlugin> - <depends plugin="JMX" useClasses="true"/> -</#if> -<#if props.dependsOnAs7Plugin> - <depends plugin="JBossAS7" useClasses="true"/> -</#if> - - <${props.category.lowerName} <#include "descriptorMain.ftl"/> - - <#-- Those are the embedded children --> - <#list props.children as props> - <${props.category.lowerName} <#include "./descriptorMain.ftl"/> - </${props.category.lowerName}> - </#list> - </${props.category.lowerName}> - -</plugin> \ No newline at end of file diff --git a/modules/helpers/pluginGen/src/main/resources/descriptorMain.ftl b/modules/helpers/pluginGen/src/main/resources/descriptorMain.ftl deleted file mode 100644 index ce35ebe..0000000 --- a/modules/helpers/pluginGen/src/main/resources/descriptorMain.ftl +++ /dev/null @@ -1,117 +0,0 @@ -<#-- -/* - * RHQ Management Platform - * Copyright (C) 2005-2013 Red Hat, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ ---> -<#-- @ftlvariable name="props" type="org.rhq.helpers.pluginGen.Props" --> -<#-- - This file contains the body of the descriptor for a single - platform/server/service. It is called by descriptor.ftl - ---> -name="${props.name}" - discovery="${props.discoveryClass}" - class="${props.componentClass}" - <#if props.singleton>singleton="true"</#if> - <#if props.manualAddOfResourceType>supportsManualAdd="true"</#if> - <#if props.createChildren && props.deleteChildren>createDeletePolicy="both"<#elseif props.createChildren && !props.deleteChildren>createDeletePolicy="create-only"<#elseif !props.createChildren && props.deleteChildren>createDeletePolicy="delete-only"<#else > <#-- Dont mention it, as 'neither' is default --></#if> - > - - <#if props.runsInsides?has_content> - <runs-inside> - <#list props.runsInsides as typeKey> - <parent-resource-type name="${typeKey.name}" plugin="${typeKey.pluginName}"/> - </#list> - </runs-inside> - </#if> - <#if props.dependsOnAs7Plugin> - <runs-inside> <!-- TODO adjust type --> - <parent-resource-type name="JBossAS7 Standalone Server" plugin="JBossAS7"/> - </runs-inside> - </#if> - - <#if props.pluginConfig?has_content> - <plugin-configuration> - <#list props.pluginConfig as simpleProps> - <c:simple-property name="${simpleProps.name}" description="${simpleProps.description}" <#if simpleProps.type??>type="${simpleProps.type}"</#if> <#if simpleProps.readOnly>readOnly="true"</#if>/> - </#list> - <!-- The template section is only for manual resource additions, and default parameters and the ones presented to the user. 
--> - <#list props.templates as templates> - <c:template name="${templates.name}" description="${templates.description}"> - <#list templates.simpleProps as innerSimpleProps> - <c:simple-property name="${innerSimpleProps.name}" displayName="${innerSimpleProps.displayName}" - defaultValue="${innerSimpleProps.defaultValue}"/> - </#list> - </c:template> - </#list> - </plugin-configuration> - </#if> - - <#if props.hasOperations || props.operations?has_content> - <#if props.operations?has_content> - <#list props.operations as operation> - <operation name="${operation.name}" <#if operation.displayName?has_content>displayName="${operation.displayName}"</#if> description="${operation.description}"> - <#if operation.params?has_content> - <parameters> - <#list operation.params as param> - <c:simple-property name="${param.name}" <#if param.description??>description="${param.description}"</#if> type="${param.type}"/> - </#list> - </parameters> - </#if> - <#if operation.result??> - <results> - <c:simple-property name="${operation.result.name}" /> - </results> - </#if> - </operation> - </#list> - <#else> - <operation name="dummyOperation"> - <!-- TODO supply parameters and return values --> - </operation> - </#if> - </#if> - - <#if props.hasMetrics || props.metrics?has_content> - <#if props.metrics?has_content> - <#list props.metrics as metric> - <metric property="${metric.property}" <#if metric.displayName?has_content>displayName="${metric.displayName}"</#if> displayType="${metric.displayType}" units="${metric.units}" dataType="${metric.dataType}" - description="${metric.description}" /> - </#list> - <#else> - <metric property="dummyMetric" displayName="Dummy display name"/> - </#if> - </#if> - - <#if props.events> - <event name="${props.name}DummyEvent"/> - </#if> - <#if props.resourceConfiguration> - <resource-configuration> - <!-- TODO supply your configuration parameters --> - <c:simple-property name="dummy"/> - </resource-configuration> - </#if> - -<#if 
props.resourceConfig?has_content> - <resource-configuration> - <#list props.resourceConfig as simpleProps> - <c:simple-property name="${simpleProps.name}" description="${simpleProps.description}" <#if simpleProps.type??>type="${simpleProps.type}"</#if> <#if simpleProps.readOnly>readOnly="true"</#if>/> - </#list> - </resource-configuration> -</#if> diff --git a/modules/helpers/pluginGen/src/main/resources/discovery.ftl b/modules/helpers/pluginGen/src/main/resources/discovery.ftl deleted file mode 100644 index a0cff98..0000000 --- a/modules/helpers/pluginGen/src/main/resources/discovery.ftl +++ /dev/null @@ -1,106 +0,0 @@ -<#-- -/* - * RHQ Management Platform - * Copyright (C) 2005-2008 Red Hat, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- */ ---> -<#-- @ftlvariable name="props" type="org.rhq.helpers.pluginGen.Props" --> -package ${props.pkg}; - -<#if props.manualAddOfResourceType> -import java.util.Collections; -</#if> -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.rhq.core.domain.configuration.Configuration; -import org.rhq.core.domain.configuration.Property; -import org.rhq.core.domain.configuration.PropertySimple; -import org.rhq.core.pluginapi.inventory.DiscoveredResourceDetails; -import org.rhq.core.pluginapi.inventory.InvalidPluginConfigurationException; -<#if props.manualAddOfResourceType> -import org.rhq.core.pluginapi.inventory.ManualAddFacet; -</#if> -import org.rhq.core.pluginapi.inventory.ProcessScanResult; -import org.rhq.core.pluginapi.inventory.ResourceDiscoveryComponent; -import org.rhq.core.pluginapi.inventory.ResourceDiscoveryContext; - - -/** - * Discovery class - */ -public class ${props.discoveryClass} implements ResourceDiscoveryComponent<#if props.parentType??><${props.parentType}></#if> -<#if props.manualAddOfResourceType>,ManualAddFacet</#if> -{ - - private final Log log = LogFactory.getLog(this.getClass()); - -<#if props.manualAddOfResourceType> - /** - * This method is an empty dummy, as you have selected manual addition - * in the plugin generator. - * If you want to have auto discovery too, remove the "return emptySet" - * and implement the auto discovery logic. 
- */ -<#else> - /** - * Run the auto-discovery - */ -</#if> - public Set<DiscoveredResourceDetails> discoverResources(ResourceDiscoveryContext<#if props.parentType??><${props.parentType}></#if> discoveryContext) throws Exception { -<#if props.manualAddOfResourceType> - return Collections.emptySet(); -<#else> - Set<DiscoveredResourceDetails> discoveredResources = new HashSet<DiscoveredResourceDetails>(); - - /** - * TODO : do your discovery here - * A discovered resource must have a unique key, that must - * stay the same when the resource is discovered the next - * time - */ - DiscoveredResourceDetails detail = null; // new DiscoveredResourceDetails( -// discoveryContext.getResourceType(), // ResourceType -// ); - - - // Add to return values - discoveredResources.add(detail); - log.info("Discovered new ... TODO "); // TODO change - - return discoveredResources; - -</#if> - } - -<#if props.manualAddOfResourceType> - /** - * Do the manual add of this one resource - */ - public DiscoveredResourceDetails discoverResource(Configuration pluginConfiguration, ResourceDiscoveryContext<#if props.parentType??><${props.parentType}></#if> context) throws InvalidPluginConfigurationException { - - // TODO implement this - DiscoveredResourceDetails detail = null; // new DiscoveredResourceDetails( -// context.getResourceType(), // ResourceType -// ); - - return detail; - } -</#if> -} \ No newline at end of file diff --git a/modules/helpers/pluginGen/src/main/resources/eventPoller.ftl b/modules/helpers/pluginGen/src/main/resources/eventPoller.ftl deleted file mode 100644 index e0d9c4e..0000000 --- a/modules/helpers/pluginGen/src/main/resources/eventPoller.ftl +++ /dev/null @@ -1,62 +0,0 @@ -<#-- -/* - * RHQ Management Platform - * Copyright (C) 2005-2008 Red Hat, Inc. - * All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ ---> -<#-- @ftlvariable name="props" type="org.rhq.helpers.pluginGen.Props" --> -package ${props.pkg}; - -import java.util.HashSet; -import java.util.Set; - -import org.rhq.core.domain.event.Event; -import org.rhq.core.domain.event.EventSeverity; -import org.rhq.core.pluginapi.event.EventPoller; - - -public class ${props.name?cap_first}EventPoller implements EventPoller { - - public ${props.name?cap_first}EventPoller() { - - } - - - /** Return the type of events we handle - * @see org.rhq.core.pluginapi.event.EventPoller#getEventType() - */ - public String getEventType() { - return ${props.componentClass}.DUMMY_EVENT; - } - - - /** Return collected events - * @see org.rhq.core.pluginapi.event.EventPoller#poll() - */ - public Set<Event> poll() { - Set<Event> eventSet = new HashSet<Event>(); - // TODO add your events here. 
Below is an example that - /* - synchronized (events) { - eventSet.addAll(events); - events.clear(); - } - */ - return eventSet; - } - -} \ No newline at end of file diff --git a/modules/helpers/pluginGen/src/main/resources/pom.ftl b/modules/helpers/pluginGen/src/main/resources/pom.ftl deleted file mode 100644 index 7c799b8..0000000 --- a/modules/helpers/pluginGen/src/main/resources/pom.ftl +++ /dev/null @@ -1,246 +0,0 @@ -<#ftl > -<#-- -/* - * RHQ Management Platform - * Copyright (C) 2005-20013 Red Hat, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- */ ---> -<#-- @ftlvariable name="props" type="org.rhq.helpers.pluginGen.Props" --> -<project - xmlns="http://maven.apache.org/POM/4.0.0" - xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" - xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd" - > - - <modelVersion>4.0.0</modelVersion> - - <parent> - <groupId>org.rhq</groupId> - <artifactId>rhq-plugins-parent</artifactId> - <version><#if props.rhqVersion??>${props.rhqVersion}<#else >4.9.0-SNAPSHOT</#if></version><!-- TODO adjust RHQ version --> - </parent> - - <groupId>org.rhq</groupId> - <artifactId>${props.pluginName}-plugin</artifactId> - <packaging>jar</packaging> - - <name>RHQ ${props.pluginName} Plugin</name> - <#if props.description??> - <description>${props.description}</description> - </#if> - - - <build> - <plugins> - <plugin> - <artifactId>maven-compiler-plugin</artifactId> - <configuration> - <source>1.6</source> - <target>1.6</target> - </configuration> - </plugin> - -<#if props.usesExternalJarsInPlugin> - <plugin> - <artifactId>maven-dependency-plugin</artifactId> - <version>2.0</version> - <executions> - <execution> - <id>copy-${props.name}-jar</id> - <phase>process-resources</phase> - <goals> - <goal>copy</goal> - </goals> - <configuration> - <artifactItems> - - <!-- TODO include needed external jars - that should go into your jar file - - The next one is an example: - --> - <artifactItem> - <groupId>org.snmp4j</groupId> - <artifactId>snmp4j</artifactId> - <version>1.9.1f</version> - </artifactItem> - - </artifactItems> - <outputDirectory>${r"${project.build.outputDirectory}"}/lib</outputDirectory> - </configuration> - </execution> - </executions> - </plugin> -</#if> - </plugins> - </build> - - <profiles> - <profile> - <id>dev</id> - - <properties> - <rhq.rootDir>../../..</rhq.rootDir> - <rhq.containerDir>${r"${rhq.rootDir}/${rhq.defaultDevContainerPath}"}</rhq.containerDir> - 
<rhq.deploymentDir>${r"${rhq.containerDir}/jbossas/server/default/deploy/${rhq.earName}/rhq-downloads/rhq-plugins"}</rhq.deploymentDir> - </properties> - - <build> - <plugins> - - <plugin> - <artifactId>maven-antrun-plugin</artifactId> - <version>1.1</version> - <executions> - - <execution> - <id>deploy</id> - <phase>compile</phase> - <configuration> - <tasks> - <mkdir dir="${r"${rhq.deploymentDir}"}"/> - <property name="deployment.file" - location="${r"${rhq.deploymentDir}/${project.build.finalName}"}.jar"/> - <echo>*** Updating ${r"${deployment.file}"}...</echo> - <jar destfile="${r"${deployment.file}"}" basedir="${r"${project.build.outputDirectory}"}"/> - </tasks> - </configuration> - <goals> - <goal>run</goal> - </goals> - </execution> - - <execution> - <id>deploy-jar-meta-inf</id> - <phase>package</phase> - <configuration> - <tasks> - <property name="deployment.file" - location="${r"${rhq.deploymentDir}/${project.build.finalName}.jar"}"/> - <echo>*** Updating META-INF dir in ${r"${deployment.file}"}...</echo> - <unjar src="${r"${project.build.directory}/${project.build.finalName}.jar"}" - dest="${r"${project.build.outputDirectory}"}"> - <patternset> - <include name="META-INF/**"/> - </patternset> - </unjar> - <jar destfile="${r"${deployment.file}"}" - manifest="${r"${project.build.outputDirectory}/META-INF/MANIFEST.MF"}" - update="true"> - </jar> - </tasks> - </configuration> - <goals> - <goal>run</goal> - </goals> - </execution> - - <execution> - <id>undeploy</id> - <phase>clean</phase> - <configuration> - <tasks> - <property name="deployment.file" - location="${r"${rhq.deploymentDir}/${project.build.finalName}"}.jar"/> - <echo>*** Deleting ${r"${deployment.file}"}...</echo> - <delete file="${r"${deployment.file}"}"/> - </tasks> - </configuration> - <goals> - <goal>run</goal> - </goals> - </execution> - - </executions> - </plugin> - - </plugins> - </build> - - </profile> - </profiles> - - - <repositories> - <repository> - <snapshots> - 
<enabled>false</enabled> - </snapshots> - <id>jboss</id> - <name>JBoss Repository</name> - <url>https://repository.jboss.org/nexus/content/groups/public/</url> - </repository> - <!-- TODO add your own maven repositories here (if needed) --> - </repositories> - - <pluginRepositories> - <pluginRepository> - <snapshots> - <enabled>false</enabled> - </snapshots> - <id>jboss</id> - <name>JBoss Plugin Repository</name> - <url>https://repository.jboss.org/nexus/content/groups/public/</url> - </pluginRepository> - </pluginRepositories> - - - <dependencies> - <dependency> - <groupId>commons-logging</groupId> - <artifactId>commons-logging</artifactId> - <version>${r"${commons-logging.version}"}</version> - </dependency> - <dependency> - <groupId>org.rhq</groupId> - <artifactId>rhq-core-domain</artifactId> - <version>${r"${project.version}"}</version> - <scope>provided</scope> - </dependency> - <dependency> - <groupId>org.rhq</groupId> - <artifactId>rhq-core-plugin-api</artifactId> - <version>${r"${project.version}"}</version> - <scope>provided</scope> - </dependency> - <dependency> - <groupId>org.rhq</groupId> - <artifactId>rhq-core-native-system</artifactId> - <version>${r"${project.version}"}</version> - <scope>provided</scope> - </dependency> -<#if props.dependsOnJmxPlugin> - <dependency> - <groupId>org.rhq</groupId> - <artifactId>rhq-jmx-plugin</artifactId> - <version>${r"${project.version}"}</version> - <scope>provided</scope> - </dependency> -</#if> -<#if props.dependsOnAs7Plugin> - <dependency> - <groupId>org.rhq</groupId> - <artifactId>rhq-jboss-as-7-plugin</artifactId> - <version>${r"${project.version}"}</version> - <scope>provided</scope> - </dependency> -</#if> - - <!-- TODO add your dependencies here --> - - </dependencies> -</project> \ No newline at end of file diff --git a/modules/helpers/pluginGen/src/test/java/org/rhq/helpers/pluginGen/test/FooBean.java b/modules/helpers/pluginGen/src/test/java/org/rhq/helpers/pluginGen/test/FooBean.java deleted file mode 
100644 index 5027dbb..0000000 --- a/modules/helpers/pluginGen/src/test/java/org/rhq/helpers/pluginGen/test/FooBean.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * RHQ Management Platform - * Copyright (C) 2005-2013 Red Hat, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA - */ - -package org.rhq.helpers.pluginGen.test; - -import org.rhq.helpers.pluginAnnotations.agent.ConfigProperty; -import org.rhq.helpers.pluginAnnotations.agent.DataType; -import org.rhq.helpers.pluginAnnotations.agent.DisplayType; -import org.rhq.helpers.pluginAnnotations.agent.MeasurementType; -import org.rhq.helpers.pluginAnnotations.agent.Metric; -import org.rhq.helpers.pluginAnnotations.agent.Operation; -import org.rhq.helpers.pluginAnnotations.agent.Parameter; -import org.rhq.helpers.pluginAnnotations.agent.RhqType; -import org.rhq.helpers.pluginAnnotations.agent.Units; - -/** - * Just a sample - * @author Heiko W. 
Rupp - */ - -public class FooBean { - - @Metric(description = "How often was this bean invoked", displayType = DisplayType.SUMMARY, measurementType = MeasurementType.DYNAMIC, - units = Units.SECONDS) - int invocationCount; - - @Metric(description = "Just a foo", dataType = DataType.TRAIT) - String lastCommand; - - @Operation(description = "Increase the invocation count") - public int increaseCounter() { - invocationCount++; - return invocationCount; - } - - @Operation(description = "Decrease the counter") - public void decreaseCounter(@Parameter(description = "How much to decrease?", name = "by") int by) { - invocationCount -= by; - } - - @ConfigProperty(scope = ConfigProperty.Scope.PLUGIN, displayName="The Password", - readOnly = false, property="thePassword",description = "A password", rhqType = RhqType.PASSWORD) - String password; - - @ConfigProperty(scope = ConfigProperty.Scope.RESOURCE) - int defaultSteps; - -} diff --git a/modules/helpers/pom.xml b/modules/helpers/pom.xml index bac4830..900adbd 100644 --- a/modules/helpers/pom.xml +++ b/modules/helpers/pom.xml @@ -20,10 +20,6 @@
<module>rtfilter</module> <module>rtfilter-subsystem</module> -<!-- - <module>pluginGen</module> - <module>pluginAnnotations</module> ---> <module>bundleGen</module> <module>jeeGen</module> <module>perftest-support</module>
commit d66b778aaad2dd425cdc865304f8d6835a46ea4e Author: John Sanda jsanda@redhat.com Date: Tue Aug 6 01:37:44 2013 -0400
update the server mode correctly
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java index 0b404bb..543036e 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java @@ -77,7 +77,6 @@ public class StorageClusterMonitor implements StorageStateListener {
Server server = serverManager.getServer();
- topologyManager.updateServerMode(subjectManager.getOverlord(), new Integer[] {server.getId()}, - Server.OperationMode.MAINTENANCE); + topologyManager.updateServerMode(subjectManager.getOverlord(), new Integer[] {server.getId()}, mode); } }
commit 27b3f7666c85b484432ed6aa4ee25f83711d21bc Author: John Sanda jsanda@redhat.com Date: Thu Aug 1 21:17:49 2013 -0400
adding storage_port to plugin config for storage/cassandra plugins
storage_port (i.e., gossip) is not really a connection setting. It has been added to facilitate initializing shared, cluster settings on the server. We need to store the cluster settings on the server so that when new storage nodes are deployed, we know what cluster settings to assign them.
I am working on initializing the cluster settings when the first storage ndoe is imported into inventory. Prior to this commit, the gossip port was only available via the storage node's resource configuration, which is not available when the storage node is imported into inventory. By storing the cluster settings in the resource's plugin config, the cluster settings can easily be initialized at the point when the storage node is committed into inventory.
diff --git a/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java b/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java index c53c19b..9fc389c 100644 --- a/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java +++ b/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java @@ -80,6 +80,18 @@ public class ConfigEditor { return backupFile; }
+ public String getClusterName() { + return (String) config.get("cluster_name"); + } + + public String getListenAddress() { + return (String) config.get("listen_address"); + } + + public String getAuthenticator() { + return (String) config.get("authenticator"); + } + public String getCommitLogDirectory() { return (String) config.get("commitlog_directory"); } diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeDiscoveryComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeDiscoveryComponent.java index e7686b0..9edfe15 100644 --- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeDiscoveryComponent.java +++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeDiscoveryComponent.java @@ -23,18 +23,15 @@ package org.rhq.plugins.cassandra;
import java.io.File; -import java.io.FileInputStream; -import java.io.InputStream; import java.util.HashSet; import java.util.List; -import java.util.Map; import java.util.Set;
import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.mc4j.ems.connection.support.metadata.J2SE5ConnectionTypeDescriptor; -import org.yaml.snakeyaml.Yaml;
+import org.rhq.cassandra.util.ConfigEditor; import org.rhq.core.domain.configuration.Configuration; import org.rhq.core.domain.configuration.PropertySimple; import org.rhq.core.domain.resource.ResourceUpgradeReport; @@ -55,6 +52,7 @@ public class CassandraNodeDiscoveryComponent extends JMXDiscoveryComponent { protected static final String HOST_PROPERTY = "host"; protected static final String CLUSTER_NAME_PROPERTY = "clusterName"; protected static final String NATIVE_TRANSPORT_PORT_PROPERTY = "nativeTransportPort"; + protected static final String STORAGE_PORT_PROPERTY = "storagePort"; protected static final String JMX_PORT_PROPERTY = "jmxPort"; protected static final String AUTHENTICATOR_PROPERTY = "authenticator"; protected static final String USERNAME_PROPERTY = "username"; @@ -153,44 +151,17 @@ public class CassandraNodeDiscoveryComponent extends JMXDiscoveryComponent { }
if (yamlConfigurationPath != null) { - InputStream inputStream = null; - try { - File yamlConfigurationFile = new File(yamlConfigurationPath, "cassandra.yaml"); - pluginConfig.put(new PropertySimple(YAML_PROPERTY, yamlConfigurationFile.getAbsolutePath())); - - inputStream = new FileInputStream(yamlConfigurationFile); - Yaml yaml = new Yaml(); - Map<String, String> parsedProperties = (Map<String, String>) yaml.load(inputStream); - - if (parsedProperties.get("cluster_name") != null) { - pluginConfig - .put(new PropertySimple(CLUSTER_NAME_PROPERTY, parsedProperties.get("cluster_name"))); - } - - if (parsedProperties.get("listen_address") != null) { - pluginConfig.put(new PropertySimple(HOST_PROPERTY, parsedProperties.get("listen_address"))); - } - - if (parsedProperties.get("native_transport_port") != null) { - pluginConfig.put(new PropertySimple(NATIVE_TRANSPORT_PORT_PROPERTY, parsedProperties - .get("native_transport_port"))); - } - - if (parsedProperties.get("authenticator") != null) { - pluginConfig.put(new PropertySimple(AUTHENTICATOR_PROPERTY, parsedProperties - .get("authenticator"))); - } - } catch (Exception e) { - log.error("YAML Configuration load exception ", e); - } finally { - try { - if ( inputStream != null){ - inputStream.close(); - } - } catch (Exception e) { - log.error("Unable to close stream for yaml configuration", e); - } - } + File yamlConfigurationFile = new File(yamlConfigurationPath, "cassandra.yaml"); + ConfigEditor yamlEditor = new ConfigEditor(yamlConfigurationFile); + yamlEditor.load(); + + pluginConfig.put(new PropertySimple(YAML_PROPERTY, yamlConfigurationFile.getAbsolutePath())); + pluginConfig.put(new PropertySimple(CLUSTER_NAME_PROPERTY, yamlEditor.getClusterName())); + pluginConfig.put(new PropertySimple(HOST_PROPERTY, yamlEditor.getListenAddress())); + pluginConfig.put(new PropertySimple(NATIVE_TRANSPORT_PORT_PROPERTY, + yamlEditor.getNativeTransportPort())); + pluginConfig.put(new PropertySimple(STORAGE_PORT_PROPERTY, 
yamlEditor.getStoragePort())); + pluginConfig.put(new PropertySimple(AUTHENTICATOR_PROPERTY, yamlEditor.getAuthenticator())); } }
diff --git a/modules/plugins/cassandra/src/test/resources/cassandra.yaml b/modules/plugins/cassandra/src/test/resources/cassandra.yaml new file mode 100644 index 0000000..ba0b796 --- /dev/null +++ b/modules/plugins/cassandra/src/test/resources/cassandra.yaml @@ -0,0 +1,690 @@ +# Cassandra storage config YAML + +# NOTE: +# See http://wiki.apache.org/cassandra/StorageConfiguration for +# full explanations of configuration directives +# /NOTE + +# The name of the cluster. This is mainly used to prevent machines in +# one logical cluster from joining another. +cluster_name: rhq + +# This defines the number of tokens randomly assigned to this node on the ring +# The more tokens, relative to other nodes, the larger the proportion of data +# that this node will store. You probably want all nodes to have the same number +# of tokens assuming they have equal hardware capability. +# +# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, +# and will use the initial_token as described below. +# +# Specifying initial_token will override this setting. +# +# If you already have a cluster with 1 token per node, and wish to migrate to +# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations +num_tokens: 256 + +# If you haven't specified num_tokens, or have set it to the default of 1 then +# you should always specify InitialToken when setting up a production +# cluster for the first time, and often when adding capacity later. +# The principle is that each node should be given an equal slice of +# the token ring; see http://wiki.apache.org/cassandra/Operations +# for more details. +# +# If blank, Cassandra will request a token bisecting the range of +# the heaviest-loaded existing node. If there is no load information +# available, such as is the case with a new cluster, it will pick +# a random token, which will lead to hot spots. 
+#initial_token: + +# See http://wiki.apache.org/cassandra/HintedHandoff +hinted_handoff_enabled: true +# this defines the maximum amount of time a dead host will have hints +# generated. After it has been dead this long, new hints for it will not be +# created until it has been seen alive and gone down again. +max_hint_window_in_ms: 10800000 # 3 hours +# throttle in KB's per second, per delivery thread +hinted_handoff_throttle_in_kb: 1024 +# Number of threads with which to deliver hints; +# Consider increasing this number when you have multi-dc deployments, since +# cross-dc handoff tends to be slower +max_hints_delivery_threads: 2 + +# The following setting populates the page cache on memtable flush and compaction +# WARNING: Enable this setting only when the whole node's data fits in memory. +# Defaults to: false +# populate_io_cache_on_flush: false + +# Authentication backend, implementing IAuthenticator; used to identify users +# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, +# PasswordAuthenticator}. +# +# - AllowAllAuthenticator performs no checks - set it to disable authentication. +# - PasswordAuthenticator relies on username/password pairs to authenticate +# users. It keeps usernames and hashed passwords in system_auth.credentials table. +# Please increase system_auth keyspace replication factor if you use this authenticator. +authenticator: org.apache.cassandra.auth.PasswordAuthenticator + +# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions +# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, +# CassandraAuthorizer}. +# +# - AllowAllAuthorizer allows any action to any user - set it to disable authorization. +# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please +# increase system_auth keyspace replication factor if you use this authorizer. 
+authorizer: org.apache.cassandra.auth.CassandraAuthorizer + +# Validity period for permissions cache (fetching permissions can be an +# expensive operation depending on the authorizer, CassandraAuthorizer is +# one example). Defaults to 2000, set to 0 to disable. +# Will be disabled automatically for AllowAllAuthorizer. +permissions_validity_in_ms: 2000 + +# The partitioner is responsible for distributing rows (by key) across +# nodes in the cluster. Any IPartitioner may be used, including your +# own as long as it is on the classpath. Out of the box, Cassandra +# provides org.apache.cassandra.dht.{Murmur3Partitioner, RandomPartitioner +# ByteOrderedPartitioner, OrderPreservingPartitioner (deprecated)}. +# +# - RandomPartitioner distributes rows across the cluster evenly by md5. +# This is the default prior to 1.2 and is retained for compatibility. +# - Murmur3Partitioner is similar to RandomPartioner but uses Murmur3_128 +# Hash Function instead of md5. When in doubt, this is the best option. +# - ByteOrderedPartitioner orders rows lexically by key bytes. BOP allows +# scanning rows in key order, but the ordering can generate hot spots +# for sequential insertion workloads. +# - OrderPreservingPartitioner is an obsolete form of BOP, that stores +# - keys in a less-efficient format and only works with keys that are +# UTF8-encoded Strings. +# - CollatingOPP colates according to EN,US rules rather than lexical byte +# ordering. Use this as an example if you need custom collation. +# +# See http://wiki.apache.org/cassandra/Operations for more on +# partitioners and token selection. +partitioner: org.apache.cassandra.dht.Murmur3Partitioner + +# directories where Cassandra should store data on disk. 
+data_file_directories: + - /Users/jsanda/Development/redhat/rhq/rhq-data/storage/data + +# commit log +commitlog_directory: /Users/jsanda/Development/redhat/rhq/rhq-data/storage/commit_log + +# policy for data disk failures: +# stop: shut down gossip and Thrift, leaving the node effectively dead, but +# still inspectable via JMX. +# best_effort: stop using the failed disk and respond to requests based on +# remaining available sstables. This means you WILL see obsolete +# data at CL.ONE! +# ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra +disk_failure_policy: stop + +# Maximum size of the key cache in memory. +# +# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the +# minimum, sometimes more. The key cache is fairly tiny for the amount of +# time it saves, so it's worthwhile to use it at large numbers. +# The row cache saves even more time, but must store the whole values of +# its rows, so it is extremely space-intensive. It's best to only use the +# row cache if you have hot rows or static rows. +# +# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. +# +# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache. +key_cache_size_in_mb: + +# Duration in seconds after which Cassandra should +# safe the keys cache. Caches are saved to saved_caches_directory as +# specified in this configuration file. +# +# Saved caches greatly improve cold-start speeds, and is relatively cheap in +# terms of I/O for the key cache. Row cache saving is much more expensive and +# has limited use. +# +# Default is 14400 or 4 hours. +key_cache_save_period: 14400 + +# Number of keys from the key cache to save +# Disabled by default, meaning all keys are going to be saved +# key_cache_keys_to_save: 100 + +# Maximum size of the row cache in memory. +# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. 
+# +# Default value is 0, to disable row caching. +row_cache_size_in_mb: 0 + +# Duration in seconds after which Cassandra should +# safe the row cache. Caches are saved to saved_caches_directory as specified +# in this configuration file. +# +# Saved caches greatly improve cold-start speeds, and is relatively cheap in +# terms of I/O for the key cache. Row cache saving is much more expensive and +# has limited use. +# +# Default is 0 to disable saving the row cache. +row_cache_save_period: 0 + +# Number of keys from the row cache to save +# Disabled by default, meaning all keys are going to be saved +# row_cache_keys_to_save: 100 + +# The provider for the row cache to use. +# +# Supported values are: ConcurrentLinkedHashCacheProvider, SerializingCacheProvider +# +# SerializingCacheProvider serialises the contents of the row and stores +# it in native memory, i.e., off the JVM Heap. Serialized rows take +# significantly less memory than "live" rows in the JVM, so you can cache +# more rows in a given memory footprint. And storing the cache off-heap +# means you can use smaller heap sizes, reducing the impact of GC pauses. +# +# It is also valid to specify the fully-qualified class name to a class +# that implements org.apache.cassandra.cache.IRowCacheProvider. +# +# Defaults to SerializingCacheProvider +row_cache_provider: SerializingCacheProvider + +# saved caches +saved_caches_directory: /Users/jsanda/Development/redhat/rhq/rhq-data/storage/saved_caches + +# commitlog_sync may be either "periodic" or "batch." +# When in batch mode, Cassandra won't ack writes until the commit log +# has been fsynced to disk. It will wait up to +# commitlog_sync_batch_window_in_ms milliseconds for other writes, before +# performing the sync. +# +# commitlog_sync: batch +# commitlog_sync_batch_window_in_ms: 50 +# +# the other option is "periodic" where writes may be acked immediately +# and the CommitLog is simply synced every commitlog_sync_period_in_ms +# milliseconds. 
+commitlog_sync: periodic +commitlog_sync_period_in_ms: 10000 + +# The size of the individual commitlog file segments. A commitlog +# segment may be archived, deleted, or recycled once all the data +# in it (potentally from each columnfamily in the system) has been +# flushed to sstables. +# +# The default size is 32, which is almost always fine, but if you are +# archiving commitlog segments (see commitlog_archiving.properties), +# then you probably want a finer granularity of archiving; 8 or 16 MB +# is reasonable. +commitlog_segment_size_in_mb: 32 + +# any class that implements the SeedProvider interface and has a +# constructor that takes a Map<String, String> of parameters will do. +seed_provider: + # Addresses of hosts that are deemed contact points. + # Cassandra nodes use this list of hosts to find each other and learn + # the topology of the ring. You must change this if you are running + # multiple nodes! + - class_name: org.apache.cassandra.locator.SimpleSeedProvider + parameters: + # seeds is actually a comma-delimited list of addresses. + # Ex: "<ip1>,<ip2>,<ip3>" + - seeds: "127.0.0.1" + +# emergency pressure valve: each time heap usage after a full (CMS) +# garbage collection is above this fraction of the max, Cassandra will +# flush the largest memtables. +# +# Set to 1.0 to disable. Setting this lower than +# CMSInitiatingOccupancyFraction is not likely to be useful. +# +# RELYING ON THIS AS YOUR PRIMARY TUNING MECHANISM WILL WORK POORLY: +# it is most effective under light to moderate load, or read-heavy +# workloads; under truly massive write load, it will often be too +# little, too late. +flush_largest_memtables_at: 0.75 + +# emergency pressure valve #2: the first time heap usage after a full +# (CMS) garbage collection is above this fraction of the max, +# Cassandra will reduce cache maximum _capacity_ to the given fraction +# of the current _size_. 
Should usually be set substantially above +# flush_largest_memtables_at, since that will have less long-term +# impact on the system. +# +# Set to 1.0 to disable. Setting this lower than +# CMSInitiatingOccupancyFraction is not likely to be useful. +reduce_cache_sizes_at: 0.85 +reduce_cache_capacity_to: 0.6 + +# For workloads with more data than can fit in memory, Cassandra's +# bottleneck will be reads that need to fetch data from +# disk. "concurrent_reads" should be set to (16 * number_of_drives) in +# order to allow the operations to enqueue low enough in the stack +# that the OS and drives can reorder them. +# +# On the other hand, since writes are almost never IO bound, the ideal +# number of "concurrent_writes" is dependent on the number of cores in +# your system; (8 * number_of_cores) is a good rule of thumb. +concurrent_reads: 32 +concurrent_writes: 32 + +# Total memory to use for memtables. Cassandra will flush the largest +# memtable when this much memory is used. +# If omitted, Cassandra will set it to 1/3 of the heap. +# memtable_total_space_in_mb: 2048 + +# Total space to use for commitlogs. Since commitlog segments are +# mmapped, and hence use up address space, the default size is 32 +# on 32-bit JVMs, and 1024 on 64-bit JVMs. +# +# If space gets above this value (it will round up to the next nearest +# segment multiple), Cassandra will flush every dirty CF in the oldest +# segment and remove it. So a small total commitlog space will tend +# to cause more flush activity on less-active columnfamilies. +# commitlog_total_space_in_mb: 4096 + +# This sets the amount of memtable flush writer threads. These will +# be blocked by disk io, and each one will hold a memtable in memory +# while blocked. If you have a large heap and many data directories, +# you can increase this value for better flush performance. +# By default this will be set to the amount of data directories defined. 
+#memtable_flush_writers: 1 + +# the number of full memtables to allow pending flush, that is, +# waiting for a writer thread. At a minimum, this should be set to +# the maximum number of secondary indexes created on a single CF. +memtable_flush_queue_size: 4 + +# Whether to, when doing sequential writing, fsync() at intervals in +# order to force the operating system to flush the dirty +# buffers. Enable this to avoid sudden dirty buffer flushing from +# impacting read latencies. Almost always a good idea on SSD:s; not +# necessarily on platters. +trickle_fsync: false +trickle_fsync_interval_in_kb: 10240 + +# TCP port, for commands and data +storage_port: 7100 + +# SSL port, for encrypted communication. Unused unless enabled in +# encryption_options +ssl_storage_port: 7101 + +# Address to bind to and tell other Cassandra nodes to connect to. You +# _must_ change this if you want multiple nodes to be able to +# communicate! +# +# Leaving it blank leaves it up to InetAddress.getLocalHost(). This +# will always do the Right Thing *if* the node is properly configured +# (hostname, name resolution, etc), and the Right Thing is to use the +# address associated with the hostname (it might not be). +# +# Setting this to 0.0.0.0 is always wrong. +listen_address: 127.0.0.1 + +# Address to broadcast to other Cassandra nodes +# Leaving this blank will set it to the same value as listen_address +# broadcast_address: 1.2.3.4 + +# Internode authentication backend, implementing IInternodeAuthenticator; +# used to allow/disallow connections from peer nodes. +# internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator + +# Whether to start the native transport server. +# Currently, only the thrift server is started by default because the native +# transport is considered beta. +# Please note that the address on which the native transport is bound is the +# same as the rpc_address. The port however is different and specified below. 
+start_native_transport: true +# port for the CQL native transport to listen for clients on +native_transport_port: 9142 +# The minimum and maximum threads for handling requests when the native +# transport is used. The meaning is those is similar to the one of +# rpc_min_threads and rpc_max_threads, though the default differ slightly and +# are the ones below: +# native_transport_min_threads: 16 +native_transport_max_threads: 128 + + +# Whether to start the thrift rpc server. +start_rpc: false +# The address to bind the Thrift RPC service to -- clients connect +# here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if +# you want Thrift to listen on all interfaces. +# +# Leaving this blank has the same effect it does for ListenAddress, +# (i.e. it will be based on the configured hostname of the node). +rpc_address: 127.0.0.1 +# port for Thrift to listen for clients on +rpc_port: 9160 + +# enable or disable keepalive on rpc connections +rpc_keepalive: true + +# Cassandra provides three out-of-the-box options for the RPC Server: +# +# sync -> One thread per thrift connection. For a very large number of clients, memory +# will be your limiting factor. On a 64 bit JVM, 128KB is the minimum stack size +# per thread, and that will correspond to your use of virtual memory (but physical memory +# may be limited depending on use of stack space). +# +# hsha -> Stands for "half synchronous, half asynchronous." All thrift clients are handled +# asynchronously using a small number of threads that does not vary with the amount +# of thrift clients (and thus scales well to many clients). The rpc requests are still +# synchronous (one thread per active request). +# +# The default is sync because on Windows hsha is about 30% slower. On Linux, +# sync/hsha performance is about the same, with hsha of course using less memory. 
+# +# Alternatively, can provide your own RPC server by providing the fully-qualified class name +# of an o.a.c.t.TServerFactory that can create an instance of it. +rpc_server_type: sync + +# Uncomment rpc_min|max_thread to set request pool size limits. +# +# Regardless of your choice of RPC server (see above), the number of maximum requests in the +# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync +# RPC server, it also dictates the number of clients that can be connected at all). +# +# The default is unlimited and thus provide no protection against clients overwhelming the server. You are +# encouraged to set a maximum that makes sense for you in production, but do keep in mind that +# rpc_max_threads represents the maximum number of client requests this server may execute concurrently. +# +# rpc_min_threads: 16 +# rpc_max_threads: 2048 + +# uncomment to set socket buffer sizes on rpc connections +# rpc_send_buff_size_in_bytes: +# rpc_recv_buff_size_in_bytes: + +# uncomment to set socket buffer size for internode communication +# Note that when setting this, the buffer size is limited by net.core.wmem_max +# and when not setting it it is defined by net.ipv4.tcp_wmem +# See: +# /proc/sys/net/core/wmem_max +# /proc/sys/net/core/rmem_max +# /proc/sys/net/ipv4/tcp_wmem +# /proc/sys/net/ipv4/tcp_wmem +# and: man tcp +# internode_send_buff_size_in_bytes: +# internode_recv_buff_size_in_bytes: + +# Frame size for thrift (maximum field length). +thrift_framed_transport_size_in_mb: 15 + +# The max length of a thrift message, including all fields and +# internal thrift overhead. +thrift_max_message_length_in_mb: 16 + +# Set to true to have Cassandra create a hard link to each sstable +# flushed or streamed locally in a backups/ subdirectory of the +# Keyspace data. Removing these links is the operator's +# responsibility. +incremental_backups: false + +# Whether or not to take a snapshot before each compaction. 
Be +# careful using this option, since Cassandra won't clean up the +# snapshots for you. Mostly useful if you're paranoid when there +# is a data format change. +snapshot_before_compaction: false + +# Whether or not a snapshot is taken of the data before keyspace truncation +# or dropping of column families. The STRONGLY advised default of true +# should be used to provide data safety. If you set this flag to false, you will +# lose data on truncation or drop. +auto_snapshot: true + +# Add column indexes to a row after its contents reach this size. +# Increase if your column values are large, or if you have a very large +# number of columns. The competing causes are, Cassandra has to +# deserialize this much of the row to read a single column, so you want +# it to be small - at least if you do many partial-row reads - but all +# the index data is read for each access, so you don't want to generate +# that wastefully either. +column_index_size_in_kb: 64 + +# Size limit for rows being compacted in memory. Larger rows will spill +# over to disk and use a slower two-pass compaction process. A message +# will be logged specifying the row key. +in_memory_compaction_limit_in_mb: 64 + +# Number of simultaneous compactions to allow, NOT including +# validation "compactions" for anti-entropy repair. Simultaneous +# compactions can help preserve read performance in a mixed read/write +# workload, by mitigating the tendency of small sstables to accumulate +# during a single long running compactions. The default is usually +# fine and if you experience problems with compaction running too +# slowly or too fast, you should look at +# compaction_throughput_mb_per_sec first. +# +# concurrent_compactors defaults to the number of cores. +# Uncomment to make compaction mono-threaded, the pre-0.8 default. +#concurrent_compactors: 1 + +# Multi-threaded compaction. When enabled, each compaction will use +# up to one thread per core, plus one thread per sstable being merged. 
+# This is usually only useful for SSD-based hardware: otherwise, +# your concern is usually to get compaction to do LESS i/o (see: +# compaction_throughput_mb_per_sec), not more. +multithreaded_compaction: false + +# Throttles compaction to the given total throughput across the entire +# system. The faster you insert data, the faster you need to compact in +# order to keep the sstable count down, but in general, setting this to +# 16 to 32 times the rate you are inserting data is more than sufficient. +# Setting this to 0 disables throttling. Note that this account for all types +# of compaction, including validation compaction. +compaction_throughput_mb_per_sec: 16 + +# Track cached row keys during compaction, and re-cache their new +# positions in the compacted sstable. Disable if you use really large +# key caches. +compaction_preheat_key_cache: true + +# Throttles all outbound streaming file transfers on this node to the +# given total throughput in Mbps. This is necessary because Cassandra does +# mostly sequential IO when streaming data during bootstrap or repair, which +# can lead to saturating the network connection and degrading rpc performance. +# When unset, the default is 200 Mbps or 25 MB/s. +# stream_throughput_outbound_megabits_per_sec: 200 + +# How long the coordinator should wait for read operations to complete +read_request_timeout_in_ms: 10000 +# How long the coordinator should wait for seq or index scans to complete +range_request_timeout_in_ms: 10000 +# How long the coordinator should wait for writes to complete +write_request_timeout_in_ms: 10000 +# How long the coordinator should wait for truncates to complete +# (This can be much longer, because we need to flush all CFs +# to make sure we can clear out anythink in the commitlog that could +# cause truncated data to reappear.) 
+truncate_request_timeout_in_ms: 60000 +# The default timeout for other, miscellaneous operations +request_timeout_in_ms: 10000 + +# Enable operation timeout information exchange between nodes to accurately +# measure request timeouts, If disabled cassandra will assuming the request +# was forwarded to the replica instantly by the coordinator +# +# Warning: before enabling this property make sure to ntp is installed +# and the times are synchronized between the nodes. +cross_node_timeout: false + +# Enable socket timeout for streaming operation. +# When a timeout occurs during streaming, streaming is retried from the start +# of the current file. This *can* involve re-streaming an important amount of +# data, so you should avoid setting the value too low. +# Default value is 0, which never timeout streams. +# streaming_socket_timeout_in_ms: 0 + +# phi value that must be reached for a host to be marked down. +# most users should never need to adjust this. +# phi_convict_threshold: 8 + +# endpoint_snitch -- Set this to a class that implements +# IEndpointSnitch. The snitch has two functions: +# - it teaches Cassandra enough about your network topology to route +# requests efficiently +# - it allows Cassandra to spread replicas around your cluster to avoid +# correlated failures. It does this by grouping machines into +# "datacenters" and "racks." Cassandra will do its best not to have +# more than one replica on the same "rack" (which may not actually +# be a physical location) +# +# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER, +# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS +# ARE PLACED. +# +# Out of the box, Cassandra provides +# - SimpleSnitch: +# Treats Strategy order as proximity. This improves cache locality +# when disabling read repair, which can further improve throughput. +# Only appropriate for single-datacenter deployments. 
+# - PropertyFileSnitch: +# Proximity is determined by rack and data center, which are +# explicitly configured in cassandra-topology.properties. +# - GossipingPropertyFileSnitch +# The rack and datacenter for the local node are defined in +# cassandra-rackdc.properties and propagated to other nodes via gossip. If +# cassandra-topology.properties exists, it is used as a fallback, allowing +# migration from the PropertyFileSnitch. +# - RackInferringSnitch: +# Proximity is determined by rack and data center, which are +# assumed to correspond to the 3rd and 2nd octet of each node's +# IP address, respectively. Unless this happens to match your +# deployment conventions (as it did Facebook's), this is best used +# as an example of writing a custom Snitch class. +# - Ec2Snitch: +# Appropriate for EC2 deployments in a single Region. Loads Region +# and Availability Zone information from the EC2 API. The Region is +# treated as the Datacenter, and the Availability Zone as the rack. +# Only private IPs are used, so this will not work across multiple +# Regions. +# - Ec2MultiRegionSnitch: +# Uses public IPs as broadcast_address to allow cross-region +# connectivity. (Thus, you should set seed addresses to the public +# IP as well.) You will need to open the storage_port or +# ssl_storage_port on the public IP firewall. (For intra-Region +# traffic, Cassandra will switch to the private IP after +# establishing a connection.) +# +# You can use a custom Snitch by setting this to the full class name +# of the snitch, which will be assumed to be on your classpath. 
+endpoint_snitch: SimpleSnitch + +# controls how often to perform the more expensive part of host score +# calculation +dynamic_snitch_update_interval_in_ms: 100 +# controls how often to reset all host scores, allowing a bad host to +# possibly recover +dynamic_snitch_reset_interval_in_ms: 600000 +# if set greater than zero and read_repair_chance is < 1.0, this will allow +# 'pinning' of replicas to hosts in order to increase cache capacity. +# The badness threshold will control how much worse the pinned host has to be +# before the dynamic snitch will prefer other replicas over it. This is +# expressed as a double which represents a percentage. Thus, a value of +# 0.2 means Cassandra would continue to prefer the static snitch values +# until the pinned host was 20% worse than the fastest. +dynamic_snitch_badness_threshold: 0.1 + +# request_scheduler -- Set this to a class that implements +# RequestScheduler, which will schedule incoming client requests +# according to the specific policy. This is useful for multi-tenancy +# with a single Cassandra cluster. +# NOTE: This is specifically for requests from the client and does +# not affect inter node communication. +# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place +# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of +# client requests to a node with a separate queue for each +# request_scheduler_id. The scheduler is further customized by +# request_scheduler_options as described below. +request_scheduler: org.apache.cassandra.scheduler.NoScheduler + +# Scheduler Options vary based on the type of scheduler +# NoScheduler - Has no options +# RoundRobin +# - throttle_limit -- The throttle_limit is the number of in-flight +# requests per client. Requests beyond +# that limit are queued up until +# running requests can complete. +# The value of 80 here is twice the number of +# concurrent_reads + concurrent_writes. 
+# - default_weight -- default_weight is optional and allows for +# overriding the default which is 1. +# - weights -- Weights are optional and will default to 1 or the +# overridden default_weight. The weight translates into how +# many requests are handled during each turn of the +# RoundRobin, based on the scheduler id. +# +# request_scheduler_options: +# throttle_limit: 80 +# default_weight: 5 +# weights: +# Keyspace1: 1 +# Keyspace2: 5 + +# request_scheduler_id -- An identifer based on which to perform +# the request scheduling. Currently the only valid option is keyspace. +# request_scheduler_id: keyspace + +# index_interval controls the sampling of entries from the primrary +# row index in terms of space versus time. The larger the interval, +# the smaller and less effective the sampling will be. In technicial +# terms, the interval coresponds to the number of index entries that +# are skipped between taking each sample. All the sampled entries +# must fit in memory. Generally, a value between 128 and 512 here +# coupled with a large key cache size on CFs results in the best trade +# offs. This value is not often changed, however if you have many +# very small rows (many to an OS page), then increasing this will +# often lower memory usage without a impact on performance. +index_interval: 128 + +# Enable or disable inter-node encryption +# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that +# users generate their own keys) TLS_RSA_WITH_AES_128_CBC_SHA as the cipher +# suite for authentication, key exchange and encryption of the actual data transfers. +# NOTE: No custom encryption options are enabled at the moment +# The available internode options are : all, none, dc, rack +# +# If set to dc cassandra will encrypt the traffic between the DCs +# If set to rack cassandra will encrypt the traffic between the racks +# +# The passwords used in these options must match the passwords used when generating +# the keystore and truststore. 
For instructions on generating these files, see: +# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSE... +# +server_encryption_options: + internode_encryption: none + keystore: conf/.keystore + keystore_password: cassandra + truststore: conf/.truststore + truststore_password: cassandra + # More advanced defaults below: + # protocol: TLS + # algorithm: SunX509 + # store_type: JKS + # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA] + # require_client_auth: false + +# enable or disable client/server encryption. +client_encryption_options: + enabled: false + keystore: conf/.keystore + keystore_password: cassandra + # require_client_auth: false + # Set truststore and truststore_password if require_client_auth is true + # truststore: conf/.truststore + # truststore_password: cassandra + # More advanced defaults below: + # protocol: TLS + # algorithm: SunX509 + # store_type: JKS + # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA] + +# internode_compression controls whether traffic between nodes is +# compressed. +# can be: all - all traffic is compressed +# dc - traffic between different datacenters is compressed +# none - nothing is compressed. +internode_compression: all + +# Enable or disable tcp_nodelay for inter-dc communication. +# Disabling it will result in larger (but fewer) network packets being sent, +# reducing overhead from the TCP protocol itself, at the cost of increasing +# latency if you block for cross-datacenter responses. +inter_dc_tcp_nodelay: true diff --git a/modules/plugins/cassandra/src/test/resources/log4j.properties b/modules/plugins/cassandra/src/test/resources/log4j.properties new file mode 100644 index 0000000..d56cc88 --- /dev/null +++ b/modules/plugins/cassandra/src/test/resources/log4j.properties @@ -0,0 +1,41 @@ +# +# /* +# * RHQ Management Platform +# * Copyright (C) 2005-2012 Red Hat, Inc. +# * All rights reserved. 
+# * +# * This program is free software; you can redistribute it and/or modify +# * it under the terms of the GNU General Public License, version 2, as +# * published by the Free Software Foundation, and/or the GNU Lesser +# * General Public License, version 2.1, also as published by the Free +# * Software Foundation. +# * +# * This program is distributed in the hope that it will be useful, +# * but WITHOUT ANY WARRANTY; without even the implied warranty of +# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# * GNU General Public License and the GNU Lesser General Public License +# * for more details. +# * +# * You should have received a copy of the GNU General Public License +# * and the GNU Lesser General Public License along with this program; +# * if not, write to the Free Software Foundation, Inc., +# * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# */ +# + +log4j.rootCategory=WARN, FILE, CONSOLE + +log4j.appender.FILE=org.apache.log4j.DailyRollingFileAppender +log4j.appender.FILE.DatePattern='.'yyyy-MM-dd +log4j.appender.FILE.File=./target/test.log +log4j.appender.FILE.layout=org.apache.log4j.PatternLayout +log4j.appender.FILE.layout.ConversionPattern=%d{ABSOLUTE} %-5p %c %m%n +#log4j.appender.A1.layout.ConversionPattern=%d [%t] %-5p %c - %m%n +log4j.appender.FILE.Append=false + +log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender +log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout +#log4j.appender.CONSOLE.layout.ConversionPattern=%5p %d{HH:mm:ss,SSS} %m%n +log4j.appender.CONSOLE.layout.ConversionPattern=%d{ABSOLUTE} %-5p %c %m%n + +log4j.logger.org.rhq=DEBUG diff --git a/modules/plugins/cassandra/src/test/resources/seeds.yaml b/modules/plugins/cassandra/src/test/resources/seeds.yaml new file mode 100644 index 0000000..75e398c --- /dev/null +++ b/modules/plugins/cassandra/src/test/resources/seeds.yaml @@ -0,0 +1,4 @@ +seed_provider: + - class_name: org.apache.cassandra.locator.SimpleSeedProvider + parameters: + - 
seeds: 127.0.0.1 diff --git a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml index 84bb832..8df8ace 100644 --- a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml +++ b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml @@ -24,18 +24,21 @@
<plugin-configuration> <c:simple-property name="connectorAddress" displayName="Manager URL" default="service:jmx:rmi:///jndi/rmi://localhost:7199/jmxrmi" - description="The RMI URL with which to connect to the Cassandra node (e.g. service:jmx:rmi:///jndi/rmi://localhost:7199/jmxrmi)."/> + description="The RMI URL with which to connect to the RHQ Storage Node (e.g. service:jmx:rmi:///jndi/rmi://localhost:7199/jmxrmi)."/> <c:simple-property name="type" readOnly="true" default="org.mc4j.ems.connection.support.metadata.J2SE5ConnectionTypeDescriptor" - description="The type used to establish the EMS connection to the Cassandra node."/> + description="The type used to establish the EMS connection to the RHQ Storage Node."/> <c:simple-property name="username" default="rhqadmin" required="true" description="The login username"/> <c:simple-property name="password" default="rhqadmin" required="true" type="password" description="The login password"/> <c:simple-property name="baseDir" displayName="Base Directory" description="The base directory from which the Cassandra Daemon was launched." required="false"/> <c:simple-property name="yamlConfiguration" displayName="YAML Configuration File" description="YAML Configuration File"/> - <c:simple-property name="nativeTransportPort" description="The port on which Cassandra listens for CQL client connections." 
default="9042" type="integer"/> - <c:simple-property name="jmxPort" description="The JMX port for Cassandra" default="7299" type="integer" readOnly="true"/> - <c:simple-property name="host" description="The host on which cassandra listens to CQL client connections" default="localhost"/> + <c:simple-property name="nativeTransportPort" default="9142" type="integer" readOnly="true" + description="The port on which the Storage Node listens for CQL client connections."/> + <c:simple-property name="storagePort" default="7100" type="integer" readOnly="true" + description="The port on which the Storage Node listens for internode communication."/> + <c:simple-property name="jmxPort" description="The JMX port for the RHQ Storage Node" default="7299" type="integer" readOnly="true"/> + <c:simple-property name="host" description="The host on which the RHQ Storage Node listens to CQL client connections" default="localhost"/> <c:simple-property name="clusterName" description="Cluster name" default="localhost"/> - <c:simple-property name="authenticator" required="true" default="org.apache.cassandra.auth.AllowAllAuthenticator" description="Cassandra client authenticator"> + <c:simple-property name="authenticator" required="true" default="org.apache.cassandra.auth.AllowAllAuthenticator" description="RHQ Storage Node client authenticator"> <c:property-options> <c:option name="org.apache.cassandra.auth.AllowAllAuthenticator" value="org.apache.cassandra.auth.AllowAllAuthenticator"/> <c:option name="org.apache.cassandra.auth.PasswordAuthenticator" value="org.apache.cassandra.auth.PasswordAuthenticator"/>
commit 35aec2e57a134860d16fcc80dfea8ca3f9db6c33 Author: Simeon Pinder spinder@redhat.com Date: Mon Aug 5 16:52:11 2013 -0400
minor refactor.
diff --git a/modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java b/modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java index 86d64ab..b13b289 100644 --- a/modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java +++ b/modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java @@ -23,6 +23,7 @@ import java.util.Set;
import javax.naming.CompositeName; import javax.naming.Context; +import javax.naming.InvalidNameException; import javax.naming.NamingEnumeration; import javax.naming.NamingException; import javax.naming.directory.Attribute; @@ -464,19 +465,7 @@ public class TestLdapSettings extends JFrame { // Going with the first match SearchResult si = (SearchResult) answer.next();
- // Construct the UserDN - // userDN = si.getName() + "," + baseDNs[x]; - //BZ: 981015: - userDN = null; - - try { - userDN = si.getNameInNamespace(); - } catch (UnsupportedOperationException use) { - userDN = new CompositeName(si.getName()).get(0); - if (si.isRelative()) { - userDN += "," + baseDNs[x]; - } - } + constructUserDn(baseDNs, x, si);
msg = "STEP-2:PASS: The test user '" + testUserName @@ -946,16 +935,7 @@ public class TestLdapSettings extends JFrame { // We use the first match SearchResult si = answer.next(); // Construct the UserDN - userDN = null; - - try { - userDN = si.getNameInNamespace(); - } catch (UnsupportedOperationException use) { - userDN = new CompositeName(si.getName()).get(0); - if (si.isRelative()) { - userDN += "," + baseDNs[x]; - } - } + constructUserDn(baseDNs, x, si); userDetails.put("dn", userDN);
// Construct the UserDN @@ -977,6 +957,22 @@ public class TestLdapSettings extends JFrame { return userDetails; }
+ /* Construct UserDn. + * + */ + private void constructUserDn(String[] baseDNs, int x, SearchResult si) throws InvalidNameException { + userDN = null; + + try { + userDN = si.getNameInNamespace(); + } catch (UnsupportedOperationException use) { + userDN = new CompositeName(si.getName()).get(0); + if (si.isRelative()) { + userDN += "," + baseDNs[x]; + } + } + } + public Set<String> findAvailableGroupsFor(String userName) { // Load our LDAP specific properties Properties options = env;
commit e6fb7a22c9cc1b7c3a5dab8cf48017d5da564087 Author: Simeon Pinder spinder@redhat.com Date: Mon Aug 5 16:41:57 2013 -0400
Applying fixes for [BZ 707047] and [BZ 981015] to LDAP Test Tool, to correctly encode characters and consistently construct UserDN.
diff --git a/modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java b/modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java index bc322ab..86d64ab 100644 --- a/modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java +++ b/modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java @@ -21,6 +21,7 @@ import java.util.Map; import java.util.Properties; import java.util.Set;
+import javax.naming.CompositeName; import javax.naming.Context; import javax.naming.NamingEnumeration; import javax.naming.NamingException; @@ -464,7 +465,19 @@ public class TestLdapSettings extends JFrame { SearchResult si = (SearchResult) answer.next();
// Construct the UserDN - userDN = si.getName() + "," + baseDNs[x]; + // userDN = si.getName() + "," + baseDNs[x]; + //BZ: 981015: + userDN = null; + + try { + userDN = si.getNameInNamespace(); + } catch (UnsupportedOperationException use) { + userDN = new CompositeName(si.getName()).get(0); + if (si.isRelative()) { + userDN += "," + baseDNs[x]; + } + } + msg = "STEP-2:PASS: The test user '" + testUserName + "' was succesfully located, and the following userDN will be used in authorization check:\n"; @@ -914,7 +927,8 @@ public class TestLdapSettings extends JFrame {
String filter = String.format("(&(%s)(%s=%s))", groupSearchFilter, groupMemberFilter, - testUserDN); + // testUserDN); BZ 707047 + encodeForFilter(testUserDN));
generateUiLoggingForStep4LdapFilter(userName, filter);
@@ -931,19 +945,16 @@ public class TestLdapSettings extends JFrame {
// We use the first match SearchResult si = answer.next(); - //generate the DN - String userDN = null; + // Construct the UserDN + userDN = null; + try { userDN = si.getNameInNamespace(); } catch (UnsupportedOperationException use) { - userDN = si.getName(); - if (userDN.startsWith(""")) { - userDN = userDN.substring(1, userDN.length()); - } - if (userDN.endsWith(""")) { - userDN = userDN.substring(0, userDN.length() - 1); + userDN = new CompositeName(si.getName()).get(0); + if (si.isRelative()) { + userDN += "," + baseDNs[x]; } - userDN = userDN + "," + baseDNs[x]; } userDetails.put("dn", userDN);
commit 59eaa500eedaae0a7e687800c049dbfe7f5c01b1 Author: Mike Thompson mithomps@redhat.com Date: Mon Aug 5 15:07:37 2013 -0700
[BZ 991257] Spurious Globally uncaught Exception: (TypeError): 'null' is not an object. Add EnhancedToolstrip instead of Toolstrip to fix proper destroying.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/AddToDashboardComponent.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/AddToDashboardComponent.java index 52b0d86..f337202 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/AddToDashboardComponent.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/AddToDashboardComponent.java @@ -23,12 +23,10 @@ import java.util.LinkedHashMap; import com.google.gwt.user.client.rpc.AsyncCallback; import com.smartgwt.client.widgets.IButton; import com.smartgwt.client.widgets.events.ClickEvent; -import com.smartgwt.client.widgets.form.DynamicForm; import com.smartgwt.client.widgets.form.fields.SelectItem; import com.smartgwt.client.widgets.form.fields.events.ChangeEvent; import com.smartgwt.client.widgets.form.fields.events.ChangeHandler; import com.smartgwt.client.widgets.grid.ListGridRecord; -import com.smartgwt.client.widgets.toolbar.ToolStrip;
import org.rhq.core.domain.configuration.PropertySimple; import org.rhq.core.domain.criteria.DashboardCriteria; @@ -42,12 +40,13 @@ import org.rhq.enterprise.gui.coregui.client.dashboard.portlets.inventory.resour import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; import org.rhq.enterprise.gui.coregui.client.util.Log; import org.rhq.enterprise.gui.coregui.client.util.enhanced.Enhanced; +import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedToolStrip; import org.rhq.enterprise.gui.coregui.client.util.message.Message;
/** * @author Mike Thompson */ -public class AddToDashboardComponent extends ToolStrip implements Enhanced { +public class AddToDashboardComponent extends EnhancedToolStrip implements Enhanced { final private Resource resource; private SelectItem dashboardSelectItem; private Dashboard selectedDashboard; @@ -75,7 +74,6 @@ public class AddToDashboardComponent extends ToolStrip implements Enhanced {
private void createToolstrip() { addSpacer(15); - dashboardSelectItem = new SelectItem(); addToDashboardButton = new IButton(MSG.view_metric_addToDashboard()); addToDashboardButton.disable();
@@ -103,8 +101,8 @@ public class AddToDashboardComponent extends ToolStrip implements Enhanced { .getMetricDefinitions()) { if (measurementDefinition.getId() == selectedRecord .getAttributeAsInt(MetricsViewDataSource.FIELD_METRIC_DEF_ID)) { - Log.info("Add to Dashboard -- Storing: " + measurementDefinition.getDisplayName() - + " in " + selectedDashboard.getName()); + Log.info("Add to Dashboard -- Storing: " + measurementDefinition.getDisplayName() + " in " + + selectedDashboard.getName()); storeDashboardMetric(selectedDashboard, resource, measurementDefinition); break; } @@ -114,15 +112,14 @@ public class AddToDashboardComponent extends ToolStrip implements Enhanced { }); }
- public void disableAddToDashboardButton(){ - addToDashboardButton.disable(); + public void disableAddToDashboardButton() { + addToDashboardButton.disable(); }
- public void enableAddToDashboardButton(){ + public void enableAddToDashboardButton() { addToDashboardButton.enable(); }
- public void populateDashboardMenu() { dashboardMenuMap.clear(); dashboardMap.clear(); @@ -157,7 +154,6 @@ public class AddToDashboardComponent extends ToolStrip implements Enhanced { this.metricsListGrid = metricsListGrid; }
- private void storeDashboardMetric(Dashboard dashboard, Resource resource, MeasurementDefinition definition) { DashboardPortlet dashboardPortlet = new DashboardPortlet(MSG.view_tree_common_contextMenu_resourceGraph(), ResourceD3GraphPortlet.KEY, 250);
commit 71fe832fda706228745df1bc9fe089b1b7095d8b Author: Jay Shaughnessy jshaughn@redhat.com Date: Mon Aug 5 17:42:45 2013 -0400
More fine-grained bundle testing and work - add canView authz support for bundle stuff - up the serial version uid for affected entities - fix inverse relation handling on add/remove/sets - add more testing around create and delete, fix cleanup of bundle groups - fix some delete code when roles or bundles are associated with bundle groups - add some more useful authz checking for local api
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/auth/Subject.java b/modules/core/domain/src/main/java/org/rhq/core/domain/auth/Subject.java index 5f86d8e..c70d651 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/auth/Subject.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/auth/Subject.java @@ -186,6 +186,20 @@ import org.rhq.core.domain.resource.group.ResourceGroup; + " JOIN r.subjects s " // + " WHERE s.id = :subjectId ) ) "),
+ @NamedQuery(name = Subject.QUERY_CAN_VIEW_BUNDLE, query = "SELECT COUNT(b) " + + "FROM Bundle b, IN (b.bundleGroups) bg, IN (bg.roles) r, IN (r.subjects) s " + + "WHERE s = :subject AND b.id = :bundleId"), + + @NamedQuery(name = Subject.QUERY_CAN_VIEW_BUNDLE_GROUP, query = "" // + + "SELECT count(bg) " // + + " FROM BundleGroup bg " // + + " WHERE bg.id = :bundleGroupId " // + + " AND bg.id IN (SELECT innerbg.id " // + + " FROM BundleGroup innerbg " // + + " JOIN innerbg.roles r " // + + " JOIN r.subjects s " // + + " WHERE s = :subject) "), + /* * No easy way to test whether ALL resources are in some group in some role in some subject where * subject.id = <id> & role.permission = <perm> @@ -246,6 +260,8 @@ public class Subject implements Serializable { public static final String QUERY_CAN_VIEW_RESOURCES = "Subject.canViewResources"; public static final String QUERY_CAN_VIEW_GROUP = "Subject.canViewGroup"; public static final String QUERY_CAN_VIEW_AUTO_GROUP = "Subject.canViewAutoGroup"; + public static final String QUERY_CAN_VIEW_BUNDLE = "Subject.canViewBundle"; + public static final String QUERY_CAN_VIEW_BUNDLE_GROUP = "Subject.canViewBundleGroup";
public static final String QUERY_GET_RESOURCES_BY_PERMISSION = "Subject.getResourcesByPermission";
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/authz/Role.java b/modules/core/domain/src/main/java/org/rhq/core/domain/authz/Role.java index af5af3b..3f4c136 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/authz/Role.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/authz/Role.java @@ -38,6 +38,7 @@ import javax.persistence.GeneratedValue; import javax.persistence.GenerationType; import javax.persistence.Id; import javax.persistence.JoinColumn; +import javax.persistence.JoinTable; import javax.persistence.ManyToMany; import javax.persistence.NamedQueries; import javax.persistence.NamedQuery; @@ -94,7 +95,7 @@ public class Role implements Serializable {
public static final String QUERY_DYNAMIC_CONFIG_VALUES = "Role.dynamicConfigValues";
- private static final long serialVersionUID = 1L; + private static final long serialVersionUID = 2L;
@Column(name = "ID", nullable = false) @GeneratedValue(strategy = GenerationType.AUTO, generator = "RHQ_ROLE_ID_SEQ") @@ -129,7 +130,8 @@ public class Role implements Serializable { @Cascade({ org.hibernate.annotations.CascadeType.ALL }) private Set<Permission> permissions = new HashSet<Permission>();
- @ManyToMany(mappedBy = "roles") + @JoinTable(name = "RHQ_ROLE_BUNDLE_GROUP_MAP", joinColumns = { @JoinColumn(name = "ROLE_ID") }, inverseJoinColumns = { @JoinColumn(name = "BUNDLE_GROUP_ID") }) + @ManyToMany private Set<BundleGroup> bundleGroups = new HashSet<BundleGroup>();
public Role() { @@ -277,9 +279,16 @@ public class Role implements Serializable { }
public Set<BundleGroup> getBundleGroups() { + if (this.bundleGroups == null) { + this.bundleGroups = new HashSet<BundleGroup>(); + } return bundleGroups; }
+ /** + * This also updates the inverse relations (add this role to bundle groups) + * @param bundleGroups + */ public void setBundleGroups(Set<BundleGroup> bundleGroups) { if (bundleGroups == null) { this.bundleGroups = new HashSet<BundleGroup>(); @@ -292,22 +301,22 @@ public class Role implements Serializable { } }
+ /** + * This also updates the inverse relation (add this role to bundle group) + * @param bundleGroup + */ public void addBundleGroup(BundleGroup bundleGroup) { - if (this.bundleGroups == null) { - this.bundleGroups = new HashSet<BundleGroup>(); - } - + getBundleGroups().add(bundleGroup); bundleGroup.addRole(this); - this.bundleGroups.add(bundleGroup); }
+ /** + * This also updates the inverse relation (remove this role from bundle group) + * @param bundleGroup + */ public void removeBundleGroup(BundleGroup bundleGroup) { - if (this.bundleGroups == null) { - this.bundleGroups = new HashSet<BundleGroup>(); - } - + getBundleGroups().remove(bundleGroup); bundleGroup.removeRole(this); - this.bundleGroups.remove(bundleGroup); }
public Set<ResourceGroup> getResourceGroups() { diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/bundle/Bundle.java b/modules/core/domain/src/main/java/org/rhq/core/domain/bundle/Bundle.java index 5af51af..c2f2714 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/bundle/Bundle.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/bundle/Bundle.java @@ -69,7 +69,7 @@ import org.rhq.core.domain.tagging.Tag; @Table(name = "RHQ_BUNDLE") @XmlAccessorType(XmlAccessType.FIELD) public class Bundle implements Serializable { - private static final long serialVersionUID = 1L; + private static final long serialVersionUID = 2L;
public static final String QUERY_FIND_ALL = "Bundle.findAll"; public static final String QUERY_FIND_BY_NAME = "Bundle.findByName"; @@ -104,8 +104,8 @@ public class Bundle implements Serializable { @OneToMany(mappedBy = "bundle", fetch = FetchType.LAZY) private List<BundleVersion> bundleVersions = new ArrayList<BundleVersion>();
- @ManyToMany(mappedBy = "bundles", fetch = FetchType.LAZY, cascade = CascadeType.REMOVE) - private Set<BundleGroup> bundleGroups; + @ManyToMany(mappedBy = "bundles", fetch = FetchType.LAZY) + private Set<BundleGroup> bundleGroups = new HashSet<BundleGroup>();
@ManyToMany(mappedBy = "bundles", fetch = FetchType.LAZY, cascade = CascadeType.REMOVE) private Set<Tag> tags; @@ -183,26 +183,34 @@ public class Bundle implements Serializable { }
public Set<BundleGroup> getBundleGroups() { + if (this.bundleGroups == null) { + this.bundleGroups = new HashSet<BundleGroup>(); + } return bundleGroups; }
+ /** + * This does not set the inverse relationships. + * @param bundleGroups + */ public void setBundleGroups(Set<BundleGroup> bundleGroups) { this.bundleGroups = bundleGroups; }
+ /** + * This does not set the inverse relationship. You may want {@link BundleGroup#addBundle(Bundle)}. + * @param bundleGroups + */ public void addBundleGroup(BundleGroup bundleGroup) { - if (this.bundleGroups == null) { - this.bundleGroups = new HashSet<BundleGroup>(); - } - this.bundleGroups.add(bundleGroup); + getBundleGroups().add(bundleGroup); }
+ /** + * This does not set the inverse relationship. You may want {@link BundleGroup#removeBundle(Bundle)}. + * @param bundleGroups + */ public boolean removeBundleGroup(BundleGroup bundleGroup) { - if (this.bundleGroups != null) { - return this.bundleGroups.remove(bundleGroup); - } else { - return false; - } + return getBundleGroups().remove(bundleGroup); }
public List<BundleDestination> getDestinations() { diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/bundle/BundleGroup.java b/modules/core/domain/src/main/java/org/rhq/core/domain/bundle/BundleGroup.java index 16037f7..dd6a824 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/bundle/BundleGroup.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/bundle/BundleGroup.java @@ -26,8 +26,10 @@ import java.io.Serializable; import java.util.HashSet; import java.util.Set;
+import javax.persistence.CascadeType; import javax.persistence.Column; import javax.persistence.Entity; +import javax.persistence.FetchType; import javax.persistence.GeneratedValue; import javax.persistence.GenerationType; import javax.persistence.Id; @@ -74,11 +76,10 @@ public class BundleGroup implements Serializable {
@JoinTable(name = "RHQ_BUNDLE_GROUP_BUNDLE_MAP", joinColumns = { @JoinColumn(name = "BUNDLE_GROUP_ID") }, inverseJoinColumns = { @JoinColumn(name = "BUNDLE_ID") }) @ManyToMany - private Set<Bundle> bundles = new HashSet<Bundle>(); + private Set<Bundle> bundles;
- @JoinTable(name = "RHQ_ROLE_BUNDLE_GROUP_MAP", joinColumns = { @JoinColumn(name = "BUNDLE_GROUP_ID") }, inverseJoinColumns = { @JoinColumn(name = "ROLE_ID") }) - @ManyToMany - private Set<Role> roles = new HashSet<Role>(); + @ManyToMany(mappedBy = "bundleGroups", fetch = FetchType.LAZY, cascade = CascadeType.REMOVE) + private Set<Role> roles;
public BundleGroup() { // for JPA use @@ -119,12 +120,41 @@ public class BundleGroup implements Serializable { return bundles; }
+ /** + * This also updates the inverse relation (add this bundle group to bundle) + * @param bundle + */ public void addBundle(Bundle bundle) { getBundles().add(bundle); + bundle.addBundleGroup(this); }
- public void removeBundle(Bundle bundle) { - getBundles().remove(bundle); + /** + * This also updates the inverse relation (remove this bundle group from bundle) + * @param bundle + */ + public boolean removeBundle(Bundle bundle) { + boolean result = getBundles().remove(bundle); + bundle.removeBundleGroup(this); + return result; + } + + /** + * This also updates the inverse relations + * @param bundle + */ + public void setBundles(Set<Bundle> bundles) { + for (Bundle bundle : getBundles()) { + bundle.removeBundleGroup(this); + } + + this.bundles.clear(); + + if (null != bundles) { + for (Bundle bundle : bundles) { + addBundle(bundle); + } + } }
public Set<Role> getRoles() { @@ -134,12 +164,20 @@ public class BundleGroup implements Serializable { return roles; }
+ /** + * This *does not* update the inverse relation. You may want {@link Role#addBundleGroup(BundleGroup)} + * @param role + */ public void addRole(Role role) { getRoles().add(role); }
- public void removeRole(Role role) { - getRoles().remove(role); + /** + * This *does not* update the inverse relation. You may want {@link Role#removeBundleGroup(BundleGroup)} + * @param role + */ + public boolean removeRole(Role role) { + return getRoles().remove(role); }
public Long getCtime() { diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/BundleManagerBeanTest.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/BundleManagerBeanTest.java index 117d2df..5e94ab1 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/BundleManagerBeanTest.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/BundleManagerBeanTest.java @@ -215,6 +215,15 @@ public class BundleManagerBeanTest extends AbstractEJB3Test { em.remove(em.getReference(BundleDeployment.class, ((BundleDeployment) removeMe).getId())); }
+ // remove bundle groups to free up bundles + q = em.createQuery("SELECT bg FROM BundleGroup bg WHERE bg.name LIKE '" + TEST_PREFIX + "%'"); + doomed = q.getResultList(); + for (Object removeMe : doomed) { + BundleGroup doomedBundleGroup = em.find(BundleGroup.class, ((BundleGroup) removeMe).getId()); + doomedBundleGroup.setBundles(new HashSet<Bundle>()); + em.remove(doomedBundleGroup); + } + // remove bundles which cascade remove packageTypes and destinations // packagetypes cascade remove packages // package cascade remove packageversions @@ -224,6 +233,7 @@ public class BundleManagerBeanTest extends AbstractEJB3Test { em.remove(em.getReference(Bundle.class, ((Bundle) removeMe).getId())); } em.flush(); + // remove any orphaned pvs q = em.createQuery("SELECT pv FROM PackageVersion pv WHERE pv.generalPackage.name LIKE '" + TEST_PREFIX + "%'"); @@ -257,13 +267,6 @@ public class BundleManagerBeanTest extends AbstractEJB3Test { em.remove(em.getReference(Repo.class, ((Repo) removeMe).getId())); }
- // remove bundle groups no longer referenced by bundles - q = em.createQuery("SELECT bg FROM BundleGroup bg WHERE bg.name LIKE '" + TEST_PREFIX + "%'"); - doomed = q.getResultList(); - for (Object removeMe : doomed) { - em.remove(em.getReference(BundleGroup.class, ((BundleGroup) removeMe).getId())); - } - // remove Resource Groups left over from test deployments freeing up test resources q = em.createQuery("SELECT rg FROM ResourceGroup rg WHERE rg.name LIKE '" + TEST_PREFIX + "%'"); doomed = q.getResultList(); @@ -1318,12 +1321,9 @@ public class BundleManagerBeanTest extends AbstractEJB3Test { }
@Test(enabled = TESTS_ENABLED) - public void authzBundleGroupTest() throws Exception { - Subject subject = null; - Role role = null; - - subject = createNewSubject(TEST_USER_NAME); - role = createNewRoleForSubject(subject, TEST_ROLE_NAME); + public void testAuthzBundleGroup() throws Exception { + Subject subject = createNewSubject(TEST_USER_NAME); + Role role = createNewRoleForSubject(subject, TEST_ROLE_NAME);
subject = createSession(subject); // start a session so we can use this subject in SLSB calls
@@ -1479,14 +1479,241 @@ public class BundleManagerBeanTest extends AbstractEJB3Test { assertEquals("Should be able to see assigned bundle", 1, bundleGroups.size()); assertNotNull(bundleGroups.get(0).getBundles()); assertEquals("Should have fetched bundle in bundle group", 1, bundleGroups.get(0).getBundles().size()); - assertEquals("Should have fetched bundle in bundle group", bundle, bundleGroups.get(0).getBundles() - .iterator().next()); + assertEquals("Should have fetched bundle in bundle group", bundle, bundleGroups.get(0).getBundles().iterator() + .next()); assertNotNull(bundleGroups.get(0).getRoles()); assertEquals("Should have fetched role for bundle group", 1, bundleGroups.get(0).getRoles().size()); assertEquals("Should have fetched role for bundle group", role, bundleGroups.get(0).getRoles().iterator() .next()); }
+ @Test(enabled = TESTS_ENABLED) + public void testAuthzCreateBundleVersion() throws Exception { + Subject subject = createNewSubject(TEST_USER_NAME); + Role role = createNewRoleForSubject(subject, TEST_ROLE_NAME); + + subject = createSession(subject); // start a session so we can use this subject in SLSB calls + + // create bundle group + addRolePermissions(role, Permission.MANAGE_BUNDLE_GROUPS); + BundleGroup bundleGroup1 = bundleManager.createBundleGroup(subject, TEST_BUNDLE_GROUP_NAME + "_1", "bg-1"); + + // add bg1 to the role, but no perms + addRoleBundleGroup(role, bundleGroup1); + + // deny bundle create in bg1 (no create perm) + try { + createBundle(subject, TEST_PREFIX + ".bundle", bundleGroup1.getId()); + fail("Should have thrown PermissionException"); + } catch (PermissionException e) { + // expected + } + + // allow bundle creation in bg1 (has create perm) + addRolePermissions(role, Permission.CREATE_BUNDLES_IN_GROUP); + Bundle bundle = createBundle(subject, TEST_PREFIX + ".bundle", bundleGroup1.getId()); + + // deny bundle version creation (perm taken away) + removeRolePermissions(role, Permission.CREATE_BUNDLES_IN_GROUP); + try { + BundleVersion bv1 = createBundleVersion(subject, bundle.getName() + "-1", null, bundle); + fail("Should have thrown PermissionException"); + } catch (PermissionException e) { + // expected + } + + // allow bundle version creation (perm granted) + addRolePermissions(role, Permission.CREATE_BUNDLES_IN_GROUP); + BundleVersion bv1 = createBundleVersion(subject, bundle.getName() + "-1", null, bundle); + assertNotNull(bv1); + assertEquals("1.0", bv1.getVersion()); + assert 0 == bv1.getVersionOrder(); + + // create second role + Role role2 = createNewRoleForSubject(subject, TEST_ROLE_NAME + "_2"); + addRolePermissions(role2, Permission.CREATE_BUNDLES_IN_GROUP); + + // create second bundle group + BundleGroup bundleGroup2 = bundleManager.createBundleGroup(subject, TEST_BUNDLE_GROUP_NAME + "_2", "bg-2"); + + // deny bundle create 
in bg2 (not associated with role) + try { + createBundle(subject, TEST_PREFIX + ".bundle", bundleGroup2.getId()); + fail("Should have thrown PermissionException"); + } catch (PermissionException e) { + // expected + } + + // deny bundle assign to bg2 (not associated with role) + try { + bundleManager.assignBundlesToBundleGroup(subject, bundleGroup2.getId(), new int[] { bundle.getId() }); + fail("Should have thrown PermissionException"); + } catch (PermissionException e) { + // expected + } + + // add bg2 to the role + addRoleBundleGroup(role2, bundleGroup2); + + // deny bundle assign to bg2 (no perm) + removeRolePermissions(role2, Permission.CREATE_BUNDLES_IN_GROUP); + try { + bundleManager.assignBundlesToBundleGroup(subject, bundleGroup2.getId(), new int[] { bundle.getId() }); + fail("Should have thrown PermissionException"); + } catch (PermissionException e) { + // expected + } + + // allow bundle assign to bg2 + addRolePermissions(role2, Permission.ASSIGN_BUNDLES_TO_GROUP); + bundleManager.assignBundlesToBundleGroup(subject, bundleGroup2.getId(), new int[] { bundle.getId() }); + + // should fetch the single bundle even though it is in two groups + BundleCriteria bundleCriteria = new BundleCriteria(); + bundleCriteria.addFilterBundleGroupIds(bundleGroup1.getId(), bundleGroup2.getId()); + List<Bundle> bundles = bundleManager.findBundlesByCriteria(subject, bundleCriteria); + assertNotNull(bundles); + assertEquals("Should be able to see assigned bundle", 1, bundles.size()); + assertEquals("Should have fetched bundle", bundle, bundles.get(0)); + + BundleVersionCriteria bvCriteria = new BundleVersionCriteria(); + bvCriteria.addFilterBundleId(bundle.getId()); + List<BundleVersion> bundleVersions = bundleManager.findBundleVersionsByCriteria(subject, bvCriteria); + assertNotNull(bundleVersions); + assertEquals("Should be able to see assigned bundle bundleversion", 1, bundleVersions.size()); + assertEquals("Should have fetched bundleversion", bv1, bundleVersions.get(0)); 
+ + // deny unassign + try { + bundleManager.unassignBundlesFromBundleGroup(subject, bundleGroup2.getId(), new int[] { bundle.getId() }); + fail("Should have thrown PermissionException"); + } catch (PermissionException e) { + // expected + } + + // allow unassigns + addRolePermissions(role, Permission.UNASSIGN_BUNDLES_FROM_GROUP); + addRolePermissions(role2, Permission.UNASSIGN_BUNDLES_FROM_GROUP); + bundleManager.unassignBundlesFromBundleGroup(subject, bundleGroup1.getId(), new int[] { bundle.getId() }); + bundleManager.unassignBundlesFromBundleGroup(subject, bundleGroup2.getId(), new int[] { bundle.getId() }); + + // should not find the now unassigned bundle + bundles = bundleManager.findBundlesByCriteria(subject, bundleCriteria); + assertNotNull(bundles); + assertEquals("Should not be able to see unassigned bundle", 0, bundles.size()); + + bundleVersions = bundleManager.findBundleVersionsByCriteria(subject, bvCriteria); + assertNotNull(bundleVersions); + assertEquals("Should not be able to see unassigned bundle bundleversion", 0, bundleVersions.size()); + + // allow view + addRolePermissions(role, Permission.VIEW_BUNDLES); + + // should fetch the single unassigned bundle due to global view in one of the assigned roles + bundleCriteria.addFilterBundleGroupIds(null); + bundles = bundleManager.findBundlesByCriteria(subject, bundleCriteria); + assertNotNull(bundles); + assertEquals("Should be able to see unassigned bundle", 1, bundles.size()); + assertEquals("Should have fetched bundle", bundle, bundles.get(0)); + + bundleVersions = bundleManager.findBundleVersionsByCriteria(subject, bvCriteria); + assertNotNull(bundleVersions); + assertEquals("Should be able to see unassigned bundle bundleversion", 1, bundleVersions.size()); + assertEquals("Should have fetched bundleversion", bv1, bundleVersions.get(0)); + } + + @Test(enabled = TESTS_ENABLED) + public void testAuthzDeleteBundleVersion() throws Exception { + Subject subject = createNewSubject(TEST_USER_NAME); + Role 
role = createNewRoleForSubject(subject, TEST_ROLE_NAME); + + subject = createSession(subject); // start a session so we can use this subject in SLSB calls + + // create bundle group + addRolePermissions(role, Permission.MANAGE_BUNDLE_GROUPS); + BundleGroup bundleGroup1 = bundleManager.createBundleGroup(subject, TEST_BUNDLE_GROUP_NAME + "_1", "bg-1"); + + // add bg1 to the role with group create + addRoleBundleGroup(role, bundleGroup1); + addRolePermissions(role, Permission.CREATE_BUNDLES_IN_GROUP); + + // allow bundle creation in bg1 (has create perm) + Bundle bundle = createBundle(subject, TEST_PREFIX + ".bundle", bundleGroup1.getId()); + + // allow delete, global perm + addRolePermissions(role, Permission.DELETE_BUNDLES); + deleteBundleVersion(subject, bundle); + + // allow bundle creation in bg1 (has create perm) + bundle = createBundle(subject, TEST_PREFIX + ".bundle", bundleGroup1.getId()); + + // allow delete, bundle group perm + removeRolePermissions(role, Permission.DELETE_BUNDLES); + addRolePermissions(role, Permission.DELETE_BUNDLES_FROM_GROUP); + deleteBundleVersion(subject, bundle); + + // allow bundle creation in bg1 (has create perm) + bundle = createBundle(subject, TEST_PREFIX + ".bundle", bundleGroup1.getId()); + + // deny delete, no delete perms + removeRolePermissions(role, Permission.DELETE_BUNDLES_FROM_GROUP); + try { + deleteBundleVersion(subject, bundle); + fail("Should have thrown PermissionException"); + } catch (PermissionException e) { + // expected + } + } + + // subject must have create bundle version permission + private void deleteBundleVersion(Subject subject, Bundle b1) throws Exception { + assertNotNull(b1); + + BundleVersion bv1 = createBundleVersion(subject, b1.getName() + "-1", null, b1); + assertNotNull(bv1); + assertEquals("1.0", bv1.getVersion()); + BundleVersion bv2 = createBundleVersion(subject, b1.getName() + "-2", null, b1); + assertNotNull(bv2); + assertEquals("1.1", bv2.getVersion()); + + // let's add a bundle file so we 
can ensure our deletion will also delete the file too + bundleManager.addBundleFileViaByteArray(subject, bv2.getId(), "testDeleteBundleVersion", "1.0", + new Architecture("noarch"), "content".getBytes()); + BundleFileCriteria bfCriteria = new BundleFileCriteria(); + bfCriteria.addFilterBundleVersionId(bv2.getId()); + bfCriteria.fetchPackageVersion(true); + PageList<BundleFile> files = bundleManager.findBundleFilesByCriteria(overlord, bfCriteria); + assert files.size() == 1 : files; + assert files.get(0).getPackageVersion().getGeneralPackage().getName().equals("testDeleteBundleVersion") : files; + + BundleVersionCriteria bvCriteria = new BundleVersionCriteria(); + BundleCriteria bCriteria = new BundleCriteria(); + + // delete the first one - this deletes the BV but the bundle should remain intact + bundleManager.deleteBundleVersion(subject, bv2.getId(), true); + bvCriteria.addFilterId(bv2.getId()); + PageList<BundleVersion> bvResults = bundleManager.findBundleVersionsByCriteria(subject, bvCriteria); + assert bvResults.size() == 0; + bCriteria.addFilterId(b1.getId()); + PageList<Bundle> bResults = bundleManager.findBundlesByCriteria(subject, bCriteria); + assert bResults.size() == 1 : "Should not have deleted bundle yet, 1 version still exists"; + + // delete the second one - this deletes last BV thus the bundle should also get deleted + bundleManager.deleteBundleVersion(subject, bv1.getId(), true); + bvCriteria.addFilterId(bv1.getId()); + bvResults = bundleManager.findBundleVersionsByCriteria(subject, bvCriteria); + assert bvResults.size() == 0; + bCriteria.addFilterId(b1.getId()); + bResults = bundleManager.findBundlesByCriteria(subject, bCriteria); + assert bResults.size() == 0 : "Should have deleted bundle since no versions exists anymore"; + + // make sure our composite query is OK and can show us 0 bundles, too + PageList<BundleWithLatestVersionComposite> composites; + bCriteria = new BundleCriteria(); + composites = 
bundleManager.findBundlesWithLatestVersionCompositesByCriteria(subject, bCriteria); + assert composites.size() == 0; + } + private Subject createNewSubject(String subjectName) throws Exception {
Subject newSubject = new Subject(); @@ -1557,8 +1784,12 @@ public class BundleManagerBeanTest extends AbstractEJB3Test { }
private Bundle createBundle(Subject subject, String name) throws Exception { + return createBundle(subject, name, 0); + } + + private Bundle createBundle(Subject subject, String name, int bundleGroupId) throws Exception { BundleType bt = createBundleType(name); - return createBundle(subject, name, bt, 0); + return createBundle(subject, name, bt, bundleGroupId); }
private Bundle createBundle(Subject subject, String name, BundleType bt, int bundleGroupId) throws Exception { @@ -1578,7 +1809,7 @@ public class BundleManagerBeanTest extends AbstractEJB3Test { throws Exception { final String fullName = TEST_PREFIX + "-bundleversion-" + version + "-" + name; final String recipe = "deploy -f " + TEST_PREFIX + ".zip -d @@ test.path @@"; - BundleVersion bv = bundleManager.createBundleVersion(overlord, bundle.getId(), fullName, fullName + "-desc", + BundleVersion bv = bundleManager.createBundleVersion(subject, bundle.getId(), fullName, fullName + "-desc", version, recipe);
assert bv.getId() > 0; diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/AuthorizationManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/AuthorizationManagerBean.java index d71095f..d5746d3 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/AuthorizationManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/AuthorizationManagerBean.java @@ -304,6 +304,32 @@ public class AuthorizationManagerBean implements AuthorizationManagerLocal { }
@Override + public boolean canViewBundle(Subject subject, int bundleId) { + if (hasGlobalPermission(subject, Permission.VIEW_BUNDLES)) { + return true; + } + + Query query = entityManager.createNamedQuery(Subject.QUERY_CAN_VIEW_BUNDLE); + query.setParameter("subject", subject); + query.setParameter("bundleId", bundleId); + long count = (Long) query.getSingleResult(); + return (count != 0); + } + + @Override + public boolean canViewBundleGroup(Subject subject, int bundleGroupId) { + if (hasGlobalPermission(subject, Permission.VIEW_BUNDLES)) { + return true; + } + + Query query = entityManager.createNamedQuery(Subject.QUERY_CAN_VIEW_BUNDLE_GROUP); + query.setParameter("subject", subject); + query.setParameter("bundleGroupId", bundleGroupId); + long count = (Long) query.getSingleResult(); + return (count != 0); + } + + @Override public boolean isInventoryManager(Subject subject) { return hasGlobalPermission(subject, Permission.MANAGE_INVENTORY); } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/AuthorizationManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/AuthorizationManagerLocal.java index 8872d61..62d3c0c 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/AuthorizationManagerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/AuthorizationManagerLocal.java @@ -80,6 +80,28 @@ public interface AuthorizationManagerLocal { boolean canViewAutoGroup(Subject subject, int parentResourceId, int resourceTypeId);
/** + * Returns true if the current user has explicit (Global.VIEW_BUNDLES) or implicit (via bundleGroup-Role + * association) view of the specified bundle. + * + * @param subject the current subject or caller + * @param bundleId the id of some Bundle to check permissions against + * + * @return true if the current user has some role attached to some bundle group that contains this bundle + */ + boolean canViewBundle(Subject subject, int bundleId); + + /** + * Returns true if the current user has explicit (Global.VIEW_BUNDLES) or implicit (via bundleGroup-Role + * association) view of the specified bundle group. + * + * @param subject the current subject or caller + * @param bundleGroupId the id of some BundleGroup to check permissions against + * + * @return true if the current user has some role attached to the specified bundle group + */ + boolean canViewBundleGroup(Subject subject, int bundleGroupId); + + /** * Returns true if the current user possesses either: 1) the specified resource permission for the specified * resource, or 2) the global MANAGE_INVENTORY permission which, by definition, gives full access to the inventory * (all resources and all groups) @@ -244,7 +266,7 @@ public interface AuthorizationManagerLocal { * @return <code>true</code> if the given subject is considered the overlord subject */ boolean isOverlord(Subject subject); - + /** * Returns true if given subject is able to view given repo. * The subject is able to view a repo if it is public or if the subject is the owner of the repo @@ -255,7 +277,7 @@ public interface AuthorizationManagerLocal { * @return true if subject is able to view the repo, false otherwise */ boolean canViewRepo(Subject subject, int repoId); - + /** * Returns true if given subject is able to update given repo. 
* The subject is able to update a repo if it is owned by the subject diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/RoleManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/RoleManagerBean.java index aeaf597..96d44de 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/RoleManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/RoleManagerBean.java @@ -841,7 +841,7 @@ public class RoleManagerBean implements RoleManagerLocal, RoleManagerRemote { throw new IllegalArgumentException("Tried to remove role[" + roleId + "] from BundleGroup[" + bundleGroupId + "], but role was not found"); } - bundleGroup.removeRole(doomedRole); + doomedRole.removeBundleGroup(bundleGroup); } }
@@ -893,7 +893,7 @@ public class RoleManagerBean implements RoleManagerLocal, RoleManagerRemote { throw new IllegalArgumentException("Tried to add role[" + roleId + "] to bundleGroup[" + bundleGroupId + "], but role was not found"); } - bundleGroup.addRole(role); + role.addBundleGroup(bundleGroup); } }
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerBean.java index bc85e6d..33c119e 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerBean.java @@ -259,13 +259,14 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot Bundle bundle = new Bundle(name, bundleType, repo, packageType); bundle.setDescription(description); bundle.setPackageType(packageType); - if (null != bundleGroup) { - bundle.addBundleGroup(bundleGroup); - }
log.info("Creating bundle: " + bundle); entityManager.persist(bundle);
+ if (null != bundleGroup) { + bundleGroup.addBundle(bundle); + } + return bundle; }
@@ -511,6 +512,8 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot throw new IllegalArgumentException("Invalid bundleId: " + bundleId); }
+ checkCreateBundleVersionAuthz(subject, bundleId); + // parse the recipe (validation occurs here) and get the config def and list of files BundleType bundleType = bundle.getBundleType(); RecipeParseResults results; @@ -1784,7 +1787,7 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot
if (!authorizationManager.hasGlobalPermission(subject, Permission.VIEW_BUNDLES)) { generator.setAuthorizationBundleFragment(CriteriaQueryGenerator.AuthorizationTokenType.BUNDLE, - subject.getId()); + subject.getId(), null); }
CriteriaQueryRunner<BundleWithLatestVersionComposite> queryRunner = new CriteriaQueryRunner<BundleWithLatestVersionComposite>( @@ -1824,6 +1827,11 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot entityManager.flush(); }
+ // remove bundle from relevant any assigned bundle groups + for (BundleGroup bg : bundle.getBundleGroups()) { + bg.removeBundle(bundle); + } + // we need to whack the Repo once the Bundle no longer refers to it Repo bundleRepo = bundle.getRepo();
@@ -2004,7 +2012,7 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot
// remove from any roles for (Role r : bundleGroup.getRoles()) { - bundleGroup.removeRole(r); + r.removeBundleGroup(bundleGroup); }
bundleGroup = entityManager.merge(bundleGroup); @@ -2036,7 +2044,7 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot throw new IllegalArgumentException("BundleGroup does not exist for bundleGroupId [" + bundleGroupId + "]"); }
- checkAssignBundleGroupAuthz(subject, bundleGroupId, bundleIds); + checkUnassignBundleGroupAuthz(subject, bundleGroupId, bundleIds);
for (int bundleId : bundleIds) { Bundle bundle = entityManager.find(Bundle.class, bundleId); @@ -2074,7 +2082,7 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
if (hasGlobalCreateBundles) { - if (authorizationManager.hasBundleGroupPermission(subject, Permission.VIEW_BUNDLES_IN_GROUP, bundleGroupId)) { + if (authorizationManager.canViewBundleGroup(subject, bundleGroupId)) { return; } } else { @@ -2116,7 +2124,7 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
if (hasGlobalCreateBundles) { - if (authorizationManager.hasBundlePermission(subject, Permission.VIEW_BUNDLES_IN_GROUP, bundleId)) { + if (authorizationManager.canViewBundle(subject, bundleId)) { return; } } else { @@ -2173,7 +2181,7 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot throw new IllegalArgumentException("Invalid bundleId: [" + bundleId + "]"); }
- if (!authorizationManager.hasBundlePermission(subject, Permission.VIEW_BUNDLES_IN_GROUP, bundleId)) { + if (!authorizationManager.canViewBundle(subject, bundleId)) { String msg = "Subject [" + subject.getName() + "] requires either Global.VIEW_BUNDLES or BundleGroup.VIEW_BUNDLES_IN_GROUP to assign bundle [" + bundleId + "] to bundle group [" + bundleGroupId + "]"; @@ -2185,6 +2193,58 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
/** + * Requires VIEW permission for the relevant bundles and either: + * - Global.DELETE_BUNDLES + * - BundleGroup.DELETE_BUNDLES_FROM_GROUP or BundleGroup.UNASSIGN_BUNDLES_FROM_GROUP for the relevant bundle group + * + * @param subject + * @param bundleGroupId an existing bundle group + * @param bundleIds existing bundles + * @throws PermissionException + */ + private void checkUnassignBundleGroupAuthz(Subject subject, int bundleGroupId, int[] bundleIds) + throws PermissionException { + + Set<Permission> globalPerms = authorizationManager.getExplicitGlobalPermissions(subject); + boolean hasGlobalDeleteBundles = globalPerms.contains(Permission.DELETE_BUNDLES); + boolean hasGlobalViewBundles = globalPerms.contains(Permission.VIEW_BUNDLES); + + if (hasGlobalDeleteBundles && hasGlobalViewBundles) { + return; + } + + boolean hasBundleGroupDelete = hasGlobalDeleteBundles + || authorizationManager.hasBundleGroupPermission(subject, Permission.DELETE_BUNDLES_FROM_GROUP, + bundleGroupId); + boolean hasBundleGroupUnassign = hasBundleGroupDelete + || authorizationManager.hasBundleGroupPermission(subject, Permission.UNASSIGN_BUNDLES_FROM_GROUP, + bundleGroupId); + + if (!hasBundleGroupUnassign) { + String msg = "Subject [" + + subject.getName() + + "] requires one of Global.DELETE_BUNDLES, BundleGroup.DELETE_BUNDLES_FROM_GROUP, or BundleGroup.UNASSIGN_BUNDLES_FROM_GROUP to unassign a bundle from bundle group [" + + bundleGroupId + "]."; + throw new PermissionException(msg); + } + + for (int bundleId : bundleIds) { + if (bundleId <= 0) { + throw new IllegalArgumentException("Invalid bundleId: [" + bundleId + "]"); + } + + if (!authorizationManager.canViewBundle(subject, bundleId)) { + String msg = "Subject [" + subject.getName() + + "] requires either Global.VIEW_BUNDLES or BundleGroup.VIEW_BUNDLES_IN_GROUP to unassign bundle [" + + bundleId + "] from bundle group [" + bundleGroupId + "]"; + throw new PermissionException(msg); + } + } + + return; + } + + /** + * Required Permissions: 
Either: * - Global.DEPLOY_BUNDLES and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) * - Resource.DEPLOY_BUNDLES_TO_GROUP and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) @@ -2210,8 +2270,7 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot
boolean hasResourceGroupDeploy = hasGlobalDeployBundles || authorizationManager.hasGroupPermission(subject, Permission.DEPLOY_BUNDLES_TO_GROUP, resourceGroupId); - boolean hasBundleView = hasGlobalViewBundles - || authorizationManager.hasBundlePermission(subject, Permission.VIEW_BUNDLES_IN_GROUP, bundleId); + boolean hasBundleView = hasGlobalViewBundles || authorizationManager.canViewBundle(subject, bundleId);
if (!(hasResourceGroupDeploy && hasBundleView)) { String msg = "Subject [" + subject.getName() @@ -2248,7 +2307,7 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
if (hasGlobalDeleteBundles) { - if (authorizationManager.hasBundlePermission(subject, Permission.VIEW_BUNDLES_IN_GROUP, bundleId)) { + if (authorizationManager.canViewBundle(subject, bundleId)) { return; } } else { diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerLocal.java index f96d356..6df2610 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerLocal.java @@ -112,9 +112,12 @@ public interface BundleManagerLocal extends BundleManagerRemote {
/** * Internal use only, test entry point - * </p> - * This method performs NO AUTHZ! - * </p> + * <pre> + * Required Permissions: Either: + * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES + * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * </pre> * @param subject user that must have proper permissions * @param bundleId the bundle for which this will be the next version * @param name not null or empty
commit 0d9fb46045b844ef4f2a4c921fbe461c37c09c4e Author: Thomas Segismont tsegismo@redhat.com Date: Fri Jul 26 14:50:07 2013 +0200
Bug 886119 - JON is using JNDI when referring to child Datasource resource instead of specified Resource name
Added message in the first step of the resource creation wizard to indicate that some plugin implementations might not be able to give the resource the supplied name when it gets discovered.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/factory/ResourceFactoryInfoStep.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/factory/ResourceFactoryInfoStep.java index 31dc10d..89b6e30 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/factory/ResourceFactoryInfoStep.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/factory/ResourceFactoryInfoStep.java @@ -1,6 +1,6 @@ /* * RHQ Management Platform - * Copyright (C) 2005-2010 Red Hat, Inc. + * Copyright (C) 2005-2013 Red Hat, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify @@ -13,11 +13,13 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ package org.rhq.enterprise.gui.coregui.client.inventory.resource.factory;
+import static java.lang.Boolean.FALSE; + import java.util.ArrayList; import java.util.LinkedHashMap; import java.util.List; @@ -30,6 +32,7 @@ import com.smartgwt.client.widgets.Canvas; import com.smartgwt.client.widgets.form.DynamicForm; import com.smartgwt.client.widgets.form.fields.FormItem; import com.smartgwt.client.widgets.form.fields.SelectItem; +import com.smartgwt.client.widgets.form.fields.StaticTextItem; import com.smartgwt.client.widgets.form.fields.TextItem; import com.smartgwt.client.widgets.form.fields.events.ChangedEvent; import com.smartgwt.client.widgets.form.fields.events.ChangedHandler; @@ -99,6 +102,13 @@ public class ResourceFactoryInfoStep extends AbstractWizardStep { } }); formItems.add(nameItem); + + StaticTextItem commentItem = new StaticTextItem("resourceNameComment"); + commentItem.setWidth(300); + commentItem.setTextBoxStyle("InlineInfo"); + commentItem.setShowTitle(FALSE); + commentItem.setValue(MSG.widget_resourceFactoryWizard_nameComment()); + formItems.add(commentItem); }
if (null != versionPrompt) { diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties index 7f2e7d4..58d0f8e 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties @@ -2232,6 +2232,7 @@ widget_resourceFactoryWizard_importWizardWindowTitle = Resource Import Wizard widget_resourceFactoryWizard_infoStepName = Resource Information widget_resourceFactoryWizard_infoStep_loadFail = Failed to get available Architectures widget_resourceFactoryWizard_namePrompt = New Resource Name +widget_resourceFactoryWizard_nameComment = Not all management plug-ins or their managed resources allow the agent to set the name for a new resource. This value will only be used by agent plug-ins that support the capability. For plug-ins that do not support the capability, the resource may receive a generic or different name when it is discovered. widget_resourceFactoryWizard_templatePrompt = Connection Settings Template widget_resourceFactoryWizard_timeoutFailure = Timed out widget_resourceFactoryWizard_timeoutHelp = A timeout duration that if specified will override the default timeout for child resource creation (on the {0} Agent). The default timeout is set to 60 seconds. A higher value may be useful for particularly long create actions, like deployment of a large application. Usually used if a previous attempt suffered a timeout failure. Note that if there is a timeout failure, it is still possible that the resource deployment succeeded. In the event of a timeout you may want to execute a discovery scan before attempting to redeploy the resource. 
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties index 941eb60..91b959e 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties @@ -2242,6 +2242,7 @@ widget_resourceFactoryWizard_importWizardWindowTitle = Průvodce importu zdroje widget_resourceFactoryWizard_infoStepName = Informace o zdroji widget_resourceFactoryWizard_infoStep_loadFail = NepodaÅilo se zÃskat dostupné architektury widget_resourceFactoryWizard_namePrompt = Nové jméno zdroje +#widget_resourceFactoryWizard_nameComment = Not all management plug-ins or their managed resources allow the agent to set the name for a new resource. This value will only be used by agent plug-ins that support the capability. For plug-ins that do not support the capability, the resource may receive a generic or different name when it is discovered. widget_resourceFactoryWizard_templatePrompt = Å ablona pro nastavenà pÅipojenà ##widget_resourceFactoryWizard_timeoutFailure = Timed out ##widget_resourceFactoryWizard_timeoutHelp = A timeout duration that if specified will override the default timeout for child resource creation (on the {0} Agent). The default timeout is set to 60 seconds. A higher value may be useful for particularly long create actions, like deployment of a large application. Usually used if a previous attempt suffered a timeout failure. Note that if there is a timeout failure, it is still possible that the resource deployment succeeded. In the event of a timeout you may want to execute a discovery scan before attempting to redeploy the resource. 
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties index e30a19e..cf68680 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties @@ -2067,6 +2067,7 @@ widget_resourceFactoryWizard_importWizardWindowTitle = Wizard zum Import von Res widget_resourceFactoryWizard_infoStepName = Information ÃŒber die Ressource widget_resourceFactoryWizard_infoStep_loadFail = Konnte die verfÃŒgbaren Architekturen nicht ermitteln widget_resourceFactoryWizard_namePrompt = Name der neuen Ressource +#widget_resourceFactoryWizard_nameComment = Not all management plug-ins or their managed resources allow the agent to set the name for a new resource. This value will only be used by agent plug-ins that support the capability. For plug-ins that do not support the capability, the resource may receive a generic or different name when it is discovered. widget_resourceFactoryWizard_templatePrompt = Vorlage fÃŒr die Verbindungseinstellungen ##widget_resourceFactoryWizard_timeoutHelp = A timeout duration that if specified will override the default timeout for child resource creation (on the {0} Agent). The default timeout is set to 60 seconds. A higher value may be useful for particularly long create actions, like deployment of a large application. Usually used if a previous attempt suffered a timeout failure. Note that if there is a timeout failure, it is still possible that the resource deployment succeeded. In the event of a timeout you may want to execute a discovery scan before attempting to redeploy the resource. 
widget_resourceFactoryWizard_uploadFailure = Konnte die Datei nicht hochladen diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties index bed024b..abe264e 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties @@ -2213,6 +2213,7 @@ widget_resourceFactoryWizard_importWizardWindowTitle = ãªãœãŒã¹ã€ã³ã㌠widget_resourceFactoryWizard_infoStepName = ãªãœãŒã¹æ å ± widget_resourceFactoryWizard_infoStep_loadFail = ã¢ãŒããã¯ãã£ãŒãå©çšå¯èœã«ããã®ã«å€±æããŸãã widget_resourceFactoryWizard_namePrompt = æ°èŠãªãœãŒã¹å +#widget_resourceFactoryWizard_nameComment = Not all management plug-ins or their managed resources allow the agent to set the name for a new resource. This value will only be used by agent plug-ins that support the capability. For plug-ins that do not support the capability, the resource may receive a generic or different name when it is discovered. widget_resourceFactoryWizard_templatePrompt = ã³ãã¯ã·ã§ã³èšå®ãã³ãã¬ãŒã ###widget_resourceFactoryWizard_timeoutFailure = Timed out ##widget_resourceFactoryWizard_timeoutHelp = A timeout duration that if specified will override the default timeout for child resource creation (on the {0} Agent). The default timeout is set to 60 seconds. A higher value may be useful for particularly long create actions, like deployment of a large application. Usually used if a previous attempt suffered a timeout failure. Note that if there is a timeout failure, it is still possible that the resource deployment succeeded. In the event of a timeout you may want to execute a discovery scan before attempting to redeploy the resource. 
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties index 416f4b4..c9081f8 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties @@ -1822,6 +1822,7 @@ widget_resourceFactoryWizard_importFailure2 = ìëìŒë¡ 늬ìì€ë¥Œ ê°ì ž widget_resourceFactoryWizard_importWizardWindowTitle = 늬ìì€ ê°ì žì€êž° ë§ë²ì¬ widget_resourceFactoryWizard_infoStepName = 늬ìì€ ì 볎 widget_resourceFactoryWizard_namePrompt = ì 늬ìì€ ìŽëŠ +#widget_resourceFactoryWizard_nameComment = Not all management plug-ins or their managed resources allow the agent to set the name for a new resource. This value will only be used by agent plug-ins that support the capability. For plug-ins that do not support the capability, the resource may receive a generic or different name when it is discovered. 
widget_resourceFactoryWizard_timeoutFailure = íì ìì widget_resourceFactoryWizard_versionPrompt = íší€ì§ ë²ì widget_resourceSelector_pleaseSelectMultipleResource = íë ìŽìì 늬ìì€ë¥Œ ì ííììì€ diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties index 47cac36..566dcf9 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties @@ -2328,6 +2328,7 @@ widget_resourceFactoryWizard_importWizardWindowTitle = Assistente para Importa\u widget_resourceFactoryWizard_infoStepName = Informa\u00E7\u00E3o do Recurso widget_resourceFactoryWizard_infoStep_loadFail = Falha ao recuperar Arquiteturas dispon\u00EDveis. widget_resourceFactoryWizard_namePrompt = Novo Nome do Recurso +#widget_resourceFactoryWizard_nameComment = Not all management plug-ins or their managed resources allow the agent to set the name for a new resource. This value will only be used by agent plug-ins that support the capability. For plug-ins that do not support the capability, the resource may receive a generic or different name when it is discovered. widget_resourceFactoryWizard_templatePrompt = Modelo de Propriedades para Conex\u00E3o ##widget_resourceFactoryWizard_timeoutFailure = Timed out ##widget_resourceFactoryWizard_timeoutHelp = A timeout duration that if specified will override the default timeout for child resource creation (on the {0} Agent). The default timeout is set to 60 seconds. A higher value may be useful for particularly long create actions, like deployment of a large application. Usually used if a previous attempt suffered a timeout failure. 
Note that if there is a timeout failure, it is still possible that the resource deployment succeeded. In the event of a timeout you may want to execute a discovery scan before attempting to redeploy the resource. diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties index 99f34f9..b6f53b1 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties @@ -2154,6 +2154,7 @@ #widget_resourceFactoryWizard_infoStepName = Resource Information #widget_resourceFactoryWizard_infoStep_loadFail = Failed to get available Architectures #widget_resourceFactoryWizard_namePrompt = New Resource Name +#widget_resourceFactoryWizard_nameComment = Not all management plug-ins or their managed resources allow the agent to set the name for a new resource. This value will only be used by agent plug-ins that support the capability. For plug-ins that do not support the capability, the resource may receive a generic or different name when it is discovered. #widget_resourceFactoryWizard_templatePrompt = Connection Settings Template #widget_resourceFactoryWizard_timeoutHelp = A timeout duration. If specified will override the default timeout for child resource creation (on the {0} Agent). The default timeout is set to 60 seconds. Useful for particularly long create actions, like deployment of a large application. Usually used if a previous attempt suffered a timeout failure. #widget_resourceFactoryWizard_timeoutFailure = Timed out. Note that it is possible that the deployment may still succeed. 
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties index 69ca0f6..2ea7f25 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties @@ -2211,7 +2211,8 @@ widget_resourceFactoryWizard_importWizardWindowTitle = \u5bfc\u5165\u8d44\u6e90\ widget_resourceFactoryWizard_infoStepName = \u8d44\u6e90\u6d88\u606f widget_resourceFactoryWizard_infoStep_loadFail = \u65e0\u6cd5\u53d6\u5f97\u67b6\u6784 widget_resourceFactoryWizard_namePrompt = \u65b0\u8d44\u6e90\u540d -widget_resourceFactoryWizard_templatePrompt = \u8fde\u63a5\u8bbe\u7f6e\u6a21\u677f +#widget_resourceFactoryWizard_nameComment = Not all management plug-ins or their managed resources allow the agent to set the name for a new resource. This value will only be used by agent plug-ins that support the capability. For plug-ins that do not support the capability, the resource may receive a generic or different name when it is discovered. +#widget_resourceFactoryWizard_templatePrompt = \u8fde\u63a5\u8bbe\u7f6e\u6a21\u677f #widget_resourceFactoryWizard_timeoutHelp = A timeout duration that if specified will override the default timeout for child resource creation (on the {0} Agent). The default timeout is set to 60 seconds. A higher value may be useful for particularly long create actions, like deployment of a large application. Usually used if a previous attempt suffered a timeout failure. Note that if there is a timeout failure, it is still possible that the resource deployment succeeded. In the event of a timeout you may want to execute a discovery scan before attempting to redeploy the resource. 
#widget_resourceFactoryWizard_timeoutFailure = Timed out widget_resourceFactoryWizard_uploadFailure = \u4e0a\u4f20\u6587\u4ef6\u5931\u8d25 diff --git a/modules/enterprise/gui/coregui/src/main/webapp/CoreGUI.css b/modules/enterprise/gui/coregui/src/main/webapp/CoreGUI.css index 6f4d6ca..393ffb4 100644 --- a/modules/enterprise/gui/coregui/src/main/webapp/CoreGUI.css +++ b/modules/enterprise/gui/coregui/src/main/webapp/CoreGUI.css @@ -372,6 +372,10 @@ a.menuBar, a.menuBar:link, a.menuBar:visited, a.menuBar:hover { color: #C22; }
+.InlineInfo { + color: #00AC3D; /* medium green */ +} + .log-panel { z-index: 9999999 !important; }
commit 917414abdf9bbe8e34844a3c92e491cd6f1711a4 Author: Thomas Segismont tsegismo@redhat.com Date: Fri Jul 26 19:29:31 2013 +0200
Bug 913764 - [as7] Version identifier of EAP resource changes depending on run state of EAP
The version was determined with a call to the http management interface. Now it's all based on file inspection
diff --git a/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/BaseProcessDiscovery.java b/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/BaseProcessDiscovery.java index cd9f276..876b1ac 100644 --- a/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/BaseProcessDiscovery.java +++ b/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/BaseProcessDiscovery.java @@ -18,6 +18,9 @@ */ package org.rhq.modules.plugins.jbossas7;
+import static org.rhq.core.util.StringUtil.arrayToString; +import static org.rhq.core.util.StringUtil.isNotBlank; + import java.io.File; import java.io.FileInputStream; import java.io.FileNotFoundException; @@ -212,20 +215,7 @@ public abstract class BaseProcessDiscovery implements ResourceDiscoveryComponent HostPort hostPort = hostConfig.getDomainControllerHostPort(commandLine); String name = buildDefaultResourceName(hostPort, managementHostPort, productType); String description = buildDefaultResourceDescription(hostPort, productType); - - String version; - String versionFromHomeDir = determineServerVersionFromHomeDir(homeDir); - if (productType == JBossProductType.AS) { - version = versionFromHomeDir; - } else { - ProductInfo productInfo = new ProductInfo(managementHostPort.host, serverPluginConfig.getUser(), - serverPluginConfig.getPassword(), managementHostPort.port); - productInfo = productInfo.getFromRemote(); - String productVersion = (productInfo.fromRemote) ? productInfo.productVersion : versionFromHomeDir; - // TODO: Grab the product version from the product info properties file, so we aren't relying on connecting - // to the server to obtain it. - version = productType.SHORT_NAME + " " + productVersion; - } + String version = getVersion(homeDir, productType);
return new DiscoveredResourceDetails(discoveryContext.getResourceType(), key, name, version, description, pluginConfig, process); @@ -605,6 +595,52 @@ public abstract class BaseProcessDiscovery implements ResourceDiscoveryComponent } }
+ private String getVersion(File homeDir, JBossProductType productType) { + // Products should have a version.txt file at root dir + File versionFile = new File(homeDir, "version.txt"); + String version = getProductVersionInFile(versionFile, " - Version ", productType); + if (version == null && productType != JBossProductType.AS && productType != JBossProductType.WILDFLY8) { + // No version.txt file. Try modules/system/layers/base/org/jboss/as/product/slot/dir/META-INF/MANIFEST.MF + String layeredProductManifestFilePath = arrayToString( + new String[] { "modules", "system", "layers", "base", "org", "jboss", "as", "product", + productType.SHORT_NAME.toLowerCase(), "dir", "META-INF", "MANIFEST.MF" }, File.separatorChar); + File productManifest = new File(homeDir, layeredProductManifestFilePath); + version = getProductVersionInFile(productManifest, "JBoss-Product-Release-Version: ", productType); + if (version == null) { + // Try modules/org/jboss/as/product/slot/dir/META-INF/MANIFEST.MF + String productManifestFilePath = arrayToString(new String[] { "modules", "org", "jboss", "as", + "product", productType.SHORT_NAME.toLowerCase(), "dir", "META-INF", "MANIFEST.MF" }, + File.separatorChar); + productManifest = new File(homeDir, productManifestFilePath); + version = getProductVersionInFile(productManifest, "JBoss-Product-Release-Version: ", productType); + } + } + if (version == null) { + // Fallback + version = determineServerVersionFromHomeDir(homeDir); + } + return version; + } + + private String getProductVersionInFile(File file, String versionPrefix, JBossProductType productType) { + if (!file.exists() || file.isDirectory()) { + return null; + } + try { + String versionLine = FileUtils.findString(file.getAbsolutePath(), versionPrefix); + if (isNotBlank(versionLine)) { + return new StringBuilder(productType.SHORT_NAME).append(" ") + .append(versionLine.substring(versionLine.lastIndexOf(versionPrefix) + versionPrefix.length())) + .toString(); + } + } catch 
(IOException e) { + if (log.isDebugEnabled()) { + log.debug("Could not read file " + file.getAbsolutePath(), e); + } + } + return null; + } + protected String determineServerVersionFromHomeDir(File homeDir) { String version; String homeDirName = homeDir.getName();
commit b93245a5c4087a12666baf394af64930d34acb99 Author: Stefan Negrea snegrea@redhat.com Date: Fri Aug 2 18:15:28 2013 -0500
Remove dampening from the alert definition. Dampening not needed for discrete operation executions.
diff --git a/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java b/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java index 2188020..840477c 100644 --- a/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java +++ b/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java @@ -386,10 +386,7 @@ public class AlertDefinitionServerPluginComponent implements ServerPluginCompone snapshotFailureCondition.setOption(OperationRequestStatus.FAILURE.name()); newTemplate.addCondition(snapshotFailureCondition);
- AlertDampening dampener = new AlertDampening(AlertDampening.Category.PARTIAL_COUNT); - dampener.setPeriod(15); - dampener.setPeriodUnits(TimeUnits.MINUTES); - dampener.setValue(10); + AlertDampening dampener = new AlertDampening(AlertDampening.Category.NONE); newTemplate.setAlertDampening(dampener);
int newTemplateId = alertTemplateManager.createAlertTemplate(subjectManager.getOverlord(), newTemplate,
commit f0ed14645f583dfa58cc8581b4ea847a8e8c032d Author: Stefan Negrea snegrea@redhat.com Date: Fri Aug 2 16:59:09 2013 -0500
[BZ 990245] Add an extra null check for safety for the child resource set.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index c119df0..f17f006 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -34,6 +34,7 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Queue; +import java.util.Set;
import javax.ejb.EJB; import javax.ejb.Stateless; @@ -686,8 +687,11 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN resourceIdsWithAlertDefinitions.add(resource.getId()); }
- for (Resource child : resource.getChildResources()) { - unvisitedResources.add(child); + Set<Resource> childResources = resource.getChildResources(); + if (childResources != null) { + for (Resource child : childResources) { + unvisitedResources.add(child); + } } }
commit 7c3460d3aa21ca871c3676a84b482230d7d298e8 Author: Stefan Negrea snegrea@redhat.com Date: Fri Aug 2 16:54:15 2013 -0500
Add an alert template for take snapshot operation failure for the storage service.
diff --git a/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java b/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java index 37d10a8..2188020 100644 --- a/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java +++ b/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java @@ -40,6 +40,7 @@ import org.rhq.core.domain.configuration.PropertySimple; import org.rhq.core.domain.criteria.AlertDefinitionCriteria; import org.rhq.core.domain.criteria.ResourceTypeCriteria; import org.rhq.core.domain.measurement.MeasurementDefinition; +import org.rhq.core.domain.operation.OperationRequestStatus; import org.rhq.core.domain.resource.ResourceType; import org.rhq.enterprise.server.alert.AlertDefinitionManagerLocal; import org.rhq.enterprise.server.alert.AlertTemplateManagerLocal; @@ -64,10 +65,12 @@ public class AlertDefinitionServerPluginComponent implements ServerPluginCompone private static final String DATA_DISK_USED_PERCENTAGE_METRIC_NAME = "Calculated.DataDiskUsedPercentage"; private static final String TOTAL_DISK_USED_PERCENTAGE_METRIC_NAME = "Calculated.TotalDiskUsedPercentage"; private static final String FREE_DISK_TO_DATA_SIZE_RATIO_METRIC_NAME = "Calculated.FreeDiskToDataSizeRatio"; + private static final String TAKE_SNAPSHOT_OPERATION_NAME = "takeSnapshot";
static private final List<InjectedTemplate> injectedTemplates; static private final InjectedTemplate storageNodeHighHeapTemplate; static private final InjectedTemplate storageNodeHighDiskUsageTemplate; + static private final InjectedTemplate storageNodeSnapshotFailureTemplate;
static { storageNodeHighHeapTemplate = new InjectedTemplate( @@ -82,9 +85,16 @@ public class AlertDefinitionServerPluginComponent implements ServerPluginCompone "StorageNodeHighDiskUsageTemplate", // "An alert template to notify users of excessive heap use by an RHQ Storage Node. When fired please see documentation for the proper corrective action.");
+ storageNodeSnapshotFailureTemplate = new InjectedTemplate( + "RHQStorage", // + "StorageService", // + "StorageNodeSnapshotFailureTemplate", // + "An alert template to notify users when a snapshot operations fails for an RHQ Storage Node. When fired please see documentation for the proper corrective action."); + injectedTemplates = new ArrayList<InjectedTemplate>(); injectedTemplates.add(storageNodeHighHeapTemplate); injectedTemplates.add(storageNodeHighDiskUsageTemplate); + injectedTemplates.add(storageNodeSnapshotFailureTemplate); }
private ServerPluginContext context; @@ -227,6 +237,8 @@ public class AlertDefinitionServerPluginComponent implements ServerPluginCompone newAlertDefId = injectStorageNodeHighHeapTemplate(resourceType); } else if (storageNodeHighDiskUsageTemplate.equals(injectedAlertDef)) { newAlertDefId = injectStorageNodeHighDiskUsageTemplate(resourceType); + } else if (storageNodeSnapshotFailureTemplate.equals(injectedAlertDef)) { + newAlertDefId = injectStorageNodeSnapshotFailureTemplate(resourceType); }
adc.addFilterId(newAlertDefId); @@ -356,6 +368,36 @@ public class AlertDefinitionServerPluginComponent implements ServerPluginCompone return newTemplateId; }
+ private int injectStorageNodeSnapshotFailureTemplate(ResourceType resourceType) { + AlertTemplateManagerLocal alertTemplateManager = LookupUtil.getAlertTemplateManager(); + SubjectManagerLocal subjectManager = LookupUtil.getSubjectManager(); + + AlertDefinition newTemplate = new AlertDefinition(); + newTemplate.setName(storageNodeSnapshotFailureTemplate.getName()); + newTemplate.setResourceType(resourceType); + newTemplate.setPriority(AlertPriority.MEDIUM); + newTemplate.setConditionExpression(BooleanExpression.ANY); + newTemplate.setRecoveryId(0); + newTemplate.setEnabled(true); + + AlertCondition snapshotFailureCondition = new AlertCondition(); + snapshotFailureCondition.setCategory(AlertConditionCategory.CONTROL); + snapshotFailureCondition.setName(TAKE_SNAPSHOT_OPERATION_NAME); + snapshotFailureCondition.setOption(OperationRequestStatus.FAILURE.name()); + newTemplate.addCondition(snapshotFailureCondition); + + AlertDampening dampener = new AlertDampening(AlertDampening.Category.PARTIAL_COUNT); + dampener.setPeriod(15); + dampener.setPeriodUnits(TimeUnits.MINUTES); + dampener.setValue(10); + newTemplate.setAlertDampening(dampener); + + int newTemplateId = alertTemplateManager.createAlertTemplate(subjectManager.getOverlord(), newTemplate, + resourceType.getId()); + + return newTemplateId; + } + private static class InjectedTemplate { static public final String FIELD_PLUGIN_NAME = "plugin"; static public final String FIELD_RESOURCE_TYPE_NAME = "type";
commit 431f35d8ed804bc4e6ffce8fb2612985087d1958 Author: Stefan Negrea snegrea@redhat.com Date: Thu Aug 1 17:28:44 2013 -0500
[BZ 990245] Use an attached storage node entity rather than the detached one received from the remote interface.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 34e5ebd..c119df0 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -660,11 +660,16 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
@Override public Integer[] findResourcesWithAlertDefinitions(StorageNode storageNode) { - List<StorageNode> initialStorageNodes; + List<StorageNode> initialStorageNodes = getStorageNodes(); if (storageNode == null) { initialStorageNodes = getStorageNodes(); } else { - initialStorageNodes = Arrays.asList(storageNode); + int index = initialStorageNodes.indexOf(storageNode); + if (index >= 0) { + initialStorageNodes = Arrays.asList(initialStorageNodes.get(index)); + } else { + initialStorageNodes = new ArrayList<StorageNode>(); + } }
Queue<Resource> unvisitedResources = new LinkedList<Resource>();
commit 4d624b061398b84782c970bf5da587ea81ef0f7d Author: Jay Shaughnessy jshaughn@redhat.com Date: Fri Aug 2 15:25:52 2013 -0400
First authz test passing - fixed testing approach to use all slsbs and proper non-super-subject - fixed criteria bundle/bundleGroup auth token issues - fixed criteria filter override issues - fixed various bugs and added more supporting slsb methods - added some authz to bundle manager local methods where it seemed needed/useful - cleaned up RoleManagerLocal to extend the remote
diff --git a/modules/core/dbutils/pom.xml b/modules/core/dbutils/pom.xml index d7e2d65..360fdbc 100644 --- a/modules/core/dbutils/pom.xml +++ b/modules/core/dbutils/pom.xml @@ -17,7 +17,7 @@ <description>Database schema setup, upgrade and other utilities</description>
<properties> - <db.schema.version>2.134</db.schema.version> + <db.schema.version>2.135</db.schema.version> <rhq.ds.type-mapping>${rhq.test.ds.type-mapping}</rhq.ds.type-mapping> <rhq.ds.server-name>${rhq.test.ds.server-name}</rhq.ds.server-name> <rhq.ds.db-name>${rhq.test.ds.db-name}</rhq.ds.db-name> @@ -276,7 +276,7 @@ <script language="groovy"> import org.rhq.cassandra.schema.SchemaManager
- if (project.getProperty('dbsetup-upgrade') || project.getProperty('dbreset')) { + if (project.getProperty('dbsetup-upgrade') || project.getProperty('dbsetup')) { if (project.getProperty('storage-schema')) { if (project.getProperty('db') == 'dev') { self.log('PERFORMING STORAGE NODE SETUP TO LATEST SCHEMA') @@ -286,11 +286,6 @@
schemaManager = new SchemaManager(username, password, seeds)
- if (project.getProperty('dbreset') == 'true') { - self.log('Dropping schema') - schemaManager.drop() - } - self.log('Install schema') schemaManager.install() } else { diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/BundleCriteria.java b/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/BundleCriteria.java index 32f2f9d..2e34174 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/BundleCriteria.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/BundleCriteria.java @@ -26,6 +26,7 @@ import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement;
import org.rhq.core.domain.bundle.Bundle; +import org.rhq.core.domain.util.CriteriaUtils; import org.rhq.core.domain.util.PageOrdering;
/** @@ -64,8 +65,9 @@ public class BundleCriteria extends TaggedCriteria { filterOverrides.put("bundleTypeId", "bundleType.id = ?"); filterOverrides.put("bundleTypeName", "bundleType.name like ?"); filterOverrides.put("bundleGroupIds", "" // - + "id IN ( SELECT bg.bundle.id " // - + " FROM BundleGroup bg " // + + "id IN ( SELECT innerbundle.id " // + + " FROM Bundle innerbundle " // + + " JOIN innerbundle.bundleGroups bg" + " WHERE bg.id IN ( ? ) )"); filterOverrides.put("destinationIds", "" // + "id IN ( SELECT bd.bundle.id " // @@ -103,15 +105,8 @@ public class BundleCriteria extends TaggedCriteria { this.filterDescription = filterDescription; }
- /** Convenience routine calls addFilterBundleGroupIds */ - public void addFilterBundleGroupId(Integer filterBundleGroupId) { - List<Integer> ids = new ArrayList<Integer>(1); - ids.add(filterBundleGroupId); - this.addFilterBundleGroupIds(ids); - } - - public void addFilterBundleGroupIds(List<Integer> filterBundleGroupIds) { - this.filterBundleGroupIds = filterBundleGroupIds; + public void addFilterBundleGroupIds(Integer... filterBundleGroupIds) { + this.filterBundleGroupIds = CriteriaUtils.getListIgnoringNulls(filterBundleGroupIds); }
/** Convenience routine calls addFilterDestinationIds */ diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/BundleGroupCriteria.java b/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/BundleGroupCriteria.java index 88886d7..69ceea4 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/BundleGroupCriteria.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/BundleGroupCriteria.java @@ -18,7 +18,6 @@ */ package org.rhq.core.domain.criteria;
-import java.util.ArrayList; import java.util.List;
import javax.xml.bind.annotation.XmlAccessType; @@ -26,6 +25,7 @@ import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement;
import org.rhq.core.domain.bundle.BundleGroup; +import org.rhq.core.domain.util.CriteriaUtils; import org.rhq.core.domain.util.PageOrdering;
/** @@ -39,9 +39,7 @@ public class BundleGroupCriteria extends Criteria {
private String filterName; private String filterDescription; - private Integer filterBundleId; private List<Integer> filterBundleIds; // requires overrides - private Integer filterRoleId; private List<Integer> filterRoleIds; // requires overrides
private boolean fetchBundles; @@ -52,12 +50,14 @@ public class BundleGroupCriteria extends Criteria {
public BundleGroupCriteria() { filterOverrides.put("bundleIds", "" // - + "id IN ( SELECT b.id " // + + "id IN ( SELECT bg.id " // + " FROM Bundle b " // + + " JOIN b.bundleGroups bg" + " WHERE b.id IN ( ? ) )"); filterOverrides.put("roleIds", "" // - + "id IN ( SELECT r.id " // + + "id IN ( SELECT bg.id " // + " FROM Role r " // + + " JOIN r.bundleGroups bg" + " WHERE r.id IN ( ? ) )"); }
@@ -74,15 +74,12 @@ public class BundleGroupCriteria extends Criteria { this.filterDescription = filterDescription; }
- /** Convenience routine calls addFilterBundleVersionIds */ - public void addFilterBundleId(Integer filterBundleId) { - List<Integer> ids = new ArrayList<Integer>(1); - ids.add(filterBundleId); - this.addFilterBundleIds(ids); + public void addFilterBundleIds(Integer... filterBundleIds) { + this.filterBundleIds = CriteriaUtils.getListIgnoringNulls(filterBundleIds); }
- public void addFilterBundleIds(List<Integer> filterBundleIds) { - this.filterBundleIds = filterBundleIds; + public void addFilterRoleIds(Integer... filterRoleIds) { + this.filterRoleIds = CriteriaUtils.getListIgnoringNulls(filterRoleIds); }
public void fetchBundles(boolean fetchBundles) { diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/BundleManagerBeanTest.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/BundleManagerBeanTest.java index c4d9a79..117d2df 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/BundleManagerBeanTest.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/BundleManagerBeanTest.java @@ -39,11 +39,14 @@ import org.hibernate.LazyInitializationException; import org.testng.annotations.Test;
import org.rhq.core.domain.auth.Subject; +import org.rhq.core.domain.authz.Permission; +import org.rhq.core.domain.authz.Role; import org.rhq.core.domain.bundle.Bundle; import org.rhq.core.domain.bundle.BundleDeployment; import org.rhq.core.domain.bundle.BundleDeploymentStatus; import org.rhq.core.domain.bundle.BundleDestination; import org.rhq.core.domain.bundle.BundleFile; +import org.rhq.core.domain.bundle.BundleGroup; import org.rhq.core.domain.bundle.BundleResourceDeployment; import org.rhq.core.domain.bundle.BundleResourceDeploymentHistory; import org.rhq.core.domain.bundle.BundleType; @@ -65,8 +68,11 @@ import org.rhq.core.domain.content.Repo; import org.rhq.core.domain.criteria.BundleCriteria; import org.rhq.core.domain.criteria.BundleDeploymentCriteria; import org.rhq.core.domain.criteria.BundleFileCriteria; +import org.rhq.core.domain.criteria.BundleGroupCriteria; import org.rhq.core.domain.criteria.BundleResourceDeploymentCriteria; import org.rhq.core.domain.criteria.BundleVersionCriteria; +import org.rhq.core.domain.criteria.RoleCriteria; +import org.rhq.core.domain.criteria.SubjectCriteria; import org.rhq.core.domain.resource.Agent; import org.rhq.core.domain.resource.InventoryStatus; import org.rhq.core.domain.resource.Resource; @@ -78,6 +84,7 @@ import org.rhq.core.domain.util.PageOrdering; import org.rhq.core.util.file.FileUtil; import org.rhq.core.util.stream.StreamUtil; import org.rhq.core.util.updater.DeploymentProperties; +import org.rhq.enterprise.server.authz.PermissionException; import org.rhq.enterprise.server.plugin.pc.MasterServerPluginContainer; import org.rhq.enterprise.server.resource.ResourceManagerLocal; import org.rhq.enterprise.server.test.AbstractEJB3Test; @@ -98,10 +105,13 @@ public class BundleManagerBeanTest extends AbstractEJB3Test {
private static final boolean TESTS_ENABLED = true;
- private static final String TEST_PREFIX = "bundletest"; + private static final String TEST_PREFIX = BundleManagerBeanTest.class.getSimpleName(); private static final String TEST_BUNDLE_DESTBASEDIR_PROP = TEST_PREFIX + ".destBaseDirProp"; private static final String TEST_BUNDLE_DESTBASEDIR_PROP_VALUE = TEST_PREFIX + "/destBaseDir"; + private static final String TEST_BUNDLE_GROUP_NAME = TEST_PREFIX + ".bundleGroup"; private static final String TEST_DESTBASEDIR_NAME = TEST_PREFIX + ".destBaseDirName"; + private static final String TEST_ROLE_NAME = TEST_PREFIX + ".role"; + private static final String TEST_USER_NAME = TEST_PREFIX + ".user";
private BundleManagerLocal bundleManager; private ResourceManagerLocal resourceManager; @@ -143,6 +153,21 @@ public class BundleManagerBeanTest extends AbstractEJB3Test {
private void cleanupDatabase() { try { + RoleCriteria roleCriteria = new RoleCriteria(); + roleCriteria.addFilterName(TEST_ROLE_NAME); + List<Role> testRoles = LookupUtil.getRoleManager().findRolesByCriteria(overlord, roleCriteria); + for (Role testRole : testRoles) { + LookupUtil.getRoleManager().deleteRoles(overlord, new int[] { testRole.getId() }); + } + + SubjectCriteria subjectCriteria = new SubjectCriteria(); + subjectCriteria.addFilterName(TEST_USER_NAME); + List<Subject> testSubjects = LookupUtil.getSubjectManager().findSubjectsByCriteria(overlord, + subjectCriteria); + for (Subject testSubject : testSubjects) { + LookupUtil.getSubjectManager().deleteSubjects(overlord, new int[] { testSubject.getId() }); + } + getTransactionManager().begin();
Query q; @@ -232,6 +257,13 @@ public class BundleManagerBeanTest extends AbstractEJB3Test { em.remove(em.getReference(Repo.class, ((Repo) removeMe).getId())); }
+ // remove bundle groups no longer referenced by bundles + q = em.createQuery("SELECT bg FROM BundleGroup bg WHERE bg.name LIKE '" + TEST_PREFIX + "%'"); + doomed = q.getResultList(); + for (Object removeMe : doomed) { + em.remove(em.getReference(BundleGroup.class, ((BundleGroup) removeMe).getId())); + } + // remove Resource Groups left over from test deployments freeing up test resources q = em.createQuery("SELECT rg FROM ResourceGroup rg WHERE rg.name LIKE '" + TEST_PREFIX + "%'"); doomed = q.getResultList(); @@ -755,7 +787,7 @@ public class BundleManagerBeanTest extends AbstractEJB3Test { public void testAddBundleFilesToDifferentBundles() throws Exception { // create a bundle type to use for both bundles. BundleType bt = createBundleType("one"); - Bundle b1 = createBundle("one", bt); + Bundle b1 = createBundle(overlord, "one", bt, 0); assertNotNull(b1); BundleVersion bv1 = createBundleVersion(b1.getName(), "1.0", b1); assertNotNull(bv1); @@ -763,7 +795,7 @@ public class BundleManagerBeanTest extends AbstractEJB3Test { null, "Bundle #1 File # 1".getBytes());
// create a second bundle but create file of the same name as above - Bundle b2 = createBundle("two", bt); + Bundle b2 = createBundle(overlord, "two", bt, 0); assertNotNull(b2); BundleVersion bv2 = createBundleVersion(b2.getName(), "1.0", b2); assertNotNull(bv2); @@ -860,9 +892,10 @@ public class BundleManagerBeanTest extends AbstractEJB3Test { int size = brd.getBundleResourceDeploymentHistories().size(); assertTrue(size > 0); String auditMessage = "BundleTest-Message"; - bundleManager.addBundleResourceDeploymentHistoryInNewTrans(overlord, brd.getId(), new BundleResourceDeploymentHistory( - overlord.getName(), auditMessage, auditMessage, BundleResourceDeploymentHistory.Category.DEPLOY_STEP, - BundleResourceDeploymentHistory.Status.SUCCESS, auditMessage, auditMessage)); + bundleManager.addBundleResourceDeploymentHistoryInNewTrans(overlord, brd.getId(), + new BundleResourceDeploymentHistory(overlord.getName(), auditMessage, auditMessage, + BundleResourceDeploymentHistory.Category.DEPLOY_STEP, BundleResourceDeploymentHistory.Status.SUCCESS, + auditMessage, auditMessage));
brds = bundleManager.findBundleResourceDeploymentsByCriteria(overlord, c); assertEquals(1, brds.size()); @@ -1284,31 +1317,253 @@ public class BundleManagerBeanTest extends AbstractEJB3Test { assertEquals(1, bundles.size()); }
- @Test(enabled = false) - public void testNoAuthz() throws Exception { - // create + @Test(enabled = TESTS_ENABLED) + public void authzBundleGroupTest() throws Exception { + Subject subject = null; + Role role = null; + + subject = createNewSubject(TEST_USER_NAME); + role = createNewRoleForSubject(subject, TEST_ROLE_NAME); + + subject = createSession(subject); // start a session so we can use this subject in SLSB calls + + // deny bundle group create + try { + bundleManager.createBundleGroup(subject, TEST_BUNDLE_GROUP_NAME, "test"); + fail("Should have thrown PermissionException"); + } catch (PermissionException e) { + // expected + } + + // allow bundle group create + addRolePermissions(role, Permission.MANAGE_BUNDLE_GROUPS); + BundleGroup bundleGroup = bundleManager.createBundleGroup(subject, TEST_BUNDLE_GROUP_NAME, "test"); + + // deny bundle group delete + removeRolePermissions(role, Permission.MANAGE_BUNDLE_GROUPS); + try { + bundleManager.deleteBundleGroups(subject, new int[] { bundleGroup.getId() }); + fail("Should have thrown PermissionException"); + } catch (PermissionException e) { + // expected + } + + // deny global perm bundleGroup view + BundleGroupCriteria bgCriteria = new BundleGroupCriteria(); + List<BundleGroup> bundleGroups = bundleManager.findBundleGroupsByCriteria(subject, bgCriteria); + assertNotNull(bundleGroups); + assert bundleGroups.isEmpty() : "Should not be able to see unassociated bundle group"; + + // allow global perm bundleGroup view + addRolePermissions(role, Permission.MANAGE_BUNDLE_GROUPS); + bundleGroups = bundleManager.findBundleGroupsByCriteria(subject, bgCriteria); + assertNotNull(bundleGroups); + assertEquals("Should be able to see unassociated bundle group", 1, bundleGroups.size()); + + // allow bundle group delete + bundleManager.deleteBundleGroups(subject, new int[] { bundleGroup.getId() }); + + // deny unassigned bundle create (no global create or view) + try { + createBundle(subject, TEST_PREFIX + ".bundle"); + 
fail("Should have thrown PermissionException"); + } catch (PermissionException e) { + // expected + } + + // deny unassigned bundle create (no global view) + addRolePermissions(role, Permission.CREATE_BUNDLES); + try { + createBundle(subject, TEST_PREFIX + ".bundle"); + fail("Should have thrown PermissionException"); + } catch (PermissionException e) { + // expected + } + + // deny unassigned bundle create (no global create) + removeRolePermissions(role, Permission.CREATE_BUNDLES); + addRolePermissions(role, Permission.VIEW_BUNDLES); + try { + createBundle(subject, TEST_PREFIX + ".bundle"); + fail("Should have thrown PermissionException"); + } catch (PermissionException e) { + // expected + } + + // allow unassigned bundle create + addRolePermissions(role, Permission.CREATE_BUNDLES); + Bundle bundle = createBundle(subject, TEST_PREFIX + ".bundle"); + + // deny unassigned bundle view + removeRolePermissions(role, Permission.CREATE_BUNDLES, Permission.VIEW_BUNDLES); + BundleCriteria bCriteria = new BundleCriteria(); + List<Bundle> bundles = bundleManager.findBundlesByCriteria(subject, bCriteria); + assertNotNull(bundles); + assert bundles.isEmpty() : "Should not be able to see unassigned bundle"; + + // allow unassigned bundle view + addRolePermissions(role, Permission.VIEW_BUNDLES); + bundles = bundleManager.findBundlesByCriteria(subject, bCriteria); + assertNotNull(bundles); + assertEquals("Should be able to see unassigned bundle", 1, bundles.size()); + + // deny global perm bundle assign + bundleGroup = bundleManager.createBundleGroup(subject, TEST_BUNDLE_GROUP_NAME, "test"); + try { + bundleManager.assignBundlesToBundleGroup(subject, bundleGroup.getId(), new int[] { bundle.getId() }); + fail("Should have thrown PermissionException"); + } catch (PermissionException e) { + // expected + } + + // allow global perm bundle assign + addRolePermissions(role, Permission.CREATE_BUNDLES); + bundleManager.assignBundlesToBundleGroup(subject, bundleGroup.getId(), new int[] { 
bundle.getId() }); + + // deny assigned, unassociated-bundle-group bundle view + removeRolePermissions(role, Permission.CREATE_BUNDLES, Permission.VIEW_BUNDLES); + bundles = bundleManager.findBundlesByCriteria(subject, bCriteria); + assertNotNull(bundles); + assert bundles.isEmpty() : "Should not be able to see assigned bundle"; + + // allow assigned, associated-bundle-group bundle view + addRoleBundleGroup(role, bundleGroup); + bundles = bundleManager.findBundlesByCriteria(subject, bCriteria); + assertNotNull(bundles); + assertEquals("Should be able to see assigned bundle", 1, bundles.size()); + + // check new bundle criteria options (no match) + bCriteria.addFilterBundleGroupIds(87678); + bCriteria.fetchBundleGroups(true); + bundles = bundleManager.findBundlesByCriteria(subject, bCriteria); + assertNotNull(bundles); + assert bundles.isEmpty() : "Should not have found anything"; + + // check new bundle criteria options (match) + bCriteria.addFilterBundleGroupIds(bundleGroup.getId()); + bCriteria.fetchBundleGroups(true); + bundles = bundleManager.findBundlesByCriteria(subject, bCriteria); + assertNotNull(bundles); + assertEquals("Should be able to see assigned bundle", 1, bundles.size()); + assertNotNull(bundles.get(0).getBundleGroups()); + assertEquals("Should have fetched bundlegroup", 1, bundles.get(0).getBundleGroups().size()); + assertEquals("Should have fetched expected bundlegroup", bundleGroup, bundles.get(0).getBundleGroups() + .iterator().next()); + + // check new bundle group criteria options (no match) + bgCriteria.addFilterId(87678); + bgCriteria.addFilterBundleIds(87678); + bgCriteria.addFilterRoleIds(87678); + bgCriteria.fetchBundles(true); + bgCriteria.fetchRoles(true); + bundleGroups = bundleManager.findBundleGroupsByCriteria(subject, bgCriteria); + assertNotNull(bundleGroups); + assert bundleGroups.isEmpty() : "Should not have found anything"; + + // check new bundle group criteria options (no match) + bgCriteria.addFilterId(bundleGroup.getId()); 
+ bundleGroups = bundleManager.findBundleGroupsByCriteria(subject, bgCriteria); + assertNotNull(bundleGroups); + assert bundleGroups.isEmpty() : "Should not have found anything"; + + // check new bundle group criteria options (no match) + bgCriteria.addFilterBundleIds(bundle.getId()); + bundleGroups = bundleManager.findBundleGroupsByCriteria(subject, bgCriteria); + assertNotNull(bundleGroups); + assert bundleGroups.isEmpty() : "Should not have found anything"; + + // check new bundle group criteria options (match) + bgCriteria.addFilterRoleIds(role.getId()); + bundleGroups = bundleManager.findBundleGroupsByCriteria(subject, bgCriteria); + assertNotNull(bundleGroups); + assertEquals("Should be able to see assigned bundle", 1, bundleGroups.size()); + assertNotNull(bundleGroups.get(0).getBundles()); + assertEquals("Should have fetched bundle in bundle group", 1, bundleGroups.get(0).getBundles().size()); + assertEquals("Should have fetched bundle in bundle group", bundle, bundleGroups.get(0).getBundles() + .iterator().next()); + assertNotNull(bundleGroups.get(0).getRoles()); + assertEquals("Should have fetched role for bundle group", 1, bundleGroups.get(0).getRoles().size()); + assertEquals("Should have fetched role for bundle group", role, bundleGroups.get(0).getRoles().iterator() + .next()); + } + + private Subject createNewSubject(String subjectName) throws Exception { + + Subject newSubject = new Subject(); + newSubject.setName(subjectName); + newSubject.setFactive(true); + newSubject.setFsystem(false); + + return LookupUtil.getSubjectManager().createSubject(overlord, newSubject); + } + + private Role createNewRoleForSubject(Subject subject, String roleName) throws Exception { + Role newRole = new Role(roleName); + newRole.setFsystem(false); + newRole.addSubject(subject);
+ return LookupUtil.getRoleManager().createRole(overlord, newRole); + } + + private void addRolePermissions(Role role, Permission... permissions) throws Exception { + + for (Permission p : permissions) { + role.getPermissions().add(p); + } + LookupUtil.getRoleManager().setPermissions(overlord, role.getId(), role.getPermissions()); + } + + private void removeRolePermissions(Role role, Permission... permissions) throws Exception { + + for (Permission p : permissions) { + role.getPermissions().remove(p); + } + LookupUtil.getRoleManager().setPermissions(overlord, role.getId(), role.getPermissions()); + } + + private void addRoleBundleGroup(Role role, BundleGroup bundleGroup) throws Exception { + + int[] ids = new int[1]; + ids[0] = bundleGroup.getId(); + LookupUtil.getRoleManager().addBundleGroupsToRole(overlord, role.getId(), ids); + } + + private void removeRoleBundleGroup(Role role, BundleGroup bundleGroup) throws Exception { + + int[] ids = new int[1]; + ids[0] = bundleGroup.getId(); + LookupUtil.getRoleManager().removeBundleGroupsFromRole(overlord, role.getId(), ids); }
// helper methods private BundleType createBundleType(String name) throws Exception { final String fullName = TEST_PREFIX + "-type-" + name; - ResourceType rt = createResourceTypeForBundleType(name); - BundleType bt = bundleManager.createBundleType(overlord, fullName, rt.getId()); + BundleType bt = null; + try { + bt = bundleManager.getBundleType(overlord, fullName); + } catch (Throwable t) { + ResourceType rt = createResourceTypeForBundleType(name); + bt = bundleManager.createBundleType(overlord, fullName, rt.getId()); + + assert bt.getId() > 0; + assert bt.getName().endsWith(fullName); + }
- assert bt.getId() > 0; - assert bt.getName().endsWith(fullName); return bt; }
private Bundle createBundle(String name) throws Exception { + return createBundle(overlord, name); + } + + private Bundle createBundle(Subject subject, String name) throws Exception { BundleType bt = createBundleType(name); - return createBundle(name, bt); + return createBundle(subject, name, bt, 0); }
- private Bundle createBundle(String name, BundleType bt) throws Exception { + private Bundle createBundle(Subject subject, String name, BundleType bt, int bundleGroupId) throws Exception { final String fullName = TEST_PREFIX + "-bundle-" + name; - Bundle b = bundleManager.createBundle(overlord, fullName, fullName + "-desc", bt.getId(), 0); + Bundle b = bundleManager.createBundle(subject, fullName, fullName + "-desc", bt.getId(), bundleGroupId);
assert b.getId() > 0; assert b.getName().endsWith(fullName); @@ -1316,6 +1571,11 @@ public class BundleManagerBeanTest extends AbstractEJB3Test { }
private BundleVersion createBundleVersion(String name, String version, Bundle bundle) throws Exception { + return createBundleVersion(overlord, name, version, bundle); + } + + private BundleVersion createBundleVersion(Subject subject, String name, String version, Bundle bundle) + throws Exception { final String fullName = TEST_PREFIX + "-bundleversion-" + version + "-" + name; final String recipe = "deploy -f " + TEST_PREFIX + ".zip -d @@ test.path @@"; BundleVersion bv = bundleManager.createBundleVersion(overlord, bundle.getId(), fullName, fullName + "-desc", diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/util/SessionTestHelper.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/util/SessionTestHelper.java index 10b5dbc..b85408d 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/util/SessionTestHelper.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/util/SessionTestHelper.java @@ -19,6 +19,7 @@ package org.rhq.enterprise.server.util;
import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.EnumSet; import java.util.Random; @@ -137,6 +138,18 @@ public class SessionTestHelper { return newRole; }
+ public static void addRolePermissions(EntityManager em, Role role, Permission... permissions) { + role.getPermissions().addAll(Arrays.asList(permissions)); + em.merge(role); + em.flush(); + } + + public static void removeRolePermissions(EntityManager em, Role role, Permission... permissions) { + role.getPermissions().removeAll(Arrays.asList(permissions)); + em.merge(role); + em.flush(); + } + public static ResourceType createNewResourceType(EntityManager em) { ResourceType type = new ResourceType(preprocess("testType"), "testPlugin", ResourceCategory.PLATFORM, null); ConfigurationDefinition resourceConfigDef = new ConfigurationDefinition("Fake def", diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/RoleManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/RoleManagerBean.java index af4c81f..aeaf597 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/RoleManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/RoleManagerBean.java @@ -39,6 +39,7 @@ import org.apache.commons.logging.LogFactory; import org.rhq.core.domain.auth.Subject; import org.rhq.core.domain.authz.Permission; import org.rhq.core.domain.authz.Role; +import org.rhq.core.domain.bundle.BundleGroup; import org.rhq.core.domain.criteria.RoleCriteria; import org.rhq.core.domain.resource.group.LdapGroup; import org.rhq.core.domain.resource.group.ResourceGroup; @@ -88,6 +89,7 @@ public class RoleManagerBean implements RoleManagerLocal, RoleManagerRemote { /** * @see org.rhq.enterprise.server.authz.RoleManagerLocal#findRolesBySubject(int subjectId,PageControl pageControl) */ + @Override @SuppressWarnings("unchecked") // the first param, subject, is not the subject making the request, its the subject whose roles are to be returned. 
// therefore, we won't want our security interceptor to check this method since the subject won't have a session associated with it @@ -109,6 +111,7 @@ public class RoleManagerBean implements RoleManagerLocal, RoleManagerRemote { /** * @see org.rhq.enterprise.server.authz.RoleManagerLocal#findRoles(PageControl) */ + @Override @SuppressWarnings("unchecked") public PageList<Role> findRoles(PageControl pc) { pc.initDefaultOrderingField("r.name"); @@ -135,6 +138,7 @@ public class RoleManagerBean implements RoleManagerLocal, RoleManagerRemote { /** * @see org.rhq.enterprise.server.authz.RoleManagerLocal#createRole(Subject, Role) */ + @Override @RequiredPermission(Permission.MANAGE_SECURITY) public Role createRole(Subject whoami, Role newRole) { // Make sure there's not an existing role with the same name. @@ -182,6 +186,7 @@ public class RoleManagerBean implements RoleManagerLocal, RoleManagerRemote { /** * @see org.rhq.enterprise.server.authz.RoleManagerLocal#deleteRoles(Subject, int[]) */ + @Override @RequiredPermission(Permission.MANAGE_SECURITY) public void deleteRoles(Subject subject, int[] doomedRoleIds) { if (doomedRoleIds != null) { @@ -202,6 +207,13 @@ public class RoleManagerBean implements RoleManagerLocal, RoleManagerRemote { entityManager.merge(doomedResourceGroupRelationship); }
+ //remove attached Bundle Groups + Set<BundleGroup> bundleGroupsToUnhook = new HashSet<BundleGroup>(doomedRole.getBundleGroups()); // avoid concurrent mod exception + for (BundleGroup doomedBundleGroupRelationship : bundleGroupsToUnhook) { + doomedRole.removeBundleGroup(doomedBundleGroupRelationship); + entityManager.merge(doomedBundleGroupRelationship); + } + //remove attached LDAP Subjects Set<Subject> ldapSubjectsToUnhook = new HashSet<Subject>(doomedRole.getLdapSubjects()); // avoid concurrent mod exception for (Subject doomedLdapSubjectRelationship : ldapSubjectsToUnhook) { @@ -232,6 +244,7 @@ public class RoleManagerBean implements RoleManagerLocal, RoleManagerRemote { /** * @see org.rhq.enterprise.server.authz.RoleManagerLocal#addRolesToSubject(Subject, int, int[]) */ + @Override @RequiredPermission(Permission.MANAGE_SECURITY) public void addRolesToSubject(Subject subject, int subjectId, int[] roleIds) { addRolesToSubject(subject, subjectId, roleIds, false); @@ -272,6 +285,7 @@ public class RoleManagerBean implements RoleManagerLocal, RoleManagerRemote { /** * @see org.rhq.enterprise.server.authz.RoleManagerLocal#addSubjectsToRole(Subject, int, int[]) */ + @Override @RequiredPermission(Permission.MANAGE_SECURITY) public void addSubjectsToRole(Subject subject, int roleId, int[] subjectIds) { if (subjectIds != null) { @@ -303,6 +317,7 @@ public class RoleManagerBean implements RoleManagerLocal, RoleManagerRemote { /** * @see org.rhq.enterprise.server.authz.RoleManagerLocal#removeRolesFromSubject(Subject, int, int[]) */ + @Override @RequiredPermission(Permission.MANAGE_SECURITY) public void removeRolesFromSubject(Subject subject, int subjectId, int[] roleIds) { if (roleIds != null) { @@ -324,6 +339,7 @@ public class RoleManagerBean implements RoleManagerLocal, RoleManagerRemote { return; }
+ @Override @RequiredPermission(Permission.MANAGE_SECURITY) public void setAssignedSubjectRoles(Subject subject, int subjectId, int[] roleIds) {
@@ -359,6 +375,7 @@ public class RoleManagerBean implements RoleManagerLocal, RoleManagerRemote { /** * @see org.rhq.enterprise.server.authz.RoleManagerLocal#getRoleById(Integer) */ + @Override public Role getRoleById(Integer roleId) { Role role = entityManager.find(Role.class, roleId); return role; @@ -367,6 +384,7 @@ public class RoleManagerBean implements RoleManagerLocal, RoleManagerRemote { /** * @see org.rhq.enterprise.server.authz.RoleManagerLocal#setPermissions(Subject, Integer, Set) */ + @Override @RequiredPermission(Permission.MANAGE_SECURITY) public void setPermissions(Subject subject, Integer roleId, Set<Permission> permissions) { Role role = entityManager.find(Role.class, roleId); @@ -381,6 +399,7 @@ public class RoleManagerBean implements RoleManagerLocal, RoleManagerRemote { /** * @see org.rhq.enterprise.server.authz.RoleManagerLocal#getPermissions(Integer) */ + @Override public Set<Permission> getPermissions(Integer roleId) { Role role = entityManager.find(Role.class, roleId); Set<Permission> rolePermissions = role.getPermissions(); @@ -390,6 +409,7 @@ public class RoleManagerBean implements RoleManagerLocal, RoleManagerRemote { /** * @see org.rhq.enterprise.server.authz.RoleManagerLocal#updateRole(Subject, Role) */ + @Override @RequiredPermission(Permission.MANAGE_SECURITY) public Role updateRole(Subject whoami, Role role) { Role attachedRole = entityManager.find(Role.class, role.getId()); @@ -481,6 +501,7 @@ public class RoleManagerBean implements RoleManagerLocal, RoleManagerRemote { /** * @see org.rhq.enterprise.server.authz.RoleManagerLocal#findSubjectsByRole(Integer,PageControl) */ + @Override @SuppressWarnings("unchecked") public PageList<Subject> findSubjectsByRole(Integer roleId, PageControl pc) { pc.initDefaultOrderingField("s.name"); @@ -501,6 +522,7 @@ public class RoleManagerBean implements RoleManagerLocal, RoleManagerRemote { /** * @see org.rhq.enterprise.server.authz.RoleManagerLocal#findRolesByIds(Integer[],PageControl) */ + 
@Override @SuppressWarnings("unchecked") public PageList<Role> findRolesByIds(Integer[] roleIds, PageControl pc) { if ((roleIds == null) || (roleIds.length == 0)) { @@ -528,6 +550,7 @@ public class RoleManagerBean implements RoleManagerLocal, RoleManagerRemote { return new PageList<Role>(roles, (int) count, pc); }
+ @Override @RequiredPermission(Permission.MANAGE_SECURITY) @SuppressWarnings("unchecked") public PageList<Role> findAvailableRolesForSubject(Subject subject, Integer subjectId, Integer[] pendingRoleIds, @@ -565,14 +588,39 @@ public class RoleManagerBean implements RoleManagerLocal, RoleManagerRemote { return new PageList<Role>(roles, (int) count, pc); }
+ @Override @RequiredPermission(Permission.MANAGE_SECURITY) public PageList<Role> findSubjectUnassignedRoles(Subject subject, int subjectId, PageControl pc) { return findAvailableRolesForSubject(subject, subjectId, null, pc); }
+ @Override + @RequiredPermission(Permission.MANAGE_SECURITY) + public void addBundleGroupsToRole(Subject subject, int roleId, int[] bundleGroupIds) { + if ((bundleGroupIds != null) && (bundleGroupIds.length > 0)) { + Role role = entityManager.find(Role.class, roleId); + if (role == null) { + throw new IllegalArgumentException("Could not find role[" + roleId + "] in order to add resourceGroups"); + } + role.getBundleGroups().size(); // load them in + + for (Integer bundleGroupId : bundleGroupIds) { + BundleGroup bundleGroup = entityManager.find(BundleGroup.class, bundleGroupId); + if (bundleGroup == null) { + throw new IllegalArgumentException("Tried to add BundleGroup[" + bundleGroupId + "] to role[" + + roleId + "], but bundleGroup was not found."); + } + role.addBundleGroup(bundleGroup); + } + } + + return; + } + /** * @see org.rhq.enterprise.server.authz.RoleManagerLocal#addResourceGroupsToRole(Subject, int, int[]) */ + @Override @RequiredPermission(Permission.MANAGE_SECURITY) public void addResourceGroupsToRole(Subject subject, int roleId, int[] groupIds) { if ((groupIds != null) && (groupIds.length > 0)) { @@ -595,9 +643,32 @@ public class RoleManagerBean implements RoleManagerLocal, RoleManagerRemote { return; }
+ @Override + @RequiredPermission(Permission.MANAGE_SECURITY) + public void removeBundleGroupsFromRole(Subject subject, int roleId, int[] bundleGroupIds) { + if ((bundleGroupIds != null) && (bundleGroupIds.length > 0)) { + Role role = entityManager.find(Role.class, roleId); + if (role == null) { + throw new IllegalArgumentException("Could not find role[" + roleId + + "] in order to remove BundleGroups"); + } + role.getBundleGroups().size(); // load them in + + for (Integer bundleGroupId : bundleGroupIds) { + BundleGroup bundleGroup = entityManager.find(BundleGroup.class, bundleGroupId); + if (bundleGroup == null) { + throw new IllegalArgumentException("Tried to remove BundleGroup[" + bundleGroupId + "] from role[" + + roleId + "], but BundleGroup was not found"); + } + role.removeBundleGroup(bundleGroup); + } + } + } + /** * @see org.rhq.enterprise.server.authz.RoleManagerLocal#removeResourceGroupsFromRole(Subject, int, int[]) */ + @Override @RequiredPermission(Permission.MANAGE_SECURITY) public void removeResourceGroupsFromRole(Subject subject, int roleId, int[] groupIds) { if ((groupIds != null) && (groupIds.length > 0)) { @@ -618,6 +689,27 @@ public class RoleManagerBean implements RoleManagerLocal, RoleManagerRemote { } }
+ @Override + @RequiredPermission(Permission.MANAGE_SECURITY) + public void setAssignedBundleGroups(Subject subject, int roleId, int[] bundleGroupIds) { + Role role = getRole(subject, roleId); + List<Integer> currentBundleGroups = new ArrayList<Integer>(); + for (BundleGroup group : role.getBundleGroups()) { + currentBundleGroups.add(group.getId()); + } + + List<Integer> newBundleGroups = ArrayUtils.wrapInList(bundleGroupIds); // members needing addition + newBundleGroups.removeAll(currentBundleGroups); + int[] newBundleGroupIds = ArrayUtils.unwrapCollection(newBundleGroups); + roleManager.addBundleGroupsToRole(subject, roleId, newBundleGroupIds); + + List<Integer> removedBundleGroups = new ArrayList<Integer>(currentBundleGroups); // members needing removal + removedBundleGroups.removeAll(ArrayUtils.wrapInList(bundleGroupIds)); + int[] removedGroupIds = ArrayUtils.unwrapCollection(removedBundleGroups); + roleManager.removeBundleGroupsFromRole(subject, roleId, removedGroupIds); + } + + @Override @RequiredPermission(Permission.MANAGE_SECURITY) public void setAssignedResourceGroups(Subject subject, int roleId, int[] groupIds) { Role role = getRole(subject, roleId); @@ -668,11 +760,13 @@ public class RoleManagerBean implements RoleManagerLocal, RoleManagerRemote { } }
+ @Override public PageList<Role> findSubjectAssignedRoles(Subject subject, int subjectId, PageControl pc) { PageList<Role> assignedRoles = findRolesBySubject(subjectId, pc); return assignedRoles; }
+ @Override @RequiredPermission(Permission.MANAGE_SECURITY) public void removeSubjectsFromRole(Subject subject, int roleId, int[] subjectIds) { if ((subjectIds != null) && (subjectIds.length > 0)) { @@ -697,6 +791,7 @@ public class RoleManagerBean implements RoleManagerLocal, RoleManagerRemote { } }
+ @Override @RequiredPermission(Permission.MANAGE_SECURITY) public void setAssignedSubjects(Subject subject, int roleId, int[] subjectIds) {
@@ -729,6 +824,31 @@ public class RoleManagerBean implements RoleManagerLocal, RoleManagerRemote { } }
+ @Override + @RequiredPermission(Permission.MANAGE_SECURITY) + public void removeRolesFromBundleGroup(Subject subject, int bundleGroupId, int[] roleIds) { + if ((roleIds != null) && (roleIds.length > 0)) { + BundleGroup bundleGroup = entityManager.find(BundleGroup.class, bundleGroupId); + if (bundleGroup == null) { + throw new IllegalArgumentException("Could not find BundleGroup[" + bundleGroupId + + "] in order to remove roles"); + } + bundleGroup.getRoles().size(); // load them in + + for (Integer roleId : roleIds) { + Role doomedRole = entityManager.find(Role.class, roleId); + if (doomedRole == null) { + throw new IllegalArgumentException("Tried to remove role[" + roleId + "] from BundleGroup[" + + bundleGroupId + "], but role was not found"); + } + bundleGroup.removeRole(doomedRole); + } + } + + return; + } + + @Override @RequiredPermission(Permission.MANAGE_SECURITY) public void removeRolesFromResourceGroup(Subject subject, int groupId, int[] roleIds) { if ((roleIds != null) && (roleIds.length > 0)) { @@ -751,10 +871,36 @@ public class RoleManagerBean implements RoleManagerLocal, RoleManagerRemote { return; }
+ @Override public Role getRole(Subject subject, int roleId) { return entityManager.find(Role.class, roleId); }
+ @Override + @RequiredPermission(Permission.MANAGE_SECURITY) + public void addRolesToBundleGroup(Subject subject, int bundleGroupId, int[] roleIds) { + if ((roleIds != null) && (roleIds.length > 0)) { + BundleGroup bundleGroup = entityManager.find(BundleGroup.class, bundleGroupId); + if (bundleGroup == null) { + throw new IllegalArgumentException("Could not find bundleGroup[" + bundleGroupId + + "] in order to add roles"); + } + bundleGroup.getRoles().size(); // load them in + + for (Integer roleId : roleIds) { + Role role = entityManager.find(Role.class, roleId); + if (role == null) { + throw new IllegalArgumentException("Tried to add role[" + roleId + "] to bundleGroup[" + + bundleGroupId + "], but role was not found"); + } + bundleGroup.addRole(role); + } + } + + return; + } + + @Override @RequiredPermission(Permission.MANAGE_SECURITY) public void addRolesToResourceGroup(Subject subject, int groupId, int[] roleIds) { if ((roleIds != null) && (roleIds.length > 0)) { @@ -777,6 +923,7 @@ public class RoleManagerBean implements RoleManagerLocal, RoleManagerRemote { return; }
+ @Override @SuppressWarnings("unchecked") public PageList<Role> findRolesByCriteria(Subject subject, RoleCriteria criteria) {
@@ -788,7 +935,7 @@ public class RoleManagerBean implements RoleManagerLocal, RoleManagerRemote {
CriteriaQueryGenerator generator = new CriteriaQueryGenerator(subject, criteria); CriteriaQueryRunner<Role> queryRunner = new CriteriaQueryRunner<Role>(criteria, generator, entityManager); - @SuppressWarnings({ "UnnecessaryLocalVariable" }) + PageList<Role> roles = queryRunner.execute();
return roles; diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/RoleManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/RoleManagerLocal.java index 5c2e1cb..d099f7c 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/RoleManagerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/RoleManagerLocal.java @@ -25,7 +25,6 @@ import javax.ejb.Local; import org.rhq.core.domain.auth.Subject; import org.rhq.core.domain.authz.Permission; import org.rhq.core.domain.authz.Role; -import org.rhq.core.domain.criteria.RoleCriteria; import org.rhq.core.domain.util.PageControl; import org.rhq.core.domain.util.PageList;
@@ -36,7 +35,7 @@ import org.rhq.core.domain.util.PageList; * @author John Mazzitelli */ @Local -public interface RoleManagerLocal { +public interface RoleManagerLocal extends RoleManagerRemote { /** * This returns a page list of all the roles that a subject is authorized to access. * @@ -57,26 +56,6 @@ public interface RoleManagerLocal { PageList<Role> findRoles(PageControl pc);
/** - * Persists the new role to the database. The subjects assigned to the role are ignored - this only creates the role - * entity with 0 subjects initially assigned to it. - * - * @param subject the user attempting to create the role - * @param newRole the new role to persist - * - * @return the persisted role with the primary key populated - */ - Role createRole(Subject subject, Role newRole); - - /** - * Removes a set of roles from the database. The subjects assigned to the roles are no longer authorized with the - * deleted roles. Groups attached to the deleted roles are left alone. - * - * @param subject the user attempting to delete the role - * @param doomedRoleIds the IDs of the roles to delete - */ - void deleteRoles(Subject subject, int[] doomedRoleIds); - - /** * Sets the permissions for the specified role. Any currently existing role permissions are overwritten - that is, * <code>permissions</code> will be the complete set of permissions the role will now be authorized with. * @@ -96,16 +75,6 @@ public interface RoleManagerLocal { Set<Permission> getPermissions(Integer roleId);
/** - * Updates the given role, excluding the subjects and groups. This updates permissions, name, description, etc. - * - * @param subject user asking to update the role - * @param role - * - * @return the updated role - */ - Role updateRole(Subject subject, Role role); - - /** * Given a set of role Ids, this returns a list of all the roles. * * @param roleIds @@ -140,12 +109,6 @@ public interface RoleManagerLocal { PageList<Role> findAvailableRolesForSubject(Subject subject, Integer subjectId, Integer[] pendingRoleIds, PageControl pc);
- // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - // - // The following are shared with the Remote Interface - // - // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - /** * Returns the role with the given ID * @@ -157,64 +120,5 @@ public interface RoleManagerLocal { // Use getRole instead Role getRoleById(Integer roleId);
- Role getRole(Subject subject, int roleId); - - PageList<Role> findSubjectAssignedRoles(Subject subject, int subjectId, PageControl pc); - - //This is a proxy of getAvailableRolesForSubject but without pendingRoleIds as required by remote spec - PageList<Role> findSubjectUnassignedRoles(Subject subject, int subjectId, PageControl pc); - - /** - * Assigns a set of roles to a subject which authorizes the subject to do anything the roles permit. - * - * @param subject the user attempting to assign the roles to the subject - * @param subjectId the subject who is to be authorized with the given roles - * @param roleIds the roles to assign - */ - void addRolesToSubject(Subject subject, int subjectId, int[] roleIds); - - /** - * Disassociates particular roles from a subject. Once complete, the subject will no longer be authorized with the - * given roles. - * - * @param subject the user that is attempting to perform the remove - * @param subjectId the user that is to have the roles unassigned from it - * @param roleIds list of role IDs that are to be removed from user - */ - void removeRolesFromSubject(Subject subject, int subjectId, int[] roleIds); - - void setAssignedSubjectRoles(Subject subject, int subjectId, int[] roleIds); - - void addSubjectsToRole(Subject subject, int roleId, int[] subjectIds); - - void removeSubjectsFromRole(Subject subject, int roleId, int[] subjectIds); - void setAssignedSubjects(Subject sessionSubject, int roleId, int[] subjectIds); - - /** - * Adds the given resource groups to the given role. - * - * @param subject user attempting to add the groups to the role - * @param roleId - * @param pendingGroupIds - */ - void addResourceGroupsToRole(Subject subject, int roleId, int[] pendingGroupIds); - - void addRolesToResourceGroup(Subject subject, int groupId, int[] roleIds); - - void setAssignedResourceGroups(Subject subject, int roleId, int[] groupIds); - - /** - * Removes the given resource groups from the given role. 
- * - * @param subject user attempting to remove the groups from the role - * @param roleId - * @param groupIds - */ - void removeResourceGroupsFromRole(Subject subject, int roleId, int[] groupIds); - - void removeRolesFromResourceGroup(Subject subject, int groupId, int[] roleIds); - - PageList<Role> findRolesByCriteria(Subject subject, RoleCriteria criteria); - } \ No newline at end of file diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/RoleManagerRemote.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/RoleManagerRemote.java index 0586998..83194da 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/RoleManagerRemote.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/RoleManagerRemote.java @@ -130,6 +130,15 @@ public interface RoleManagerRemote { void setAssignedSubjectRoles(Subject subject, int subjectId, int[] roleIds);
/** + * Adds the given bundle groups to the given role. + * + * @param subject The logged in user's subject. + * @param roleId + * @param bundleGroupIds + */ + void addBundleGroupsToRole(Subject subject, int roleId, int[] bundleGroupIds); + + /** * Adds the given resource groups to the given role. * * @param subject The logged in user's subject. @@ -138,11 +147,24 @@ public interface RoleManagerRemote { */ void addResourceGroupsToRole(Subject subject, int roleId, int[] pendingGroupIds);
+ void addRolesToBundleGroup(Subject subject, int bundleGroupId, int[] roleIds); + void addRolesToResourceGroup(Subject subject, int groupId, int[] roleIds);
+ void setAssignedBundleGroups(Subject subject, int roleId, int[] bundleGroupIds); + void setAssignedResourceGroups(Subject subject, int roleId, int[] groupIds);
/** + * Removes the given bundle groups from the given role. + * + * @param subject user attempting to remove the groups from the role + * @param roleId + * @param bundleGroupIds + */ + void removeBundleGroupsFromRole(Subject subject, int roleId, int[] bundleGroupIds); + + /** * Removes the given resource groups from the given role. * * @param subject user attempting to remove the groups from the role @@ -151,6 +173,8 @@ public interface RoleManagerRemote { */ void removeResourceGroupsFromRole(Subject subject, int roleId, int[] groupIds);
+ void removeRolesFromBundleGroup(Subject subject, int bundleGroupId, int[] roleIds); + void removeRolesFromResourceGroup(Subject subject, int groupId, int[] roleIds);
PageList<Role> findRolesByCriteria(Subject subject, RoleCriteria criteria); diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerBean.java index a9882c4..bc85e6d 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerBean.java @@ -66,6 +66,7 @@ import org.rhq.core.clientapi.agent.bundle.BundleScheduleResponse; import org.rhq.core.clientapi.agent.configuration.ConfigurationUtility; import org.rhq.core.domain.auth.Subject; import org.rhq.core.domain.authz.Permission; +import org.rhq.core.domain.authz.Role; import org.rhq.core.domain.bundle.Bundle; import org.rhq.core.domain.bundle.BundleDeployment; import org.rhq.core.domain.bundle.BundleDeploymentStatus; @@ -233,6 +234,8 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot } }
+ checkCreateInitialBundleVersionAuthz(subject, bundleGroupId); + // create and add the required Repo. the Repo is a detached object which helps in its eventual removal. Repo repo = new Repo(name); repo.setCandidate(false); @@ -761,7 +764,6 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
if (isInitialVersion) { - checkCreateInitialBundleVersionAuthz(subject, initialBundleGroupId); bundle = bundleManager.createBundle(subject, bundleName, bundleDescription, bundleType.getId(), initialBundleGroupId); createdBundle = true; @@ -1619,7 +1621,7 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot
// filter by bundles that are viewable if (!authorizationManager.hasGlobalPermission(subject, Permission.VIEW_BUNDLES)) { - generator.setAuthorizationResourceFragment(CriteriaQueryGenerator.AuthorizationTokenType.BUNDLE, null, + generator.setAuthorizationBundleFragment(CriteriaQueryGenerator.AuthorizationTokenType.BUNDLE, subject.getId()); }
@@ -1660,7 +1662,7 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot CriteriaQueryGenerator generator = new CriteriaQueryGenerator(subject, criteria);
if (!authorizationManager.hasGlobalPermission(subject, Permission.VIEW_BUNDLES)) { - generator.setAuthorizationResourceFragment(CriteriaQueryGenerator.AuthorizationTokenType.BUNDLE, null, + generator.setAuthorizationBundleFragment(CriteriaQueryGenerator.AuthorizationTokenType.BUNDLE, subject.getId()); }
@@ -1720,7 +1722,7 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot
// filter by bundles that are viewable if (!authorizationManager.hasGlobalPermission(subject, Permission.VIEW_BUNDLES)) { - generator.setAuthorizationResourceFragment(CriteriaQueryGenerator.AuthorizationTokenType.BUNDLE, null, + generator.setAuthorizationBundleFragment(CriteriaQueryGenerator.AuthorizationTokenType.BUNDLE, subject.getId()); }
@@ -1735,8 +1737,8 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot CriteriaQueryGenerator generator = new CriteriaQueryGenerator(subject, criteria);
if (!authorizationManager.hasGlobalPermission(subject, Permission.VIEW_BUNDLES)) { - generator.setAuthorizationResourceFragment(CriteriaQueryGenerator.AuthorizationTokenType.BUNDLE, null, - subject.getId()); + generator.setAuthorizationBundleFragment(CriteriaQueryGenerator.AuthorizationTokenType.BUNDLE, + subject.getId(), null); }
CriteriaQueryRunner<Bundle> queryRunner = new CriteriaQueryRunner<Bundle>(criteria, generator, entityManager); @@ -1781,7 +1783,7 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot generator.alterProjection(replacementSelectList);
if (!authorizationManager.hasGlobalPermission(subject, Permission.VIEW_BUNDLES)) { - generator.setAuthorizationResourceFragment(CriteriaQueryGenerator.AuthorizationTokenType.BUNDLE, null, + generator.setAuthorizationBundleFragment(CriteriaQueryGenerator.AuthorizationTokenType.BUNDLE, subject.getId()); }
@@ -1990,7 +1992,7 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot public void deleteBundleGroups(Subject subject, int[] bundleGroupIds) throws Exception {
for (int bundleGroupId : bundleGroupIds) { - BundleGroup bundleGroup = this.entityManager.find(BundleGroup.class, bundleGroupIds); + BundleGroup bundleGroup = this.entityManager.find(BundleGroup.class, bundleGroupId); if (null == bundleGroup) { return; } @@ -1999,6 +2001,12 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot for (Bundle b : bundleGroup.getBundles()) { bundleGroup.removeBundle(b); } + + // remove from any roles + for (Role r : bundleGroup.getRoles()) { + bundleGroup.removeRole(r); + } + bundleGroup = entityManager.merge(bundleGroup);
// now remove the bundle group @@ -2012,8 +2020,8 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot
// filter by bundle groups that are viewable if (!authorizationManager.hasGlobalPermission(subject, Permission.MANAGE_BUNDLE_GROUPS)) { - generator.setAuthorizationResourceFragment(CriteriaQueryGenerator.AuthorizationTokenType.BUNDLE_GROUP, - null, subject.getId()); + generator.setAuthorizationBundleFragment(CriteriaQueryGenerator.AuthorizationTokenType.BUNDLE_GROUP, + subject.getId(), null); }
CriteriaQueryRunner<BundleGroup> queryRunner = new CriteriaQueryRunner<BundleGroup>(criteria, generator, diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerLocal.java index 01ca620..f96d356 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerLocal.java @@ -68,9 +68,12 @@ public interface BundleManagerLocal extends BundleManagerRemote {
/** * Internal use only, and test entry point. - * </p> - * This method performs NO AUTHZ! - * </p> + * <pre> + * Required Permissions (same as createInitialBundleVersionXxx): Either: + * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES + * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG + * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG + * </pre> * @param subject user that must have proper permissions * @param name not null or empty * @param description optional long description of the bundle @@ -87,9 +90,12 @@ public interface BundleManagerLocal extends BundleManagerRemote { * Convenience method that combines {@link #createBundle(Subject, String, int)} and {@link #createBundleVersion(Subject, int, String, String, String)}. * This will first check to see if a bundle with the given type/name exists - if it doesn't, it will be created. If it does, it will be reused. * This will then create the bundle version that will be associated with the bundle that was created or found. - * </p> - * This method performs NO AUTHZ! 
- * </p> + * <pre> + * Required Permissions (same as createInitialBundleVersionXxx): Either: + * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES + * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG + * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG + * </pre> * @param subject user that must have proper permissions * @param bundleName name of the bundle to use (if not found, it will be created) * @param bundleDescription optional long description of the bundle diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/CriteriaQueryGenerator.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/CriteriaQueryGenerator.java index 0a1060d..3692b78 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/CriteriaQueryGenerator.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/CriteriaQueryGenerator.java @@ -124,10 +124,9 @@ public final class CriteriaQueryGenerator { } else if (type == AuthorizationTokenType.GROUP) { defaultFragment = "group"; setAuthorizationResourceFragment(type, defaultFragment, subjectId); - } else if (type == AuthorizationTokenType.BUNDLE) { - setAuthorizationBundleFragment(subjectId); - } else if (type == AuthorizationTokenType.BUNDLE_GROUP) { - setAuthorizationBundleGroupFragment(subjectId); + } else { + throw new IllegalArgumentException(this.getClass().getSimpleName() + + " does not yet support generating resource queries for '" + type + "' token types"); } }
@@ -237,10 +236,31 @@ public final class CriteriaQueryGenerator { return customAuthzFragment; }
- public void setAuthorizationBundleFragment(int subjectId) { + public void setAuthorizationBundleFragment(AuthorizationTokenType type, int subjectId) { + if (type == AuthorizationTokenType.BUNDLE) { + setAuthorizationBundleFragment(type, subjectId, "bundle"); + } else if (type == AuthorizationTokenType.BUNDLE_GROUP) { + setAuthorizationBundleFragment(type, subjectId, "bundleGroup"); + } else { + throw new IllegalArgumentException(this.getClass().getSimpleName() + + " does not yet support generating bundle queries for '" + type + "' token types"); + } + } + + public void setAuthorizationBundleFragment(AuthorizationTokenType type, int subjectId, String fragment) { + if (type == AuthorizationTokenType.BUNDLE) { + setAuthorizationBundleFragment(subjectId, fragment); + } else if (type == AuthorizationTokenType.BUNDLE_GROUP) { + setAuthorizationBundleGroupFragment(subjectId, fragment); + } else { + throw new IllegalArgumentException(this.getClass().getSimpleName() + + " does not yet support generating bundle queries for '" + type + "' token types"); + } + } + + private void setAuthorizationBundleFragment(int subjectId, String fragment) { this.authorizationSubjectId = subjectId;
- String fragment = "bundle"; String customAuthzFragment = "" // + "( %aliasWithFragment%.id IN ( SELECT %innerAlias%.id " + NL // + " FROM %alias% innerAlias " + NL // @@ -271,8 +291,9 @@ public final class CriteriaQueryGenerator { } }
- public void setAuthorizationBundleGroupFragment(int subjectId) { - String fragment = "bundleGroup"; + private void setAuthorizationBundleGroupFragment(int subjectId, String fragment) { + this.authorizationSubjectId = subjectId; + String customAuthzFragment = "" // + "( %aliasWithFragment%.id IN ( SELECT %innerAlias%.id " + NL // + " FROM %alias% innerAlias " + NL //
commit 6a6485acff97d1073015d6aab0d52c5fb4022895 Author: Mike Thompson mithomps@redhat.com Date: Thu Aug 1 14:09:03 2013 -0700
Small Graph subsystem refactoring.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/AbstractD3GraphListView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/AbstractD3GraphListView.java index d9f27b1..2ce63aa 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/AbstractD3GraphListView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/AbstractD3GraphListView.java @@ -45,13 +45,13 @@ import org.rhq.enterprise.gui.coregui.client.util.preferences.MeasurementUserPre public abstract class AbstractD3GraphListView extends EnhancedVLayout implements AutoRefresh,RedrawGraphs { protected final static int SINGLE_CHART_HEIGHT = 225; protected final static int MULTI_CHART_HEIGHT = 210; - protected static Label loadingLabel = new Label(MSG.common_msg_loading()); + protected static final Label loadingLabel = new Label(MSG.common_msg_loading()); protected List<Availability> availabilityList; protected List<ResourceGroupAvailability> groupAvailabilityList; protected AvailabilityD3GraphView availabilityGraph; - protected MeasurementUserPreferences measurementUserPrefs; + protected final MeasurementUserPreferences measurementUserPrefs; protected boolean showAvailabilityGraph = false; - protected ButtonBarDateTimeRangeEditor buttonBarDateTimeRangeEditor; + protected final ButtonBarDateTimeRangeEditor buttonBarDateTimeRangeEditor; protected Timer refreshTimer;
public AbstractD3GraphListView() { diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java index fb8e096..678724a 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java @@ -92,8 +92,8 @@ public class AvailabilitySummaryPieGraphType { console.log("Draw Availability Summary Pie Chart");
var global = this, - w = 100, - h = 100, + w = @org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilitySummaryPieGraphType::WIDTH, + h = @org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilitySummaryPieGraphType::HEIGHT, r = h / 2, color = $wnd.d3.scale.category10(), data = global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilitySummaryPieGraphType::getAvailabilitySummaryJson()(), diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/D3GroupGraphListView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/D3GroupGraphListView.java index a9789c6..16670aa 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/D3GroupGraphListView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/D3GroupGraphListView.java @@ -54,7 +54,7 @@ import org.rhq.enterprise.gui.coregui.client.util.async.CountDownLatch; */ public final class D3GroupGraphListView extends AbstractD3GraphListView implements AutoRefresh {
- private ResourceGroup resourceGroup; + private final ResourceGroup resourceGroup; private VLayout graphsVLayout;
public D3GroupGraphListView(ResourceGroup resourceGroup, boolean monitorDetailView) { @@ -67,7 +67,6 @@ public final class D3GroupGraphListView extends AbstractD3GraphListView implemen @Override protected void onDraw() { super.onDraw(); - destroyMembers();
addMember(buttonBarDateTimeRangeEditor); @@ -81,9 +80,7 @@ public final class D3GroupGraphListView extends AbstractD3GraphListView implemen graphsVLayout.setWidth100(); graphsVLayout.setHeight100();
- if (resourceGroup != null) { - buildGraphs(); - } + buildGraphs(); addMember(graphsVLayout); }
@@ -196,9 +193,7 @@ public final class D3GroupGraphListView extends AbstractD3GraphListView implemen graphView.setWidth("95%"); graphView.setHeight(MULTI_CHART_HEIGHT);
- if (graphsVLayout != null) { - graphsVLayout.addMember(graphView); - } + graphsVLayout.addMember(graphView); }
} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/GroupMetricsTableDataSource.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/GroupMetricsTableDataSource.java index 0b7b0bf..92221c5 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/GroupMetricsTableDataSource.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/GroupMetricsTableDataSource.java @@ -22,7 +22,6 @@ import org.rhq.core.domain.resource.group.composite.ResourceGroupComposite; import org.rhq.enterprise.gui.coregui.client.CoreGUI; import org.rhq.enterprise.gui.coregui.client.UserSessionManager; import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; -import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.table.MetricsTableDataSource; import org.rhq.enterprise.gui.coregui.client.inventory.resource.type.ResourceTypeRepository; import org.rhq.enterprise.gui.coregui.client.util.MeasurementUtility; import org.rhq.enterprise.gui.coregui.client.util.preferences.MeasurementUserPreferences; @@ -149,8 +148,8 @@ public class GroupMetricsTableDataSource extends MetricsTableDataSource {
//now retrieve metric display sumamries GWTServiceLookup.getMeasurementChartsService().getMetricDisplaySummariesForCompatibleGroup(groupId, - definitionArrayIds, Long.valueOf(range.get(0)).longValue(), - Long.valueOf(range.get(1)).longValue(), false, + definitionArrayIds, range.get(0), + range.get(1), false, new AsyncCallback<ArrayList<MetricDisplaySummary>>() { @Override public void onSuccess(ArrayList<MetricDisplaySummary> result) { diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/MetricsTableDataSource.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/MetricsTableDataSource.java new file mode 100644 index 0000000..ef71ab7 --- /dev/null +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/MetricsTableDataSource.java @@ -0,0 +1,333 @@ +package org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.List; +import java.util.Set; + +import com.google.gwt.user.client.Timer; +import com.google.gwt.user.client.rpc.AsyncCallback; +import com.smartgwt.client.data.DSRequest; +import com.smartgwt.client.data.DSResponse; +import com.smartgwt.client.data.Record; +import com.smartgwt.client.widgets.grid.CellFormatter; +import com.smartgwt.client.widgets.grid.ListGridField; +import com.smartgwt.client.widgets.grid.ListGridRecord; + +import org.rhq.core.domain.criteria.Criteria; +import org.rhq.core.domain.criteria.ResourceCriteria; +import org.rhq.core.domain.measurement.DataType; +import org.rhq.core.domain.measurement.MeasurementDefinition; +import org.rhq.core.domain.measurement.MeasurementSchedule; +import 
org.rhq.core.domain.measurement.composite.MeasurementDataNumericHighLowComposite; +import org.rhq.core.domain.measurement.ui.MetricDisplaySummary; +import org.rhq.core.domain.measurement.ui.MetricDisplayValue; +import org.rhq.core.domain.resource.Resource; +import org.rhq.core.domain.resource.ResourceType; +import org.rhq.core.domain.resource.composite.ResourceComposite; +import org.rhq.core.domain.util.PageList; +import org.rhq.enterprise.gui.coregui.client.CoreGUI; +import org.rhq.enterprise.gui.coregui.client.UserSessionManager; +import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; +import org.rhq.enterprise.gui.coregui.client.inventory.resource.type.ResourceTypeRepository; +import org.rhq.enterprise.gui.coregui.client.util.BrowserUtility; +import org.rhq.enterprise.gui.coregui.client.util.Log; +import org.rhq.enterprise.gui.coregui.client.util.MeasurementUtility; +import org.rhq.enterprise.gui.coregui.client.util.RPCDataSource; +import org.rhq.enterprise.gui.coregui.client.util.async.Command; +import org.rhq.enterprise.gui.coregui.client.util.async.CountDownLatch; +import org.rhq.enterprise.gui.coregui.client.util.preferences.MeasurementUserPreferences; + +/** + * A simple data source to read in metric data summaries for a resource. + * This doesn't support paging - everything is returned in one query. Since + * the number of metrics per resource is relatively small (never more than tens of them), + * we just load them all in at once. 
+ * + * @author John Mazzitelli + * @author Mike Thompson + */ +public class MetricsTableDataSource extends RPCDataSource<MetricDisplaySummary, Criteria> { + + public static final String FIELD_SPARKLINE = "sparkline"; + public static final String FIELD_METRIC_LABEL = "label"; + public static final String FIELD_ALERT_COUNT = "alertCount"; + public static final String FIELD_MIN_VALUE = "min"; + public static final String FIELD_MAX_VALUE = "max"; + public static final String FIELD_AVG_VALUE = "avg"; + public static final String FIELD_LAST_VALUE = "last"; + public static final String FIELD_METRIC_DEF_ID = "defId"; + public static final String FIELD_METRIC_SCHED_ID = "schedId"; + public static final String FIELD_METRIC_UNITS = "units"; + public static final String FIELD_METRIC_NAME = "name"; + public static final String FIELD_RESOURCE_ID = "resourceId"; + private int resourceId; + private List<MetricDisplaySummary> metricDisplaySummaries; + private List<List<MeasurementDataNumericHighLowComposite>> metricsDataList; + private MeasurementUserPreferences measurementUserPrefs; + + public MetricsTableDataSource(int resourceId) { + this.resourceId = resourceId; + measurementUserPrefs = new MeasurementUserPreferences(UserSessionManager.getUserPreferences()); + } + + /** + * The view that contains the list grid which will display this datasource's data will call this + * method to get the field information which is used to control the display of the data. 
+ * + * @return list grid fields used to display the datasource data + */ + public ArrayList<ListGridField> getListGridFields() { + ArrayList<ListGridField> fields = new ArrayList<ListGridField>(7); + + ListGridField sparklineField = new ListGridField(FIELD_SPARKLINE, "chart"); + sparklineField.setCellFormatter(new CellFormatter() { + @Override + public String format(Object value, ListGridRecord record, int rowNum, int colNum) { + if (value == null) { + return ""; + } + String contents = "<span id='sparkline_" + resourceId + "-" + + record.getAttributeAsInt(FIELD_METRIC_DEF_ID) + "' class='dynamicsparkline' width='70' " + + "values='" + record.getAttribute(FIELD_SPARKLINE) + "'></span>"; + return contents; + + } + }); + + sparklineField.setWidth(80); + fields.add(sparklineField); + + ListGridField nameField = new ListGridField(FIELD_METRIC_LABEL, MSG.common_title_name()); + nameField.setWidth("30%"); + fields.add(nameField); + + ListGridField alertsField = new ListGridField(FIELD_ALERT_COUNT, MSG.common_title_alerts()); + alertsField.setWidth("10%"); + fields.add(alertsField); + + ListGridField minField = new ListGridField(FIELD_MIN_VALUE, MSG.view_resource_monitor_table_min()); + minField.setWidth("15%"); + fields.add(minField); + + ListGridField maxField = new ListGridField(FIELD_MAX_VALUE, MSG.view_resource_monitor_table_max()); + maxField.setWidth("15%"); + fields.add(maxField); + + ListGridField avgField = new ListGridField(FIELD_AVG_VALUE, MSG.view_resource_monitor_table_avg()); + avgField.setWidth("15%"); + fields.add(avgField); + + ListGridField lastField = new ListGridField(FIELD_LAST_VALUE, MSG.view_resource_monitor_table_last()); + lastField.setWidth("15%"); + fields.add(lastField); + + return fields; + } + + @Override + public MetricDisplaySummary copyValues(Record from) { + // we should never need this method - we only go in one direction + // if we ever need this, just have copyValues store an "object" attribute whose value is "from" + // which this 
method then just reads out. Since we don't need this now, save memory by not + // keeping the MetricDisplayValue around + return null; + } + + @Override + public ListGridRecord copyValues(MetricDisplaySummary from) { + MeasurementUtility.formatSimpleMetrics(from); + + ListGridRecord record = new ListGridRecord(); + record.setAttribute(FIELD_SPARKLINE, getCsvMetricsForSparkline()); + record.setAttribute(FIELD_METRIC_LABEL, from.getLabel()); + record.setAttribute(FIELD_ALERT_COUNT, String.valueOf(from.getAlertCount())); + record.setAttribute(FIELD_MIN_VALUE, getMetricStringValue(from.getMinMetric())); + record.setAttribute(FIELD_MAX_VALUE, getMetricStringValue(from.getMaxMetric())); + record.setAttribute(FIELD_AVG_VALUE, getMetricStringValue(from.getAvgMetric())); + record.setAttribute(FIELD_LAST_VALUE, getMetricStringValue(from.getLastMetric())); + record.setAttribute(FIELD_METRIC_DEF_ID, from.getDefinitionId()); + record.setAttribute(FIELD_METRIC_SCHED_ID, from.getScheduleId()); + record.setAttribute(FIELD_METRIC_UNITS, from.getUnits()); + record.setAttribute(FIELD_METRIC_NAME, from.getMetricName()); + record.setAttribute(FIELD_RESOURCE_ID, resourceId); + return record; + } + + private String getCsvMetricsForSparkline() { + StringBuilder sb = new StringBuilder(); + Log.debug("getCsvMetricsForSparkline.metricsDataList: " + metricsDataList.size()); + for (List<MeasurementDataNumericHighLowComposite> measurementData : metricsDataList) { + for (int i = 0; i < measurementData.size(); i++) { + // take the last 20 values + if (i >= measurementData.size() - 20) { + if (!Double.isNaN(measurementData.get(i).getValue())) { + sb.append((int) measurementData.get(i).getValue()); + sb.append(","); + } + } + } + if (sb.toString().endsWith(",")) { + sb.setLength(sb.length() - 1); + } + } + Log.debug("getCsvMetricsForSparkline: " + sb.toString()); + + return sb.toString(); + } + + protected String getMetricStringValue(MetricDisplayValue value) { + return (value != null) ? 
value.toString() : ""; + } + + @Override + protected Criteria getFetchCriteria(DSRequest request) { + // we don't use criterias for this datasource, just return null + return null; + } + + @Override + protected void executeFetch(final DSRequest request, final DSResponse response, final Criteria unused) { + + GWTServiceLookup.getMeasurementScheduleService().findSchedulesForResourceAndType(resourceId, + DataType.MEASUREMENT, null, true, new AsyncCallback<ArrayList<MeasurementSchedule>>() { + @Override + public void onSuccess(ArrayList<MeasurementSchedule> measurementSchedules) { + int[] scheduleIds = new int[measurementSchedules.size()]; + int i = 0; + for (MeasurementSchedule measurementSchedule : measurementSchedules) { + scheduleIds[i++] = measurementSchedule.getId(); + } + + final CountDownLatch countDownLatch = CountDownLatch.create(2, new Command() { + + @Override + public void execute() { + response.setData(buildRecords(metricDisplaySummaries)); + processResponse(request.getRequestId(), response); + + new Timer() { + + @Override + public void run() { + BrowserUtility.graphSparkLines(); + } + }.schedule(150); + Log.debug("Finished CountdownLatch for metrics loaded: " + metricsDataList.size()); + } + }); + + retrieveResourceMetrics(resourceId, countDownLatch); + + GWTServiceLookup.getMeasurementChartsService().getMetricDisplaySummariesForResource(resourceId, + scheduleIds, measurementUserPrefs.getMetricRangePreferences().begin, + measurementUserPrefs.getMetricRangePreferences().end, + new AsyncCallback<ArrayList<MetricDisplaySummary>>() { + @Override + public void onSuccess(ArrayList<MetricDisplaySummary> metricDisplaySummaries) { + setMetricDisplaySummaries(metricDisplaySummaries); + countDownLatch.countDown(); + } + + @Override + public void onFailure(Throwable caught) { + CoreGUI.getErrorHandler().handleError("Cannot load metrics", caught); + countDownLatch.countDown(); + } + } + + ); + } + + @Override + public void onFailure(Throwable caught) { + 
CoreGUI.getErrorHandler().handleError("Cannot load schedules", caught); + } + }); + } + + void setMetricDisplaySummaries(List<MetricDisplaySummary> metricDisplaySummaries) { + this.metricDisplaySummaries = metricDisplaySummaries; + } + + public void retrieveResourceMetrics(final Integer resourceId, final CountDownLatch countDownLatch) { + + ResourceCriteria criteria = new ResourceCriteria(); + criteria.addFilterId(resourceId); + + //locate the resource + GWTServiceLookup.getResourceService().findResourceCompositesByCriteria(criteria, + new AsyncCallback<PageList<ResourceComposite>>() { + @Override + public void onFailure(Throwable caught) { + Log.debug("Error retrieving resource resource composite for resource [" + resourceId + "]:" + + caught.getMessage()); + } + + @Override + public void onSuccess(PageList<ResourceComposite> resourceCompositePageList) { + if (!resourceCompositePageList.isEmpty()) { + final ResourceComposite resourceComposite = resourceCompositePageList.get(0); + final Resource resource = resourceComposite.getResource(); + // Load the fully fetched ResourceType. + ResourceType resourceType = resource.getResourceType(); + ResourceTypeRepository.Cache.getInstance().getResourceTypes(resourceType.getId(), + EnumSet.of(ResourceTypeRepository.MetadataType.measurements), + new ResourceTypeRepository.TypeLoadedCallback() { + public void onTypesLoaded(ResourceType type) { + resource.setResourceType(type); + //metric definitions + Set<MeasurementDefinition> definitions = type.getMetricDefinitions(); + + //build id mapping for measurementDefinition instances Ex. Free Memory -> MeasurementDefinition[100071] + final HashMap<String, MeasurementDefinition> measurementDefMap = new HashMap<String, MeasurementDefinition>(); + for (MeasurementDefinition definition : definitions) { + measurementDefMap.put(definition.getDisplayName(), definition); + } + //bundle definition ids for asynch call. 
+ int[] definitionArrayIds = new int[definitions.size()]; + final String[] displayOrder = new String[definitions.size()]; + measurementDefMap.keySet().toArray(displayOrder); + //sort the charting data ex. Free Memory, Free Swap Space,..System Load + Arrays.sort(displayOrder); + + //organize definitionArrayIds for ordered request on server. + int index = 0; + for (String definitionToDisplay : displayOrder) { + definitionArrayIds[index++] = measurementDefMap.get(definitionToDisplay) + .getId(); + } + + GWTServiceLookup.getMeasurementDataService().findDataForResource(resourceId, + definitionArrayIds, measurementUserPrefs.getMetricRangePreferences().begin, + measurementUserPrefs.getMetricRangePreferences().end, 60, + new AsyncCallback<List<List<MeasurementDataNumericHighLowComposite>>>() { + @Override + public void onFailure(Throwable caught) { + Log.warn("Error retrieving recent metrics charting data for resource [" + + resourceId + "]:" + caught.getMessage()); + } + + @Override + public void onSuccess( + List<List<MeasurementDataNumericHighLowComposite>> measurementDataList) { + + if (!measurementDataList.isEmpty()) { + metricsDataList = measurementDataList; + Log.debug("*** Setting metricsDataList.size: " + + metricsDataList.size()); + countDownLatch.countDown(); + } + } + }); + + } + }); + } + } + }); + + } +} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/D3GraphListView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/D3GraphListView.java index 8b9f327..5635d88 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/D3GraphListView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/D3GraphListView.java @@ -60,9 +60,9 @@ import 
org.rhq.enterprise.gui.coregui.client.util.async.CountDownLatch; */ public class D3GraphListView extends AbstractD3GraphListView {
- private static int NUM_ASYNC_CALLS = 2; // wait for X async calls in Latch + private static final int NUM_ASYNC_CALLS = 2; // wait for X async calls in Latch
- private Resource resource; + private final Resource resource; private Set<Integer> definitionIds = null; private boolean useSummaryData = false; private PageList<MeasurementOOBComposite> measurementOOBCompositeList; @@ -91,7 +91,7 @@ public class D3GraphListView extends AbstractD3GraphListView { return D3GraphListView.createSingleGraph(resource, measurementId, false); }
- protected D3GraphListView(Resource resource, Set<Integer> definitionIds, boolean showAvailabilityGraph) { + private D3GraphListView(Resource resource, Set<Integer> definitionIds, boolean showAvailabilityGraph) { super(); this.resource = resource; commonConstructorSettings(); @@ -99,7 +99,7 @@ public class D3GraphListView extends AbstractD3GraphListView { this.showAvailabilityGraph = showAvailabilityGraph; }
- protected D3GraphListView(Resource resource, boolean showAvailabilityGraph) { + private D3GraphListView(Resource resource, boolean showAvailabilityGraph) { super(); this.resource = resource; this.showAvailabilityGraph = showAvailabilityGraph; @@ -130,9 +130,7 @@ public class D3GraphListView extends AbstractD3GraphListView { vLayout.setWidth100(); vLayout.setHeight100();
- if (resource != null) { - queryAndBuildGraphs(); - } + queryAndBuildGraphs(); addMember(vLayout); }
@@ -176,7 +174,7 @@ public class D3GraphListView extends AbstractD3GraphListView { private void queryAndBuildGraphs() { final long startTimer = System.currentTimeMillis();
- if (null != availabilityGraph) { + if (showAvailabilityGraph) { queryAvailability(EntityContext.forResource(resource.getId()), buttonBarDateTimeRangeEditor.getStartTime(), buttonBarDateTimeRangeEditor.getEndTime(), null); } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableDataSource.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableDataSource.java deleted file mode 100644 index 57e62ab..0000000 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableDataSource.java +++ /dev/null @@ -1,335 +0,0 @@ -package org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.table; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.EnumSet; -import java.util.HashMap; -import java.util.List; -import java.util.Set; - -import com.google.gwt.user.client.Timer; -import com.google.gwt.user.client.rpc.AsyncCallback; -import com.smartgwt.client.data.DSRequest; -import com.smartgwt.client.data.DSResponse; -import com.smartgwt.client.data.Record; -import com.smartgwt.client.widgets.grid.CellFormatter; -import com.smartgwt.client.widgets.grid.ListGridField; -import com.smartgwt.client.widgets.grid.ListGridRecord; - -import org.rhq.core.domain.criteria.Criteria; -import org.rhq.core.domain.criteria.ResourceCriteria; -import org.rhq.core.domain.measurement.DataType; -import org.rhq.core.domain.measurement.MeasurementDefinition; -import org.rhq.core.domain.measurement.MeasurementSchedule; -import org.rhq.core.domain.measurement.composite.MeasurementDataNumericHighLowComposite; -import org.rhq.core.domain.measurement.ui.MetricDisplaySummary; -import org.rhq.core.domain.measurement.ui.MetricDisplayValue; -import org.rhq.core.domain.resource.Resource; -import 
org.rhq.core.domain.resource.ResourceType; -import org.rhq.core.domain.resource.composite.ResourceComposite; -import org.rhq.core.domain.util.PageList; -import org.rhq.enterprise.gui.coregui.client.CoreGUI; -import org.rhq.enterprise.gui.coregui.client.UserSessionManager; -import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; -import org.rhq.enterprise.gui.coregui.client.inventory.resource.type.ResourceTypeRepository; -import org.rhq.enterprise.gui.coregui.client.util.BrowserUtility; -import org.rhq.enterprise.gui.coregui.client.util.Log; -import org.rhq.enterprise.gui.coregui.client.util.MeasurementUtility; -import org.rhq.enterprise.gui.coregui.client.util.RPCDataSource; -import org.rhq.enterprise.gui.coregui.client.util.async.Command; -import org.rhq.enterprise.gui.coregui.client.util.async.CountDownLatch; -import org.rhq.enterprise.gui.coregui.client.util.preferences.MeasurementUserPreferences; - -/** - * A simple data source to read in metric data summaries for a resource. - * This doesn't support paging - everything is returned in one query. Since - * the number of metrics per resource is relatively small (never more than tens of them), - * we just load them all in at once. 
- * - * @author John Mazzitelli - * @author Mike Thompson - * @todo: get rid of this once we have tested the new screen out - */ -@Deprecated -public class MetricsTableDataSource extends RPCDataSource<MetricDisplaySummary, Criteria> { - - public static final String FIELD_SPARKLINE = "sparkline"; - public static final String FIELD_METRIC_LABEL = "label"; - public static final String FIELD_ALERT_COUNT = "alertCount"; - public static final String FIELD_MIN_VALUE = "min"; - public static final String FIELD_MAX_VALUE = "max"; - public static final String FIELD_AVG_VALUE = "avg"; - public static final String FIELD_LAST_VALUE = "last"; - public static final String FIELD_METRIC_DEF_ID = "defId"; - public static final String FIELD_METRIC_SCHED_ID = "schedId"; - public static final String FIELD_METRIC_UNITS = "units"; - public static final String FIELD_METRIC_NAME = "name"; - public static final String FIELD_RESOURCE_ID = "resourceId"; - private int resourceId; - private List<MetricDisplaySummary> metricDisplaySummaries; - private List<List<MeasurementDataNumericHighLowComposite>> metricsDataList; - private MeasurementUserPreferences measurementUserPrefs; - - public MetricsTableDataSource(int resourceId) { - this.resourceId = resourceId; - measurementUserPrefs = new MeasurementUserPreferences(UserSessionManager.getUserPreferences()); - } - - /** - * The view that contains the list grid which will display this datasource's data will call this - * method to get the field information which is used to control the display of the data. 
- * - * @return list grid fields used to display the datasource data - */ - public ArrayList<ListGridField> getListGridFields() { - ArrayList<ListGridField> fields = new ArrayList<ListGridField>(7); - - ListGridField sparklineField = new ListGridField(FIELD_SPARKLINE, "chart"); - sparklineField.setCellFormatter(new CellFormatter() { - @Override - public String format(Object value, ListGridRecord record, int rowNum, int colNum) { - if (value == null) { - return ""; - } - String contents = "<span id='sparkline_" + resourceId + "-" - + record.getAttributeAsInt(FIELD_METRIC_DEF_ID) + "' class='dynamicsparkline' width='70' " - + "values='" + record.getAttribute(FIELD_SPARKLINE) + "'></span>"; - return contents; - - } - }); - - sparklineField.setWidth(80); - fields.add(sparklineField); - - ListGridField nameField = new ListGridField(FIELD_METRIC_LABEL, MSG.common_title_name()); - nameField.setWidth("30%"); - fields.add(nameField); - - ListGridField alertsField = new ListGridField(FIELD_ALERT_COUNT, MSG.common_title_alerts()); - alertsField.setWidth("10%"); - fields.add(alertsField); - - ListGridField minField = new ListGridField(FIELD_MIN_VALUE, MSG.view_resource_monitor_table_min()); - minField.setWidth("15%"); - fields.add(minField); - - ListGridField maxField = new ListGridField(FIELD_MAX_VALUE, MSG.view_resource_monitor_table_max()); - maxField.setWidth("15%"); - fields.add(maxField); - - ListGridField avgField = new ListGridField(FIELD_AVG_VALUE, MSG.view_resource_monitor_table_avg()); - avgField.setWidth("15%"); - fields.add(avgField); - - ListGridField lastField = new ListGridField(FIELD_LAST_VALUE, MSG.view_resource_monitor_table_last()); - lastField.setWidth("15%"); - fields.add(lastField); - - return fields; - } - - @Override - public MetricDisplaySummary copyValues(Record from) { - // we should never need this method - we only go in one direction - // if we ever need this, just have copyValues store an "object" attribute whose value is "from" - // which this 
method then just reads out. Since we don't need this now, save memory by not - // keeping the MetricDisplayValue around - return null; - } - - @Override - public ListGridRecord copyValues(MetricDisplaySummary from) { - MeasurementUtility.formatSimpleMetrics(from); - - ListGridRecord record = new ListGridRecord(); - record.setAttribute(FIELD_SPARKLINE, getCsvMetricsForSparkline()); - record.setAttribute(FIELD_METRIC_LABEL, from.getLabel()); - record.setAttribute(FIELD_ALERT_COUNT, String.valueOf(from.getAlertCount())); - record.setAttribute(FIELD_MIN_VALUE, getMetricStringValue(from.getMinMetric())); - record.setAttribute(FIELD_MAX_VALUE, getMetricStringValue(from.getMaxMetric())); - record.setAttribute(FIELD_AVG_VALUE, getMetricStringValue(from.getAvgMetric())); - record.setAttribute(FIELD_LAST_VALUE, getMetricStringValue(from.getLastMetric())); - record.setAttribute(FIELD_METRIC_DEF_ID, from.getDefinitionId()); - record.setAttribute(FIELD_METRIC_SCHED_ID, from.getScheduleId()); - record.setAttribute(FIELD_METRIC_UNITS, from.getUnits()); - record.setAttribute(FIELD_METRIC_NAME, from.getMetricName()); - record.setAttribute(FIELD_RESOURCE_ID, resourceId); - return record; - } - - private String getCsvMetricsForSparkline() { - StringBuilder sb = new StringBuilder(); - Log.debug("getCsvMetricsForSparkline.metricsDataList: " + metricsDataList.size()); - for (List<MeasurementDataNumericHighLowComposite> measurementData : metricsDataList) { - for (int i = 0; i < measurementData.size(); i++) { - // take the last 20 values - if (i >= measurementData.size() - 20) { - if (!Double.isNaN(measurementData.get(i).getValue())) { - sb.append((int) measurementData.get(i).getValue()); - sb.append(","); - } - } - } - if (sb.toString().endsWith(",")) { - sb.setLength(sb.length() - 1); - } - } - Log.debug("getCsvMetricsForSparkline: " + sb.toString()); - - return sb.toString(); - } - - protected String getMetricStringValue(MetricDisplayValue value) { - return (value != null) ? 
value.toString() : ""; - } - - @Override - protected Criteria getFetchCriteria(DSRequest request) { - // we don't use criterias for this datasource, just return null - return null; - } - - @Override - protected void executeFetch(final DSRequest request, final DSResponse response, final Criteria unused) { - - GWTServiceLookup.getMeasurementScheduleService().findSchedulesForResourceAndType(resourceId, - DataType.MEASUREMENT, null, true, new AsyncCallback<ArrayList<MeasurementSchedule>>() { - @Override - public void onSuccess(ArrayList<MeasurementSchedule> measurementSchedules) { - int[] scheduleIds = new int[measurementSchedules.size()]; - int i = 0; - for (MeasurementSchedule measurementSchedule : measurementSchedules) { - scheduleIds[i++] = measurementSchedule.getId(); - } - - final CountDownLatch countDownLatch = CountDownLatch.create(2, new Command() { - - @Override - public void execute() { - response.setData(buildRecords(metricDisplaySummaries)); - processResponse(request.getRequestId(), response); - - new Timer() { - - @Override - public void run() { - BrowserUtility.graphSparkLines(); - } - }.schedule(150); - Log.debug("*** Finished CountdownLatch for metrics loaded: " + metricsDataList.size()); - } - }); - - retrieveResourceMetrics(resourceId, countDownLatch); - - GWTServiceLookup.getMeasurementChartsService().getMetricDisplaySummariesForResource(resourceId, - scheduleIds, measurementUserPrefs.getMetricRangePreferences().begin, - measurementUserPrefs.getMetricRangePreferences().end, - new AsyncCallback<ArrayList<MetricDisplaySummary>>() { - @Override - public void onSuccess(ArrayList<MetricDisplaySummary> metricDisplaySummaries) { - setMetricDisplaySummaries(metricDisplaySummaries); - countDownLatch.countDown(); - } - - @Override - public void onFailure(Throwable caught) { - CoreGUI.getErrorHandler().handleError("Cannot load metrics", caught); - countDownLatch.countDown(); - } - } - - ); - } - - @Override - public void onFailure(Throwable caught) { - 
CoreGUI.getErrorHandler().handleError("Cannot load schedules", caught); - } - }); - } - - void setMetricDisplaySummaries(List<MetricDisplaySummary> metricDisplaySummaries) { - this.metricDisplaySummaries = metricDisplaySummaries; - } - - public void retrieveResourceMetrics(final Integer resourceId, final CountDownLatch countDownLatch) { - - ResourceCriteria criteria = new ResourceCriteria(); - criteria.addFilterId(resourceId); - - //locate the resource - GWTServiceLookup.getResourceService().findResourceCompositesByCriteria(criteria, - new AsyncCallback<PageList<ResourceComposite>>() { - @Override - public void onFailure(Throwable caught) { - Log.debug("Error retrieving resource resource composite for resource [" + resourceId + "]:" - + caught.getMessage()); - } - - @Override - public void onSuccess(PageList<ResourceComposite> resourceCompositePageList) { - if (!resourceCompositePageList.isEmpty()) { - final ResourceComposite resourceComposite = resourceCompositePageList.get(0); - final Resource resource = resourceComposite.getResource(); - // Load the fully fetched ResourceType. - ResourceType resourceType = resource.getResourceType(); - ResourceTypeRepository.Cache.getInstance().getResourceTypes(resourceType.getId(), - EnumSet.of(ResourceTypeRepository.MetadataType.measurements), - new ResourceTypeRepository.TypeLoadedCallback() { - public void onTypesLoaded(ResourceType type) { - resource.setResourceType(type); - //metric definitions - Set<MeasurementDefinition> definitions = type.getMetricDefinitions(); - - //build id mapping for measurementDefinition instances Ex. Free Memory -> MeasurementDefinition[100071] - final HashMap<String, MeasurementDefinition> measurementDefMap = new HashMap<String, MeasurementDefinition>(); - for (MeasurementDefinition definition : definitions) { - measurementDefMap.put(definition.getDisplayName(), definition); - } - //bundle definition ids for asynch call. 
- int[] definitionArrayIds = new int[definitions.size()]; - final String[] displayOrder = new String[definitions.size()]; - measurementDefMap.keySet().toArray(displayOrder); - //sort the charting data ex. Free Memory, Free Swap Space,..System Load - Arrays.sort(displayOrder); - - //organize definitionArrayIds for ordered request on server. - int index = 0; - for (String definitionToDisplay : displayOrder) { - definitionArrayIds[index++] = measurementDefMap.get(definitionToDisplay) - .getId(); - } - - GWTServiceLookup.getMeasurementDataService().findDataForResource(resourceId, - definitionArrayIds, measurementUserPrefs.getMetricRangePreferences().begin, - measurementUserPrefs.getMetricRangePreferences().end, 60, - new AsyncCallback<List<List<MeasurementDataNumericHighLowComposite>>>() { - @Override - public void onFailure(Throwable caught) { - Log.warn("Error retrieving recent metrics charting data for resource [" - + resourceId + "]:" + caught.getMessage()); - } - - @Override - public void onSuccess( - List<List<MeasurementDataNumericHighLowComposite>> measurementDataList) { - - if (!measurementDataList.isEmpty()) { - metricsDataList = measurementDataList; - Log.debug("*** Setting metricsDataList.size: " - + metricsDataList.size()); - countDownLatch.countDown(); - } - } - }); - - } - }); - } - } - }); - - } -}
commit 92cb23791fc56b03ada9d1dcfb1d48aa469e6677 Author: Stefan Negrea snegrea@redhat.com Date: Thu Aug 1 13:07:20 2013 -0500
Extensive Cassandra schema manager updates: 1) The entire code was simplified and all queries moved away from code. 2) It is now possible to bind variables to queries (eg. user and password). 3) Simplified the external interface. 4) Only the main interface is now exposed outside of the package. 5) Added unit tests for the file loading and binding code 6) Cleaned the queries and the execution flow 7) The drop restores Cassandra to the original state, pre install
diff --git a/.classpath b/.classpath index 386316a..cad0bdb 100644 --- a/.classpath +++ b/.classpath @@ -216,6 +216,8 @@ <classpathentry kind="src" path="modules/enterprise/server/data-migration/src/main/java"/> <classpathentry kind="src" path="modules/enterprise/server/data-migration/src/test/java"/> <classpathentry kind="src" path="modules/helpers/ldap-tool/src/main/java"/> + <classpathentry kind="src" path="modules/common/cassandra-schema/src/test/java"/> + <classpathentry kind="src" path="modules/plugins/rhq-storage/src/test/java"/> <classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.7"/> <classpathentry exported="true" kind="var" path="M2_REPO/org/apache/httpcomponents/httpclient/4.2.3/httpclient-4.2.3.jar" sourcepath="M2_REPO/org/apache/httpcomponents/httpclient/4.2.3/httpclient-4.2.3-sources.jar"/> <classpathentry exported="true" kind="var" path="M2_REPO/commons-io/commons-io/2.1/commons-io-2.1.jar" sourcepath="M2_REPO/commons-io/commons-io/2.1/commons-io-2.1-sources.jar"/> diff --git a/modules/common/cassandra-ccm/cassandra-ccm-arquillian/src/main/java/org/rhq/cassandra/ccm/arquillian/CCMSuiteDeploymentExtension.java b/modules/common/cassandra-ccm/cassandra-ccm-arquillian/src/main/java/org/rhq/cassandra/ccm/arquillian/CCMSuiteDeploymentExtension.java index a86c49e..d307f0b 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-arquillian/src/main/java/org/rhq/cassandra/ccm/arquillian/CCMSuiteDeploymentExtension.java +++ b/modules/common/cassandra-ccm/cassandra-ccm-arquillian/src/main/java/org/rhq/cassandra/ccm/arquillian/CCMSuiteDeploymentExtension.java @@ -159,7 +159,7 @@ public class CCMSuiteDeploymentExtension implements LoadableExtension { try { schemaManager.install(); clusterInitService.waitForSchemaAgreement(nodes); - schemaManager.updateTopology(true); + schemaManager.updateTopology(); } catch (Exception e) { if (null != ccm) { ccm.shutdownCluster(); diff --git 
a/modules/common/cassandra-ccm/cassandra-ccm-maven-plugin/src/main/java/org/rhq/cassandra/ccm/maven/DeployMojo.java b/modules/common/cassandra-ccm/cassandra-ccm-maven-plugin/src/main/java/org/rhq/cassandra/ccm/maven/DeployMojo.java index b84018f..f50535c 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-maven-plugin/src/main/java/org/rhq/cassandra/ccm/maven/DeployMojo.java +++ b/modules/common/cassandra-ccm/cassandra-ccm-maven-plugin/src/main/java/org/rhq/cassandra/ccm/maven/DeployMojo.java @@ -74,7 +74,7 @@ public class DeployMojo extends AbstractMojo {
try { schemaManager.install(); - schemaManager.updateTopology(true); + schemaManager.updateTopology(); } catch (Exception e) { throw new MojoExecutionException("Schema installation failed.", e); } diff --git a/modules/common/cassandra-ccm/cassandra-ccm-testng/src/main/java/org/rhq/cassandra/CCMTestNGListener.java b/modules/common/cassandra-ccm/cassandra-ccm-testng/src/main/java/org/rhq/cassandra/CCMTestNGListener.java index 38d5337..a9292f7 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-testng/src/main/java/org/rhq/cassandra/CCMTestNGListener.java +++ b/modules/common/cassandra-ccm/cassandra-ccm-testng/src/main/java/org/rhq/cassandra/CCMTestNGListener.java @@ -129,7 +129,7 @@ public class CCMTestNGListener implements IInvokedMethodListener { if (annotation.waitForSchemaAgreement()) { clusterInitService.waitForSchemaAgreement(nodes); } - schemaManager.updateTopology(true); + schemaManager.updateTopology(); }
private void shutdownCluster() throws Exception { diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/AbstractManager.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/AbstractManager.java index bcf9831..baf7c23 100644 --- a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/AbstractManager.java +++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/AbstractManager.java @@ -25,21 +25,9 @@
package org.rhq.cassandra.schema;
-import java.io.BufferedReader; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.net.JarURLConnection; -import java.net.URL; import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.Enumeration; import java.util.List; -import java.util.jar.JarEntry; -import java.util.jar.JarFile; - -import javax.xml.parsers.DocumentBuilder; -import javax.xml.parsers.DocumentBuilderFactory; +import java.util.Properties;
import com.datastax.driver.core.Cluster; import com.datastax.driver.core.ProtocolOptions.Compression; @@ -50,9 +38,6 @@ import com.datastax.driver.core.exceptions.NoHostAvailableException;
import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.w3c.dom.Document; -import org.w3c.dom.Node; -import org.w3c.dom.NodeList;
import org.rhq.cassandra.util.ClusterBuilder; import org.rhq.core.domain.cloud.StorageNode; @@ -61,26 +46,35 @@ import org.rhq.core.util.StringUtil; /** * @author Stefan Negrea */ -public class AbstractManager { - - private static final String UPDATE_PLAN_ELEMENT = "updatePlan"; - private static final String STEP_ELEMENT = "step"; - - private static final String SCHEMA_EXISTS_QUERY = "SELECT * FROM system.schema_keyspaces WHERE keyspace_name = 'rhq';"; - private static final String VERSION_COLUMNFAMILY_EXISTS_QUERY = "SELECT * from system.schema_columnfamilies WHERE keyspace_name='rhq' AND columnfamily_name='schema_version';"; - private static final String VERSION_QUERY = "SELECT version FROM rhq.schema_version"; - private static final String REPLICATION_FACTOR_QUERY = "SELECT strategy_options FROM system.schema_keyspaces where keyspace_name='rhq';"; - +abstract class AbstractManager {
+ private static final String MANAGEMENT_BASE_FOLDER = "management"; + protected static final String DEFAULT_CASSANDRA_USER = "cassandra"; + protected static final String DEFAULT_CASSANDRA_PASSWORD = "cassandra";
private final Log log = LogFactory.getLog(AbstractManager.class);
- protected Session session; - protected final String username; - protected final String password; - protected List<StorageNode> nodes = new ArrayList<StorageNode>(); + enum Query { + USER_EXISTS, + SCHEMA_EXISTS, + VERSION_COLUMNFAMILY_EXISTS, + VERSION, + REPLICATION_FACTOR, + INSERT_SCHEMA_VERSION; + + @Override + public String toString() { + return this.name().toLowerCase(); + } + } + + private Session session; + private final String username; + private final String password; + private List<StorageNode> nodes = new ArrayList<StorageNode>(); + private final UpdateFile managementTasks;
- public AbstractManager(String username, String password, List<StorageNode> nodes) { + protected AbstractManager(String username, String password, List<StorageNode> nodes) { try { this.username = username; this.password = password; @@ -88,13 +82,106 @@ public class AbstractManager { } catch (NoHostAvailableException e) { throw new RuntimeException("Unable create storage node session.", e); } + + try { + UpdateFolder managementFolder = new UpdateFolder(MANAGEMENT_BASE_FOLDER); + managementTasks = managementFolder.getUpdateFiles().get(0); + } catch (Exception e) { + throw new RuntimeException("Unable create storage node session.", e); + } + } + + /** + * Init the Cassandra cluster session with the username and password provided + * at creation. + */ + protected void initClusterSession() { + initClusterSession(username, password); + } + + /** + * Init the Cassandra cluster session with provided username and password. + * + * @param username + * @param password + */ + protected void initClusterSession(String username, String password) { + shutdownClusterConnection(); + + String[] hostNames = new String[nodes.size()]; + for (int i = 0; i < hostNames.length; ++i) { + hostNames[i] = nodes.get(i).getAddress(); + } + + log.info("Initializing session to connect to " + StringUtil.arrayToString(hostNames)); + + Cluster cluster = new ClusterBuilder().addContactPoints(hostNames).withCredentials(username, password) + .withPort(nodes.get(0).getCqlPort()).withCompression(Compression.NONE).build(); + + log.info("Cluster connection configured."); + + session = cluster.connect("system"); + log.info("Cluster connected."); + } + + /** + * Shutdown the Cassandra cluster connection. + */ + protected void shutdownClusterConnection() { + log.info("Shutting down existing cluster connections"); + if (session != null && session.getCluster() != null) { + session.getCluster().shutdown(); + } + } + + /** + * Get cluster size. 
+ * + * @return cluster size + */ + protected int getClusterSize() { + return nodes.size(); + } + + /** + * @return the username + */ + protected String getUsername() { + return username; + } + + /** + * @return the password + */ + protected String getPassword() { + return password; + } + + /** + * Runs a CQL query to check the existence of the RHQ user + * + * @return true if the RHQ user exists, false otherwise + */ + protected boolean userExists() { + try { + ResultSet resultSet = executeManagementQuery(Query.USER_EXISTS, "username", username); + return !resultSet.all().isEmpty(); + } catch (Exception e) { + log.error(e); + throw new RuntimeException(e); + } }
+ /** + * Run a CQL query to check the existence of the RHQ schema + * + * @return true if the RHQ schema exists, false otherwise + */ protected boolean schemaExists() { try { - ResultSet resultSet = session.execute(SCHEMA_EXISTS_QUERY); + ResultSet resultSet = executeManagementQuery(Query.SCHEMA_EXISTS); if (!resultSet.all().isEmpty()) { - resultSet = session.execute(VERSION_COLUMNFAMILY_EXISTS_QUERY); + resultSet = executeManagementQuery(Query.VERSION_COLUMNFAMILY_EXISTS); return !resultSet.all().isEmpty(); } return false; @@ -104,10 +191,15 @@ public class AbstractManager { } }
+ /** + * Run a CQL query to retrieve the current RHQ schema version + * + * @return current RHQ schema version + */ protected int getSchemaVersion() { int maxVersion = 0; try { - ResultSet resultSet = session.execute(VERSION_QUERY); + ResultSet resultSet = executeManagementQuery(Query.VERSION); for (Row row : resultSet.all()) { if (maxVersion < row.getInt(0)) { maxVersion = row.getInt(0); @@ -121,155 +213,140 @@ public class AbstractManager { return maxVersion; }
- protected void removeAppliedUpdates(List<String> updateFiles, int currentSchemaVersion) { - while (!updateFiles.isEmpty()) { - int version = this.extractVersionFromUpdateFile(updateFiles.get(0)); - if (version <= currentSchemaVersion) { - updateFiles.remove(0); - } else { - break; - } + /** + * Calculate the replication factor based on the input cluster size. + * + * @return calculated replication factor + */ + protected int calculateNewReplicationFactor() { + int replicationFactor; + if (getClusterSize() < 3) { + replicationFactor = getClusterSize(); + } else if (getClusterSize() < 4) { + replicationFactor = 2; + } else { + replicationFactor = 3; } + return replicationFactor; }
- protected int extractVersionFromUpdateFile(String file) { - file = file.substring(file.lastIndexOf('/') + 1); - file = file.substring(0, file.indexOf('.')); - return Integer.parseInt(file); - } - - protected List<String> getSteps(String file) throws Exception { - List<String> steps = new ArrayList<String>(); - InputStream stream = null; + /** + * Run a CQL query to retrieve the current replication factor for RHQ schema. + * + * @return existing replication factor + */ + protected int queryReplicationFactor() { + int replicationFactor = 1; try { - stream = SchemaManager.class.getClassLoader().getResourceAsStream(file); - - DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance(); - DocumentBuilder builder = factory.newDocumentBuilder(); - Document doc = builder.parse(stream); + ResultSet resultSet = executeManagementQuery(Query.REPLICATION_FACTOR); + Row row = resultSet.one();
- Node rootDocument = doc.getElementsByTagName(UPDATE_PLAN_ELEMENT).item(0); - NodeList updateStepElements = rootDocument.getChildNodes(); + String replicationFactorString = "\"replication_factor\""; + String resultString = row.getString(0); + resultString = resultString.substring(resultString.indexOf(replicationFactorString) + + replicationFactorString.length()); + resultString = resultString.substring(resultString.indexOf('"') + 1); + resultString = resultString.substring(0, resultString.indexOf('"'));
- for (int index = 0; index < updateStepElements.getLength(); index++) { - Node updateStepElement = updateStepElements.item(index); - if (STEP_ELEMENT.equals(updateStepElement.getNodeName()) && updateStepElement.getTextContent() != null) { - steps.add(updateStepElement.getTextContent()); - } - } + replicationFactor = Integer.parseInt(resultString); } catch (Exception e) { - log.error("Error reading the list of steps from " + file + " file.", e); - throw e; - } finally { - if (stream != null) { - try { - stream.close(); - } catch (Exception e) { - log.error("Error closing the stream with the list of steps from " + file + " file.", e); - throw e; - } - } + log.error(e); }
- return steps; + return replicationFactor; }
- protected List<String> getUpdateFiles(String folder) throws Exception { - List<String> files = new ArrayList<String>(); - InputStream stream = null; - - try { - URL resourceFolderURL = this.getClass().getClassLoader().getResource(folder); + /** + * Execute a named management query. + * + * @param query named management query + * @return result + */ + protected ResultSet executeManagementQuery(Query query) { + return executeManagementQuery(query, null); + }
- if (resourceFolderURL.getProtocol().equals("file")) { - stream = this.getClass().getClassLoader().getResourceAsStream(folder); - BufferedReader reader = new BufferedReader(new InputStreamReader(stream)); + /** + * Execute a named management query with the given property (name,value). + * + * @param query named management query + * @param propertyName property name + * @param propertyValue property value. + * @return + */ + protected ResultSet executeManagementQuery(Query query, String propertyName, String propertyValue) { + Properties properties = new Properties(); + properties.put(propertyName, propertyValue); + return executeManagementQuery(query, properties); + }
- String updateFile; - while ((updateFile = reader.readLine()) != null) { - files.add(folder + updateFile); - } - } else if (resourceFolderURL.getProtocol().equals("jar")) { - URL jarURL = this.getClass().getClassLoader().getResources(folder).nextElement(); - JarURLConnection jarURLCon = (JarURLConnection) (jarURL.openConnection()); - JarFile jarFile = jarURLCon.getJarFile(); - Enumeration<JarEntry> entries = jarFile.entries(); - while (entries.hasMoreElements()) { - String entry = entries.nextElement().getName(); - if (entry.startsWith(folder) && !entry.equals(folder)) { - files.add(entry); - } - } - } + /** + * Execute a named management query with the given properties. + * + * @param query named management query + * @param properties properties + * @return + */ + protected ResultSet executeManagementQuery(Query query, Properties properties) { + String queryString = managementTasks.getNamedStep(query.toString(), properties); + return execute(queryString); + }
- Collections.sort(files, new Comparator<String>() { - @Override - public int compare(String o1, String o2) { - return o1.compareTo(o2); - } - }); - } catch (Exception e) { - log.error("Error reading the list of update files.", e); - throw e; - } finally { - if (stream != null) { - try{ - stream.close(); - } catch (Exception e) { - log.error("Error closing the stream with the list of update files.", e); - throw e; - } - } - }
- return files; + /** + * Execute all the queries in an update file as returned by @link {@link UpdateFile#getOrderedSteps()}. + * + * @param updateFile update file + * @return list of result sets, one for each executed query. + */ + protected List<ResultSet> execute(UpdateFile updateFile) { + return execute(updateFile, null); }
- protected void initCluster() { - initCluster(username, password); + /** + * Execute all the queries in an update file as returned by @link {@link UpdateFile#getOrderedSteps(Properties))} with + * the given property (name,value). + * + * @param updateFile update file + * @param propertyName property name + * @param propertyValue property value + * @return list of result sets, one for each executed query. + */ + protected List<ResultSet> execute(UpdateFile updateFile, String propertyName, String propertyValue) { + Properties properties = new Properties(); + properties.put(propertyName, propertyValue); + return execute(updateFile, properties); }
- protected void initCluster(String username, String password) { - String[] hostNames = new String[nodes.size()]; - for (int i = 0; i < hostNames.length; ++i) { - hostNames[i] = nodes.get(i).getAddress(); + /** + * Execute all the queries in an update file as returned by @link {@link UpdateFile#getOrderedSteps(Properties))} with + * the given property (name,value). + * + * @param updateFile update file + * @param properties properties + * @return list of result sets, one for each executed query. + */ + protected List<ResultSet> execute(UpdateFile updateFile, Properties properties) { + List<ResultSet> results = new ArrayList<ResultSet>(); + + log.info("Applying update file: " + updateFile); + for (String step : updateFile.getOrderedSteps(properties)) { + log.info("Statement: \n" + step); + results.add(execute(step)); } + log.info("Applied update file: " + updateFile);
- log.info("Initializing session to connect to " + StringUtil.arrayToString(hostNames)); - - Cluster cluster = new ClusterBuilder().addContactPoints(hostNames).withCredentials(username, password) - .withPort(nodes.get(0).getCqlPort()).withCompression(Compression.NONE).build(); - - log.info("Cluster connection configured."); - - session = cluster.connect("system"); - log.info("Cluster connected."); + return results; }
- protected void shutdown() { - log.info("Shutting down connections"); - session.getCluster().shutdown(); + /** + * Execute a CQL query. + * + * @param query query + * @return result for the query + */ + protected ResultSet execute(String query) { + return session.execute(query); }
- protected int getReplicationFactor() { - int replicationFactor = 1; - try { - String replicationFactorString = "replication_factor""; - - ResultSet resultSet = session.execute(REPLICATION_FACTOR_QUERY); - Row row = resultSet.one(); - - String resultString = row.getString(0); - resultString = resultString.substring(resultString.indexOf(replicationFactorString) - + replicationFactorString.length()); - resultString = resultString.substring(resultString.indexOf('"') + 1); - resultString = resultString.substring(0, resultString.indexOf('"')); - - replicationFactor = Integer.parseInt(resultString); - } catch (Exception e) { - log.error(e); - } - - return replicationFactor; - } } diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java index 2836964..8d28bfa 100644 --- a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java +++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java @@ -29,8 +29,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List;
-import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.log4j.ConsoleAppender; import org.apache.log4j.Level; import org.apache.log4j.Logger; @@ -43,8 +41,6 @@ import org.rhq.core.domain.cloud.StorageNode; */ public class SchemaManager {
- private final Log log = LogFactory.getLog(SchemaManager.class); - /** * The username that RHQ will use to connect to the storage cluster. */ @@ -83,25 +79,53 @@ public class SchemaManager { this.nodes.addAll(nodes); }
+ /** + * Install and update the RHQ schema. + * + * @throws Exception + */ public void install() throws Exception { VersionManager version = new VersionManager(username, password, nodes); version.install(); }
+ /** + * Drop RHQ schema and revert the database to pre-RHQ state. + * + * @throws Exception + */ public void drop() throws Exception { VersionManager version = new VersionManager(username, password, nodes); version.drop(); }
- public boolean updateTopology(boolean isNewSchema) throws Exception { + /** + * Update cluster topology settings, such as replication factor. + * + * @throws Exception + */ + public void updateTopology() throws Exception { TopologyManager topology = new TopologyManager(username, password, nodes); - return topology.updateTopology(isNewSchema); + topology.updateTopology(); }
+ /** + * Returns the list of storage nodes. + * + * @return list of storage nodes + */ public List<StorageNode> getStorageNodes() { return nodes; }
+ /** + * Parse raw string that contains the list of storage nodes. + * + * @param nodes list of storage nodes + * @return + */ private static List<StorageNode> parseNodeInformation(String... nodes) { List<StorageNode> parsedNodes = new ArrayList<StorageNode>(); for (String node : nodes) { @@ -113,6 +137,12 @@ public class SchemaManager { return parsedNodes; }
+ /** + * A main runner used for direct usage of the schema manager. + * + * @param args arguments + * @throws Exception + */ public static void main(String[] args) throws Exception { try { Logger root = Logger.getRootLogger(); @@ -126,29 +156,29 @@ public class SchemaManager { System.out.println("Usage : command username password nodes..."); System.out.println("\n"); System.out.println("Commands : install | drop | topology"); - System.out.println("Node format: hostname|thriftPort|nativeTransportPort"); - + System.out.println("Node format: hostname|jmxPort|cqlPort"); return; }
String command = args[0]; String username = args[1]; String password = args[2]; + String[] hosts = Arrays.copyOfRange(args, 3, args.length);
- SchemaManager schemaManager = new SchemaManager(username, password, - Arrays.copyOfRange(args, 3, args.length)); + SchemaManager schemaManager = new SchemaManager(username, password, hosts);
if ("install".equalsIgnoreCase(command)) { schemaManager.install(); } else if ("drop".equalsIgnoreCase(command)) { schemaManager.drop(); } else if ("topology".equalsIgnoreCase(command)) { - schemaManager.updateTopology(true); + schemaManager.updateTopology(); } else { throw new IllegalArgumentException(command + " not available."); } } catch (Exception e) { System.err.println(e); + e.printStackTrace(); } finally { System.exit(0); } diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/TopologyManager.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/TopologyManager.java index fd987a1..6c08faa 100644 --- a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/TopologyManager.java +++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/TopologyManager.java @@ -27,9 +27,6 @@ package org.rhq.cassandra.schema;
import java.util.List;
-import com.datastax.driver.core.BoundStatement; -import com.datastax.driver.core.PreparedStatement; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory;
@@ -38,12 +35,11 @@ import org.rhq.core.domain.cloud.StorageNode; /** * @author Stefan Negrea */ -public class TopologyManager extends AbstractManager { - - private final Log log = LogFactory.getLog(TopologyManager.class); +class TopologyManager extends AbstractManager {
private static final String TOPOLOGY_BASE_FOLDER = "topology";
+ private final Log log = LogFactory.getLog(TopologyManager.class);
private enum Task { UpdateReplicationFactor("0001.xml"), @@ -55,12 +51,8 @@ public class TopologyManager extends AbstractManager { this.file = file; }
- protected String getFile(boolean isNewSchema) { - if (isNewSchema) { - return TOPOLOGY_BASE_FOLDER + "/create/" + this.file; - } - - return TOPOLOGY_BASE_FOLDER + "/update/" + this.file; + protected String getFile() { + return TOPOLOGY_BASE_FOLDER + "/" + this.file; } }
@@ -68,78 +60,65 @@ public class TopologyManager extends AbstractManager { super(username, password, nodes); }
- public boolean updateTopology(boolean isNewSchema) throws Exception { - boolean result = false; - - initCluster(); - if (schemaExists()) { - log.info("Applying topology updates..."); - result = this.updateReplicationFactor(isNewSchema, nodes.size()); - this.updateGCGrace(isNewSchema, nodes.size()); - } else { - log.info("Topology updates cannot be applied because the schema is not installed."); + /** + * Updates cluster topology settings: + * 1) replication factor + * 2) gc grace period + */ + public void updateTopology() { + try { + initClusterSession(); + if (schemaExists()) { + log.info("Applying topology updates..."); + updateReplicationFactor(); + updateGCGrace(); + } else { + log.info("Topology updates cannot be applied because the schema is not installed."); + } + } finally { + shutdownClusterConnection(); } - shutdown(); - - return result; }
- private boolean updateReplicationFactor(boolean isNewSchema, int numberOfNodes) throws Exception { + /** + * Update replication factor based on the current set of storage nodes. + */ + private void updateReplicationFactor() { log.info("Starting to execute " + Task.UpdateReplicationFactor + " task.");
- int replicationFactor = 1; - - if (numberOfNodes == 2) { - replicationFactor = 2; - } else if (numberOfNodes == 3) { - replicationFactor = 2; - } else if (numberOfNodes > 3) { - replicationFactor = 3; - } - - if (getReplicationFactor() == replicationFactor) { - return false; - } - - log.info("Applying file " + Task.UpdateReplicationFactor.getFile(isNewSchema) + " for " + - Task.UpdateReplicationFactor + " task."); - for (String query : this.getSteps(Task.UpdateReplicationFactor.getFile(isNewSchema))) { - executedPreparedStatement(query, replicationFactor); + int newReplicationFactor = calculateNewReplicationFactor(); + int existingReplicationFactor = queryReplicationFactor(); + if (existingReplicationFactor == newReplicationFactor) { + log.info("No need to update replication factor. Replication factor already " + newReplicationFactor); + } else { + execute(new UpdateFile(Task.UpdateReplicationFactor.getFile()), "replication_factor", newReplicationFactor + + ""); + log.info("Updated replication factor from " + existingReplicationFactor + " to " + newReplicationFactor); } - log.info("File " + Task.UpdateReplicationFactor.getFile(isNewSchema) + " applied for " + - Task.UpdateReplicationFactor + " task.");
log.info("Successfully executed " + Task.UpdateReplicationFactor + " task."); - return true; }
- private boolean updateGCGrace(boolean isNewSchema, int numberOfNodes) throws Exception { + /** + * Update gc grace interval based on the current set of storage nodes. + */ + private void updateGCGrace() { log.info("Starting to execute " + Task.UpdateGCGrace + " task.");
int gcGraceSeconds = 864000; - if (numberOfNodes == 1) { + if (getClusterSize() == 1) { gcGraceSeconds = 0; } else { gcGraceSeconds = 691200; // 8 days }
- - log.info("Applying file " + Task.UpdateGCGrace.getFile(isNewSchema) + " for " + Task.UpdateGCGrace + " task."); - for (String query : this.getSteps(Task.UpdateGCGrace.getFile(isNewSchema))) { - executedPreparedStatement(query, gcGraceSeconds); - } - log.info("File " + Task.UpdateGCGrace.getFile(isNewSchema) + " applied for " + Task.UpdateGCGrace + " task."); + execute(new UpdateFile(Task.UpdateGCGrace.getFile()), "gc_grace_seconds", gcGraceSeconds + ""); + log.info("Updated gc_grace_seconds to " + gcGraceSeconds);
log.info("Successfully executed " + Task.UpdateGCGrace + " task."); - return true; } - - private void executedPreparedStatement(String query, Object... values) { - String formattedQuery = String.format(query, values); - log.info("Statement: \n" + formattedQuery); - PreparedStatement preparedStatement = session.prepare(formattedQuery); - BoundStatement boundStatement = preparedStatement.bind(); - session.execute(boundStatement); - } - } diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/UpdateFile.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/UpdateFile.java new file mode 100644 index 0000000..a5cf33c --- /dev/null +++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/UpdateFile.java @@ -0,0 +1,235 @@ +/* + * + * * RHQ Management Platform + * * Copyright (C) 2005-2012 Red Hat, Inc. + * * All rights reserved. + * * + * * This program is free software; you can redistribute it and/or modify + * * it under the terms of the GNU General Public License, version 2, as + * * published by the Free Software Foundation, and/or the GNU Lesser + * * General Public License, version 2.1, also as published by the Free + * * Software Foundation. + * * + * * This program is distributed in the hope that it will be useful, + * * but WITHOUT ANY WARRANTY; without even the implied warranty of + * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * * GNU General Public License and the GNU Lesser General Public License + * * for more details. + * * + * * You should have received a copy of the GNU General Public License + * * and the GNU Lesser General Public License along with this program; + * * if not, write to the Free Software Foundation, Inc., + * * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + */ + +package org.rhq.cassandra.schema; + +import java.io.InputStream; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Properties; +import java.util.Set; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import javax.xml.parsers.DocumentBuilder; +import javax.xml.parsers.DocumentBuilderFactory; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.w3c.dom.Document; +import org.w3c.dom.Node; +import org.w3c.dom.NodeList; + +/** + * @author Stefan Negrea + */ +class UpdateFile implements Comparable<UpdateFile> { + + private static final String UPDATE_PLAN_ELEMENT = "updatePlan"; + private static final String STEP_ELEMENT = "step"; + + private final Log log = LogFactory.getLog(UpdateFile.class); + + private final String file; + + public UpdateFile(String file) { + this.file = file; + } + + public String getFile() { + return this.file; + } + + /** + * Retrieve a named step from the list of steps ready to be executed + * on Cassandra. + * + * @param name step name + * @return step + * @throws Exception + */ + public String getNamedStep(String name) { + return getNamedStep(name, null); + } + + /** + * Retrieve a named step from the list of steps ready to be executed + * on Cassandra. The step will go through variable binding process with the + * provided properties. + * + * @param name step name + * @param properties properties to bind + * @return step + */ + public String getNamedStep(String name, Properties properties) { + List<Node> stepNodes = getStepNodes(); + for (Node stepNode : stepNodes) { + Node nameNode = stepNode.getAttributes().getNamedItem("name"); + if (nameNode != null && nameNode.getNodeValue().equals(name)) { + return bind(stepNode.getTextContent(), properties); + } + } + + return null; + } + + /** + * Retrieve all the steps in the file in declaration order. The steps are ready to + * be executed. 
+ * + * @return list of steps + */ + public List<String> getOrderedSteps() { + return getOrderedSteps(null); + } + + /** + * Retrieve all the steps in the file in declaration order. The steps are ready to + * be executed. Each step will go through variable binding process with the + * provided properties. + * + * @param properties properties to bind. + * @return + */ + public List<String> getOrderedSteps(Properties properties) { + List<String> boundSteps = new ArrayList<String>(); + List<Node> stepNodes = getStepNodes(); + + for (Node stepNode : stepNodes) { + boundSteps.add(bind(stepNode.getTextContent(), properties)); + } + + return boundSteps; + } + + /** + * Retrieve unbound list of steps from the file in declaration order. + * + * @return unbound list of steps. + */ + private List<Node> getStepNodes() { + InputStream stream = null; + try { + stream = SchemaManager.class.getClassLoader().getResourceAsStream(file); + + DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance(); + DocumentBuilder builder = factory.newDocumentBuilder(); + Document doc = builder.parse(stream); + + NodeList updateElements = doc.getElementsByTagName(UPDATE_PLAN_ELEMENT); + if (updateElements == null || updateElements.getLength() != 1) { + throw new RuntimeException("No <updatePlan> elements found"); + } + + Node rootDocument = updateElements.item(0); + NodeList updateStepElements = rootDocument.getChildNodes(); + + List<Node> stepList = new ArrayList<Node>(); + for (int index = 0; index < updateStepElements.getLength(); index++) { + Node updateStepElement = updateStepElements.item(index); + if (STEP_ELEMENT.equals(updateStepElement.getNodeName()) && updateStepElement.getTextContent() != null) { + stepList.add(updateStepElements.item(index)); + } + } + + return stepList; + } catch (Exception e) { + log.error("Error reading the list of steps from " + file + " file.", e); + throw new RuntimeException(e); + } finally { + if (stream != null) { + try { + stream.close(); + } 
catch (Exception e) { + log.error("Error closing the stream with the list of steps from " + file + " file.", e); + throw new RuntimeException(e); + } + } + } + } + + /** + * Bind the set of provided properties to the input step. The text should have + * all the variables to be bound in %variable_name% form. + * + * This method should be called even if no properties are provided because it will + * throw a runtime exception if the text contains properties that are expected to be + * bound but the list of variables is either empty or does not contain + * them. + * + * @param unboundText unbound text + * @param properties properties to bind + * @return properties bound text + */ + private String bind(String unboundText, Properties properties) { + Set<String> foundProperties = new HashSet<String>(); + Pattern regex = Pattern.compile("\\%([^%]*)\\%"); + Matcher matchPattern = regex.matcher(unboundText); + while (matchPattern.find()) { + String matchedString = matchPattern.group(); + String property = matchedString.substring(1, matchedString.length() - 1); + foundProperties.add(property); + } + + String boundText = unboundText; + + if( foundProperties.size() !=0 && properties == null){ + throw new RuntimeException("No properties provided but " + foundProperties.size() + + " required for binding."); + } else if (foundProperties.size() != 0) { + for (String foundProperty : foundProperties) { + String propertyValue = properties.getProperty(foundProperty); + if (propertyValue == null) { + throw new RuntimeException("Cannot bind query. Property [" + foundProperty + "] not found."); + } + + boundText = boundText.replaceAll("\\%" + foundProperty + "\\%", propertyValue); + } + } + + return boundText; + } + + /** + * Extract the version from the file name. 
+ * + * @return version + */ + public int extractVersion() { + String filename = this.getFile(); + filename = filename.substring(filename.lastIndexOf('/') + 1); + filename = filename.substring(0, filename.indexOf('.')); + return Integer.parseInt(filename); + } + + /* (non-Javadoc) + * @see java.lang.Comparable#compareTo(java.lang.Object) + */ + @Override + public int compareTo(UpdateFile o) { + return this.getFile().compareTo(o.getFile()); + } +} diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/UpdateFolder.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/UpdateFolder.java new file mode 100644 index 0000000..152a757 --- /dev/null +++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/UpdateFolder.java @@ -0,0 +1,141 @@ +/* + * + * * RHQ Management Platform + * * Copyright (C) 2005-2012 Red Hat, Inc. + * * All rights reserved. + * * + * * This program is free software; you can redistribute it and/or modify + * * it under the terms of the GNU General Public License, version 2, as + * * published by the Free Software Foundation, and/or the GNU Lesser + * * General Public License, version 2.1, also as published by the Free + * * Software Foundation. + * * + * * This program is distributed in the hope that it will be useful, + * * but WITHOUT ANY WARRANTY; without even the implied warranty of + * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * * GNU General Public License and the GNU Lesser General Public License + * * for more details. + * * + * * You should have received a copy of the GNU General Public License + * * and the GNU Lesser General Public License along with this program; + * * if not, write to the Free Software Foundation, Inc., + * * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + */ + +package org.rhq.cassandra.schema; + +import java.io.BufferedReader; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.net.JarURLConnection; +import java.net.URL; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.Enumeration; +import java.util.List; +import java.util.jar.JarEntry; +import java.util.jar.JarFile; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +/** + * @author Stefan Negrea + */ +class UpdateFolder { + + private final Log log = LogFactory.getLog(UpdateFolder.class); + + private final String folder; + private final List<UpdateFile> updateFiles; + + public UpdateFolder(String folder) throws Exception { + this.folder = folder; + this.updateFiles = this.loadUpdateFiles(); + } + + /** + * Removes all the update files up to and including the provided version. + * + * @param currentVersion current version + */ + public void removeAppliedUpdates(int currentVersion) { + List<UpdateFile> updateFiles = this.getUpdateFiles(); + while (!updateFiles.isEmpty()) { + int version = updateFiles.get(0).extractVersion(); + if (version <= currentVersion) { + updateFiles.remove(0); + } else { + break; + } + } + } + + /** + * Return the list of available update files. + * + * @return list of update files + */ + public List<UpdateFile> getUpdateFiles() { + return this.updateFiles; + } + + /** + * Loads the initial set of update files based on the input folder. 
+ * + * @return list of update files + * @throws Exception + */ + private List<UpdateFile> loadUpdateFiles() throws Exception { + List<UpdateFile> files = new ArrayList<UpdateFile>(); + InputStream stream = null; + + try { + URL resourceFolderURL = this.getClass().getClassLoader().getResource(folder); + + if (resourceFolderURL.getProtocol().equals("file")) { + stream = this.getClass().getClassLoader().getResourceAsStream(folder); + BufferedReader reader = new BufferedReader(new InputStreamReader(stream)); + + String updateFile; + while ((updateFile = reader.readLine()) != null) { + files.add(new UpdateFile(folder + updateFile)); + } + } else if (resourceFolderURL.getProtocol().equals("jar")) { + URL jarURL = this.getClass().getClassLoader().getResources(folder).nextElement(); + JarURLConnection jarURLCon = (JarURLConnection) (jarURL.openConnection()); + JarFile jarFile = jarURLCon.getJarFile(); + Enumeration<JarEntry> entries = jarFile.entries(); + while (entries.hasMoreElements()) { + String entry = entries.nextElement().getName(); + if (entry.startsWith(folder) && !entry.equals(folder) && !entry.equals(folder + "/")) { + files.add(new UpdateFile(entry)); + } + } + } + + Collections.sort(files, new Comparator<UpdateFile>() { + @Override + public int compare(UpdateFile o1, UpdateFile o2) { + return o1.compareTo(o2); + } + }); + } catch (Exception e) { + log.error("Error reading the list of update files.", e); + throw e; + } finally { + if (stream != null) { + try{ + stream.close(); + } catch (Exception e) { + log.error("Error closing the stream with the list of update files.", e); + throw e; + } + } + } + + return files; + } +} diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/VersionManager.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/VersionManager.java index e2daa2f..794e991 100644 --- a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/VersionManager.java +++ 
b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/VersionManager.java @@ -25,14 +25,11 @@
package org.rhq.cassandra.schema;
-import java.util.Date; import java.util.List; +import java.util.Properties; import java.util.UUID;
-import com.datastax.driver.core.BoundStatement; -import com.datastax.driver.core.PreparedStatement; import com.datastax.driver.core.exceptions.AuthenticationException; -import com.datastax.driver.core.exceptions.NoHostAvailableException;
import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -42,10 +39,9 @@ import org.rhq.core.domain.cloud.StorageNode; /** * @author Stefan Negrea */ -public class VersionManager extends AbstractManager { +class VersionManager extends AbstractManager {
private static final String SCHEMA_BASE_FOLDER = "schema"; - private static final String INSERT_VERSION_QUERY = "INSERT INTO rhq.schema_version (version, time ) VALUES ( ?, ?);";
private final Log log = LogFactory.getLog(VersionManager.class);
@@ -65,125 +61,170 @@ public class VersionManager extends AbstractManager { } }
- public VersionManager(String username, String password, List<StorageNode> nodes) { + public VersionManager(String username, String password, List<StorageNode> nodes) throws Exception { super(username, password, nodes); }
+ /** + * Install and update the RHQ schema: + * 1) If the schema does not exist then attempt to create it and then run the updates in order. + * 2) If the schema exists then run the updates in order. + * + * @throws Exception + */ public void install() throws Exception { log.info("Preparing to install schema"); + + boolean clusterSessionInitialized = false; try { - initCluster(); + initClusterSession(); + clusterSessionInitialized = true; } catch (AuthenticationException e) { - // If we cannot connect with the rhqadmin user, then assume it has not been - // created; so, we need to perform the "bootstrap" step of creating the user - // before we apply any schema changes. We want to create the user first so that - // we can go ahead and remove the default cassandra user and apply all changes - // using the rhqadmin user. - bootstrap(); + log.debug("Authentication exception. Will now attempt to create the schema."); + log.debug(e); + } finally { + shutdownClusterConnection(); }
+ if (!clusterSessionInitialized) { + create(); + } + + update(); + } + + /** + * Create RHQ schema and make related updates to the Cassandra installation. + * + * @throws Exception + */ + private void create() throws Exception { + UpdateFolder updateFolder = new UpdateFolder(Task.Create.getFolder()); + + Properties properties = new Properties(System.getProperties()); + properties.put("replication_factor", calculateNewReplicationFactor() + ""); + properties.put("cassandra_user_password", UUID.randomUUID() + ""); + properties.put("rhq_admin_username", getUsername()); + properties.put("rhq_admin_password", getPassword()); + + /** + * NOTE: Before applying any schema, we need to create the rhqadmin user. If we have more + * than a single node cluster then we also need to set the RF of the system_auth + * keyspace BEFORE we create the rhqadmin user. If we do not do in this order we will + * get inconsistent reads which will can result in failed authentication. + */ + //1. Execute the creation of RHQ schema, version table, admin user. try { - initCluster(); + initClusterSession(DEFAULT_CASSANDRA_USER, DEFAULT_CASSANDRA_PASSWORD); if (!schemaExists()) { - session.execute("ALTER USER cassandra NOSUPERUSER"); - session.execute("ALTER USER cassandra WITH PASSWORD '" + UUID.randomUUID() + "'"); - this.executeTask(Task.Create); - } else { + execute(updateFolder.getUpdateFiles().get(0), properties); + } else { log.info("RHQ schema already exists."); } - this.executeTask(Task.Update); + } catch (Exception ex) { + log.error(ex); + throw new RuntimeException(ex); + } finally { + shutdownClusterConnection(); + } + + //2. Change Cassandra default user privileges and password. + try { + initClusterSession(); + execute(updateFolder.getUpdateFiles().get(1), properties); } finally { - shutdown(); + shutdownClusterConnection(); } }
/** - * Before applying any schema, we need to create the rhqadmin user. If we have more - * than a single node cluster then we also need to set the RF of the system_auth - * keyspace BEFORE we create the rhqadmin user. If we do not do in this order we will - * get inconsistent reads which will can result in failed authentication. + * Update existing schema to the most current version in the update folder. + * + * @throws Exception */ - public void bootstrap() { + private void update() throws Exception { try { - initCluster("cassandra", "cassandra"); + initClusterSession();
- int replicationFactor; - if (nodes.size() < 3) { - replicationFactor = nodes.size(); - } else if (nodes.size() < 4) { - replicationFactor = 2; - } else { - replicationFactor = 3; + if (!schemaExists()) { + log.error("Schema not installed."); + throw new RuntimeException("Schema not installed properly, cannot apply schema updates."); + } - log.info("Updating replication_factor of system_auth keyspace to " + replicationFactor); - session.execute("ALTER KEYSPACE system_auth WITH replication = {'class': 'SimpleStrategy', " + - "'replication_factor': " + replicationFactor + "}");
- log.info("Creating rhqadmin user"); - session.execute("CREATE USER rhqadmin WITH PASSWORD 'rhqadmin' SUPERUSER"); + UpdateFolder updateFolder = new UpdateFolder(Task.Update.getFolder()); + + int currentSchemaVersion = getSchemaVersion(); + log.info("Current schema version is " + currentSchemaVersion); + updateFolder.removeAppliedUpdates(currentSchemaVersion); + + if (updateFolder.getUpdateFiles().size() == 0) { + log.info("RHQ schema is current! No updates applied."); + } else { + for (UpdateFile updateFile : updateFolder.getUpdateFiles()) { + execute(updateFile); + + Properties versionProperties = new Properties(); + versionProperties.put("version", updateFile.extractVersion() + ""); + versionProperties.put("time", System.currentTimeMillis() + ""); + executeManagementQuery(Query.INSERT_SCHEMA_VERSION, versionProperties); + + log.info("RHQ schema update " + updateFile +" applied."); + } + } } finally { - shutdown(); + shutdownClusterConnection(); } }
+ /** + * Drop RHQ schema and revert the database to pre-RHQ state: + * 1) Reinstate Cassandra superuser + * 2) Drop RHQ schema + * 3) Drop RHQ user + * + * @throws Exception + */ public void drop() throws Exception { log.info("Preparing to drop RHQ schema"); - try { - initCluster();
- if (schemaExists()) { - this.executeTask(Task.Drop); - } else { - log.info("RHQ schema does not exist. Drop operation not required."); - } - } catch (NoHostAvailableException e) { + UpdateFolder updateFolder = new UpdateFolder(Task.Drop.getFolder()); + Properties properties = new Properties(System.getProperties()); + properties.put("rhq_admin_username", getUsername()); + + try{ + initClusterSession(); + //1. Reinstate Cassandra superuser + execute(updateFolder.getUpdateFiles().get(0), properties); + log.info("Cassandra user reverted to default configuration."); + } catch (Exception e) { throw new RuntimeException(e); } finally { - shutdown(); + shutdownClusterConnection(); } - }
- private void executeTask(Task task) { try { - log.info("Starting to execute " + task + " task."); - - List<String> updateFiles = this.getUpdateFiles(task.getFolder()); + //Use Cassandra superuser to drop RHQ schema and user + initClusterSession(DEFAULT_CASSANDRA_USER, DEFAULT_CASSANDRA_PASSWORD);
- if (Task.Update.equals(task)) { - int currentSchemaVersion = this.getSchemaVersion(); - log.info("Current schema version is " + currentSchemaVersion); - this.removeAppliedUpdates(updateFiles, currentSchemaVersion); - } - - if (updateFiles.size() == 0 && Task.Update.equals(task)) { - log.info("RHQ schema is current! No updates applied."); + if (schemaExists()) { + //2. Drop RHQ schema + execute(updateFolder.getUpdateFiles().get(1), properties); + log.info("RHQ schema dropped."); + } else { + log.info("RHQ schema does not exist. Drop operation not required."); }
- for (String updateFile : updateFiles) { - log.info("Applying file " + updateFile + " for " + task + " task."); - for (String step : getSteps(updateFile)) { - log.info("Statement: \n" + step); - session.execute(step); - } - - if (Task.Update.equals(task)) { - this.updateSchemaVersion(updateFile); - } - - log.info("File " + updateFile + " applied for " + task + " task."); + if (userExists()) { + //3. Drop RHQ user + execute(updateFolder.getUpdateFiles().get(2), properties); + log.info("RHQ admin user dropped."); + } else { + log.info("RHQ admin user does not exist. Drop operation not required."); } } catch (Exception e) { - log.error(e); throw new RuntimeException(e); + } finally { + shutdownClusterConnection(); } - - log.info("Successfully executed " + task + " task."); - } - - private void updateSchemaVersion(String updateFileName) { - PreparedStatement preparedStatement = session.prepare(INSERT_VERSION_QUERY); - BoundStatement boundStatement = preparedStatement.bind(this.extractVersionFromUpdateFile(updateFileName), - new Date()); - session.execute(boundStatement); } } diff --git a/modules/common/cassandra-schema/src/main/resources/management/0001.xml b/modules/common/cassandra-schema/src/main/resources/management/0001.xml new file mode 100644 index 0000000..9fa4a16 --- /dev/null +++ b/modules/common/cassandra-schema/src/main/resources/management/0001.xml @@ -0,0 +1,25 @@ +<updatePlan> + <step name="user_exists"> + SELECT * FROM system_auth.users WHERE name = '%username%' + </step> + + <step name="schema_exists"> + SELECT * FROM system.schema_keyspaces WHERE keyspace_name = 'rhq' + </step> + + <step name="version_columnfamily_exists"> + SELECT * from system.schema_columnfamilies WHERE keyspace_name='rhq' AND columnfamily_name='schema_version' + </step> + + <step name="version"> + SELECT version FROM rhq.schema_version + </step> + + <step name="replication_factor"> + SELECT strategy_options FROM system.schema_keyspaces where keyspace_name='rhq' + </step> + + 
<step name="insert_schema_version"> + INSERT INTO rhq.schema_version (version, time ) VALUES ( %version%, %time%) + </step> +</updatePlan> \ No newline at end of file diff --git a/modules/common/cassandra-schema/src/main/resources/schema/create/0001.xml b/modules/common/cassandra-schema/src/main/resources/schema/create/0001.xml index 7a8d901..3f2db38 100644 --- a/modules/common/cassandra-schema/src/main/resources/schema/create/0001.xml +++ b/modules/common/cassandra-schema/src/main/resources/schema/create/0001.xml @@ -1,15 +1,22 @@ <updatePlan> <step> - CREATE KEYSPACE rhq WITH - replication = {'class': 'SimpleStrategy', 'replication_factor': 1}; + ALTER KEYSPACE system_auth WITH replication = {'class': 'SimpleStrategy', 'replication_factor': %replication_factor% } </step>
<step> - GRANT ALL PERMISSIONS ON KEYSPACE system_auth to rhqadmin; + CREATE USER %rhq_admin_username% WITH PASSWORD '%rhq_admin_password%' SUPERUSER </step>
<step> - GRANT ALL PERMISSIONS ON KEYSPACE rhq to rhqadmin; + CREATE KEYSPACE rhq WITH replication = {'class': 'SimpleStrategy', 'replication_factor': %replication_factor% } + </step> + + <step> + GRANT ALL PERMISSIONS ON KEYSPACE system_auth to %rhq_admin_username% + </step> + + <step> + GRANT ALL PERMISSIONS ON KEYSPACE rhq to %rhq_admin_username% </step>
<step> @@ -17,6 +24,6 @@ version int, time timestamp, PRIMARY KEY (version, time) - ) WITH COMPACT STORAGE; + ) WITH COMPACT STORAGE </step> </updatePlan> \ No newline at end of file diff --git a/modules/common/cassandra-schema/src/main/resources/schema/create/0002.xml b/modules/common/cassandra-schema/src/main/resources/schema/create/0002.xml new file mode 100644 index 0000000..417c9fa --- /dev/null +++ b/modules/common/cassandra-schema/src/main/resources/schema/create/0002.xml @@ -0,0 +1,9 @@ +<updatePlan> + <step> + ALTER USER cassandra NOSUPERUSER + </step> + + <step> + ALTER USER cassandra WITH PASSWORD '%cassandra_user_password%' + </step> +</updatePlan> \ No newline at end of file diff --git a/modules/common/cassandra-schema/src/main/resources/schema/drop/0001.xml b/modules/common/cassandra-schema/src/main/resources/schema/drop/0001.xml index b826965..425fdb8 100644 --- a/modules/common/cassandra-schema/src/main/resources/schema/drop/0001.xml +++ b/modules/common/cassandra-schema/src/main/resources/schema/drop/0001.xml @@ -1,10 +1,9 @@ <updatePlan> <step> - DROP KEYSPACE rhq; + ALTER USER cassandra SUPERUSER </step>
<step> - DROP USER rhqadmin; + ALTER USER cassandra WITH PASSWORD 'cassandra' </step> - </updatePlan> \ No newline at end of file diff --git a/modules/common/cassandra-schema/src/main/resources/schema/drop/0002.xml b/modules/common/cassandra-schema/src/main/resources/schema/drop/0002.xml new file mode 100644 index 0000000..fa7913a --- /dev/null +++ b/modules/common/cassandra-schema/src/main/resources/schema/drop/0002.xml @@ -0,0 +1,5 @@ +<updatePlan> + <step> + DROP KEYSPACE rhq + </step> +</updatePlan> \ No newline at end of file diff --git a/modules/common/cassandra-schema/src/main/resources/schema/drop/0003.xml b/modules/common/cassandra-schema/src/main/resources/schema/drop/0003.xml new file mode 100644 index 0000000..1147fce --- /dev/null +++ b/modules/common/cassandra-schema/src/main/resources/schema/drop/0003.xml @@ -0,0 +1,5 @@ +<updatePlan> + <step> + DROP USER %rhq_admin_username% + </step> +</updatePlan> \ No newline at end of file diff --git a/modules/common/cassandra-schema/src/main/resources/topology/0001.xml b/modules/common/cassandra-schema/src/main/resources/topology/0001.xml new file mode 100644 index 0000000..d65fc11 --- /dev/null +++ b/modules/common/cassandra-schema/src/main/resources/topology/0001.xml @@ -0,0 +1,8 @@ +<updatePlan> + <step> + ALTER KEYSPACE rhq WITH replication = {'class': 'SimpleStrategy', 'replication_factor': %replication_factor%} + </step> + <step> + ALTER KEYSPACE system_auth WITH replication = {'class': 'SimpleStrategy', 'replication_factor': %replication_factor%} + </step> +</updatePlan> \ No newline at end of file diff --git a/modules/common/cassandra-schema/src/main/resources/topology/0002.xml b/modules/common/cassandra-schema/src/main/resources/topology/0002.xml new file mode 100644 index 0000000..24f2c0e --- /dev/null +++ b/modules/common/cassandra-schema/src/main/resources/topology/0002.xml @@ -0,0 +1,25 @@ +<updatePlan> + <step> + ALTER COLUMNFAMILY rhq.metrics_index WITH gc_grace_seconds = %gc_grace_seconds% + 
</step> + + <step> + ALTER COLUMNFAMILY rhq.raw_metrics WITH gc_grace_seconds = %gc_grace_seconds% + </step> + + <step> + ALTER COLUMNFAMILY rhq.one_hour_metrics WITH gc_grace_seconds = %gc_grace_seconds% + </step> + + <step> + ALTER COLUMNFAMILY rhq.six_hour_metrics WITH gc_grace_seconds = %gc_grace_seconds% + </step> + + <step> + ALTER COLUMNFAMILY rhq.twenty_four_hour_metrics WITH gc_grace_seconds = %gc_grace_seconds% + </step> + + <step> + ALTER COLUMNFAMILY rhq.schema_version WITH gc_grace_seconds = %gc_grace_seconds% + </step> +</updatePlan> \ No newline at end of file diff --git a/modules/common/cassandra-schema/src/main/resources/topology/create/0001.xml b/modules/common/cassandra-schema/src/main/resources/topology/create/0001.xml deleted file mode 100644 index 5cbd7eb..0000000 --- a/modules/common/cassandra-schema/src/main/resources/topology/create/0001.xml +++ /dev/null @@ -1,5 +0,0 @@ -<updatePlan> - <step> - ALTER KEYSPACE rhq WITH replication = {'class': 'SimpleStrategy', 'replication_factor': %s}; - </step> -</updatePlan> \ No newline at end of file diff --git a/modules/common/cassandra-schema/src/main/resources/topology/create/0002.xml b/modules/common/cassandra-schema/src/main/resources/topology/create/0002.xml deleted file mode 100644 index d631030..0000000 --- a/modules/common/cassandra-schema/src/main/resources/topology/create/0002.xml +++ /dev/null @@ -1,26 +0,0 @@ -<updatePlan> - <step> - ALTER COLUMNFAMILY rhq.metrics_index WITH gc_grace_seconds = %s; - </step> - - <step> - ALTER COLUMNFAMILY rhq.raw_metrics WITH gc_grace_seconds = %s; - </step> - - <step> - ALTER COLUMNFAMILY rhq.one_hour_metrics WITH gc_grace_seconds = %s; - </step> - - <step> - ALTER COLUMNFAMILY rhq.six_hour_metrics WITH gc_grace_seconds = %s; - </step> - - <step> - ALTER COLUMNFAMILY rhq.twenty_four_hour_metrics WITH gc_grace_seconds = %s; - </step> - - <step> - ALTER COLUMNFAMILY rhq.schema_version WITH gc_grace_seconds = %s; - </step> - -</updatePlan> \ No newline at 
end of file diff --git a/modules/common/cassandra-schema/src/main/resources/topology/update/0001.xml b/modules/common/cassandra-schema/src/main/resources/topology/update/0001.xml deleted file mode 100644 index f2c0e57..0000000 --- a/modules/common/cassandra-schema/src/main/resources/topology/update/0001.xml +++ /dev/null @@ -1,9 +0,0 @@ -<updatePlan> - <step> - ALTER KEYSPACE system_auth WITH replication = {'class': 'SimpleStrategy', 'replication_factor': %s}; - </step> - - <step> - ALTER KEYSPACE rhq WITH replication = {'class': 'SimpleStrategy', 'replication_factor': %s}; - </step> -</updatePlan> \ No newline at end of file diff --git a/modules/common/cassandra-schema/src/main/resources/topology/update/0002.xml b/modules/common/cassandra-schema/src/main/resources/topology/update/0002.xml deleted file mode 100644 index d631030..0000000 --- a/modules/common/cassandra-schema/src/main/resources/topology/update/0002.xml +++ /dev/null @@ -1,26 +0,0 @@ -<updatePlan> - <step> - ALTER COLUMNFAMILY rhq.metrics_index WITH gc_grace_seconds = %s; - </step> - - <step> - ALTER COLUMNFAMILY rhq.raw_metrics WITH gc_grace_seconds = %s; - </step> - - <step> - ALTER COLUMNFAMILY rhq.one_hour_metrics WITH gc_grace_seconds = %s; - </step> - - <step> - ALTER COLUMNFAMILY rhq.six_hour_metrics WITH gc_grace_seconds = %s; - </step> - - <step> - ALTER COLUMNFAMILY rhq.twenty_four_hour_metrics WITH gc_grace_seconds = %s; - </step> - - <step> - ALTER COLUMNFAMILY rhq.schema_version WITH gc_grace_seconds = %s; - </step> - -</updatePlan> \ No newline at end of file diff --git a/modules/common/cassandra-schema/src/test/java/org/rhq/cassandra/schema/UpdateFileTest.java b/modules/common/cassandra-schema/src/test/java/org/rhq/cassandra/schema/UpdateFileTest.java new file mode 100644 index 0000000..6034671 --- /dev/null +++ b/modules/common/cassandra-schema/src/test/java/org/rhq/cassandra/schema/UpdateFileTest.java @@ -0,0 +1,170 @@ +package org.rhq.cassandra.schema; + +import java.util.List; 
+import java.util.Properties; +import java.util.Random; + +import org.testng.Assert; +import org.testng.annotations.Test; + +@Test +public class UpdateFileTest { + + public void noBindingOrdered() { + UpdateFile updateFile = new UpdateFile("no_binding.xml"); + List<String> orderedSteps = updateFile.getOrderedSteps(); + Assert.assertEquals(orderedSteps.size(), 4); + + for (int index = 0; index < orderedSteps.size(); index++) { + Assert.assertEquals(Integer.parseInt(orderedSteps.get(index)), index); + } + } + + public void noBindingNamedSteps() { + UpdateFile updateFile = new UpdateFile("no_binding_named_steps.xml"); + List<String> orderedSteps = updateFile.getOrderedSteps(); + Assert.assertEquals(orderedSteps.size(), 4); + + for (int index = 0; index < orderedSteps.size(); index++) { + String step = updateFile.getNamedStep(index + ""); + Assert.assertEquals(Integer.parseInt(step), index); + } + } + + public void noBindingOrderedExtraTags() { + UpdateFile updateFile = new UpdateFile("no_binding.xml"); + List<String> orderedSteps = updateFile.getOrderedSteps(); + Assert.assertEquals(orderedSteps.size(), 4); + } + + public void noBindingOrderedWithUnrelatedProperties() throws Exception { + Properties testProperties = new Properties(); + testProperties.put("random_property_2", "12345"); + testProperties.put("random_property_1", "67890"); + + UpdateFile updateFile = new UpdateFile("no_binding.xml"); + List<String> orderedSteps = updateFile.getOrderedSteps(); + Assert.assertEquals(orderedSteps.size(), 4); + + for (int index = 0; index < orderedSteps.size(); index++) { + Assert.assertEquals(Integer.parseInt(orderedSteps.get(index)), index); + } + } + + @Test(expectedExceptions = RuntimeException.class) + public void bindingErrorNoProperties() throws Exception { + UpdateFile updateFile = new UpdateFile("required_binding.xml"); + updateFile.getOrderedSteps(); + } + + @Test(expectedExceptions = RuntimeException.class) + public void bindingErrorPartialProperties() throws 
Exception { + Properties testProperties = new Properties(); + testProperties.put("first_property", "0"); + testProperties.put("second_property", "1"); + + UpdateFile updateFile = new UpdateFile("required_binding.xml"); + updateFile.getOrderedSteps(testProperties); + } + + @Test(expectedExceptions = RuntimeException.class) + public void badFileNoUpdatePlan() { + UpdateFile updateFile = new UpdateFile("bad_file_1.xml"); + updateFile.getOrderedSteps(); + } + + public void noUpdateSteps() { + UpdateFile updateFile = new UpdateFile("bad_file_2.xml"); + updateFile.getOrderedSteps(); + } + + @Test(expectedExceptions = RuntimeException.class) + public void badFileBadXML() { + UpdateFile updateFile = new UpdateFile("bad_file_3.xml"); + updateFile.getOrderedSteps(); + } + + public void binding() { + Random random = new Random(); + double randomNumber = random.nextDouble() * random.nextInt(); + + Properties testProperties = new Properties(); + testProperties.put("first_property", "0"); + testProperties.put("second_property", "1"); + testProperties.put("third_property", "2"); + testProperties.put("fourth_property", "3"); + testProperties.put("fifth_property", randomNumber + ""); + + UpdateFile updateFile = new UpdateFile("required_binding.xml"); + List<String> orderedSteps = updateFile.getOrderedSteps(testProperties); + Assert.assertEquals(orderedSteps.size(), 4); + + for (int index = 0; index < orderedSteps.size(); index++) { + if (index % 2 == 0) { + Assert.assertEquals(orderedSteps.get(index), index + "" + randomNumber); + } else { + Assert.assertEquals(orderedSteps.get(index), index + " testString " + randomNumber + " testString " + + randomNumber); + } + } + } + + public void bindingNamedSteps() { + Random random = new Random(); + double randomNumber = random.nextDouble() * random.nextInt(); + + Properties testProperties = new Properties(); + testProperties.put("first_property", "0"); + testProperties.put("second_property", "1"); + testProperties.put("third_property", 
"2"); + testProperties.put("fourth_property", "3"); + testProperties.put("fifth_property", randomNumber + ""); + + UpdateFile updateFile = new UpdateFile("required_binding_named_steps.xml"); + List<String> orderedSteps = updateFile.getOrderedSteps(testProperties); + Assert.assertEquals(orderedSteps.size(), 4); + + for (int index = 0; index < orderedSteps.size(); index++) { + String step = updateFile.getNamedStep(index + "", testProperties); + if (index % 2 == 0) { + Assert.assertEquals(step, index + "" + randomNumber); + } else { + Assert.assertEquals(step, index + " testString " + randomNumber + " testString " + randomNumber); + } + } + } + + public void bindingNamedStepPartialProperties() { + Random random = new Random(); + double randomNumber = random.nextDouble() * random.nextInt(); + + Properties testProperties = new Properties(); + testProperties.put("second_property", "1"); + testProperties.put("fifth_property", randomNumber + ""); + + UpdateFile updateFile = new UpdateFile("required_binding_named_steps.xml"); + String step = updateFile.getNamedStep("1", testProperties); + Assert.assertEquals(step, 1 + " testString " + randomNumber + " testString " + randomNumber); + } + + @Test(expectedExceptions = RuntimeException.class) + public void bindingNamedStepWrongPartialProperties() { + Random random = new Random(); + double randomNumber = random.nextDouble() * random.nextInt(); + + Properties testProperties = new Properties(); + testProperties.put("first_property", "0"); + //second_property is actually needed and not first_property + testProperties.put("fifth_property", randomNumber + ""); + + UpdateFile updateFile = new UpdateFile("required_binding_named_steps.xml"); + String step = updateFile.getNamedStep("1", testProperties); + Assert.assertEquals(step, 1 + " testString " + randomNumber + " testString " + randomNumber); + } + + public void bindingNamedStepNotFound() { + UpdateFile updateFile = new UpdateFile("required_binding_named_steps.xml"); + String step = 
updateFile.getNamedStep("randomName"); + Assert.assertNull(step); + } +} \ No newline at end of file diff --git a/modules/common/cassandra-schema/src/test/resources/bad_file_1.xml b/modules/common/cassandra-schema/src/test/resources/bad_file_1.xml new file mode 100644 index 0000000..691bf0d --- /dev/null +++ b/modules/common/cassandra-schema/src/test/resources/bad_file_1.xml @@ -0,0 +1,9 @@ +<updatePlana> + <step>%first_property%%fifth_property%</step> + + <step>%second_property%%fifth_property%%fifth_property%</step> + + <step>%third_property%%fifth_property%</step> + + <step>%fourth_property%%fifth_property%%fifth_property%</step> +</updatePlana> \ No newline at end of file diff --git a/modules/common/cassandra-schema/src/test/resources/bad_file_2.xml b/modules/common/cassandra-schema/src/test/resources/bad_file_2.xml new file mode 100644 index 0000000..3ef99b8 --- /dev/null +++ b/modules/common/cassandra-schema/src/test/resources/bad_file_2.xml @@ -0,0 +1,9 @@ +<updatePlan> + <steps>%first_property%%fifth_property%</steps> + + <steps>%second_property%%fifth_property%%fifth_property%</steps> + + <steps>%third_property%%fifth_property%</steps> + + <steps>%fourth_property%%fifth_property%%fifth_property%</steps> +</updatePlan> \ No newline at end of file diff --git a/modules/common/cassandra-schema/src/test/resources/bad_file_3.xml b/modules/common/cassandra-schema/src/test/resources/bad_file_3.xml new file mode 100644 index 0000000..4461d6a --- /dev/null +++ b/modules/common/cassandra-schema/src/test/resources/bad_file_3.xml @@ -0,0 +1,9 @@ +<updatePlan> + <steps>%first_property%%fifth_property%</steps> + + <steps>%second_property%%fifth_property%%fifth_property%</steps> + + <random>%third_property%%fifth_property%</steps> + + <steps>%fourth_property%%fifth_property%%fifth_property%</steps> +</updatePlan> \ No newline at end of file diff --git a/modules/common/cassandra-schema/src/test/resources/no_binding.xml 
b/modules/common/cassandra-schema/src/test/resources/no_binding.xml new file mode 100644 index 0000000..ad3fbb7 --- /dev/null +++ b/modules/common/cassandra-schema/src/test/resources/no_binding.xml @@ -0,0 +1,9 @@ +<updatePlan> + <step>0</step> + + <step>1</step> + + <step>2</step> + + <step>3</step> +</updatePlan> \ No newline at end of file diff --git a/modules/common/cassandra-schema/src/test/resources/no_binding_extra_tags.xml b/modules/common/cassandra-schema/src/test/resources/no_binding_extra_tags.xml new file mode 100644 index 0000000..88cb24a --- /dev/null +++ b/modules/common/cassandra-schema/src/test/resources/no_binding_extra_tags.xml @@ -0,0 +1,12 @@ +<updatePlan> + <step>0</step> + + <step>1</step> + + <step>2</step> + + <step>3</step> + + <steps>3234</steps> + <what>??</what> +</updatePlan> \ No newline at end of file diff --git a/modules/common/cassandra-schema/src/test/resources/no_binding_named_steps.xml b/modules/common/cassandra-schema/src/test/resources/no_binding_named_steps.xml new file mode 100644 index 0000000..843ad0f --- /dev/null +++ b/modules/common/cassandra-schema/src/test/resources/no_binding_named_steps.xml @@ -0,0 +1,9 @@ +<updatePlan> + <step name="0">0</step> + + <step name="1">1</step> + + <step name="2">2</step> + + <step name="3">3</step> +</updatePlan> \ No newline at end of file diff --git a/modules/common/cassandra-schema/src/test/resources/required_binding.xml b/modules/common/cassandra-schema/src/test/resources/required_binding.xml new file mode 100644 index 0000000..7332a99 --- /dev/null +++ b/modules/common/cassandra-schema/src/test/resources/required_binding.xml @@ -0,0 +1,9 @@ +<updatePlan> + <step>%first_property%%fifth_property%</step> + + <step>%second_property% testString %fifth_property% testString %fifth_property%</step> + + <step>%third_property%%fifth_property%</step> + + <step>%fourth_property% testString %fifth_property% testString %fifth_property%</step> +</updatePlan> \ No newline at end of file diff --git 
a/modules/common/cassandra-schema/src/test/resources/required_binding_named_steps.xml b/modules/common/cassandra-schema/src/test/resources/required_binding_named_steps.xml new file mode 100644 index 0000000..f50e7a5 --- /dev/null +++ b/modules/common/cassandra-schema/src/test/resources/required_binding_named_steps.xml @@ -0,0 +1,9 @@ +<updatePlan> + <step name="0">%first_property%%fifth_property%</step> + + <step name="1">%second_property% testString %fifth_property% testString %fifth_property%</step> + + <step name="2">%third_property%%fifth_property%</step> + + <step name="3">%fourth_property% testString %fifth_property% testString %fifth_property%</step> +</updatePlan> \ No newline at end of file diff --git a/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java b/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java index 160b5dd..5c8002a 100644 --- a/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java +++ b/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java @@ -487,7 +487,7 @@ public class InstallerServiceImpl implements InstallerService { } log("Install RHQ schema along with updates to Cassandra."); storageNodeSchemaManager.install(); - storageNodeSchemaManager.updateTopology(true); + storageNodeSchemaManager.updateTopology(); } else { log("Ignoring Cassandra schema - installer will assume it exists and is already up-to-date."); } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index bf25daf..34e5ebd 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ 
b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -968,17 +968,15 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN operationManager.scheduleGroupOperation(subjectManager.getOverlord(), schedule); }
- private boolean updateTopology(List<StorageNode> storageNodes) { + private void updateTopology(List<StorageNode> storageNodes) { String username = getRequiredStorageProperty(USERNAME_PROPERTY); String password = getRequiredStorageProperty(PASSWORD_PROPERTY); SchemaManager schemaManager = new SchemaManager(username, password, storageNodes); try{ - return schemaManager.updateTopology(false); + schemaManager.updateTopology(); } catch (Exception e) { log.error("An error occurred while applying schema topology changes", e); } - - return false; }
private String getRequiredStorageProperty(String property) { diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/scheduler/jobs/StorageNodeMaintenanceJob.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/scheduler/jobs/StorageNodeMaintenanceJob.java index 54ca4c2..aa55cb4 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/scheduler/jobs/StorageNodeMaintenanceJob.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/scheduler/jobs/StorageNodeMaintenanceJob.java @@ -135,17 +135,15 @@ public class StorageNodeMaintenanceJob extends AbstractStatefulJob { runNodeMaintenance(seedList, isReadRepairNeeded); }
- private boolean updateTopology(List<StorageNode> storageNodes) throws JobExecutionException { + private void updateTopology(List<StorageNode> storageNodes) throws JobExecutionException { String username = getRequiredStorageProperty(USERNAME_PROP); String password = getRequiredStorageProperty(PASSWORD_PROP); SchemaManager schemaManager = new SchemaManager(username, password, storageNodes); try{ - return schemaManager.updateTopology(false); + schemaManager.updateTopology(); } catch (Exception e) { log.error("An error occurred while applying schema topology changes", e); } - - return false; }
private List<StorageNode> waitForClustering(List<StorageNode> storageNodes) { diff --git a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java index bd171a4..aafa481 100644 --- a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java +++ b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java @@ -154,7 +154,7 @@ public class StorageNodeComponentITest {
SchemaManager schemaManager = new SchemaManager("rhqadmin", "rhqadmin", "127.0.0.1|7399|9142"); schemaManager.install(); - schemaManager.updateTopology(true); + schemaManager.updateTopology(); }
private ProcessExecution getProcessExecution(File binDir) {
commit 69656f42348a8ee43e83e77d1d0bc565b290ba8f Author: John Sanda jsanda@redhat.com Date: Wed Jul 31 21:59:34 2013 -0400
adding initial support for setting gossip port for new storage node
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 1c96e27..bf25daf 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -94,6 +94,7 @@ import org.rhq.enterprise.server.resource.group.ResourceGroupManagerLocal; import org.rhq.enterprise.server.rest.reporting.MeasurementConverter; import org.rhq.enterprise.server.scheduler.SchedulerLocal; import org.rhq.enterprise.server.scheduler.jobs.StorageNodeMaintenanceJob; +import org.rhq.enterprise.server.storage.StorageConfigurationException; import org.rhq.enterprise.server.util.CriteriaQueryGenerator; import org.rhq.enterprise.server.util.CriteriaQueryRunner; import org.rhq.enterprise.server.util.LookupUtil; @@ -214,7 +215,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN schedule.setOperationName("updateKnownNodes");
Configuration parameters = new Configuration(); - parameters.put(createPropertyListOfAddresses("ipAddresses", combine(getStorageNodes(), newStorageNode))); + parameters.put(createPropertyListOfAddresses("ipAddresses", combine(getClusteredStorageNodes(), newStorageNode))); schedule.setParameters(parameters);
operationManager.scheduleGroupOperation(subjectManager.getOverlord(), schedule); @@ -451,6 +452,11 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN return query.getResultList(); }
+ private List<StorageNode> getClusteredStorageNodes() { + return entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, StorageNode.class) + .setParameter("operationMode", OperationMode.NORMAL).getResultList(); + } + @Override @RequiredPermission(Permission.MANAGE_SETTINGS) public PageList<StorageNode> findStorageNodesByCriteria(Subject subject, StorageNodeCriteria criteria) { @@ -840,7 +846,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN log.info("Preparing to bootstrap " + storageNode + " into cluster..."); }
- List<StorageNode> existingStorageNodes = getStorageNodes(); + List<StorageNode> existingStorageNodes = getClusteredStorageNodes();
ResourceOperationSchedule schedule = new ResourceOperationSchedule(); schedule.setResource(storageNode.getResource()); @@ -850,15 +856,52 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
Configuration parameters = new Configuration(); parameters.put(new PropertySimple("cqlPort", existingStorageNodes.get(0).getCqlPort())); - // TODO need to add support for storage_port in cassandra/storage plugins - parameters.put(new PropertySimple("gossipPort", 7100)); - parameters.put(createPropertyListOfAddresses("storageNodeIPAddresses", getStorageNodes())); + parameters.put(new PropertySimple("gossipPort", getGossipPort(storageNode, existingStorageNodes))); + parameters.put(createPropertyListOfAddresses("storageNodeIPAddresses", getClusteredStorageNodes()));
schedule.setParameters(parameters);
operationManager.scheduleResourceOperation(subjectManager.getOverlord(), schedule); }
+ private Integer getGossipPort(StorageNode newStorageNode, List<StorageNode> storageNodes) { + if (log.isInfoEnabled()) { + log.info("Looking up gossip port for new storage node " + newStorageNode); + } + try { + StorageNode node = null; + Configuration resourceConfig = null; + for (StorageNode storageNode : storageNodes) { + resourceConfig = configurationManager.getLiveResourceConfiguration(subjectManager.getOverlord(), + storageNode.getResource().getId(), false); + if (resourceConfig == null) { + log.warn("Failed to load resource configuration for storage node " + newStorageNode.getResource()); + } else { + node = storageNode; + break; + } + } + if (resourceConfig == null) { + log.error("Failed to obtain gossip port from existing storage nodes"); + throw new StorageConfigurationException("Failed to obtain gossip port from existing storage nodes"); + } + + PropertySimple property = resourceConfig.getSimple("gossipPort"); + if (property == null) { + throw new StorageConfigurationException("The resource configuration for " + node.getResource() + + "did not include the required property [gossipPort]"); + } + Integer port = property.getIntegerValue(); + log.info("Found gossip port set to " + port); + return property.getIntegerValue(); + } catch (Exception e) { + if (e instanceof StorageConfigurationException) { + throw (StorageConfigurationException) e; + } + throw new RuntimeException("An error occurred while trying to obtain the gossip port", e); + } + } + @Override public void runAddNodeMaintenance() { log.info("Preparing to schedule addNodeMaintenance on the storage cluster..."); diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageConfigurationException.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageConfigurationException.java new file mode 100644 index 0000000..dc616a8 --- /dev/null +++ 
b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageConfigurationException.java @@ -0,0 +1,23 @@ +package org.rhq.enterprise.server.storage; + +/** + * @author John Sanda + */ +public class StorageConfigurationException extends RuntimeException { + + public StorageConfigurationException() { + super(); + } + + public StorageConfigurationException(String message) { + super(message); + } + + public StorageConfigurationException(String message, Throwable cause) { + super(message, cause); + } + + public StorageConfigurationException(Throwable cause) { + super(cause); + } +}
commit d7bf4998586c6daba79a166f2cc2440f27d259be Author: Jay Shaughnessy jshaughn@redhat.com Date: Wed Jul 31 17:10:54 2013 -0400
Add new perms to out-of-box roles, start db-upgrade step for fine-grained bundle perm work
diff --git a/modules/core/dbutils/src/main/scripts/dbsetup/authz-data.xml b/modules/core/dbutils/src/main/scripts/dbsetup/authz-data.xml index 681bd66..ecc7d4a 100644 --- a/modules/core/dbutils/src/main/scripts/dbsetup/authz-data.xml +++ b/modules/core/dbutils/src/main/scripts/dbsetup/authz-data.xml @@ -43,13 +43,19 @@
<table name="RHQ_PERMISSION"> <!-- give super-user-role all permissions - (explicitly add resource perms, even though they are implied by MANAGE_INVENTORY) --> + (explicitly add resource perms, even though they are implied by MANAGE_INVENTORY) + (explicitly add bundle perms, even though they are implied by MANAGE_BUNDLE) --> <data ROLE_ID="1" OPERATION="0"/> <!-- Permission.MANAGE_SECURITY --> <data ROLE_ID="1" OPERATION="1"/> <!-- Permission.MANAGE_INVENTORY --> <data ROLE_ID="1" OPERATION="2"/> <!-- Permission.MANAGE_SETTINGS --> - <data ROLE_ID="1" OPERATION="12"/> <!-- Permission.MANAGE_BUNDLE --> + <data ROLE_ID="1" OPERATION="12"/> <!-- Permission.MANAGE_BUNDLE --> <data ROLE_ID="1" OPERATION="15"/> <!-- Permission.MANAGE_REPOSITORIES --> <data ROLE_ID="1" OPERATION="17"/> <!-- Permission.VIEW_USERS --> + <data ROLE_ID="1" OPERATION="18"/> <!-- Permission.MANAGE_BUNDLE_GROUPS --> + <data ROLE_ID="1" OPERATION="19"/> <!-- Permission.CREATE_BUNDLES --> + <data ROLE_ID="1" OPERATION="20"/> <!-- Permission.DELETE_BUNDLES --> + <data ROLE_ID="1" OPERATION="21"/> <!-- Permission.DEPLOY_BUNDLES --> + <data ROLE_ID="1" OPERATION="22"/> <!-- Permission.VIEW_BUNDLES --> <!-- resource permissions start here--> <data ROLE_ID="1" OPERATION="3"/> <!-- Permission.VIEW_RESOURCE --> <data ROLE_ID="1" OPERATION="4"/> <!-- Permission.MODIFY_RESOURCE --> @@ -63,12 +69,22 @@ <data ROLE_ID="1" OPERATION="13"/> <!-- Permission.CONFIGURE_READ --> <data ROLE_ID="1" OPERATION="14"/> <!-- Permission.MANAGE_EVENTS --> <data ROLE_ID="1" OPERATION="16"/> <!-- Permission.MANAGE_DRIFT --> + <!-- bundle permissions start here--> + <data ROLE_ID="1" OPERATION="23"/> <!-- Permission.ASSIGN_BUNDLES_TO_GROUP --> + <data ROLE_ID="1" OPERATION="24"/> <!-- Permission.UNASSIGN_BUNDLES_FROM_GROUP --> + <data ROLE_ID="1" OPERATION="25"/> <!-- Permission.CREATE_BUNDLES_IN_GROUP --> + <data ROLE_ID="1" OPERATION="26"/> <!-- Permission.DELETE_BUNDLES_FROM_GROUP --> + <data ROLE_ID="1" OPERATION="27"/> 
<!-- Permission.VIEW_BUNDLES_IN_GROUP -->
- <!-- give all-resources-role MANAGE_INVENTORY and MANAGE_BUNDLE permissions + <!-- give all-resources-role MANAGE_INVENTORY and all bundle permissions other than MANAGE_BUNDLE_GROUPS (explicitly add resource perms as well, even though they are implied by MANAGE_INVENTORY) --> <data ROLE_ID="2" OPERATION="1"/> <!-- Permission.MANAGE_INVENTORY --> <data ROLE_ID="2" OPERATION="12"/> <!-- Permission.MANAGE_BUNDLE --> <data ROLE_ID="2" OPERATION="17"/> <!-- Permission.VIEW_USERS --> + <data ROLE_ID="2" OPERATION="19"/> <!-- Permission.CREATE_BUNDLES --> + <data ROLE_ID="2" OPERATION="20"/> <!-- Permission.DELETE_BUNDLES --> + <data ROLE_ID="2" OPERATION="21"/> <!-- Permission.DEPLOY_BUNDLES --> + <data ROLE_ID="2" OPERATION="22"/> <!-- Permission.VIEW_BUNDLES --> <!-- resource permissions start here--> <data ROLE_ID="2" OPERATION="3"/> <!-- Permission.VIEW_RESOURCE --> <data ROLE_ID="2" OPERATION="4"/> <!-- Permission.MODIFY_RESOURCE --> @@ -82,6 +98,12 @@ <data ROLE_ID="2" OPERATION="13"/> <!-- Permission.CONFIGURE_READ --> <data ROLE_ID="2" OPERATION="14"/> <!-- Permission.MANAGE_EVENTS --> <data ROLE_ID="2" OPERATION="16"/> <!-- Permission.MANAGE_DRIFT --> + <!-- bundle permissions start here--> + <data ROLE_ID="2" OPERATION="23"/> <!-- Permission.ASSIGN_BUNDLES_TO_GROUP --> + <data ROLE_ID="2" OPERATION="24"/> <!-- Permission.UNASSIGN_BUNDLES_FROM_GROUP --> + <data ROLE_ID="2" OPERATION="25"/> <!-- Permission.CREATE_BUNDLES_IN_GROUP --> + <data ROLE_ID="2" OPERATION="26"/> <!-- Permission.DELETE_BUNDLES_FROM_GROUP --> + <data ROLE_ID="2" OPERATION="27"/> <!-- Permission.VIEW_BUNDLES_IN_GROUP --> </table>
</dbsetup> diff --git a/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml b/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml index bb5fa50..25cdfec 100644 --- a/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml +++ b/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml @@ -2079,6 +2079,73 @@ <schema-javaTask className="StorageNodeAddressUpgradeTask" /> </schemaSpec>
+ <schemaSpec version="2.135"> + <!-- Fine Grained Bundle Permissions --> + + <!-- Add new perms to superuser/all-resources roles --> + <schema-directSQL> + <statement desc="Inserting MANAGE_BUNDLE_GROUPS permission for 'Super User' role"> + INSERT INTO RHQ_PERMISSION (ROLE_ID, OPERATION) VALUES (1, 18) + </statement> + <statement desc="Inserting CREATE_BUNDLES permission for 'Super User' role"> + INSERT INTO RHQ_PERMISSION (ROLE_ID, OPERATION) VALUES (1, 19) + </statement> + <statement desc="Inserting DELETE_BUNDLES permission for 'Super User' role"> + INSERT INTO RHQ_PERMISSION (ROLE_ID, OPERATION) VALUES (1, 20) + </statement> + <statement desc="Inserting DEPLOY_BUNDLES permission for 'Super User' role"> + INSERT INTO RHQ_PERMISSION (ROLE_ID, OPERATION) VALUES (1, 21) + </statement> + <statement desc="Inserting VIEW_BUNDLES permission for 'Super User' role"> + INSERT INTO RHQ_PERMISSION (ROLE_ID, OPERATION) VALUES (1, 22) + </statement> + <statement desc="Inserting ASSIGN_BUNDLES_TO_GROUP permission for 'Super User' role"> + INSERT INTO RHQ_PERMISSION (ROLE_ID, OPERATION) VALUES (1, 23) + </statement> + <statement desc="Inserting UNASSIGN_BUNDLES_FROM_GROUP permission for 'Super User' role"> + INSERT INTO RHQ_PERMISSION (ROLE_ID, OPERATION) VALUES (1, 24) + </statement> + <statement desc="Inserting CREATE_BUNDLES_IN_GROUP permission for 'Super User' role"> + INSERT INTO RHQ_PERMISSION (ROLE_ID, OPERATION) VALUES (1, 25) + </statement> + <statement desc="Inserting DELETE_BUNDLES_FROM_GROUP permission for 'Super User' role"> + INSERT INTO RHQ_PERMISSION (ROLE_ID, OPERATION) VALUES (1, 26) + </statement> + <statement desc="Inserting VIEW_BUNDLES_IN_GROUP permission for 'Super User' role"> + INSERT INTO RHQ_PERMISSION (ROLE_ID, OPERATION) VALUES (1, 27) + </statement> + + <!-- all-resources-role does not get MANAGE_BUNDLE_GROUPS --> + <statement desc="Inserting CREATE_BUNDLES permission for 'All Resources' role"> + INSERT INTO RHQ_PERMISSION (ROLE_ID, OPERATION) 
VALUES (2, 19) + </statement> + <statement desc="Inserting DELETE_BUNDLES permission for 'All Resources' role"> + INSERT INTO RHQ_PERMISSION (ROLE_ID, OPERATION) VALUES (2, 20) + </statement> + <statement desc="Inserting DEPLOY_BUNDLES permission for 'All Resources' role"> + INSERT INTO RHQ_PERMISSION (ROLE_ID, OPERATION) VALUES (2, 21) + </statement> + <statement desc="Inserting VIEW_BUNDLES permission for 'All Resources' role"> + INSERT INTO RHQ_PERMISSION (ROLE_ID, OPERATION) VALUES (2, 22) + </statement> + <statement desc="Inserting ASSIGN_BUNDLES_TO_GROUP permission for 'All Resources' role"> + INSERT INTO RHQ_PERMISSION (ROLE_ID, OPERATION) VALUES (2, 23) + </statement> + <statement desc="Inserting UNASSIGN_BUNDLES_FROM_GROUP permission for 'All Resources' role"> + INSERT INTO RHQ_PERMISSION (ROLE_ID, OPERATION) VALUES (2, 24) + </statement> + <statement desc="Inserting CREATE_BUNDLES_IN_GROUP permission for 'All Resources' role"> + INSERT INTO RHQ_PERMISSION (ROLE_ID, OPERATION) VALUES (2, 25) + </statement> + <statement desc="Inserting DELETE_BUNDLES_FROM_GROUP permission for 'All Resources' role"> + INSERT INTO RHQ_PERMISSION (ROLE_ID, OPERATION) VALUES (2, 26) + </statement> + <statement desc="Inserting VIEW_BUNDLES_IN_GROUP permission for 'All Resources' role"> + INSERT INTO RHQ_PERMISSION (ROLE_ID, OPERATION) VALUES (2, 27) + </statement> + </schema-directSQL> + </schemaSpec> + </dbupgrade> </target> </project>
commit 4b5ce46db93610cc198fd2442027992db72ee36a Author: Jay Shaughnessy jshaughn@redhat.com Date: Wed Jul 31 17:09:56 2013 -0400
Fix issues when performing dbsetup-upgrade with defaults
diff --git a/modules/core/dbutils/pom.xml b/modules/core/dbutils/pom.xml index 483688f..d7e2d65 100644 --- a/modules/core/dbutils/pom.xml +++ b/modules/core/dbutils/pom.xml @@ -280,9 +280,9 @@ if (project.getProperty('storage-schema')) { if (project.getProperty('db') == 'dev') { self.log('PERFORMING STORAGE NODE SETUP TO LATEST SCHEMA') - username = project.getProperty('rhq.dev.cassandra.username') ?: "cassandra" - password = project.getProperty('rhq.dev.cassandra.password') ?: "cassandra" - seeds = project.getProperty('rhq.dev.cassandra.seeds') ?: "127.0.0.1|7199|9142" + username = project.getProperty('rhq.dev.cassandra.username') ?: "rhqadmin" + password = project.getProperty('rhq.dev.cassandra.password') ?: "rhqadmin" + seeds = project.getProperty('rhq.dev.cassandra.seeds') ?: "127.0.0.1|7299|9142"
schemaManager = new SchemaManager(username, password, seeds)
commit a3f58e21fe5e48819fa89c12e2d6ffb66dab08d4 Author: Jay Shaughnessy jshaughn@redhat.com Date: Wed Jul 31 17:09:02 2013 -0400
Fix issue when using -Ditest.use-external-storage-node
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-arquillian/src/main/java/org/rhq/cassandra/ccm/arquillian/CCMSuiteDeploymentExtension.java b/modules/common/cassandra-ccm/cassandra-ccm-arquillian/src/main/java/org/rhq/cassandra/ccm/arquillian/CCMSuiteDeploymentExtension.java index a86c49e..6001450 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-arquillian/src/main/java/org/rhq/cassandra/ccm/arquillian/CCMSuiteDeploymentExtension.java +++ b/modules/common/cassandra-ccm/cassandra-ccm-arquillian/src/main/java/org/rhq/cassandra/ccm/arquillian/CCMSuiteDeploymentExtension.java @@ -26,7 +26,6 @@ package org.rhq.cassandra.ccm.arquillian;
import java.io.File; -import java.util.Collections; import java.util.List; import java.util.concurrent.Callable;
@@ -115,7 +114,7 @@ public class CCMSuiteDeploymentExtension implements LoadableExtension {
SchemaManager schemaManager; ClusterInitService clusterInitService = new ClusterInitService(); - List<StorageNode> nodes = Collections.emptyList(); + List<StorageNode> nodes = null;
if (!Boolean.valueOf(System.getProperty("itest.use-external-storage-node", "false"))) {
@@ -148,7 +147,7 @@ public class CCMSuiteDeploymentExtension implements LoadableExtension { } } else { try { - String seed = System.getProperty("rhq.cassandra.seeds", "127.0.0.1|7199|9042"); + String seed = System.getProperty("rhq.cassandra.seeds", "127.0.0.1|7299|9042"); schemaManager = new SchemaManager("rhqadmin", "rhqadmin", seed);
} catch (Exception e) {
commit 2355432987c8498e90cb0002f21079b1bc37300c Author: John Sanda jsanda@redhat.com Date: Wed Jul 31 13:36:33 2013 -0400
fix check for determining whether or not read repair is needed when adding nodes
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index ee0f406..1c96e27 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -866,19 +866,21 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN List<StorageNode> storageNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, StorageNode.class).setParameter("operationMode", OperationMode.NORMAL).getResultList();
- int clusterSize = storageNodes.size(); + // The previous cluster size will be the current size - 1 since we currently only + // support deploying one node at a time. + int previousClusterSize = storageNodes.size() - 1; boolean isReadRepairNeeded;
- if (clusterSize >= 4) { + if (previousClusterSize >= 4) { // At 4 nodes we increase the RF to 3. We are not increasing the RF beyond // that for additional nodes; so, there is no need to run repair if we are // expanding from a 4 node cluster since the RF remains the same. isReadRepairNeeded = false; - } else if (clusterSize == 1) { + } else if (previousClusterSize == 1) { // The RF will increase since we are going from a single to a multi-node // cluster; therefore, we want to run repair. isReadRepairNeeded = true; - } else if (clusterSize == 2) { + } else if (previousClusterSize == 2) { if (storageNodes.size() > 3) { // If we go from 2 to > 3 nodes we will increase the RF to 3; therefore // we want to run repair. @@ -888,7 +890,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN // to run repair. isReadRepairNeeded = false; } - } else if (clusterSize == 3) { + } else if (previousClusterSize == 3) { // We are increasing the cluster size > 3 which means the RF will be // updated to 3; therefore, we want to run repair. isReadRepairNeeded = true;
commit c1963ab6528a823f547814b8d0a574ce4552b15a Author: John Sanda jsanda@redhat.com Date: Tue Jul 30 16:41:18 2013 -0400
update itest
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra-jvm.properties b/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra-jvm.properties index 774a831..588fb3d 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra-jvm.properties +++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra-jvm.properties @@ -13,7 +13,7 @@ heap_new=-Xmn${HEAP_NEWSIZE}
heap_dump_on_OOMError="-XX:+HeapDumpOnOutOfMemoryError"
-heap_dump_dir="" +heap_dump_dir=
thread_stack_size=-Xss180k
commit 6c7eeff519bdecacf17a3ccced43e7815e5d5dbe Author: Simeon Pinder spinder@redhat.com Date: Wed Jul 31 11:05:43 2013 -0400
i)ensure jdk 6+ versions and ii)reuse userDN between the steps.
diff --git a/modules/helpers/ldap-tool/pom.xml b/modules/helpers/ldap-tool/pom.xml index 770f22c..91416d5 100644 --- a/modules/helpers/ldap-tool/pom.xml +++ b/modules/helpers/ldap-tool/pom.xml @@ -60,6 +60,16 @@ </execution> </executions> </plugin> + <!-- Build for JDK 1.6 and later. --> + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-compiler-plugin</artifactId> + <version>2.0.2</version> + <configuration> + <source>1.6</source> + <target>1.6</target> + </configuration> + </plugin>
<plugin> <artifactId>maven-release-plugin</artifactId> diff --git a/modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java b/modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java index 89b0698..bc322ab 100644 --- a/modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java +++ b/modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java @@ -57,10 +57,10 @@ import javax.swing.border.TitledBorder; * LDAP calls during auth/authz operations. * * The specific LDAP logic below needs to mirror the latest RHQ code and allow the user - * to test our their configuration without requring a specific RHQ/JON build as a dependency. + * to test our their configuration without requiring a specific RHQ/JON build as a dependency. * * NOTE: To avoid a runtime dependency on specific versions of RHQ or JON, the small implementation - * methods were copied into this class with minimatl changes for logging and ui messaging. The + * methods were copied into this class with minimal changes for logging and ui messaging. The * definitive implementation for each 'copied' method can be found in LDAPGroupManagerBean. * * @author Simeon Pinder @@ -85,6 +85,7 @@ public class TestLdapSettings extends JFrame { private JMenuBar menuBar; private String advdb = "**Verbose:debug ----"; private static final String BASEDN_DELIMITER = ";"; + private String userDN;
private static final long serialVersionUID = 1L; int textBoxWidth = 20; @@ -419,7 +420,7 @@ public class TestLdapSettings extends JFrame { } // testing a valid user involves a filtered ldap search // using the loginProperty, and optionally searchFilter - String userDN = ""; + userDN = ""; if (proceed) { // default loginProperty to cn if it's not set if (loginProperty.isEmpty()) { @@ -895,9 +896,8 @@ public class TestLdapSettings extends JFrame { // Load any search filter String groupSearchFilter = (String) systemConfig.get(SystemSetting.LDAP_GROUP_FILTER.getInternalName()); String groupMemberFilter = (String) systemConfig.get(SystemSetting.LDAP_GROUP_MEMBER.getInternalName()); - String userDn = (String) systemConfig.get(Context.SECURITY_PRINCIPAL);
- String testUserDN = userDn; + String testUserDN = userDN; String ldapServer = (String) systemConfig.get(Context.PROVIDER_URL);
Properties env = getProperties(ldapServer);
commit a29233274b8c623dc83073e97fb8a7a079ded87d Author: Simeon Pinder spinder@redhat.com Date: Wed Jul 31 07:02:29 2013 -0400
i)Fix issue with authz validation in TestLdapSettings ii)Remove old TestLdapSettings source.
diff --git a/etc/dev-utils/TestLdapSettings.java b/etc/dev-utils/TestLdapSettings.java deleted file mode 100644 index 2e29b3d..0000000 --- a/etc/dev-utils/TestLdapSettings.java +++ /dev/null @@ -1,738 +0,0 @@ -package com.test; - -import java.awt.BorderLayout; -import java.awt.Color; -import java.awt.Dimension; -import java.awt.FlowLayout; -import java.awt.event.ActionEvent; -import java.awt.event.ActionListener; -import java.awt.event.WindowAdapter; -import java.awt.event.WindowEvent; -import java.io.PrintWriter; -import java.io.StringWriter; -import java.io.UnsupportedEncodingException; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Properties; -import java.util.Set; - -import javax.naming.CompositeName; -import javax.naming.Context; -import javax.naming.NamingEnumeration; -import javax.naming.directory.Attribute; -import javax.naming.directory.Attributes; -import javax.naming.directory.SearchControls; -import javax.naming.directory.SearchResult; -import javax.naming.ldap.InitialLdapContext; -import javax.swing.Box; -import javax.swing.BoxLayout; -import javax.swing.JButton; -import javax.swing.JCheckBox; -import javax.swing.JFrame; -import javax.swing.JLabel; -import javax.swing.JPanel; -import javax.swing.JScrollPane; -import javax.swing.JTextArea; -import javax.swing.JTextField; -import javax.swing.border.LineBorder; -import javax.swing.border.TitledBorder; - -//import org.rhq.enterprise.server.util.security.UntrustedSSLSocketFactory; - -/* Is a development test tool that allows the user to simulate the RHQ server side - * LDAP calls during auth/authz operations. 
- * - * @author Simeon Pinder - */ -public class TestLdapSettings extends JFrame { - //shared fields - private JTextArea testResults; - private JCheckBox ssl; - private JTextField testUserNameValue; - private JTextField testUserPasswordValue; - private HashMap<String, JTextField> fieldMappings; - private String[] keys; - private JCheckBox enableLdapReferral; - private JCheckBox enableVerboseDebugging; - private JCheckBox enableVerboseGroupParsing; - private String advdb = "**Verbose:debug ----"; - - private static final long serialVersionUID = 1L; - int textBoxWidth = 20; - - public static void main(String args[]) { - new TestLdapSettings(); - } - - // Configure window properties - private TestLdapSettings() { - - setTitle("Check LDAP Settings: Simulates LDAP checks/queries of RHQ LDAP integration"); - getContentPane().setLayout(new BorderLayout()); - // top panel definition - JPanel top = new JPanel(); - top.setLayout(new BoxLayout(top, BoxLayout.Y_AXIS)); - top.setBorder(LineBorder.createGrayLineBorder()); - keys = new String[] { "URL:", "Search Filter:", - "Search Base:","Login Property", - "Username:", "Group Search Filter:", - "Password:", "Group Member Filter:", - }; - fieldMappings = loadUiFields(top, keys); - - //add the two checkboxes for additiona debugging options - enableLdapReferral= new JCheckBox("[follow] ldap referrals"); - enableLdapReferral.setSelected(false); - enableVerboseDebugging= new JCheckBox("more verbose logging"); - enableVerboseDebugging.setSelected(false); - enableVerboseGroupParsing= new JCheckBox("more detailed group parsing"); - enableVerboseGroupParsing.setSelected(false); - //put into row display - JPanel advancedDebugRegion = new JPanel(); - advancedDebugRegion.setLayout(new FlowLayout(FlowLayout.LEFT)); - LineBorder advancedBorder = new LineBorder(Color.BLACK, 2); - TitledBorder debugBorder = new TitledBorder(advancedBorder, "Debug:"); - advancedDebugRegion.setBorder(debugBorder); - advancedDebugRegion.add(enableLdapReferral); - 
advancedDebugRegion.add(enableVerboseDebugging); - advancedDebugRegion.add(enableVerboseGroupParsing); - top.add(advancedDebugRegion); - - ssl = new JCheckBox("SSL:"); - ssl.setEnabled(false); - top.add(ssl); - // test user auth region - JPanel testUserRegion = new JPanel(); - testUserRegion.setLayout(new FlowLayout(FlowLayout.LEFT)); - LineBorder border = new LineBorder(Color.BLUE, 2); - TitledBorder tBorder = new TitledBorder(border, "Authentication/Authorization Check Credentials: (insert valid ldap user assigned to group)"); - testUserRegion.setBorder(tBorder); - JLabel testUserName = new JLabel("Test UserName:"); - testUserNameValue = new JTextField(textBoxWidth); - JLabel testUserPassword = new JLabel("Test Password:"); - testUserPasswordValue = new JTextField(textBoxWidth); - testUserRegion.add(testUserName); - testUserRegion.add(testUserNameValue); - testUserRegion.add(testUserPassword); - testUserRegion.add(testUserPasswordValue); - top.add(testUserRegion); - - // center - JPanel center = new JPanel(); - center.setLayout(new BoxLayout(center, BoxLayout.X_AXIS)); - // build center panel - buildCenterPanel(center); - - // final component layout - getContentPane().add(top, BorderLayout.NORTH); - getContentPane().add(center, BorderLayout.CENTER); - this.setSize(720, 700); - addWindowListener(new WindowAdapter() { - public void windowClosing(WindowEvent e) { - System.exit(0); - } - }); - setVisible(true); - } - - // define the center display panel. 
- private void buildCenterPanel(JPanel center) { - // First element is Test Button - JButton test = new JButton("Test Settings"); - center.add(test); - // second is large text box that display ldap queries - testResults = new JTextArea("(click button to test settings values: simulates 4 separate checks showing ldap filters used)", - 40, 40); - JScrollPane jsp = new JScrollPane(testResults); - center.add(jsp); - test.addActionListener(new ActionListener() { - public void actionPerformed(ActionEvent e) { - testResults.setText("");//clear out empty msg - //trim spaces from all fields - String ldapServer = fieldMappings.get(keys[0]).getText().trim(); - String searchFilter = fieldMappings.get(keys[1]).getText().trim(); - String searchBase = fieldMappings.get(keys[2]).getText().trim(); - String loginProperty = fieldMappings.get(keys[3]).getText().trim(); - String bindUserName = fieldMappings.get(keys[4]).getText().trim(); - String groupSearchFilter = fieldMappings.get(keys[5]).getText().trim(); - String bindPassword = fieldMappings.get(keys[6]).getText().trim(); - String groupMemberFilter = fieldMappings.get(keys[7]).getText().trim(); - String testUserName = testUserNameValue.getText().trim(); - String testUserPassword = testUserPasswordValue.getText().trim(); - // validate initial required elements - String msg = null; - boolean proceed = true; - //valid required details set. 
- if (ldapServer.isEmpty() || bindUserName.isEmpty() - || bindPassword.isEmpty() || searchBase.isEmpty()) { - msg ="STEP-1:FAIL: "+ keys[0] + ", " + keys[2] + ", " + keys[4] + ", " - + keys[6] + " cannot be empty to proceed."; - log(msg); - proceed = false; - } - Properties env; - InitialLdapContext ctx = null; - if (proceed) {// attempt initial ldap bind from RHQ server - msg = "STEP-1:TESTING: Attempting to bind to server:" + ldapServer - + "\n with user '" + bindUserName - + "' and password entered."; - log(msg); - env = getProperties(ldapServer); - env.setProperty(Context.SECURITY_PRINCIPAL, bindUserName); - env.setProperty(Context.SECURITY_CREDENTIALS, bindPassword); - env.setProperty(Context.SECURITY_AUTHENTICATION, "simple"); - try { - ctx = new InitialLdapContext(env, null); - msg = "STEP-1:PASS: LDAP bind credentials are correct. Successfully connected to '" - + ldapServer - + "'.\n This means the LDAP Bind credentials for the RHQ Server authentication/authorization requests to ldap server " - + "are correct."; - if(enableVerboseDebugging.isSelected()){ - msg+="\n"+advdb+" LDAP simple authentication bind successful."; - } - log(msg); - proceed = true; - } catch (Exception ex) { - msg = "STEP-1:FAIL: Unable to connect to the LDAP server with credentials specified.\n"; - msg+="Exception:"+ex.getMessage(); - if(enableVerboseDebugging.isSelected()){ - msg = appendStacktraceToMsg(msg, ex); - } - log(msg); - proceed = false; - } - } - if (proceed) {// retrieve test credentials to test run auth - // load search controls - SearchControls searchControls = getSearchControls(); - // validating searchFilter and test user/pass creds - proceed = true; - if (testUserName.isEmpty() || (testUserPassword.isEmpty())) { - msg = "STEP-2:FAIL: Test Username/Password fields cannot be empty for this step."; - log(msg); - proceed = false; - } - // testing a valid user involves a filtered ldap search - // using the loginProperty, and optionally searchFilter - String userDN = ""; - 
if (proceed) { - // default loginProperty to cn if it's not set - if (loginProperty.isEmpty()) { - loginProperty = "cn"; - if(enableVerboseDebugging.isSelected()){ - String mesg = "As you have not specified a login property, defaulting to 'cn'"; - log(advdb+" "+msg); - } - } - String filter; - if (!searchFilter.isEmpty()) { - filter = "(&(" + loginProperty + "=" + testUserName - + ")" + "(" + searchFilter + "))"; - } else { - filter = "(" + loginProperty + "=" + testUserName - + ")"; - } - if(enableVerboseDebugging.isSelected()){ - log(advdb+" The searchfilter is optionally appended to login property for additional shared attribute across users."); - } - msg = "STEP-2:TESTING: To validate the test user the following LDAP filtered component will be used to find matching users:\n"; - msg += filter; - log(msg); - // test out the search on the target ldap server - try { - String[] baseDNs = searchBase.split(";"); - for (int x = 0; x < baseDNs.length; x++) { - NamingEnumeration answer = ctx.search( - baseDNs[x], filter, searchControls); - if(enableVerboseDebugging.isSelected()){ - log(advdb+" this search was excuted against DN component '"+baseDNs[x]+"'."); - } - // boolean ldapApiNpeFound = false; - if (!answer.hasMoreElements()) { - msg="STEP-2:WARN Unable to locate a matching users for the filter'"+filter+ - "'. Please check your loginProperty. 
Usually 'cn' or 'uid'"; - log(msg); - continue; - } - // Going with the first match - SearchResult si = (SearchResult) answer.next(); - - // Construct the UserDN - userDN = null; - - try { - userDN = si.getNameInNamespace(); - } catch (UnsupportedOperationException use) { - userDN = new CompositeName(si.getName()).get(0); - if (si.isRelative()) { - userDN += "," + baseDNs[x]; - } - } - - msg = "STEP-2:PASS: The test user '" - + testUserName - + "' was succesfully located, and the following userDN will be used in authorization check:\n"; - msg += userDN; - log(msg); - - ctx.addToEnvironment(Context.SECURITY_PRINCIPAL, userDN); - ctx.addToEnvironment(Context.SECURITY_CREDENTIALS,testUserPassword); - ctx.addToEnvironment(Context.SECURITY_AUTHENTICATION,"simple"); - - // if successful then verified that user and pw - // are valid ldap credentials - ctx.reconnect(null); - msg = "STEP-2:PASS: The user '" - + testUserName - + "' was succesfully authenticated using userDN '" - + userDN + "' and password provided.\n" - +"*Note: the loginProperty must match the loginProperty listed in dn: for the user. 
It is the DN that RHQ will lookup and use."; - log(msg); - } - } catch (Exception ex) { - msg = "STEP-2:FAIL: There was an error while searching for or authenticating the user '" - + testUserName + "'\n"; - msg += ex.getMessage(); - if(enableVerboseDebugging.isSelected()){ - msg = appendStacktraceToMsg(msg, ex); - } - log(msg); - proceed=false; - } - try { - ctx.addToEnvironment(Context.SECURITY_PRINCIPAL, bindUserName); - ctx.addToEnvironment(Context.SECURITY_CREDENTIALS, bindPassword); - ctx.addToEnvironment(Context.SECURITY_AUTHENTICATION, "simple"); - ctx.reconnect(null); - } catch (Exception ex) { - msg = "STEP-2:WARN: There was an error when switching back to the bind user '" - + bindUserName + "'\n"; - msg += ex.getMessage(); - if(enableVerboseDebugging.isSelected()){ - msg = appendStacktraceToMsg(msg, ex); - } - log(msg); - } - - } - // with authentication completed, now check authorization. - // validate filter components to list all available groups - proceed = false; - if (!groupSearchFilter.isEmpty()) { - Set<Map<String, String>> ret = new HashSet<Map<String, String>>(); - String filter = String - .format("(%s)", groupSearchFilter); - msg = "STEP-3:TESTING: This ldap filter " - + filter - + " will be used to locate ALL available LDAP groups"; - log(msg); - try { - String[] baseDNs = searchBase.split(";"); - for (int x = 0; x < baseDNs.length; x++) { - if(enableVerboseDebugging.isSelected()){ - log(advdb+" this search was excuted against DN component '"+baseDNs[x]+"'."); - } - NamingEnumeration answer = ctx.search( - baseDNs[x], filter, searchControls); - - if(enableVerboseGroupParsing.isSelected()){//in this mode report initial state of Enumeration results - log(advdb+" LDAP Group Search/Enumeration found "+((answer.hasMore())? 
" SOME ":" NO ")+" matching group(s)."); - } - - boolean ldapApiEnumerationBugEncountered = false; - while ((!ldapApiEnumerationBugEncountered) - && answer.hasMoreElements()) { - // We use the first match - SearchResult si = null; - try { - si = (SearchResult) answer.next(); - } catch (NullPointerException npe) { - if(enableVerboseDebugging.isSelected()){ - log(advdb+" NullPtr exception detected. If known LDAP api enum npe ignore: "+npe.getMessage()+"."); - } - ldapApiEnumerationBugEncountered = true; - break; - } - Map<String, String> entry = new HashMap<String, String>(); - if(enableVerboseDebugging.isSelected()||enableVerboseGroupParsing.isSelected()){ - Attributes attributeContainer = si.getAttributes(); - NamingEnumeration<? extends Attribute> attributes = attributeContainer.getAll(); - String attributesReturned = " "; - while(attributes.hasMore()){ - attributesReturned+=attributes.next().getID()+","; - } - String dbugMesg="\n"+advdb+" Group search LDAP ("+attributeContainer.size()+") attributes located for group '"+si.getName()+"' are ["+ - attributesReturned.substring(0, attributesReturned.length()-1)+"]."; - //directly update here to shorten messages for lots of groups - testResults.setText(testResults.getText() + dbugMesg); - - //additionally parse attribute ids and values for illegal ldap characters - if(enableVerboseGroupParsing.isSelected()){ - attributes = attributeContainer.getAll(); - String currentAttributeId =""; - String currentValue =""; - //spinder: 3/17/11: should we bail on first bad data or display them all? 
- while(attributes.hasMore()){ - boolean badData = false; - Attribute att = attributes.next(); - currentAttributeId =att.getID(); - if(containsIllegalLdap(currentAttributeId)){ - log(advdb+" LDAP Group: bad atrribute data detected for group '"+si.getName()+"' for attribute '"+currentAttributeId+"'."); - badData=true; - } - if(att.getAll()!=null){ - NamingEnumeration<?> enumer = att.getAll(); - while(enumer.hasMore()){ - currentValue = enumer.next()+""; - if(containsIllegalLdap(currentValue)){ - log(advdb+" LDAP Group: bad data detected for group '"+si.getName()+"' with attribute '"+currentAttributeId+"' and value:"+currentValue); - badData=true; - } - } - } - if(badData){ - log(advdb+"** LDAP Group: Some bad LDAP data detected for group '"+si.getName()+"'."); - } - } - } - } - - Attribute commonNameAttr = si.getAttributes() - .get("cn"); - if(commonNameAttr!=null){ - String name = (String) commonNameAttr.get(); - name = name.trim(); - Attribute desc = si.getAttributes().get( - "description"); - String description = desc != null ? (String) desc - .get() - : ""; - description = description.trim(); - entry.put("id", name); - entry.put("name", name); - entry.put("description", description); - ret.add(entry); - }else{//unable to retrieve details for specific group. - log(advdb+" There was an error retrieving 'cn' attribute for group '"+si.getName()+"'. Not adding to returned list of groups. "); - } - } - } - msg = "STEP-3:TESTING: Using Group Search Filter '" + filter - + "', " + ret.size() - + " ldap group(s) were located.\n"; - if (ret.size() > 0) { - HashMap<String, String>[] ldapLists = new HashMap[ret.size()]; - ret.toArray(ldapLists); - if(enableVerboseGroupParsing.isSelected()){//in this mode go beyond the first ten results. 
- msg += "STEP-3:PASS: Listing 'all' of the ldap groups located: \n"; - for (int i = 0; i < ret.size(); i++) { - msg += ldapLists[i] + "\n"; - } - }else{//otherwise only show first 10[subset of available groups] - msg += "STEP-3:PASS: Listing a few(<=10) of the ldap groups located: \n"; - for (int i = 0; (i < ret.size() && i < 10); i++) { - msg += ldapLists[i] + "\n"; - } - } - proceed = true;// then can proceed to next step. - } - log(msg); - } catch (Exception ex) { - msg = "STEP-3:FAIL: There was an error searching with the groupFilter supplied: " - + groupSearchFilter + "'\n"; - msg += ex.getMessage(); - if(enableVerboseDebugging.isSelected()){ - msg = appendStacktraceToMsg(msg, ex); - } - log(msg); - proceed=false; - } - } else { - msg = "STEP-3:FAIL: Group Search Filter: cannot be empty to proceed."; - log(msg); - proceed=false; - } - // retrieve lists of authorized groups available for the - if (proceed) { - // check groupMember - if (!groupMemberFilter.isEmpty()) { - Set<Map<String, String>> ret = new HashSet<Map<String, String>>(); - String filter = String.format("(&(%s)(%s=%s))", - groupSearchFilter, groupMemberFilter, - LDAPStringUtil.encodeForFilter(userDN)); - msg = "STEP-4:TESTING: about to do ldap search with filter \n'" - + filter - + "'\n to locate groups that test user IS authorized to access."; - log(msg); - try { - String[] baseDNs = searchBase.split(";"); - for (int x = 0; x < baseDNs.length; x++) { - NamingEnumeration answer = ctx.search( - baseDNs[x], filter, searchControls); - boolean ldapApiEnumerationBugEncountered = false; - //BZ:582471- ldap api bug change - while ((!ldapApiEnumerationBugEncountered) - && answer.hasMoreElements()) { - // We use the first match - SearchResult si = null; - try { - si = (SearchResult) answer.next(); - } catch (NullPointerException npe) { - ldapApiEnumerationBugEncountered = true; - break; - } - Map<String, String> entry = new HashMap<String, String>(); - String name = (String) si - 
.getAttributes().get("cn") - .get(); - name = name.trim(); - Attribute desc = si.getAttributes() - .get("description"); - String description = desc != null ? (String) desc - .get() - : ""; - description = description.trim(); - entry.put("id", name); - entry.put("name", name); - entry.put("description", description); - ret.add(entry); - } - } - msg = "STEP-4:TESTING: Using Group Search Filter '" - + filter + "', " + ret.size() - + " ldap group(s) were located.\n"; - if (ret.size() > 0) { - HashMap<String, String>[] ldapLists = new HashMap[ret - .size()]; - ret.toArray(ldapLists); - msg += "STEP-4:PASS: Listing a few of the ldap groups located: \n"; - // iterate over first ten or less to demonstrate retrieve - for (int i = 0; (i < ret.size() && i < 10); i++) { - msg += ldapLists[i] + "\n"; - } - proceed = true;// then can proceed to next - // step. - }else{ - msg+="STEP-4:WARN: With current settings, test user is not authorized for any groups. Is this correct?"; - } - log(msg); - } catch (Exception ex) { - msg = "STEP-4:FAIL: There was an error searching with the groupFilter supplied: " - + groupSearchFilter + "'\n"; - msg += ex.getMessage(); - if(enableVerboseDebugging.isSelected()){ - msg = appendStacktraceToMsg(msg, ex); - } - log(msg); - proceed=false; - } - - } else { - msg = "STEP-4:FAIL: Group Member Filter must be non-empty to proceed with simulating authorization check for test user."; - log(msg); - } - } - if(proceed){ - msg="COMPLETED:PASS: The current settings, for successful steps, should be correct to enter into your RHQ server."; - msg+="\n\n\n\n When you encounter failures, warnings or other unexpected results you should use an external "; - msg+="LDAP search utility to check that the generated filters return the expected LDAP results."; - log(msg); - } - } - } - - private boolean containsIllegalLdap(String currentValue) { - boolean invalidData = false; - if((currentValue!=null)&&(!currentValue.trim().isEmpty())){ - //TODO: spinder 3/17/11: need to 
figure out regex to filter/detect bad data in returned ldap. Giving up for now. -// String regex = "(?<=(?:[^\]|^)(\\)+|[^\]|^)[/,+"><;=#]|(?<=(?:[^\]|^)(\\)+|[^\]|^)\(?!\|[/,+"><;=#]| $|(?<=^\) )|^"; -// regex = "(?<=(?:[^\\]|^)(\\\\)+|[^\\]|^)[/,+\"><;=#]|(?<=(?:[^\\]|^)(\\\\)+|[^\\]|^)\\(?!\\|[/,+\"><;=#]| $|(?<=^\\) )|^"; -// System.out.println("++++++++ CURR VAL:"+currentValue+":INV-CHeck:"+currentValue.matches(",+"\<;\n=/")+":NEWCHECK:"+(currentValue.matches(regex))); -// if(currentValue.matches(",+"\<;\n=/")){ -// invalidData=true; -// } -// String badList = ",+"\<;\n="; - String badList = "+"\<;\n"; - for(char car :currentValue.toCharArray()){ - for(char c :badList.toCharArray()){ - if(car == c){ - invalidData=true; - } - } - } - - } - return invalidData; - } - - private String appendStacktraceToMsg(String msg, Exception ex) { - String moreVerbose = ""; - moreVerbose+=advdb+" Exception type:"+ex.getClass()+"\n"; - moreVerbose+=advdb+" Exception stack trace reference:"+ex.getStackTrace()+"\n"; - if(ex.getStackTrace()!=null){ - StringWriter sw = new StringWriter(); - PrintWriter pw = new PrintWriter(sw); - ex.printStackTrace(pw); - moreVerbose+=advdb+" stack trace reference:"+sw.toString(); - } - msg+="\n"+moreVerbose; - return msg; - } - }); - } - - // throw the label and fields together, two to a row. 
- private HashMap<String, JTextField> loadUiFields(JPanel top, - String[] componentKeys) { - HashMap<String, JTextField> mappings = new HashMap<String, JTextField>(); - for (int i = 0; i < componentKeys.length; i++) { - String firstLabelKey = componentKeys[i]; - String secondLabelKey = componentKeys[++i]; - // locate second key - JPanel row = new JPanel(); - row.setLayout(new FlowLayout(FlowLayout.LEFT)); - JLabel label1 = new JLabel(firstLabelKey); - label1.setSize(textBoxWidth, 5); - JTextField value1 = new JTextField(textBoxWidth); - JLabel label2 = new JLabel(secondLabelKey); - JTextField value2 = new JTextField(textBoxWidth); - row.add(label1); - row.add(value1); - row.add(Box.createRigidArea(new Dimension(0, 5))); - row.add(label2); - row.add(value2); - mappings.put(firstLabelKey, value1); - mappings.put(secondLabelKey, value2); - top.add(row); - } - - return mappings; - } - - private Properties getProperties(String contentProvider) { - Properties env = new Properties(); - env.setProperty(Context.INITIAL_CONTEXT_FACTORY, - "com.sun.jndi.ldap.LdapCtxFactory"); - env.setProperty(Context.PROVIDER_URL, contentProvider); - if(!enableLdapReferral.isSelected()){ - env.setProperty(Context.REFERRAL, "ignore"); - }else{ - String msg="**---- You have chosen to tell LDAP servers to [FOLLOW] context referrals. Default is [IGNORE] referrals. --**"; - log(msg); - env.setProperty(Context.REFERRAL, "follow"); - } - -// // Setup SSL if requested -// String protocol = ssl.isSelected()? 
"ssl":""; -// if ((protocol != null) && protocol.equals("ssl")) { -// String ldapSocketFactory = env -// .getProperty("java.naming.ldap.factory.socket"); -// if (ldapSocketFactory == null) { -// env.put("java.naming.ldap.factory.socket", -// UntrustedSSLSocketFactory.class.getName()); -// } -// env.put(Context.SECURITY_PROTOCOL, "ssl"); -// } - - return env; - } - - private String delineate() { - String line = "-"; - for (int i = 0; i < 30; i++) { - line += "-"; - } - return line; - } - - private void log(String msg) { - String message = "\n" + delineate() + "\n"; - message += msg; - message += "\n" + delineate() + "\n\n"; - testResults.setText(testResults.getText() + message); - } - - private SearchControls getSearchControls() { - int scope = SearchControls.SUBTREE_SCOPE; - int timeLimit = 0; - long countLimit = 0; - String[] returnedAttributes = null; - boolean returnObject = false; - boolean deference = false; - SearchControls constraints = new SearchControls(scope, countLimit, - timeLimit, returnedAttributes, returnObject, deference); - return constraints; - } -} - -class LDAPStringUtil { - - /** - * <p>Encode a string so that it can be used in an LDAP search filter.</p> - * - * <p>The following table shows the characters that are encoded and their - * encoded version.</p> - * - * <table> - * <tr><th align="center">Character</th><th>Encoded As</th></tr> - * <tr><td align="center">*</td><td>\2a</td></tr> - * <tr><td align="center">(</td><td>\28</td></tr> - * <tr><td align="center">)</td><td>\29</td></tr> - * <tr><td align="center"></td><td>\5c</td></tr> - * <tr><td align="center"><code>null</code></td><td>\00</td></tr> - * </table> - * - * <p>In addition to encoding the above characters, any non-ASCII character - * (any character with a hex value greater then <code>0x7f</code>) is also - * encoded and rewritten as a UTF-8 character or sequence of characters in - * hex notation.</p> - * - * @param filterString a string that is to be encoded - * @return the 
encoded version of <code>filterString</code> suitable for use - * in a LDAP search filter - * @see <a href="http://tools.ietf.org/html/rfc4515">RFC 4515</a> - */ - public static String encodeForFilter(final String filterString) { - if (filterString != null && filterString.length() > 0) { - StringBuilder encString = new StringBuilder(filterString.length()); - for (int i = 0; i < filterString.length(); i++) { - char ch = filterString.charAt(i); - switch (ch) { - case '*': // encode a wildcard * character - encString.append("\2a"); - break; - case '(': // encode a open parenthesis ( character - encString.append("\28"); - break; - case ')': // encode a close parenthesis ) character - encString.append("\29"); - break; - case '\': // encode a backslash \ character - encString.append("\5c"); - break; - case '\u0000': // encode a null character - encString.append("\00"); - break; - default: - if (ch <= 0x7f) { // an ASCII character - encString.append(ch); - } else if (ch >= 0x80) { // encode to UTF-8 - try { - byte[] utf8bytes = String.valueOf(ch).getBytes("UTF8"); - for (byte b : utf8bytes) { - encString.append(String.format("\%02x", b)); - } - } catch (UnsupportedEncodingException e) { - // ignore - } - } - } - } - return encString.toString(); - } - return filterString; - } - -} - diff --git a/modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java b/modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java index f3aee15..89b0698 100644 --- a/modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java +++ b/modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java @@ -895,7 +895,9 @@ public class TestLdapSettings extends JFrame { // Load any search filter String groupSearchFilter = (String) systemConfig.get(SystemSetting.LDAP_GROUP_FILTER.getInternalName()); String groupMemberFilter = (String) systemConfig.get(SystemSetting.LDAP_GROUP_MEMBER.getInternalName()); - String testUserDN=userName; + String userDn = (String) 
systemConfig.get(Context.SECURITY_PRINCIPAL); + + String testUserDN = userDn; String ldapServer = (String) systemConfig.get(Context.PROVIDER_URL);
Properties env = getProperties(ldapServer);
commit 71cff59023452b91967370821cfa6aeee5a049d6 Author: Simeon Pinder spinder@redhat.com Date: Tue Jul 30 19:07:57 2013 -0400
Make warning to users more prominent and accessible.
diff --git a/modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java b/modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java index b9667ea..f3aee15 100644 --- a/modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java +++ b/modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java @@ -39,6 +39,9 @@ import javax.swing.JButton; import javax.swing.JCheckBox; import javax.swing.JFrame; import javax.swing.JLabel; +import javax.swing.JMenu; +import javax.swing.JMenuBar; +import javax.swing.JMenuItem; import javax.swing.JPanel; import javax.swing.JPasswordField; import javax.swing.JScrollPane; @@ -79,6 +82,7 @@ public class TestLdapSettings extends JFrame { private JCheckBox iterativeVerboseLogging; private JCheckBox enablePosixGroups; private JCheckBox enable32xFeatures; + private JMenuBar menuBar; private String advdb = "**Verbose:debug ----"; private static final String BASEDN_DELIMITER = ";";
@@ -95,10 +99,10 @@ public class TestLdapSettings extends JFrame { final String warnMessage = "<html>***WARNING: Depending upon<br>" + "i)how the ldap server is configured <br>" + "ii)client query paging settings <br>" + - " enabling <b>'more detailed logging'</b>,<br>" + - " <b>'more group parsing'</b> AND/OR <b>'also log to console'</b> may cause the console to hang/freeze <br>" + + " enabling <b>'more verbose logging'</b>,<br>" + + " <b>'more detailed group parsing'</b> AND/OR <b>'also log to console'</b> may cause the console to hang/freeze <br>" + " as the LDAP tool continues to parse large query results. If that occurs it is <br>" + - " suggested that you kill tool and re-run with 'also log to console' so that the console logs<br>" + + " suggested that you stop this tool and re-run your queries with <b>'also log to console'</b> so that the console logs<br>" + " will show which dataset is causing the delay and then you should modify your search|group|member<br>" + " filters accordingly to <b>return smaller results</b> and/or <b>consume larger payloads</b>.<br>" + "***WARNING</html>"; @@ -108,6 +112,12 @@ public class TestLdapSettings extends JFrame {
setTitle("Check LDAP Settings: Simulates LDAP checks/queries of RHQ LDAP integration"); getContentPane().setLayout(new BorderLayout()); + menuBar = new JMenuBar(); + JMenu menu = new JMenu("View ***Warning"); + JMenuItem menuItem = new JMenuItem(warnMessage); + menu.add(menuItem); + menuBar.add(menu); + setJMenuBar(menuBar); // top panel definition top = new JPanel(); top.setLayout(new BoxLayout(top, BoxLayout.Y_AXIS));
commit 6363bfd5135c7444aa0798509bb53e6948b1b573 Author: Simeon Pinder spinder@redhat.com Date: Tue Jul 30 17:27:46 2013 -0400
Move under org.rhq like other components.
diff --git a/.classpath b/.classpath index e5e7a21..386316a 100644 --- a/.classpath +++ b/.classpath @@ -215,6 +215,7 @@ <classpathentry kind="src" path="modules/plugins/rhq-storage/src/main/java"/> <classpathentry kind="src" path="modules/enterprise/server/data-migration/src/main/java"/> <classpathentry kind="src" path="modules/enterprise/server/data-migration/src/test/java"/> + <classpathentry kind="src" path="modules/helpers/ldap-tool/src/main/java"/> <classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.7"/> <classpathentry exported="true" kind="var" path="M2_REPO/org/apache/httpcomponents/httpclient/4.2.3/httpclient-4.2.3.jar" sourcepath="M2_REPO/org/apache/httpcomponents/httpclient/4.2.3/httpclient-4.2.3-sources.jar"/> <classpathentry exported="true" kind="var" path="M2_REPO/commons-io/commons-io/2.1/commons-io-2.1.jar" sourcepath="M2_REPO/commons-io/commons-io/2.1/commons-io-2.1-sources.jar"/> diff --git a/modules/helpers/ldap-tool/pom.xml b/modules/helpers/ldap-tool/pom.xml index 28f8678..770f22c 100644 --- a/modules/helpers/ldap-tool/pom.xml +++ b/modules/helpers/ldap-tool/pom.xml @@ -19,6 +19,7 @@ <properties> <executable.name>TestLdapSettings</executable.name> <tool.version>1.0.1</tool.version> + <test.package>org.rhq</test.package> </properties>
<name>RHQ ldap test tool</name> @@ -33,8 +34,8 @@ <configuration> <archive> <manifest> - <packageName>com.test</packageName> - <mainClass>com.test.TestLdapSettings</mainClass> + <packageName>${test.package}</packageName> + <mainClass>${test.package}.TestLdapSettings</mainClass> </manifest> </archive> </configuration> diff --git a/modules/helpers/ldap-tool/src/main/java/com/test/TestLdapSettings.java b/modules/helpers/ldap-tool/src/main/java/com/test/TestLdapSettings.java deleted file mode 100644 index 75ff277..0000000 --- a/modules/helpers/ldap-tool/src/main/java/com/test/TestLdapSettings.java +++ /dev/null @@ -1,1285 +0,0 @@ -package com.test; - -import java.awt.BorderLayout; -import java.awt.Color; -import java.awt.Component; -import java.awt.Dimension; -import java.awt.FlowLayout; -import java.awt.event.ActionEvent; -import java.awt.event.ActionListener; -import java.awt.event.ItemEvent; -import java.awt.event.ItemListener; -import java.awt.event.WindowAdapter; -import java.awt.event.WindowEvent; -import java.io.IOException; -import java.io.PrintWriter; -import java.io.StringWriter; -import java.io.UnsupportedEncodingException; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Properties; -import java.util.Set; - -import javax.naming.Context; -import javax.naming.NamingEnumeration; -import javax.naming.NamingException; -import javax.naming.directory.Attribute; -import javax.naming.directory.Attributes; -import javax.naming.directory.InvalidSearchFilterException; -import javax.naming.directory.SearchControls; -import javax.naming.directory.SearchResult; -import javax.naming.ldap.Control; -import javax.naming.ldap.InitialLdapContext; -import javax.naming.ldap.PagedResultsControl; -import javax.naming.ldap.PagedResultsResponseControl; -import javax.swing.Box; -import javax.swing.BoxLayout; -import javax.swing.JButton; -import javax.swing.JCheckBox; -import javax.swing.JFrame; -import javax.swing.JLabel; -import 
javax.swing.JPanel; -import javax.swing.JPasswordField; -import javax.swing.JScrollPane; -import javax.swing.JTextArea; -import javax.swing.JTextField; -import javax.swing.SwingUtilities; -import javax.swing.UIManager; -import javax.swing.UnsupportedLookAndFeelException; -import javax.swing.border.LineBorder; -import javax.swing.border.TitledBorder; -import javax.swing.event.ChangeEvent; -import javax.swing.event.ChangeListener; - -//import org.rhq.enterprise.server.util.security.UntrustedSSLSocketFactory; - -/* Is a development test tool that allows the user to simulate the RHQ server side - * LDAP calls during auth/authz operations. - * - * The specific LDAP logic below needs to mirror the latest RHQ code and allow the user - * to test our their configuration without requring a specific RHQ/JON build as a dependency. - * - * NOTE: To avoid a runtime dependency on specific versions of RHQ or JON, the small implementation - * methods were copied into this class with minimatl changes for logging and ui messaging. The - * definitive implementation for each 'copied' method can be found in LDAPGroupManagerBean. 
- * - * @author Simeon Pinder - */ -public class TestLdapSettings extends JFrame { - //shared fields - private JTextArea testResults; - private JCheckBox showPasswords; - private JCheckBox ssl; - private JLabel groupPageSizeName; - private JTextField groupMemberQueryValue; - private JTextField testUserNameValue; - private JTextField testUserPasswordValue; - private HashMap<String, JTextField> fieldMappings; - private String[] keys; - private JCheckBox enableLdapReferral; - private JCheckBox enableVerboseDebugging; - private JCheckBox enableVerboseGroupParsing; - private JCheckBox iterativeVerboseLogging; - private JCheckBox enablePosixGroups; - private JCheckBox enable32xFeatures; - private String advdb = "**Verbose:debug ----"; - private static final String BASEDN_DELIMITER = ";"; - - private static final long serialVersionUID = 1L; - int textBoxWidth = 20; - private static JPanel top = null; - private static JPanel testUserRegion = null; - private static Properties env=null; - - public static void main(String args[]) { - new TestLdapSettings(); - } - //After enabling support for Query parsing, we need to warn users of the effects. - final String warnMessage = "<html>***WARNING: Depending upon<br>" + - "i)how the ldap server is configured <br>" + - "ii)client query paging settings <br>" + - " enabling <b>'more detailed logging'</b>,<br>" + - " <b>'more group parsing'</b> AND/OR <b>'also log to console'</b> may cause the console to hang/freeze <br>" + - " as the LDAP tool continues to parse large query results. 
If that occurs it is <br>" + - " suggested that you kill tool and re-run with 'also log to console' so that the console logs<br>" + - " will show which dataset is causing the delay and then you should modify your search|group|member<br>" + - " filters accordingly to <b>return smaller results</b> and/or <b>consume larger payloads</b>.<br>" + - "***WARNING</html>"; - - // Configure window properties - private TestLdapSettings() { - - setTitle("Check LDAP Settings: Simulates LDAP checks/queries of RHQ LDAP integration"); - getContentPane().setLayout(new BorderLayout()); - // top panel definition - top = new JPanel(); - top.setLayout(new BoxLayout(top, BoxLayout.Y_AXIS)); - top.setBorder(LineBorder.createGrayLineBorder()); - //define checkbox here as it's checked when generating UI. - showPasswords = new JCheckBox("show passwords:"); - showPasswords.setSelected(false); - - keys = new String[] { "URL:", "Search Filter:", - "Search Base:","Login Property", - "Username:", "Group Search Filter:", - "Password:", "Group Member Filter:", - }; - fieldMappings = loadUiFields(top, keys); - - //add the two checkboxes for additiona debugging options - enableLdapReferral= new JCheckBox("[follow] ldap referrals"); - enableLdapReferral.setSelected(false); - enableVerboseDebugging= new JCheckBox("more verbose logging"); - enableVerboseDebugging.setSelected(false); - enableVerboseDebugging.setToolTipText(warnMessage); - enableVerboseGroupParsing= new JCheckBox("more detailed group parsing"); - enableVerboseGroupParsing.setSelected(false); - enableVerboseGroupParsing.setToolTipText("*Take care when using this mode with a large number of groups* Every group discovered is parsed/listed."); - iterativeVerboseLogging= new JCheckBox("also log to console"); - iterativeVerboseLogging.setSelected(false); - iterativeVerboseLogging.setToolTipText("This mode is useful when the test tool is having difficulty returning results from large queries."); - 
iterativeVerboseLogging.setToolTipText(warnMessage); - enablePosixGroups= new JCheckBox("is Posix Group"); - enablePosixGroups.setSelected(false); - enablePosixGroups.setEnabled(false); - - //put into 3.2.x functionality row - JPanel jon32xRegion = new JPanel(); - jon32xRegion.setLayout(new FlowLayout(FlowLayout.LEFT)); - LineBorder jon32xLineBorder = new LineBorder(Color.BLACK, 2); - TitledBorder jon32xBorder = new TitledBorder(jon32xLineBorder, "JON 3.2.x/RHQ 4.8.x specific features:"); - jon32xRegion.setBorder(jon32xBorder); - enable32xFeatures= new JCheckBox("enable JON 3.2.x/RHQ 4.8.x features"); - enable32xFeatures.setToolTipText("This enables features not available before RHQ 4.8.x/JON 3.2.x."); - enable32xFeatures.setSelected(false); - enable32xFeatures.addItemListener(new ItemListener() { - @Override - public void itemStateChanged(ItemEvent e) { - if(enable32xFeatures.isSelected()){ - groupPageSizeName.setEnabled(true); - groupMemberQueryValue.setEnabled(true); - groupMemberQueryValue.setEditable(true); - groupMemberQueryValue.setText("1000"); - enablePosixGroups.setEnabled(true); - }else{ - groupMemberQueryValue.setText(""); - groupPageSizeName.setEnabled(false); - groupMemberQueryValue.setEnabled(false); - groupMemberQueryValue.setEditable(false); - enablePosixGroups.setEnabled(false); - enablePosixGroups.setSelected(false); - } - } - }); - - jon32xRegion.add(enable32xFeatures); - groupPageSizeName = new JLabel("Group Query Page Size:"); - groupPageSizeName.setEnabled(false); - groupMemberQueryValue = new JTextField(10); - groupMemberQueryValue.setText("1000"); - groupMemberQueryValue.setEditable(false); - jon32xRegion.add(groupPageSizeName); - jon32xRegion.add(groupMemberQueryValue); - jon32xRegion.add(enablePosixGroups); - top.add(jon32xRegion); - - //put into row display - JPanel advancedDebugRegion = new JPanel(); - advancedDebugRegion.setLayout(new FlowLayout(FlowLayout.LEFT)); - LineBorder advancedBorder = new LineBorder(Color.BLACK, 2); - 
TitledBorder debugBorder = new TitledBorder(advancedBorder, "Debug: **Warning --<hover HERE>**"); - advancedDebugRegion.setBorder(debugBorder); - advancedDebugRegion.add(enableLdapReferral); - advancedDebugRegion.add(enableVerboseDebugging); - advancedDebugRegion.add(enableVerboseGroupParsing); - advancedDebugRegion.add(iterativeVerboseLogging); - advancedDebugRegion.setToolTipText(warnMessage); - top.add(advancedDebugRegion); - - JPanel securityPanel = new JPanel(); - securityPanel.setLayout(new FlowLayout(FlowLayout.LEFT)); - showPasswords.addItemListener(new ItemListener() { - @Override - public void itemStateChanged(ItemEvent e) { - SwingUtilities.invokeLater(new Runnable() { - @Override - public void run() { - //store off existing value - String existingValue = ""; - String existingTestUserPass = ""; - JTextField current = fieldMappings.get("Password:"); - if(current instanceof JPasswordField){ - JPasswordField pass = ((JPasswordField)current); - if(pass!=null){ - char[] charArray = pass.getPassword(); - if(charArray.length>0){ - existingValue = new String(charArray); - } - } - }else{ - existingValue = current.getText(); - } - //save off test user password as well - if(testUserPasswordValue instanceof JPasswordField){ - JPasswordField pass = ((JPasswordField)testUserPasswordValue); - if(pass!=null){ - char[] charArray = pass.getPassword(); - if(charArray.length>0){ - existingTestUserPass = new String(charArray); - } - } - }else{ - existingTestUserPass=testUserPasswordValue.getText(); - } - - JTextField updatedContainer = null; - if(showPasswords.isSelected()){ - updatedContainer = new JTextField(textBoxWidth); - updatedContainer.setText(existingValue); - testUserPasswordValue = new JTextField(textBoxWidth); - testUserPasswordValue.setText(existingTestUserPass); - }else{ - updatedContainer = new JPasswordField(textBoxWidth); - updatedContainer.setText(existingValue); - testUserPasswordValue = new JPasswordField(textBoxWidth); - 
testUserPasswordValue.setText(existingTestUserPass); - } - //locate the JPanel and rebuild it Should be at index 3 - JPanel passwordRow = (JPanel) top.getComponent(3); -// JTextField jf = (JTextField) passwordRow.getComponent(1); - //store off existing components - Component[] existing = new Component[passwordRow.getComponentCount()]; - for(int i=0; i<passwordRow.getComponentCount();i++){ - existing[i] = passwordRow.getComponent(i); - } - passwordRow.removeAll(); - for(int j=0;j<existing.length;j++){ - if(j==1){//insert new JTextField instead - passwordRow.add(updatedContainer); - }else{ - passwordRow.add(existing[j]); - } - } - //reload testUserRegion - //store off existing components - Component[] existingTest = new Component[testUserRegion.getComponentCount()]; - for(int i=0; i<testUserRegion.getComponentCount();i++){ - existingTest[i] = testUserRegion.getComponent(i); - } - testUserRegion.removeAll(); - for(int j=0;j<existingTest.length;j++){ - if(j==3){//insert new JTextField instead - testUserRegion.add(testUserPasswordValue); - }else{ - testUserRegion.add(existingTest[j]); - } - } - - top.revalidate(); - top.repaint(); - } - }); - } - }); - securityPanel.add(showPasswords); - ssl = new JCheckBox("SSL:"); - ssl.setEnabled(false); - securityPanel.add(ssl); - top.add(securityPanel); - - // test user auth region - testUserRegion = new JPanel(); - testUserRegion.setLayout(new FlowLayout(FlowLayout.LEFT)); - LineBorder border = new LineBorder(Color.BLUE, 2); - TitledBorder tBorder = new TitledBorder(border, "Authentication/Authorization Check Credentials: (insert valid ldap user assigned to group)"); - testUserRegion.setBorder(tBorder); - JLabel testUserName = new JLabel("Test UserName:"); - testUserNameValue = new JTextField(textBoxWidth); - JLabel testUserPassword = new JLabel("Test Password:"); -// testUserPasswordValue = new JTextField(textBoxWidth); - testUserPasswordValue = new JPasswordField(textBoxWidth); - testUserRegion.add(testUserName); - 
testUserRegion.add(testUserNameValue); - testUserRegion.add(testUserPassword); - testUserRegion.add(testUserPasswordValue); - top.add(testUserRegion); - - // center - JPanel center = new JPanel(); - center.setLayout(new BoxLayout(center, BoxLayout.X_AXIS)); - // build center panel - buildCenterPanel(center); - - // final component layout - getContentPane().add(top, BorderLayout.NORTH); - getContentPane().add(center, BorderLayout.CENTER); - this.setSize(720, 700); - addWindowListener(new WindowAdapter() { - public void windowClosing(WindowEvent e) { - System.exit(0); - } - }); - setVisible(true); - } - - // define the center display panel. - private void buildCenterPanel(JPanel center) { - // First element is Test Button - JButton test = new JButton("Test Settings"); - center.add(test); - // second is large text box that display ldap queries - testResults = new JTextArea("(click button to test settings values: simulates 4 separate checks showing ldap filters used)", - 40, 40); - JScrollPane jsp = new JScrollPane(testResults); - center.add(jsp); - test.addActionListener(new ActionListener() { - public void actionPerformed(ActionEvent e) { - testResults.setText("");//clear out empty msg - //trim spaces from all fields - String ldapServer = fieldMappings.get(keys[0]).getText().trim(); - String searchFilter = fieldMappings.get(keys[1]).getText().trim(); - String searchBase = fieldMappings.get(keys[2]).getText().trim(); - String loginProperty = fieldMappings.get(keys[3]).getText().trim(); - String bindUserName = fieldMappings.get(keys[4]).getText().trim(); - String groupSearchFilter = fieldMappings.get(keys[5]).getText().trim(); - String bindPassword = fieldMappings.get(keys[6]).getText().trim(); - String groupMemberFilter = fieldMappings.get(keys[7]).getText().trim(); - String groupMemberQuerySize = groupMemberQueryValue.getText().trim(); - String testUserName = testUserNameValue.getText().trim(); - String testUserPassword = testUserPasswordValue.getText().trim(); - // 
validate initial required elements - String msg = null; - boolean proceed = true; - //valid required details set. - if (ldapServer.isEmpty() || bindUserName.isEmpty() - || bindPassword.isEmpty() || searchBase.isEmpty()) { - msg ="STEP-1:FAIL: "+ keys[0] + ", " + keys[2] + ", " + keys[4] + ", " - + keys[6] + " cannot be empty to proceed."; - log(msg); - proceed = false; - } - env = null; - InitialLdapContext ctx = null; - if (proceed) {// attempt initial ldap bind from RHQ server - msg = "STEP-1:TESTING: Attempting to bind to server:" + ldapServer - + "\n with user '" + bindUserName - + "' and password entered."; - log(msg); - env = getProperties(ldapServer); - env.setProperty(Context.SECURITY_PRINCIPAL, bindUserName); - env.setProperty(Context.SECURITY_CREDENTIALS, bindPassword); - env.setProperty(Context.SECURITY_AUTHENTICATION, "simple"); - //put the rest of the LDAP properties into the Properties instance for use later. - //there still needs to be separate variables since some are for UI validation. - env.setProperty(SystemSetting.LDAP_GROUP_FILTER.getInternalName(), groupSearchFilter); - env.setProperty(SystemSetting.LDAP_GROUP_MEMBER.getInternalName(), groupMemberFilter); - env.setProperty(SystemSetting.LDAP_BASE_DN.getInternalName(), searchBase); - env.setProperty(SystemSetting.LDAP_LOGIN_PROPERTY.getInternalName(), loginProperty); - env.setProperty(SystemSetting.LDAP_BIND_DN.getInternalName(), bindUserName); - env.setProperty(SystemSetting.LDAP_BIND_PW.getInternalName(), bindPassword); - env.setProperty(SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.getInternalName(), groupMemberQuerySize); - - for(Object key :env.keySet()){ - System.out.println(key+"="+env.getProperty(key+"")); - } - - try { - ctx = new InitialLdapContext(env, null); - msg = "STEP-1:PASS: LDAP bind credentials are correct. 
Successfully connected to '" - + ldapServer - + "'.\n This means the LDAP Bind credentials for the RHQ Server authentication/authorization requests to ldap server " - + "are correct."; - if(enableVerboseDebugging.isSelected()){ - msg+="\n"+advdb+" LDAP simple authentication bind successful."; - } - log(msg); - proceed = true; - } catch (Exception ex) { - msg = "STEP-1:FAIL: Unable to connect to the LDAP server with credentials specified.\n"; - msg+="Exception:"+ex.getMessage(); - if(enableVerboseDebugging.isSelected()){ - msg = appendStacktraceToMsg(msg, ex); - } - log(msg); - proceed = false; - } - } - if (proceed) {// retrieve test credentials to test run auth - // load search controls - SearchControls searchControls = getSearchControls(); - // validating searchFilter and test user/pass creds - proceed = true; - if (testUserName.isEmpty() || (testUserPassword.isEmpty())) { - msg = "STEP-2:FAIL: Test Username/Password fields cannot be empty for this step."; - log(msg); - proceed = false; - } - // testing a valid user involves a filtered ldap search - // using the loginProperty, and optionally searchFilter - String userDN = ""; - if (proceed) { - // default loginProperty to cn if it's not set - if (loginProperty.isEmpty()) { - loginProperty = "cn"; - if(enableVerboseDebugging.isSelected()){ - String mesg = "As you have not specified a login property, defaulting to 'cn'"; - log(advdb+" "+msg); - } - } - String filter; - if (!searchFilter.isEmpty()) { - filter = "(&(" + loginProperty + "=" + testUserName - + ")" + "(" + searchFilter + "))"; - } else { - filter = "(" + loginProperty + "=" + testUserName - + ")"; - } - if(enableVerboseDebugging.isSelected()){ - log(advdb+" The searchfilter is optionally appended to login property for additional shared attribute across users."); - } - msg = "STEP-2:TESTING: To validate the test user the following LDAP filtered component will be used to find matching users:\n"; - msg += filter; - log(msg); - // test out the search on the 
target ldap server - try { - String[] baseDNs = searchBase.split(";"); - for (int x = 0; x < baseDNs.length; x++) { - NamingEnumeration answer = ctx.search( - baseDNs[x], filter, searchControls); - if(enableVerboseDebugging.isSelected()){ - log(advdb+" this search was excuted against DN component '"+baseDNs[x]+"'."); - } - // boolean ldapApiNpeFound = false; - if (!answer.hasMoreElements()) { - msg="STEP-2:WARN Unable to locate a matching users for the filter'"+filter+ - "'. Please check your loginProperty. Usually 'cn' or 'uid'"; - log(msg); - continue; - } - // Going with the first match - SearchResult si = (SearchResult) answer.next(); - - // Construct the UserDN - userDN = si.getName() + "," + baseDNs[x]; - msg = "STEP-2:PASS: The test user '" - + testUserName - + "' was succesfully located, and the following userDN will be used in authorization check:\n"; - msg += userDN; - log(msg); - - ctx.addToEnvironment(Context.SECURITY_PRINCIPAL, userDN); - ctx.addToEnvironment(Context.SECURITY_CREDENTIALS,testUserPassword); - ctx.addToEnvironment(Context.SECURITY_AUTHENTICATION,"simple"); - - // if successful then verified that user and pw - // are valid ldap credentials - ctx.reconnect(null); - msg = "STEP-2:PASS: The user '" - + testUserName - + "' was succesfully authenticated using userDN '" - + userDN + "' and password provided.\n" - +"*Note: the loginProperty must match the loginProperty listed in dn: for the user. It is the DN that RHQ will lookup and use."; - log(msg); - } - } catch (Exception ex) { - msg = "STEP-2:FAIL: There was an error while searching for or authenticating the user '" - + testUserName + "'\n"; - msg += ex.getMessage(); - if(enableVerboseDebugging.isSelected()){ - msg = appendStacktraceToMsg(msg, ex); - } - log(msg); - proceed=false; - } - } - // with authentication completed, now check authorization. 
- // validate filter components to list all available groups - proceed = false; - if (!groupSearchFilter.isEmpty()) { - Set<Map<String, String>> ret = new HashSet<Map<String, String>>(); - String filter = null; - - if (groupSearchFilter.startsWith("(") && groupSearchFilter.endsWith(")")){ - filter = groupSearchFilter; // RFC 2254 does not allow for ((expression)) - }else{ - filter = String - .format("(%s)", groupSearchFilter); - } - msg = "STEP-3:TESTING: This ldap filter " - + filter - + " will be used to locate ALL available LDAP groups"; - log(msg); - - Properties systemConfig = populateProperties(env); - - ret = buildGroup(systemConfig, filter); - msg = "STEP-3:TESTING: Using Group Search Filter '" - + filter + "', " + ret.size() - + " ldap group(s) were located.\n"; - if (ret.size() > 0) { - HashMap<String, String>[] ldapLists = new HashMap[ret - .size()]; - ret.toArray(ldapLists); - // in this mode go beyond the first ten results. - if (enableVerboseGroupParsing.isSelected()) { - msg += "STEP-3:PASS: Listing 'all' of the ldap groups located: \n"; - for (int i = 0; i < ret.size(); i++) { - msg += ldapLists[i] + "\n"; - } - } else {// otherwise only show first 10[subset of - // available groups] - msg += "STEP-3:PASS: Listing a few(<=10) of the ldap groups located: \n"; - for (int i = 0; (i < ret.size() && i < 10); i++) { - msg += ldapLists[i] + "\n"; - } - } - proceed = true;// then can proceed to next step. 
- } - log(msg); - } else { - msg = "STEP-3:FAIL: Group Search Filter: cannot be empty to proceed."; - log(msg); - proceed=false; - } - // retrieve lists of authorized groups available for the - if (proceed) { - // check groupMember - if (!groupMemberFilter.isEmpty()) { -// Map<String, String> userDetails = new HashMap<String, String>(); -// userDetails = findLdapUserDetails(userDN); - Set<String> userDetails = findAvailableGroupsFor(testUserName); - - if(!userDetails.isEmpty()){ - proceed=true; - } - } else { - msg = "STEP-4:FAIL: Group Member Filter must be non-empty to proceed with simulating authorization check for test user."; - log(msg); - } - } - if(proceed){ - msg="COMPLETED:PASS: The current settings, for successful steps, should be correct to enter into your RHQ server."; - msg+="\n\n\n\n When you encounter failures, warnings or other unexpected results you should use an external "; - msg+="LDAP search utility to check that the generated filters return the expected LDAP results."; - log(msg); - } - } - } - }); - } - - private String appendStacktraceToMsg(String msg, Exception ex) { - String moreVerbose = ""; - moreVerbose+=advdb+" Exception type:"+ex.getClass()+"\n"; - moreVerbose+=advdb+" Exception stack trace reference:"+ex.getStackTrace()+"\n"; - if(ex.getStackTrace()!=null){ - StringWriter sw = new StringWriter(); - PrintWriter pw = new PrintWriter(sw); - ex.printStackTrace(pw); - moreVerbose+=advdb+" stack trace reference:"+sw.toString(); - } - msg+="\n"+moreVerbose; - return msg; - } - - private boolean containsIllegalLdap(String currentValue) { - boolean invalidData = false; - if((currentValue!=null)&&(!currentValue.trim().isEmpty())){ - //TODO: spinder 3/17/11: need to figure out regex to filter/detect bad data in returned ldap. Giving up for now. 
-// String regex = "(?<=(?:[^\]|^)(\\)+|[^\]|^)[/,+"><;=#]|(?<=(?:[^\]|^)(\\)+|[^\]|^)\(?!\|[/,+"><;=#]| $|(?<=^\) )|^"; -// regex = "(?<=(?:[^\\]|^)(\\\\)+|[^\\]|^)[/,+\"><;=#]|(?<=(?:[^\\]|^)(\\\\)+|[^\\]|^)\\(?!\\|[/,+\"><;=#]| $|(?<=^\\) )|^"; -// System.out.println("++++++++ CURR VAL:"+currentValue+":INV-CHeck:"+currentValue.matches(",+"\<;\n=/")+":NEWCHECK:"+(currentValue.matches(regex))); -// if(currentValue.matches(",+"\<;\n=/")){ -// invalidData=true; -// } -// String badList = ",+"\<;\n="; - String badList = "+"\<;\n"; - for(char car :currentValue.toCharArray()){ - for(char c :badList.toCharArray()){ - if(car == c){ - invalidData=true; - } - } - } - - } - return invalidData; - } - /** - * @throws NamingException - * @see org.jboss.security.auth.spi.UsernamePasswordLoginModule#validatePassword(java.lang.String,java.lang.String) - */ - protected Set<Map<String, String>> buildGroup(Properties systemConfig, String filter) { - Set<Map<String, String>> groupDetailsMap = new HashSet<Map<String, String>>(); - // Load our LDAP specific properties - // Load the BaseDN - String baseDN = (String) systemConfig.get(SystemSetting.LDAP_BASE_DN.getInternalName()); - - // Load the LoginProperty - String loginProperty = (String) systemConfig.get(SystemSetting.LDAP_LOGIN_PROPERTY.getInternalName()); - if (loginProperty == null) { - // Use the default - loginProperty = "cn"; - } - // Load any information we may need to bind - String bindDN = (String) systemConfig.get(SystemSetting.LDAP_BIND_DN.getInternalName()); - String bindPW = (String) systemConfig.get(SystemSetting.LDAP_BIND_PW.getInternalName()); - if (bindDN != null) { - systemConfig.setProperty(Context.SECURITY_PRINCIPAL, bindDN); - systemConfig.setProperty(Context.SECURITY_CREDENTIALS, bindPW); - systemConfig.setProperty(Context.SECURITY_AUTHENTICATION, "simple"); - } - try { - InitialLdapContext ctx = new InitialLdapContext(systemConfig, null); - SearchControls searchControls = getSearchControls(); - /*String filter 
= "(&(objectclass=groupOfUniqueNames)(uniqueMember=uid=" + userName - + ",ou=People, dc=rhndev, dc=redhat, dc=com))";*/ - - //modify the search control to only include the attributes we will use - String[] attributes = { "cn", "description" }; - searchControls.setReturningAttributes(attributes); - - //BZ:964250: add rfc 2696 - //default to 1000 results. System setting page size from UI should be non-negative integer > 0. - //additionally as system settings are modifiable via CLI which may not have param checking enabled do some - //more checking. - int defaultPageSize = 1000; - // only if they're enabled in the UI. - if (enable32xFeatures.isSelected()) { - String groupPageSize = systemConfig.getProperty( - SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE - .getInternalName(), "" + defaultPageSize); - if ((groupPageSize != null) - && (!groupPageSize.trim().isEmpty())) { - int passedInPageSize = -1; - try { - passedInPageSize = Integer - .valueOf(groupPageSize.trim()); - if (passedInPageSize > 0) { - defaultPageSize = passedInPageSize; - if(enableVerboseDebugging.isSelected()){ - log(advdb - + " LDAP Group Query Page Sizing of '"+defaultPageSize+"' is being requested from server."); - } - } - } catch (NumberFormatException nfe) { - // log issue and do nothing. Go with the default. - String msg = "LDAP Group Page Size passed in '" - + groupPageSize - + "' in is invalid. Defaulting to 1000 results." - + nfe.getMessage(); - log(msg); - } - } - ctx.setRequestControls(new Control[] { new PagedResultsControl( - defaultPageSize, Control.CRITICAL) }); - } - // Loop through each configured base DN. It may be useful - // in the future to allow for a filter to be configured for - // each BaseDN, but for now the filter will apply to all. 
- String[] baseDNs = baseDN.split(BASEDN_DELIMITER); - - for (int x = 0; x < baseDNs.length; x++) { - if (enableVerboseDebugging.isSelected()) { - log(advdb - + " this search was excuted against DN component '" - + baseDNs[x] + "'."); - } - executeGroupSearch(filter, groupDetailsMap, ctx, searchControls, baseDNs, x); - - // continually parsing pages of results until we're done. - // only if they're enabled in the UI. - if (enable32xFeatures.isSelected()) { - // handle paged results if they're being used here - byte[] cookie = null; - Control[] controls = ctx.getResponseControls(); - if (controls != null) { - for (Control control : controls) { - if (control instanceof PagedResultsResponseControl) { - PagedResultsResponseControl pagedResult = (PagedResultsResponseControl) control; - cookie = pagedResult.getCookie(); - } - } - } - - while (cookie != null) { - String msg = "RFC 2696 is supported by the server and we are paging through the results. "+ - groupDetailsMap.size()+" results returned so far."; - if(enableVerboseGroupParsing.isSelected()){ - log(advdb - + msg); - } - // ensure the next requests contains the session/cookie - // details - ctx.setRequestControls(new Control[] { new PagedResultsControl( - defaultPageSize, cookie, Control.CRITICAL) }); - executeGroupSearch(filter, groupDetailsMap, ctx, - searchControls, baseDNs, x); - // empty out cookie - cookie = null; - // test for further iterations - controls = ctx.getResponseControls(); - if (controls != null) { - for (Control control : controls) { - if (control instanceof PagedResultsResponseControl) { - PagedResultsResponseControl pagedResult = (PagedResultsResponseControl) control; - cookie = pagedResult.getCookie(); - } - } - } - } - } - }//end of for loop - } catch (NamingException e) { - if (e instanceof InvalidSearchFilterException) { - InvalidSearchFilterException fException = (InvalidSearchFilterException) e; - String message = "The ldap group filter defined is invalid "; - log(message); - } - 
//TODO: check for ldap connection/unavailable/etc. exceptions. - else { - String mesg = "LDAP communication error: " + e.getMessage(); - log(mesg); - } - } catch (IOException iex) { - String msg = "Unexpected LDAP communciation error:" + iex.getMessage(); - log(msg); - } - - return groupDetailsMap; - } - - /** Executes the LDAP group query using the filters, context and search controls, etc. parameters passed in. - * The matching groups located during processing this pages of results are added as new entries to the - * groupDetailsMap passed in. - * - * @param filter - * @param groupDetailsMap - * @param ctx - * @param searchControls - * @param baseDNs - * @param x - * @throws NamingException - */ - private void executeGroupSearch(String filter, Set<Map<String, String>> groupDetailsMap, InitialLdapContext ctx, - SearchControls searchControls, String[] baseDNs, int x) throws NamingException { - //execute search based on controls and context passed in. - NamingEnumeration<SearchResult> answer = ctx.search(baseDNs[x], filter, searchControls); - boolean ldapApiEnumerationBugEncountered = false; - while ((!ldapApiEnumerationBugEncountered) && answer.hasMoreElements()) {//BZ:582471- ldap api bug change - // We use the first match - SearchResult si = null; - try { - si = answer.next(); - } catch (NullPointerException npe) { - if (enableVerboseDebugging.isSelected()) { - log(advdb - + " NullPtr exception detected. If known LDAP api enum npe ignore: " - + npe.getMessage() + "."); - } - ldapApiEnumerationBugEncountered = true; - break; - } - - if (enableVerboseDebugging.isSelected() - || enableVerboseGroupParsing.isSelected()) { - Attributes attributeContainer = si.getAttributes(); - NamingEnumeration<? 
extends Attribute> attributes = attributeContainer - .getAll(); - String attributesReturned = " "; - while (attributes.hasMore()) { - attributesReturned += attributes.next().getID() + ","; - } - String dbugMesg = "\n" - + advdb - + " Group search LDAP (" - + attributeContainer.size() - + ") attributes located for group '" - + si.getName() - + "' are [" - + attributesReturned.substring(0, - attributesReturned.length() - 1) + "]."; - // directly update here to shorten messages for lots of groups - testResults.setText(testResults.getText() + dbugMesg); - //This flag can be used in the unlikely case that the UI hangs during a test operation.: - if(iterativeVerboseLogging.isSelected()){ - System.out.println(dbugMesg); - } - - // additionally parse attribute ids and values for illegal ldap - // characters - if (enableVerboseGroupParsing.isSelected()) { - attributes = attributeContainer.getAll(); - String currentAttributeId = ""; - String currentValue = ""; - // spinder: 3/17/11: should we bail on first bad data or - // display them all? 
- while (attributes.hasMore()) { - boolean badData = false; - Attribute att = attributes.next(); - currentAttributeId = att.getID(); - if (containsIllegalLdap(currentAttributeId)) { - log(advdb - + " LDAP Group: bad atrribute data detected for group '" - + si.getName() + "' for attribute '" - + currentAttributeId + "'."); - badData = true; - } - if (att.getAll() != null) { - NamingEnumeration<?> enumer = att.getAll(); - while (enumer.hasMore()) { - currentValue = enumer.next() + ""; - if (containsIllegalLdap(currentValue)) { - log(advdb - + " LDAP Group: bad data detected for group '" - + si.getName() - + "' with attribute '" - + currentAttributeId - + "' and value:" + currentValue); - badData = true; - } - } - } - if (badData) { - log(advdb - + "** LDAP Group: Some bad LDAP data detected for group '" - + si.getName() + "'."); - } - } - } - } - - Map<String, String> entry = new HashMap<String, String>(); - // String name = (String) si.getAttributes().get("cn").get(); - Attribute commonNameAttr = si.getAttributes().get("cn"); - if (commonNameAttr != null) { - String name = (String) commonNameAttr.get(); - name = name.trim(); - Attribute desc = si.getAttributes().get("description"); - String description = desc != null ? (String) desc.get() : ""; - description = description.trim(); - entry.put("id", name); - entry.put("name", name); - entry.put("description", description); - groupDetailsMap.add(entry); - } else {// unable to retrieve details for specific group. - log(advdb - + " There was an error retrieving 'cn' attribute for group '" - + si.getName() - + "'. Not adding to returned list of groups. 
"); - } - } - } - - public Map<String, String> findLdapUserDetails(String userName) { - // Load our LDAP specific properties - Properties systemConfig = env; - HashMap<String, String> userDetails = new HashMap<String, String>(); - - // Load the BaseDN - String baseDN = (String) systemConfig.get(SystemSetting.LDAP_BASE_DN.getInternalName()); - - // Load the LoginProperty - String loginProperty = (String) systemConfig.get(SystemSetting.LDAP_LOGIN_PROPERTY.getInternalName()); - if (loginProperty == null) { - // Use the default - loginProperty = "cn"; - } - // Load any information we may need to bind - String bindDN = (String) systemConfig.get(SystemSetting.LDAP_BIND_DN.getInternalName()); - String bindPW = (String) systemConfig.get(SystemSetting.LDAP_BIND_PW.getInternalName()); - - // Load any search filter - String groupSearchFilter = (String) systemConfig.get(SystemSetting.LDAP_GROUP_FILTER.getInternalName()); - String groupMemberFilter = (String) systemConfig.get(SystemSetting.LDAP_GROUP_MEMBER.getInternalName()); - String testUserDN=userName; - String ldapServer = (String) systemConfig.get(Context.PROVIDER_URL); - - Properties env = getProperties(ldapServer); - - if (bindDN != null) { - env.setProperty(Context.SECURITY_PRINCIPAL, bindDN); - env.setProperty(Context.SECURITY_CREDENTIALS, bindPW); - env.setProperty(Context.SECURITY_AUTHENTICATION, "simple"); - } - - try { - InitialLdapContext ctx = new InitialLdapContext(env, null); - SearchControls searchControls = getSearchControls(); - - String filter = String.format("(&(%s)(%s=%s))", - groupSearchFilter, groupMemberFilter, - testUserDN); - - generateUiLoggingForStep4LdapFilter(userName, filter); - - // Loop through each configured base DN. It may be useful - // in the future to allow for a filter to be configured for - // each BaseDN, but for now the filter will apply to all. 
- String[] baseDNs = baseDN.split(BASEDN_DELIMITER); - for (int x = 0; x < baseDNs.length; x++) { - NamingEnumeration<SearchResult> answer = ctx.search(baseDNs[x], filter, searchControls); - if (!answer.hasMoreElements()) { //BZ:582471- ldap api bug change - // Nothing found for this DN, move to the next one if we have one. - continue; - } - - // We use the first match - SearchResult si = answer.next(); - //generate the DN - String userDN = null; - try { - userDN = si.getNameInNamespace(); - } catch (UnsupportedOperationException use) { - userDN = si.getName(); - if (userDN.startsWith(""")) { - userDN = userDN.substring(1, userDN.length()); - } - if (userDN.endsWith(""")) { - userDN = userDN.substring(0, userDN.length() - 1); - } - userDN = userDN + "," + baseDNs[x]; - } - userDetails.put("dn", userDN); - - // Construct the UserDN - NamingEnumeration<String> keys = si.getAttributes().getIDs(); - while (keys.hasMore()) { - String key = keys.next(); - Attribute value = si.getAttributes().get(key); - if ((value != null) && (value.get() != null)) { - userDetails.put(key, value.get().toString()); - } - } -// return userDetails; - }//end of for loop - generateUiLoggingStep4Authz(filter); - return userDetails; - } catch (Exception ex) { - generateUiLoggingStep4Exception(ex); - } - return userDetails; - } - - public Set<String> findAvailableGroupsFor(String userName) { - // Load our LDAP specific properties - Properties options = env; - String groupFilter = options.getProperty(SystemSetting.LDAP_GROUP_FILTER.getInternalName(), ""); - String groupMember = options.getProperty(SystemSetting.LDAP_GROUP_MEMBER.getInternalName(), ""); - String groupUsePosix = options.getProperty(SystemSetting.LDAP_GROUP_USE_POSIX.getInternalName(), "false"); - if (groupUsePosix == null) { - groupUsePosix = Boolean.toString(false);//default to false - } - boolean usePosixGroups = Boolean.valueOf(groupUsePosix); - String userAttribute = getUserAttribute(options, userName, usePosixGroups); - 
Set<String> ldapSet = new HashSet<String>(); - - if (userAttribute != null && userAttribute.trim().length() > 0) { - //TODO: spinder 4/21/10 put in error/debug logging messages for badly formatted filter combinations - String filter = ""; - //form assumes examples where groupFilter is like 'objectclass=groupOfNames' and groupMember is 'member' - // to produce ldap filter like (&(objectclass=groupOfNames)(member=cn=Administrator,ou=People,dc=test,dc=com)) - // or like (&(objectclass=groupOfNames)(memberUid=Administrator)) for posixGroups. - filter = String.format("(&(%s)(%s=%s))", groupFilter, groupMember, encodeForFilter(userAttribute)); - - Set<Map<String, String>> matched = buildGroup(options, filter); -// log.trace("Located '" + matched.size() + "' LDAP groups for user '" + userName -// + "' using following ldap filter '" + filter + "'."); - - //iterate to extract just the group names. - for (Map<String, String> match : matched) { - ldapSet.add(match.get("id")); - } - } else { -// log.debug("Group lookup will not be performed due to no UserDN found for user " + userName); - } - - return ldapSet; - } - - private void generateUiLoggingStep4Exception(Exception ex) { - String groupSearchFilter = env - .getProperty(SystemSetting.LDAP_GROUP_FILTER - .getInternalName()); - String msg = "STEP-4:FAIL: There was an error searching with the groupFilter supplied: " - + groupSearchFilter + "'\n"; - msg += ex.getMessage(); - if (enableVerboseDebugging.isSelected()) { - msg = appendStacktraceToMsg(msg, ex); - } - log(msg); - } - - private void generateUiLoggingStep4Authz(String filter) { - Set<Map<String, String>> groups = buildGroup(env, filter); - String msg = "STEP-4:TESTING: Using Group Search Filter '" - + filter + "', " + groups.size() - + " ldap group(s) were located.\n"; - if (groups.size() > 0) { - HashMap<String, String>[] ldapLists = new HashMap[groups - .size()]; - groups.toArray(ldapLists); - msg += "STEP-4:PASS: Listing a few of the ldap groups located: \n"; - // 
iterate over first ten or less to demonstrate retrieve - for (int i = 0; (i < groups.size() && i < 10); i++) { - msg += ldapLists[i] + "\n"; - } - }else{ - msg+="STEP-4:WARN: With current settings, test user is not authorized for any groups. Is this correct?"; - } - log(msg); - } - - private void generateUiLoggingForStep4LdapFilter(String userName, - String filter) { - String msg = "STEP-4:TESTING: about to do ldap search with filter \n'" - + filter - + "'\n to locate groups that test user '"+userName+"' IS authorized to access."; - log(msg); - } - - - // throw the label and fields together, two to a row. - private HashMap<String, JTextField> loadUiFields(JPanel top, - String[] componentKeys) { - HashMap<String, JTextField> mappings = new HashMap<String, JTextField>(); - for (int i = 0; i < componentKeys.length; i++) { - String firstLabelKey = componentKeys[i]; - String secondLabelKey = componentKeys[++i]; - // locate second key - JPanel row = new JPanel(); - row.setLayout(new FlowLayout(FlowLayout.LEFT)); - JLabel label1 = new JLabel(firstLabelKey); - label1.setSize(textBoxWidth, 5); -// JTextField value1 = new JTextField(textBoxWidth); - JTextField value1 = null; - if (firstLabelKey.equalsIgnoreCase("Password:")&&(!showPasswords.isSelected())) { - value1 = new JPasswordField(textBoxWidth); - } else { - value1 = new JTextField(textBoxWidth); - } - JLabel label2 = new JLabel(secondLabelKey); - JTextField value2 = new JTextField(textBoxWidth); - row.add(label1); - row.add(value1); - row.add(Box.createRigidArea(new Dimension(0, 5))); - row.add(label2); - row.add(value2); - mappings.put(firstLabelKey, value1); - mappings.put(secondLabelKey, value2); - top.add(row); - } - - return mappings; - } - - private Properties getProperties(String contentProvider) { - Properties env = new Properties(); - env.setProperty(Context.INITIAL_CONTEXT_FACTORY, - "com.sun.jndi.ldap.LdapCtxFactory"); - env.setProperty(Context.PROVIDER_URL, contentProvider); - 
if(!enableLdapReferral.isSelected()){ - env.setProperty(Context.REFERRAL, "ignore"); - }else{ - String msg="**---- You have chosen to tell LDAP servers to [FOLLOW] context referrals. Default is [IGNORE] referrals. --**"; - log(msg); - env.setProperty(Context.REFERRAL, "follow"); - } - -// // Setup SSL if requested -// String protocol = ssl.isSelected()? "ssl":""; -// if ((protocol != null) && protocol.equals("ssl")) { -// String ldapSocketFactory = env -// .getProperty("java.naming.ldap.factory.socket"); -// if (ldapSocketFactory == null) { -// env.put("java.naming.ldap.factory.socket", -// UntrustedSSLSocketFactory.class.getName()); -// } -// env.put(Context.SECURITY_PROTOCOL, "ssl"); -// } - - return env; - } - - private String delineate() { - String line = "-"; - for (int i = 0; i < 30; i++) { - line += "-"; - } - return line; - } - - /** Takes care of delineating messages and conditional logging contents passed in. - * @param msg - */ - private void log(String msg) { - String message = "\n" + delineate() + "\n"; - message += msg; - message += "\n" + delineate() + "\n\n"; - //This flag can be used in the unlikely case that the UI hangs during a test operation.: - if(iterativeVerboseLogging.isSelected()){ - System.out.println(message); - } - testResults.setText(testResults.getText() + message); - } - - private SearchControls getSearchControls() { - int scope = SearchControls.SUBTREE_SCOPE; - int timeLimit = 0; - long countLimit = 0; - String[] returnedAttributes = null; - boolean returnObject = false; - boolean deference = false; - SearchControls constraints = new SearchControls(scope, countLimit, - timeLimit, returnedAttributes, returnObject, deference); - return constraints; - } - - /** Translate SystemSettings to familiar Properties instance since we're - * passing not one but multiple values. 
- * - * @param systemSettings - * @return - */ - private Properties populateProperties(Properties existing) { - Properties properties = new Properties(); - if(existing!=null){ - properties = existing; - } - for (SystemSetting entry : SystemSetting.values()) { - if(entry!=null){ - switch(entry){ - case LDAP_BASED_JAAS_PROVIDER: - properties.put(entry.getInternalName(), ""); - break; - } - } - } - return properties; - } - - /**Build/retrieve the user DN. Not usually a property. - * - * @param options - * @param userName - * @param usePosixGroups boolean indicating whether we search for groups with posixGroup format - * @return - */ - private String getUserAttribute(Properties options, String userName, boolean usePosixGroups) { - Map<String, String> details = findLdapUserDetails(userName); - String userAttribute = null; - if (usePosixGroups) {//return just the username as posixGroup member search uses (&(%s)(memberUid=username)) - userAttribute = userName; - } else {//this is the default where group search uses (&(%s)(uniqueMember={userDn})) - userAttribute = details.get("dn"); - } - - return userAttribute; - } - - /** See LDAPStringUtil.encodeForFilter() for original code/source/author/etc. 
- * <p>Encode a string so that it can be used in an LDAP search filter.</p> - * - * <p>The following table shows the characters that are encoded and their - * encoded version.</p> - * - * <table> - * <tr><th align="center">Character</th><th>Encoded As</th></tr> - * <tr><td align="center">*</td><td>\2a</td></tr> - * <tr><td align="center">(</td><td>\28</td></tr> - * <tr><td align="center">)</td><td>\29</td></tr> - * <tr><td align="center"></td><td>\5c</td></tr> - * <tr><td align="center"><code>null</code></td><td>\00</td></tr> - * </table> - * - * <p>In addition to encoding the above characters, any non-ASCII character - * (any character with a hex value greater then <code>0x7f</code>) is also - * encoded and rewritten as a UTF-8 character or sequence of characters in - * hex notation.</p> - * - * @param filterString a string that is to be encoded - * @return the encoded version of <code>filterString</code> suitable for use - * in a LDAP search filter - * @see <a href="http://tools.ietf.org/html/rfc4515">RFC 4515</a> - */ - public static String encodeForFilter(final String filterString) { - if (filterString != null && filterString.length() > 0) { - StringBuilder encString = new StringBuilder(filterString.length()); - for (int i = 0; i < filterString.length(); i++) { - char ch = filterString.charAt(i); - switch (ch) { - case '*': // encode a wildcard * character - encString.append("\2a"); - break; - case '(': // encode a open parenthesis ( character - encString.append("\28"); - break; - case ')': // encode a close parenthesis ) character - encString.append("\29"); - break; - case '\': // encode a backslash \ character - encString.append("\5c"); - break; - case '\u0000': // encode a null character - encString.append("\00"); - break; - default: - if (ch <= 0x7f) { // an ASCII character - encString.append(ch); - } else if (ch >= 0x80) { // encode to UTF-8 - try { - byte[] utf8bytes = String.valueOf(ch).getBytes("UTF8"); - for (byte b : utf8bytes) { - 
encString.append(String.format("\%02x", b)); - } - } catch (UnsupportedEncodingException e) { - // ignore - } - } - } - } - return encString.toString(); - } - return filterString; - } -} - -//Mock up the upgraded system properties approach to use SystemSetting -enum SystemSetting { - LDAP_BASED_JAAS_PROVIDER("CAM_JAAS_PROVIDER"), - LDAP_NAMING_PROVIDER_URL("CAM_LDAP_NAMING_PROVIDER_URL"), - USE_SSL_FOR_LDAP("CAM_LDAP_PROTOCOL"), - LDAP_LOGIN_PROPERTY("CAM_LDAP_LOGIN_PROPERTY"), - LDAP_FILTER("CAM_LDAP_FILTER"), - LDAP_GROUP_FILTER("CAM_LDAP_GROUP_FILTER"), - LDAP_GROUP_MEMBER("CAM_LDAP_GROUP_MEMBER"), - LDAP_GROUP_QUERY_PAGE_SIZE("CAM_LDAP_GROUP_QUERY_PAGE_SIZE"), - LDAP_BASE_DN("CAM_LDAP_BASE_DN"), - LDAP_BIND_DN("CAM_LDAP_BIND_DN"), - LDAP_BIND_PW("CAM_LDAP_BIND_PW"), - LDAP_NAMING_FACTORY("CAM_LDAP_NAMING_FACTORY_INITIAL"), - LDAP_GROUP_USE_POSIX("CAM_LDAP_GROUP_USE_POSIX"), - ; - - private String internalName; - - private SystemSetting(String name) { - this.internalName = name; - } - - public String getInternalName() { - return internalName; - } - - public static SystemSetting getByInternalName(String internalName) { - for (SystemSetting p : SystemSetting.values()) { - if (p.internalName.equals(internalName)) { - return p; - } - } - return null; - } -} - - diff --git a/modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java b/modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java new file mode 100644 index 0000000..b9667ea --- /dev/null +++ b/modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java @@ -0,0 +1,1277 @@ +package org.rhq; + +import java.awt.BorderLayout; +import java.awt.Color; +import java.awt.Component; +import java.awt.Dimension; +import java.awt.FlowLayout; +import java.awt.event.ActionEvent; +import java.awt.event.ActionListener; +import java.awt.event.ItemEvent; +import java.awt.event.ItemListener; +import java.awt.event.WindowAdapter; +import java.awt.event.WindowEvent; +import java.io.IOException; 
+import java.io.PrintWriter; +import java.io.StringWriter; +import java.io.UnsupportedEncodingException; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Properties; +import java.util.Set; + +import javax.naming.Context; +import javax.naming.NamingEnumeration; +import javax.naming.NamingException; +import javax.naming.directory.Attribute; +import javax.naming.directory.Attributes; +import javax.naming.directory.InvalidSearchFilterException; +import javax.naming.directory.SearchControls; +import javax.naming.directory.SearchResult; +import javax.naming.ldap.Control; +import javax.naming.ldap.InitialLdapContext; +import javax.naming.ldap.PagedResultsControl; +import javax.naming.ldap.PagedResultsResponseControl; +import javax.swing.Box; +import javax.swing.BoxLayout; +import javax.swing.JButton; +import javax.swing.JCheckBox; +import javax.swing.JFrame; +import javax.swing.JLabel; +import javax.swing.JPanel; +import javax.swing.JPasswordField; +import javax.swing.JScrollPane; +import javax.swing.JTextArea; +import javax.swing.JTextField; +import javax.swing.SwingUtilities; +import javax.swing.border.LineBorder; +import javax.swing.border.TitledBorder; + +//import org.rhq.enterprise.server.util.security.UntrustedSSLSocketFactory; + +/* Is a development test tool that allows the user to simulate the RHQ server side + * LDAP calls during auth/authz operations. + * + * The specific LDAP logic below needs to mirror the latest RHQ code and allow the user + * to test our their configuration without requring a specific RHQ/JON build as a dependency. + * + * NOTE: To avoid a runtime dependency on specific versions of RHQ or JON, the small implementation + * methods were copied into this class with minimatl changes for logging and ui messaging. The + * definitive implementation for each 'copied' method can be found in LDAPGroupManagerBean. 
+ * + * @author Simeon Pinder + */ +public class TestLdapSettings extends JFrame { + //shared fields + private JTextArea testResults; + private JCheckBox showPasswords; + private JCheckBox ssl; + private JLabel groupPageSizeName; + private JTextField groupMemberQueryValue; + private JTextField testUserNameValue; + private JTextField testUserPasswordValue; + private HashMap<String, JTextField> fieldMappings; + private String[] keys; + private JCheckBox enableLdapReferral; + private JCheckBox enableVerboseDebugging; + private JCheckBox enableVerboseGroupParsing; + private JCheckBox iterativeVerboseLogging; + private JCheckBox enablePosixGroups; + private JCheckBox enable32xFeatures; + private String advdb = "**Verbose:debug ----"; + private static final String BASEDN_DELIMITER = ";"; + + private static final long serialVersionUID = 1L; + int textBoxWidth = 20; + private static JPanel top = null; + private static JPanel testUserRegion = null; + private static Properties env=null; + + public static void main(String args[]) { + new TestLdapSettings(); + } + //After enabling support for Query parsing, we need to warn users of the effects. + final String warnMessage = "<html>***WARNING: Depending upon<br>" + + "i)how the ldap server is configured <br>" + + "ii)client query paging settings <br>" + + " enabling <b>'more detailed logging'</b>,<br>" + + " <b>'more group parsing'</b> AND/OR <b>'also log to console'</b> may cause the console to hang/freeze <br>" + + " as the LDAP tool continues to parse large query results. 
If that occurs it is <br>" + + " suggested that you kill tool and re-run with 'also log to console' so that the console logs<br>" + + " will show which dataset is causing the delay and then you should modify your search|group|member<br>" + + " filters accordingly to <b>return smaller results</b> and/or <b>consume larger payloads</b>.<br>" + + "***WARNING</html>"; + + // Configure window properties + private TestLdapSettings() { + + setTitle("Check LDAP Settings: Simulates LDAP checks/queries of RHQ LDAP integration"); + getContentPane().setLayout(new BorderLayout()); + // top panel definition + top = new JPanel(); + top.setLayout(new BoxLayout(top, BoxLayout.Y_AXIS)); + top.setBorder(LineBorder.createGrayLineBorder()); + //define checkbox here as it's checked when generating UI. + showPasswords = new JCheckBox("show passwords:"); + showPasswords.setSelected(false); + + keys = new String[] { "URL:", "Search Filter:", + "Search Base:","Login Property", + "Username:", "Group Search Filter:", + "Password:", "Group Member Filter:", + }; + fieldMappings = loadUiFields(top, keys); + + //add the two checkboxes for additiona debugging options + enableLdapReferral= new JCheckBox("[follow] ldap referrals"); + enableLdapReferral.setSelected(false); + enableVerboseDebugging= new JCheckBox("more verbose logging"); + enableVerboseDebugging.setSelected(false); + enableVerboseDebugging.setToolTipText(warnMessage); + enableVerboseGroupParsing= new JCheckBox("more detailed group parsing"); + enableVerboseGroupParsing.setSelected(false); + enableVerboseGroupParsing.setToolTipText("*Take care when using this mode with a large number of groups* Every group discovered is parsed/listed."); + iterativeVerboseLogging= new JCheckBox("also log to console"); + iterativeVerboseLogging.setSelected(false); + iterativeVerboseLogging.setToolTipText("This mode is useful when the test tool is having difficulty returning results from large queries."); + 
iterativeVerboseLogging.setToolTipText(warnMessage); + enablePosixGroups= new JCheckBox("is Posix Group"); + enablePosixGroups.setSelected(false); + enablePosixGroups.setEnabled(false); + + //put into 3.2.x functionality row + JPanel jon32xRegion = new JPanel(); + jon32xRegion.setLayout(new FlowLayout(FlowLayout.LEFT)); + LineBorder jon32xLineBorder = new LineBorder(Color.BLACK, 2); + TitledBorder jon32xBorder = new TitledBorder(jon32xLineBorder, "JON 3.2.x/RHQ 4.8.x specific features:"); + jon32xRegion.setBorder(jon32xBorder); + enable32xFeatures= new JCheckBox("enable JON 3.2.x/RHQ 4.8.x features"); + enable32xFeatures.setToolTipText("This enables features not available before RHQ 4.8.x/JON 3.2.x."); + enable32xFeatures.setSelected(false); + enable32xFeatures.addItemListener(new ItemListener() { + @Override + public void itemStateChanged(ItemEvent e) { + if(enable32xFeatures.isSelected()){ + groupPageSizeName.setEnabled(true); + groupMemberQueryValue.setEnabled(true); + groupMemberQueryValue.setEditable(true); + groupMemberQueryValue.setText("1000"); + enablePosixGroups.setEnabled(true); + }else{ + groupMemberQueryValue.setText(""); + groupPageSizeName.setEnabled(false); + groupMemberQueryValue.setEnabled(false); + groupMemberQueryValue.setEditable(false); + enablePosixGroups.setEnabled(false); + enablePosixGroups.setSelected(false); + } + } + }); + + jon32xRegion.add(enable32xFeatures); + groupPageSizeName = new JLabel("Group Query Page Size:"); + groupPageSizeName.setEnabled(false); + groupMemberQueryValue = new JTextField(10); + groupMemberQueryValue.setText("1000"); + groupMemberQueryValue.setEditable(false); + jon32xRegion.add(groupPageSizeName); + jon32xRegion.add(groupMemberQueryValue); + jon32xRegion.add(enablePosixGroups); + top.add(jon32xRegion); + + //put into row display + JPanel advancedDebugRegion = new JPanel(); + advancedDebugRegion.setLayout(new FlowLayout(FlowLayout.LEFT)); + LineBorder advancedBorder = new LineBorder(Color.BLACK, 2); + 
TitledBorder debugBorder = new TitledBorder(advancedBorder, "Debug: **Warning --<hover HERE>**"); + advancedDebugRegion.setBorder(debugBorder); + advancedDebugRegion.add(enableLdapReferral); + advancedDebugRegion.add(enableVerboseDebugging); + advancedDebugRegion.add(enableVerboseGroupParsing); + advancedDebugRegion.add(iterativeVerboseLogging); + advancedDebugRegion.setToolTipText(warnMessage); + top.add(advancedDebugRegion); + + JPanel securityPanel = new JPanel(); + securityPanel.setLayout(new FlowLayout(FlowLayout.LEFT)); + showPasswords.addItemListener(new ItemListener() { + @Override + public void itemStateChanged(ItemEvent e) { + SwingUtilities.invokeLater(new Runnable() { + @Override + public void run() { + //store off existing value + String existingValue = ""; + String existingTestUserPass = ""; + JTextField current = fieldMappings.get("Password:"); + if(current instanceof JPasswordField){ + JPasswordField pass = ((JPasswordField)current); + if(pass!=null){ + char[] charArray = pass.getPassword(); + if(charArray.length>0){ + existingValue = new String(charArray); + } + } + }else{ + existingValue = current.getText(); + } + //save off test user password as well + if(testUserPasswordValue instanceof JPasswordField){ + JPasswordField pass = ((JPasswordField)testUserPasswordValue); + if(pass!=null){ + char[] charArray = pass.getPassword(); + if(charArray.length>0){ + existingTestUserPass = new String(charArray); + } + } + }else{ + existingTestUserPass=testUserPasswordValue.getText(); + } + + JTextField updatedContainer = null; + if(showPasswords.isSelected()){ + updatedContainer = new JTextField(textBoxWidth); + updatedContainer.setText(existingValue); + testUserPasswordValue = new JTextField(textBoxWidth); + testUserPasswordValue.setText(existingTestUserPass); + }else{ + updatedContainer = new JPasswordField(textBoxWidth); + updatedContainer.setText(existingValue); + testUserPasswordValue = new JPasswordField(textBoxWidth); + 
testUserPasswordValue.setText(existingTestUserPass); + } + //locate the JPanel and rebuild it Should be at index 3 + JPanel passwordRow = (JPanel) top.getComponent(3); +// JTextField jf = (JTextField) passwordRow.getComponent(1); + //store off existing components + Component[] existing = new Component[passwordRow.getComponentCount()]; + for(int i=0; i<passwordRow.getComponentCount();i++){ + existing[i] = passwordRow.getComponent(i); + } + passwordRow.removeAll(); + for(int j=0;j<existing.length;j++){ + if(j==1){//insert new JTextField instead + passwordRow.add(updatedContainer); + }else{ + passwordRow.add(existing[j]); + } + } + //reload testUserRegion + //store off existing components + Component[] existingTest = new Component[testUserRegion.getComponentCount()]; + for(int i=0; i<testUserRegion.getComponentCount();i++){ + existingTest[i] = testUserRegion.getComponent(i); + } + testUserRegion.removeAll(); + for(int j=0;j<existingTest.length;j++){ + if(j==3){//insert new JTextField instead + testUserRegion.add(testUserPasswordValue); + }else{ + testUserRegion.add(existingTest[j]); + } + } + + top.revalidate(); + top.repaint(); + } + }); + } + }); + securityPanel.add(showPasswords); + ssl = new JCheckBox("SSL:"); + ssl.setEnabled(false); + securityPanel.add(ssl); + top.add(securityPanel); + + // test user auth region + testUserRegion = new JPanel(); + testUserRegion.setLayout(new FlowLayout(FlowLayout.LEFT)); + LineBorder border = new LineBorder(Color.BLUE, 2); + TitledBorder tBorder = new TitledBorder(border, "Authentication/Authorization Check Credentials: (insert valid ldap user assigned to group)"); + testUserRegion.setBorder(tBorder); + JLabel testUserName = new JLabel("Test UserName:"); + testUserNameValue = new JTextField(textBoxWidth); + JLabel testUserPassword = new JLabel("Test Password:"); +// testUserPasswordValue = new JTextField(textBoxWidth); + testUserPasswordValue = new JPasswordField(textBoxWidth); + testUserRegion.add(testUserName); + 
testUserRegion.add(testUserNameValue); + testUserRegion.add(testUserPassword); + testUserRegion.add(testUserPasswordValue); + top.add(testUserRegion); + + // center + JPanel center = new JPanel(); + center.setLayout(new BoxLayout(center, BoxLayout.X_AXIS)); + // build center panel + buildCenterPanel(center); + + // final component layout + getContentPane().add(top, BorderLayout.NORTH); + getContentPane().add(center, BorderLayout.CENTER); + this.setSize(720, 700); + addWindowListener(new WindowAdapter() { + public void windowClosing(WindowEvent e) { + System.exit(0); + } + }); + setVisible(true); + } + + // define the center display panel. + private void buildCenterPanel(JPanel center) { + // First element is Test Button + JButton test = new JButton("Test Settings"); + center.add(test); + // second is large text box that display ldap queries + testResults = new JTextArea("(click button to test settings values: simulates 4 separate checks showing ldap filters used)", + 40, 40); + JScrollPane jsp = new JScrollPane(testResults); + center.add(jsp); + test.addActionListener(new ActionListener() { + public void actionPerformed(ActionEvent e) { + testResults.setText("");//clear out empty msg + //trim spaces from all fields + String ldapServer = fieldMappings.get(keys[0]).getText().trim(); + String searchFilter = fieldMappings.get(keys[1]).getText().trim(); + String searchBase = fieldMappings.get(keys[2]).getText().trim(); + String loginProperty = fieldMappings.get(keys[3]).getText().trim(); + String bindUserName = fieldMappings.get(keys[4]).getText().trim(); + String groupSearchFilter = fieldMappings.get(keys[5]).getText().trim(); + String bindPassword = fieldMappings.get(keys[6]).getText().trim(); + String groupMemberFilter = fieldMappings.get(keys[7]).getText().trim(); + String groupMemberQuerySize = groupMemberQueryValue.getText().trim(); + String testUserName = testUserNameValue.getText().trim(); + String testUserPassword = testUserPasswordValue.getText().trim(); + // 
validate initial required elements + String msg = null; + boolean proceed = true; + //valid required details set. + if (ldapServer.isEmpty() || bindUserName.isEmpty() + || bindPassword.isEmpty() || searchBase.isEmpty()) { + msg ="STEP-1:FAIL: "+ keys[0] + ", " + keys[2] + ", " + keys[4] + ", " + + keys[6] + " cannot be empty to proceed."; + log(msg); + proceed = false; + } + env = null; + InitialLdapContext ctx = null; + if (proceed) {// attempt initial ldap bind from RHQ server + msg = "STEP-1:TESTING: Attempting to bind to server:" + ldapServer + + "\n with user '" + bindUserName + + "' and password entered."; + log(msg); + env = getProperties(ldapServer); + env.setProperty(Context.SECURITY_PRINCIPAL, bindUserName); + env.setProperty(Context.SECURITY_CREDENTIALS, bindPassword); + env.setProperty(Context.SECURITY_AUTHENTICATION, "simple"); + //put the rest of the LDAP properties into the Properties instance for use later. + //there still needs to be separate variables since some are for UI validation. + env.setProperty(SystemSetting.LDAP_GROUP_FILTER.getInternalName(), groupSearchFilter); + env.setProperty(SystemSetting.LDAP_GROUP_MEMBER.getInternalName(), groupMemberFilter); + env.setProperty(SystemSetting.LDAP_BASE_DN.getInternalName(), searchBase); + env.setProperty(SystemSetting.LDAP_LOGIN_PROPERTY.getInternalName(), loginProperty); + env.setProperty(SystemSetting.LDAP_BIND_DN.getInternalName(), bindUserName); + env.setProperty(SystemSetting.LDAP_BIND_PW.getInternalName(), bindPassword); + env.setProperty(SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.getInternalName(), groupMemberQuerySize); + + try { + ctx = new InitialLdapContext(env, null); + msg = "STEP-1:PASS: LDAP bind credentials are correct. 
Successfully connected to '" + + ldapServer + + "'.\n This means the LDAP Bind credentials for the RHQ Server authentication/authorization requests to ldap server " + + "are correct."; + if(enableVerboseDebugging.isSelected()){ + msg+="\n"+advdb+" LDAP simple authentication bind successful."; + } + log(msg); + proceed = true; + } catch (Exception ex) { + msg = "STEP-1:FAIL: Unable to connect to the LDAP server with credentials specified.\n"; + msg+="Exception:"+ex.getMessage(); + if(enableVerboseDebugging.isSelected()){ + msg = appendStacktraceToMsg(msg, ex); + } + log(msg); + proceed = false; + } + } + if (proceed) {// retrieve test credentials to test run auth + // load search controls + SearchControls searchControls = getSearchControls(); + // validating searchFilter and test user/pass creds + proceed = true; + if (testUserName.isEmpty() || (testUserPassword.isEmpty())) { + msg = "STEP-2:FAIL: Test Username/Password fields cannot be empty for this step."; + log(msg); + proceed = false; + } + // testing a valid user involves a filtered ldap search + // using the loginProperty, and optionally searchFilter + String userDN = ""; + if (proceed) { + // default loginProperty to cn if it's not set + if (loginProperty.isEmpty()) { + loginProperty = "cn"; + if(enableVerboseDebugging.isSelected()){ + String mesg = "As you have not specified a login property, defaulting to 'cn'"; + log(advdb+" "+msg); + } + } + String filter; + if (!searchFilter.isEmpty()) { + filter = "(&(" + loginProperty + "=" + testUserName + + ")" + "(" + searchFilter + "))"; + } else { + filter = "(" + loginProperty + "=" + testUserName + + ")"; + } + if(enableVerboseDebugging.isSelected()){ + log(advdb+" The searchfilter is optionally appended to login property for additional shared attribute across users."); + } + msg = "STEP-2:TESTING: To validate the test user the following LDAP filtered component will be used to find matching users:\n"; + msg += filter; + log(msg); + // test out the search on the 
target ldap server + try { + String[] baseDNs = searchBase.split(";"); + for (int x = 0; x < baseDNs.length; x++) { + NamingEnumeration answer = ctx.search( + baseDNs[x], filter, searchControls); + if(enableVerboseDebugging.isSelected()){ + log(advdb+" this search was excuted against DN component '"+baseDNs[x]+"'."); + } + // boolean ldapApiNpeFound = false; + if (!answer.hasMoreElements()) { + msg="STEP-2:WARN Unable to locate a matching users for the filter'"+filter+ + "'. Please check your loginProperty. Usually 'cn' or 'uid'"; + log(msg); + continue; + } + // Going with the first match + SearchResult si = (SearchResult) answer.next(); + + // Construct the UserDN + userDN = si.getName() + "," + baseDNs[x]; + msg = "STEP-2:PASS: The test user '" + + testUserName + + "' was succesfully located, and the following userDN will be used in authorization check:\n"; + msg += userDN; + log(msg); + + ctx.addToEnvironment(Context.SECURITY_PRINCIPAL, userDN); + ctx.addToEnvironment(Context.SECURITY_CREDENTIALS,testUserPassword); + ctx.addToEnvironment(Context.SECURITY_AUTHENTICATION,"simple"); + + // if successful then verified that user and pw + // are valid ldap credentials + ctx.reconnect(null); + msg = "STEP-2:PASS: The user '" + + testUserName + + "' was succesfully authenticated using userDN '" + + userDN + "' and password provided.\n" + +"*Note: the loginProperty must match the loginProperty listed in dn: for the user. It is the DN that RHQ will lookup and use."; + log(msg); + } + } catch (Exception ex) { + msg = "STEP-2:FAIL: There was an error while searching for or authenticating the user '" + + testUserName + "'\n"; + msg += ex.getMessage(); + if(enableVerboseDebugging.isSelected()){ + msg = appendStacktraceToMsg(msg, ex); + } + log(msg); + proceed=false; + } + } + // with authentication completed, now check authorization. 
+ // validate filter components to list all available groups + proceed = false; + if (!groupSearchFilter.isEmpty()) { + Set<Map<String, String>> ret = new HashSet<Map<String, String>>(); + String filter = null; + + if (groupSearchFilter.startsWith("(") && groupSearchFilter.endsWith(")")){ + filter = groupSearchFilter; // RFC 2254 does not allow for ((expression)) + }else{ + filter = String + .format("(%s)", groupSearchFilter); + } + msg = "STEP-3:TESTING: This ldap filter " + + filter + + " will be used to locate ALL available LDAP groups"; + log(msg); + + Properties systemConfig = populateProperties(env); + + ret = buildGroup(systemConfig, filter); + msg = "STEP-3:TESTING: Using Group Search Filter '" + + filter + "', " + ret.size() + + " ldap group(s) were located.\n"; + if (ret.size() > 0) { + HashMap<String, String>[] ldapLists = new HashMap[ret + .size()]; + ret.toArray(ldapLists); + // in this mode go beyond the first ten results. + if (enableVerboseGroupParsing.isSelected()) { + msg += "STEP-3:PASS: Listing 'all' of the ldap groups located: \n"; + for (int i = 0; i < ret.size(); i++) { + msg += ldapLists[i] + "\n"; + } + } else {// otherwise only show first 10[subset of + // available groups] + msg += "STEP-3:PASS: Listing a few(<=10) of the ldap groups located: \n"; + for (int i = 0; (i < ret.size() && i < 10); i++) { + msg += ldapLists[i] + "\n"; + } + } + proceed = true;// then can proceed to next step. 
+ } + log(msg); + } else { + msg = "STEP-3:FAIL: Group Search Filter: cannot be empty to proceed."; + log(msg); + proceed=false; + } + // retrieve lists of authorized groups available for the + if (proceed) { + // check groupMember + if (!groupMemberFilter.isEmpty()) { +// Map<String, String> userDetails = new HashMap<String, String>(); +// userDetails = findLdapUserDetails(userDN); + Set<String> userDetails = findAvailableGroupsFor(testUserName); + + if(!userDetails.isEmpty()){ + proceed=true; + } + } else { + msg = "STEP-4:FAIL: Group Member Filter must be non-empty to proceed with simulating authorization check for test user."; + log(msg); + } + } + if(proceed){ + msg="COMPLETED:PASS: The current settings, for successful steps, should be correct to enter into your RHQ server."; + msg+="\n\n\n\n When you encounter failures, warnings or other unexpected results you should use an external "; + msg+="LDAP search utility to check that the generated filters return the expected LDAP results."; + log(msg); + } + } + } + }); + } + + private String appendStacktraceToMsg(String msg, Exception ex) { + String moreVerbose = ""; + moreVerbose+=advdb+" Exception type:"+ex.getClass()+"\n"; + moreVerbose+=advdb+" Exception stack trace reference:"+ex.getStackTrace()+"\n"; + if(ex.getStackTrace()!=null){ + StringWriter sw = new StringWriter(); + PrintWriter pw = new PrintWriter(sw); + ex.printStackTrace(pw); + moreVerbose+=advdb+" stack trace reference:"+sw.toString(); + } + msg+="\n"+moreVerbose; + return msg; + } + + private boolean containsIllegalLdap(String currentValue) { + boolean invalidData = false; + if((currentValue!=null)&&(!currentValue.trim().isEmpty())){ + //TODO: spinder 3/17/11: need to figure out regex to filter/detect bad data in returned ldap. Giving up for now. 
+// String regex = "(?<=(?:[^\]|^)(\\)+|[^\]|^)[/,+"><;=#]|(?<=(?:[^\]|^)(\\)+|[^\]|^)\(?!\|[/,+"><;=#]| $|(?<=^\) )|^"; +// regex = "(?<=(?:[^\\]|^)(\\\\)+|[^\\]|^)[/,+\"><;=#]|(?<=(?:[^\\]|^)(\\\\)+|[^\\]|^)\\(?!\\|[/,+\"><;=#]| $|(?<=^\\) )|^"; +// System.out.println("++++++++ CURR VAL:"+currentValue+":INV-CHeck:"+currentValue.matches(",+"\<;\n=/")+":NEWCHECK:"+(currentValue.matches(regex))); +// if(currentValue.matches(",+"\<;\n=/")){ +// invalidData=true; +// } +// String badList = ",+"\<;\n="; + String badList = "+"\<;\n"; + for(char car :currentValue.toCharArray()){ + for(char c :badList.toCharArray()){ + if(car == c){ + invalidData=true; + } + } + } + + } + return invalidData; + } + /** + * @throws NamingException + * @see org.jboss.security.auth.spi.UsernamePasswordLoginModule#validatePassword(java.lang.String,java.lang.String) + */ + protected Set<Map<String, String>> buildGroup(Properties systemConfig, String filter) { + Set<Map<String, String>> groupDetailsMap = new HashSet<Map<String, String>>(); + // Load our LDAP specific properties + // Load the BaseDN + String baseDN = (String) systemConfig.get(SystemSetting.LDAP_BASE_DN.getInternalName()); + + // Load the LoginProperty + String loginProperty = (String) systemConfig.get(SystemSetting.LDAP_LOGIN_PROPERTY.getInternalName()); + if (loginProperty == null) { + // Use the default + loginProperty = "cn"; + } + // Load any information we may need to bind + String bindDN = (String) systemConfig.get(SystemSetting.LDAP_BIND_DN.getInternalName()); + String bindPW = (String) systemConfig.get(SystemSetting.LDAP_BIND_PW.getInternalName()); + if (bindDN != null) { + systemConfig.setProperty(Context.SECURITY_PRINCIPAL, bindDN); + systemConfig.setProperty(Context.SECURITY_CREDENTIALS, bindPW); + systemConfig.setProperty(Context.SECURITY_AUTHENTICATION, "simple"); + } + try { + InitialLdapContext ctx = new InitialLdapContext(systemConfig, null); + SearchControls searchControls = getSearchControls(); + /*String filter 
= "(&(objectclass=groupOfUniqueNames)(uniqueMember=uid=" + userName + + ",ou=People, dc=rhndev, dc=redhat, dc=com))";*/ + + //modify the search control to only include the attributes we will use + String[] attributes = { "cn", "description" }; + searchControls.setReturningAttributes(attributes); + + //BZ:964250: add rfc 2696 + //default to 1000 results. System setting page size from UI should be non-negative integer > 0. + //additionally as system settings are modifiable via CLI which may not have param checking enabled do some + //more checking. + int defaultPageSize = 1000; + // only if they're enabled in the UI. + if (enable32xFeatures.isSelected()) { + String groupPageSize = systemConfig.getProperty( + SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE + .getInternalName(), "" + defaultPageSize); + if ((groupPageSize != null) + && (!groupPageSize.trim().isEmpty())) { + int passedInPageSize = -1; + try { + passedInPageSize = Integer + .valueOf(groupPageSize.trim()); + if (passedInPageSize > 0) { + defaultPageSize = passedInPageSize; + if(enableVerboseDebugging.isSelected()){ + log(advdb + + " LDAP Group Query Page Sizing of '"+defaultPageSize+"' is being requested from server."); + } + } + } catch (NumberFormatException nfe) { + // log issue and do nothing. Go with the default. + String msg = "LDAP Group Page Size passed in '" + + groupPageSize + + "' in is invalid. Defaulting to 1000 results." + + nfe.getMessage(); + log(msg); + } + } + ctx.setRequestControls(new Control[] { new PagedResultsControl( + defaultPageSize, Control.CRITICAL) }); + } + // Loop through each configured base DN. It may be useful + // in the future to allow for a filter to be configured for + // each BaseDN, but for now the filter will apply to all. 
+ String[] baseDNs = baseDN.split(BASEDN_DELIMITER); + + for (int x = 0; x < baseDNs.length; x++) { + if (enableVerboseDebugging.isSelected()) { + log(advdb + + " this search was excuted against DN component '" + + baseDNs[x] + "'."); + } + executeGroupSearch(filter, groupDetailsMap, ctx, searchControls, baseDNs, x); + + // continually parsing pages of results until we're done. + // only if they're enabled in the UI. + if (enable32xFeatures.isSelected()) { + // handle paged results if they're being used here + byte[] cookie = null; + Control[] controls = ctx.getResponseControls(); + if (controls != null) { + for (Control control : controls) { + if (control instanceof PagedResultsResponseControl) { + PagedResultsResponseControl pagedResult = (PagedResultsResponseControl) control; + cookie = pagedResult.getCookie(); + } + } + } + + while (cookie != null) { + String msg = "RFC 2696 is supported by the server and we are paging through the results. "+ + groupDetailsMap.size()+" results returned so far."; + if(enableVerboseGroupParsing.isSelected()){ + log(advdb + + msg); + } + // ensure the next requests contains the session/cookie + // details + ctx.setRequestControls(new Control[] { new PagedResultsControl( + defaultPageSize, cookie, Control.CRITICAL) }); + executeGroupSearch(filter, groupDetailsMap, ctx, + searchControls, baseDNs, x); + // empty out cookie + cookie = null; + // test for further iterations + controls = ctx.getResponseControls(); + if (controls != null) { + for (Control control : controls) { + if (control instanceof PagedResultsResponseControl) { + PagedResultsResponseControl pagedResult = (PagedResultsResponseControl) control; + cookie = pagedResult.getCookie(); + } + } + } + } + } + }//end of for loop + } catch (NamingException e) { + if (e instanceof InvalidSearchFilterException) { + InvalidSearchFilterException fException = (InvalidSearchFilterException) e; + String message = "The ldap group filter defined is invalid "; + log(message); + } + 
//TODO: check for ldap connection/unavailable/etc. exceptions. + else { + String mesg = "LDAP communication error: " + e.getMessage(); + log(mesg); + } + } catch (IOException iex) { + String msg = "Unexpected LDAP communciation error:" + iex.getMessage(); + log(msg); + } + + return groupDetailsMap; + } + + /** Executes the LDAP group query using the filters, context and search controls, etc. parameters passed in. + * The matching groups located during processing this pages of results are added as new entries to the + * groupDetailsMap passed in. + * + * @param filter + * @param groupDetailsMap + * @param ctx + * @param searchControls + * @param baseDNs + * @param x + * @throws NamingException + */ + private void executeGroupSearch(String filter, Set<Map<String, String>> groupDetailsMap, InitialLdapContext ctx, + SearchControls searchControls, String[] baseDNs, int x) throws NamingException { + //execute search based on controls and context passed in. + NamingEnumeration<SearchResult> answer = ctx.search(baseDNs[x], filter, searchControls); + boolean ldapApiEnumerationBugEncountered = false; + while ((!ldapApiEnumerationBugEncountered) && answer.hasMoreElements()) {//BZ:582471- ldap api bug change + // We use the first match + SearchResult si = null; + try { + si = answer.next(); + } catch (NullPointerException npe) { + if (enableVerboseDebugging.isSelected()) { + log(advdb + + " NullPtr exception detected. If known LDAP api enum npe ignore: " + + npe.getMessage() + "."); + } + ldapApiEnumerationBugEncountered = true; + break; + } + + if (enableVerboseDebugging.isSelected() + || enableVerboseGroupParsing.isSelected()) { + Attributes attributeContainer = si.getAttributes(); + NamingEnumeration<? 
extends Attribute> attributes = attributeContainer + .getAll(); + String attributesReturned = " "; + while (attributes.hasMore()) { + attributesReturned += attributes.next().getID() + ","; + } + String dbugMesg = "\n" + + advdb + + " Group search LDAP (" + + attributeContainer.size() + + ") attributes located for group '" + + si.getName() + + "' are [" + + attributesReturned.substring(0, + attributesReturned.length() - 1) + "]."; + // directly update here to shorten messages for lots of groups + testResults.setText(testResults.getText() + dbugMesg); + //This flag can be used in the unlikely case that the UI hangs during a test operation.: + if(iterativeVerboseLogging.isSelected()){ + System.out.println(dbugMesg); + } + + // additionally parse attribute ids and values for illegal ldap + // characters + if (enableVerboseGroupParsing.isSelected()) { + attributes = attributeContainer.getAll(); + String currentAttributeId = ""; + String currentValue = ""; + // spinder: 3/17/11: should we bail on first bad data or + // display them all? 
+ while (attributes.hasMore()) { + boolean badData = false; + Attribute att = attributes.next(); + currentAttributeId = att.getID(); + if (containsIllegalLdap(currentAttributeId)) { + log(advdb + + " LDAP Group: bad atrribute data detected for group '" + + si.getName() + "' for attribute '" + + currentAttributeId + "'."); + badData = true; + } + if (att.getAll() != null) { + NamingEnumeration<?> enumer = att.getAll(); + while (enumer.hasMore()) { + currentValue = enumer.next() + ""; + if (containsIllegalLdap(currentValue)) { + log(advdb + + " LDAP Group: bad data detected for group '" + + si.getName() + + "' with attribute '" + + currentAttributeId + + "' and value:" + currentValue); + badData = true; + } + } + } + if (badData) { + log(advdb + + "** LDAP Group: Some bad LDAP data detected for group '" + + si.getName() + "'."); + } + } + } + } + + Map<String, String> entry = new HashMap<String, String>(); + // String name = (String) si.getAttributes().get("cn").get(); + Attribute commonNameAttr = si.getAttributes().get("cn"); + if (commonNameAttr != null) { + String name = (String) commonNameAttr.get(); + name = name.trim(); + Attribute desc = si.getAttributes().get("description"); + String description = desc != null ? (String) desc.get() : ""; + description = description.trim(); + entry.put("id", name); + entry.put("name", name); + entry.put("description", description); + groupDetailsMap.add(entry); + } else {// unable to retrieve details for specific group. + log(advdb + + " There was an error retrieving 'cn' attribute for group '" + + si.getName() + + "'. Not adding to returned list of groups. 
"); + } + } + } + + public Map<String, String> findLdapUserDetails(String userName) { + // Load our LDAP specific properties + Properties systemConfig = env; + HashMap<String, String> userDetails = new HashMap<String, String>(); + + // Load the BaseDN + String baseDN = (String) systemConfig.get(SystemSetting.LDAP_BASE_DN.getInternalName()); + + // Load the LoginProperty + String loginProperty = (String) systemConfig.get(SystemSetting.LDAP_LOGIN_PROPERTY.getInternalName()); + if (loginProperty == null) { + // Use the default + loginProperty = "cn"; + } + // Load any information we may need to bind + String bindDN = (String) systemConfig.get(SystemSetting.LDAP_BIND_DN.getInternalName()); + String bindPW = (String) systemConfig.get(SystemSetting.LDAP_BIND_PW.getInternalName()); + + // Load any search filter + String groupSearchFilter = (String) systemConfig.get(SystemSetting.LDAP_GROUP_FILTER.getInternalName()); + String groupMemberFilter = (String) systemConfig.get(SystemSetting.LDAP_GROUP_MEMBER.getInternalName()); + String testUserDN=userName; + String ldapServer = (String) systemConfig.get(Context.PROVIDER_URL); + + Properties env = getProperties(ldapServer); + + if (bindDN != null) { + env.setProperty(Context.SECURITY_PRINCIPAL, bindDN); + env.setProperty(Context.SECURITY_CREDENTIALS, bindPW); + env.setProperty(Context.SECURITY_AUTHENTICATION, "simple"); + } + + try { + InitialLdapContext ctx = new InitialLdapContext(env, null); + SearchControls searchControls = getSearchControls(); + + String filter = String.format("(&(%s)(%s=%s))", + groupSearchFilter, groupMemberFilter, + testUserDN); + + generateUiLoggingForStep4LdapFilter(userName, filter); + + // Loop through each configured base DN. It may be useful + // in the future to allow for a filter to be configured for + // each BaseDN, but for now the filter will apply to all. 
String[] baseDNs = baseDN.split(BASEDN_DELIMITER); + for (int x = 0; x < baseDNs.length; x++) { + NamingEnumeration<SearchResult> answer = ctx.search(baseDNs[x], filter, searchControls); + if (!answer.hasMoreElements()) { //BZ:582471- ldap api bug change + // Nothing found for this DN, move to the next one if we have one. + continue; + } + + // We use the first match + SearchResult si = answer.next(); + //generate the DN + String userDN = null; + try { + userDN = si.getNameInNamespace(); + } catch (UnsupportedOperationException use) { + userDN = si.getName(); + if (userDN.startsWith("\"")) { + userDN = userDN.substring(1, userDN.length()); + } + if (userDN.endsWith("\"")) { + userDN = userDN.substring(0, userDN.length() - 1); + } + userDN = userDN + "," + baseDNs[x]; + } + userDetails.put("dn", userDN); + + // Construct the UserDN + NamingEnumeration<String> keys = si.getAttributes().getIDs(); + while (keys.hasMore()) { + String key = keys.next(); + Attribute value = si.getAttributes().get(key); + if ((value != null) && (value.get() != null)) { + userDetails.put(key, value.get().toString()); + } + } +// return userDetails; + }//end of for loop + generateUiLoggingStep4Authz(filter); + return userDetails; + } catch (Exception ex) { + generateUiLoggingStep4Exception(ex); + } + return userDetails; + } + + public Set<String> findAvailableGroupsFor(String userName) { + // Load our LDAP specific properties + Properties options = env; + String groupFilter = options.getProperty(SystemSetting.LDAP_GROUP_FILTER.getInternalName(), ""); + String groupMember = options.getProperty(SystemSetting.LDAP_GROUP_MEMBER.getInternalName(), ""); + String groupUsePosix = options.getProperty(SystemSetting.LDAP_GROUP_USE_POSIX.getInternalName(), "false"); + if (groupUsePosix == null) { + groupUsePosix = Boolean.toString(false);//default to false + } + boolean usePosixGroups = Boolean.valueOf(groupUsePosix); + String userAttribute = getUserAttribute(options, userName, usePosixGroups); + 
Set<String> ldapSet = new HashSet<String>(); + + if (userAttribute != null && userAttribute.trim().length() > 0) { + //TODO: spinder 4/21/10 put in error/debug logging messages for badly formatted filter combinations + String filter = ""; + //form assumes examples where groupFilter is like 'objectclass=groupOfNames' and groupMember is 'member' + // to produce ldap filter like (&(objectclass=groupOfNames)(member=cn=Administrator,ou=People,dc=test,dc=com)) + // or like (&(objectclass=groupOfNames)(memberUid=Administrator)) for posixGroups. + filter = String.format("(&(%s)(%s=%s))", groupFilter, groupMember, encodeForFilter(userAttribute)); + + Set<Map<String, String>> matched = buildGroup(options, filter); +// log.trace("Located '" + matched.size() + "' LDAP groups for user '" + userName +// + "' using following ldap filter '" + filter + "'."); + + //iterate to extract just the group names. + for (Map<String, String> match : matched) { + ldapSet.add(match.get("id")); + } + } else { +// log.debug("Group lookup will not be performed due to no UserDN found for user " + userName); + } + + return ldapSet; + } + + private void generateUiLoggingStep4Exception(Exception ex) { + String groupSearchFilter = env + .getProperty(SystemSetting.LDAP_GROUP_FILTER + .getInternalName()); + String msg = "STEP-4:FAIL: There was an error searching with the groupFilter supplied: " + + groupSearchFilter + "'\n"; + msg += ex.getMessage(); + if (enableVerboseDebugging.isSelected()) { + msg = appendStacktraceToMsg(msg, ex); + } + log(msg); + } + + private void generateUiLoggingStep4Authz(String filter) { + Set<Map<String, String>> groups = buildGroup(env, filter); + String msg = "STEP-4:TESTING: Using Group Search Filter '" + + filter + "', " + groups.size() + + " ldap group(s) were located.\n"; + if (groups.size() > 0) { + HashMap<String, String>[] ldapLists = new HashMap[groups + .size()]; + groups.toArray(ldapLists); + msg += "STEP-4:PASS: Listing a few of the ldap groups located: \n"; + // 
iterate over first ten or less to demonstrate retrieve + for (int i = 0; (i < groups.size() && i < 10); i++) { + msg += ldapLists[i] + "\n"; + } + }else{ + msg+="STEP-4:WARN: With current settings, test user is not authorized for any groups. Is this correct?"; + } + log(msg); + } + + private void generateUiLoggingForStep4LdapFilter(String userName, + String filter) { + String msg = "STEP-4:TESTING: about to do ldap search with filter \n'" + + filter + + "'\n to locate groups that test user '"+userName+"' IS authorized to access."; + log(msg); + } + + + // throw the label and fields together, two to a row. + private HashMap<String, JTextField> loadUiFields(JPanel top, + String[] componentKeys) { + HashMap<String, JTextField> mappings = new HashMap<String, JTextField>(); + for (int i = 0; i < componentKeys.length; i++) { + String firstLabelKey = componentKeys[i]; + String secondLabelKey = componentKeys[++i]; + // locate second key + JPanel row = new JPanel(); + row.setLayout(new FlowLayout(FlowLayout.LEFT)); + JLabel label1 = new JLabel(firstLabelKey); + label1.setSize(textBoxWidth, 5); +// JTextField value1 = new JTextField(textBoxWidth); + JTextField value1 = null; + if (firstLabelKey.equalsIgnoreCase("Password:")&&(!showPasswords.isSelected())) { + value1 = new JPasswordField(textBoxWidth); + } else { + value1 = new JTextField(textBoxWidth); + } + JLabel label2 = new JLabel(secondLabelKey); + JTextField value2 = new JTextField(textBoxWidth); + row.add(label1); + row.add(value1); + row.add(Box.createRigidArea(new Dimension(0, 5))); + row.add(label2); + row.add(value2); + mappings.put(firstLabelKey, value1); + mappings.put(secondLabelKey, value2); + top.add(row); + } + + return mappings; + } + + private Properties getProperties(String contentProvider) { + Properties env = new Properties(); + env.setProperty(Context.INITIAL_CONTEXT_FACTORY, + "com.sun.jndi.ldap.LdapCtxFactory"); + env.setProperty(Context.PROVIDER_URL, contentProvider); + 
if(!enableLdapReferral.isSelected()){ + env.setProperty(Context.REFERRAL, "ignore"); + }else{ + String msg="**---- You have chosen to tell LDAP servers to [FOLLOW] context referrals. Default is [IGNORE] referrals. --**"; + log(msg); + env.setProperty(Context.REFERRAL, "follow"); + } + +// // Setup SSL if requested +// String protocol = ssl.isSelected()? "ssl":""; +// if ((protocol != null) && protocol.equals("ssl")) { +// String ldapSocketFactory = env +// .getProperty("java.naming.ldap.factory.socket"); +// if (ldapSocketFactory == null) { +// env.put("java.naming.ldap.factory.socket", +// UntrustedSSLSocketFactory.class.getName()); +// } +// env.put(Context.SECURITY_PROTOCOL, "ssl"); +// } + + return env; + } + + private String delineate() { + String line = "-"; + for (int i = 0; i < 30; i++) { + line += "-"; + } + return line; + } + + /** Takes care of delineating messages and conditional logging contents passed in. + * @param msg + */ + private void log(String msg) { + String message = "\n" + delineate() + "\n"; + message += msg; + message += "\n" + delineate() + "\n\n"; + //This flag can be used in the unlikely case that the UI hangs during a test operation.: + if(iterativeVerboseLogging.isSelected()){ + System.out.println(message); + } + testResults.setText(testResults.getText() + message); + } + + private SearchControls getSearchControls() { + int scope = SearchControls.SUBTREE_SCOPE; + int timeLimit = 0; + long countLimit = 0; + String[] returnedAttributes = null; + boolean returnObject = false; + boolean deference = false; + SearchControls constraints = new SearchControls(scope, countLimit, + timeLimit, returnedAttributes, returnObject, deference); + return constraints; + } + + /** Translate SystemSettings to familiar Properties instance since we're + * passing not one but multiple values. 
+ * + * @param systemSettings + * @return + */ + private Properties populateProperties(Properties existing) { + Properties properties = new Properties(); + if(existing!=null){ + properties = existing; + } + for (SystemSetting entry : SystemSetting.values()) { + if(entry!=null){ + switch(entry){ + case LDAP_BASED_JAAS_PROVIDER: + properties.put(entry.getInternalName(), ""); + break; + } + } + } + return properties; + } + + /**Build/retrieve the user DN. Not usually a property. + * + * @param options + * @param userName + * @param usePosixGroups boolean indicating whether we search for groups with posixGroup format + * @return + */ + private String getUserAttribute(Properties options, String userName, boolean usePosixGroups) { + Map<String, String> details = findLdapUserDetails(userName); + String userAttribute = null; + if (usePosixGroups) {//return just the username as posixGroup member search uses (&(%s)(memberUid=username)) + userAttribute = userName; + } else {//this is the default where group search uses (&(%s)(uniqueMember={userDn})) + userAttribute = details.get("dn"); + } + + return userAttribute; + } + + /** See LDAPStringUtil.encodeForFilter() for original code/source/author/etc. 
+ * <p>Encode a string so that it can be used in an LDAP search filter.</p> + * + * <p>The following table shows the characters that are encoded and their + * encoded version.</p> + * + * <table> + * <tr><th align="center">Character</th><th>Encoded As</th></tr> + * <tr><td align="center">*</td><td>\2a</td></tr> + * <tr><td align="center">(</td><td>\28</td></tr> + * <tr><td align="center">)</td><td>\29</td></tr> + * <tr><td align="center">\</td><td>\5c</td></tr> + * <tr><td align="center"><code>null</code></td><td>\00</td></tr> + * </table> + * + * <p>In addition to encoding the above characters, any non-ASCII character + * (any character with a hex value greater than <code>0x7f</code>) is also + * encoded and rewritten as a UTF-8 character or sequence of characters in + * hex notation.</p> + * + * @param filterString a string that is to be encoded + * @return the encoded version of <code>filterString</code> suitable for use + * in a LDAP search filter + * @see <a href="http://tools.ietf.org/html/rfc4515">RFC 4515</a> + */ + public static String encodeForFilter(final String filterString) { + if (filterString != null && filterString.length() > 0) { + StringBuilder encString = new StringBuilder(filterString.length()); + for (int i = 0; i < filterString.length(); i++) { + char ch = filterString.charAt(i); + switch (ch) { + case '*': // encode a wildcard * character + encString.append("\\2a"); + break; + case '(': // encode a open parenthesis ( character + encString.append("\\28"); + break; + case ')': // encode a close parenthesis ) character + encString.append("\\29"); + break; + case '\\': // encode a backslash \ character + encString.append("\\5c"); + break; + case '\u0000': // encode a null character + encString.append("\\00"); + break; + default: + if (ch <= 0x7f) { // an ASCII character + encString.append(ch); + } else if (ch >= 0x80) { // encode to UTF-8 + try { + byte[] utf8bytes = String.valueOf(ch).getBytes("UTF8"); + for (byte b : utf8bytes) { + 
encString.append(String.format("\\%02x", b)); + } + } catch (UnsupportedEncodingException e) { + // ignore + } + } + } + } + return encString.toString(); + } + return filterString; + } +} + +//Mock up the upgraded system properties approach to use SystemSetting +enum SystemSetting { + LDAP_BASED_JAAS_PROVIDER("CAM_JAAS_PROVIDER"), + LDAP_NAMING_PROVIDER_URL("CAM_LDAP_NAMING_PROVIDER_URL"), + USE_SSL_FOR_LDAP("CAM_LDAP_PROTOCOL"), + LDAP_LOGIN_PROPERTY("CAM_LDAP_LOGIN_PROPERTY"), + LDAP_FILTER("CAM_LDAP_FILTER"), + LDAP_GROUP_FILTER("CAM_LDAP_GROUP_FILTER"), + LDAP_GROUP_MEMBER("CAM_LDAP_GROUP_MEMBER"), + LDAP_GROUP_QUERY_PAGE_SIZE("CAM_LDAP_GROUP_QUERY_PAGE_SIZE"), + LDAP_BASE_DN("CAM_LDAP_BASE_DN"), + LDAP_BIND_DN("CAM_LDAP_BIND_DN"), + LDAP_BIND_PW("CAM_LDAP_BIND_PW"), + LDAP_NAMING_FACTORY("CAM_LDAP_NAMING_FACTORY_INITIAL"), + LDAP_GROUP_USE_POSIX("CAM_LDAP_GROUP_USE_POSIX"), + ; + + private String internalName; + + private SystemSetting(String name) { + this.internalName = name; + } + + public String getInternalName() { + return internalName; + } + + public static SystemSetting getByInternalName(String internalName) { + for (SystemSetting p : SystemSetting.values()) { + if (p.internalName.equals(internalName)) { + return p; + } + } + return null; + } +} + +
commit ec33bbd668d34ab132f4fc46a545e2c9e4cb0951 Author: Simeon Pinder spinder@redhat.com Date: Tue Jul 30 17:11:49 2013 -0400
Move TestLdapSettings to its own module.
diff --git a/modules/helpers/ldap-tool/pom.xml b/modules/helpers/ldap-tool/pom.xml new file mode 100644 index 0000000..28f8678 --- /dev/null +++ b/modules/helpers/ldap-tool/pom.xml @@ -0,0 +1,74 @@ +<?xml version="1.0" encoding="UTF-8"?> + +<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> + + <modelVersion>4.0.0</modelVersion> + + <parent> + <groupId>org.rhq</groupId> + <artifactId>rhq-parent</artifactId> + <version>4.9.0-SNAPSHOT</version> + <relativePath>../../../pom.xml</relativePath> + </parent> + + <groupId>org.rhq.helpers</groupId> + <artifactId>ldap-tool</artifactId> + <packaging>jar</packaging> + <version>4.9.0-SNAPSHOT</version> + + <properties> + <executable.name>TestLdapSettings</executable.name> + <tool.version>1.0.1</tool.version> + </properties> + + <name>RHQ ldap test tool</name> + <description>Executable jar to exercise LDAP settings used by RHQ with external LDAP server.</description> + + <build> + + <plugins> + + <plugin> + <artifactId>maven-jar-plugin</artifactId> + <configuration> + <archive> + <manifest> + <packageName>com.test</packageName> + <mainClass>com.test.TestLdapSettings</mainClass> + </manifest> + </archive> + </configuration> + </plugin> + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-antrun-plugin</artifactId> + <version>1.7</version> + <executions> + <execution> + <id>tool-finalize</id> + <phase>verify</phase> + <configuration> + <target> + <copy file="${project.build.directory}/${project.build.finalName}.jar" + tofile="${project.build.directory}/${executable.name}.jar"/> + </target> + </configuration> + <goals> + <goal>run</goal> + </goals> + </execution> + </executions> + </plugin> + + <plugin> + <artifactId>maven-release-plugin</artifactId> + <version>2.1</version> + </plugin> + + </plugins> + + </build> + +</project> + + diff --git 
a/modules/helpers/ldap-tool/src/main/java/com/test/TestLdapSettings.java b/modules/helpers/ldap-tool/src/main/java/com/test/TestLdapSettings.java new file mode 100644 index 0000000..75ff277 --- /dev/null +++ b/modules/helpers/ldap-tool/src/main/java/com/test/TestLdapSettings.java @@ -0,0 +1,1285 @@ +package com.test; + +import java.awt.BorderLayout; +import java.awt.Color; +import java.awt.Component; +import java.awt.Dimension; +import java.awt.FlowLayout; +import java.awt.event.ActionEvent; +import java.awt.event.ActionListener; +import java.awt.event.ItemEvent; +import java.awt.event.ItemListener; +import java.awt.event.WindowAdapter; +import java.awt.event.WindowEvent; +import java.io.IOException; +import java.io.PrintWriter; +import java.io.StringWriter; +import java.io.UnsupportedEncodingException; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Properties; +import java.util.Set; + +import javax.naming.Context; +import javax.naming.NamingEnumeration; +import javax.naming.NamingException; +import javax.naming.directory.Attribute; +import javax.naming.directory.Attributes; +import javax.naming.directory.InvalidSearchFilterException; +import javax.naming.directory.SearchControls; +import javax.naming.directory.SearchResult; +import javax.naming.ldap.Control; +import javax.naming.ldap.InitialLdapContext; +import javax.naming.ldap.PagedResultsControl; +import javax.naming.ldap.PagedResultsResponseControl; +import javax.swing.Box; +import javax.swing.BoxLayout; +import javax.swing.JButton; +import javax.swing.JCheckBox; +import javax.swing.JFrame; +import javax.swing.JLabel; +import javax.swing.JPanel; +import javax.swing.JPasswordField; +import javax.swing.JScrollPane; +import javax.swing.JTextArea; +import javax.swing.JTextField; +import javax.swing.SwingUtilities; +import javax.swing.UIManager; +import javax.swing.UnsupportedLookAndFeelException; +import javax.swing.border.LineBorder; +import 
javax.swing.border.TitledBorder; +import javax.swing.event.ChangeEvent; +import javax.swing.event.ChangeListener; + +//import org.rhq.enterprise.server.util.security.UntrustedSSLSocketFactory; + +/* Is a development test tool that allows the user to simulate the RHQ server side + * LDAP calls during auth/authz operations. + * + * The specific LDAP logic below needs to mirror the latest RHQ code and allow the user + * to test our their configuration without requring a specific RHQ/JON build as a dependency. + * + * NOTE: To avoid a runtime dependency on specific versions of RHQ or JON, the small implementation + * methods were copied into this class with minimatl changes for logging and ui messaging. The + * definitive implementation for each 'copied' method can be found in LDAPGroupManagerBean. + * + * @author Simeon Pinder + */ +public class TestLdapSettings extends JFrame { + //shared fields + private JTextArea testResults; + private JCheckBox showPasswords; + private JCheckBox ssl; + private JLabel groupPageSizeName; + private JTextField groupMemberQueryValue; + private JTextField testUserNameValue; + private JTextField testUserPasswordValue; + private HashMap<String, JTextField> fieldMappings; + private String[] keys; + private JCheckBox enableLdapReferral; + private JCheckBox enableVerboseDebugging; + private JCheckBox enableVerboseGroupParsing; + private JCheckBox iterativeVerboseLogging; + private JCheckBox enablePosixGroups; + private JCheckBox enable32xFeatures; + private String advdb = "**Verbose:debug ----"; + private static final String BASEDN_DELIMITER = ";"; + + private static final long serialVersionUID = 1L; + int textBoxWidth = 20; + private static JPanel top = null; + private static JPanel testUserRegion = null; + private static Properties env=null; + + public static void main(String args[]) { + new TestLdapSettings(); + } + //After enabling support for Query parsing, we need to warn users of the effects. 
+ final String warnMessage = "<html>***WARNING: Depending upon<br>" + + "i)how the ldap server is configured <br>" + + "ii)client query paging settings <br>" + + " enabling <b>'more detailed logging'</b>,<br>" + + " <b>'more group parsing'</b> AND/OR <b>'also log to console'</b> may cause the console to hang/freeze <br>" + + " as the LDAP tool continues to parse large query results. If that occurs it is <br>" + + " suggested that you kill tool and re-run with 'also log to console' so that the console logs<br>" + + " will show which dataset is causing the delay and then you should modify your search|group|member<br>" + + " filters accordingly to <b>return smaller results</b> and/or <b>consume larger payloads</b>.<br>" + + "***WARNING</html>"; + + // Configure window properties + private TestLdapSettings() { + + setTitle("Check LDAP Settings: Simulates LDAP checks/queries of RHQ LDAP integration"); + getContentPane().setLayout(new BorderLayout()); + // top panel definition + top = new JPanel(); + top.setLayout(new BoxLayout(top, BoxLayout.Y_AXIS)); + top.setBorder(LineBorder.createGrayLineBorder()); + //define checkbox here as it's checked when generating UI. 
+ showPasswords = new JCheckBox("show passwords:"); + showPasswords.setSelected(false); + + keys = new String[] { "URL:", "Search Filter:", + "Search Base:","Login Property", + "Username:", "Group Search Filter:", + "Password:", "Group Member Filter:", + }; + fieldMappings = loadUiFields(top, keys); + + //add the two checkboxes for additiona debugging options + enableLdapReferral= new JCheckBox("[follow] ldap referrals"); + enableLdapReferral.setSelected(false); + enableVerboseDebugging= new JCheckBox("more verbose logging"); + enableVerboseDebugging.setSelected(false); + enableVerboseDebugging.setToolTipText(warnMessage); + enableVerboseGroupParsing= new JCheckBox("more detailed group parsing"); + enableVerboseGroupParsing.setSelected(false); + enableVerboseGroupParsing.setToolTipText("*Take care when using this mode with a large number of groups* Every group discovered is parsed/listed."); + iterativeVerboseLogging= new JCheckBox("also log to console"); + iterativeVerboseLogging.setSelected(false); + iterativeVerboseLogging.setToolTipText("This mode is useful when the test tool is having difficulty returning results from large queries."); + iterativeVerboseLogging.setToolTipText(warnMessage); + enablePosixGroups= new JCheckBox("is Posix Group"); + enablePosixGroups.setSelected(false); + enablePosixGroups.setEnabled(false); + + //put into 3.2.x functionality row + JPanel jon32xRegion = new JPanel(); + jon32xRegion.setLayout(new FlowLayout(FlowLayout.LEFT)); + LineBorder jon32xLineBorder = new LineBorder(Color.BLACK, 2); + TitledBorder jon32xBorder = new TitledBorder(jon32xLineBorder, "JON 3.2.x/RHQ 4.8.x specific features:"); + jon32xRegion.setBorder(jon32xBorder); + enable32xFeatures= new JCheckBox("enable JON 3.2.x/RHQ 4.8.x features"); + enable32xFeatures.setToolTipText("This enables features not available before RHQ 4.8.x/JON 3.2.x."); + enable32xFeatures.setSelected(false); + enable32xFeatures.addItemListener(new ItemListener() { + @Override + public void 
itemStateChanged(ItemEvent e) { + if(enable32xFeatures.isSelected()){ + groupPageSizeName.setEnabled(true); + groupMemberQueryValue.setEnabled(true); + groupMemberQueryValue.setEditable(true); + groupMemberQueryValue.setText("1000"); + enablePosixGroups.setEnabled(true); + }else{ + groupMemberQueryValue.setText(""); + groupPageSizeName.setEnabled(false); + groupMemberQueryValue.setEnabled(false); + groupMemberQueryValue.setEditable(false); + enablePosixGroups.setEnabled(false); + enablePosixGroups.setSelected(false); + } + } + }); + + jon32xRegion.add(enable32xFeatures); + groupPageSizeName = new JLabel("Group Query Page Size:"); + groupPageSizeName.setEnabled(false); + groupMemberQueryValue = new JTextField(10); + groupMemberQueryValue.setText("1000"); + groupMemberQueryValue.setEditable(false); + jon32xRegion.add(groupPageSizeName); + jon32xRegion.add(groupMemberQueryValue); + jon32xRegion.add(enablePosixGroups); + top.add(jon32xRegion); + + //put into row display + JPanel advancedDebugRegion = new JPanel(); + advancedDebugRegion.setLayout(new FlowLayout(FlowLayout.LEFT)); + LineBorder advancedBorder = new LineBorder(Color.BLACK, 2); + TitledBorder debugBorder = new TitledBorder(advancedBorder, "Debug: **Warning --<hover HERE>**"); + advancedDebugRegion.setBorder(debugBorder); + advancedDebugRegion.add(enableLdapReferral); + advancedDebugRegion.add(enableVerboseDebugging); + advancedDebugRegion.add(enableVerboseGroupParsing); + advancedDebugRegion.add(iterativeVerboseLogging); + advancedDebugRegion.setToolTipText(warnMessage); + top.add(advancedDebugRegion); + + JPanel securityPanel = new JPanel(); + securityPanel.setLayout(new FlowLayout(FlowLayout.LEFT)); + showPasswords.addItemListener(new ItemListener() { + @Override + public void itemStateChanged(ItemEvent e) { + SwingUtilities.invokeLater(new Runnable() { + @Override + public void run() { + //store off existing value + String existingValue = ""; + String existingTestUserPass = ""; + JTextField current = 
fieldMappings.get("Password:"); + if(current instanceof JPasswordField){ + JPasswordField pass = ((JPasswordField)current); + if(pass!=null){ + char[] charArray = pass.getPassword(); + if(charArray.length>0){ + existingValue = new String(charArray); + } + } + }else{ + existingValue = current.getText(); + } + //save off test user password as well + if(testUserPasswordValue instanceof JPasswordField){ + JPasswordField pass = ((JPasswordField)testUserPasswordValue); + if(pass!=null){ + char[] charArray = pass.getPassword(); + if(charArray.length>0){ + existingTestUserPass = new String(charArray); + } + } + }else{ + existingTestUserPass=testUserPasswordValue.getText(); + } + + JTextField updatedContainer = null; + if(showPasswords.isSelected()){ + updatedContainer = new JTextField(textBoxWidth); + updatedContainer.setText(existingValue); + testUserPasswordValue = new JTextField(textBoxWidth); + testUserPasswordValue.setText(existingTestUserPass); + }else{ + updatedContainer = new JPasswordField(textBoxWidth); + updatedContainer.setText(existingValue); + testUserPasswordValue = new JPasswordField(textBoxWidth); + testUserPasswordValue.setText(existingTestUserPass); + } + //locate the JPanel and rebuild it Should be at index 3 + JPanel passwordRow = (JPanel) top.getComponent(3); +// JTextField jf = (JTextField) passwordRow.getComponent(1); + //store off existing components + Component[] existing = new Component[passwordRow.getComponentCount()]; + for(int i=0; i<passwordRow.getComponentCount();i++){ + existing[i] = passwordRow.getComponent(i); + } + passwordRow.removeAll(); + for(int j=0;j<existing.length;j++){ + if(j==1){//insert new JTextField instead + passwordRow.add(updatedContainer); + }else{ + passwordRow.add(existing[j]); + } + } + //reload testUserRegion + //store off existing components + Component[] existingTest = new Component[testUserRegion.getComponentCount()]; + for(int i=0; i<testUserRegion.getComponentCount();i++){ + existingTest[i] = 
testUserRegion.getComponent(i); + } + testUserRegion.removeAll(); + for(int j=0;j<existingTest.length;j++){ + if(j==3){//insert new JTextField instead + testUserRegion.add(testUserPasswordValue); + }else{ + testUserRegion.add(existingTest[j]); + } + } + + top.revalidate(); + top.repaint(); + } + }); + } + }); + securityPanel.add(showPasswords); + ssl = new JCheckBox("SSL:"); + ssl.setEnabled(false); + securityPanel.add(ssl); + top.add(securityPanel); + + // test user auth region + testUserRegion = new JPanel(); + testUserRegion.setLayout(new FlowLayout(FlowLayout.LEFT)); + LineBorder border = new LineBorder(Color.BLUE, 2); + TitledBorder tBorder = new TitledBorder(border, "Authentication/Authorization Check Credentials: (insert valid ldap user assigned to group)"); + testUserRegion.setBorder(tBorder); + JLabel testUserName = new JLabel("Test UserName:"); + testUserNameValue = new JTextField(textBoxWidth); + JLabel testUserPassword = new JLabel("Test Password:"); +// testUserPasswordValue = new JTextField(textBoxWidth); + testUserPasswordValue = new JPasswordField(textBoxWidth); + testUserRegion.add(testUserName); + testUserRegion.add(testUserNameValue); + testUserRegion.add(testUserPassword); + testUserRegion.add(testUserPasswordValue); + top.add(testUserRegion); + + // center + JPanel center = new JPanel(); + center.setLayout(new BoxLayout(center, BoxLayout.X_AXIS)); + // build center panel + buildCenterPanel(center); + + // final component layout + getContentPane().add(top, BorderLayout.NORTH); + getContentPane().add(center, BorderLayout.CENTER); + this.setSize(720, 700); + addWindowListener(new WindowAdapter() { + public void windowClosing(WindowEvent e) { + System.exit(0); + } + }); + setVisible(true); + } + + // define the center display panel. 
+ private void buildCenterPanel(JPanel center) { + // First element is Test Button + JButton test = new JButton("Test Settings"); + center.add(test); + // second is large text box that display ldap queries + testResults = new JTextArea("(click button to test settings values: simulates 4 separate checks showing ldap filters used)", + 40, 40); + JScrollPane jsp = new JScrollPane(testResults); + center.add(jsp); + test.addActionListener(new ActionListener() { + public void actionPerformed(ActionEvent e) { + testResults.setText("");//clear out empty msg + //trim spaces from all fields + String ldapServer = fieldMappings.get(keys[0]).getText().trim(); + String searchFilter = fieldMappings.get(keys[1]).getText().trim(); + String searchBase = fieldMappings.get(keys[2]).getText().trim(); + String loginProperty = fieldMappings.get(keys[3]).getText().trim(); + String bindUserName = fieldMappings.get(keys[4]).getText().trim(); + String groupSearchFilter = fieldMappings.get(keys[5]).getText().trim(); + String bindPassword = fieldMappings.get(keys[6]).getText().trim(); + String groupMemberFilter = fieldMappings.get(keys[7]).getText().trim(); + String groupMemberQuerySize = groupMemberQueryValue.getText().trim(); + String testUserName = testUserNameValue.getText().trim(); + String testUserPassword = testUserPasswordValue.getText().trim(); + // validate initial required elements + String msg = null; + boolean proceed = true; + //valid required details set. 
+ if (ldapServer.isEmpty() || bindUserName.isEmpty() + || bindPassword.isEmpty() || searchBase.isEmpty()) { + msg ="STEP-1:FAIL: "+ keys[0] + ", " + keys[2] + ", " + keys[4] + ", " + + keys[6] + " cannot be empty to proceed."; + log(msg); + proceed = false; + } + env = null; + InitialLdapContext ctx = null; + if (proceed) {// attempt initial ldap bind from RHQ server + msg = "STEP-1:TESTING: Attempting to bind to server:" + ldapServer + + "\n with user '" + bindUserName + + "' and password entered."; + log(msg); + env = getProperties(ldapServer); + env.setProperty(Context.SECURITY_PRINCIPAL, bindUserName); + env.setProperty(Context.SECURITY_CREDENTIALS, bindPassword); + env.setProperty(Context.SECURITY_AUTHENTICATION, "simple"); + //put the rest of the LDAP properties into the Properties instance for use later. + //there still needs to be separate variables since some are for UI validation. + env.setProperty(SystemSetting.LDAP_GROUP_FILTER.getInternalName(), groupSearchFilter); + env.setProperty(SystemSetting.LDAP_GROUP_MEMBER.getInternalName(), groupMemberFilter); + env.setProperty(SystemSetting.LDAP_BASE_DN.getInternalName(), searchBase); + env.setProperty(SystemSetting.LDAP_LOGIN_PROPERTY.getInternalName(), loginProperty); + env.setProperty(SystemSetting.LDAP_BIND_DN.getInternalName(), bindUserName); + env.setProperty(SystemSetting.LDAP_BIND_PW.getInternalName(), bindPassword); + env.setProperty(SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.getInternalName(), groupMemberQuerySize); + + for(Object key :env.keySet()){ + System.out.println(key+"="+env.getProperty(key+"")); + } + + try { + ctx = new InitialLdapContext(env, null); + msg = "STEP-1:PASS: LDAP bind credentials are correct. 
Successfully connected to '" + + ldapServer + + "'.\n This means the LDAP Bind credentials for the RHQ Server authentication/authorization requests to ldap server " + + "are correct."; + if(enableVerboseDebugging.isSelected()){ + msg+="\n"+advdb+" LDAP simple authentication bind successful."; + } + log(msg); + proceed = true; + } catch (Exception ex) { + msg = "STEP-1:FAIL: Unable to connect to the LDAP server with credentials specified.\n"; + msg+="Exception:"+ex.getMessage(); + if(enableVerboseDebugging.isSelected()){ + msg = appendStacktraceToMsg(msg, ex); + } + log(msg); + proceed = false; + } + } + if (proceed) {// retrieve test credentials to test run auth + // load search controls + SearchControls searchControls = getSearchControls(); + // validating searchFilter and test user/pass creds + proceed = true; + if (testUserName.isEmpty() || (testUserPassword.isEmpty())) { + msg = "STEP-2:FAIL: Test Username/Password fields cannot be empty for this step."; + log(msg); + proceed = false; + } + // testing a valid user involves a filtered ldap search + // using the loginProperty, and optionally searchFilter + String userDN = ""; + if (proceed) { + // default loginProperty to cn if it's not set + if (loginProperty.isEmpty()) { + loginProperty = "cn"; + if(enableVerboseDebugging.isSelected()){ + String mesg = "As you have not specified a login property, defaulting to 'cn'"; + log(advdb+" "+msg); + } + } + String filter; + if (!searchFilter.isEmpty()) { + filter = "(&(" + loginProperty + "=" + testUserName + + ")" + "(" + searchFilter + "))"; + } else { + filter = "(" + loginProperty + "=" + testUserName + + ")"; + } + if(enableVerboseDebugging.isSelected()){ + log(advdb+" The searchfilter is optionally appended to login property for additional shared attribute across users."); + } + msg = "STEP-2:TESTING: To validate the test user the following LDAP filtered component will be used to find matching users:\n"; + msg += filter; + log(msg); + // test out the search on the 
target ldap server + try { + String[] baseDNs = searchBase.split(";"); + for (int x = 0; x < baseDNs.length; x++) { + NamingEnumeration answer = ctx.search( + baseDNs[x], filter, searchControls); + if(enableVerboseDebugging.isSelected()){ + log(advdb+" this search was excuted against DN component '"+baseDNs[x]+"'."); + } + // boolean ldapApiNpeFound = false; + if (!answer.hasMoreElements()) { + msg="STEP-2:WARN Unable to locate a matching users for the filter'"+filter+ + "'. Please check your loginProperty. Usually 'cn' or 'uid'"; + log(msg); + continue; + } + // Going with the first match + SearchResult si = (SearchResult) answer.next(); + + // Construct the UserDN + userDN = si.getName() + "," + baseDNs[x]; + msg = "STEP-2:PASS: The test user '" + + testUserName + + "' was succesfully located, and the following userDN will be used in authorization check:\n"; + msg += userDN; + log(msg); + + ctx.addToEnvironment(Context.SECURITY_PRINCIPAL, userDN); + ctx.addToEnvironment(Context.SECURITY_CREDENTIALS,testUserPassword); + ctx.addToEnvironment(Context.SECURITY_AUTHENTICATION,"simple"); + + // if successful then verified that user and pw + // are valid ldap credentials + ctx.reconnect(null); + msg = "STEP-2:PASS: The user '" + + testUserName + + "' was succesfully authenticated using userDN '" + + userDN + "' and password provided.\n" + +"*Note: the loginProperty must match the loginProperty listed in dn: for the user. It is the DN that RHQ will lookup and use."; + log(msg); + } + } catch (Exception ex) { + msg = "STEP-2:FAIL: There was an error while searching for or authenticating the user '" + + testUserName + "'\n"; + msg += ex.getMessage(); + if(enableVerboseDebugging.isSelected()){ + msg = appendStacktraceToMsg(msg, ex); + } + log(msg); + proceed=false; + } + } + // with authentication completed, now check authorization. 
+ // validate filter components to list all available groups + proceed = false; + if (!groupSearchFilter.isEmpty()) { + Set<Map<String, String>> ret = new HashSet<Map<String, String>>(); + String filter = null; + + if (groupSearchFilter.startsWith("(") && groupSearchFilter.endsWith(")")){ + filter = groupSearchFilter; // RFC 2254 does not allow for ((expression)) + }else{ + filter = String + .format("(%s)", groupSearchFilter); + } + msg = "STEP-3:TESTING: This ldap filter " + + filter + + " will be used to locate ALL available LDAP groups"; + log(msg); + + Properties systemConfig = populateProperties(env); + + ret = buildGroup(systemConfig, filter); + msg = "STEP-3:TESTING: Using Group Search Filter '" + + filter + "', " + ret.size() + + " ldap group(s) were located.\n"; + if (ret.size() > 0) { + HashMap<String, String>[] ldapLists = new HashMap[ret + .size()]; + ret.toArray(ldapLists); + // in this mode go beyond the first ten results. + if (enableVerboseGroupParsing.isSelected()) { + msg += "STEP-3:PASS: Listing 'all' of the ldap groups located: \n"; + for (int i = 0; i < ret.size(); i++) { + msg += ldapLists[i] + "\n"; + } + } else {// otherwise only show first 10[subset of + // available groups] + msg += "STEP-3:PASS: Listing a few(<=10) of the ldap groups located: \n"; + for (int i = 0; (i < ret.size() && i < 10); i++) { + msg += ldapLists[i] + "\n"; + } + } + proceed = true;// then can proceed to next step. 
+ } + log(msg); + } else { + msg = "STEP-3:FAIL: Group Search Filter: cannot be empty to proceed."; + log(msg); + proceed=false; + } + // retrieve lists of authorized groups available for the + if (proceed) { + // check groupMember + if (!groupMemberFilter.isEmpty()) { +// Map<String, String> userDetails = new HashMap<String, String>(); +// userDetails = findLdapUserDetails(userDN); + Set<String> userDetails = findAvailableGroupsFor(testUserName); + + if(!userDetails.isEmpty()){ + proceed=true; + } + } else { + msg = "STEP-4:FAIL: Group Member Filter must be non-empty to proceed with simulating authorization check for test user."; + log(msg); + } + } + if(proceed){ + msg="COMPLETED:PASS: The current settings, for successful steps, should be correct to enter into your RHQ server."; + msg+="\n\n\n\n When you encounter failures, warnings or other unexpected results you should use an external "; + msg+="LDAP search utility to check that the generated filters return the expected LDAP results."; + log(msg); + } + } + } + }); + } + + private String appendStacktraceToMsg(String msg, Exception ex) { + String moreVerbose = ""; + moreVerbose+=advdb+" Exception type:"+ex.getClass()+"\n"; + moreVerbose+=advdb+" Exception stack trace reference:"+ex.getStackTrace()+"\n"; + if(ex.getStackTrace()!=null){ + StringWriter sw = new StringWriter(); + PrintWriter pw = new PrintWriter(sw); + ex.printStackTrace(pw); + moreVerbose+=advdb+" stack trace reference:"+sw.toString(); + } + msg+="\n"+moreVerbose; + return msg; + } + + private boolean containsIllegalLdap(String currentValue) { + boolean invalidData = false; + if((currentValue!=null)&&(!currentValue.trim().isEmpty())){ + //TODO: spinder 3/17/11: need to figure out regex to filter/detect bad data in returned ldap. Giving up for now. 
+// String regex = "(?<=(?:[^\]|^)(\\)+|[^\]|^)[/,+"><;=#]|(?<=(?:[^\]|^)(\\)+|[^\]|^)\(?!\|[/,+"><;=#]| $|(?<=^\) )|^"; +// regex = "(?<=(?:[^\\]|^)(\\\\)+|[^\\]|^)[/,+\"><;=#]|(?<=(?:[^\\]|^)(\\\\)+|[^\\]|^)\\(?!\\|[/,+\"><;=#]| $|(?<=^\\) )|^"; +// System.out.println("++++++++ CURR VAL:"+currentValue+":INV-CHeck:"+currentValue.matches(",+"\<;\n=/")+":NEWCHECK:"+(currentValue.matches(regex))); +// if(currentValue.matches(",+"\<;\n=/")){ +// invalidData=true; +// } +// String badList = ",+"\<;\n="; + String badList = "+"\<;\n"; + for(char car :currentValue.toCharArray()){ + for(char c :badList.toCharArray()){ + if(car == c){ + invalidData=true; + } + } + } + + } + return invalidData; + } + /** + * @throws NamingException + * @see org.jboss.security.auth.spi.UsernamePasswordLoginModule#validatePassword(java.lang.String,java.lang.String) + */ + protected Set<Map<String, String>> buildGroup(Properties systemConfig, String filter) { + Set<Map<String, String>> groupDetailsMap = new HashSet<Map<String, String>>(); + // Load our LDAP specific properties + // Load the BaseDN + String baseDN = (String) systemConfig.get(SystemSetting.LDAP_BASE_DN.getInternalName()); + + // Load the LoginProperty + String loginProperty = (String) systemConfig.get(SystemSetting.LDAP_LOGIN_PROPERTY.getInternalName()); + if (loginProperty == null) { + // Use the default + loginProperty = "cn"; + } + // Load any information we may need to bind + String bindDN = (String) systemConfig.get(SystemSetting.LDAP_BIND_DN.getInternalName()); + String bindPW = (String) systemConfig.get(SystemSetting.LDAP_BIND_PW.getInternalName()); + if (bindDN != null) { + systemConfig.setProperty(Context.SECURITY_PRINCIPAL, bindDN); + systemConfig.setProperty(Context.SECURITY_CREDENTIALS, bindPW); + systemConfig.setProperty(Context.SECURITY_AUTHENTICATION, "simple"); + } + try { + InitialLdapContext ctx = new InitialLdapContext(systemConfig, null); + SearchControls searchControls = getSearchControls(); + /*String filter 
= "(&(objectclass=groupOfUniqueNames)(uniqueMember=uid=" + userName + + ",ou=People, dc=rhndev, dc=redhat, dc=com))";*/ + + //modify the search control to only include the attributes we will use + String[] attributes = { "cn", "description" }; + searchControls.setReturningAttributes(attributes); + + //BZ:964250: add rfc 2696 + //default to 1000 results. System setting page size from UI should be non-negative integer > 0. + //additionally as system settings are modifiable via CLI which may not have param checking enabled do some + //more checking. + int defaultPageSize = 1000; + // only if they're enabled in the UI. + if (enable32xFeatures.isSelected()) { + String groupPageSize = systemConfig.getProperty( + SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE + .getInternalName(), "" + defaultPageSize); + if ((groupPageSize != null) + && (!groupPageSize.trim().isEmpty())) { + int passedInPageSize = -1; + try { + passedInPageSize = Integer + .valueOf(groupPageSize.trim()); + if (passedInPageSize > 0) { + defaultPageSize = passedInPageSize; + if(enableVerboseDebugging.isSelected()){ + log(advdb + + " LDAP Group Query Page Sizing of '"+defaultPageSize+"' is being requested from server."); + } + } + } catch (NumberFormatException nfe) { + // log issue and do nothing. Go with the default. + String msg = "LDAP Group Page Size passed in '" + + groupPageSize + + "' in is invalid. Defaulting to 1000 results." + + nfe.getMessage(); + log(msg); + } + } + ctx.setRequestControls(new Control[] { new PagedResultsControl( + defaultPageSize, Control.CRITICAL) }); + } + // Loop through each configured base DN. It may be useful + // in the future to allow for a filter to be configured for + // each BaseDN, but for now the filter will apply to all. 
+ String[] baseDNs = baseDN.split(BASEDN_DELIMITER); + + for (int x = 0; x < baseDNs.length; x++) { + if (enableVerboseDebugging.isSelected()) { + log(advdb + + " this search was excuted against DN component '" + + baseDNs[x] + "'."); + } + executeGroupSearch(filter, groupDetailsMap, ctx, searchControls, baseDNs, x); + + // continually parsing pages of results until we're done. + // only if they're enabled in the UI. + if (enable32xFeatures.isSelected()) { + // handle paged results if they're being used here + byte[] cookie = null; + Control[] controls = ctx.getResponseControls(); + if (controls != null) { + for (Control control : controls) { + if (control instanceof PagedResultsResponseControl) { + PagedResultsResponseControl pagedResult = (PagedResultsResponseControl) control; + cookie = pagedResult.getCookie(); + } + } + } + + while (cookie != null) { + String msg = "RFC 2696 is supported by the server and we are paging through the results. "+ + groupDetailsMap.size()+" results returned so far."; + if(enableVerboseGroupParsing.isSelected()){ + log(advdb + + msg); + } + // ensure the next requests contains the session/cookie + // details + ctx.setRequestControls(new Control[] { new PagedResultsControl( + defaultPageSize, cookie, Control.CRITICAL) }); + executeGroupSearch(filter, groupDetailsMap, ctx, + searchControls, baseDNs, x); + // empty out cookie + cookie = null; + // test for further iterations + controls = ctx.getResponseControls(); + if (controls != null) { + for (Control control : controls) { + if (control instanceof PagedResultsResponseControl) { + PagedResultsResponseControl pagedResult = (PagedResultsResponseControl) control; + cookie = pagedResult.getCookie(); + } + } + } + } + } + }//end of for loop + } catch (NamingException e) { + if (e instanceof InvalidSearchFilterException) { + InvalidSearchFilterException fException = (InvalidSearchFilterException) e; + String message = "The ldap group filter defined is invalid "; + log(message); + } + 
//TODO: check for ldap connection/unavailable/etc. exceptions. + else { + String mesg = "LDAP communication error: " + e.getMessage(); + log(mesg); + } + } catch (IOException iex) { + String msg = "Unexpected LDAP communciation error:" + iex.getMessage(); + log(msg); + } + + return groupDetailsMap; + } + + /** Executes the LDAP group query using the filters, context and search controls, etc. parameters passed in. + * The matching groups located during processing this pages of results are added as new entries to the + * groupDetailsMap passed in. + * + * @param filter + * @param groupDetailsMap + * @param ctx + * @param searchControls + * @param baseDNs + * @param x + * @throws NamingException + */ + private void executeGroupSearch(String filter, Set<Map<String, String>> groupDetailsMap, InitialLdapContext ctx, + SearchControls searchControls, String[] baseDNs, int x) throws NamingException { + //execute search based on controls and context passed in. + NamingEnumeration<SearchResult> answer = ctx.search(baseDNs[x], filter, searchControls); + boolean ldapApiEnumerationBugEncountered = false; + while ((!ldapApiEnumerationBugEncountered) && answer.hasMoreElements()) {//BZ:582471- ldap api bug change + // We use the first match + SearchResult si = null; + try { + si = answer.next(); + } catch (NullPointerException npe) { + if (enableVerboseDebugging.isSelected()) { + log(advdb + + " NullPtr exception detected. If known LDAP api enum npe ignore: " + + npe.getMessage() + "."); + } + ldapApiEnumerationBugEncountered = true; + break; + } + + if (enableVerboseDebugging.isSelected() + || enableVerboseGroupParsing.isSelected()) { + Attributes attributeContainer = si.getAttributes(); + NamingEnumeration<? 
extends Attribute> attributes = attributeContainer + .getAll(); + String attributesReturned = " "; + while (attributes.hasMore()) { + attributesReturned += attributes.next().getID() + ","; + } + String dbugMesg = "\n" + + advdb + + " Group search LDAP (" + + attributeContainer.size() + + ") attributes located for group '" + + si.getName() + + "' are [" + + attributesReturned.substring(0, + attributesReturned.length() - 1) + "]."; + // directly update here to shorten messages for lots of groups + testResults.setText(testResults.getText() + dbugMesg); + //This flag can be used in the unlikely case that the UI hangs during a test operation.: + if(iterativeVerboseLogging.isSelected()){ + System.out.println(dbugMesg); + } + + // additionally parse attribute ids and values for illegal ldap + // characters + if (enableVerboseGroupParsing.isSelected()) { + attributes = attributeContainer.getAll(); + String currentAttributeId = ""; + String currentValue = ""; + // spinder: 3/17/11: should we bail on first bad data or + // display them all? 
+ while (attributes.hasMore()) { + boolean badData = false; + Attribute att = attributes.next(); + currentAttributeId = att.getID(); + if (containsIllegalLdap(currentAttributeId)) { + log(advdb + + " LDAP Group: bad atrribute data detected for group '" + + si.getName() + "' for attribute '" + + currentAttributeId + "'."); + badData = true; + } + if (att.getAll() != null) { + NamingEnumeration<?> enumer = att.getAll(); + while (enumer.hasMore()) { + currentValue = enumer.next() + ""; + if (containsIllegalLdap(currentValue)) { + log(advdb + + " LDAP Group: bad data detected for group '" + + si.getName() + + "' with attribute '" + + currentAttributeId + + "' and value:" + currentValue); + badData = true; + } + } + } + if (badData) { + log(advdb + + "** LDAP Group: Some bad LDAP data detected for group '" + + si.getName() + "'."); + } + } + } + } + + Map<String, String> entry = new HashMap<String, String>(); + // String name = (String) si.getAttributes().get("cn").get(); + Attribute commonNameAttr = si.getAttributes().get("cn"); + if (commonNameAttr != null) { + String name = (String) commonNameAttr.get(); + name = name.trim(); + Attribute desc = si.getAttributes().get("description"); + String description = desc != null ? (String) desc.get() : ""; + description = description.trim(); + entry.put("id", name); + entry.put("name", name); + entry.put("description", description); + groupDetailsMap.add(entry); + } else {// unable to retrieve details for specific group. + log(advdb + + " There was an error retrieving 'cn' attribute for group '" + + si.getName() + + "'. Not adding to returned list of groups. 
"); + } + } + } + + public Map<String, String> findLdapUserDetails(String userName) { + // Load our LDAP specific properties + Properties systemConfig = env; + HashMap<String, String> userDetails = new HashMap<String, String>(); + + // Load the BaseDN + String baseDN = (String) systemConfig.get(SystemSetting.LDAP_BASE_DN.getInternalName()); + + // Load the LoginProperty + String loginProperty = (String) systemConfig.get(SystemSetting.LDAP_LOGIN_PROPERTY.getInternalName()); + if (loginProperty == null) { + // Use the default + loginProperty = "cn"; + } + // Load any information we may need to bind + String bindDN = (String) systemConfig.get(SystemSetting.LDAP_BIND_DN.getInternalName()); + String bindPW = (String) systemConfig.get(SystemSetting.LDAP_BIND_PW.getInternalName()); + + // Load any search filter + String groupSearchFilter = (String) systemConfig.get(SystemSetting.LDAP_GROUP_FILTER.getInternalName()); + String groupMemberFilter = (String) systemConfig.get(SystemSetting.LDAP_GROUP_MEMBER.getInternalName()); + String testUserDN=userName; + String ldapServer = (String) systemConfig.get(Context.PROVIDER_URL); + + Properties env = getProperties(ldapServer); + + if (bindDN != null) { + env.setProperty(Context.SECURITY_PRINCIPAL, bindDN); + env.setProperty(Context.SECURITY_CREDENTIALS, bindPW); + env.setProperty(Context.SECURITY_AUTHENTICATION, "simple"); + } + + try { + InitialLdapContext ctx = new InitialLdapContext(env, null); + SearchControls searchControls = getSearchControls(); + + String filter = String.format("(&(%s)(%s=%s))", + groupSearchFilter, groupMemberFilter, + testUserDN); + + generateUiLoggingForStep4LdapFilter(userName, filter); + + // Loop through each configured base DN. It may be useful + // in the future to allow for a filter to be configured for + // each BaseDN, but for now the filter will apply to all. 
+ String[] baseDNs = baseDN.split(BASEDN_DELIMITER); + for (int x = 0; x < baseDNs.length; x++) { + NamingEnumeration<SearchResult> answer = ctx.search(baseDNs[x], filter, searchControls); + if (!answer.hasMoreElements()) { //BZ:582471- ldap api bug change + // Nothing found for this DN, move to the next one if we have one. + continue; + } + + // We use the first match + SearchResult si = answer.next(); + //generate the DN + String userDN = null; + try { + userDN = si.getNameInNamespace(); + } catch (UnsupportedOperationException use) { + userDN = si.getName(); + if (userDN.startsWith("\"")) { + userDN = userDN.substring(1, userDN.length()); + } + if (userDN.endsWith("\"")) { + userDN = userDN.substring(0, userDN.length() - 1); + } + userDN = userDN + "," + baseDNs[x]; + } + userDetails.put("dn", userDN); + + // Construct the UserDN + NamingEnumeration<String> keys = si.getAttributes().getIDs(); + while (keys.hasMore()) { + String key = keys.next(); + Attribute value = si.getAttributes().get(key); + if ((value != null) && (value.get() != null)) { + userDetails.put(key, value.get().toString()); + } + } +// return userDetails; + }//end of for loop + generateUiLoggingStep4Authz(filter); + return userDetails; + } catch (Exception ex) { + generateUiLoggingStep4Exception(ex); + } + return userDetails; + } + + public Set<String> findAvailableGroupsFor(String userName) { + // Load our LDAP specific properties + Properties options = env; + String groupFilter = options.getProperty(SystemSetting.LDAP_GROUP_FILTER.getInternalName(), ""); + String groupMember = options.getProperty(SystemSetting.LDAP_GROUP_MEMBER.getInternalName(), ""); + String groupUsePosix = options.getProperty(SystemSetting.LDAP_GROUP_USE_POSIX.getInternalName(), "false"); + if (groupUsePosix == null) { + groupUsePosix = Boolean.toString(false);//default to false + } + boolean usePosixGroups = Boolean.valueOf(groupUsePosix); + String userAttribute = getUserAttribute(options, userName, usePosixGroups); + 
Set<String> ldapSet = new HashSet<String>(); + + if (userAttribute != null && userAttribute.trim().length() > 0) { + //TODO: spinder 4/21/10 put in error/debug logging messages for badly formatted filter combinations + String filter = ""; + //form assumes examples where groupFilter is like 'objectclass=groupOfNames' and groupMember is 'member' + // to produce ldap filter like (&(objectclass=groupOfNames)(member=cn=Administrator,ou=People,dc=test,dc=com)) + // or like (&(objectclass=groupOfNames)(memberUid=Administrator)) for posixGroups. + filter = String.format("(&(%s)(%s=%s))", groupFilter, groupMember, encodeForFilter(userAttribute)); + + Set<Map<String, String>> matched = buildGroup(options, filter); +// log.trace("Located '" + matched.size() + "' LDAP groups for user '" + userName +// + "' using following ldap filter '" + filter + "'."); + + //iterate to extract just the group names. + for (Map<String, String> match : matched) { + ldapSet.add(match.get("id")); + } + } else { +// log.debug("Group lookup will not be performed due to no UserDN found for user " + userName); + } + + return ldapSet; + } + + private void generateUiLoggingStep4Exception(Exception ex) { + String groupSearchFilter = env + .getProperty(SystemSetting.LDAP_GROUP_FILTER + .getInternalName()); + String msg = "STEP-4:FAIL: There was an error searching with the groupFilter supplied: " + + groupSearchFilter + "'\n"; + msg += ex.getMessage(); + if (enableVerboseDebugging.isSelected()) { + msg = appendStacktraceToMsg(msg, ex); + } + log(msg); + } + + private void generateUiLoggingStep4Authz(String filter) { + Set<Map<String, String>> groups = buildGroup(env, filter); + String msg = "STEP-4:TESTING: Using Group Search Filter '" + + filter + "', " + groups.size() + + " ldap group(s) were located.\n"; + if (groups.size() > 0) { + HashMap<String, String>[] ldapLists = new HashMap[groups + .size()]; + groups.toArray(ldapLists); + msg += "STEP-4:PASS: Listing a few of the ldap groups located: \n"; + // 
iterate over first ten or less to demonstrate retrieve + for (int i = 0; (i < groups.size() && i < 10); i++) { + msg += ldapLists[i] + "\n"; + } + }else{ + msg+="STEP-4:WARN: With current settings, test user is not authorized for any groups. Is this correct?"; + } + log(msg); + } + + private void generateUiLoggingForStep4LdapFilter(String userName, + String filter) { + String msg = "STEP-4:TESTING: about to do ldap search with filter \n'" + + filter + + "'\n to locate groups that test user '"+userName+"' IS authorized to access."; + log(msg); + } + + + // throw the label and fields together, two to a row. + private HashMap<String, JTextField> loadUiFields(JPanel top, + String[] componentKeys) { + HashMap<String, JTextField> mappings = new HashMap<String, JTextField>(); + for (int i = 0; i < componentKeys.length; i++) { + String firstLabelKey = componentKeys[i]; + String secondLabelKey = componentKeys[++i]; + // locate second key + JPanel row = new JPanel(); + row.setLayout(new FlowLayout(FlowLayout.LEFT)); + JLabel label1 = new JLabel(firstLabelKey); + label1.setSize(textBoxWidth, 5); +// JTextField value1 = new JTextField(textBoxWidth); + JTextField value1 = null; + if (firstLabelKey.equalsIgnoreCase("Password:")&&(!showPasswords.isSelected())) { + value1 = new JPasswordField(textBoxWidth); + } else { + value1 = new JTextField(textBoxWidth); + } + JLabel label2 = new JLabel(secondLabelKey); + JTextField value2 = new JTextField(textBoxWidth); + row.add(label1); + row.add(value1); + row.add(Box.createRigidArea(new Dimension(0, 5))); + row.add(label2); + row.add(value2); + mappings.put(firstLabelKey, value1); + mappings.put(secondLabelKey, value2); + top.add(row); + } + + return mappings; + } + + private Properties getProperties(String contentProvider) { + Properties env = new Properties(); + env.setProperty(Context.INITIAL_CONTEXT_FACTORY, + "com.sun.jndi.ldap.LdapCtxFactory"); + env.setProperty(Context.PROVIDER_URL, contentProvider); + 
if(!enableLdapReferral.isSelected()){ + env.setProperty(Context.REFERRAL, "ignore"); + }else{ + String msg="**---- You have chosen to tell LDAP servers to [FOLLOW] context referrals. Default is [IGNORE] referrals. --**"; + log(msg); + env.setProperty(Context.REFERRAL, "follow"); + } + +// // Setup SSL if requested +// String protocol = ssl.isSelected()? "ssl":""; +// if ((protocol != null) && protocol.equals("ssl")) { +// String ldapSocketFactory = env +// .getProperty("java.naming.ldap.factory.socket"); +// if (ldapSocketFactory == null) { +// env.put("java.naming.ldap.factory.socket", +// UntrustedSSLSocketFactory.class.getName()); +// } +// env.put(Context.SECURITY_PROTOCOL, "ssl"); +// } + + return env; + } + + private String delineate() { + String line = "-"; + for (int i = 0; i < 30; i++) { + line += "-"; + } + return line; + } + + /** Takes care of delineating messages and conditional logging contents passed in. + * @param msg + */ + private void log(String msg) { + String message = "\n" + delineate() + "\n"; + message += msg; + message += "\n" + delineate() + "\n\n"; + //This flag can be used in the unlikely case that the UI hangs during a test operation.: + if(iterativeVerboseLogging.isSelected()){ + System.out.println(message); + } + testResults.setText(testResults.getText() + message); + } + + private SearchControls getSearchControls() { + int scope = SearchControls.SUBTREE_SCOPE; + int timeLimit = 0; + long countLimit = 0; + String[] returnedAttributes = null; + boolean returnObject = false; + boolean deference = false; + SearchControls constraints = new SearchControls(scope, countLimit, + timeLimit, returnedAttributes, returnObject, deference); + return constraints; + } + + /** Translate SystemSettings to familiar Properties instance since we're + * passing not one but multiple values. 
+ * + * @param systemSettings + * @return + */ + private Properties populateProperties(Properties existing) { + Properties properties = new Properties(); + if(existing!=null){ + properties = existing; + } + for (SystemSetting entry : SystemSetting.values()) { + if(entry!=null){ + switch(entry){ + case LDAP_BASED_JAAS_PROVIDER: + properties.put(entry.getInternalName(), ""); + break; + } + } + } + return properties; + } + + /**Build/retrieve the user DN. Not usually a property. + * + * @param options + * @param userName + * @param usePosixGroups boolean indicating whether we search for groups with posixGroup format + * @return + */ + private String getUserAttribute(Properties options, String userName, boolean usePosixGroups) { + Map<String, String> details = findLdapUserDetails(userName); + String userAttribute = null; + if (usePosixGroups) {//return just the username as posixGroup member search uses (&(%s)(memberUid=username)) + userAttribute = userName; + } else {//this is the default where group search uses (&(%s)(uniqueMember={userDn})) + userAttribute = details.get("dn"); + } + + return userAttribute; + } + + /** See LDAPStringUtil.encodeForFilter() for original code/source/author/etc. 
+ * <p>Encode a string so that it can be used in an LDAP search filter.</p> + * + * <p>The following table shows the characters that are encoded and their + * encoded version.</p> + * + * <table> + * <tr><th align="center">Character</th><th>Encoded As</th></tr> + * <tr><td align="center">*</td><td>\2a</td></tr> + * <tr><td align="center">(</td><td>\28</td></tr> + * <tr><td align="center">)</td><td>\29</td></tr> + * <tr><td align="center">\</td><td>\5c</td></tr> + * <tr><td align="center"><code>null</code></td><td>\00</td></tr> + * </table> + * + * <p>In addition to encoding the above characters, any non-ASCII character + * (any character with a hex value greater than <code>0x7f</code>) is also + * encoded and rewritten as a UTF-8 character or sequence of characters in + * hex notation.</p> + * + * @param filterString a string that is to be encoded + * @return the encoded version of <code>filterString</code> suitable for use + * in a LDAP search filter + * @see <a href="http://tools.ietf.org/html/rfc4515">RFC 4515</a> + */ + public static String encodeForFilter(final String filterString) { + if (filterString != null && filterString.length() > 0) { + StringBuilder encString = new StringBuilder(filterString.length()); + for (int i = 0; i < filterString.length(); i++) { + char ch = filterString.charAt(i); + switch (ch) { + case '*': // encode a wildcard * character + encString.append("\\2a"); + break; + case '(': // encode a open parenthesis ( character + encString.append("\\28"); + break; + case ')': // encode a close parenthesis ) character + encString.append("\\29"); + break; + case '\\': // encode a backslash \ character + encString.append("\\5c"); + break; + case '\u0000': // encode a null character + encString.append("\\00"); + break; + default: + if (ch <= 0x7f) { // an ASCII character + encString.append(ch); + } else if (ch >= 0x80) { // encode to UTF-8 + try { + byte[] utf8bytes = String.valueOf(ch).getBytes("UTF8"); + for (byte b : utf8bytes) { + 
encString.append(String.format("\\%02x", b)); + } + } catch (UnsupportedEncodingException e) { + // ignore + } + } + } + } + return encString.toString(); + } + return filterString; + } +} + +//Mock up the upgraded system properties approach to use SystemSetting +enum SystemSetting { + LDAP_BASED_JAAS_PROVIDER("CAM_JAAS_PROVIDER"), + LDAP_NAMING_PROVIDER_URL("CAM_LDAP_NAMING_PROVIDER_URL"), + USE_SSL_FOR_LDAP("CAM_LDAP_PROTOCOL"), + LDAP_LOGIN_PROPERTY("CAM_LDAP_LOGIN_PROPERTY"), + LDAP_FILTER("CAM_LDAP_FILTER"), + LDAP_GROUP_FILTER("CAM_LDAP_GROUP_FILTER"), + LDAP_GROUP_MEMBER("CAM_LDAP_GROUP_MEMBER"), + LDAP_GROUP_QUERY_PAGE_SIZE("CAM_LDAP_GROUP_QUERY_PAGE_SIZE"), + LDAP_BASE_DN("CAM_LDAP_BASE_DN"), + LDAP_BIND_DN("CAM_LDAP_BIND_DN"), + LDAP_BIND_PW("CAM_LDAP_BIND_PW"), + LDAP_NAMING_FACTORY("CAM_LDAP_NAMING_FACTORY_INITIAL"), + LDAP_GROUP_USE_POSIX("CAM_LDAP_GROUP_USE_POSIX"), + ; + + private String internalName; + + private SystemSetting(String name) { + this.internalName = name; + } + + public String getInternalName() { + return internalName; + } + + public static SystemSetting getByInternalName(String internalName) { + for (SystemSetting p : SystemSetting.values()) { + if (p.internalName.equals(internalName)) { + return p; + } + } + return null; + } +} + +
commit 8bac47d3d8425fc0aa0b0781bacfa4efe10cf651 Author: Simeon Pinder spinder@redhat.com Date: Tue Jul 30 17:07:58 2013 -0400
Load ldap properties in same way as before refactor and deprecate old property load mechanism as does not work well with reusing existing properties.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java index 097b993..8a0e321 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java @@ -586,7 +586,34 @@ SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.name(), "" } } //now load default/shared LDAP properties as we always have - properties = getProperties(properties); + // Set our default factory name if one is not given + String factoryName = properties.getProperty(SystemSetting.LDAP_NAMING_FACTORY.name()); + properties.setProperty(Context.INITIAL_CONTEXT_FACTORY, factoryName); + + // Setup SSL if requested + String value = properties.getProperty(SystemSetting.USE_SSL_FOR_LDAP.name()); + boolean ldapSsl = "ssl".equalsIgnoreCase(value); + if (ldapSsl) { + String ldapSocketFactory = properties.getProperty("java.naming.ldap.factory.socket"); + if (ldapSocketFactory == null) { + properties.put("java.naming.ldap.factory.socket", UntrustedSSLSocketFactory.class.getName()); + } + properties.put(Context.SECURITY_PROTOCOL, "ssl"); + } + + // Set the LDAP url + String providerUrl = properties.getProperty(SystemSetting.LDAP_NAMING_PROVIDER_URL.name()); + if (providerUrl == null) { + int port = (ldapSsl) ? 
636 : 389; + providerUrl = "ldap://localhost:" + port; + } + + properties.setProperty(Context.PROVIDER_URL, providerUrl); + + // Follow referrals automatically + properties.setProperty(Context.REFERRAL, "ignore"); //BZ:582471- active directory query change + + // properties = getProperties(properties); } return properties; } @@ -598,6 +625,7 @@ SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.name(), "" * * @return properties that are to be used when connecting to LDAP server */ + @Deprecated private Properties getProperties(Properties systemConfig) { Properties env = new Properties(systemConfig); // Set our default factory name if one is not given
commit f977354b21882b73b2da5ea6dd02784e83d9957a Author: Simeon Pinder spinder@redhat.com Date: Mon Jul 29 12:02:56 2013 -0400
Include default ldap property settings in initialization as well.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java index cd27c32..097b993 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java @@ -585,6 +585,8 @@ SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.name(), "" } } } + //now load default/shared LDAP properties as we always have + properties = getProperties(properties); } return properties; }
commit 0b2ca875a21f1e7cce3508f46a2e482611846c7f Author: Simeon Pinder spinder@redhat.com Date: Mon Jul 29 01:32:10 2013 -0400
Fix some deprecated property references.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java index 02de558..cd27c32 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java @@ -98,7 +98,7 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { Properties systemConfig = populateProperties(systemManager.getSystemSettings(subjectManager.getOverlord()));
//retrieve the filters. - String groupFilter = (String) systemConfig.get(SystemSetting.LDAP_GROUP_FILTER.getInternalName()); + String groupFilter = (String) systemConfig.get(SystemSetting.LDAP_GROUP_FILTER.name()); if ((groupFilter != null) && (!groupFilter.trim().isEmpty())) { String filter; if (groupFilter.startsWith("(") && groupFilter.endsWith(")")) { @@ -116,9 +116,9 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal {
public Set<String> findAvailableGroupsFor(String userName) { Properties options = populateProperties(systemManager.getSystemSettings(subjectManager.getOverlord())); - String groupFilter = options.getProperty(SystemSetting.LDAP_GROUP_FILTER.getInternalName(), ""); - String groupMember = options.getProperty(SystemSetting.LDAP_GROUP_MEMBER.getInternalName(), ""); - String groupUsePosix = options.getProperty(SystemSetting.LDAP_GROUP_USE_POSIX.getInternalName(), "false"); + String groupFilter = options.getProperty(SystemSetting.LDAP_GROUP_FILTER.name(), ""); + String groupMember = options.getProperty(SystemSetting.LDAP_GROUP_MEMBER.name(), ""); + String groupUsePosix = options.getProperty(SystemSetting.LDAP_GROUP_USE_POSIX.name(), "false"); if (groupUsePosix == null) { groupUsePosix = Boolean.toString(false);//default to false } @@ -299,20 +299,20 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { HashMap<String, String> userDetails = new HashMap<String, String>();
// Load the BaseDN - String baseDN = (String) systemConfig.get(SystemSetting.LDAP_BASE_DN.getInternalName()); + String baseDN = (String) systemConfig.get(SystemSetting.LDAP_BASE_DN.name());
// Load the LoginProperty - String loginProperty = (String) systemConfig.get(SystemSetting.LDAP_LOGIN_PROPERTY.getInternalName()); + String loginProperty = (String) systemConfig.get(SystemSetting.LDAP_LOGIN_PROPERTY.name()); if (loginProperty == null) { // Use the default loginProperty = "cn"; } // Load any information we may need to bind - String bindDN = (String) systemConfig.get(SystemSetting.LDAP_BASE_DN.getInternalName()); - String bindPW = (String) systemConfig.get(SystemSetting.LDAP_BIND_PW.getInternalName()); + String bindDN = (String) systemConfig.get(SystemSetting.LDAP_BIND_DN.name()); + String bindPW = (String) systemConfig.get(SystemSetting.LDAP_BIND_PW.name());
// Load any search filter - String searchFilter = (String) systemConfig.get(SystemSetting.LDAP_FILTER.getInternalName()); + String searchFilter = (String) systemConfig.get(SystemSetting.LDAP_FILTER.name()); if (bindDN != null) { systemConfig.setProperty(Context.SECURITY_PRINCIPAL, bindDN); systemConfig.setProperty(Context.SECURITY_CREDENTIALS, bindPW); @@ -384,17 +384,17 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { Set<Map<String, String>> groupDetailsMap = new HashSet<Map<String, String>>(); //Load our LDAP specific properties // Load the BaseDN - String baseDN = (String) systemConfig.get(SystemSetting.LDAP_BASE_DN.getInternalName()); + String baseDN = (String) systemConfig.get(SystemSetting.LDAP_BASE_DN.name());
// Load the LoginProperty - String loginProperty = (String) systemConfig.get(SystemSetting.LDAP_LOGIN_PROPERTY.getInternalName()); + String loginProperty = (String) systemConfig.get(SystemSetting.LDAP_LOGIN_PROPERTY.name()); if (loginProperty == null) { // Use the default loginProperty = "cn"; } // Load any information we may need to bind - String bindDN = (String) systemConfig.get(SystemSetting.LDAP_BIND_DN.getInternalName()); - String bindPW = (String) systemConfig.get(SystemSetting.LDAP_BIND_PW.getInternalName()); + String bindDN = (String) systemConfig.get(SystemSetting.LDAP_BIND_DN.name()); + String bindPW = (String) systemConfig.get(SystemSetting.LDAP_BIND_PW.name()); if (bindDN != null) { systemConfig.setProperty(Context.SECURITY_PRINCIPAL, bindDN); systemConfig.setProperty(Context.SECURITY_CREDENTIALS, bindPW); @@ -411,7 +411,7 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { searchControls.setReturningAttributes(attributes);
//detect whether to use Query Page Control - String groupUseQueryPaging = systemConfig.getProperty(SystemSetting.LDAP_GROUP_PAGING.getInternalName(), + String groupUseQueryPaging = systemConfig.getProperty(SystemSetting.LDAP_GROUP_PAGING.name(), "false"); if (groupUseQueryPaging == null) { groupUseQueryPaging = Boolean.toString(false);//default to false @@ -426,7 +426,7 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { // only if they're enabled in the UI. if (useQueryPaging) { String groupPageSize = systemConfig.getProperty( - SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.getInternalName(), "" +SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.name(), "" + defaultPageSize); if ((groupPageSize != null) && (!groupPageSize.trim().isEmpty())) { int passedInPageSize = -1; @@ -599,7 +599,7 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { private Properties getProperties(Properties systemConfig) { Properties env = new Properties(systemConfig); // Set our default factory name if one is not given - String factoryName = env.getProperty(SystemSetting.LDAP_NAMING_FACTORY.getInternalName()); + String factoryName = env.getProperty(SystemSetting.LDAP_NAMING_FACTORY.name()); env.setProperty(Context.INITIAL_CONTEXT_FACTORY, factoryName);
// Setup SSL if requested @@ -614,7 +614,7 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { }
// Set the LDAP url - String providerUrl = env.getProperty(RHQConstants.LDAPUrl); + String providerUrl = env.getProperty(SystemSetting.LDAP_NAMING_PROVIDER_URL.name()); if (providerUrl == null) { int port = (ldapSsl) ? 636 : 389; providerUrl = "ldap://localhost:" + port;
commit 34ad4a2a2894435c84663c52b06bed9ee231bded Author: Simeon Pinder spinder@redhat.com Date: Wed Jun 19 13:51:17 2013 -0400
Some more doc and cleanup/refactor.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java index 4c0c2fc..02de558 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java @@ -156,6 +156,7 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { throw new IllegalArgumentException("Role with id [" + roleId + "] does not exist."); }
+ //add some code to synch up the current list of ldap groups. Set<LdapGroup> currentGroups = role.getLdapGroups(); List<String> currentGroupNames = new ArrayList<String>(currentGroups.size()); for (LdapGroup group : currentGroups) { @@ -167,10 +168,12 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { newGroupNames.add(group.getName()); }
+ //figure out which ones are new then add them. List<String> namesOfGroupsToBeAdded = new ArrayList<String>(newGroupNames); namesOfGroupsToBeAdded.removeAll(currentGroupNames); addLdapGroupsToRole(subject, roleId, namesOfGroupsToBeAdded);
+ //figure out which ones need to be removed. then remove them. List<String> namesOfGroupsToBeRemoved = new ArrayList<String>(currentGroupNames); namesOfGroupsToBeRemoved.removeAll(newGroupNames); int[] idsOfGroupsToBeRemoved = new int[namesOfGroupsToBeRemoved.size()]; @@ -290,11 +293,10 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { }
public Map<String, String> findLdapUserDetails(String userName) { + // Load our LDAP specific properties Properties systemConfig = populateProperties(systemManager.getSystemSettings(subjectManager.getOverlord()));
HashMap<String, String> userDetails = new HashMap<String, String>(); - // Load our LDAP specific properties - Properties env = getProperties(systemConfig);
// Load the BaseDN String baseDN = (String) systemConfig.get(SystemSetting.LDAP_BASE_DN.getInternalName()); @@ -312,13 +314,13 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { // Load any search filter String searchFilter = (String) systemConfig.get(SystemSetting.LDAP_FILTER.getInternalName()); if (bindDN != null) { - env.setProperty(Context.SECURITY_PRINCIPAL, bindDN); - env.setProperty(Context.SECURITY_CREDENTIALS, bindPW); - env.setProperty(Context.SECURITY_AUTHENTICATION, "simple"); + systemConfig.setProperty(Context.SECURITY_PRINCIPAL, bindDN); + systemConfig.setProperty(Context.SECURITY_CREDENTIALS, bindPW); + systemConfig.setProperty(Context.SECURITY_AUTHENTICATION, "simple"); }
try { - InitialLdapContext ctx = new InitialLdapContext(env, null); + InitialLdapContext ctx = new InitialLdapContext(systemConfig, null); SearchControls searchControls = getSearchControls();
// Add the search filter if specified. This only allows for a single search filter.. i.e. foo=bar.
commit 34a7e70c60a4032b70ef299dcf1584a084557c9e Author: Simeon Pinder spinder@redhat.com Date: Wed Jun 19 13:36:52 2013 -0400
Use property internal name.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java index d80732e..4c0c2fc 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java @@ -380,7 +380,7 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { */ protected Set<Map<String, String>> buildGroup(Properties systemConfig, String filter) { Set<Map<String, String>> groupDetailsMap = new HashSet<Map<String, String>>(); - + //Load our LDAP specific properties // Load the BaseDN String baseDN = (String) systemConfig.get(SystemSetting.LDAP_BASE_DN.getInternalName());
@@ -423,7 +423,8 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { int defaultPageSize = 1000; // only if they're enabled in the UI. if (useQueryPaging) { - String groupPageSize = systemConfig.getProperty(SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.name(), "" + String groupPageSize = systemConfig.getProperty( + SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.getInternalName(), "" + defaultPageSize); if ((groupPageSize != null) && (!groupPageSize.trim().isEmpty())) { int passedInPageSize = -1;
commit dcd7ed118b33c3de223f4e78b8eec62234dd0a48 Author: Simeon Pinder spinder@redhat.com Date: Mon Jun 17 12:26:59 2013 -0400
Clean up some deprecated usage of Properties by LDAP, and small refactoring.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java index 7473321..d80732e 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java @@ -94,27 +94,30 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { private SystemManagerLocal systemManager;
public Set<Map<String, String>> findAvailableGroups() { - Properties systemConfig = systemManager.getSystemConfiguration(subjectManager.getOverlord()); - Set<Map<String, String>> emptyAvailableGroups = new HashSet<Map<String, String>>(); + //load current system properties + Properties systemConfig = populateProperties(systemManager.getSystemSettings(subjectManager.getOverlord()));
//retrieve the filters. - String groupFilter = (String) systemConfig.get(RHQConstants.LDAPGroupFilter); + String groupFilter = (String) systemConfig.get(SystemSetting.LDAP_GROUP_FILTER.getInternalName()); if ((groupFilter != null) && (!groupFilter.trim().isEmpty())) { String filter; - if (groupFilter.startsWith("(") && groupFilter.endsWith(")")) + if (groupFilter.startsWith("(") && groupFilter.endsWith(")")) { filter = groupFilter; // RFC 2254 does not allow for ((expression)) - else + } else { filter = String.format("(%s)", groupFilter); // not wrapped in (), wrap it + }
return buildGroup(systemConfig, filter); } + + Set<Map<String, String>> emptyAvailableGroups = new HashSet<Map<String, String>>(); return emptyAvailableGroups; }
public Set<String> findAvailableGroupsFor(String userName) { - Properties options = systemManager.getSystemConfiguration(subjectManager.getOverlord()); - String groupFilter = options.getProperty(RHQConstants.LDAPGroupFilter, ""); - String groupMember = options.getProperty(RHQConstants.LDAPGroupMember, ""); + Properties options = populateProperties(systemManager.getSystemSettings(subjectManager.getOverlord())); + String groupFilter = options.getProperty(SystemSetting.LDAP_GROUP_FILTER.getInternalName(), ""); + String groupMember = options.getProperty(SystemSetting.LDAP_GROUP_MEMBER.getInternalName(), ""); String groupUsePosix = options.getProperty(SystemSetting.LDAP_GROUP_USE_POSIX.getInternalName(), "false"); if (groupUsePosix == null) { groupUsePosix = Boolean.toString(false);//default to false @@ -287,26 +290,27 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { }
public Map<String, String> findLdapUserDetails(String userName) { - Properties systemConfig = systemManager.getSystemConfiguration(subjectManager.getOverlord()); + Properties systemConfig = populateProperties(systemManager.getSystemSettings(subjectManager.getOverlord())); + HashMap<String, String> userDetails = new HashMap<String, String>(); // Load our LDAP specific properties Properties env = getProperties(systemConfig);
// Load the BaseDN - String baseDN = (String) systemConfig.get(RHQConstants.LDAPBaseDN); + String baseDN = (String) systemConfig.get(SystemSetting.LDAP_BASE_DN.getInternalName());
// Load the LoginProperty - String loginProperty = (String) systemConfig.get(RHQConstants.LDAPLoginProperty); + String loginProperty = (String) systemConfig.get(SystemSetting.LDAP_LOGIN_PROPERTY.getInternalName()); if (loginProperty == null) { // Use the default loginProperty = "cn"; } // Load any information we may need to bind - String bindDN = (String) systemConfig.get(RHQConstants.LDAPBindDN); - String bindPW = (String) systemConfig.get(RHQConstants.LDAPBindPW); + String bindDN = (String) systemConfig.get(SystemSetting.LDAP_BASE_DN.getInternalName()); + String bindPW = (String) systemConfig.get(SystemSetting.LDAP_BIND_PW.getInternalName());
// Load any search filter - String searchFilter = (String) systemConfig.get(RHQConstants.LDAPFilter); + String searchFilter = (String) systemConfig.get(SystemSetting.LDAP_FILTER.getInternalName()); if (bindDN != null) { env.setProperty(Context.SECURITY_PRINCIPAL, bindDN); env.setProperty(Context.SECURITY_CREDENTIALS, bindPW); @@ -375,29 +379,27 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { * @see org.jboss.security.auth.spi.UsernamePasswordLoginModule#validatePassword(java.lang.String,java.lang.String) */ protected Set<Map<String, String>> buildGroup(Properties systemConfig, String filter) { - Set<Map<String, String>> ret = new HashSet<Map<String, String>>(); - // Load our LDAP specific properties - Properties env = getProperties(systemConfig); + Set<Map<String, String>> groupDetailsMap = new HashSet<Map<String, String>>();
// Load the BaseDN - String baseDN = (String) systemConfig.get(RHQConstants.LDAPBaseDN); + String baseDN = (String) systemConfig.get(SystemSetting.LDAP_BASE_DN.getInternalName());
// Load the LoginProperty - String loginProperty = (String) systemConfig.get(RHQConstants.LDAPLoginProperty); + String loginProperty = (String) systemConfig.get(SystemSetting.LDAP_LOGIN_PROPERTY.getInternalName()); if (loginProperty == null) { // Use the default loginProperty = "cn"; } // Load any information we may need to bind - String bindDN = (String) systemConfig.get(RHQConstants.LDAPBindDN); - String bindPW = (String) systemConfig.get(RHQConstants.LDAPBindPW); + String bindDN = (String) systemConfig.get(SystemSetting.LDAP_BIND_DN.getInternalName()); + String bindPW = (String) systemConfig.get(SystemSetting.LDAP_BIND_PW.getInternalName()); if (bindDN != null) { - env.setProperty(Context.SECURITY_PRINCIPAL, bindDN); - env.setProperty(Context.SECURITY_CREDENTIALS, bindPW); - env.setProperty(Context.SECURITY_AUTHENTICATION, "simple"); + systemConfig.setProperty(Context.SECURITY_PRINCIPAL, bindDN); + systemConfig.setProperty(Context.SECURITY_CREDENTIALS, bindPW); + systemConfig.setProperty(Context.SECURITY_AUTHENTICATION, "simple"); } try { - InitialLdapContext ctx = new InitialLdapContext(env, null); + InitialLdapContext ctx = new InitialLdapContext(systemConfig, null); SearchControls searchControls = getSearchControls(); /*String filter = "(&(objectclass=groupOfUniqueNames)(uniqueMember=uid=" + userName + ",ou=People, dc=rhndev, dc=redhat, dc=com))";*/ @@ -421,8 +423,7 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { int defaultPageSize = 1000; // only if they're enabled in the UI. 
if (useQueryPaging) { - Properties options = populateProperties(systemManager.getSystemSettings(subjectManager.getOverlord())); - String groupPageSize = options.getProperty(SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.name(), "" + String groupPageSize = systemConfig.getProperty(SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.name(), "" + defaultPageSize); if ((groupPageSize != null) && (!groupPageSize.trim().isEmpty())) { int passedInPageSize = -1; @@ -445,11 +446,12 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { String[] baseDNs = baseDN.split(BASEDN_DELIMITER);
for (int x = 0; x < baseDNs.length; x++) { - executeGroupSearch(filter, ret, ctx, searchControls, baseDNs, x); + executeGroupSearch(filter, groupDetailsMap, ctx, searchControls, baseDNs, x);
// continually parsing pages of results until we're done. // only if they're enabled in the UI. if (useQueryPaging) { + //handle paged results if they're being used here byte[] cookie = null; Control[] controls = ctx.getResponseControls(); @@ -466,7 +468,7 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { //ensure the next requests contains the session/cookie details ctx.setRequestControls(new Control[] { new PagedResultsControl(defaultPageSize, cookie, Control.CRITICAL) }); - executeGroupSearch(filter, ret, ctx, searchControls, baseDNs, x); + executeGroupSearch(filter, groupDetailsMap, ctx, searchControls, baseDNs, x); //empty out cookie cookie = null; //test for further iterations @@ -479,6 +481,25 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { } } } + //continually parsing pages of results until we're done. + while (cookie != null) { + //ensure the next requests contains the session/cookie details + ctx.setRequestControls(new Control[] { new PagedResultsControl(defaultPageSize, cookie, + Control.CRITICAL) }); + executeGroupSearch(filter, groupDetailsMap, ctx, searchControls, baseDNs, x); + //empty out cookie + cookie = null; + //test for further iterations + controls = ctx.getResponseControls(); + if (controls != null) { + for (Control control : controls) { + if (control instanceof PagedResultsResponseControl) { + PagedResultsResponseControl pagedResult = (PagedResultsResponseControl) control; + cookie = pagedResult.getCookie(); + } + } + } + } } } } @@ -498,32 +519,7 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { log.error("Unexpected LDAP communciation error:" + iex.getMessage(), iex); throw new LdapCommunicationException(iex); } - - return ret; - } - - /** Translate SystemSettings to familiar Properties instance since we're - * passing not one but multiple values. 
- * - * @param systemSettings - * @return - */ - private Properties populateProperties(SystemSettings systemSettings) { - Properties properties = null; - if (systemSettings != null) { - properties = new Properties(); - Set<Entry<SystemSetting, String>> entries = systemSettings.entrySet(); - for (Entry<SystemSetting, String> entry : entries) { - SystemSetting key = entry.getKey(); - if (key != null) { - String value = entry.getValue(); - if (value != null) { - properties.put(key.name(), value); - } - } - } - } - return properties; + return groupDetailsMap; }
/** Executes the LDAP group query using the filters, context and search controls, etc. parameters passed in. @@ -566,6 +562,30 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { } }
+ /** Translate SystemSettings to familiar Properties instance since we're + * passing not one but multiple values. + * + * @param systemSettings + * @return + */ + private Properties populateProperties(SystemSettings systemSettings) { + Properties properties = null; + if (systemSettings != null) { + properties = new Properties(); + Set<Entry<SystemSetting, String>> entries = systemSettings.entrySet(); + for (Entry<SystemSetting, String> entry : entries) { + SystemSetting key = entry.getKey(); + if (key != null) { + String value = entry.getValue(); + if (value != null) { + properties.put(key.name(), value); + } + } + } + } + return properties; + } + /** * Load a default set of properties to use when connecting to the LDAP server. If basic authentication is needed, * the caller must set Context.SECURITY_PRINCIPAL, Context.SECURITY_CREDENTIALS and Context.SECURITY_AUTHENTICATION @@ -576,7 +596,7 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { private Properties getProperties(Properties systemConfig) { Properties env = new Properties(systemConfig); // Set our default factory name if one is not given - String factoryName = env.getProperty(RHQConstants.LDAPFactory); + String factoryName = env.getProperty(SystemSetting.LDAP_NAMING_FACTORY.getInternalName()); env.setProperty(Context.INITIAL_CONTEXT_FACTORY, factoryName);
// Setup SSL if requested
commit 32327e648f5b7a8f05da633f3aed3abc27adf875 Author: Jirka Kremser jkremser@redhat.com Date: Wed Jul 31 15:09:45 2013 +0200
[BZ 976827] - Description text area in General properties tab under Alerts -> Definition is not getting set to blank one it has been edited. Instead it displays the previous value that was stored - setting an empty string if descriptionField.getValue() == null
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/definitions/GeneralPropertiesAlertDefinitionForm.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/definitions/GeneralPropertiesAlertDefinitionForm.java index 1f47a81..d501787 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/definitions/GeneralPropertiesAlertDefinitionForm.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/definitions/GeneralPropertiesAlertDefinitionForm.java @@ -175,9 +175,7 @@ public class GeneralPropertiesAlertDefinitionForm extends DynamicForm implements public void saveAlertDefinition() { alertDefinition.setName(nameField.getValue().toString()); Object description = descriptionField.getValue(); - if (null != description) { - alertDefinition.setDescription(description.toString()); - } + alertDefinition.setDescription(null == description ? "" : description.toString());
String prioritySelected = prioritySelection.getValue().toString(); alertDefinition.setPriority(AlertPriority.valueOf(prioritySelected));
commit ac9f0b85670fa41d9d08f3de7455ce8436ec5e73 Author: Mike Thompson mithomps@redhat.com Date: Tue Jul 30 17:43:16 2013 -0700
i18n availability summary pie graph.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java index 057a77f..fb8e096 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java @@ -18,14 +18,15 @@ */ package org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype;
+import java.util.ArrayList; +import java.util.List; + import com.smartgwt.client.widgets.HTMLFlow; + import org.rhq.core.domain.measurement.MeasurementUnits; import org.rhq.enterprise.gui.coregui.client.util.Log; import org.rhq.enterprise.gui.coregui.client.util.MeasurementConverterClient;
-import java.util.ArrayList; -import java.util.List; - /** * Contains the javascript chart definition for an implementation of the d3 availability chart. This implementation is * just a line that changes color based on availability type: up=green, down=red, orange=disabled, unknown=grey, @@ -48,7 +49,8 @@ public class AvailabilitySummaryPieGraphType {
StringBuilder divAndSvgDefs = new StringBuilder(); divAndSvgDefs.append("<div id="availSummaryChart" >"); - divAndSvgDefs.append("<svg xmlns="http://www.w3.org/2000/svg%5C" version="1.1" style="height:"+HEIGHT+"px;" ></svg>"); + divAndSvgDefs.append("<svg xmlns="http://www.w3.org/2000/svg%5C" version="1.1" style="height:" + HEIGHT + + "px;" ></svg>"); divAndSvgDefs.append("</div>"); HTMLFlow graphFlow = new HTMLFlow(divAndSvgDefs.toString()); graphFlow.setWidth(WIDTH); @@ -56,7 +58,8 @@ public class AvailabilitySummaryPieGraphType { return graphFlow; }
- public void setAvailabilityData(String upLabel, double upPercent, String downLabel, double downPercent, String disabledLabel, double disabledPercent ){ + public void setAvailabilityData(String upLabel, double upPercent, String downLabel, double downPercent, + String disabledLabel, double disabledPercent) { availabilitySummaries = new ArrayList<AvailabilitySummary>(); availabilitySummaries.add(new AvailabilitySummary(upLabel, upPercent)); availabilitySummaries.add(new AvailabilitySummary(downLabel, downPercent)); @@ -70,8 +73,9 @@ public class AvailabilitySummaryPieGraphType { // loop through the avail intervals for (AvailabilitySummary availabilitySummary : availabilitySummaries) { sb.append("{ "label":"" + availabilitySummary.getLabel() + "", "); - sb.append(" "value": "" + MeasurementConverterClient.format(availabilitySummary.getValue(), - MeasurementUnits.PERCENTAGE, true) + "" },"); + sb.append(" "value": "" + + MeasurementConverterClient.format(availabilitySummary.getValue(), MeasurementUnits.PERCENTAGE, + true) + "" },"); } sb.setLength(sb.length() - 1); } @@ -87,47 +91,50 @@ public class AvailabilitySummaryPieGraphType { public native void drawJsniChart() /*-{ console.log("Draw Availability Summary Pie Chart");
- var w = 100, - h = 100, - r = h/2, - color = $wnd.d3.scale.category10(), - data = this.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilitySummaryPieGraphType::getAvailabilitySummaryJson()(), - vis = $wnd.d3.select("#availSummaryChart svg") - .append("g") - .data(data) - .attr("width", w) - .attr("height", h) - .attr("transform", "translate(" + r + "," + r + ")"), - arc = $wnd.d3.svg.arc() - .outerRadius(r), - pie = $wnd.d3.layout.pie(), - arcs = vis.selectAll("g.slice") - .data(pie) - .enter() - .append("g") - .attr("class", "slice"); + var global = this, + w = 100, + h = 100, + r = h / 2, + color = $wnd.d3.scale.category10(), + data = global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilitySummaryPieGraphType::getAvailabilitySummaryJson()(), + vis = $wnd.d3.select("#availSummaryChart svg") + .append("g") + .data(data) + .attr("width", w) + .attr("height", h) + .attr("transform", "translate(" + r + "," + r + ")"), + arc = $wnd.d3.svg.arc() + .outerRadius(r), + pie = $wnd.d3.layout.pie(), + arcs = vis.selectAll("g.slice") + .data(pie) + .enter() + .append("g") + .attr("class", "slice");
arcs.append("path") - .attr("fill", function (d, i) { - return color(i); - }) - .attr("d", arc); + .attr("fill", function (d, i) { + return color(i); + }) + .attr("d", arc);
arcs.append("text") - .attr("transform", function (d) { - d.innerRadius = 0; - d.outerRadius = r; - return "translate(" + arc.centroid(d) + ")"; - }) - .attr("text-anchor", "middle") - .text(function (d, i) { - return data[i].value; - }); + .attr("transform", function (d) { + d.innerRadius = 0; + d.outerRadius = r; + return "translate(" + arc.centroid(d) + ")"; + }) + .attr("text-anchor", "middle") + .style("font-size", "9px") + .style("font-family", "Arial, Verdana, sans-serif;") + .attr("fill", "#000") + .text(function (d, i) { + return data[i].value; + }); console.log("done with avail summary pie graph");
}-*/;
- private static class AvailabilitySummary { final private String label; final private double value; diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/AddToDashboardComponent.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/AddToDashboardComponent.java index ce4f8ef..52b0d86 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/AddToDashboardComponent.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/AddToDashboardComponent.java @@ -23,6 +23,7 @@ import java.util.LinkedHashMap; import com.google.gwt.user.client.rpc.AsyncCallback; import com.smartgwt.client.widgets.IButton; import com.smartgwt.client.widgets.events.ClickEvent; +import com.smartgwt.client.widgets.form.DynamicForm; import com.smartgwt.client.widgets.form.fields.SelectItem; import com.smartgwt.client.widgets.form.fields.events.ChangeEvent; import com.smartgwt.client.widgets.form.fields.events.ChangeHandler; @@ -79,7 +80,7 @@ public class AddToDashboardComponent extends ToolStrip implements Enhanced { addToDashboardButton.disable();
dashboardSelectItem = new SelectItem(); - dashboardSelectItem.setTitle("Dashboards"); + dashboardSelectItem.setTitle(MSG.chart_metrics_add_to_dashboard_label()); dashboardSelectItem.setWidth(300); dashboardSelectItem.setPickListWidth(210); populateDashboardMenu(); @@ -102,7 +103,7 @@ public class AddToDashboardComponent extends ToolStrip implements Enhanced { .getMetricDefinitions()) { if (measurementDefinition.getId() == selectedRecord .getAttributeAsInt(MetricsViewDataSource.FIELD_METRIC_DEF_ID)) { - Log.debug("Add to Dashboard -- Storing: " + measurementDefinition.getDisplayName() + Log.info("Add to Dashboard -- Storing: " + measurementDefinition.getDisplayName() + " in " + selectedDashboard.getName()); storeDashboardMetric(selectedDashboard, resource, measurementDefinition); break; diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/ResourceMetricAvailabilityView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/ResourceMetricAvailabilityView.java index cffcfd7..5835f61 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/ResourceMetricAvailabilityView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/ResourceMetricAvailabilityView.java @@ -40,7 +40,7 @@ import org.rhq.enterprise.gui.coregui.client.util.MeasurementConverterClient; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout;
/** - * This shows the availability history for a resource. + * This view shows the detail availability data in tabular form and a pie chart for available %. * * @author Jay Shaughnessy * @author John Mazzitelli @@ -49,6 +49,7 @@ import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; public class ResourceMetricAvailabilityView extends EnhancedVLayout {
private Resource resource; + private DynamicForm form; private StaticTextItem currentField; private StaticTextItem availTimeField; private StaticTextItem downTimeField; @@ -81,7 +82,7 @@ public class ResourceMetricAvailabilityView extends EnhancedVLayout {
private DynamicForm createSummaryForm() { - DynamicForm form = new DynamicForm(); + form = new DynamicForm(); form.setWidth100(); form.setAutoHeight(); form.setMargin(10); @@ -138,14 +139,13 @@ public class ResourceMetricAvailabilityView extends EnhancedVLayout { currentTimeField.setShowTitle(false);
CanvasItem availPieChartItem = new CanvasItem(); - //@todo: i18n - availPieChartItem.setTitle("Availability"); + availPieChartItem.setTitle(MSG.pie_chart_availability_summary_label()); availPieChartItem.setCanvas(availabilitySummaryPieGraph.createGraphMarker()); availPieChartItem.setRowSpan(3); availPieChartItem.setVAlign(VerticalAlignment.TOP); availPieChartItem.setTitleVAlign(VerticalAlignment.TOP); - availPieChartItem.setHeight(60); - availPieChartItem.setWidth(60); + availPieChartItem.setHeight(AvailabilitySummaryPieGraphType.HEIGHT); + availPieChartItem.setWidth(AvailabilitySummaryPieGraphType.WIDTH);
form.setItems(currentField, availPieChartItem, availTimeField, downTimeField, disabledTimeField, failureCountField, disabledCountField, mtbfField, mttrField, unknownField, @@ -164,17 +164,15 @@ public class ResourceMetricAvailabilityView extends EnhancedVLayout { public void onSuccess(ResourceAvailabilitySummary result) { Log.debug("reloadSummaryData");
- //@todo: i18n availabilitySummaryPieGraph.setAvailabilityData( - "Up", result.getUpPercentage(), - "Down", result.getDownPercentage(), - "Disabled" ,result.getDisabledPercentage() + MSG.pie_chart_availability_summary_up(), result.getUpPercentage(), + MSG.pie_chart_availability_summary_down(), result.getDownPercentage(), + MSG.pie_chart_availability_summary_disabled() ,result.getDisabledPercentage() ); new Timer(){
@Override public void run() { - Log.debug("Run Avail Graph"); availabilitySummaryPieGraph.drawJsniChart(); } }.schedule(150); diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties index 040537d..b800267 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties @@ -21,6 +21,7 @@ chart_hover_start_label = Start chart_hover_time_format = %I:%M:%S %p chart_ie_not_supported = Charting is not available for this browser. chart_metrics= Metrics +chart_metrics_add_to_dashboard_label= Dashboards chart_metrics_collapse_tooltip= Click here to see additional tabular availability data. chart_metrics_expand_tooltip= Click here to collapse additional availability detail. chart_metrics_sparkline_header= Chart @@ -460,6 +461,10 @@ filter_from_date = From filter_to_date = To group_tree_groupOfResourceType = Group of [{0}] group_tree_partialClusterTooltip = {0} out of {1} group members have a ''{2}'' resource +pie_chart_availability_summary_disabled = Disabled +pie_chart_availability_summary_down = Down +pie_chart_availability_summary_label = Availability +pie_chart_availability_summary_up = Up util_ancestry_parentAncestry = Parent Ancestry for: util_errorHandler_nullException = exception was null util_monitoringRequestCallback_error_checkServerStatusFailure = Unable to determine login status - check Server status. 
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties index 2e23eff..d57e8e9 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties @@ -49,6 +49,7 @@ chart_hover_date_format = %d.%m.%y chart_hover_time_format = %H:%M:%S ##chart_ie_not_supported = Charting is not available for this browser. ##chart_metrics= Metrics +##chart_metrics_add_to_dashboard_label= Dashboards ##chart_metrics_collapse_tooltip= Click here to see additional tabular availability data. ##chart_metrics_expand_tooltip= Click here to collapse additional availability detail. ##chart_metrics_sparkline_header= Chart @@ -479,6 +480,10 @@ filter_from_date = Od filter_to_date = Do group_tree_groupOfResourceType = Skupina: [{0}] group_tree_partialClusterTooltip = {0} z {1} Älenů skupiny má "{2}" zdroj +##pie_chart_availability_summary_disabled = Disabled +##pie_chart_availability_summary_down = Down +##pie_chart_availability_summary_label = Availability +##pie_chart_availability_summary_up = Up util_ancestry_parentAncestry = Původ rodiÄe pro: util_errorHandler_nullException = vÃœjimka byla null util_monitoringRequestCallback_error_checkServerStatusFailure = NepodaÅilo se urÄit stav pÅihlaÅ¡ovánà - zkontrolujte stav serveru. 
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties index b4fe981..93f8426 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties @@ -27,6 +27,7 @@ chart_ie_not_supported = Charting ist bei diesem Browser nicht unterstÃŒtzt ##chart_metrics_collapse_tooltip= Click here to see additional tabular availability data. ##chart_metrics_expand_tooltip= Click here to collapse additional availability detail. ##chart_metrics_sparkline_header= Chart +##chart_metrics_add_to_dashboard_label= Dashboards chart_no_data_label = Keine Daten vorhanden ##chart_single_value_label = Value chart_slider_button_bar_day = Tag @@ -458,6 +459,10 @@ filter_from_date = Von filter_to_date = Bis group_tree_groupOfResourceType = Gruppe von [{0}] group_tree_partialClusterTooltip = {0} der {1} Gruppenmitglieder haben eine ''{2}'' Ressource +##pie_chart_availability_summary_disabled = Disabled +##pie_chart_availability_summary_down = Down +##pie_chart_availability_summary_label = Availability +##pie_chart_availability_summary_up = Up util_disambiguationReportDecorator_pluginSuffix = ({0} Plugin) util_errorHandler_nullException = Exception war null ##util_monitoringRequestCallback_error_checkServerStatusFailure = Unable to determine login status - check Server status. 
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties index c1a8521..ae2e805 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties @@ -28,6 +28,7 @@ ##chart_metrics_collapse_tooltip= Click here to see additional tabular availability data. ##chart_metrics_expand_tooltip= Click here to collapse additional availability detail. ##chart_metrics_sparkline_header= Chart +##chart_metrics_add_to_dashboard_label= Dashboards ##chart_no_data_label = No Data ##chart_single_value_label = Value ##chart_slider_button_bar_minute = Min @@ -456,6 +457,10 @@ filter_from_date = éå§ filter_to_date = çµäº ##group_tree_groupOfResourceType = Group of [{0}] group_tree_partialClusterTooltip = {1} ã®äžã® {0} ã°ã«ãŒãã¡ã³ããŒã ''{2}'' ãªãœãŒã¹ãæã£ãŠããŸã +##pie_chart_availability_summary_disabled = Disabled +##pie_chart_availability_summary_down = Down +##pie_chart_availability_summary_label = Availability +##pie_chart_availability_summary_up = Up util_ancestry_parentAncestry = 芪ã®å ç¥ : util_disambiguationReportDecorator_pluginSuffix = ({0} ãã©ã°ã€ã³) util_errorHandler_nullException = äŸå€ã¯ null ã§ãã diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties index 2958fbe..c61d0d6 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties @@ -26,6 +26,7 @@ 
##chart_metrics_collapse_tooltip= Click here to see additional tabular availability data. ##chart_metrics_expand_tooltip= Click here to collapse additional availability detail. ##chart_metrics_sparkline_header= Chart +##chart_metrics_add_to_dashboard_label= Dashboards ##chart_no_data_label = No Data ##chart_single_value_label = Value ##chart_slider_button_bar_minute = Min @@ -413,6 +414,10 @@ favorites_resources = ìŠê²šì°Ÿêž° 늬ìì€ filter_from_date = ìì group_tree_groupOfResourceType = Group of [{0}] group_tree_partialClusterTooltip = {1} ì€ {0} 귞룹 ë©€ë²ê° ''{2}'' ììì ê°ì§ê³ ììµëë€. +##pie_chart_availability_summary_disabled = Disabled +##pie_chart_availability_summary_down = Down +##pie_chart_availability_summary_label = Availability +##pie_chart_availability_summary_up = Up util_ancestry_parentAncestry = ë¶ëªšì ì¡°ì: util_errorHandler_nullException = ììžë nullìŽììµëë€. util_monitoringRequestCallback_error_checkServerStatusFailure = ë¡ê·žìž ìí륌 íìží ì ììµëë€ - ìë²ì ìí륌 íìžíììì diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties index 62f31f5..0d505ec 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties @@ -25,6 +25,7 @@ ##chart_metrics_collapse_tooltip= Click here to see additional tabular availability data. ##chart_metrics_expand_tooltip= Click here to collapse additional availability detail. 
##chart_metrics_sparkline_header= Chart +##chart_metrics_add_to_dashboard_label= Dashboards ##chart_no_data_label = No Data ##chart_single_value_label = Value ##chart_slider_button_bar_minute = Min @@ -461,6 +462,10 @@ favorites_resources = Recursos Favoritos ##filter_to_date = To ##group_tree_groupOfResourceType = Group of [{0}] group_tree_partialClusterTooltip = {0} out of {1} group members have a ''{2}'' resource +##pie_chart_availability_summary_disabled = Disabled +##pie_chart_availability_summary_down = Down +##pie_chart_availability_summary_label = Availability +##pie_chart_availability_summary_up = Up util_ancestry_parentAncestry = Ancestral para: util_errorHandler_nullException = Exce\u00E7\u00E3o nula util_monitoringRequestCallback_error_checkServerStatusFailure = Imposs\u00EDvel verificar o status do login - verifique o status do Servidor. diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties index eca8c24..ace5cf2 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties @@ -25,6 +25,7 @@ ##chart_metrics_collapse_tooltip= Click here to see additional tabular availability data. ##chart_metrics_expand_tooltip= Click here to collapse additional availability detail. 
##chart_metrics_sparkline_header= Chart +##chart_metrics_add_to_dashboard_label= Dashboards ##chart_no_data_label = No Data ##chart_single_value_label = Value ##chart_slider_button_bar_minute = Min @@ -2550,6 +2551,10 @@ favorites_resources = ÐзбÑаММÑе ÑеÑÑÑÑÑ filter_from_date = ÐÑ filter_to_date = ÐП group_tree_partialClusterTooltip = {0} Оз {1} ÑлеМПв гÑÑÐ¿Ð¿Ñ ÐžÐŒÐµÑÑ ÑеÑÑÑÑ ''{2}'' +##pie_chart_availability_summary_disabled = Disabled +##pie_chart_availability_summary_down = Down +##pie_chart_availability_summary_label = Availability +##pie_chart_availability_summary_up = Up util_errorHandler_nullException = ÐПзМОклП null ОÑклÑÑеМОе util_monitoringRequestCallback_error_checkServerStatusFailure = Ðе ÑЎаеÑÑÑ ÐŸÐ¿ÑеЎелОÑÑ ÑÑаÑÑÑ Ð°Ð²ÑПÑОзаÑОО - пÑПвеÑÑÑе ÑПÑÑПÑМОе ÑеÑвеÑа. util_userPerm_loadFailGlobal = Ðе ÑЎаеÑÑÑ Ð·Ð°Ð³ÑÑзОÑÑ Ð²Ð°ÑО глПбалÑÐœÑе пПлМПЌПÑÐžÑ - Ме пÑеЎПÑÑавлеМÑ. diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties index e71f1fb..287a7f9 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties @@ -25,6 +25,7 @@ ##chart_metrics_collapse_tooltip= Click here to see additional tabular availability data. ##chart_metrics_expand_tooltip= Click here to collapse additional availability detail. 
##chart_metrics_sparkline_header= Chart +##chart_metrics_add_to_dashboard_label= Dashboards ##chart_no_data_label = No Data ##chart_single_value_label = Value ##chart_slider_button_bar_minute = Min @@ -451,6 +452,10 @@ favorites_resources = \u8d44\u6e90\u6536\u85cf\u5939 ##filter_to_date = To ##group_tree_groupOfResourceType = Group of [{0}] group_tree_partialClusterTooltip = \u9664\u53bb {1},{0}\u7684\u7ec4\u6210\u5458\u62e5\u6709 \u4e00\u4e2a''{2}''\u8d44\u6e90 +##pie_chart_availability_summary_disabled = Disabled +##pie_chart_availability_summary_down = Down +##pie_chart_availability_summary_label = Availability +##pie_chart_availability_summary_up = Up util_ancestry_parentAncestry = Parent Ancestry for: util_errorHandler_nullException = \u5f02\u5e38\u662f null util_monitoringRequestCallback_error_checkServerStatusFailure = \u65e0\u6cd5\u786e\u5b9a\u767b\u5f55\u72b6\u6001- \u68c0\u67e5\u670d\u52a1\u5668\u72b6\u6001.
commit b6290571f9b6c3f72886ce4ebf85dad38e11698c Author: Mike Thompson mithomps@redhat.com Date: Tue Jul 30 11:42:37 2013 -0700
Fix bug with new metrics database and not much history results in NPE on metrics page.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java index 82a9d98..bb67587 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java @@ -121,9 +121,9 @@ public class MetricsTableView extends Table<MetricsViewDataSource> implements Re metricsTableListGrid.addSelectionChangedHandler(new SelectionChangedHandler() { @Override public void onSelectionChanged(SelectionEvent selectionEvent) { - if(metricsTableListGrid.getSelectedRecords().length > 0){ + if (metricsTableListGrid.getSelectedRecords().length > 0) { addToDashboardComponent.enableAddToDashboardButton(); - }else { + } else { addToDashboardComponent.disableAddToDashboardButton(); } } @@ -304,11 +304,15 @@ public class MetricsTableView extends Table<MetricsViewDataSource> implements Re public void onDataArrived(DataArrivedEvent dataArrivedEvent) { int startRow = dataArrivedEvent.getStartRow(); int endRow = dataArrivedEvent.getEndRow(); + for (int i = startRow; i < endRow; i++) { - if (null != metricsTableView && null != metricsTableView.expandedRows - && metricsTableView.expandedRows.contains(getRecord(i).getAttributeAsInt( - MetricsViewDataSource.FIELD_METRIC_DEF_ID))) { - expandRecord(getRecord(i)); + ListGridRecord listGridRecord = getRecord(i); + if (null != listGridRecord) { + int metricDefinitionId = listGridRecord + .getAttributeAsInt(MetricsViewDataSource.FIELD_METRIC_DEF_ID); + if (null != metricsTableView && metricsTableView.expandedRows.contains(metricDefinitionId)) { + 
expandRecord(listGridRecord); + } } } }
commit 865522aac13f00413c818ad6714118ffcf913f6f Author: mtho11 mikecthompson@gmail.com Date: Tue Jul 30 10:59:02 2013 -0700
Change layout of fields in availability detail form of metric page.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java index ee06724..3e573dd 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java @@ -230,8 +230,6 @@ public class AvailabilityOverUnderGraphType implements AvailabilityGraphType { .attr("height", height + margin.top + margin.bottom) .attr("transform", "translate(" + margin.left + "," + margin.top + ")");
- console.error("avail.chartSelection: "+ availChartContext.chartSelection); -
svg.selectAll("rect.availBars") .data(availChartContext.data) diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java index 23effb0..057a77f 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java @@ -18,7 +18,10 @@ */ package org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype;
+import com.smartgwt.client.widgets.HTMLFlow; +import org.rhq.core.domain.measurement.MeasurementUnits; import org.rhq.enterprise.gui.coregui.client.util.Log; +import org.rhq.enterprise.gui.coregui.client.util.MeasurementConverterClient;
import java.util.ArrayList; import java.util.List; @@ -32,11 +35,27 @@ import java.util.List; */ public class AvailabilitySummaryPieGraphType {
+ public static final int HEIGHT = 100; + public static final int WIDTH = 100; + private List<AvailabilitySummary> availabilitySummaries;
public AvailabilitySummaryPieGraphType() { }
+ public HTMLFlow createGraphMarker() { + Log.debug("drawGraph marker in AvailabilitySummaryPieGraph"); + + StringBuilder divAndSvgDefs = new StringBuilder(); + divAndSvgDefs.append("<div id="availSummaryChart" >"); + divAndSvgDefs.append("<svg xmlns="http://www.w3.org/2000/svg%5C" version="1.1" style="height:"+HEIGHT+"px;" ></svg>"); + divAndSvgDefs.append("</div>"); + HTMLFlow graphFlow = new HTMLFlow(divAndSvgDefs.toString()); + graphFlow.setWidth(WIDTH); + graphFlow.setHeight(HEIGHT); + return graphFlow; + } + public void setAvailabilityData(String upLabel, double upPercent, String downLabel, double downPercent, String disabledLabel, double disabledPercent ){ availabilitySummaries = new ArrayList<AvailabilitySummary>(); availabilitySummaries.add(new AvailabilitySummary(upLabel, upPercent)); @@ -51,7 +70,8 @@ public class AvailabilitySummaryPieGraphType { // loop through the avail intervals for (AvailabilitySummary availabilitySummary : availabilitySummaries) { sb.append("{ "label":"" + availabilitySummary.getLabel() + "", "); - sb.append(" "value": "" + availabilitySummary.getValue() * 100 + "" },"); + sb.append(" "value": "" + MeasurementConverterClient.format(availabilitySummary.getValue(), + MeasurementUnits.PERCENTAGE, true) + "" },"); } sb.setLength(sb.length() - 1); } @@ -67,26 +87,21 @@ public class AvailabilitySummaryPieGraphType { public native void drawJsniChart() /*-{ console.log("Draw Availability Summary Pie Chart");
- var data = this.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilitySummaryPieGraphType::getAvailabilitySummaryJson()(); - var w = 100, h = 100, - r = 30, - color = $wnd.d3.scale.category10(); - - var vis = $wnd.d3.select("#availSummaryChart svg") + r = h/2, + color = $wnd.d3.scale.category10(), + data = this.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilitySummaryPieGraphType::getAvailabilitySummaryJson()(), + vis = $wnd.d3.select("#availSummaryChart svg") .append("g") .data(data) .attr("width", w) .attr("height", h) - .attr("transform", "translate(" + r + "," + r + ")"); - - var arc = $wnd.d3.svg.arc() - .outerRadius(r); - - var pie = $wnd.d3.layout.pie(); - - var arcs = vis.selectAll("g.slice") + .attr("transform", "translate(" + r + "," + r + ")"), + arc = $wnd.d3.svg.arc() + .outerRadius(r), + pie = $wnd.d3.layout.pie(), + arcs = vis.selectAll("g.slice") .data(pie) .enter() .append("g") @@ -108,7 +123,7 @@ public class AvailabilitySummaryPieGraphType { .text(function (d, i) { return data[i].value; }); - console.warn("done with avail summary pie graph"); + console.log("done with avail summary pie graph");
}-*/;
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/ResourceMetricAvailabilityView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/ResourceMetricAvailabilityView.java index 961ea5a..cffcfd7 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/ResourceMetricAvailabilityView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/ResourceMetricAvailabilityView.java @@ -22,8 +22,9 @@ package org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitori
import com.google.gwt.user.client.Timer; import com.google.gwt.user.client.rpc.AsyncCallback; -import com.smartgwt.client.widgets.HTMLFlow; +import com.smartgwt.client.types.VerticalAlignment; import com.smartgwt.client.widgets.form.DynamicForm; +import com.smartgwt.client.widgets.form.fields.CanvasItem; import com.smartgwt.client.widgets.form.fields.FormItem; import com.smartgwt.client.widgets.form.fields.StaticTextItem;
@@ -49,11 +50,8 @@ public class ResourceMetricAvailabilityView extends EnhancedVLayout {
private Resource resource; private StaticTextItem currentField; - private StaticTextItem availField; private StaticTextItem availTimeField; - private StaticTextItem downField; private StaticTextItem downTimeField; - private StaticTextItem disabledField; private StaticTextItem disabledTimeField; private StaticTextItem failureCountField; private StaticTextItem disabledCountField; @@ -71,29 +69,16 @@ public class ResourceMetricAvailabilityView extends EnhancedVLayout { availabilitySummaryPieGraph = new AvailabilitySummaryPieGraphType();
setWidth100(); - setHeight(265); + setHeight(165); }
@Override protected void onInit() { super.onInit(); - addMember(createGraphMarker()); addMember(createSummaryForm()); }
- public HTMLFlow createGraphMarker() { - Log.debug("drawGraph marker in AvailabilitySummaryPieGraph"); - - StringBuilder divAndSvgDefs = new StringBuilder(); - divAndSvgDefs.append("<div id="availSummaryChart" - + "" ><svg xmlns="http://www.w3.org/2000/svg%5C" version="1.1" style="height:100px;">"); - divAndSvgDefs.append("</svg></div>"); - HTMLFlow graph = new HTMLFlow(divAndSvgDefs.toString()); - graph.setWidth100(); - graph.setHeight(100); - //addMember(graph); - return graph; - } +
private DynamicForm createSummaryForm() { DynamicForm form = new DynamicForm(); @@ -108,28 +93,16 @@ public class ResourceMetricAvailabilityView extends EnhancedVLayout { currentField.setColSpan(4);
// row 2 - availField = new StaticTextItem("avail", MSG.view_resource_monitor_availability_availability()); - availField.setWrapTitle(false); - prepareTooltip(availField, MSG.view_resource_monitor_availability_availability_tooltip()); - availTimeField = new StaticTextItem("availTime", MSG.view_resource_monitor_availability_uptime()); availTimeField.setWrapTitle(false); prepareTooltip(availTimeField, MSG.view_resource_monitor_availability_uptime_tooltip());
// row 3 - downField = new StaticTextItem("down", MSG.view_resource_monitor_availability_down()); - downField.setWrapTitle(false); - prepareTooltip(downField, MSG.view_resource_monitor_availability_down_tooltip()); - downTimeField = new StaticTextItem("downTime", MSG.view_resource_monitor_availability_downtime()); downTimeField.setWrapTitle(false); prepareTooltip(downTimeField, MSG.view_resource_monitor_availability_downtime_tooltip());
// row 4 - disabledField = new StaticTextItem("disabled", MSG.view_resource_monitor_availability_disabled()); - disabledField.setWrapTitle(false); - prepareTooltip(disabledField, MSG.view_resource_monitor_availability_disabled_tooltip()); - disabledTimeField = new StaticTextItem("disabledTime", MSG.view_resource_monitor_availability_disabledTime()); disabledTimeField.setWrapTitle(false); prepareTooltip(disabledTimeField, MSG.view_resource_monitor_availability_disabledTime_tooltip()); @@ -164,7 +137,17 @@ public class ResourceMetricAvailabilityView extends EnhancedVLayout { currentTimeField.setColSpan(4); currentTimeField.setShowTitle(false);
- form.setItems(currentField, availField, availTimeField, downField, downTimeField, disabledField, + CanvasItem availPieChartItem = new CanvasItem(); + //@todo: i18n + availPieChartItem.setTitle("Availability"); + availPieChartItem.setCanvas(availabilitySummaryPieGraph.createGraphMarker()); + availPieChartItem.setRowSpan(3); + availPieChartItem.setVAlign(VerticalAlignment.TOP); + availPieChartItem.setTitleVAlign(VerticalAlignment.TOP); + availPieChartItem.setHeight(60); + availPieChartItem.setWidth(60); + + form.setItems(currentField, availPieChartItem, availTimeField, downTimeField, disabledTimeField, failureCountField, disabledCountField, mtbfField, mttrField, unknownField, currentTimeField);
@@ -181,6 +164,7 @@ public class ResourceMetricAvailabilityView extends EnhancedVLayout { public void onSuccess(ResourceAvailabilitySummary result) { Log.debug("reloadSummaryData");
+ //@todo: i18n availabilitySummaryPieGraph.setAvailabilityData( "Up", result.getUpPercentage(), "Down", result.getDownPercentage(), @@ -197,16 +181,10 @@ public class ResourceMetricAvailabilityView extends EnhancedVLayout {
currentField.setValue(MSG.view_resource_monitor_availability_currentStatus_value(result .getCurrent().getName(), TimestampCellFormatter.format(result.getLastChange().getTime()))); - availField.setValue(MeasurementConverterClient.format(result.getUpPercentage(), - MeasurementUnits.PERCENTAGE, true)); availTimeField.setValue(MeasurementConverterClient.format((double) result.getUpTime(), MeasurementUnits.MILLISECONDS, true)); - downField.setValue(MeasurementConverterClient.format(result.getDownPercentage(), - MeasurementUnits.PERCENTAGE, true)); downTimeField.setValue(MeasurementConverterClient.format((double) result.getDownTime(), MeasurementUnits.MILLISECONDS, true)); - disabledField.setValue(MeasurementConverterClient.format(result.getDisabledPercentage(), - MeasurementUnits.PERCENTAGE, true)); disabledTimeField.setValue(MeasurementConverterClient.format((double) result.getDisabledTime(), MeasurementUnits.MILLISECONDS, true)); failureCountField.setValue(result.getFailures());
commit 3c77e8807e7f07b3f309622156637a519ce0ef1b Author: mtho11 mikecthompson@gmail.com Date: Mon Jul 29 20:07:11 2013 -0700
Availability pie chart for metrics page.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java index 811a579..ee06724 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java @@ -230,6 +230,8 @@ public class AvailabilityOverUnderGraphType implements AvailabilityGraphType { .attr("height", height + margin.top + margin.bottom) .attr("transform", "translate(" + margin.left + "," + margin.top + ")");
+ console.error("avail.chartSelection: "+ availChartContext.chartSelection); +
svg.selectAll("rect.availBars") .data(availChartContext.data) @@ -329,7 +331,6 @@ public class AvailabilityOverUnderGraphType implements AvailabilityGraphType { // Public API draw: function (availChartContext) { "use strict"; - console.log("AvailabilityChart"); drawBars(availChartContext); createHovers(); } @@ -338,7 +339,6 @@ public class AvailabilityOverUnderGraphType implements AvailabilityGraphType {
}();
- console.log("Avail Data records: "+availChartContext.data.length); if (typeof availChartContext.data !== 'undefined' && availChartContext.data.length > 0) { availabilityGraph.draw(availChartContext); console.log("Availability Chart Drawn"); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java new file mode 100644 index 0000000..23effb0 --- /dev/null +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java @@ -0,0 +1,133 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2012 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ +package org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype; + +import org.rhq.enterprise.gui.coregui.client.util.Log; + +import java.util.ArrayList; +import java.util.List; + +/** + * Contains the javascript chart definition for an implementation of the d3 availability chart. 
This implementation is + * just a line that changes color based on availability type: up=green, down=red, orange=disabled, unknown=grey, + * empty=grey, warn=yellow. This version of the availability graph shows continuous intervals. + * + * @author Mike Thompson + */ +public class AvailabilitySummaryPieGraphType { + + private List<AvailabilitySummary> availabilitySummaries; + + public AvailabilitySummaryPieGraphType() { + } + + public void setAvailabilityData(String upLabel, double upPercent, String downLabel, double downPercent, String disabledLabel, double disabledPercent ){ + availabilitySummaries = new ArrayList<AvailabilitySummary>(); + availabilitySummaries.add(new AvailabilitySummary(upLabel, upPercent)); + availabilitySummaries.add(new AvailabilitySummary(downLabel, downPercent)); + availabilitySummaries.add(new AvailabilitySummary(disabledLabel, disabledPercent)); + + } + + public String getAvailabilitySummaryJson() { + StringBuilder sb = new StringBuilder("["); + if (null != availabilitySummaries) { + // loop through the avail intervals + for (AvailabilitySummary availabilitySummary : availabilitySummaries) { + sb.append("{ "label":"" + availabilitySummary.getLabel() + "", "); + sb.append(" "value": "" + availabilitySummary.getValue() * 100 + "" },"); + } + sb.setLength(sb.length() - 1); + } + + sb.append("]"); + Log.debug(sb.toString()); + return sb.toString(); + } + + /** + * The magic JSNI to draw the charts with d3. 
+ */ + public native void drawJsniChart() /*-{ + console.log("Draw Availability Summary Pie Chart"); + + var data = this.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilitySummaryPieGraphType::getAvailabilitySummaryJson()(); + + var w = 100, + h = 100, + r = 30, + color = $wnd.d3.scale.category10(); + + var vis = $wnd.d3.select("#availSummaryChart svg") + .append("g") + .data(data) + .attr("width", w) + .attr("height", h) + .attr("transform", "translate(" + r + "," + r + ")"); + + var arc = $wnd.d3.svg.arc() + .outerRadius(r); + + var pie = $wnd.d3.layout.pie(); + + var arcs = vis.selectAll("g.slice") + .data(pie) + .enter() + .append("g") + .attr("class", "slice"); + + arcs.append("path") + .attr("fill", function (d, i) { + return color(i); + }) + .attr("d", arc); + + arcs.append("text") + .attr("transform", function (d) { + d.innerRadius = 0; + d.outerRadius = r; + return "translate(" + arc.centroid(d) + ")"; + }) + .attr("text-anchor", "middle") + .text(function (d, i) { + return data[i].value; + }); + console.warn("done with avail summary pie graph"); + + }-*/; + + + private static class AvailabilitySummary { + final private String label; + final private double value; + + private AvailabilitySummary(String label, double value) { + this.label = label; + this.value = value; + } + + private String getLabel() { + return label; + } + + private double getValue() { + return value; + } + } +} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java index 09a7bc3..82a9d98 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java +++ 
b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java @@ -305,7 +305,7 @@ public class MetricsTableView extends Table<MetricsViewDataSource> implements Re int startRow = dataArrivedEvent.getStartRow(); int endRow = dataArrivedEvent.getEndRow(); for (int i = startRow; i < endRow; i++) { - if (null != metricsTableView.expandedRows + if (null != metricsTableView && null != metricsTableView.expandedRows && metricsTableView.expandedRows.contains(getRecord(i).getAttributeAsInt( MetricsViewDataSource.FIELD_METRIC_DEF_ID))) { expandRecord(getRecord(i)); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/ResourceMetricAvailabilityView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/ResourceMetricAvailabilityView.java index 9d6b892..961ea5a 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/ResourceMetricAvailabilityView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/ResourceMetricAvailabilityView.java @@ -20,7 +20,9 @@
package org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.table;
+import com.google.gwt.user.client.Timer; import com.google.gwt.user.client.rpc.AsyncCallback; +import com.smartgwt.client.widgets.HTMLFlow; import com.smartgwt.client.widgets.form.DynamicForm; import com.smartgwt.client.widgets.form.fields.FormItem; import com.smartgwt.client.widgets.form.fields.StaticTextItem; @@ -31,6 +33,8 @@ import org.rhq.core.domain.resource.composite.ResourceAvailabilitySummary; import org.rhq.enterprise.gui.coregui.client.CoreGUI; import org.rhq.enterprise.gui.coregui.client.components.table.TimestampCellFormatter; import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; +import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilitySummaryPieGraphType; +import org.rhq.enterprise.gui.coregui.client.util.Log; import org.rhq.enterprise.gui.coregui.client.util.MeasurementConverterClient; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout;
@@ -58,22 +62,39 @@ public class ResourceMetricAvailabilityView extends EnhancedVLayout { private StaticTextItem unknownField; private StaticTextItem currentTimeField;
+ private AvailabilitySummaryPieGraphType availabilitySummaryPieGraph; + public ResourceMetricAvailabilityView(Resource resource) { super();
this.resource = resource; + availabilitySummaryPieGraph = new AvailabilitySummaryPieGraphType();
setWidth100(); - setHeight(165); + setHeight(265); }
@Override protected void onInit() { super.onInit(); - + addMember(createGraphMarker()); addMember(createSummaryForm()); }
+ public HTMLFlow createGraphMarker() { + Log.debug("drawGraph marker in AvailabilitySummaryPieGraph"); + + StringBuilder divAndSvgDefs = new StringBuilder(); + divAndSvgDefs.append("<div id=\"availSummaryChart\"" + + "\" ><svg xmlns=\"http://www.w3.org/2000/svg\" version=\"1.1\" style=\"height:100px;\">"); + divAndSvgDefs.append("</svg></div>"); + HTMLFlow graph = new HTMLFlow(divAndSvgDefs.toString()); + graph.setWidth100(); + graph.setHeight(100); + //addMember(graph); + return graph; + } + private DynamicForm createSummaryForm() { DynamicForm form = new DynamicForm(); form.setWidth100(); @@ -158,6 +179,21 @@ public class ResourceMetricAvailabilityView extends EnhancedVLayout {
@Override public void onSuccess(ResourceAvailabilitySummary result) { + Log.debug("reloadSummaryData"); + + availabilitySummaryPieGraph.setAvailabilityData( + "Up", result.getUpPercentage(), + "Down", result.getDownPercentage(), + "Disabled" ,result.getDisabledPercentage() + ); + new Timer(){ + + @Override + public void run() { + Log.debug("Run Avail Graph"); + availabilitySummaryPieGraph.drawJsniChart(); + } + }.schedule(150);
currentField.setValue(MSG.view_resource_monitor_availability_currentStatus_value(result .getCurrent().getName(), TimestampCellFormatter.format(result.getLastChange().getTime()))); @@ -189,6 +225,8 @@ public class ResourceMetricAvailabilityView extends EnhancedVLayout {
currentTimeField.setValue(MSG.view_resource_monitor_availability_currentAsOf(TimestampCellFormatter .format(result.getCurrentTime()))); + + }
@Override
commit 476581aacd624552d8757c4b82015c3afc6920cb Author: John Sanda jsanda@redhat.com Date: Tue Jul 30 16:13:09 2013 -0400
forgot to add test resource file in previous commit
diff --git a/modules/plugins/rhq-storage/src/test/resources/cassandra.yaml b/modules/plugins/rhq-storage/src/test/resources/cassandra.yaml new file mode 100644 index 0000000..fd7973b --- /dev/null +++ b/modules/plugins/rhq-storage/src/test/resources/cassandra.yaml @@ -0,0 +1,690 @@ +# Cassandra storage config YAML + +# NOTE: +# See http://wiki.apache.org/cassandra/StorageConfiguration for +# full explanations of configuration directives +# /NOTE + +# The name of the cluster. This is mainly used to prevent machines in +# one logical cluster from joining another. +cluster_name: rhq + +# This defines the number of tokens randomly assigned to this node on the ring +# The more tokens, relative to other nodes, the larger the proportion of data +# that this node will store. You probably want all nodes to have the same number +# of tokens assuming they have equal hardware capability. +# +# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, +# and will use the initial_token as described below. +# +# Specifying initial_token will override this setting. +# +# If you already have a cluster with 1 token per node, and wish to migrate to +# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations +num_tokens: 256 + +# If you haven't specified num_tokens, or have set it to the default of 1 then +# you should always specify InitialToken when setting up a production +# cluster for the first time, and often when adding capacity later. +# The principle is that each node should be given an equal slice of +# the token ring; see http://wiki.apache.org/cassandra/Operations +# for more details. +# +# If blank, Cassandra will request a token bisecting the range of +# the heaviest-loaded existing node. If there is no load information +# available, such as is the case with a new cluster, it will pick +# a random token, which will lead to hot spots. 
+#initial_token: + +# See http://wiki.apache.org/cassandra/HintedHandoff +hinted_handoff_enabled: true +# this defines the maximum amount of time a dead host will have hints +# generated. After it has been dead this long, new hints for it will not be +# created until it has been seen alive and gone down again. +max_hint_window_in_ms: 10800000 # 3 hours +# throttle in KB's per second, per delivery thread +hinted_handoff_throttle_in_kb: 1024 +# Number of threads with which to deliver hints; +# Consider increasing this number when you have multi-dc deployments, since +# cross-dc handoff tends to be slower +max_hints_delivery_threads: 2 + +# The following setting populates the page cache on memtable flush and compaction +# WARNING: Enable this setting only when the whole node's data fits in memory. +# Defaults to: false +# populate_io_cache_on_flush: false + +# Authentication backend, implementing IAuthenticator; used to identify users +# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, +# PasswordAuthenticator}. +# +# - AllowAllAuthenticator performs no checks - set it to disable authentication. +# - PasswordAuthenticator relies on username/password pairs to authenticate +# users. It keeps usernames and hashed passwords in system_auth.credentials table. +# Please increase system_auth keyspace replication factor if you use this authenticator. +authenticator: org.apache.cassandra.auth.PasswordAuthenticator + +# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions +# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, +# CassandraAuthorizer}. +# +# - AllowAllAuthorizer allows any action to any user - set it to disable authorization. +# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please +# increase system_auth keyspace replication factor if you use this authorizer. 
+authorizer: org.apache.cassandra.auth.CassandraAuthorizer + +# Validity period for permissions cache (fetching permissions can be an +# expensive operation depending on the authorizer, CassandraAuthorizer is +# one example). Defaults to 2000, set to 0 to disable. +# Will be disabled automatically for AllowAllAuthorizer. +permissions_validity_in_ms: 2000 + +# The partitioner is responsible for distributing rows (by key) across +# nodes in the cluster. Any IPartitioner may be used, including your +# own as long as it is on the classpath. Out of the box, Cassandra +# provides org.apache.cassandra.dht.{Murmur3Partitioner, RandomPartitioner +# ByteOrderedPartitioner, OrderPreservingPartitioner (deprecated)}. +# +# - RandomPartitioner distributes rows across the cluster evenly by md5. +# This is the default prior to 1.2 and is retained for compatibility. +# - Murmur3Partitioner is similar to RandomPartioner but uses Murmur3_128 +# Hash Function instead of md5. When in doubt, this is the best option. +# - ByteOrderedPartitioner orders rows lexically by key bytes. BOP allows +# scanning rows in key order, but the ordering can generate hot spots +# for sequential insertion workloads. +# - OrderPreservingPartitioner is an obsolete form of BOP, that stores +# - keys in a less-efficient format and only works with keys that are +# UTF8-encoded Strings. +# - CollatingOPP colates according to EN,US rules rather than lexical byte +# ordering. Use this as an example if you need custom collation. +# +# See http://wiki.apache.org/cassandra/Operations for more on +# partitioners and token selection. +partitioner: org.apache.cassandra.dht.Murmur3Partitioner + +# directories where Cassandra should store data on disk. 
+data_file_directories: + - /Users/jsanda/Development/redhat/rhq/rhq-data/storage-1/data + +# commit log +commitlog_directory: /Users/jsanda/Development/redhat/rhq/rhq-data/storage-1/commit_log + +# policy for data disk failures: +# stop: shut down gossip and Thrift, leaving the node effectively dead, but +# still inspectable via JMX. +# best_effort: stop using the failed disk and respond to requests based on +# remaining available sstables. This means you WILL see obsolete +# data at CL.ONE! +# ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra +disk_failure_policy: stop + +# Maximum size of the key cache in memory. +# +# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the +# minimum, sometimes more. The key cache is fairly tiny for the amount of +# time it saves, so it's worthwhile to use it at large numbers. +# The row cache saves even more time, but must store the whole values of +# its rows, so it is extremely space-intensive. It's best to only use the +# row cache if you have hot rows or static rows. +# +# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. +# +# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache. +key_cache_size_in_mb: + +# Duration in seconds after which Cassandra should +# safe the keys cache. Caches are saved to saved_caches_directory as +# specified in this configuration file. +# +# Saved caches greatly improve cold-start speeds, and is relatively cheap in +# terms of I/O for the key cache. Row cache saving is much more expensive and +# has limited use. +# +# Default is 14400 or 4 hours. +key_cache_save_period: 14400 + +# Number of keys from the key cache to save +# Disabled by default, meaning all keys are going to be saved +# key_cache_keys_to_save: 100 + +# Maximum size of the row cache in memory. +# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. 
+# +# Default value is 0, to disable row caching. +row_cache_size_in_mb: 0 + +# Duration in seconds after which Cassandra should +# safe the row cache. Caches are saved to saved_caches_directory as specified +# in this configuration file. +# +# Saved caches greatly improve cold-start speeds, and is relatively cheap in +# terms of I/O for the key cache. Row cache saving is much more expensive and +# has limited use. +# +# Default is 0 to disable saving the row cache. +row_cache_save_period: 0 + +# Number of keys from the row cache to save +# Disabled by default, meaning all keys are going to be saved +# row_cache_keys_to_save: 100 + +# The provider for the row cache to use. +# +# Supported values are: ConcurrentLinkedHashCacheProvider, SerializingCacheProvider +# +# SerializingCacheProvider serialises the contents of the row and stores +# it in native memory, i.e., off the JVM Heap. Serialized rows take +# significantly less memory than "live" rows in the JVM, so you can cache +# more rows in a given memory footprint. And storing the cache off-heap +# means you can use smaller heap sizes, reducing the impact of GC pauses. +# +# It is also valid to specify the fully-qualified class name to a class +# that implements org.apache.cassandra.cache.IRowCacheProvider. +# +# Defaults to SerializingCacheProvider +row_cache_provider: SerializingCacheProvider + +# saved caches +saved_caches_directory: /Users/jsanda/Development/redhat/rhq/rhq-data/storage-1/saved_caches + +# commitlog_sync may be either "periodic" or "batch." +# When in batch mode, Cassandra won't ack writes until the commit log +# has been fsynced to disk. It will wait up to +# commitlog_sync_batch_window_in_ms milliseconds for other writes, before +# performing the sync. +# +# commitlog_sync: batch +# commitlog_sync_batch_window_in_ms: 50 +# +# the other option is "periodic" where writes may be acked immediately +# and the CommitLog is simply synced every commitlog_sync_period_in_ms +# milliseconds. 
+commitlog_sync: periodic +commitlog_sync_period_in_ms: 10000 + +# The size of the individual commitlog file segments. A commitlog +# segment may be archived, deleted, or recycled once all the data +# in it (potentally from each columnfamily in the system) has been +# flushed to sstables. +# +# The default size is 32, which is almost always fine, but if you are +# archiving commitlog segments (see commitlog_archiving.properties), +# then you probably want a finer granularity of archiving; 8 or 16 MB +# is reasonable. +commitlog_segment_size_in_mb: 32 + +# any class that implements the SeedProvider interface and has a +# constructor that takes a Map<String, String> of parameters will do. +seed_provider: + # Addresses of hosts that are deemed contact points. + # Cassandra nodes use this list of hosts to find each other and learn + # the topology of the ring. You must change this if you are running + # multiple nodes! + - class_name: org.apache.cassandra.locator.SimpleSeedProvider + parameters: + # seeds is actually a comma-delimited list of addresses. + # Ex: "<ip1>,<ip2>,<ip3>" + - seeds: "127.0.0.1" + +# emergency pressure valve: each time heap usage after a full (CMS) +# garbage collection is above this fraction of the max, Cassandra will +# flush the largest memtables. +# +# Set to 1.0 to disable. Setting this lower than +# CMSInitiatingOccupancyFraction is not likely to be useful. +# +# RELYING ON THIS AS YOUR PRIMARY TUNING MECHANISM WILL WORK POORLY: +# it is most effective under light to moderate load, or read-heavy +# workloads; under truly massive write load, it will often be too +# little, too late. +flush_largest_memtables_at: 0.75 + +# emergency pressure valve #2: the first time heap usage after a full +# (CMS) garbage collection is above this fraction of the max, +# Cassandra will reduce cache maximum _capacity_ to the given fraction +# of the current _size_. 
Should usually be set substantially above +# flush_largest_memtables_at, since that will have less long-term +# impact on the system. +# +# Set to 1.0 to disable. Setting this lower than +# CMSInitiatingOccupancyFraction is not likely to be useful. +reduce_cache_sizes_at: 0.85 +reduce_cache_capacity_to: 0.6 + +# For workloads with more data than can fit in memory, Cassandra's +# bottleneck will be reads that need to fetch data from +# disk. "concurrent_reads" should be set to (16 * number_of_drives) in +# order to allow the operations to enqueue low enough in the stack +# that the OS and drives can reorder them. +# +# On the other hand, since writes are almost never IO bound, the ideal +# number of "concurrent_writes" is dependent on the number of cores in +# your system; (8 * number_of_cores) is a good rule of thumb. +concurrent_reads: 32 +concurrent_writes: 32 + +# Total memory to use for memtables. Cassandra will flush the largest +# memtable when this much memory is used. +# If omitted, Cassandra will set it to 1/3 of the heap. +# memtable_total_space_in_mb: 2048 + +# Total space to use for commitlogs. Since commitlog segments are +# mmapped, and hence use up address space, the default size is 32 +# on 32-bit JVMs, and 1024 on 64-bit JVMs. +# +# If space gets above this value (it will round up to the next nearest +# segment multiple), Cassandra will flush every dirty CF in the oldest +# segment and remove it. So a small total commitlog space will tend +# to cause more flush activity on less-active columnfamilies. +# commitlog_total_space_in_mb: 4096 + +# This sets the amount of memtable flush writer threads. These will +# be blocked by disk io, and each one will hold a memtable in memory +# while blocked. If you have a large heap and many data directories, +# you can increase this value for better flush performance. +# By default this will be set to the amount of data directories defined. 
+#memtable_flush_writers: 1 + +# the number of full memtables to allow pending flush, that is, +# waiting for a writer thread. At a minimum, this should be set to +# the maximum number of secondary indexes created on a single CF. +memtable_flush_queue_size: 4 + +# Whether to, when doing sequential writing, fsync() at intervals in +# order to force the operating system to flush the dirty +# buffers. Enable this to avoid sudden dirty buffer flushing from +# impacting read latencies. Almost always a good idea on SSD:s; not +# necessarily on platters. +trickle_fsync: false +trickle_fsync_interval_in_kb: 10240 + +# TCP port, for commands and data +storage_port: 7100 + +# SSL port, for encrypted communication. Unused unless enabled in +# encryption_options +ssl_storage_port: 7101 + +# Address to bind to and tell other Cassandra nodes to connect to. You +# _must_ change this if you want multiple nodes to be able to +# communicate! +# +# Leaving it blank leaves it up to InetAddress.getLocalHost(). This +# will always do the Right Thing *if* the node is properly configured +# (hostname, name resolution, etc), and the Right Thing is to use the +# address associated with the hostname (it might not be). +# +# Setting this to 0.0.0.0 is always wrong. +listen_address: 127.0.0.1 + +# Address to broadcast to other Cassandra nodes +# Leaving this blank will set it to the same value as listen_address +# broadcast_address: 1.2.3.4 + +# Internode authentication backend, implementing IInternodeAuthenticator; +# used to allow/disallow connections from peer nodes. +internode_authenticator: org.rhq.cassandra.auth.RhqInternodeAuthenticator + +# Whether to start the native transport server. +# Currently, only the thrift server is started by default because the native +# transport is considered beta. +# Please note that the address on which the native transport is bound is the +# same as the rpc_address. The port however is different and specified below. 
+start_native_transport: true +# port for the CQL native transport to listen for clients on +native_transport_port: 9142 +# The minimum and maximum threads for handling requests when the native +# transport is used. The meaning is those is similar to the one of +# rpc_min_threads and rpc_max_threads, though the default differ slightly and +# are the ones below: +# native_transport_min_threads: 16 +native_transport_max_threads: 128 + + +# Whether to start the thrift rpc server. +start_rpc: false +# The address to bind the Thrift RPC service to -- clients connect +# here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if +# you want Thrift to listen on all interfaces. +# +# Leaving this blank has the same effect it does for ListenAddress, +# (i.e. it will be based on the configured hostname of the node). +rpc_address: 127.0.0.1 +# port for Thrift to listen for clients on +rpc_port: 9160 + +# enable or disable keepalive on rpc connections +rpc_keepalive: true + +# Cassandra provides three out-of-the-box options for the RPC Server: +# +# sync -> One thread per thrift connection. For a very large number of clients, memory +# will be your limiting factor. On a 64 bit JVM, 128KB is the minimum stack size +# per thread, and that will correspond to your use of virtual memory (but physical memory +# may be limited depending on use of stack space). +# +# hsha -> Stands for "half synchronous, half asynchronous." All thrift clients are handled +# asynchronously using a small number of threads that does not vary with the amount +# of thrift clients (and thus scales well to many clients). The rpc requests are still +# synchronous (one thread per active request). +# +# The default is sync because on Windows hsha is about 30% slower. On Linux, +# sync/hsha performance is about the same, with hsha of course using less memory. 
+# +# Alternatively, can provide your own RPC server by providing the fully-qualified class name +# of an o.a.c.t.TServerFactory that can create an instance of it. +rpc_server_type: sync + +# Uncomment rpc_min|max_thread to set request pool size limits. +# +# Regardless of your choice of RPC server (see above), the number of maximum requests in the +# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync +# RPC server, it also dictates the number of clients that can be connected at all). +# +# The default is unlimited and thus provide no protection against clients overwhelming the server. You are +# encouraged to set a maximum that makes sense for you in production, but do keep in mind that +# rpc_max_threads represents the maximum number of client requests this server may execute concurrently. +# +# rpc_min_threads: 16 +# rpc_max_threads: 2048 + +# uncomment to set socket buffer sizes on rpc connections +# rpc_send_buff_size_in_bytes: +# rpc_recv_buff_size_in_bytes: + +# uncomment to set socket buffer size for internode communication +# Note that when setting this, the buffer size is limited by net.core.wmem_max +# and when not setting it it is defined by net.ipv4.tcp_wmem +# See: +# /proc/sys/net/core/wmem_max +# /proc/sys/net/core/rmem_max +# /proc/sys/net/ipv4/tcp_wmem +# /proc/sys/net/ipv4/tcp_wmem +# and: man tcp +# internode_send_buff_size_in_bytes: +# internode_recv_buff_size_in_bytes: + +# Frame size for thrift (maximum field length). +thrift_framed_transport_size_in_mb: 15 + +# The max length of a thrift message, including all fields and +# internal thrift overhead. +thrift_max_message_length_in_mb: 16 + +# Set to true to have Cassandra create a hard link to each sstable +# flushed or streamed locally in a backups/ subdirectory of the +# Keyspace data. Removing these links is the operator's +# responsibility. +incremental_backups: false + +# Whether or not to take a snapshot before each compaction. 
Be +# careful using this option, since Cassandra won't clean up the +# snapshots for you. Mostly useful if you're paranoid when there +# is a data format change. +snapshot_before_compaction: false + +# Whether or not a snapshot is taken of the data before keyspace truncation +# or dropping of column families. The STRONGLY advised default of true +# should be used to provide data safety. If you set this flag to false, you will +# lose data on truncation or drop. +auto_snapshot: true + +# Add column indexes to a row after its contents reach this size. +# Increase if your column values are large, or if you have a very large +# number of columns. The competing causes are, Cassandra has to +# deserialize this much of the row to read a single column, so you want +# it to be small - at least if you do many partial-row reads - but all +# the index data is read for each access, so you don't want to generate +# that wastefully either. +column_index_size_in_kb: 64 + +# Size limit for rows being compacted in memory. Larger rows will spill +# over to disk and use a slower two-pass compaction process. A message +# will be logged specifying the row key. +in_memory_compaction_limit_in_mb: 64 + +# Number of simultaneous compactions to allow, NOT including +# validation "compactions" for anti-entropy repair. Simultaneous +# compactions can help preserve read performance in a mixed read/write +# workload, by mitigating the tendency of small sstables to accumulate +# during a single long running compactions. The default is usually +# fine and if you experience problems with compaction running too +# slowly or too fast, you should look at +# compaction_throughput_mb_per_sec first. +# +# concurrent_compactors defaults to the number of cores. +# Uncomment to make compaction mono-threaded, the pre-0.8 default. +#concurrent_compactors: 1 + +# Multi-threaded compaction. When enabled, each compaction will use +# up to one thread per core, plus one thread per sstable being merged. 
+# This is usually only useful for SSD-based hardware: otherwise, +# your concern is usually to get compaction to do LESS i/o (see: +# compaction_throughput_mb_per_sec), not more. +multithreaded_compaction: false + +# Throttles compaction to the given total throughput across the entire +# system. The faster you insert data, the faster you need to compact in +# order to keep the sstable count down, but in general, setting this to +# 16 to 32 times the rate you are inserting data is more than sufficient. +# Setting this to 0 disables throttling. Note that this account for all types +# of compaction, including validation compaction. +compaction_throughput_mb_per_sec: 16 + +# Track cached row keys during compaction, and re-cache their new +# positions in the compacted sstable. Disable if you use really large +# key caches. +compaction_preheat_key_cache: true + +# Throttles all outbound streaming file transfers on this node to the +# given total throughput in Mbps. This is necessary because Cassandra does +# mostly sequential IO when streaming data during bootstrap or repair, which +# can lead to saturating the network connection and degrading rpc performance. +# When unset, the default is 200 Mbps or 25 MB/s. +# stream_throughput_outbound_megabits_per_sec: 200 + +# How long the coordinator should wait for read operations to complete +read_request_timeout_in_ms: 20000 +# How long the coordinator should wait for seq or index scans to complete +range_request_timeout_in_ms: 20000 +# How long the coordinator should wait for writes to complete +write_request_timeout_in_ms: 20000 +# How long the coordinator should wait for truncates to complete +# (This can be much longer, because we need to flush all CFs +# to make sure we can clear out anythink in the commitlog that could +# cause truncated data to reappear.) 
+truncate_request_timeout_in_ms: 60000 +# The default timeout for other, miscellaneous operations +request_timeout_in_ms: 20000 + +# Enable operation timeout information exchange between nodes to accurately +# measure request timeouts, If disabled cassandra will assuming the request +# was forwarded to the replica instantly by the coordinator +# +# Warning: before enabling this property make sure to ntp is installed +# and the times are synchronized between the nodes. +cross_node_timeout: false + +# Enable socket timeout for streaming operation. +# When a timeout occurs during streaming, streaming is retried from the start +# of the current file. This *can* involve re-streaming an important amount of +# data, so you should avoid setting the value too low. +# Default value is 0, which never timeout streams. +# streaming_socket_timeout_in_ms: 0 + +# phi value that must be reached for a host to be marked down. +# most users should never need to adjust this. +# phi_convict_threshold: 8 + +# endpoint_snitch -- Set this to a class that implements +# IEndpointSnitch. The snitch has two functions: +# - it teaches Cassandra enough about your network topology to route +# requests efficiently +# - it allows Cassandra to spread replicas around your cluster to avoid +# correlated failures. It does this by grouping machines into +# "datacenters" and "racks." Cassandra will do its best not to have +# more than one replica on the same "rack" (which may not actually +# be a physical location) +# +# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER, +# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS +# ARE PLACED. +# +# Out of the box, Cassandra provides +# - SimpleSnitch: +# Treats Strategy order as proximity. This improves cache locality +# when disabling read repair, which can further improve throughput. +# Only appropriate for single-datacenter deployments. 
+# - PropertyFileSnitch: +# Proximity is determined by rack and data center, which are +# explicitly configured in cassandra-topology.properties. +# - GossipingPropertyFileSnitch +# The rack and datacenter for the local node are defined in +# cassandra-rackdc.properties and propagated to other nodes via gossip. If +# cassandra-topology.properties exists, it is used as a fallback, allowing +# migration from the PropertyFileSnitch. +# - RackInferringSnitch: +# Proximity is determined by rack and data center, which are +# assumed to correspond to the 3rd and 2nd octet of each node's +# IP address, respectively. Unless this happens to match your +# deployment conventions (as it did Facebook's), this is best used +# as an example of writing a custom Snitch class. +# - Ec2Snitch: +# Appropriate for EC2 deployments in a single Region. Loads Region +# and Availability Zone information from the EC2 API. The Region is +# treated as the Datacenter, and the Availability Zone as the rack. +# Only private IPs are used, so this will not work across multiple +# Regions. +# - Ec2MultiRegionSnitch: +# Uses public IPs as broadcast_address to allow cross-region +# connectivity. (Thus, you should set seed addresses to the public +# IP as well.) You will need to open the storage_port or +# ssl_storage_port on the public IP firewall. (For intra-Region +# traffic, Cassandra will switch to the private IP after +# establishing a connection.) +# +# You can use a custom Snitch by setting this to the full class name +# of the snitch, which will be assumed to be on your classpath. 
+endpoint_snitch: SimpleSnitch + +# controls how often to perform the more expensive part of host score +# calculation +dynamic_snitch_update_interval_in_ms: 100 +# controls how often to reset all host scores, allowing a bad host to +# possibly recover +dynamic_snitch_reset_interval_in_ms: 600000 +# if set greater than zero and read_repair_chance is < 1.0, this will allow +# 'pinning' of replicas to hosts in order to increase cache capacity. +# The badness threshold will control how much worse the pinned host has to be +# before the dynamic snitch will prefer other replicas over it. This is +# expressed as a double which represents a percentage. Thus, a value of +# 0.2 means Cassandra would continue to prefer the static snitch values +# until the pinned host was 20% worse than the fastest. +dynamic_snitch_badness_threshold: 0.1 + +# request_scheduler -- Set this to a class that implements +# RequestScheduler, which will schedule incoming client requests +# according to the specific policy. This is useful for multi-tenancy +# with a single Cassandra cluster. +# NOTE: This is specifically for requests from the client and does +# not affect inter node communication. +# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place +# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of +# client requests to a node with a separate queue for each +# request_scheduler_id. The scheduler is further customized by +# request_scheduler_options as described below. +request_scheduler: org.apache.cassandra.scheduler.NoScheduler + +# Scheduler Options vary based on the type of scheduler +# NoScheduler - Has no options +# RoundRobin +# - throttle_limit -- The throttle_limit is the number of in-flight +# requests per client. Requests beyond +# that limit are queued up until +# running requests can complete. +# The value of 80 here is twice the number of +# concurrent_reads + concurrent_writes. 
+# - default_weight -- default_weight is optional and allows for +# overriding the default which is 1. +# - weights -- Weights are optional and will default to 1 or the +# overridden default_weight. The weight translates into how +# many requests are handled during each turn of the +# RoundRobin, based on the scheduler id. +# +# request_scheduler_options: +# throttle_limit: 80 +# default_weight: 5 +# weights: +# Keyspace1: 1 +# Keyspace2: 5 + +# request_scheduler_id -- An identifer based on which to perform +# the request scheduling. Currently the only valid option is keyspace. +# request_scheduler_id: keyspace + +# index_interval controls the sampling of entries from the primrary +# row index in terms of space versus time. The larger the interval, +# the smaller and less effective the sampling will be. In technicial +# terms, the interval coresponds to the number of index entries that +# are skipped between taking each sample. All the sampled entries +# must fit in memory. Generally, a value between 128 and 512 here +# coupled with a large key cache size on CFs results in the best trade +# offs. This value is not often changed, however if you have many +# very small rows (many to an OS page), then increasing this will +# often lower memory usage without a impact on performance. +index_interval: 128 + +# Enable or disable inter-node encryption +# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that +# users generate their own keys) TLS_RSA_WITH_AES_128_CBC_SHA as the cipher +# suite for authentication, key exchange and encryption of the actual data transfers. +# NOTE: No custom encryption options are enabled at the moment +# The available internode options are : all, none, dc, rack +# +# If set to dc cassandra will encrypt the traffic between the DCs +# If set to rack cassandra will encrypt the traffic between the racks +# +# The passwords used in these options must match the passwords used when generating +# the keystore and truststore. 
For instructions on generating these files, see: +# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSE... +# +server_encryption_options: + internode_encryption: none + keystore: conf/.keystore + keystore_password: cassandra + truststore: conf/.truststore + truststore_password: cassandra + # More advanced defaults below: + # protocol: TLS + # algorithm: SunX509 + # store_type: JKS + # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA] + # require_client_auth: false + +# enable or disable client/server encryption. +client_encryption_options: + enabled: false + keystore: conf/.keystore + keystore_password: cassandra + # require_client_auth: false + # Set truststore and truststore_password if require_client_auth is true + # truststore: conf/.truststore + # truststore_password: cassandra + # More advanced defaults below: + # protocol: TLS + # algorithm: SunX509 + # store_type: JKS + # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA] + +# internode_compression controls whether traffic between nodes is +# compressed. +# can be: all - all traffic is compressed +# dc - traffic between different datacenters is compressed +# none - nothing is compressed. +internode_compression: none + +# Enable or disable tcp_nodelay for inter-dc communication. +# Disabling it will result in larger (but fewer) network packets being sent, +# reducing overhead from the TCP protocol itself, at the cost of increasing +# latency if you block for cross-datacenter responses. +inter_dc_tcp_nodelay: true
commit af4705c8854779ba9354641589cdf2e3f478a712 Author: John Sanda jsanda@redhat.com Date: Tue Jul 30 14:28:38 2013 -0400
add resource config support for cql and gossip ports in rhq-storage plugin
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/cassandra-jvm.properties b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/cassandra-jvm.properties index 1faee9d..2eb5ab5 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/cassandra-jvm.properties +++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/cassandra-jvm.properties @@ -13,7 +13,7 @@ heap_new="-Xmn64M"
heap_dump_on_OOMError="-XX:+HeapDumpOnOutOfMemoryError"
-heap_dump_dir="" +heap_dump_dir=
thread_stack_size="-Xss180k"
diff --git a/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java b/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java index 5276c84..c53c19b 100644 --- a/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java +++ b/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java @@ -108,7 +108,7 @@ public class ConfigEditor { return (Integer) config.get("native_transport_port"); }
- public void setNativeTransportPort(int port) { + public void setNativeTransportPort(Integer port) { config.put("native_transport_port", port); }
@@ -116,7 +116,7 @@ public class ConfigEditor { return (Integer) config.get("storage_port"); }
- public void setStoragePort(int port) { + public void setStoragePort(Integer port) { config.put("storage_port", port); }
diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java index dd5b8a2..edaf9f9 100644 --- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java @@ -5,6 +5,12 @@ import java.io.FileInputStream; import java.io.IOException; import java.util.Properties;
+import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.yaml.snakeyaml.error.YAMLException; + +import org.rhq.cassandra.util.ConfigEditor; +import org.rhq.cassandra.util.ConfigEditorException; import org.rhq.core.domain.configuration.Configuration; import org.rhq.core.domain.configuration.ConfigurationUpdateStatus; import org.rhq.core.domain.configuration.PropertySimple; @@ -18,12 +24,16 @@ import org.rhq.core.util.StringUtil; */ public class StorageNodeConfigDelegate implements ConfigurationFacet {
+ private Log log = LogFactory.getLog(StorageNodeConfigDelegate.class); + private File jvmOptsFile; private File wrapperEnvFile; + private File cassandraYamlFile;
public StorageNodeConfigDelegate(File basedir) { File confDir = new File(basedir, "conf"); jvmOptsFile = new File(confDir, "cassandra-jvm.properties"); + cassandraYamlFile = new File(confDir, "cassandra.yaml");
// for windows, config props also get propagated to the wrapper env if (isWindows()) { @@ -60,6 +70,11 @@ public class StorageNodeConfigDelegate implements ConfigurationFacet { config.put(new PropertySimple("heapDumpDir", new File(basedir, "bin").getAbsolutePath())); }
+ ConfigEditor yamlEditor = new ConfigEditor(cassandraYamlFile); + yamlEditor.load(); + config.put(new PropertySimple("cqlPort", yamlEditor.getNativeTransportPort())); + config.put(new PropertySimple("gossipPort", yamlEditor.getStoragePort())); + return config; }
@@ -135,6 +150,7 @@ public class StorageNodeConfigDelegate implements ConfigurationFacet { Configuration config = configurationUpdateReport.getConfiguration();
updateCassandraJvmProps(config); + updateCassandraYaml(config);
if (isWindows()) { updateWrapperEnv(config); @@ -145,6 +161,8 @@ public class StorageNodeConfigDelegate implements ConfigurationFacet { configurationUpdateReport.setErrorMessage("No configuration update was applied: " + e.getMessage()); } catch (IOException e) { configurationUpdateReport.setErrorMessageFromThrowable(e); + } catch (ConfigEditorException e) { + configurationUpdateReport.setErrorMessageFromThrowable(e); } }
@@ -195,6 +213,43 @@ public class StorageNodeConfigDelegate implements ConfigurationFacet { propertiesUpdater.update(properties); }
+ private void updateCassandraYaml(Configuration newConfig) { + ConfigEditor editor = new ConfigEditor(cassandraYamlFile); + try { + editor.load(); + + PropertySimple cqlPortProperty = newConfig.getSimple("cqlPort"); + if (cqlPortProperty != null) { + editor.setNativeTransportPort(cqlPortProperty.getIntegerValue()); + } + + PropertySimple gossipPortProperty = newConfig.getSimple("gossipPort"); + if (gossipPortProperty != null) { + editor.setStoragePort(gossipPortProperty.getIntegerValue()); + } + + editor.save(); + } catch (ConfigEditorException e) { + if (e.getCause() instanceof YAMLException) { + log.error("Failed to update " + cassandraYamlFile); + log.info("Attempting to restore " + cassandraYamlFile); + try { + editor.restore(); + throw e; + } catch (ConfigEditorException e1) { + log.error("Failed to restore " + cassandraYamlFile + ". A copy of the file prior to any " + + "modifications can be found at " + editor.getBackupFile()); + throw new ConfigEditorException("There was an error updating " + cassandraYamlFile + " and " + + "undoing the changes failed. A copy of the file can be found at " + editor.getBackupFile() + + ". 
See the agent logs for more details.", e); + } + } else { + log.error("No updates were made to " + cassandraYamlFile + " due to an unexpected error", e); + throw e; + } + } + } + private void updateWrapperEnv(Configuration config) throws IOException { PropertiesFileUpdate propertiesUpdater = new PropertiesFileUpdate(wrapperEnvFile.getAbsolutePath()); Properties properties = propertiesUpdater.loadExistingProperties(); diff --git a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml index f5a4f6d..84bb832 100644 --- a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml +++ b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml @@ -175,6 +175,17 @@ <c:simple-property name="heapDumpDir" displayName="Heap Dump Directory" required="false" description="The directory in which heap dumps will be written."/> </c:group> + <c:group name="Ports"> + <c:simple-property name="cqlPort" + displayName="CQL Port" + type="integer" + description="The port on which to listen for client requests over the native, CQL protocol. + This is a shared, cluster-wide setting."/> + <c:simple-property name="gossipPort" + type="integer" + description="The port used for internode communication. This is a shared, cluster-wide setting."/> + + </c:group> </resource-configuration>
<server name="Cassandra Server JVM" diff --git a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeConfigDelegateTest.java b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeConfigDelegateTest.java index 64d14b2..15861c1 100644 --- a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeConfigDelegateTest.java +++ b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeConfigDelegateTest.java @@ -6,17 +6,20 @@ import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; +import java.io.InputStream; import java.lang.reflect.Method; import java.util.Properties;
import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test;
+import org.rhq.cassandra.util.ConfigEditor; import org.rhq.core.domain.configuration.Configuration; import org.rhq.core.domain.configuration.ConfigurationUpdateStatus; import org.rhq.core.domain.configuration.PropertySimple; import org.rhq.core.pluginapi.configuration.ConfigurationUpdateReport; import org.rhq.core.util.file.FileUtil; +import org.rhq.core.util.stream.StreamUtil;
/** * @author John Sanda @@ -25,6 +28,8 @@ public class StorageNodeConfigDelegateTest {
private File basedir;
+ private File cassandraYamlFile; + private StorageNodeConfigDelegate configDelegate;
@BeforeMethod @@ -33,6 +38,11 @@ public class StorageNodeConfigDelegateTest { basedir = new File(dir, getClass().getSimpleName() + "/" + test.getName()); FileUtil.purge(basedir, true); configDelegate = new StorageNodeConfigDelegate(basedir); + + cassandraYamlFile = new File(confDir(), "cassandra.yaml"); + InputStream inputStream = getClass().getResourceAsStream("/cassandra.yaml"); + FileOutputStream outputStream = new FileOutputStream(cassandraYamlFile); + StreamUtil.copy(inputStream, outputStream); }
@@ -55,13 +65,24 @@ public class StorageNodeConfigDelegateTest { public void updateValidConfig() throws Exception { createDefaultConfig();
- Configuration config = new Configuration(); - config.put(new PropertySimple("minHeapSize", "1024M")); - config.put(new PropertySimple("maxHeapSize", "1024M")); - config.put(new PropertySimple("heapNewSize", "256M")); - config.put(new PropertySimple("threadStackSize", "240")); - config.put(new PropertySimple("heapDumpOnOOMError", true)); - config.put(new PropertySimple("heapDumpDir", confDir())); + Configuration config = Configuration.builder() + .addSimple("minHeapSize", "1024M") + .addSimple("maxHeapSize", "1024M") + .addSimple("heapNewSize", "256M") + .addSimple("threadStackSize", "240") + .addSimple("heapDumpOnOOMError", true) + .addSimple("heapDumpDir", confDir()) + .addSimple("cqlPort", 9595) + .addSimple("gossipPort", 9696) + .build(); +// config.put(new PropertySimple("minHeapSize", "1024M")); +// config.put(new PropertySimple("maxHeapSize", "1024M")); +// config.put(new PropertySimple("heapNewSize", "256M")); +// config.put(new PropertySimple("threadStackSize", "240")); +// config.put(new PropertySimple("heapDumpOnOOMError", true)); +// config.put(new PropertySimple("heapDumpDir", confDir())); +// config.put(new PropertySimple("cqlPort", 9595)); +// config.put(new PropertySimple("gossipPort", 9696));
ConfigurationUpdateReport report = new ConfigurationUpdateReport(config);
@@ -78,6 +99,12 @@ public class StorageNodeConfigDelegateTest { "Failed to update property [heap_dump_on_OOMError]"); assertEquals(properties.getProperty("heap_dump_dir"), confDir().getAbsolutePath(), "Failed to update property [heap_dump_dir]"); + + ConfigEditor yamlEditor = new ConfigEditor(cassandraYamlFile); + yamlEditor.load(); + + assertEquals(yamlEditor.getNativeTransportPort(), (Integer) 9595, "Failed to update native_transport_port"); + assertEquals(yamlEditor.getStoragePort(), (Integer) 9696, "Failed to update storage_port"); }
@Test
commit f19e7d84f953356db55deabebc8fe2ec43d276af Author: John Sanda jsanda@redhat.com Date: Tue Jul 30 13:17:15 2013 -0400
make dataFileDirectories property read/write
diff --git a/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java b/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java index 9e1f86b..5276c84 100644 --- a/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java +++ b/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java @@ -88,6 +88,10 @@ public class ConfigEditor { return (List<String>) config.get("data_file_directories"); }
+ public void setDataFileDirectories(List<String> dirs) { + config.put("data_file_directories", dirs); + } + public String getSavedCachesDirectory() { return (String) config.get("saved_caches_directory"); } diff --git a/modules/common/cassandra-util/src/test/java/org/rhq/cassandra/util/ConfigEditorTest.java b/modules/common/cassandra-util/src/test/java/org/rhq/cassandra/util/ConfigEditorTest.java index f339b48..9c3cc16 100644 --- a/modules/common/cassandra-util/src/test/java/org/rhq/cassandra/util/ConfigEditorTest.java +++ b/modules/common/cassandra-util/src/test/java/org/rhq/cassandra/util/ConfigEditorTest.java @@ -1,5 +1,6 @@ package org.rhq.cassandra.util;
+import static java.util.Arrays.asList; import static org.testng.Assert.assertEquals;
import java.io.File; @@ -88,6 +89,24 @@ public class ConfigEditorTest { assertEquals(editor.getStoragePort(), config.storage_port, "Failed to fetch storage_port"); }
+ @Test + public void updateDataFilesDirectories() throws Exception { + ConfigEditor editor = new ConfigEditor(configFile); + editor.load(); + editor.setDataFileDirectories(asList("/data/dir1", "/data/dir2", "data/dir3")); + editor.save(); + + Config config = loadConfig(); + + assertEquals(config.data_file_directories, new String[] {"/data/dir1", "/data/dir2", "data/dir3"}, + "Failed to update data_file_directories"); + + editor.load(); + + assertEquals(editor.getDataFileDirectories().toArray(new String[3]), config.data_file_directories, + "Failed to fetch data_file_directories"); + } + private Config loadConfig() throws Exception { FileInputStream inputStream = new FileInputStream(configFile); org.yaml.snakeyaml.constructor.Constructor constructor =
commit 9fe2884cd3c6602d045dc4fc1a7c6c8820f9ec83 Author: John Sanda jsanda@redhat.com Date: Tue Jul 30 12:57:25 2013 -0400
make storagePort and nativeTransportPort read/write properties
diff --git a/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java b/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java index 0b4a127..9e1f86b 100644 --- a/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java +++ b/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java @@ -100,10 +100,18 @@ public class ConfigEditor { params.put("seeds", StringUtil.arrayToString(seeds)); }
+ public Integer getNativeTransportPort() { + return (Integer) config.get("native_transport_port"); + } + public void setNativeTransportPort(int port) { config.put("native_transport_port", port); }
+ public Integer getStoragePort() { + return (Integer) config.get("storage_port"); + } + public void setStoragePort(int port) { config.put("storage_port", port); } diff --git a/modules/common/cassandra-util/src/test/java/org/rhq/cassandra/util/ConfigEditorTest.java b/modules/common/cassandra-util/src/test/java/org/rhq/cassandra/util/ConfigEditorTest.java index cf344e2..f339b48 100644 --- a/modules/common/cassandra-util/src/test/java/org/rhq/cassandra/util/ConfigEditorTest.java +++ b/modules/common/cassandra-util/src/test/java/org/rhq/cassandra/util/ConfigEditorTest.java @@ -65,6 +65,11 @@ public class ConfigEditorTest { Config config = loadConfig();
assertEquals(config.native_transport_port, (Integer) 9393, "Failed to update native_transport_port"); + + editor.load(); + + assertEquals(editor.getNativeTransportPort(), config.native_transport_port, + "Failed to fetch native_transport_port"); }
@Test @@ -77,6 +82,10 @@ public class ConfigEditorTest { Config config = loadConfig();
assertEquals(config.storage_port, (Integer) 6767, "Failed to update storage_port"); + + editor.load(); + + assertEquals(editor.getStoragePort(), config.storage_port, "Failed to fetch storage_port"); }
private Config loadConfig() throws Exception {
commit 76badf25b9a3932837fb00af3d2bc484b6045a2b Author: John Sanda jsanda@redhat.com Date: Tue Jul 30 10:53:24 2013 -0400
adding back support for deploying additional nodes with internode authentication
When the user installs a new storage node the following steps occur,
* The node is committed into inventory (but not part of storage cluster) * StorageNode entity is created with mode set to INSTALLED * New node is announced to the cluster * Server schedules operation for cluster nodes to update internode auth conf file to include the new node * Server prepares new node for bootstrap * Schedules operation on new node to * Apply cluster settings * Update internode auth conf settings * Restart the node so it bootstraps into cluster * Token ranges will be assigned to node * New node will stream data from other nodes * Server is notified that new node is up and part of the cluster * The operation mode is set to NORMAL * addNodeMaintenance operation is run on each cluster node * Updates replication_factor if necessary * Runs repair if necessary * Deletes old keys * Updates seeds property
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java index 575edc74..f4d3934 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java @@ -56,6 +56,8 @@ import org.rhq.core.domain.resource.Resource; + " FROM StorageNode s " // + "LEFT JOIN FETCH s.resource r " // + " WHERE s.address = :address"), + @NamedQuery(name = StorageNode.QUERY_FIND_ALL_BY_MODE, query = + "SELECT s FROM StorageNode s WHERE s.operationMode = :operationMode"), @NamedQuery(name = StorageNode.QUERY_FIND_ALL_NOT_INSTALLED, query = "SELECT s FROM StorageNode s WHERE NOT s.operationMode = 'INSTALLED'"), @NamedQuery(name = StorageNode.QUERY_FIND_ALL_NORMAL, query = "SELECT s FROM StorageNode s WHERE s.operationMode = 'NORMAL'"), @NamedQuery(name = StorageNode.QUERY_DELETE_BY_ID, query = "" // @@ -91,6 +93,7 @@ public class StorageNode implements Serializable {
public static final String QUERY_FIND_ALL = "StorageNode.findAll"; public static final String QUERY_FIND_BY_ADDRESS = "StorageNode.findByAddress"; + public static final String QUERY_FIND_ALL_BY_MODE = "StorageNode.findAllByMode"; public static final String QUERY_FIND_ALL_NOT_INSTALLED = "StorageNode.findAllCloudMembers"; public static final String QUERY_DELETE_BY_ID = "StorageNode.deleteById"; public static final String QUERY_FIND_ALL_NORMAL = "StorageNode.findAllNormalCloudMembers"; diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 049cf42..ee0f406 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -24,6 +24,8 @@ */ package org.rhq.enterprise.server.cloud;
+import java.net.InetAddress; +import java.net.UnknownHostException; import java.util.ArrayList; import java.util.Arrays; import java.util.Date; @@ -47,6 +49,7 @@ import org.quartz.JobDataMap; import org.quartz.SimpleTrigger; import org.quartz.Trigger;
+import org.rhq.cassandra.schema.SchemaManager; import org.rhq.core.domain.alert.Alert; import org.rhq.core.domain.auth.Subject; import org.rhq.core.domain.authz.Permission; @@ -57,6 +60,8 @@ import org.rhq.core.domain.cloud.StorageNodeConfigurationComposite; import org.rhq.core.domain.cloud.StorageNodeLoadComposite; import org.rhq.core.domain.common.JobTrigger; import org.rhq.core.domain.configuration.Configuration; +import org.rhq.core.domain.configuration.PropertyList; +import org.rhq.core.domain.configuration.PropertySimple; import org.rhq.core.domain.criteria.AlertCriteria; import org.rhq.core.domain.criteria.ResourceGroupCriteria; import org.rhq.core.domain.criteria.ResourceOperationHistoryCriteria; @@ -73,6 +78,7 @@ import org.rhq.core.domain.resource.group.ResourceGroup; import org.rhq.core.domain.util.PageControl; import org.rhq.core.domain.util.PageList; import org.rhq.core.domain.util.PageOrdering; +import org.rhq.core.util.StringUtil; import org.rhq.enterprise.server.RHQConstants; import org.rhq.enterprise.server.alert.AlertManagerLocal; import org.rhq.enterprise.server.auth.SubjectManagerLocal; @@ -82,6 +88,7 @@ import org.rhq.enterprise.server.cloud.instance.ServerManagerLocal; import org.rhq.enterprise.server.configuration.ConfigurationManagerLocal; import org.rhq.enterprise.server.measurement.MeasurementDataManagerLocal; import org.rhq.enterprise.server.operation.OperationManagerLocal; +import org.rhq.enterprise.server.resource.ResourceManagerLocal; import org.rhq.enterprise.server.resource.ResourceTypeManagerLocal; import org.rhq.enterprise.server.resource.group.ResourceGroupManagerLocal; import org.rhq.enterprise.server.rest.reporting.MeasurementConverter; @@ -100,6 +107,14 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
private final Log log = LogFactory.getLog(StorageNodeManagerBean.class);
+ private static final String USERNAME_PROPERTY = "rhq.cassandra.username"; + private static final String PASSWORD_PROPERTY = "rhq.cassandra.password"; + private final static String MAINTENANCE_OPERATION = "addNodeMaintenance"; + private final static String MAINTENANCE_OPERATION_NOTE = "Topology change maintenance."; + private final static String RUN_REPAIR_PROPERTY = "runRepair"; + private final static String UPDATE_SEEDS_LIST = "updateSeedsList"; + private final static String SEEDS_LIST = "seedsList"; + private static final String RHQ_STORAGE_CQL_PORT_PROPERTY = "nativeTransportPort"; private static final String RHQ_STORAGE_JMX_PORT_PROPERTY = "jmxPort"; private static final String RHQ_STORAGE_ADDRESS_PROPERTY = "host"; @@ -139,46 +154,86 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN @EJB private StorageNodeManagerLocal storageNodeManger;
+ @EJB + private ResourceManagerLocal resourceManager; + @Override public void linkResource(Resource resource) { - List<StorageNode> storageNodes = this.getStorageNodes(); - Configuration resourceConfig = resource.getPluginConfiguration(); - String configAddress = resourceConfig.getSimpleValue(RHQ_STORAGE_ADDRESS_PROPERTY); - - if (configAddress != null) { - // TODO Do not add the node to the group until we have verified it has joined the cluster - // StorageNodeMaintenanceJob currently determines if a new node has successfully joined the cluster. - addStorageNodeToGroup(resource); - - boolean storageNodeFound = false; - if (storageNodes != null) { - for (StorageNode storageNode : storageNodes) { - if (configAddress.equals(storageNode.getAddress())) { - storageNode.setResource(resource); - storageNode.setOperationMode(OperationMode.NORMAL); - storageNodeFound = true; - break; - } - } - } + String address = resourceConfig.getSimpleValue(RHQ_STORAGE_ADDRESS_PROPERTY);
- if (!storageNodeFound) { - int cqlPort = Integer.parseInt(resourceConfig.getSimpleValue(RHQ_STORAGE_CQL_PORT_PROPERTY)); - int jmxPort = Integer.parseInt(resourceConfig.getSimpleValue(RHQ_STORAGE_JMX_PORT_PROPERTY)); + if (log.isInfoEnabled()) { + log.info("Linking " + resource + " to storage node at " + address); + } + try { + StorageNode storageNode = findStorageNodeByAddress(InetAddress.getByName(address));
- StorageNode storageNode = new StorageNode(); - storageNode.setAddress(configAddress); - storageNode.setCqlPort(cqlPort); - storageNode.setJmxPort(jmxPort); + if (storageNode != null) { + if (log.isInfoEnabled()) { + log.info(storageNode + " is an existing storage node. No cluster maintenance is necessary."); + } storageNode.setResource(resource); storageNode.setOperationMode(OperationMode.NORMAL); + addStorageNodeToGroup(resource); + } else { + storageNode = new StorageNode(); + storageNode.setAddress(address); + storageNode.setCqlPort(Integer.parseInt(resourceConfig.getSimpleValue(RHQ_STORAGE_CQL_PORT_PROPERTY))); + storageNode.setJmxPort(Integer.parseInt(resourceConfig.getSimpleValue(RHQ_STORAGE_JMX_PORT_PROPERTY))); + storageNode.setResource(resource); + storageNode.setOperationMode(OperationMode.INSTALLED);
entityManager.persist(storageNode);
-// scheduleQuartzJob(storageNodes.size()); + if (log.isInfoEnabled()) { + log.info(storageNode + " is a new storage node and not part of the storage node cluster."); + log.info("Scheduling maintenance operations to bring " + storageNode + " into the cluster..."); + } + + announceNewNode(storageNode); } + } catch (UnknownHostException e) { + throw new RuntimeException("Could not resolve address [" + address + "]. The resource " + resource + + " cannot be linked to a storage node", e); + } + } + + private void announceNewNode(StorageNode newStorageNode) { + if (log.isInfoEnabled()) { + log.info("Announcing " + newStorageNode + " to storage node cluster."); + } + + ResourceGroup storageNodeGroup = getStorageNodeGroup(); + + GroupOperationSchedule schedule = new GroupOperationSchedule(); + schedule.setGroup(storageNodeGroup); + schedule.setHaltOnFailure(false); + schedule.setExecutionOrder(new ArrayList<Resource>(storageNodeGroup.getExplicitResources())); + schedule.setJobTrigger(JobTrigger.createNowTrigger()); + schedule.setSubject(subjectManager.getOverlord()); + schedule.setOperationName("updateKnownNodes"); + + Configuration parameters = new Configuration(); + parameters.put(createPropertyListOfAddresses("ipAddresses", combine(getStorageNodes(), newStorageNode))); + schedule.setParameters(parameters); + + operationManager.scheduleGroupOperation(subjectManager.getOverlord(), schedule); + } + + private List<StorageNode> combine(List<StorageNode> storageNodes, StorageNode storageNode) { + List<StorageNode> newList = new ArrayList<StorageNode>(storageNodes.size() + 1); + newList.addAll(storageNodes); + newList.add(storageNode); + + return newList; + } + + private PropertyList createPropertyListOfAddresses(String propertyName, List<StorageNode> nodes) { + PropertyList list = new PropertyList(propertyName); + for (StorageNode storageNode : nodes) { + list.add(new PropertySimple("address", storageNode.getAddress())); } + return list; }
@Override @@ -214,7 +269,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
ResourceGroup group = getStorageNodeGroup(); resourceGroupManager.addResourcesToGroup(subjectManager.getOverlord(), group.getId(), - new int[] {resource.getId()}); + new int[]{resource.getId()}); }
@Override @@ -232,6 +287,13 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN }
@Override + public void addToStorageNodeGroup(StorageNode storageNode) { + storageNode.setOperationMode(OperationMode.NORMAL); + entityManager.merge(storageNode); + addStorageNodeToGroup(storageNode.getResource()); + } + + @Override public ResourceGroup getStorageNodeGroup() { Subject overlord = subjectManager.getOverlord();
@@ -398,6 +460,19 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN return runner.execute(); }
+ public StorageNode findStorageNodeByAddress(InetAddress address) { + TypedQuery<StorageNode> query = entityManager.<StorageNode> createNamedQuery(StorageNode.QUERY_FIND_BY_ADDRESS, + StorageNode.class); + query.setParameter("address", address.getHostAddress()); + List<StorageNode> result = query.getResultList(); + + if (result != null && result.size() > 0) { + return result.get(0); + } + + return null; + } + @Override @RequiredPermissions({ @RequiredPermission(Permission.MANAGE_SETTINGS), @RequiredPermission(Permission.MANAGE_INVENTORY) }) @@ -472,19 +547,6 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN entityManager.flush(); }
- private StorageNode findStorageNodeByAddress(String address) { - TypedQuery<StorageNode> query = entityManager.<StorageNode> createNamedQuery(StorageNode.QUERY_FIND_BY_ADDRESS, - StorageNode.class); - query.setParameter("address", address); - List<StorageNode> result = query.getResultList(); - - if (result != null && result.size() > 0) { - return result.get(0); - } - - return null; - } - private StorageNodeLoadComposite.MeasurementAggregateWithUnits getMeasurementAggregateWithUnits(Subject subject, int schedId, MeasurementUnits units, long beginTime, long endTime) { MeasurementAggregate measurementAggregate = measurementManager.getAggregate(subject, schedId, beginTime, @@ -641,63 +703,68 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
@Override public boolean updateConfiguration(Subject subject, StorageNodeConfigurationComposite storageNodeConfiguration) { - StorageNode storageNode = findStorageNodeByAddress(storageNodeConfiguration.getStorageNode().getAddress()); + try { + StorageNode storageNode = findStorageNodeByAddress(InetAddress.getByName( + storageNodeConfiguration.getStorageNode().getAddress())); + + if (storageNode != null && storageNode.getResource() != null) { + Configuration parameters = new Configuration(); + parameters.setSimpleValue("jmxPort", storageNodeConfiguration.getJmxPort() + ""); + if (storageNodeConfiguration.getHeapSize() != null) { + parameters.setSimpleValue("heapSize", storageNodeConfiguration.getHeapSize() + ""); + } + if (storageNodeConfiguration.getHeapNewSize() != null) { + parameters.setSimpleValue("heapNewSize", storageNodeConfiguration.getHeapNewSize() + ""); + } + if (storageNodeConfiguration.getThreadStackSize() != null) { + parameters.setSimpleValue("threadStackSize", storageNodeConfiguration.getThreadStackSize() + ""); + } + parameters.setSimpleValue("restartIfRequired", "false");
- if (storageNode != null && storageNode.getResource() != null) { - Configuration parameters = new Configuration(); - parameters.setSimpleValue("jmxPort", storageNodeConfiguration.getJmxPort() + ""); - if (storageNodeConfiguration.getHeapSize() != null) { - parameters.setSimpleValue("heapSize", storageNodeConfiguration.getHeapSize() + ""); - } - if (storageNodeConfiguration.getHeapNewSize() != null) { - parameters.setSimpleValue("heapNewSize", storageNodeConfiguration.getHeapNewSize() + ""); - } - if (storageNodeConfiguration.getThreadStackSize() != null) { - parameters.setSimpleValue("threadStackSize", storageNodeConfiguration.getThreadStackSize() + ""); - } - parameters.setSimpleValue("restartIfRequired", "false"); + Resource storageNodeResource = storageNode.getResource();
- Resource storageNodeResource = storageNode.getResource(); + boolean result = runOperationAndWaitForResult(subject, storageNodeResource, UPDATE_CONFIGURATION_OPERATION, + parameters);
- boolean result = runOperationAndWaitForResult(subject, storageNodeResource, UPDATE_CONFIGURATION_OPERATION, - parameters); + if (result) { + //2. Update the JMX port + //this is a fast operation compared to the restart + storageNode.setJmxPort(storageNodeConfiguration.getJmxPort()); + entityManager.merge(storageNode);
- if (result) { - //2. Update the JMX port - //this is a fast operation compared to the restart - storageNode.setJmxPort(storageNodeConfiguration.getJmxPort()); - entityManager.merge(storageNode); + //3. Restart the storage node + result = runOperationAndWaitForResult(subject, storageNodeResource, RESTART_OPERATION, + new Configuration());
- //3. Restart the storage node - result = runOperationAndWaitForResult(subject, storageNodeResource, RESTART_OPERATION, - new Configuration()); + //4. Update the plugin configuration to talk with the new server + //Up to this point communication with the storage node should not have been affected by the intermediate + //changes + Configuration storageNodePluginConfig = configurationManager.getPluginConfiguration(subject, + storageNodeResource.getId());
- //4. Update the plugin configuration to talk with the new server - //Up to this point communication with the storage node should not have been affected by the intermediate - //changes - Configuration storageNodePluginConfig = configurationManager.getPluginConfiguration(subject, - storageNodeResource.getId()); + String existingJMXPort = storageNodePluginConfig.getSimpleValue("jmxPort"); + String newJMXPort = storageNodeConfiguration.getJmxPort() + "";
- String existingJMXPort = storageNodePluginConfig.getSimpleValue("jmxPort"); - String newJMXPort = storageNodeConfiguration.getJmxPort() + ""; + if (!existingJMXPort.equals(newJMXPort)) { + storageNodePluginConfig.setSimpleValue("jmxPort", newJMXPort);
- if (!existingJMXPort.equals(newJMXPort)) { - storageNodePluginConfig.setSimpleValue("jmxPort", newJMXPort); + String existingConnectionURL = storageNodePluginConfig.getSimpleValue("connectorAddress"); + String newConnectionURL = existingConnectionURL.replace(":" + existingJMXPort + "/", ":" + + storageNodeConfiguration.getJmxPort() + "/"); + storageNodePluginConfig.setSimpleValue("connectorAddress", newConnectionURL);
- String existingConnectionURL = storageNodePluginConfig.getSimpleValue("connectorAddress"); - String newConnectionURL = existingConnectionURL.replace(":" + existingJMXPort + "/", ":" - + storageNodeConfiguration.getJmxPort() + "/"); - storageNodePluginConfig.setSimpleValue("connectorAddress", newConnectionURL); + configurationManager.updatePluginConfiguration(subject, storageNodeResource.getId(), + storageNodePluginConfig); + }
- configurationManager.updatePluginConfiguration(subject, storageNodeResource.getId(), - storageNodePluginConfig); + return result; } - - return result; } - }
- return false; + return false; + } catch (UnknownHostException e) { + throw new RuntimeException("Failed to resolve address for " + storageNodeConfiguration, e); + } }
@Override @@ -754,4 +821,129 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
return successResultFound; } -} + + @Override + public void prepareNewNodesForBootstrap() { + List<StorageNode> newStorageNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE) + .setParameter("operationMode", OperationMode.INSTALLED).getResultList(); + if (newStorageNodes.isEmpty()) { + throw new RuntimeException("Failed to find storage node to bootstrap into cluster."); + } + // Right now, without some user input, we can only reliably bootstrap one node at a + // time. To support bootstrapping multiple nodes concurrently, a mechanism will have + // to be put in place for the user to declare in advance the nodes that are coming + // online. Then we can wait until all declared nodes have been committed into + // inventory and announced to the cluster + StorageNode storageNode = newStorageNodes.get(0); + + if (log.isInfoEnabled()) { + log.info("Preparing to bootstrap " + storageNode + " into cluster..."); + } + + List<StorageNode> existingStorageNodes = getStorageNodes(); + + ResourceOperationSchedule schedule = new ResourceOperationSchedule(); + schedule.setResource(storageNode.getResource()); + schedule.setJobTrigger(JobTrigger.createNowTrigger()); + schedule.setSubject(subjectManager.getOverlord()); + schedule.setOperationName("prepareForBootstrap"); + + Configuration parameters = new Configuration(); + parameters.put(new PropertySimple("cqlPort", existingStorageNodes.get(0).getCqlPort())); + // TODO need to add support for storage_port in cassandra/storage plugins + parameters.put(new PropertySimple("gossipPort", 7100)); + parameters.put(createPropertyListOfAddresses("storageNodeIPAddresses", getStorageNodes())); + + schedule.setParameters(parameters); + + operationManager.scheduleResourceOperation(subjectManager.getOverlord(), schedule); + } + + @Override + public void runAddNodeMaintenance() { + log.info("Preparing to schedule addNodeMaintenance on the storage cluster..."); + + List<StorageNode> storageNodes = 
entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, + StorageNode.class).setParameter("operationMode", OperationMode.NORMAL).getResultList(); + + int clusterSize = storageNodes.size(); + boolean isReadRepairNeeded; + + if (clusterSize >= 4) { + // At 4 nodes we increase the RF to 3. We are not increasing the RF beyond + // that for additional nodes; so, there is no need to run repair if we are + // expanding from a 4 node cluster since the RF remains the same. + isReadRepairNeeded = false; + } else if (clusterSize == 1) { + // The RF will increase since we are going from a single to a multi-node + // cluster; therefore, we want to run repair. + isReadRepairNeeded = true; + } else if (clusterSize == 2) { + if (storageNodes.size() > 3) { + // If we go from 2 to > 3 nodes we will increase the RF to 3; therefore + // we want to run repair. + isReadRepairNeeded = true; + } else { + // If we go from 2 to 3 nodes, we keep the RF at 2 so there is no need + // to run repair. + isReadRepairNeeded = false; + } + } else if (clusterSize == 3) { + // We are increasing the cluster size > 3 which means the RF will be + // updated to 3; therefore, we want to run repair. + isReadRepairNeeded = true; + } else { + // If we cluster size of zero, then something is really screwed up. It + // should always be > 0. 
+ isReadRepairNeeded = storageNodes.size() > 1; + } + + if (isReadRepairNeeded) { + updateTopology(storageNodes); + } + + ResourceGroup storageNodeGroup = getStorageNodeGroup(); + + GroupOperationSchedule schedule = new GroupOperationSchedule(); + schedule.setGroup(storageNodeGroup); + schedule.setHaltOnFailure(false); + schedule.setExecutionOrder(new ArrayList<Resource>(storageNodeGroup.getExplicitResources())); + schedule.setJobTrigger(JobTrigger.createNowTrigger()); + schedule.setSubject(subjectManager.getOverlord()); + schedule.setOperationName(MAINTENANCE_OPERATION); + schedule.setDescription(MAINTENANCE_OPERATION_NOTE); + + Configuration config = new Configuration(); + config.put(createPropertyListOfAddresses(SEEDS_LIST, storageNodes)); + config.put(new PropertySimple(RUN_REPAIR_PROPERTY, isReadRepairNeeded)); + config.put(new PropertySimple(UPDATE_SEEDS_LIST, Boolean.TRUE)); + + schedule.setParameters(config); + + operationManager.scheduleGroupOperation(subjectManager.getOverlord(), schedule); + } + + private boolean updateTopology(List<StorageNode> storageNodes) { + String username = getRequiredStorageProperty(USERNAME_PROPERTY); + String password = getRequiredStorageProperty(PASSWORD_PROPERTY); + SchemaManager schemaManager = new SchemaManager(username, password, storageNodes); + try{ + return schemaManager.updateTopology(false); + } catch (Exception e) { + log.error("An error occurred while applying schema topology changes", e); + } + + return false; + } + + private String getRequiredStorageProperty(String property) { + String value = System.getProperty(property); + if (StringUtil.isEmpty(property)) { + throw new IllegalStateException("The system property [" + property + "] is not set. The RHQ " + + "server will not be able connect to the RHQ storage node(s). 
This property should be defined " + + "in rhq-server.properties."); + } + return value; + } + +} \ No newline at end of file diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java index 15fa85c..00ba9e7 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java @@ -18,6 +18,7 @@ */ package org.rhq.enterprise.server.cloud;
+import java.net.InetAddress; import java.util.List;
import javax.ejb.Local; @@ -106,6 +107,8 @@ public interface StorageNodeManagerLocal { */ PageList<Alert> findAllStorageNodeAlerts(Subject subject, StorageNode storageNode);
+ StorageNode findStorageNodeByAddress(InetAddress address); +
/** * Find ids for all resources and sub-resources of Storage Nodes that @@ -170,6 +173,8 @@ public interface StorageNodeManagerLocal { */ boolean storageNodeGroupExists();
+ void addToStorageNodeGroup(StorageNode storageNode); + /** * This method assumes the storage node resource group already exists; as such, it should only be called from places * in the code that are after the point(s) where the group has been created. @@ -181,4 +186,8 @@ public interface StorageNodeManagerLocal {
void scheduleOperationInNewTransaction(Subject subject, ResourceOperationSchedule schedule);
+ void prepareNewNodesForBootstrap(); + + void runAddNodeMaintenance(); + } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/operation/OperationManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/operation/OperationManagerBean.java index 088f13e..5d3b1ae 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/operation/OperationManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/operation/OperationManagerBean.java @@ -96,6 +96,7 @@ import org.rhq.enterprise.server.resource.ResourceNotFoundException; import org.rhq.enterprise.server.resource.group.ResourceGroupManagerLocal; import org.rhq.enterprise.server.resource.group.ResourceGroupNotFoundException; import org.rhq.enterprise.server.scheduler.SchedulerLocal; +import org.rhq.enterprise.server.storage.StorageNodeOperationsHandler; import org.rhq.enterprise.server.util.CriteriaQueryGenerator; import org.rhq.enterprise.server.util.CriteriaQueryRunner;
@@ -123,6 +124,9 @@ public class OperationManagerBean implements OperationManagerLocal, OperationMan @EJB private SubjectManagerLocal subjectManager;
+ @EJB + private StorageNodeOperationsHandler storageNodeOperationsHandler; + @SuppressWarnings("unchecked") public List<IntegerOptionItem> getResourceNameOptionItems(int groupId) { String queryName = ResourceGroup.QUERY_FIND_RESOURCE_NAMES_BY_GROUP_ID; @@ -917,7 +921,7 @@ public class OperationManagerBean implements OperationManagerLocal, OperationMan if (history.getParameters() != null) { history.getParameters().getId(); // eagerly reload the parameters } - + storageNodeOperationsHandler.handleOperationUpdateIfNecessary(history); notifyAlertConditionCacheManager("updateOperationHistory", history); return history; } @@ -1723,6 +1727,7 @@ public class OperationManagerBean implements OperationManagerLocal, OperationMan if (!stillInProgress) { groupHistory.setErrorMessage((groupErrorMessage == null) ? null : groupErrorMessage.toString()); groupHistory.setStatus(groupStatus); + storageNodeOperationsHandler.handleGroupOperationUpdateIfNecessary(groupHistory); notifyAlertConditionCacheManager("checkForCompletedGroupOperation", groupHistory); } } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java index ec28888..0b404bb 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java @@ -9,7 +9,9 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory;
import org.rhq.core.domain.cloud.Server; +import org.rhq.core.domain.cloud.StorageNode; import org.rhq.enterprise.server.auth.SubjectManagerLocal; +import org.rhq.enterprise.server.cloud.StorageNodeManagerLocal; import org.rhq.enterprise.server.cloud.TopologyManagerLocal; import org.rhq.enterprise.server.cloud.instance.ServerManagerLocal; import org.rhq.enterprise.server.util.LookupUtil; @@ -36,6 +38,18 @@ public class StorageClusterMonitor implements StorageStateListener { log.info("Taking server out of maintenance mode"); updateServerMode(Server.OperationMode.NORMAL); } + + StorageNodeManagerLocal storageNodeManager = LookupUtil.getStorageNodeManager(); + StorageNode newClusterNode = storageNodeManager.findStorageNodeByAddress(address); + + if (newClusterNode == null) { + log.error("Did not find storage node with address [" + address.getHostAddress() + "]. This should not " + + "happen."); + } else { + log.info("Adding " + newClusterNode + " to storage cluster and scheduling cluster maintenance..."); + storageNodeManager.addToStorageNodeGroup(newClusterNode); + storageNodeManager.runAddNodeMaintenance(); + } }
@Override diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandler.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandler.java new file mode 100644 index 0000000..96e8de8 --- /dev/null +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandler.java @@ -0,0 +1,17 @@ +package org.rhq.enterprise.server.storage; + +import javax.ejb.Asynchronous; + +import org.rhq.core.domain.operation.GroupOperationHistory; +import org.rhq.core.domain.operation.OperationHistory; + +/** + * @author John Sanda + */ +public interface StorageNodeOperationsHandler { + @Asynchronous + void handleOperationUpdateIfNecessary(OperationHistory operationHistory); + + @Asynchronous + void handleGroupOperationUpdateIfNecessary(GroupOperationHistory operationHistory); +} diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java new file mode 100644 index 0000000..6da5cca --- /dev/null +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java @@ -0,0 +1,90 @@ +package org.rhq.enterprise.server.storage; + +import javax.ejb.Asynchronous; +import javax.ejb.EJB; +import javax.ejb.Stateless; +import javax.persistence.EntityManager; +import javax.persistence.PersistenceContext; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import org.rhq.core.domain.cloud.StorageNode; +import org.rhq.core.domain.operation.GroupOperationHistory; +import org.rhq.core.domain.operation.OperationDefinition; +import org.rhq.core.domain.operation.OperationHistory; +import org.rhq.core.domain.operation.OperationRequestStatus; +import 
org.rhq.core.domain.resource.Resource; +import org.rhq.core.domain.resource.ResourceType; +import org.rhq.enterprise.server.RHQConstants; +import org.rhq.enterprise.server.cloud.StorageNodeManagerLocal; + +/** + * @author John Sanda + */ +@Stateless +public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHandler { + + private final Log log = LogFactory.getLog(StorageNodeOperationsHandlerBean.class); + + private static final String STORAGE_NODE_TYPE_NAME = "RHQ Storage Node"; + private static final String STORAGE_NODE_PLUGIN_NAME = "RHQStorage"; + + @PersistenceContext(unitName = RHQConstants.PERSISTENCE_UNIT_NAME) + private EntityManager entityManager; + + @EJB + private StorageNodeManagerLocal storageNodeManager; + + @Override + @Asynchronous + public void handleOperationUpdateIfNecessary(OperationHistory operationHistory) { +// if (isStorageNodeOperation(operationHistory.getOperationDefinition())) { +// if (operationHistory.getOperationDefinition().getName().equals("prepareForBootstrap")) { +// ResourceOperationHistory resourceOperationHistory = entityManager.find(ResourceOperationHistory.class, +// operationHistory.getId()); +// if (resourceOperationHistory.getStatus() == OperationRequestStatus.SUCCESS) { +// +// } +// StorageNode storageNode = findStorageNode(resourceOperationHistory.getResource()); +// storageNode.setOperationMode(StorageNode.OperationMode.NORMAL); +// } +// } + } + + private StorageNode findStorageNode(Resource resource) { + for (StorageNode storageNode : storageNodeManager.getStorageNodes()) { + if (storageNode.getResource().getId() == resource.getId()) { + return storageNode; + } + } + return null; + } + + @Override + @Asynchronous + public void handleGroupOperationUpdateIfNecessary(GroupOperationHistory groupOperationHistory) { + if (isStorageNodeOperation(groupOperationHistory.getOperationDefinition())) { + if (groupOperationHistory.getOperationDefinition().getName().equals("updateKnownNodes")) { + if 
(groupOperationHistory.getStatus() == OperationRequestStatus.SUCCESS) { + log.info("New storage has been successfully announced to the storage node cluster."); + storageNodeManager.prepareNewNodesForBootstrap(); + } else if (groupOperationHistory.getStatus() == OperationRequestStatus.FAILURE) { + log.warn("Failed to announce new storage node to the cluster. It cannot join the cluster until " + + "it has been announced to existing cluster nodes."); + } else if (groupOperationHistory.getStatus() == OperationRequestStatus.CANCELED) { + log.warn("New storage node has not been announced to the cluster. The group operation " + + groupOperationHistory.getOperationDefinition().getName() + " has been canceled. The new node " + + "cannot join the cluster until it has been announced to existing cluster nodes."); + } + } + } + } + + private boolean isStorageNodeOperation(OperationDefinition operationDefinition) { + ResourceType resourceType = operationDefinition.getResourceType(); + return resourceType.getName().equals(STORAGE_NODE_TYPE_NAME) && + resourceType.getPlugin().equals(STORAGE_NODE_PLUGIN_NAME); + } + +}
commit 313023782c4bdddf63d1e3773d499717ddaa2687 Author: Jirka Kremser jkremser@redhat.com Date: Mon Jul 29 14:45:30 2013 +0200
If there is just one group definition in ConfigurationEditor, normal form is used instead SectionStack component. Also the "Jump to Section" navigation is hidden for just one group to save some space on the screen.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/configuration/ConfigurationEditor.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/configuration/ConfigurationEditor.java index 779772f..418300f 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/configuration/ConfigurationEditor.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/configuration/ConfigurationEditor.java @@ -415,17 +415,29 @@ public class ConfigurationEditor extends EnhancedVLayout { EnhancedVLayout layout = new EnhancedVLayout(); List<PropertyGroupDefinition> groupDefinitions = configurationDefinition.getGroupDefinitions();
- if (groupDefinitions.isEmpty()) { - // No prop groups, so we just need a single form for the non-grouped props. + if (groupDefinitions.isEmpty() || groupDefinitions.size() == 1) { + // No or one prop groups, so we just need a single form for the non-grouped props + // and another one if there is just one group List<PropertyDefinition> propertyDefinitions = new ArrayList<PropertyDefinition>( configurationDefinition.getNonGroupedProperties()); - - DynamicForm form = buildPropertiesForm(propertyDefinitions, configuration); - form.setBorder("1px solid #AAA"); - form.validate(); - layout.addMember(form); + if (!propertyDefinitions.isEmpty()) { + DynamicForm form = buildPropertiesForm(propertyDefinitions, configuration); + form.setBorder("1px solid #AAA"); + form.validate(); + layout.addMember(form); + } + if (groupDefinitions.size() == 1) { + propertyDefinitions.addAll(configurationDefinition.getPropertiesInGroup(groupDefinitions.get(0) + .getName())); + DynamicForm groupForm = buildPropertiesForm(propertyDefinitions, configuration); + groupForm.setIsGroup(true); + groupForm.setGroupTitle(groupDefinitions.get(0).getDisplayName()); + groupForm.setBorder("1px solid #AAA"); + groupForm.validate(); + layout.addMember(groupForm); + } } else { - // One or more prop groups, so create a section stack with one section per group. + // Two or more prop groups, so create a section stack with one section per group. final SectionStack sectionStack = new SectionStack(); sectionStack.setVisibilityMode(VisibilityMode.MULTIPLE); sectionStack.setWidth100(); @@ -441,7 +453,6 @@ public class ConfigurationEditor extends EnhancedVLayout { // com.allen_sauer.gwt.log.client.Log.info("building: " + definition.getDisplayName()); sectionStack.addSection(buildGroupSection(definition)); } - this.toolStrip = buildToolStrip(layout, sectionStack); layout.addMember(toolStrip); layout.addMember(sectionStack);
commit 33821fe1c902246701d7dd423de3abcff948cdaa Author: Jirka Kremser jkremser@redhat.com Date: Mon Jul 29 13:19:16 2013 +0200
Adding the newly (6/28/13) issued certificate for accessing the pto calendar (mail.corp.redhat.com domain) to the rhq-ircbot keystore. This one should be valid until 6/28/15.
diff --git a/etc/rhq-ircBot/src/main/resources/org/rhq/etc/ircbot/cacerts.jks b/etc/rhq-ircBot/src/main/resources/org/rhq/etc/ircbot/cacerts.jks index 3d73cbc..431de7c 100644 Binary files a/etc/rhq-ircBot/src/main/resources/org/rhq/etc/ircbot/cacerts.jks and b/etc/rhq-ircBot/src/main/resources/org/rhq/etc/ircbot/cacerts.jks differ
commit f8b594d1eb6032d5e8f097c924bb96316b62b483 Author: Jirka Kremser jkremser@redhat.com Date: Mon Jul 29 12:54:06 2013 +0200
Calling the right setter.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 7d861f1..049cf42 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -310,7 +310,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN if ((scheduleId = scheduleIdsMap.get(totalDiskUsedPercentageMetric)) != null) { StorageNodeLoadComposite.MeasurementAggregateWithUnits totalDiskUsedPercentageAggregateWithUnits = getMeasurementAggregateWithUnits( subject, scheduleId, MeasurementUnits.PERCENTAGE, beginTime, endTime); - result.setDataDiskUsedPercentage(totalDiskUsedPercentageAggregateWithUnits); + result.setTotalDiskUsedPercentage(totalDiskUsedPercentageAggregateWithUnits); } if ((scheduleId = scheduleIdsMap.get(freeDiskToDataRatioMetric)) != null) { MeasurementAggregate freeDiskToDataRatioAggregate = measurementManager.getAggregate(subject, @@ -754,4 +754,4 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
return successResultFound; } -} \ No newline at end of file +}
commit 817d5628d4afbe41c38c9e2d82cdd1a7ef079916 Author: John Sanda jsanda@redhat.com Date: Sun Jul 28 07:36:07 2013 -0400
override default ring delay to speed up test and hopefully fix jenkins failure
diff --git a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java index ef92510..bd171a4 100644 --- a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java +++ b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java @@ -29,6 +29,7 @@ import java.io.File; import java.io.FileInputStream; import java.io.FileReader; import java.net.InetAddress; +import java.util.Properties; import java.util.Set;
import com.google.common.collect.ImmutableSet; @@ -73,6 +74,7 @@ import org.rhq.core.system.ProcessExecution; import org.rhq.core.system.ProcessExecutionResults; import org.rhq.core.system.SystemInfo; import org.rhq.core.system.SystemInfoFactory; +import org.rhq.core.util.PropertiesFileUpdate; import org.rhq.core.util.stream.StreamUtil;
/** @@ -124,6 +126,18 @@ public class StorageNodeComponentITest { deployer.updateFilePerms(); deployer.updateStorageAuthConf(Sets.newHashSet(InetAddress.getByName(address)));
+ File confDir = new File(basedir, "conf"); + File cassandraJvmPropsFile = new File(confDir, "cassandra-jvm.properties"); + PropertiesFileUpdate propertiesUpdater = new PropertiesFileUpdate(cassandraJvmPropsFile.getAbsolutePath()); + Properties properties = propertiesUpdater.loadExistingProperties(); + + String jvmOpts = properties.getProperty("JVM_OPTS"); + jvmOpts = jvmOpts.substring(0, jvmOpts.lastIndexOf(""")); + jvmOpts = jvmOpts + " -Dcassandra.ring_delay_ms=100""; + properties.setProperty("JVM_OPTS", jvmOpts); + + propertiesUpdater.update(properties); + File binDir = new File(basedir, "bin"); SystemInfo systemInfo = SystemInfoFactory.createSystemInfo();
@@ -261,9 +275,8 @@ public class StorageNodeComponentITest { log.info("Waiting for node to boostrap..."); // When a node goes through bootstrap, StorageService sleeps for RING_DELAY ms // while it determines the ranges of the token ring it will own. RING_DELAY defaults - // to 30 seconds by default. - // TODO Override the default RING_DELAY to speed up tests - Thread.sleep(33000); + // to 30 seconds by default but we are overriding it to be 100 ms. + Thread.sleep(3000);
assertEquals(result.getResultCode(), OperationServicesResultCode.SUCCESS, "The operation failed: " + result.getErrorStackTrace());
commit 9c9ec83b26445713b67526f43c489493613a5abd Author: John Sanda jsanda@redhat.com Date: Sat Jul 27 14:08:18 2013 -0400
updating exception handling and logging in prepareForBootstrap
diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/InternodeAuthConfUpdateException.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/InternodeAuthConfUpdateException.java new file mode 100644 index 0000000..d1e6e56 --- /dev/null +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/InternodeAuthConfUpdateException.java @@ -0,0 +1,22 @@ +package org.rhq.plugins.storage; + +/** + * @author John Sanda + */ +public class InternodeAuthConfUpdateException extends Exception { + + public InternodeAuthConfUpdateException() { + } + + public InternodeAuthConfUpdateException(String message) { + super(message); + } + + public InternodeAuthConfUpdateException(String message, Throwable cause) { + super(message, cause); + } + + public InternodeAuthConfUpdateException(Throwable cause) { + super(cause); + } +} diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java index eb0b9fd..2cb9501 100644 --- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java @@ -29,12 +29,10 @@ import java.io.File; import java.io.FileNotFoundException; import java.io.FileReader; import java.io.FileWriter; -import java.io.IOException; import java.io.StringReader; import java.util.ArrayList; import java.util.HashSet; import java.util.List; -import java.util.Map; import java.util.Set;
import org.apache.commons.logging.Log; @@ -95,6 +93,14 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper return new File(pluginConfig.getSimpleValue("baseDir")); }
+ private File getConfDir() { + return new File(getBasedir(), "conf"); + } + + private File getInternodeAuthConfFile() { + return new File(getConfDir(), "rhq-storage-auth.conf"); + } + @Override public OperationResult invokeOperation(String name, Configuration parameters) throws Exception { if (name.equals("addNodeMaintenance")) { @@ -242,56 +248,24 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper ipAddresses.add(propertySimple.getStringValue()); }
- if (updateAuthFile(result, ipAddresses)) return result; - - EmsBean authBean = getEmsConnection().getBean("org.rhq.cassandra.auth:type=RhqInternodeAuthenticator"); - EmsOperation emsOperation = authBean.getOperation("reloadConfiguration"); - emsOperation.invoke(); - - result.setSimpleResult("Successfully updated the set of known nodes."); - - return result; - } - - private boolean updateAuthFile(OperationResult result, Set<String> ipAddresses) { - log.info("Updating known nodes to " + ipAddresses); + try { + updateInternodeAuthConfFile(ipAddresses);
- File confDir = new File(getBasedir(), "conf"); - File authFile = new File(confDir, "rhq-storage-auth.conf"); - File authBackupFile = new File(confDir, "." + authFile.getName() + ".bak"); + EmsBean authBean = getEmsConnection().getBean("org.rhq.cassandra.auth:type=RhqInternodeAuthenticator"); + EmsOperation emsOperation = authBean.getOperation("reloadConfiguration"); + emsOperation.invoke();
- if (authBackupFile.exists()) { - if (log.isDebugEnabled()) { - log.debug(authBackupFile + " already exists. Deleting it now in preparation of creating new backup " + - "for " + authFile.getName()); - } - if (!authBackupFile.delete()) { - String msg = "Failed to delete backup file " + authBackupFile + ". The operation will abort " + - "since " + authFile + " cannot reliably be backed up before making changes. Please delete " + - authBackupFile + " manually and reschedule the operation once the file has been removed."; - log.error(msg); - result.setErrorMessage(msg); - - return true; - } - } + result.setSimpleResult("Successfully updated the set of known nodes.");
- try { - StreamUtil.copy(new StringReader(StringUtil.collectionToString(ipAddresses, "\n")), - new FileWriter(authFile), true); - } catch (IOException e) { - log.error("An error occurred while updating " + authFile, e); - try { - log.info("Restoring back up file " + authBackupFile); - FileUtil.copyFile(authBackupFile, authFile); - authBackupFile.delete(); - } catch (IOException e1) { - log.error("Failed to revert backup of " + authFile, e1); - } - result.setErrorMessage("There was an unexpected error while updating " + authFile); - return true; + return result; + } catch (InternodeAuthConfUpdateException e) { + File authFile = getInternodeAuthConfFile(); + log.error("Failed to update set of trusted nodes in " + authFile + " due to the following error(s): " + + ThrowableUtil.getAllMessages(e)) ; + result.setErrorMessage("Failed to update set of trusted nodes in " + authFile + " due to the following " + + "error(s): " + ThrowableUtil.getAllMessages(e)); + return result; } - return false; }
private OperationResult prepareForBootstrap(Configuration params) { @@ -326,7 +300,6 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper } purgeDir(new File(configEditor.getSavedCachesDirectory()));
- log.info("Updating cluster settings");
String address = pluginConfig.getSimpleValue("host"); @@ -344,10 +317,9 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper configEditor.setStoragePort(gossipPort);
configEditor.save(); + log.info("Cluster configuration settings have been applied to " + yamlFile);
- if (updateAuthFile(result, new HashSet<String>(addresses))) { - return result; - } + updateInternodeAuthConfFile(new HashSet<String>(addresses));
log.info(this + " is ready to be bootstrap. Restarting storage node..."); OperationResult startResult = startNode(); @@ -376,6 +348,11 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper } } return result; + } catch (InternodeAuthConfUpdateException e) { + File authFile = getInternodeAuthConfFile(); + result.setErrorMessage("Failed to update " + authFile + " due to the following error(s): " + + ThrowableUtil.getAllMessages(e)); + return result; } }
@@ -384,23 +361,18 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper FileUtil.purge(dir, true); }
- private File getCommitLogDir(Map yamlConfig) { - return new File((String) yamlConfig.get("commitlog_directory")); - } + private void updateInternodeAuthConfFile(Set<String> ipAddresses) throws InternodeAuthConfUpdateException { + File authFile = getInternodeAuthConfFile();
- private List<File> getDataDirs(Map yamlConfig) { - List<File> dirs = new ArrayList<File>(); - List<String> dirNames = (List<String>) yamlConfig.get("data_file_directories"); + log.info("Updating " + authFile);
- for (String dirName : dirNames) { - dirs.add(new File(dirName)); + try { + StreamUtil.copy(new StringReader(StringUtil.collectionToString(ipAddresses, "\n")), + new FileWriter(authFile), true); + } catch (Exception e) { + log.error("An error occurred while trying to update " + authFile, e); + throw new InternodeAuthConfUpdateException("An error occurred while trying to update " + authFile, e); } - - return dirs; - } - - private File getSavedCachesDir(Map yamlConfig) { - return new File((String) yamlConfig.get("saved_caches_directory")); }
private OperationResult nodeAdded(Configuration params) { diff --git a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java index d10e428..ef92510 100644 --- a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java +++ b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java @@ -259,6 +259,10 @@ public class StorageNodeComponentITest { params, timeout);
log.info("Waiting for node to boostrap..."); + // When a node goes through bootstrap, StorageService sleeps for RING_DELAY ms + // while it determines the ranges of the token ring it will own. RING_DELAY defaults + // to 30 seconds. + // TODO Override the default RING_DELAY to speed up tests Thread.sleep(33000);
assertEquals(result.getResultCode(), OperationServicesResultCode.SUCCESS, "The operation failed: " +
commit b38c4b5ea27f83caaa15f1cd2d29daa8ec4ac969 Author: John Sanda jsanda@redhat.com Date: Sat Jul 27 12:49:30 2013 -0400
adding some initial test coverage for prepareForBootstrap operation
The prepareForBootstrap method has been refactored to use ConfigEditor, but there is still a good bit of cleanup to do.
diff --git a/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java b/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java index 679a84c..0b4a127 100644 --- a/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java +++ b/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java @@ -76,6 +76,22 @@ public class ConfigEditor { } }
+ public File getBackupFile() { + return backupFile; + } + + public String getCommitLogDirectory() { + return (String) config.get("commitlog_directory"); + } + + public List<String> getDataFileDirectories() { + return (List<String>) config.get("data_file_directories"); + } + + public String getSavedCachesDirectory() { + return (String) config.get("saved_caches_directory"); + } + public void setSeeds(String... seeds) { List seedProviderList = (List) config.get("seed_provider"); Map seedProvider = (Map) seedProviderList.get(0); diff --git a/modules/plugins/cassandra/pom.xml b/modules/plugins/cassandra/pom.xml index da90f09..bafc8d7 100644 --- a/modules/plugins/cassandra/pom.xml +++ b/modules/plugins/cassandra/pom.xml @@ -36,6 +36,11 @@ </dependency> <dependency> <groupId>${project.groupId}</groupId> + <artifactId>rhq-cassandra-util</artifactId> + <version>${project.version}</version> + </dependency> + <dependency> + <groupId>${project.groupId}</groupId> <artifactId>rhq-core-domain</artifactId> <version>${project.version}</version> <scope>provided</scope> @@ -89,6 +94,10 @@ <outputDirectory>${project.build.outputDirectory}/lib</outputDirectory> <artifactItems> <artifactItem> + <groupId>${project.groupId}</groupId> + <artifactId>rhq-cassandra-util</artifactId> + </artifactItem> + <artifactItem> <groupId>com.datastax.cassandra</groupId> <artifactId>cassandra-driver-core</artifactId> <version>${cassandra.driver.version}</version> diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java index 5933093..eb0b9fd 100644 --- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java @@ -26,7 +26,6 @@ package org.rhq.plugins.storage;
import java.io.File; -import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.FileReader; import java.io.FileWriter; @@ -45,9 +44,10 @@ import org.mc4j.ems.connection.EmsConnection; import org.mc4j.ems.connection.bean.EmsBean; import org.mc4j.ems.connection.bean.attribute.EmsAttribute; import org.mc4j.ems.connection.bean.operation.EmsOperation; -import org.yaml.snakeyaml.DumperOptions; -import org.yaml.snakeyaml.Yaml; +import org.yaml.snakeyaml.error.YAMLException;
+import org.rhq.cassandra.util.ConfigEditor; +import org.rhq.cassandra.util.ConfigEditorException; import org.rhq.core.domain.configuration.Configuration; import org.rhq.core.domain.configuration.ConfigurationUpdateStatus; import org.rhq.core.domain.configuration.Property; @@ -277,27 +277,18 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper }
try { - FileUtil.copyFile(authFile, authBackupFile); - } catch (IOException e) { - String msg = "Failed to backup " + authFile + " prior to making updates. The operation will abort due " + - "to unexpected error"; - log.error(msg, e); - result.setErrorMessage(msg + ": " + ThrowableUtil.getRootMessage(e)); - return true; - } - - try { StreamUtil.copy(new StringReader(StringUtil.collectionToString(ipAddresses, "\n")), new FileWriter(authFile), true); } catch (IOException e) { log.error("An error occurred while updating " + authFile, e); try { + log.info("Restoring back up file " + authBackupFile); FileUtil.copyFile(authBackupFile, authFile); + authBackupFile.delete(); } catch (IOException e1) { log.error("Failed to revert backup of " + authFile, e1); } - result.setErrorMessage("There was an unexpected error while updating " + authFile + ". Make sure that " + - "it matches " + authBackupFile + " and then reschedule the operation."); + result.setErrorMessage("There was an unexpected error while updating " + authFile); return true; } return false; @@ -325,74 +316,67 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper String yamlProp = pluginConfig.getSimpleValue("yamlConfiguration"); File yamlFile = new File(yamlProp);
- DumperOptions options = new DumperOptions(); - options.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK); - Yaml yaml = new Yaml(options); - - Map yamlConfig = null; + ConfigEditor configEditor = new ConfigEditor(yamlFile); try { - yamlConfig = (Map) yaml.load(new FileInputStream(yamlFile)); - } catch (FileNotFoundException e) { - log.error("Failed to load " + yamlFile, e); - log.error("Cannot prepare " + this + " for bootstrap. " + yamlFile + " must exist in order to make the " + - "necessary configuration changes."); - result.setErrorMessage("Cannot prepare storage node for bootstrap. It appears that " + yamlFile + - " does not exist. Make sure that it exists so that the necessary configuration changes can be made."); + configEditor.load();
- return result; - } + purgeDir(new File(configEditor.getCommitLogDirectory())); + for (String dir : configEditor.getDataFileDirectories()) { + purgeDir(new File(dir)); + } + purgeDir(new File(configEditor.getSavedCachesDirectory()));
- purgeDir(getCommitLogDir(yamlConfig)); - for (File dataDir : getDataDirs(yamlConfig)) { - purgeDir(dataDir); - } - purgeDir(getSavedCachesDir(yamlConfig));
- log.info("Updating cluster settings"); + log.info("Updating cluster settings");
- String address = pluginConfig.getSimpleValue("host"); - List<String> seeds = getAddresses(params.getList("storageNodeIPAddresses")); - // Make sure this node's address is not in the list; otherwise, it - // won't bootstrap properly. - seeds.remove(address); - try { - updateSeedsList(seeds); - } catch (IOException e) { - log.error("Failed to update seeds property in " + yamlFile, e); - result.setErrorMessage("Failed to prepared node for bootstrap due to unexpected error that occurred " + - "while updating seeds property in " + yamlFile + ":\n" + ThrowableUtil.getAllMessages(e)); - return result; - } + String address = pluginConfig.getSimpleValue("host"); + int cqlPort = Integer.parseInt(params.getSimpleValue("cqlPort")); + int gossipPort = Integer.parseInt(params.getSimpleValue("gossipPort")); + List<String> addresses = getAddresses(params.getList("storageNodeIPAddresses"));
- if (updateAuthFile(result, new HashSet<String>(seeds))) { - return result; - } + // Make sure this node's address is not in the list; otherwise, it + // won't bootstrap properly. + List<String> seeds = new ArrayList<String>(addresses); + seeds.remove(address);
- int cqlPort = Integer.parseInt(params.getSimpleValue("cqlPort")); - int gossipPort = Integer.parseInt(params.getSimpleValue("gossipPort")); + configEditor.setSeeds(seeds.toArray(new String[seeds.size()])); + configEditor.setNativeTransportPort(cqlPort); + configEditor.setStoragePort(gossipPort);
- yamlConfig.put("native_transport_port", cqlPort); - yamlConfig.put("storage_port", gossipPort); + configEditor.save();
- try { - yaml.dump(yamlConfig, new FileWriter(yamlFile)); - } catch (IOException e) { - log.error("Could not update cluster settings in " + yamlFile, e); - result.setErrorMessage("Could not update cluster settings in " + yamlFile + ":\n" + - ThrowableUtil.getAllMessages(e)); - return result; - } + if (updateAuthFile(result, new HashSet<String>(addresses))) { + return result; + }
- log.info(this + " is ready to be bootstrap. Restarting storage node..."); - OperationResult startResult = startNode(); - if (startResult.getErrorMessage() != null) { - log.error("Failed to restart storage node:\n" + startResult.getErrorMessage()); - result.setErrorMessage("Failed to restart storage node:\n" + startResult.getErrorMessage()); - } else { - result.setSimpleResult("The storage node was succesfully updated is now bootstrapping into the cluster."); - } + log.info(this + " is ready to be bootstrap. Restarting storage node..."); + OperationResult startResult = startNode(); + if (startResult.getErrorMessage() != null) { + log.error("Failed to restart storage node:\n" + startResult.getErrorMessage()); + result.setErrorMessage("Failed to restart storage node:\n" + startResult.getErrorMessage()); + } else { + result.setSimpleResult("The storage node was succesfully updated is now bootstrapping into the cluster."); + }
- return result; + return result; + } catch (ConfigEditorException e) { + log.error("There was an error while trying to update " + yamlFile, e); + if (e.getCause() instanceof YAMLException) { + log.info("Attempting to restore " + yamlFile); + try { + configEditor.restore(); + result.setErrorMessage("Failed to update configuration file [" + yamlFile + "]: " + + ThrowableUtil.getAllMessages(e.getCause())); + } catch (ConfigEditorException e1) { + log.error("Failed to restore " + yamlFile + ". A copy of the file prior to any modifications " + + "can be found at " + configEditor.getBackupFile()); + result.setErrorMessage("There was an error updating [" + yamlFile + "] and undoing the changes " + + "Failed. A copy of the file can be found at " + configEditor.getBackupFile() + ". See the " + + "agent logs for more details"); + } + } + return result; + } }
private void purgeDir(File dir) { diff --git a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java index eb4d545..d10e428 100644 --- a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java +++ b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java @@ -26,15 +26,25 @@ import static org.testng.Assert.assertNotNull; import static org.testng.Assert.assertTrue;
import java.io.File; +import java.io.FileInputStream; +import java.io.FileReader; import java.net.InetAddress; import java.util.Set;
+import com.google.common.collect.ImmutableSet; import com.google.common.collect.Sets;
+import org.apache.cassandra.config.Config; +import org.apache.cassandra.config.SeedProviderDef; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.hyperic.sigar.OperatingSystem; import org.testng.annotations.AfterSuite; import org.testng.annotations.BeforeSuite; import org.testng.annotations.Test; +import org.yaml.snakeyaml.Loader; +import org.yaml.snakeyaml.TypeDescription; +import org.yaml.snakeyaml.Yaml;
import org.rhq.cassandra.CassandraClusterManager; import org.rhq.cassandra.ClusterInitService; @@ -63,12 +73,15 @@ import org.rhq.core.system.ProcessExecution; import org.rhq.core.system.ProcessExecutionResults; import org.rhq.core.system.SystemInfo; import org.rhq.core.system.SystemInfoFactory; +import org.rhq.core.util.stream.StreamUtil;
/** * @author John Sanda */ public class StorageNodeComponentITest {
+ private final Log log = LogFactory.getLog(StorageNodeComponentITest.class); + private File basedir;
private Resource storageNode; @@ -232,7 +245,7 @@ public class StorageNodeComponentITest { }
@Test(dependsOnMethods = "restartStorageNode") - public void prepareForBootstrap() { + public void prepareForBootstrap() throws Exception { Configuration params = Configuration.builder().addSimple("cqlPort", 9242).addSimple("gossipPort", 7200) .openList("storageNodeIPAddresses", "storageNodeIPAddresse").addSimples("127.0.0.1", "127.0.0.2") .closeList().build(); @@ -245,10 +258,22 @@ public class StorageNodeComponentITest { OperationServicesResult result = operationsService.invokeOperation(operationContext, "prepareForBootstrap", params, timeout);
+ log.info("Waiting for node to boostrap..."); + Thread.sleep(33000); + assertEquals(result.getResultCode(), OperationServicesResultCode.SUCCESS, "The operation failed: " + result.getErrorStackTrace());
assertNodeIsUp("Expected " + storageNode + " to be up after the prepareForBootstrap operation completes."); + + assertThatInternodeAuthConfFileMatches("127.0.0.1", "127.0.0.2"); + + File confDir = new File(basedir, "conf"); + File cassandraYamlFile = new File(confDir, "cassandra.yaml"); + Config config = loadConfig(cassandraYamlFile); + + assertEquals(config.seed_provider.parameters.get("seeds"), "127.0.0.2", "Failed to update seeds " + + "property in " + cassandraYamlFile); }
private void assertNodeIsUp(String msg) { @@ -292,4 +317,28 @@ public class StorageNodeComponentITest { return type.getPlugin().equals("RHQStorage") && type.getName().equals("RHQ Storage Node"); }
+ private void assertThatInternodeAuthConfFileMatches(String... addresses) throws Exception { + File confDir = new File(basedir, "conf"); + File internodeAuthConfFile = new File(confDir, "rhq-storage-auth.conf"); + String contents = StreamUtil.slurp(new FileReader(internodeAuthConfFile)); + + Set<String> expected = ImmutableSet.copyOf(addresses); + Set<String> actual = ImmutableSet.copyOf(contents.split("\n")); + + assertEquals(actual, expected, "Failed to update internode authentication conf file " + + internodeAuthConfFile + "."); + } + + private Config loadConfig(File configFile) throws Exception { + FileInputStream inputStream = new FileInputStream(configFile); + org.yaml.snakeyaml.constructor.Constructor constructor = + new org.yaml.snakeyaml.constructor.Constructor(Config.class); + TypeDescription seedDesc = new TypeDescription(SeedProviderDef.class); + seedDesc.putMapPropertyType("parameters", String.class, String.class); + constructor.addTypeDescription(seedDesc); + Yaml yaml = new Yaml(new Loader(constructor)); + + return (Config) yaml.load(inputStream); + } + }
commit ba2427bc9dfc4e4c3ac9d778aa15d6c521535efe Author: John Sanda jsanda@redhat.com Date: Sat Jul 27 10:14:18 2013 -0400
initial commit for ConfigEditor which handles updating cassandra.yaml
diff --git a/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java b/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java new file mode 100644 index 0000000..679a84c --- /dev/null +++ b/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java @@ -0,0 +1,95 @@ +package org.rhq.cassandra.util; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.FileWriter; +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import org.yaml.snakeyaml.DumperOptions; +import org.yaml.snakeyaml.Yaml; + +import org.rhq.core.util.StringUtil; +import org.rhq.core.util.file.FileUtil; + +/** + * @author John Sanda + */ +public class ConfigEditor { + + private File configFile; + + private File backupFile; + + private Yaml yaml; + + private Map config; + + public ConfigEditor(File cassandraYamlFile) { + configFile = cassandraYamlFile; + } + + public void load() { + try { + DumperOptions options = new DumperOptions(); + options.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK); + yaml = new Yaml(options); + config = (Map) yaml.load(new FileInputStream(configFile)); + createBackup(); + } catch (FileNotFoundException e) { + throw new ConfigEditorException("Failed to load " + configFile, e); + } + } + + public void save() { + try { + yaml.dump(config, new FileWriter(configFile)); + backupFile.delete(); + yaml = null; + config = null; + backupFile = null; + } catch (Exception e) { + throw new ConfigEditorException("Failed to save changes to " + configFile, e); + } + } + + public void restore() { + try { + FileUtil.copyFile(backupFile, configFile); + backupFile.delete(); + yaml = null; + config = null; + backupFile = null; + } catch (IOException e) { + throw new ConfigEditorException("Failed to restore " + configFile + " from " + backupFile, e); + } + } + + private void createBackup() { + backupFile = new 
File(configFile.getParent(), "." + configFile.getName() + ".bak"); + try { + FileUtil.copyFile(configFile, backupFile); + } catch (IOException e) { + throw new ConfigEditorException("Failed to create " + backupFile, e); + } + } + + public void setSeeds(String... seeds) { + List seedProviderList = (List) config.get("seed_provider"); + Map seedProvider = (Map) seedProviderList.get(0); + List paramsList = (List) seedProvider.get("parameters"); + Map params = (Map) paramsList.get(0); + params.put("seeds", StringUtil.arrayToString(seeds)); + } + + public void setNativeTransportPort(int port) { + config.put("native_transport_port", port); + } + + public void setStoragePort(int port) { + config.put("storage_port", port); + } + +} diff --git a/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditorException.java b/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditorException.java new file mode 100644 index 0000000..db9e7ea --- /dev/null +++ b/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditorException.java @@ -0,0 +1,21 @@ +package org.rhq.cassandra.util; + +/** + * @author John Sanda + */ +public class ConfigEditorException extends RuntimeException { + + public ConfigEditorException() { + } + + public ConfigEditorException(String message) { + } + + public ConfigEditorException(String message, Throwable cause) { + super(message, cause); + } + + public ConfigEditorException(Throwable cause) { + super(cause); + } +} diff --git a/modules/common/cassandra-util/src/test/java/org/rhq/cassandra/util/ConfigEditorTest.java b/modules/common/cassandra-util/src/test/java/org/rhq/cassandra/util/ConfigEditorTest.java new file mode 100644 index 0000000..cf344e2 --- /dev/null +++ b/modules/common/cassandra-util/src/test/java/org/rhq/cassandra/util/ConfigEditorTest.java @@ -0,0 +1,94 @@ +package org.rhq.cassandra.util; + +import static org.testng.Assert.assertEquals; + +import java.io.File; +import 
java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.InputStream; +import java.lang.reflect.Method; + +import org.apache.cassandra.config.Config; +import org.apache.cassandra.config.SeedProviderDef; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; +import org.yaml.snakeyaml.Loader; +import org.yaml.snakeyaml.TypeDescription; +import org.yaml.snakeyaml.Yaml; + +import org.rhq.core.util.file.FileUtil; +import org.rhq.core.util.stream.StreamUtil; + +/** + * @author John Sanda + */ +public class ConfigEditorTest { + + private File basedir; + + private File configFile; + + @BeforeMethod + public void initTestDir(Method test) throws Exception { + File dir = new File(getClass().getResource(".").toURI()); + basedir = new File(dir, getClass().getSimpleName() + "/" + test.getName()); + FileUtil.purge(basedir, true); + basedir.mkdirs(); + + configFile = new File(basedir, "cassandra.yaml"); + + InputStream inputStream = getClass().getResourceAsStream("/cassandra.yaml"); + FileOutputStream outputStream = new FileOutputStream(configFile); + StreamUtil.copy(inputStream, outputStream); + } + + @Test + public void updateSeeds() throws Exception { + ConfigEditor editor = new ConfigEditor(configFile); + editor.load(); + editor.setSeeds("127.0.0.1", "127.0.0.2", "127.0.0.3"); + editor.save(); + + Config config = loadConfig(); + + assertEquals(config.seed_provider.parameters.get("seeds"), "127.0.0.1,127.0.0.2,127.0.0.3", + "Failed to update seeds property."); + } + + @Test + public void updateNativeTransportPort() throws Exception { + ConfigEditor editor = new ConfigEditor(configFile); + editor.load(); + editor.setNativeTransportPort(9393); + editor.save(); + + Config config = loadConfig(); + + assertEquals(config.native_transport_port, (Integer) 9393, "Failed to update native_transport_port"); + } + + @Test + public void updateStoragePort() throws Exception { + ConfigEditor editor = new ConfigEditor(configFile); + editor.load(); + 
editor.setStoragePort(6767); + editor.save(); + + Config config = loadConfig(); + + assertEquals(config.storage_port, (Integer) 6767, "Failed to update storage_port"); + } + + private Config loadConfig() throws Exception { + FileInputStream inputStream = new FileInputStream(configFile); + org.yaml.snakeyaml.constructor.Constructor constructor = + new org.yaml.snakeyaml.constructor.Constructor(Config.class); + TypeDescription seedDesc = new TypeDescription(SeedProviderDef.class); + seedDesc.putMapPropertyType("parameters", String.class, String.class); + constructor.addTypeDescription(seedDesc); + Yaml yaml = new Yaml(new Loader(constructor)); + + return (Config) yaml.load(inputStream); + } + +} diff --git a/modules/common/cassandra-util/src/test/resources/cassandra.yaml b/modules/common/cassandra-util/src/test/resources/cassandra.yaml new file mode 100644 index 0000000..fd7973b --- /dev/null +++ b/modules/common/cassandra-util/src/test/resources/cassandra.yaml @@ -0,0 +1,690 @@ +# Cassandra storage config YAML + +# NOTE: +# See http://wiki.apache.org/cassandra/StorageConfiguration for +# full explanations of configuration directives +# /NOTE + +# The name of the cluster. This is mainly used to prevent machines in +# one logical cluster from joining another. +cluster_name: rhq + +# This defines the number of tokens randomly assigned to this node on the ring +# The more tokens, relative to other nodes, the larger the proportion of data +# that this node will store. You probably want all nodes to have the same number +# of tokens assuming they have equal hardware capability. +# +# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, +# and will use the initial_token as described below. +# +# Specifying initial_token will override this setting. 
+# +# If you already have a cluster with 1 token per node, and wish to migrate to +# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations +num_tokens: 256 + +# If you haven't specified num_tokens, or have set it to the default of 1 then +# you should always specify InitialToken when setting up a production +# cluster for the first time, and often when adding capacity later. +# The principle is that each node should be given an equal slice of +# the token ring; see http://wiki.apache.org/cassandra/Operations +# for more details. +# +# If blank, Cassandra will request a token bisecting the range of +# the heaviest-loaded existing node. If there is no load information +# available, such as is the case with a new cluster, it will pick +# a random token, which will lead to hot spots. +#initial_token: + +# See http://wiki.apache.org/cassandra/HintedHandoff +hinted_handoff_enabled: true +# this defines the maximum amount of time a dead host will have hints +# generated. After it has been dead this long, new hints for it will not be +# created until it has been seen alive and gone down again. +max_hint_window_in_ms: 10800000 # 3 hours +# throttle in KB's per second, per delivery thread +hinted_handoff_throttle_in_kb: 1024 +# Number of threads with which to deliver hints; +# Consider increasing this number when you have multi-dc deployments, since +# cross-dc handoff tends to be slower +max_hints_delivery_threads: 2 + +# The following setting populates the page cache on memtable flush and compaction +# WARNING: Enable this setting only when the whole node's data fits in memory. +# Defaults to: false +# populate_io_cache_on_flush: false + +# Authentication backend, implementing IAuthenticator; used to identify users +# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, +# PasswordAuthenticator}. +# +# - AllowAllAuthenticator performs no checks - set it to disable authentication. 
+# - PasswordAuthenticator relies on username/password pairs to authenticate +# users. It keeps usernames and hashed passwords in system_auth.credentials table. +# Please increase system_auth keyspace replication factor if you use this authenticator. +authenticator: org.apache.cassandra.auth.PasswordAuthenticator + +# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions +# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, +# CassandraAuthorizer}. +# +# - AllowAllAuthorizer allows any action to any user - set it to disable authorization. +# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please +# increase system_auth keyspace replication factor if you use this authorizer. +authorizer: org.apache.cassandra.auth.CassandraAuthorizer + +# Validity period for permissions cache (fetching permissions can be an +# expensive operation depending on the authorizer, CassandraAuthorizer is +# one example). Defaults to 2000, set to 0 to disable. +# Will be disabled automatically for AllowAllAuthorizer. +permissions_validity_in_ms: 2000 + +# The partitioner is responsible for distributing rows (by key) across +# nodes in the cluster. Any IPartitioner may be used, including your +# own as long as it is on the classpath. Out of the box, Cassandra +# provides org.apache.cassandra.dht.{Murmur3Partitioner, RandomPartitioner +# ByteOrderedPartitioner, OrderPreservingPartitioner (deprecated)}. +# +# - RandomPartitioner distributes rows across the cluster evenly by md5. +# This is the default prior to 1.2 and is retained for compatibility. +# - Murmur3Partitioner is similar to RandomPartioner but uses Murmur3_128 +# Hash Function instead of md5. When in doubt, this is the best option. +# - ByteOrderedPartitioner orders rows lexically by key bytes. BOP allows +# scanning rows in key order, but the ordering can generate hot spots +# for sequential insertion workloads. 
+# - OrderPreservingPartitioner is an obsolete form of BOP, that stores +# - keys in a less-efficient format and only works with keys that are +# UTF8-encoded Strings. +# - CollatingOPP colates according to EN,US rules rather than lexical byte +# ordering. Use this as an example if you need custom collation. +# +# See http://wiki.apache.org/cassandra/Operations for more on +# partitioners and token selection. +partitioner: org.apache.cassandra.dht.Murmur3Partitioner + +# directories where Cassandra should store data on disk. +data_file_directories: + - /Users/jsanda/Development/redhat/rhq/rhq-data/storage-1/data + +# commit log +commitlog_directory: /Users/jsanda/Development/redhat/rhq/rhq-data/storage-1/commit_log + +# policy for data disk failures: +# stop: shut down gossip and Thrift, leaving the node effectively dead, but +# still inspectable via JMX. +# best_effort: stop using the failed disk and respond to requests based on +# remaining available sstables. This means you WILL see obsolete +# data at CL.ONE! +# ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra +disk_failure_policy: stop + +# Maximum size of the key cache in memory. +# +# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the +# minimum, sometimes more. The key cache is fairly tiny for the amount of +# time it saves, so it's worthwhile to use it at large numbers. +# The row cache saves even more time, but must store the whole values of +# its rows, so it is extremely space-intensive. It's best to only use the +# row cache if you have hot rows or static rows. +# +# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. +# +# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache. +key_cache_size_in_mb: + +# Duration in seconds after which Cassandra should +# safe the keys cache. Caches are saved to saved_caches_directory as +# specified in this configuration file. 
+# +# Saved caches greatly improve cold-start speeds, and is relatively cheap in +# terms of I/O for the key cache. Row cache saving is much more expensive and +# has limited use. +# +# Default is 14400 or 4 hours. +key_cache_save_period: 14400 + +# Number of keys from the key cache to save +# Disabled by default, meaning all keys are going to be saved +# key_cache_keys_to_save: 100 + +# Maximum size of the row cache in memory. +# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. +# +# Default value is 0, to disable row caching. +row_cache_size_in_mb: 0 + +# Duration in seconds after which Cassandra should +# safe the row cache. Caches are saved to saved_caches_directory as specified +# in this configuration file. +# +# Saved caches greatly improve cold-start speeds, and is relatively cheap in +# terms of I/O for the key cache. Row cache saving is much more expensive and +# has limited use. +# +# Default is 0 to disable saving the row cache. +row_cache_save_period: 0 + +# Number of keys from the row cache to save +# Disabled by default, meaning all keys are going to be saved +# row_cache_keys_to_save: 100 + +# The provider for the row cache to use. +# +# Supported values are: ConcurrentLinkedHashCacheProvider, SerializingCacheProvider +# +# SerializingCacheProvider serialises the contents of the row and stores +# it in native memory, i.e., off the JVM Heap. Serialized rows take +# significantly less memory than "live" rows in the JVM, so you can cache +# more rows in a given memory footprint. And storing the cache off-heap +# means you can use smaller heap sizes, reducing the impact of GC pauses. +# +# It is also valid to specify the fully-qualified class name to a class +# that implements org.apache.cassandra.cache.IRowCacheProvider. 
+# +# Defaults to SerializingCacheProvider +row_cache_provider: SerializingCacheProvider + +# saved caches +saved_caches_directory: /Users/jsanda/Development/redhat/rhq/rhq-data/storage-1/saved_caches + +# commitlog_sync may be either "periodic" or "batch." +# When in batch mode, Cassandra won't ack writes until the commit log +# has been fsynced to disk. It will wait up to +# commitlog_sync_batch_window_in_ms milliseconds for other writes, before +# performing the sync. +# +# commitlog_sync: batch +# commitlog_sync_batch_window_in_ms: 50 +# +# the other option is "periodic" where writes may be acked immediately +# and the CommitLog is simply synced every commitlog_sync_period_in_ms +# milliseconds. +commitlog_sync: periodic +commitlog_sync_period_in_ms: 10000 + +# The size of the individual commitlog file segments. A commitlog +# segment may be archived, deleted, or recycled once all the data +# in it (potentally from each columnfamily in the system) has been +# flushed to sstables. +# +# The default size is 32, which is almost always fine, but if you are +# archiving commitlog segments (see commitlog_archiving.properties), +# then you probably want a finer granularity of archiving; 8 or 16 MB +# is reasonable. +commitlog_segment_size_in_mb: 32 + +# any class that implements the SeedProvider interface and has a +# constructor that takes a Map<String, String> of parameters will do. +seed_provider: + # Addresses of hosts that are deemed contact points. + # Cassandra nodes use this list of hosts to find each other and learn + # the topology of the ring. You must change this if you are running + # multiple nodes! + - class_name: org.apache.cassandra.locator.SimpleSeedProvider + parameters: + # seeds is actually a comma-delimited list of addresses. 
+ # Ex: "<ip1>,<ip2>,<ip3>" + - seeds: "127.0.0.1" + +# emergency pressure valve: each time heap usage after a full (CMS) +# garbage collection is above this fraction of the max, Cassandra will +# flush the largest memtables. +# +# Set to 1.0 to disable. Setting this lower than +# CMSInitiatingOccupancyFraction is not likely to be useful. +# +# RELYING ON THIS AS YOUR PRIMARY TUNING MECHANISM WILL WORK POORLY: +# it is most effective under light to moderate load, or read-heavy +# workloads; under truly massive write load, it will often be too +# little, too late. +flush_largest_memtables_at: 0.75 + +# emergency pressure valve #2: the first time heap usage after a full +# (CMS) garbage collection is above this fraction of the max, +# Cassandra will reduce cache maximum _capacity_ to the given fraction +# of the current _size_. Should usually be set substantially above +# flush_largest_memtables_at, since that will have less long-term +# impact on the system. +# +# Set to 1.0 to disable. Setting this lower than +# CMSInitiatingOccupancyFraction is not likely to be useful. +reduce_cache_sizes_at: 0.85 +reduce_cache_capacity_to: 0.6 + +# For workloads with more data than can fit in memory, Cassandra's +# bottleneck will be reads that need to fetch data from +# disk. "concurrent_reads" should be set to (16 * number_of_drives) in +# order to allow the operations to enqueue low enough in the stack +# that the OS and drives can reorder them. +# +# On the other hand, since writes are almost never IO bound, the ideal +# number of "concurrent_writes" is dependent on the number of cores in +# your system; (8 * number_of_cores) is a good rule of thumb. +concurrent_reads: 32 +concurrent_writes: 32 + +# Total memory to use for memtables. Cassandra will flush the largest +# memtable when this much memory is used. +# If omitted, Cassandra will set it to 1/3 of the heap. +# memtable_total_space_in_mb: 2048 + +# Total space to use for commitlogs. 
Since commitlog segments are +# mmapped, and hence use up address space, the default size is 32 +# on 32-bit JVMs, and 1024 on 64-bit JVMs. +# +# If space gets above this value (it will round up to the next nearest +# segment multiple), Cassandra will flush every dirty CF in the oldest +# segment and remove it. So a small total commitlog space will tend +# to cause more flush activity on less-active columnfamilies. +# commitlog_total_space_in_mb: 4096 + +# This sets the amount of memtable flush writer threads. These will +# be blocked by disk io, and each one will hold a memtable in memory +# while blocked. If you have a large heap and many data directories, +# you can increase this value for better flush performance. +# By default this will be set to the amount of data directories defined. +#memtable_flush_writers: 1 + +# the number of full memtables to allow pending flush, that is, +# waiting for a writer thread. At a minimum, this should be set to +# the maximum number of secondary indexes created on a single CF. +memtable_flush_queue_size: 4 + +# Whether to, when doing sequential writing, fsync() at intervals in +# order to force the operating system to flush the dirty +# buffers. Enable this to avoid sudden dirty buffer flushing from +# impacting read latencies. Almost always a good idea on SSD:s; not +# necessarily on platters. +trickle_fsync: false +trickle_fsync_interval_in_kb: 10240 + +# TCP port, for commands and data +storage_port: 7100 + +# SSL port, for encrypted communication. Unused unless enabled in +# encryption_options +ssl_storage_port: 7101 + +# Address to bind to and tell other Cassandra nodes to connect to. You +# _must_ change this if you want multiple nodes to be able to +# communicate! +# +# Leaving it blank leaves it up to InetAddress.getLocalHost(). 
This +# will always do the Right Thing *if* the node is properly configured +# (hostname, name resolution, etc), and the Right Thing is to use the +# address associated with the hostname (it might not be). +# +# Setting this to 0.0.0.0 is always wrong. +listen_address: 127.0.0.1 + +# Address to broadcast to other Cassandra nodes +# Leaving this blank will set it to the same value as listen_address +# broadcast_address: 1.2.3.4 + +# Internode authentication backend, implementing IInternodeAuthenticator; +# used to allow/disallow connections from peer nodes. +internode_authenticator: org.rhq.cassandra.auth.RhqInternodeAuthenticator + +# Whether to start the native transport server. +# Currently, only the thrift server is started by default because the native +# transport is considered beta. +# Please note that the address on which the native transport is bound is the +# same as the rpc_address. The port however is different and specified below. +start_native_transport: true +# port for the CQL native transport to listen for clients on +native_transport_port: 9142 +# The minimum and maximum threads for handling requests when the native +# transport is used. The meaning is those is similar to the one of +# rpc_min_threads and rpc_max_threads, though the default differ slightly and +# are the ones below: +# native_transport_min_threads: 16 +native_transport_max_threads: 128 + + +# Whether to start the thrift rpc server. +start_rpc: false +# The address to bind the Thrift RPC service to -- clients connect +# here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if +# you want Thrift to listen on all interfaces. +# +# Leaving this blank has the same effect it does for ListenAddress, +# (i.e. it will be based on the configured hostname of the node). 
+rpc_address: 127.0.0.1 +# port for Thrift to listen for clients on +rpc_port: 9160 + +# enable or disable keepalive on rpc connections +rpc_keepalive: true + +# Cassandra provides three out-of-the-box options for the RPC Server: +# +# sync -> One thread per thrift connection. For a very large number of clients, memory +# will be your limiting factor. On a 64 bit JVM, 128KB is the minimum stack size +# per thread, and that will correspond to your use of virtual memory (but physical memory +# may be limited depending on use of stack space). +# +# hsha -> Stands for "half synchronous, half asynchronous." All thrift clients are handled +# asynchronously using a small number of threads that does not vary with the amount +# of thrift clients (and thus scales well to many clients). The rpc requests are still +# synchronous (one thread per active request). +# +# The default is sync because on Windows hsha is about 30% slower. On Linux, +# sync/hsha performance is about the same, with hsha of course using less memory. +# +# Alternatively, can provide your own RPC server by providing the fully-qualified class name +# of an o.a.c.t.TServerFactory that can create an instance of it. +rpc_server_type: sync + +# Uncomment rpc_min|max_thread to set request pool size limits. +# +# Regardless of your choice of RPC server (see above), the number of maximum requests in the +# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync +# RPC server, it also dictates the number of clients that can be connected at all). +# +# The default is unlimited and thus provide no protection against clients overwhelming the server. You are +# encouraged to set a maximum that makes sense for you in production, but do keep in mind that +# rpc_max_threads represents the maximum number of client requests this server may execute concurrently. 
+# +# rpc_min_threads: 16 +# rpc_max_threads: 2048 + +# uncomment to set socket buffer sizes on rpc connections +# rpc_send_buff_size_in_bytes: +# rpc_recv_buff_size_in_bytes: + +# uncomment to set socket buffer size for internode communication +# Note that when setting this, the buffer size is limited by net.core.wmem_max +# and when not setting it it is defined by net.ipv4.tcp_wmem +# See: +# /proc/sys/net/core/wmem_max +# /proc/sys/net/core/rmem_max +# /proc/sys/net/ipv4/tcp_wmem +# /proc/sys/net/ipv4/tcp_wmem +# and: man tcp +# internode_send_buff_size_in_bytes: +# internode_recv_buff_size_in_bytes: + +# Frame size for thrift (maximum field length). +thrift_framed_transport_size_in_mb: 15 + +# The max length of a thrift message, including all fields and +# internal thrift overhead. +thrift_max_message_length_in_mb: 16 + +# Set to true to have Cassandra create a hard link to each sstable +# flushed or streamed locally in a backups/ subdirectory of the +# Keyspace data. Removing these links is the operator's +# responsibility. +incremental_backups: false + +# Whether or not to take a snapshot before each compaction. Be +# careful using this option, since Cassandra won't clean up the +# snapshots for you. Mostly useful if you're paranoid when there +# is a data format change. +snapshot_before_compaction: false + +# Whether or not a snapshot is taken of the data before keyspace truncation +# or dropping of column families. The STRONGLY advised default of true +# should be used to provide data safety. If you set this flag to false, you will +# lose data on truncation or drop. +auto_snapshot: true + +# Add column indexes to a row after its contents reach this size. +# Increase if your column values are large, or if you have a very large +# number of columns. 
The competing causes are, Cassandra has to +# deserialize this much of the row to read a single column, so you want +# it to be small - at least if you do many partial-row reads - but all +# the index data is read for each access, so you don't want to generate +# that wastefully either. +column_index_size_in_kb: 64 + +# Size limit for rows being compacted in memory. Larger rows will spill +# over to disk and use a slower two-pass compaction process. A message +# will be logged specifying the row key. +in_memory_compaction_limit_in_mb: 64 + +# Number of simultaneous compactions to allow, NOT including +# validation "compactions" for anti-entropy repair. Simultaneous +# compactions can help preserve read performance in a mixed read/write +# workload, by mitigating the tendency of small sstables to accumulate +# during a single long running compactions. The default is usually +# fine and if you experience problems with compaction running too +# slowly or too fast, you should look at +# compaction_throughput_mb_per_sec first. +# +# concurrent_compactors defaults to the number of cores. +# Uncomment to make compaction mono-threaded, the pre-0.8 default. +#concurrent_compactors: 1 + +# Multi-threaded compaction. When enabled, each compaction will use +# up to one thread per core, plus one thread per sstable being merged. +# This is usually only useful for SSD-based hardware: otherwise, +# your concern is usually to get compaction to do LESS i/o (see: +# compaction_throughput_mb_per_sec), not more. +multithreaded_compaction: false + +# Throttles compaction to the given total throughput across the entire +# system. The faster you insert data, the faster you need to compact in +# order to keep the sstable count down, but in general, setting this to +# 16 to 32 times the rate you are inserting data is more than sufficient. +# Setting this to 0 disables throttling. Note that this account for all types +# of compaction, including validation compaction. 
+compaction_throughput_mb_per_sec: 16 + +# Track cached row keys during compaction, and re-cache their new +# positions in the compacted sstable. Disable if you use really large +# key caches. +compaction_preheat_key_cache: true + +# Throttles all outbound streaming file transfers on this node to the +# given total throughput in Mbps. This is necessary because Cassandra does +# mostly sequential IO when streaming data during bootstrap or repair, which +# can lead to saturating the network connection and degrading rpc performance. +# When unset, the default is 200 Mbps or 25 MB/s. +# stream_throughput_outbound_megabits_per_sec: 200 + +# How long the coordinator should wait for read operations to complete +read_request_timeout_in_ms: 20000 +# How long the coordinator should wait for seq or index scans to complete +range_request_timeout_in_ms: 20000 +# How long the coordinator should wait for writes to complete +write_request_timeout_in_ms: 20000 +# How long the coordinator should wait for truncates to complete +# (This can be much longer, because we need to flush all CFs +# to make sure we can clear out anything in the commitlog that could +# cause truncated data to reappear.) +truncate_request_timeout_in_ms: 60000 +# The default timeout for other, miscellaneous operations +request_timeout_in_ms: 20000 + +# Enable operation timeout information exchange between nodes to accurately +# measure request timeouts. If disabled cassandra will assume the request +# was forwarded to the replica instantly by the coordinator +# +# Warning: before enabling this property make sure ntp is installed +# and the times are synchronized between the nodes. +cross_node_timeout: false + +# Enable socket timeout for streaming operation. +# When a timeout occurs during streaming, streaming is retried from the start +# of the current file. This *can* involve re-streaming an important amount of +# data, so you should avoid setting the value too low. 
+# Default value is 0, which never timeout streams. +# streaming_socket_timeout_in_ms: 0 + +# phi value that must be reached for a host to be marked down. +# most users should never need to adjust this. +# phi_convict_threshold: 8 + +# endpoint_snitch -- Set this to a class that implements +# IEndpointSnitch. The snitch has two functions: +# - it teaches Cassandra enough about your network topology to route +# requests efficiently +# - it allows Cassandra to spread replicas around your cluster to avoid +# correlated failures. It does this by grouping machines into +# "datacenters" and "racks." Cassandra will do its best not to have +# more than one replica on the same "rack" (which may not actually +# be a physical location) +# +# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER, +# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS +# ARE PLACED. +# +# Out of the box, Cassandra provides +# - SimpleSnitch: +# Treats Strategy order as proximity. This improves cache locality +# when disabling read repair, which can further improve throughput. +# Only appropriate for single-datacenter deployments. +# - PropertyFileSnitch: +# Proximity is determined by rack and data center, which are +# explicitly configured in cassandra-topology.properties. +# - GossipingPropertyFileSnitch +# The rack and datacenter for the local node are defined in +# cassandra-rackdc.properties and propagated to other nodes via gossip. If +# cassandra-topology.properties exists, it is used as a fallback, allowing +# migration from the PropertyFileSnitch. +# - RackInferringSnitch: +# Proximity is determined by rack and data center, which are +# assumed to correspond to the 3rd and 2nd octet of each node's +# IP address, respectively. Unless this happens to match your +# deployment conventions (as it did Facebook's), this is best used +# as an example of writing a custom Snitch class. +# - Ec2Snitch: +# Appropriate for EC2 deployments in a single Region. 
Loads Region +# and Availability Zone information from the EC2 API. The Region is +# treated as the Datacenter, and the Availability Zone as the rack. +# Only private IPs are used, so this will not work across multiple +# Regions. +# - Ec2MultiRegionSnitch: +# Uses public IPs as broadcast_address to allow cross-region +# connectivity. (Thus, you should set seed addresses to the public +# IP as well.) You will need to open the storage_port or +# ssl_storage_port on the public IP firewall. (For intra-Region +# traffic, Cassandra will switch to the private IP after +# establishing a connection.) +# +# You can use a custom Snitch by setting this to the full class name +# of the snitch, which will be assumed to be on your classpath. +endpoint_snitch: SimpleSnitch + +# controls how often to perform the more expensive part of host score +# calculation +dynamic_snitch_update_interval_in_ms: 100 +# controls how often to reset all host scores, allowing a bad host to +# possibly recover +dynamic_snitch_reset_interval_in_ms: 600000 +# if set greater than zero and read_repair_chance is < 1.0, this will allow +# 'pinning' of replicas to hosts in order to increase cache capacity. +# The badness threshold will control how much worse the pinned host has to be +# before the dynamic snitch will prefer other replicas over it. This is +# expressed as a double which represents a percentage. Thus, a value of +# 0.2 means Cassandra would continue to prefer the static snitch values +# until the pinned host was 20% worse than the fastest. +dynamic_snitch_badness_threshold: 0.1 + +# request_scheduler -- Set this to a class that implements +# RequestScheduler, which will schedule incoming client requests +# according to the specific policy. This is useful for multi-tenancy +# with a single Cassandra cluster. +# NOTE: This is specifically for requests from the client and does +# not affect inter node communication. 
+# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place +# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of +# client requests to a node with a separate queue for each +# request_scheduler_id. The scheduler is further customized by +# request_scheduler_options as described below. +request_scheduler: org.apache.cassandra.scheduler.NoScheduler + +# Scheduler Options vary based on the type of scheduler +# NoScheduler - Has no options +# RoundRobin +# - throttle_limit -- The throttle_limit is the number of in-flight +# requests per client. Requests beyond +# that limit are queued up until +# running requests can complete. +# The value of 80 here is twice the number of +# concurrent_reads + concurrent_writes. +# - default_weight -- default_weight is optional and allows for +# overriding the default which is 1. +# - weights -- Weights are optional and will default to 1 or the +# overridden default_weight. The weight translates into how +# many requests are handled during each turn of the +# RoundRobin, based on the scheduler id. +# +# request_scheduler_options: +# throttle_limit: 80 +# default_weight: 5 +# weights: +# Keyspace1: 1 +# Keyspace2: 5 + +# request_scheduler_id -- An identifier based on which to perform +# the request scheduling. Currently the only valid option is keyspace. +# request_scheduler_id: keyspace + +# index_interval controls the sampling of entries from the primary +# row index in terms of space versus time. The larger the interval, +# the smaller and less effective the sampling will be. In technical +# terms, the interval corresponds to the number of index entries that +# are skipped between taking each sample. All the sampled entries +# must fit in memory. Generally, a value between 128 and 512 here +# coupled with a large key cache size on CFs results in the best trade +# offs. 
This value is not often changed, however if you have many +# very small rows (many to an OS page), then increasing this will +# often lower memory usage without a impact on performance. +index_interval: 128 + +# Enable or disable inter-node encryption +# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that +# users generate their own keys) TLS_RSA_WITH_AES_128_CBC_SHA as the cipher +# suite for authentication, key exchange and encryption of the actual data transfers. +# NOTE: No custom encryption options are enabled at the moment +# The available internode options are : all, none, dc, rack +# +# If set to dc cassandra will encrypt the traffic between the DCs +# If set to rack cassandra will encrypt the traffic between the racks +# +# The passwords used in these options must match the passwords used when generating +# the keystore and truststore. For instructions on generating these files, see: +# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSE... +# +server_encryption_options: + internode_encryption: none + keystore: conf/.keystore + keystore_password: cassandra + truststore: conf/.truststore + truststore_password: cassandra + # More advanced defaults below: + # protocol: TLS + # algorithm: SunX509 + # store_type: JKS + # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA] + # require_client_auth: false + +# enable or disable client/server encryption. +client_encryption_options: + enabled: false + keystore: conf/.keystore + keystore_password: cassandra + # require_client_auth: false + # Set trustore and truststore_password if require_client_auth is true + # truststore: conf/.truststore + # truststore_password: cassandra + # More advanced defaults below: + # protocol: TLS + # algorithm: SunX509 + # store_type: JKS + # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA] + +# internode_compression controls whether traffic between nodes is +# compressed. 
+# can be: all - all traffic is compressed +# dc - traffic between different datacenters is compressed +# none - nothing is compressed. +internode_compression: none + +# Enable or disable tcp_nodelay for inter-dc communication. +# Disabling it will result in larger (but fewer) network packets being sent, +# reducing overhead from the TCP protocol itself, at the cost of increasing +# latency if you block for cross-datacenter responses. +inter_dc_tcp_nodelay: true
commit 9981901120d09f9eaef388ef596f11f297ea55c7 Author: John Sanda jsanda@redhat.com Date: Fri Jul 26 15:42:46 2013 -0400
add storage node shutdown operation that uses pid file
There can be problems with getting the actual, current pid from Sigar, so the storage node plugin will first attempt to get the pid from the pid file which should exist on disk. If that fails, then we will attempt to get the pid from Sigar.
This implementation for shutdown does not work for Windows so need to revisit it to add in Windows support. We may even consider delegating to rhqctl.
diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java index 3e55a93..d648ad8 100644 --- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java +++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java @@ -246,8 +246,6 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent
long pid = process.getPid(); try { - getEmsConnection().close(); - process.kill("KILL");
Configuration pluginConfig = getResourceContext().getPluginConfiguration(); @@ -266,7 +264,7 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent } }
- private void waitForNodeToGoDown() throws InterruptedException { + protected void waitForNodeToGoDown() throws InterruptedException { if (OperatingSystem.getInstance().getName().equals(OperatingSystem.NAME_MACOSX)) { // See this thread on VMWare forum: http://communities.vmware.com/message/2187972#2187972 // Unfortunately there is no work around for this failure on Mac OSX so the method will silently return on diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java index 125f4d2..5933093 100644 --- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java @@ -28,6 +28,7 @@ package org.rhq.plugins.storage; import java.io.File; import java.io.FileInputStream; import java.io.FileNotFoundException; +import java.io.FileReader; import java.io.FileWriter; import java.io.IOException; import java.io.StringReader; @@ -39,6 +40,7 @@ import java.util.Set;
import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.hyperic.sigar.SigarException; import org.mc4j.ems.connection.EmsConnection; import org.mc4j.ems.connection.bean.EmsBean; import org.mc4j.ems.connection.bean.attribute.EmsAttribute; @@ -54,9 +56,11 @@ import org.rhq.core.domain.configuration.PropertyMap; import org.rhq.core.domain.configuration.PropertySimple; import org.rhq.core.pluginapi.configuration.ConfigurationFacet; import org.rhq.core.pluginapi.configuration.ConfigurationUpdateReport; +import org.rhq.core.pluginapi.inventory.ProcessScanResult; import org.rhq.core.pluginapi.inventory.ResourceContext; import org.rhq.core.pluginapi.operation.OperationFacet; import org.rhq.core.pluginapi.operation.OperationResult; +import org.rhq.core.system.ProcessInfo; import org.rhq.core.util.StringUtil; import org.rhq.core.util.exception.ThrowableUtil; import org.rhq.core.util.file.FileUtil; @@ -105,11 +109,80 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper return updateKnownNodes(parameters); } else if (name.equals("prepareForBootstrap")) { return prepareForBootstrap(parameters); + } else if (name.equals("shutdown")) { + return shutdownStorageNode(); } else { return super.invokeOperation(name, parameters); } }
+ private OperationResult shutdownStorageNode() { + OperationResult result = new OperationResult(); + File binDir = new File(getBasedir(), "bin"); + File pidFile = new File(binDir, "cassandra.pid"); + + try { + if (pidFile.exists()) { + long pid = readPidFile(pidFile); + log.info("Shutting down storage node with pid " + pid); + ProcessInfo process = findProcessInfo(pid); + if (process != null) { + try { + process.kill("KILL"); + waitForNodeToGoDown(); + pidFile.delete(); + result.setSimpleResult("Successfully storage node with pid " + pid); + } catch (SigarException e) { + log.error("Failed to delete storage node with pid " + process.getPid(), e); + result.setErrorMessage("Failed to delete storage node with pid " + pid + ": " + + ThrowableUtil.getAllMessages(e)); + } + } else { + log.warn("Could not find process info for pid " + pid); + result = shutdownUsingNativeProcessInfo(); + } + + } else { + log.warn("Did not find pid file " + pidFile + ". It should not be modified, deleted, or moved."); + result = shutdownUsingNativeProcessInfo(); + } + } catch (FileNotFoundException e) { + log.error("Could not read pid file " + pidFile, e); + result.setErrorMessage("Could not read pid file " + pidFile + ": " + ThrowableUtil.getAllMessages(e)); + } catch (InterruptedException e) { + log.warn("The shutdown operation was cancelled or interrupted. 
This interruption occurred while trying " + + "to verify that the storage node process has exited."); + result.setErrorMessage("The operation was cancelled or interrupted while trying to verify that the " + + "storage node process has exited."); + } + return result; + } + + private long readPidFile(File pidFile) throws FileNotFoundException { + return Long.parseLong(StreamUtil.slurp(new FileReader(pidFile))); + } + + private ProcessInfo findProcessInfo(long pid) { + List<ProcessScanResult> scanResults = getResourceContext().getNativeProcessesForType(); + + for (ProcessScanResult scanResult : scanResults) { + if (scanResult.getProcessInfo().getPid() == pid) { + return scanResult.getProcessInfo(); + } + } + return null; + } + + private OperationResult shutdownUsingNativeProcessInfo() throws InterruptedException { + log.warn("Could not obtain process info from pid file"); + log.info("Obtaining process info from the system to perform the shutdown"); + + OperationResult result = shutdownNode(); + waitForNodeToGoDown(); + + return result; + } + private OperationResult updateConfiguration(Configuration params) { boolean restartIsRequired = false;
@@ -237,7 +310,7 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper OperationResult result = new OperationResult();
log.info("Stopping storage node"); - OperationResult stopNodeResult = stopNode(); + OperationResult stopNodeResult = shutdownStorageNode(); if (stopNodeResult.getErrorMessage() != null) { log.error("Failed to stop storage node " + this + " Cannot prepare the node for bootstrap which means " + "that the storage node cannot join the cluster. Make sure the storage node is not running and retry " + diff --git a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java index f0744a4..eb4d545 100644 --- a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java +++ b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java @@ -231,6 +231,26 @@ public class StorageNodeComponentITest { assertNodeIsUp("Expected " + storageNode + " to be up after restarting it."); }
+ @Test(dependsOnMethods = "restartStorageNode") + public void prepareForBootstrap() { + Configuration params = Configuration.builder().addSimple("cqlPort", 9242).addSimple("gossipPort", 7200) + .openList("storageNodeIPAddresses", "storageNodeIPAddresse").addSimples("127.0.0.1", "127.0.0.2") + .closeList().build(); + + OperationManager operationManager = PluginContainer.getInstance().getOperationManager(); + OperationServicesAdapter operationsService = new OperationServicesAdapter(operationManager); + + long timeout = 1000 * 60; + OperationContextImpl operationContext = new OperationContextImpl(storageNode.getId()); + OperationServicesResult result = operationsService.invokeOperation(operationContext, "prepareForBootstrap", + params, timeout); + + assertEquals(result.getResultCode(), OperationServicesResultCode.SUCCESS, "The operation failed: " + + result.getErrorStackTrace()); + + assertNodeIsUp("Expected " + storageNode + " to be up after the prepareForBootstrap operation completes."); + } + private void assertNodeIsUp(String msg) { executeAvailabilityScan();
@@ -251,7 +271,6 @@ public class StorageNodeComponentITest {
private Availability getAvailability() { InventoryManager inventoryManager = PluginContainer.getInstance().getInventoryManager(); -// return inventoryManager.getAvailabilityIfKnown(storageNode); return inventoryManager.getCurrentAvailability(storageNode); }
commit 9b9c2ccb1e5336cf8630accb13e4118d36698a3a Author: Heiko W. Rupp hwr@redhat.com Date: Fri Jul 26 14:14:15 2013 +0200
Put the real version in here, as otherwise we have a circular dependency.
diff --git a/modules/enterprise/gui/coregui/pom.xml b/modules/enterprise/gui/coregui/pom.xml index 353f4d9..a227ed4 100644 --- a/modules/enterprise/gui/coregui/pom.xml +++ b/modules/enterprise/gui/coregui/pom.xml @@ -19,7 +19,7 @@
<properties> <!-- dependency versions --> - <gwt.version>${gwt.version}</gwt.version> + <gwt.version>2.5.0</gwt.version> <smartgwt.version>3.0</smartgwt.version>
<!-- If this is too much memory to allocate to your gwt:debug process then override this property in
commit db58d3fc79994b022810e7c2b38e067161cb66e0 Author: John Sanda jsanda@redhat.com Date: Thu Jul 25 22:08:20 2013 -0400
check cluster status using cql driver instead of using jmx
The server had been polling storage cluster nodes with a jmx call to make sure that at least one node is up for client requests. If no nodes are up, then the server goes into maintenance mode.
That check is no longer done with a quartz job using jmx. It is now performed with event notifications we receive from the cql driver. This means that the server no longer has to care about storage node jmx ports. This has a couple of benefits. First, it reduces the complexity involved with changing the jmx port. Second, since the server is no longer making jmx calls to storage nodes, the port can be locked down to localhost access.
Any server-side code that needs access to the driver's Session should instead use the new StorageSession class. StorageSession provides the same API, and it also encapsulates the event handling/propagation logic that is necessary for monitoring the cluster availability.
diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/measurement/MeasurementDataManagerBeanTest.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/measurement/MeasurementDataManagerBeanTest.java index 4cc67c0..9f49143 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/measurement/MeasurementDataManagerBeanTest.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/measurement/MeasurementDataManagerBeanTest.java @@ -39,7 +39,6 @@ import java.util.List;
import javax.ejb.EJB;
-import com.datastax.driver.core.Session; import com.datastax.driver.core.exceptions.NoHostAvailableException;
import org.joda.time.DateTime; @@ -60,13 +59,14 @@ import org.rhq.core.domain.resource.Agent; import org.rhq.core.domain.resource.Resource; import org.rhq.core.domain.resource.ResourceType; import org.rhq.enterprise.server.auth.SubjectManagerLocal; -import org.rhq.enterprise.server.storage.StorageClientManagerBean; import org.rhq.enterprise.server.drift.DriftServerPluginService; import org.rhq.enterprise.server.resource.ResourceManagerLocal; +import org.rhq.enterprise.server.storage.StorageClientManagerBean; import org.rhq.enterprise.server.test.AbstractEJB3Test; import org.rhq.enterprise.server.test.TransactionCallback; import org.rhq.enterprise.server.util.ResourceTreeHelper; import org.rhq.server.metrics.MetricsDAO; +import org.rhq.server.metrics.StorageSession; import org.rhq.server.metrics.domain.AggregateNumericMetric; import org.rhq.server.metrics.domain.AggregateType; import org.rhq.server.metrics.domain.MetricsTable; @@ -360,7 +360,7 @@ public class MeasurementDataManagerBeanTest extends AbstractEJB3Test {
private void purgeMetricsTables() { try { - Session session = storageClientManager.getSession(); + StorageSession session = storageClientManager.getSession();
session.execute("TRUNCATE " + MetricsTable.RAW); session.execute("TRUNCATE " + MetricsTable.ONE_HOUR); diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/measurement/test/MeasurementBaselineManagerTest.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/measurement/test/MeasurementBaselineManagerTest.java index 49048ee..fdf2b1a 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/measurement/test/MeasurementBaselineManagerTest.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/measurement/test/MeasurementBaselineManagerTest.java @@ -28,7 +28,6 @@ import java.util.Random; import javax.inject.Inject; import javax.persistence.Query;
-import com.datastax.driver.core.Session; import com.datastax.driver.core.exceptions.NoHostAvailableException;
import org.testng.annotations.Test; @@ -45,13 +44,14 @@ import org.rhq.core.domain.resource.Resource; import org.rhq.core.domain.resource.ResourceCategory; import org.rhq.core.domain.resource.ResourceType; import org.rhq.core.domain.util.PageControl; -import org.rhq.enterprise.server.storage.StorageClientManagerBean; import org.rhq.enterprise.server.measurement.MeasurementBaselineManagerLocal; import org.rhq.enterprise.server.measurement.MeasurementOOBManagerLocal; import org.rhq.enterprise.server.resource.ResourceManagerLocal; +import org.rhq.enterprise.server.storage.StorageClientManagerBean; import org.rhq.enterprise.server.test.AbstractEJB3Test; import org.rhq.enterprise.server.util.LookupUtil; import org.rhq.server.metrics.MetricsDAO; +import org.rhq.server.metrics.StorageSession; import org.rhq.server.metrics.domain.AggregateNumericMetric; import org.rhq.server.metrics.domain.AggregateType; import org.rhq.server.metrics.domain.MetricsTable; @@ -645,7 +645,7 @@ public class MeasurementBaselineManagerTest extends AbstractEJB3Test { // Query q = em.createNativeQuery(sql); // q.executeUpdate(); try { - Session session = storageClientManager.getSession(); + StorageSession session = storageClientManager.getSession(); session.execute("DELETE FROM " + MetricsTable.ONE_HOUR.getTableName() + " WHERE schedule_id = " + schedule.getId()); } catch (NoHostAvailableException e) { diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/StartupBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/StartupBean.java index 7ce77d8..fa88263 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/StartupBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/StartupBean.java @@ -450,7 +450,6 @@ public class StartupBean implements StartupLocal { }
storageClientManager.init(); - storageClusterHeartBeatJob.scheduleJob(); }
/** diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java index 2d48092..ae80e50 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java @@ -50,6 +50,7 @@ import org.rhq.server.metrics.DateTimeService; import org.rhq.server.metrics.MetricsConfiguration; import org.rhq.server.metrics.MetricsDAO; import org.rhq.server.metrics.MetricsServer; +import org.rhq.server.metrics.StorageSession;
/** * @author John Sanda @@ -70,7 +71,7 @@ public class StorageClientManagerBean { @EJB private StorageNodeManagerLocal storageNodeManager;
- private Session session; + private StorageSession session; private MetricsConfiguration metricsConfiguration; private MetricsDAO metricsDAO; private MetricsServer metricsServer; @@ -95,7 +96,12 @@ public class StorageClientManagerBean { String password = getRequiredStorageProperty(PASSWORD_PROP);
metricsConfiguration = new MetricsConfiguration(); - session = createSession(username, password, storageNodeManager.getStorageNodes()); + + Session wrappedSession = createSession(username, password, storageNodeManager.getStorageNodes()); + session = new StorageSession(wrappedSession); + + session.addStorageStateListener(new StorageClusterMonitor()); + metricsDAO = new MetricsDAO(session, metricsConfiguration);
Server server = serverManager.getServer(); @@ -121,15 +127,15 @@ public class StorageClientManagerBean { }
public MetricsDAO getMetricsDAO() { - return this.metricsDAO; + return metricsDAO; }
public MetricsServer getMetricsServer() { - return this.metricsServer; + return metricsServer; }
- public Session getSession() { - return this.session; + public StorageSession getSession() { + return session; }
public MetricsConfiguration getMetricsConfiguration() { diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java new file mode 100644 index 0000000..ec28888 --- /dev/null +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java @@ -0,0 +1,69 @@ +package org.rhq.enterprise.server.storage; + +import java.net.InetAddress; +import java.util.concurrent.atomic.AtomicBoolean; + +import com.datastax.driver.core.exceptions.NoHostAvailableException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import org.rhq.core.domain.cloud.Server; +import org.rhq.enterprise.server.auth.SubjectManagerLocal; +import org.rhq.enterprise.server.cloud.TopologyManagerLocal; +import org.rhq.enterprise.server.cloud.instance.ServerManagerLocal; +import org.rhq.enterprise.server.util.LookupUtil; +import org.rhq.server.metrics.StorageStateListener; + +/** + * @author John Sanda + */ +public class StorageClusterMonitor implements StorageStateListener { + + private Log log = LogFactory.getLog(StorageClusterMonitor.class); + + private AtomicBoolean isClusterDown = new AtomicBoolean(false); + + public boolean isClusterDown() { + return isClusterDown.get(); + } + + @Override + public void onStorageNodeUp(InetAddress address) { + log.info("Storage node at " + address.getHostAddress() + " is up"); + + if (isClusterDown.compareAndSet(true, false)) { + log.info("Taking server out of maintenance mode"); + updateServerMode(Server.OperationMode.NORMAL); + } + } + + @Override + public void onStorageNodeDown(InetAddress address) { + log.info("Storage node at " + address.getHostAddress() + " is down"); + } + + @Override + public void onStorageNodeRemoved(InetAddress address) { + log.info("Storage node at " + address.getHostAddress() + " has been 
removed from the cluster"); + } + + @Override + public void onStorageClusterDown(NoHostAvailableException e) { + if (isClusterDown.compareAndSet(false, true)) { + log.error("The server cannot connect to any storage nodes. The server will now go into maintenance mode."); + updateServerMode(Server.OperationMode.MAINTENANCE); + } + } + + private void updateServerMode(Server.OperationMode mode) { + ServerManagerLocal serverManager = LookupUtil.getServerManager(); + TopologyManagerLocal topologyManager = LookupUtil.getTopologyManager(); + SubjectManagerLocal subjectManager = LookupUtil.getSubjectManager(); + + Server server = serverManager.getServer(); + + topologyManager.updateServerMode(subjectManager.getOverlord(), new Integer[] {server.getId()}, + Server.OperationMode.MAINTENANCE); + } +} diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsDAO.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsDAO.java index 3ea47e8..63c11f1 100644 --- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsDAO.java +++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsDAO.java @@ -26,20 +26,13 @@ package org.rhq.server.metrics;
-import static com.datastax.driver.core.querybuilder.QueryBuilder.insertInto; - -import java.util.ArrayList; import java.util.Date; import java.util.List; import java.util.Map; -import java.util.Set;
import com.datastax.driver.core.BoundStatement; import com.datastax.driver.core.PreparedStatement; import com.datastax.driver.core.ResultSet; -import com.datastax.driver.core.ResultSetFuture; -import com.datastax.driver.core.Session; -import com.datastax.driver.core.Statement; import com.datastax.driver.core.exceptions.NoHostAvailableException;
import org.apache.commons.logging.Log; @@ -67,7 +60,7 @@ public class MetricsDAO {
private final Log log = LogFactory.getLog(MetricsDAO.class);
- private Session session; + private StorageSession storageSession;
private MetricsConfiguration configuration;
@@ -86,8 +79,8 @@ public class MetricsDAO { private PreparedStatement findTimeSliceForIndex; private PreparedStatement deleteIndexEntries;
- public MetricsDAO(Session session, MetricsConfiguration configuration) { - this.session = session; + public MetricsDAO(StorageSession session, MetricsConfiguration configuration) { + this.storageSession = session; this.configuration = configuration; initPreparedStatements(); } @@ -104,176 +97,118 @@ public class MetricsDAO { // re-initialized and re-prepared with the new TTLs. None of this would be necessary // if the TTL value could be a bound value.
- insertRawData = session.prepare( + insertRawData = storageSession.prepare( "INSERT INTO " + MetricsTable.RAW + " (schedule_id, time, value) VALUES (?, ?, ?) USING TTL " + configuration.getRawTTL());
- rawMetricsQuery = session.prepare("SELECT schedule_id, time, value FROM " + MetricsTable.RAW + + rawMetricsQuery = storageSession.prepare("SELECT schedule_id, time, value FROM " + MetricsTable.RAW + " WHERE schedule_id = ? AND time >= ? AND time < ? ORDER BY time");
- insertOneHourData = session.prepare("INSERT INTO " + MetricsTable.ONE_HOUR + "(schedule_id, time, " + + insertOneHourData = storageSession.prepare("INSERT INTO " + MetricsTable.ONE_HOUR + "(schedule_id, time, " + "type, value) VALUES (?, ?, ?, ?) USING TTL " + configuration.getOneHourTTL());
- insertSixHourData = session.prepare("INSERT INTO " + MetricsTable.SIX_HOUR + "(schedule_id, time, " + + insertSixHourData = storageSession.prepare("INSERT INTO " + MetricsTable.SIX_HOUR + "(schedule_id, time, " + "type, value) VALUES (?, ?, ?, ?) USING TTL " + configuration.getOneHourTTL());
- insertTwentyFourHourData = session.prepare("INSERT INTO " + MetricsTable.TWENTY_FOUR_HOUR + "(schedule_id, " + + insertTwentyFourHourData = storageSession.prepare("INSERT INTO " + MetricsTable.TWENTY_FOUR_HOUR + "(schedule_id, " + "time, type, value) VALUES (?, ?, ?, ?) USING TTL " + configuration.getOneHourTTL());
- updateMetricsIndex = session.prepare("INSERT INTO " + MetricsTable.INDEX + " (bucket, time, schedule_id) " + + updateMetricsIndex = storageSession.prepare("INSERT INTO " + MetricsTable.INDEX + " (bucket, time, schedule_id) " + "VALUES (?, ?, ?)");
- findLatestRawMetric = session.prepare("SELECT schedule_id, time, value FROM " + MetricsTable.RAW + + findLatestRawMetric = storageSession.prepare("SELECT schedule_id, time, value FROM " + MetricsTable.RAW + " WHERE schedule_id = ? ORDER BY time DESC LIMIT 1");
- findRawMetrics = session.prepare("SELECT schedule_id, time, value FROM " + MetricsTable.RAW + + findRawMetrics = storageSession.prepare("SELECT schedule_id, time, value FROM " + MetricsTable.RAW + " WHERE schedule_id = ? AND time >= ? AND time <= ?");
- findOneHourMetricsByDateRange = session.prepare("SELECT schedule_id, time, type, value FROM " + + findOneHourMetricsByDateRange = storageSession.prepare("SELECT schedule_id, time, type, value FROM " + MetricsTable.ONE_HOUR + " WHERE schedule_id = ? AND time >= ? AND time < ?");
- findSixHourMetricsByDateRange = session.prepare("SELECT schedule_id, time, type, value FROM " + findSixHourMetricsByDateRange = storageSession.prepare("SELECT schedule_id, time, type, value FROM " + MetricsTable.SIX_HOUR + " WHERE schedule_id = ? AND time >= ? AND time < ?");
- findTwentyFourHourMetricsByDateRange = session.prepare("SELECT schedule_id, time, type, value FROM " + + findTwentyFourHourMetricsByDateRange = storageSession.prepare("SELECT schedule_id, time, type, value FROM " + MetricsTable.TWENTY_FOUR_HOUR + " WHERE schedule_id = ? AND time >= ? AND time < ?");
- findIndexEntries = session.prepare("SELECT time, schedule_id FROM " + MetricsTable.INDEX + + findIndexEntries = storageSession.prepare("SELECT time, schedule_id FROM " + MetricsTable.INDEX + " WHERE bucket = ? AND time = ?");
- findTimeSliceForIndex = session.prepare("SELECT time FROM " + MetricsTable.INDEX + + findTimeSliceForIndex = storageSession.prepare("SELECT time FROM " + MetricsTable.INDEX + " WHERE bucket = ? AND time = ?");
- deleteIndexEntries = session.prepare("DELETE FROM " + MetricsTable.INDEX + " WHERE bucket = ? AND time = ?"); + deleteIndexEntries = storageSession.prepare("DELETE FROM " + MetricsTable.INDEX + " WHERE bucket = ? AND time = ?");
long endTime = System.currentTimeMillis(); log.info("Finished initializing prepared statements in " + (endTime - startTime) + " ms"); }
- public ResultSetFuture insertRawData(MeasurementDataNumeric data) { + public StorageResultSetFuture insertRawData(MeasurementDataNumeric data) { BoundStatement statement = insertRawData.bind(data.getScheduleId(), new Date(data.getTimestamp()), data.getValue()); - return session.executeAsync(statement); - } - - public List<MetricResultFuture<MeasurementDataNumeric>> insertRawMetricsAsync(Set<MeasurementDataNumeric> dataSet, - int ttl) { - try { - List<MetricResultFuture<MeasurementDataNumeric>> resultFutures = new ArrayList<MetricResultFuture<MeasurementDataNumeric>>(); - - String cql = "INSERT INTO raw_metrics (schedule_id, time, value) VALUES (?, ?, ?) " + "USING TTL " + ttl; - PreparedStatement statement = session.prepare(cql); - - for (MeasurementDataNumeric data : dataSet) { - BoundStatement boundStatement = statement.bind(data.getScheduleId(), new Date(data.getTimestamp()), - data.getValue()); - - resultFutures.add(new MetricResultFuture<MeasurementDataNumeric>(session.executeAsync(boundStatement), - data)); - } - - return resultFutures; - } catch (NoHostAvailableException e) { - throw new CQLException(e); - } + return storageSession.executeAsync(statement); }
public ResultSet insertOneHourData(int scheduleId, long timestamp, AggregateType type, double value) { BoundStatement statement = insertOneHourData.bind(scheduleId, new Date(timestamp), type.ordinal(), value); - return session.execute(statement); + return storageSession.execute(statement); }
public ResultSet insertSixHourData(int scheduleId, long timestamp, AggregateType type, double value) { BoundStatement statement = insertSixHourData.bind(scheduleId, new Date(timestamp), type.ordinal(), value); - return session.execute(statement); + return storageSession.execute(statement); }
public ResultSet insertTwentyFourHourData(int scheduleId, long timestamp, AggregateType type, double value) { BoundStatement statement = insertTwentyFourHourData.bind(scheduleId, new Date(timestamp), type.ordinal(), value); - return session.execute(statement); - } - - public List<MetricResultFuture<AggregateNumericMetric>> insertAggregatesAsync(MetricsTable table, - List<AggregateNumericMetric> metrics, int ttl) { - List<MetricResultFuture<AggregateNumericMetric>> updates = new ArrayList<MetricResultFuture<AggregateNumericMetric>>(); - - if (metrics.isEmpty()) { - return updates; - } - - try { - Statement statement = null; - - for (AggregateNumericMetric metric : metrics) { - statement = insertInto(table.getTableName()) - .value("schedule_id", metric.getScheduleId()) - .value("time", new Date(metric.getTimestamp())) - .value("type", AggregateType.MIN.ordinal()) - .value("value", metric.getMin()); - updates.add(new MetricResultFuture<AggregateNumericMetric>(session.executeAsync(statement), metric)); - - statement = insertInto(table.getTableName()) - .value("schedule_id", metric.getScheduleId()) - .value("time", new Date(metric.getTimestamp())) - .value("type", AggregateType.MAX.ordinal()) - .value("value", metric.getMax()); - updates.add(new MetricResultFuture<AggregateNumericMetric>(session.executeAsync(statement), metric)); - - statement = insertInto(table.getTableName()) - .value("schedule_id", metric.getScheduleId()) - .value("time", new Date(metric.getTimestamp())) - .value("type", AggregateType.AVG.ordinal()) - .value("value", metric.getAvg()); - updates.add(new MetricResultFuture<AggregateNumericMetric>(session.executeAsync(statement), metric)); - } - - return updates; - } catch (Exception e) { - throw new CQLException(e); - } + return storageSession.execute(statement); }
public Iterable<RawNumericMetric> findRawMetrics(int scheduleId, long startTime, long endTime) { try { BoundStatement boundStatement = rawMetricsQuery.bind(scheduleId, new Date(startTime), new Date(endTime)); - return new SimplePagedResult<RawNumericMetric>(boundStatement, new RawNumericMetricMapper(false), session); + return new SimplePagedResult<RawNumericMetric>(boundStatement, new RawNumericMetricMapper(false), + storageSession); } catch (NoHostAvailableException e) { throw new CQLException(e); } }
- public ResultSetFuture findRawMetricsAsync(int scheduleId, long startTime, long endTime) { + public StorageResultSetFuture findRawMetricsAsync(int scheduleId, long startTime, long endTime) { BoundStatement boundStatement = rawMetricsQuery.bind(scheduleId, new Date(startTime), new Date(endTime)); - return session.executeAsync(boundStatement); + return storageSession.executeAsync(boundStatement); }
public RawNumericMetric findLatestRawMetric(int scheduleId) { RawNumericMetricMapper mapper = new RawNumericMetricMapper(false); BoundStatement boundStatement = findLatestRawMetric.bind(scheduleId); - ResultSet resultSet = session.execute(boundStatement); + ResultSet resultSet = storageSession.execute(boundStatement);
return mapper.mapOne(resultSet); }
public Iterable<RawNumericMetric> findRawMetrics(List<Integer> scheduleIds, long startTime, long endTime) { return new ListPagedResult<RawNumericMetric>(findRawMetrics, scheduleIds, startTime, endTime, - new RawNumericMetricMapper(), session); + new RawNumericMetricMapper(), storageSession); }
public Iterable<AggregateNumericMetric> findOneHourMetrics(int scheduleId, long startTime, long endTime) { BoundStatement statement = findOneHourMetricsByDateRange.bind(scheduleId, new Date(startTime), new Date(endTime)); - return new SimplePagedResult<AggregateNumericMetric>(statement, new AggregateNumericMetricMapper(), session); + return new SimplePagedResult<AggregateNumericMetric>(statement, new AggregateNumericMetricMapper(), + storageSession); }
public Iterable<AggregateNumericMetric> findSixHourMetrics(int scheduleId, long startTime, long endTime) { BoundStatement statement = findSixHourMetricsByDateRange.bind(scheduleId, new Date(startTime), new Date(endTime)); - return new SimplePagedResult<AggregateNumericMetric>(statement, new AggregateNumericMetricMapper(), session); + return new SimplePagedResult<AggregateNumericMetric>(statement, new AggregateNumericMetricMapper(), + storageSession); }
public Iterable<AggregateNumericMetric> findTwentyFourHourMetrics(int scheduleId, long startTime, long endTime) { BoundStatement statement = findTwentyFourHourMetricsByDateRange.bind(scheduleId, new Date(startTime), new Date(endTime)); - return new SimplePagedResult<AggregateNumericMetric>(statement, new AggregateNumericMetricMapper(), session); + return new SimplePagedResult<AggregateNumericMetric>(statement, new AggregateNumericMetricMapper(), + storageSession); }
public Iterable<AggregateSimpleNumericMetric> findAggregatedSimpleOneHourMetric(int scheduleId, long startTime, @@ -281,52 +216,52 @@ public class MetricsDAO { BoundStatement statement = findOneHourMetricsByDateRange.bind(scheduleId, new Date(startTime), new Date(endTime)); return new SimplePagedResult<AggregateSimpleNumericMetric>(statement, new AggregateSimpleNumericMetricMapper(), - session); + storageSession); }
public Iterable<AggregateNumericMetric> findOneHourMetrics(List<Integer> scheduleIds, long startTime, long endTime) { return new ListPagedResult<AggregateNumericMetric>(findOneHourMetricsByDateRange, scheduleIds, startTime, endTime, - new AggregateNumericMetricMapper(), session); + new AggregateNumericMetricMapper(), storageSession); }
public Iterable<AggregateNumericMetric> findSixHourMetrics(List<Integer> scheduleIds, long startTime, long endTime) { return new ListPagedResult<AggregateNumericMetric>(findSixHourMetricsByDateRange, scheduleIds, startTime, endTime, - new AggregateNumericMetricMapper(), session); + new AggregateNumericMetricMapper(), storageSession); }
public Iterable<AggregateNumericMetric> findTwentyFourHourMetrics(List<Integer> scheduleIds, long startTime, long endTime) { return new ListPagedResult<AggregateNumericMetric>(findTwentyFourHourMetricsByDateRange, scheduleIds, startTime, endTime, - new AggregateNumericMetricMapper(), session); + new AggregateNumericMetricMapper(), storageSession); }
public Iterable<MetricsIndexEntry> findMetricsIndexEntries(final MetricsTable table, long timestamp) { BoundStatement statement = findIndexEntries.bind(table.toString(), new Date(timestamp)); - return new SimplePagedResult<MetricsIndexEntry>(statement, new MetricsIndexEntryMapper(table), session); + return new SimplePagedResult<MetricsIndexEntry>(statement, new MetricsIndexEntryMapper(table), storageSession); }
public ResultSet setFindTimeSliceForIndex(MetricsTable table, long timestamp) { BoundStatement statement = findTimeSliceForIndex.bind(table.toString(), new Date(timestamp)); - return session.execute(statement); + return storageSession.execute(statement); }
public void updateMetricsIndex(MetricsTable table, Map<Integer, Long> updates) { for (Integer scheduleId : updates.keySet()) { BoundStatement statement = updateMetricsIndex.bind(table.getTableName(), new Date(updates.get(scheduleId)), scheduleId); - session.execute(statement); + storageSession.execute(statement); } }
- public ResultSetFuture updateMetricsIndex(MetricsTable table, int scheduleId, long timestamp) { + public StorageResultSetFuture updateMetricsIndex(MetricsTable table, int scheduleId, long timestamp) { BoundStatement statement = updateMetricsIndex.bind(table.getTableName(), new Date(timestamp), scheduleId); - return session.executeAsync(statement); + return storageSession.executeAsync(statement); }
public void deleteMetricsIndexEntries(MetricsTable table, long timestamp) { BoundStatement statement = deleteIndexEntries.bind(table.getTableName(), new Date(timestamp)); - session.execute(statement); + storageSession.execute(statement); } } diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsServer.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsServer.java index 7395953..9756006 100644 --- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsServer.java +++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsServer.java @@ -35,7 +35,6 @@ import java.util.concurrent.Semaphore; import java.util.concurrent.atomic.AtomicInteger;
import com.datastax.driver.core.ResultSet; -import com.datastax.driver.core.ResultSetFuture; import com.datastax.driver.core.Row; import com.google.common.util.concurrent.FutureCallback; import com.google.common.util.concurrent.Futures; @@ -299,7 +298,7 @@ public class MetricsServer {
for (final MeasurementDataNumeric data : dataSet) { semaphore.acquire(); - ResultSetFuture resultSetFuture = dao.insertRawData(data); + StorageResultSetFuture resultSetFuture = dao.insertRawData(data); Futures.addCallback(resultSetFuture, new FutureCallback<ResultSet>() { @Override public void onSuccess(ResultSet rows) { @@ -325,7 +324,7 @@ public class MetricsServer {
long timeSlice = dateTimeService.getTimeSlice(new DateTime(rawData.getTimestamp()), configuration.getRawTimeSliceDuration()).getMillis(); - ResultSetFuture resultSetFuture = dao.updateMetricsIndex(MetricsTable.ONE_HOUR, rawData.getScheduleId(), + StorageResultSetFuture resultSetFuture = dao.updateMetricsIndex(MetricsTable.ONE_HOUR, rawData.getScheduleId(), timeSlice); Futures.addCallback(resultSetFuture, new FutureCallback<ResultSet>() { @Override diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/StorageResultSetFuture.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/StorageResultSetFuture.java new file mode 100644 index 0000000..902bf4a --- /dev/null +++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/StorageResultSetFuture.java @@ -0,0 +1,76 @@ +package org.rhq.server.metrics; + +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.ResultSetFuture; +import com.datastax.driver.core.exceptions.NoHostAvailableException; +import com.google.common.util.concurrent.ListenableFuture; + +/** + * @author John Sanda + */ +public class StorageResultSetFuture implements ListenableFuture<ResultSet> { + + private ResultSetFuture wrapperFuture; + + private List<StorageStateListener> listeners; + + public StorageResultSetFuture(ResultSetFuture resultSetFuture, List<StorageStateListener> listeners) { + wrapperFuture = resultSetFuture; + this.listeners = listeners; + } + + @Override + public void addListener(Runnable listener, Executor executor) { + wrapperFuture.addListener(listener, executor); + } + + @Override + public boolean cancel(boolean mayInterruptIfRunning) { + return wrapperFuture.cancel(mayInterruptIfRunning); + } + + @Override + public boolean 
isCancelled() { + return wrapperFuture.isCancelled(); + } + + @Override + public boolean isDone() { + return wrapperFuture.isDone(); + } + + @Override + public ResultSet get() throws InterruptedException, ExecutionException { + try { + return wrapperFuture.get(); + } catch (ExecutionException e) { + return handleException(e); + } + } + + @Override + public ResultSet get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, + TimeoutException { + try { + return wrapperFuture.get(timeout, unit); + } catch (ExecutionException e) { + return handleException(e); + } + } + + private ResultSet handleException(ExecutionException e) throws ExecutionException { + if (e.getCause() instanceof NoHostAvailableException) { + NoHostAvailableException cause = (NoHostAvailableException) e.getCause(); + for (StorageStateListener listener : listeners) { + listener.onStorageClusterDown(cause); + } + } + throw e; + } +} diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/StorageSession.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/StorageSession.java new file mode 100644 index 0000000..3f7af3f --- /dev/null +++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/StorageSession.java @@ -0,0 +1,110 @@ +package org.rhq.server.metrics; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.Host; +import com.datastax.driver.core.PreparedStatement; +import com.datastax.driver.core.Query; +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.ResultSetFuture; +import com.datastax.driver.core.Session; +import com.datastax.driver.core.exceptions.NoHostAvailableException; + +/** + * @author John Sanda + */ +public class StorageSession implements Host.StateListener { + + private Session wrappedSession; + + private 
List<StorageStateListener> listeners = new ArrayList<StorageStateListener>(); + + public StorageSession(Session wrappedSession) { + this.wrappedSession = wrappedSession; + this.wrappedSession.getCluster().register(this); + } + + public void addStorageStateListener(StorageStateListener listener) { + listeners.add(listener); + } + + public ResultSet execute(String query) { + try { + return wrappedSession.execute(query); + } catch (NoHostAvailableException e) { + return handleException(e); + } + } + + public ResultSet execute(Query query) { + try { + return wrappedSession.execute(query); + } catch (NoHostAvailableException e) { + return handleException(e); + } + } + + public StorageResultSetFuture executeAsync(String query) { + ResultSetFuture future = wrappedSession.executeAsync(query); + return new StorageResultSetFuture(future, listeners); + } + + public StorageResultSetFuture executeAsync(Query query) { + ResultSetFuture future = wrappedSession.executeAsync(query); + return new StorageResultSetFuture(future, listeners); + } + + public PreparedStatement prepare(String query) { + return wrappedSession.prepare(query); + } + + public void shutdown() { + wrappedSession.shutdown(); + } + + public boolean shutdown(long timeout, TimeUnit unit) { + return wrappedSession.shutdown(timeout, unit); + } + + public Cluster getCluster() { + return wrappedSession.getCluster(); + } + + @Override + public void onAdd(Host host) { + for (StorageStateListener listener : listeners) { + listener.onStorageNodeUp(host.getAddress()); + } + } + + @Override + public void onUp(Host host) { + for (StorageStateListener listener : listeners) { + listener.onStorageNodeUp(host.getAddress()); + } + } + + @Override + public void onDown(Host host) { + for (StorageStateListener listener : listeners) { + listener.onStorageNodeDown(host.getAddress()); + } + } + + @Override + public void onRemove(Host host) { + for (StorageStateListener listener : listeners) { + 
listener.onStorageNodeRemoved(host.getAddress()); + } + } + + private ResultSet handleException(NoHostAvailableException e) { + for (StorageStateListener listener : listeners) { + listener.onStorageClusterDown(e); + } + throw e; + } +} diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/StorageStateListener.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/StorageStateListener.java new file mode 100644 index 0000000..6a3a216 --- /dev/null +++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/StorageStateListener.java @@ -0,0 +1,20 @@ +package org.rhq.server.metrics; + +import java.net.InetAddress; + +import com.datastax.driver.core.exceptions.NoHostAvailableException; + +/** + * @author John Sanda + */ +public interface StorageStateListener { + + void onStorageNodeUp(InetAddress address); + + void onStorageNodeDown(InetAddress address); + + void onStorageNodeRemoved(InetAddress address); + + void onStorageClusterDown(NoHostAvailableException e); + +} diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/domain/ListPagedResult.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/domain/ListPagedResult.java index 1c1fdd2..f3abd20 100644 --- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/domain/ListPagedResult.java +++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/domain/ListPagedResult.java @@ -33,10 +33,10 @@ import java.util.List; import com.datastax.driver.core.BoundStatement; import com.datastax.driver.core.PreparedStatement; import com.datastax.driver.core.ResultSet; -import com.datastax.driver.core.Session; import com.datastax.driver.core.exceptions.NoHostAvailableException;
import org.rhq.server.metrics.CQLException; +import org.rhq.server.metrics.StorageSession;
/** * This class helps paginate Cassandra results that have a list in the matching clause. Instead of running @@ -52,12 +52,12 @@ public class ListPagedResult<T> implements Iterable<T> { private final long startTime; private final long endTime; private final ResultSetMapper<T> mapper; - private final Session session; + private final StorageSession session;
private final PreparedStatement preparedStatement;
public ListPagedResult(PreparedStatement preparedStatement, List<Integer> scheduleIds, long startTime, long endTime, - ResultSetMapper<T> mapper, Session session) { + ResultSetMapper<T> mapper, StorageSession session) { this.preparedStatement = preparedStatement; this.scheduleIds = new LinkedList<Integer>(scheduleIds); this.startTime = startTime; diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/domain/SimplePagedResult.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/domain/SimplePagedResult.java index d14195c..e31db95 100644 --- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/domain/SimplePagedResult.java +++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/domain/SimplePagedResult.java @@ -34,6 +34,7 @@ import com.datastax.driver.core.SimpleStatement; import com.datastax.driver.core.exceptions.NoHostAvailableException;
import org.rhq.server.metrics.CQLException; +import org.rhq.server.metrics.StorageSession;
/** * This class is just a placeholder for future pagination implementations once Cassandra gets native support for paging results. @@ -51,7 +52,7 @@ public class SimplePagedResult<T> implements Iterable<T> {
private final ResultSetMapper<T> mapper; private final Query query; - private final Session session; + private final StorageSession session; private final int pageSize;
/** @@ -60,7 +61,7 @@ public class SimplePagedResult<T> implements Iterable<T> { * @param session Cassandra session * @param pageSize page size */ - public SimplePagedResult(Query query, ResultSetMapper<T> mapper, Session session, int pageSize) { + public SimplePagedResult(Query query, ResultSetMapper<T> mapper, StorageSession session, int pageSize) { this.query = query; this.mapper = mapper; this.session = session; @@ -73,7 +74,7 @@ public class SimplePagedResult<T> implements Iterable<T> { * @param session Cassandra session * @param pageSize page size */ - public SimplePagedResult(String query, ResultSetMapper<T> mapper, Session session, int pageSize) { + public SimplePagedResult(String query, ResultSetMapper<T> mapper, StorageSession session, int pageSize) { this(new SimpleStatement(query), mapper, session, pageSize); }
@@ -82,7 +83,7 @@ public class SimplePagedResult<T> implements Iterable<T> { * @param mapper result set mapper * @param session Cassandra session */ - public SimplePagedResult(Query query, ResultSetMapper<T> mapper, Session session) { + public SimplePagedResult(Query query, ResultSetMapper<T> mapper, StorageSession session) { this(query, mapper, session, DEFAULT_PAGE_SIZE); }
@@ -92,7 +93,7 @@ public class SimplePagedResult<T> implements Iterable<T> { * @param session Cassandra session * @param pageSize page size */ - public SimplePagedResult(String query, ResultSetMapper<T> mapper, Session session) { + public SimplePagedResult(String query, ResultSetMapper<T> mapper, StorageSession session) { this(new SimpleStatement(query), mapper, session); }
diff --git a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/CassandraIntegrationTest.java b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/CassandraIntegrationTest.java index c4c48c8..b227b2d 100644 --- a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/CassandraIntegrationTest.java +++ b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/CassandraIntegrationTest.java @@ -33,6 +33,7 @@ import java.util.concurrent.CountDownLatch;
import com.datastax.driver.core.BoundStatement; import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.Host; import com.datastax.driver.core.PreparedStatement; import com.datastax.driver.core.ResultSet; import com.datastax.driver.core.Session; @@ -65,8 +66,12 @@ public class CassandraIntegrationTest {
protected static Session session;
+ protected static StorageSession storageSession; + private static DateTimeService dateTimeService;
+ private final Log log = LogFactory.getLog(CassandraIntegrationTest.class); + @BeforeSuite @DeployCluster(numNodes = 2, username = "rhqadmin", password = "rhqadmin", waitForSchemaAgreement = true) public void deployCluster() throws Exception { @@ -76,7 +81,31 @@ public class CassandraIntegrationTest { .addContactPoints("127.0.0.1", "127.0.02") .withCredentials("rhqadmin", "rhqadmin") .build(); + + cluster.register(new Host.StateListener() { + @Override + public void onAdd(Host host) { + log.info("host " + host + " added"); + } + + @Override + public void onUp(Host host) { + log.info("host " + host + " up"); + } + + @Override + public void onDown(Host host) { + log.info("host " + host + " down"); + } + + @Override + public void onRemove(Host host) { + log.info("host " + host + " removed"); + } + }); + session = cluster.connect("rhq"); + storageSession = new StorageSession(session); }
@AfterSuite(alwaysRun = true) @@ -98,7 +127,7 @@ public class CassandraIntegrationTest { BoundStatement boundStatement = statement.bind(scheduleId);
return new SimplePagedResult<AggregateNumericMetric>(boundStatement, new AggregateNumericMetricMapper(), - session); + storageSession); }
protected Iterable<AggregateNumericMetric> findAggregateMetricsWithMetadata(MetricsTable table, int scheduleId, @@ -109,11 +138,11 @@ public class CassandraIntegrationTest { "SELECT schedule_id, time, type, value, ttl(value), writetime(value) " + "FROM " + table + " " + "WHERE schedule_id = ? AND time >= ? AND time < ?"; - PreparedStatement statement = session.prepare(cql); + PreparedStatement statement = storageSession.prepare(cql); BoundStatement boundStatement = statement.bind(scheduleId, new Date(startTime), new Date(endTime));
return new SimplePagedResult<AggregateNumericMetric>(boundStatement, new AggregateNumericMetricMapper(true), - session); + storageSession); } catch (NoHostAvailableException e) { throw new CQLException(e); } diff --git a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/ClusterMonitorTest.java b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/ClusterMonitorTest.java new file mode 100644 index 0000000..871b7c5 --- /dev/null +++ b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/ClusterMonitorTest.java @@ -0,0 +1,92 @@ +package org.rhq.server.metrics; + +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.ConsistencyLevel; +import com.datastax.driver.core.Host; +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.Session; +import com.datastax.driver.core.querybuilder.QueryBuilder; +import com.google.common.util.concurrent.FutureCallback; +import com.google.common.util.concurrent.Futures; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import org.rhq.cassandra.util.ClusterBuilder; +import org.rhq.core.domain.measurement.MeasurementDataNumeric; + +/** + * @author John Sanda + */ +public class ClusterMonitorTest { + + private final Log log = LogFactory.getLog(ClusterMonitorTest.class); + +// @Test + public void monitorCluster() { + Cluster cluster = new ClusterBuilder() + .addContactPoints("127.0.0.1") +// .withCredentials("cassandra", "cassandra") + .withCredentials("rhqadmin", "rhqadmin") + .build(); + + cluster.register(new Host.StateListener() { + @Override + public void onAdd(Host host) { + log.info("host " + host + " ADDED"); + } + + @Override + public void onUp(Host host) { + log.info("host " + host + " UP"); + } + + @Override + public void onDown(Host host) { + log.info("host " + host + " DOWN"); + } + + @Override + public void onRemove(Host host) { + log.info("host " + host + " 
REMOVED"); + } + }); + + Session session = cluster.connect("rhq"); + StorageSession storageSession = new StorageSession(session); + + MetricsDAO dao = new MetricsDAO(storageSession, new MetricsConfiguration()); + + while (true) { + try { + Thread.sleep(10000); + try { +// session.execute("select * from system.schema_keyspaces"); + com.datastax.driver.core.Query query = QueryBuilder.select().from("rhq", "raw_metrics").setConsistencyLevel( + ConsistencyLevel.ALL); +// session.execute(query); +// session.execute("select * from rhq.raw_metrics"); +// log.info("query succeeded"); + StorageResultSetFuture future = dao.insertRawData + (new MeasurementDataNumeric(System.currentTimeMillis(), 123, 1.1)); + + Futures.addCallback(future, new FutureCallback<ResultSet>() { + @Override + public void onSuccess(ResultSet rows) { + log.info("insert succeeded"); + } + + @Override + public void onFailure(Throwable throwable) { + log.error("insert failed", throwable); + } + }); + } catch (Exception e) { + log.error("query failed", e); + } + } catch (InterruptedException e) { + } + } + } + +} diff --git a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsBaselineCalculatorTest.java b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsBaselineCalculatorTest.java index ab4bd03..2b58222 100644 --- a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsBaselineCalculatorTest.java +++ b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsBaselineCalculatorTest.java @@ -35,8 +35,6 @@ import java.util.Arrays; import java.util.List; import java.util.Random;
-import com.datastax.driver.core.Session; - import org.powermock.api.mockito.PowerMockito; import org.powermock.core.classloader.annotations.PrepareForTest; import org.powermock.modules.testng.PowerMockObjectFactory; @@ -71,9 +69,9 @@ public class MetricsBaselineCalculatorTest {
//tell the method story as it happens: mock dependencies and configure //those dependencies to get the method under test to completion. - Session mockSession = mock(Session.class); + StorageSession mockSession = mock(StorageSession.class); MetricsDAO mockMetricsDAO = mock(MetricsDAO.class); - PowerMockito.whenNew(MetricsDAO.class).withParameterTypes(Session.class, MetricsConfiguration.class) + PowerMockito.whenNew(MetricsDAO.class).withParameterTypes(StorageSession.class, MetricsConfiguration.class) .withArguments(eq(mockSession), eq(metricsConfiguration)).thenReturn(mockMetricsDAO);
when(mockMetricsDAO.findAggregatedSimpleOneHourMetric(eq(1), eq(0), @@ -147,9 +145,9 @@ public class MetricsBaselineCalculatorTest {
//tell the method story as it happens: mock dependencies and configure //those dependencies to get the method under test to completion. - Session mockSession = mock(Session.class); + StorageSession mockSession = mock(StorageSession.class); MetricsDAO mockMetricsDAO = mock(MetricsDAO.class); - PowerMockito.whenNew(MetricsDAO.class).withParameterTypes(Session.class, MetricsConfiguration.class) + PowerMockito.whenNew(MetricsDAO.class).withParameterTypes(StorageSession.class, MetricsConfiguration.class) .withArguments(eq(mockSession), eq(metricsConfiguration)).thenReturn(mockMetricsDAO);
when( @@ -214,9 +212,9 @@ public class MetricsBaselineCalculatorTest {
//tell the method story as it happens: mock dependencies and configure //those dependencies to get the method under test to completion. - Session mockSession = mock(Session.class); + StorageSession mockSession = mock(StorageSession.class); MetricsDAO mockMetricsDAO = mock(MetricsDAO.class); - PowerMockito.whenNew(MetricsDAO.class).withParameterTypes(Session.class, MetricsConfiguration.class) + PowerMockito.whenNew(MetricsDAO.class).withParameterTypes(StorageSession.class, MetricsConfiguration.class) .withArguments(eq(mockSession), eq(metricsConfiguration)).thenReturn(mockMetricsDAO);
when( diff --git a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsDAOTest.java b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsDAOTest.java index ee65292..b06dfda 100644 --- a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsDAOTest.java +++ b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsDAOTest.java @@ -38,7 +38,6 @@ import java.util.Map; import java.util.Random; import java.util.Set;
-import com.datastax.driver.core.ResultSetFuture; import com.google.common.collect.Lists; import com.google.common.util.concurrent.Futures;
@@ -78,7 +77,7 @@ public class MetricsDAOTest extends CassandraIntegrationTest {
@BeforeClass public void initDAO() throws Exception { - dao = new MetricsDAO(session, new MetricsConfiguration()); + dao = new MetricsDAO(storageSession, new MetricsConfiguration()); }
@BeforeMethod @@ -101,7 +100,7 @@ public class MetricsDAOTest extends CassandraIntegrationTest {
WaitForWrite waitForResults = new WaitForWrite(1);
- ResultSetFuture resultSetFuture = dao.insertRawData(expected); + StorageResultSetFuture resultSetFuture = dao.insertRawData(expected); Futures.addCallback(resultSetFuture, waitForResults); waitForResults.await("Failed to insert raw data");
@@ -131,7 +130,7 @@ public class MetricsDAOTest extends CassandraIntegrationTest { WaitForWrite waitForWrite = new WaitForWrite(data.size());
for (MeasurementDataNumeric raw : data) { - ResultSetFuture resultSetFuture = dao.insertRawData(raw); + StorageResultSetFuture resultSetFuture = dao.insertRawData(raw); Futures.addCallback(resultSetFuture, waitForWrite); } waitForWrite.await("Failed to insert raw data"); @@ -160,14 +159,14 @@ public class MetricsDAOTest extends CassandraIntegrationTest { WaitForWrite waitForWrite = new WaitForWrite(data.size());
for (MeasurementDataNumeric raw : data) { - ResultSetFuture resultSetFuture = dao.insertRawData(raw); + StorageResultSetFuture resultSetFuture = dao.insertRawData(raw); Futures.addCallback(resultSetFuture, waitForWrite); } waitForWrite.await("Failed to insert raw data");
RawNumericMetricMapper mapper = new RawNumericMetricMapper(); WaitForRead<RawNumericMetric> waitForRead = new WaitForRead<RawNumericMetric>(mapper); - ResultSetFuture resultSetFuture = dao.findRawMetricsAsync(scheduleId, + StorageResultSetFuture resultSetFuture = dao.findRawMetricsAsync(scheduleId, threeMinutesAgo.minusSeconds(5).getMillis(), oneMinuteAgo.plusSeconds(5).getMillis()); Futures.addCallback(resultSetFuture, waitForRead);
@@ -200,7 +199,7 @@ public class MetricsDAOTest extends CassandraIntegrationTest { WaitForWrite waitForWrite = new WaitForWrite(data.size());
for (MeasurementDataNumeric raw : data) { - ResultSetFuture resultSetFuture = dao.insertRawData(raw); + StorageResultSetFuture resultSetFuture = dao.insertRawData(raw); Futures.addCallback(resultSetFuture, waitForWrite); } waitForWrite.await("Failed to insert raw data"); @@ -321,9 +320,9 @@ public class MetricsDAOTest extends CassandraIntegrationTest {
WaitForWrite waitForWrite = new WaitForWrite(2);
- ResultSetFuture resultSetFuture1 = dao.updateMetricsIndex(MetricsTable.TWENTY_FOUR_HOUR, scheduleId1, + StorageResultSetFuture resultSetFuture1 = dao.updateMetricsIndex(MetricsTable.TWENTY_FOUR_HOUR, scheduleId1, hour0().getMillis()); - ResultSetFuture resultSetFuture2 = dao.updateMetricsIndex(MetricsTable.TWENTY_FOUR_HOUR, scheduleId2, + StorageResultSetFuture resultSetFuture2 = dao.updateMetricsIndex(MetricsTable.TWENTY_FOUR_HOUR, scheduleId2, hour0().getMillis());
Futures.addCallback(resultSetFuture1, waitForWrite); diff --git a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsServerTest.java b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsServerTest.java index 1ec4771..b4de4e0 100644 --- a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsServerTest.java +++ b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsServerTest.java @@ -43,7 +43,6 @@ import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicInteger;
-import com.datastax.driver.core.ResultSetFuture; import com.google.common.base.Throwables; import com.google.common.collect.Lists; import com.google.common.util.concurrent.Futures; @@ -113,7 +112,7 @@ public class MetricsServerTest extends CassandraIntegrationTest { dateTimeService.setConfiguration(configuration); metricsServer.setDateTimeService(dateTimeService);
- dao = new MetricsDAO(session, configuration); + dao = new MetricsDAO(storageSession, configuration); metricsServer.setDAO(dao);
purgeDB(); @@ -230,7 +229,7 @@ public class MetricsServerTest extends CassandraIntegrationTest { WaitForWrite waitForRawInserts = new WaitForWrite(rawMetrics.size());
for (MeasurementDataNumeric raw : rawMetrics) { - ResultSetFuture resultSetFuture = dao.insertRawData(raw); + StorageResultSetFuture resultSetFuture = dao.insertRawData(raw); Futures.addCallback(resultSetFuture, waitForRawInserts); } waitForRawInserts.await("Failed to insert raw data"); @@ -1008,7 +1007,7 @@ public class MetricsServerTest extends CassandraIntegrationTest { "SELECT schedule_id, time, value, ttl(value), writetime(value) " + "FROM " + MetricsTable.RAW + " " + "WHERE schedule_id = " + scheduleId + " AND time >= " + startTime + " AND time < " + endTime; - return new SimplePagedResult<RawNumericMetric>(cql, new RawNumericMetricMapper(true), session); + return new SimplePagedResult<RawNumericMetric>(cql, new RawNumericMetricMapper(true), storageSession); }
private static class WaitForRawInserts implements RawDataInsertedCallback { diff --git a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/TimeoutTest.java b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/TimeoutTest.java index e2b23ec..dabd110 100644 --- a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/TimeoutTest.java +++ b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/TimeoutTest.java @@ -35,7 +35,6 @@ import com.google.common.base.Throwables;
import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.testng.annotations.Test;
import org.rhq.core.domain.measurement.MeasurementDataNumeric;
@@ -43,7 +42,7 @@ public class TimeoutTest extends CassandraIntegrationTest {
private final Log log = LogFactory.getLog(TimeoutTest.class);
- @Test +// @Test public void generateTimeout() throws Exception { MetricsConfiguration configuration = new MetricsConfiguration();
@@ -54,7 +53,7 @@ public class TimeoutTest extends CassandraIntegrationTest { dateTimeService.setConfiguration(configuration); metricsServer.setDateTimeService(dateTimeService);
- MetricsDAO dao = new MetricsDAO(session, configuration); + MetricsDAO dao = new MetricsDAO(new StorageSession(session), configuration); metricsServer.setDAO(dao);
long time = hour0().getMillis(); diff --git a/modules/enterprise/server/server-metrics/src/test/resources/log4j.properties b/modules/enterprise/server/server-metrics/src/test/resources/log4j.properties index 67db049..59bd5db 100644 --- a/modules/enterprise/server/server-metrics/src/test/resources/log4j.properties +++ b/modules/enterprise/server/server-metrics/src/test/resources/log4j.properties @@ -39,4 +39,4 @@ log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout log4j.appender.CONSOLE.layout.ConversionPattern=%d{ABSOLUTE} %-5p %c %m%n
log4j.logger.org.rhq=DEBUG -log4j.logger.com.datastax=DEBUG +log4j.logger.com.datastax=WARN diff --git a/modules/helpers/metrics-simulator/src/main/java/org/rhq/metrics/simulator/Simulator.java b/modules/helpers/metrics-simulator/src/main/java/org/rhq/metrics/simulator/Simulator.java index 7ff8f6c..40e00bf 100644 --- a/modules/helpers/metrics-simulator/src/main/java/org/rhq/metrics/simulator/Simulator.java +++ b/modules/helpers/metrics-simulator/src/main/java/org/rhq/metrics/simulator/Simulator.java @@ -60,6 +60,7 @@ import org.rhq.metrics.simulator.stats.Stats; import org.rhq.server.metrics.DateTimeService; import org.rhq.server.metrics.MetricsDAO; import org.rhq.server.metrics.MetricsServer; +import org.rhq.server.metrics.StorageSession;
/** * @author John Sanda @@ -96,7 +97,9 @@ public class Simulator implements ShutdownManager { session = createSession(nodes, compression); }
- MetricsDAO metricsDAO = new MetricsDAO(session, plan.getMetricsServerConfiguration()); + StorageSession storageSession = new StorageSession(session); + + MetricsDAO metricsDAO = new MetricsDAO(storageSession, plan.getMetricsServerConfiguration()); MetricsServer metricsServer = new MetricsServer(); metricsServer.setDAO(metricsDAO); metricsServer.setConfiguration(plan.getMetricsServerConfiguration());
commit 638017e6ee0a4f752bafb6352f6d63cc17d5f5ad Author: John Sanda jsanda@redhat.com Date: Thu Jul 25 22:06:31 2013 -0400
upgrade to version 1.0.2 of datastax driver
diff --git a/pom.xml b/pom.xml index f909033..3f2adbd 100644 --- a/pom.xml +++ b/pom.xml @@ -178,7 +178,7 @@ <!-- cassandra dependency versions --> <cassandra.version>1.2.4</cassandra.version> <cassandra.thrift.version>0.7.0</cassandra.thrift.version> - <cassandra.driver.version>1.0.0-rhq-1.2.4</cassandra.driver.version> + <cassandra.driver.version>1.0.2-rhq-1.2.4</cassandra.driver.version> <cassandra.driver.netty.version>3.6.3.Final</cassandra.driver.netty.version> <cassandra.snappy.version>1.0.4.1-rhq-p1</cassandra.snappy.version> <cassandra.snakeyaml.version>1.6</cassandra.snakeyaml.version>
commit 5112e60ff848296ecce96d5ae7e363dbea6c0e12 Author: Stefan Negrea snegrea@redhat.com Date: Thu Jul 25 17:52:00 2013 -0500
Operations are now created in a new transaction. Also, change the way the configuration is updated.
The steps for a full configuration update: 1) Invoke config update operation on the plugin 2) Update the JMX port on the storage node entity 3) Restart the storage node server 4) Update the connection settings for the storage node if JMX port changed
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 7a7eda4..7d861f1 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -35,6 +35,8 @@ import java.util.Queue;
import javax.ejb.EJB; import javax.ejb.Stateless; +import javax.ejb.TransactionAttribute; +import javax.ejb.TransactionAttributeType; import javax.persistence.EntityManager; import javax.persistence.PersistenceContext; import javax.persistence.TypedQuery; @@ -134,6 +136,9 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN @EJB private ConfigurationManagerLocal configurationManager;
+ @EJB + private StorageNodeManagerLocal storageNodeManger; + @Override public void linkResource(Resource resource) { List<StorageNode> storageNodes = this.getStorageNodes(); @@ -658,31 +663,49 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN parameters);
if (result) { + //2. Update the JMX port + //this is a fast operation compared to the restart + storageNode.setJmxPort(storageNodeConfiguration.getJmxPort()); + entityManager.merge(storageNode); + + //3. Restart the storage node + result = runOperationAndWaitForResult(subject, storageNodeResource, RESTART_OPERATION, + new Configuration()); + + //4. Update the plugin configuration to talk with the new server + //Up to this point communication with the storage node should not have been affected by the intermediate + //changes Configuration storageNodePluginConfig = configurationManager.getPluginConfiguration(subject, storageNodeResource.getId());
String existingJMXPort = storageNodePluginConfig.getSimpleValue("jmxPort"); - storageNodePluginConfig.setSimpleValue("jmxPort", storageNodeConfiguration.getJmxPort() + ""); + String newJMXPort = storageNodeConfiguration.getJmxPort() + "";
- String existingConnectionURL = storageNodePluginConfig.getSimpleValue("connectorAddress"); - String newConnectionURL = existingConnectionURL.replace(":" + existingJMXPort + "/", ":" - + storageNodeConfiguration.getJmxPort() + "/"); - storageNodePluginConfig.setSimpleValue("connectorAddress", newConnectionURL); + if (!existingJMXPort.equals(newJMXPort)) { + storageNodePluginConfig.setSimpleValue("jmxPort", newJMXPort);
- configurationManager.updatePluginConfiguration(subject, storageNodeResource.getId(), - storageNodePluginConfig); + String existingConnectionURL = storageNodePluginConfig.getSimpleValue("connectorAddress"); + String newConnectionURL = existingConnectionURL.replace(":" + existingJMXPort + "/", ":" + + storageNodeConfiguration.getJmxPort() + "/"); + storageNodePluginConfig.setSimpleValue("connectorAddress", newConnectionURL);
- storageNode.setJmxPort(storageNodeConfiguration.getJmxPort()); - entityManager.merge(storageNode); - entityManager.flush(); + configurationManager.updatePluginConfiguration(subject, storageNodeResource.getId(), + storageNodePluginConfig); + }
- return runOperationAndWaitForResult(subject, storageNodeResource, RESTART_OPERATION, null); + return result; } }
return false; }
+ @Override + @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) + public void scheduleOperationInNewTransaction(Subject subject, ResourceOperationSchedule schedule) { + operationManager.scheduleResourceOperation(subject, schedule); + } + private boolean runOperationAndWaitForResult(Subject subject, Resource storageNodeResource, String operationToRun, Configuration parameters) {
@@ -696,8 +719,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN newSchedule.setDescription("Run by StorageNodeManagerBean"); newSchedule.setParameters(parameters);
- operationManager.scheduleResourceOperation(subject, newSchedule); - entityManager.flush(); + storageNodeManger.scheduleOperationInNewTransaction(subject, newSchedule);
//waiting for the operation result then return it int iteration = 0; diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java index 69b16c4..15fa85c 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java @@ -28,6 +28,7 @@ import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.cloud.StorageNodeConfigurationComposite; import org.rhq.core.domain.cloud.StorageNodeLoadComposite; import org.rhq.core.domain.criteria.StorageNodeCriteria; +import org.rhq.core.domain.operation.bean.ResourceOperationSchedule; import org.rhq.core.domain.resource.Resource; import org.rhq.core.domain.resource.group.ResourceGroup; import org.rhq.core.domain.util.PageList; @@ -178,4 +179,6 @@ public interface StorageNodeManagerLocal { */ ResourceGroup getStorageNodeGroup();
+ void scheduleOperationInNewTransaction(Subject subject, ResourceOperationSchedule schedule); + } diff --git a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml index a42040d..f5a4f6d 100644 --- a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml +++ b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml @@ -132,10 +132,10 @@
<operation name="updateConfiguration" description="Updates the node configuration. Will require a separate server restart for the settings to take effect."> <parameters> - <c:simple-property name="jmxPort" type="integer" description="JMX port JVM option."/> - <c:simple-property name="heapSize" type="string" description="The heap size to be used for both -Xms and -Xmx JVM options."/> - <c:simple-property name="heapNewSize" type="string" description="The heap new size to be used be used with -Xmn JVM option."/> - <c:simple-property name="threadStackSize" type="integer" description="The thread stack size. This memory is allocated to each thread off heap. The + <c:simple-property name="jmxPort" type="integer" required="false" description="JMX port JVM option."/> + <c:simple-property name="heapSize" type="string" required="false" description="The heap size to be used for both -Xms and -Xmx JVM options."/> + <c:simple-property name="heapNewSize" type="string" required="false" description="The heap new size to be used be used with -Xmn JVM option."/> + <c:simple-property name="threadStackSize" type="integer" required="false" description="The thread stack size. This memory is allocated to each thread off heap. The value should be an integer that will be interpreted in kilobytes."/> <c:simple-property name="restartIfRequired" type="boolean" required="true" default="false" description="If [true] then restart the server at the end of the update if and if only the updates made require a restart. If [false] the server will not be restarted regardless of the updates made."/> </parameters>
commit af85b8b1c687e025ea95898dda6a5c46f127d151 Author: Mike Thompson mithomps@redhat.com Date: Thu Jul 25 14:16:38 2013 -0700
[BZ 988574] - Consolidated Metrics Screen - UXD Redesign
diff --git a/modules/enterprise/gui/coregui/pom.xml b/modules/enterprise/gui/coregui/pom.xml index 969ab8e..353f4d9 100644 --- a/modules/enterprise/gui/coregui/pom.xml +++ b/modules/enterprise/gui/coregui/pom.xml @@ -19,7 +19,7 @@
<properties> <!-- dependency versions --> - <gwt.version>2.5.0</gwt.version> + <gwt.version>${gwt.version}</gwt.version> <smartgwt.version>3.0</smartgwt.version>
<!-- If this is too much memory to allocate to your gwt:debug process then override this property in @@ -136,16 +136,7 @@ </dependency>
- <!-- the GWT graphing library (note, this provides jquery 1.3.2. If we get rid of GFlot we will need - to provide jquery explcitly for jquery.sparkline support. See CoreGUI.gwt.xml for the jquery.sparkline - declaration and coregui/webapp/js for the lib inclusion.) --> - <!-- NOTE: soon to be deprecated by d3.js --> - <dependency> - <groupId>com.googlecode.gflot</groupId> - <artifactId>gflot</artifactId> - <version>2.4.3</version> - <scope>provided</scope> - </dependency> +
<!-- for file uploads --> <dependency> @@ -245,7 +236,7 @@ <plugin> <groupId>org.codehaus.mojo</groupId> <artifactId>gwt-maven-plugin</artifactId> - <version>2.5.0</version> + <version>${gwt.version}</version> <configuration> <noServer>true</noServer> <inplace>false</inplace> diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/IconEnum.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/IconEnum.java index d11467a..b48a712 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/IconEnum.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/IconEnum.java @@ -114,6 +114,12 @@ public enum IconEnum { STORAGE_NODE("global/StorageNode_16.png", "global/StorageNode_24.png"),
///////////////////////////// + // General + ///////////////////////////// + EXPANDED_ICON("[SKIN]/ListGrid/row_expanded.png"), + COLLAPSED_ICON("[SKIN]/ListGrid/row_collapsed.png"), + + ///////////////////////////// // Resource Specific Tabs ///////////////////////////// CALLTIME("global/Recent_16.png", "global/Recent_24.png"); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/table/Table.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/table/Table.java index 4cfe199..4ce05d2 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/table/Table.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/table/Table.java @@ -1202,16 +1202,15 @@ public class Table<DS extends RPCDataSource> extends EnhancedHLayout implements }
if (hiddenItem != null) { - Log.debug("Found hidden items"); // Add the hidden item if it exists FormItem[] tmpItems = new FormItem[items.length + 1]; System.arraycopy(items, 0, tmpItems, 0, items.length); tmpItems[items.length] = hiddenItem; items = tmpItems; } - for (FormItem item : items) { - Log.debug(" ******** Form Items sent: " + item.getName() + ": " + item.getValue()); - } +// for (FormItem item : items) { +// Log.debug(" ******** Form Items sent: " + item.getName() + ": " + item.getValue()); +// }
super.setItems(items); } @@ -1223,19 +1222,16 @@ public class Table<DS extends RPCDataSource> extends EnhancedHLayout implements @Override public void onKeyPress(KeyPressEvent event) { if (event.getKeyName().equals("Enter")) { - Log.debug("Table.TableFilter Pressed Enter key");
if (null != searchBarItem) { if (searchBarItem.getSearchBar().isFilterEnabled()) { TextItem searchTextItem = searchBarItem.getSearchBar().getSearchTextItem(); String searchBarValue = searchTextItem.getValueAsString(); String hiddenValue = (String) hiddenItem.getValue(); - Log.debug("Table.TableFilter searchBarValue :" + searchBarValue + ", hiddenValue" + hiddenValue);
// Only send a fetch request if the user actually changed the search expression. if (!equals(searchBarValue, hiddenValue)) { hiddenItem.setValue(searchBarValue); - Log.debug("Table.TableFilter fetchFilteredTableData"); fetchFilteredTableData(); } } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/AbstractMetricGraph.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/AbstractMetricGraph.java index e75b65a..ace6154 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/AbstractMetricGraph.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/AbstractMetricGraph.java @@ -202,6 +202,10 @@ public abstract class AbstractMetricGraph extends VLayout implements HasD3Metric return metricGraphData.getPortalId(); }
+ public boolean isHideLegend(){ + return metricGraphData.isHideLegend(); + } + public void setGraphListView(AbstractD3GraphListView graphListView) { this.graphListView = graphListView; } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/ButtonBarDateTimeRangeEditor.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/ButtonBarDateTimeRangeEditor.java index 2075783..b4f1354 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/ButtonBarDateTimeRangeEditor.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/ButtonBarDateTimeRangeEditor.java @@ -77,7 +77,7 @@ public class ButtonBarDateTimeRangeEditor extends EnhancedVLayout { prefs = measurementUserPreferences.getMetricRangePreferences(); Log.debug("ButtonBarDateTimeRangeEditor initialized with start Date: " + new Date(prefs.begin) + " end Date: " + new Date(prefs.end)); - createButtons(); + //createButtons();
}
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/MetricGraphData.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/MetricGraphData.java index ecd3b46..5ae392f 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/MetricGraphData.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/MetricGraphData.java @@ -73,6 +73,7 @@ public class MetricGraphData implements JsonMetricProducer { private MeasurementOOBComposite lastOOB; private Integer chartHeight; private boolean isPortalGraph; + private boolean hideLegend;
private MetricGraphData(int portalId) { @@ -249,6 +250,14 @@ public class MetricGraphData implements JsonMetricProducer { return isPortalGraph; }
+ public boolean isHideLegend() { + return hideLegend; + } + + public void setHideLegend(boolean hideLegend) { + this.hideLegend = hideLegend; + } + public String getChartTitle() {
if(definition != null){ @@ -380,8 +389,8 @@ public class MetricGraphData implements JsonMetricProducer { * @see StackedBarMetricGraphImpl */ public boolean showBarAvgTrendLine() { + int numberOfAggBars = 0; for (MeasurementDataNumericHighLowComposite measurement : metricData) { - int numberOfAggBars = 0; boolean noValuesInCurrentBarUndefined = (!Double.isNaN(measurement.getValue()) && !Double.isNaN(measurement.getHighValue()) && !Double.isNaN(measurement.getLowValue())); boolean foundAggregateBar = (measurement.getValue() != measurement.getHighValue() || measurement.getHighValue() != measurement.getLowValue()); // if there exists a even one aggregate bar then I can short circuit this and exit diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityLineGraphType.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityLineGraphType.java deleted file mode 100644 index 34ca60b..0000000 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityLineGraphType.java +++ /dev/null @@ -1,311 +0,0 @@ -/* - * RHQ Management Platform - * Copyright (C) 2005-2012 Red Hat, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ -package org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype; - -import java.util.Date; -import java.util.List; - -import org.rhq.core.domain.measurement.Availability; -import org.rhq.core.domain.measurement.MeasurementUnits; -import org.rhq.core.domain.resource.group.composite.ResourceGroupAvailability; -import org.rhq.core.domain.resource.group.composite.ResourceGroupComposite; -import org.rhq.enterprise.gui.coregui.client.CoreGUI; -import org.rhq.enterprise.gui.coregui.client.Messages; -import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.AvailabilityGraphType; -import org.rhq.enterprise.gui.coregui.client.util.Log; -import org.rhq.enterprise.gui.coregui.client.util.MeasurementConverterClient; - -/** - * This is now old and for demonstration purposes only. - * Contains the javascript chart definition for an implementation of the d3 availability chart. This implementation is - * just a line that changes color based on availability type: up=green, down=red, orange=disabled, unknown=grey, - * empty=grey, warn=yellow. This version of the availability graph shows continuous intervals. - * @deprecated - * @see AvailabilityOverUnderGraphType - * - * @author Mike Thompson - */ -public class AvailabilityLineGraphType implements AvailabilityGraphType { - - private static Messages MSG = CoreGUI.getMessages(); - private List<Availability> availabilityList; - private List<ResourceGroupAvailability> groupAvailabilityList; - private Integer entityId; - - /** - * General constructor for stacked bar graph when you have all the data needed to produce the graph. (This is true - * for all cases but the dashboard portlet). 
- */ - public AvailabilityLineGraphType(Integer entityId) { - this.entityId = entityId; - } - - public void setAvailabilityList(List<Availability> availabilityList) { - this.availabilityList = availabilityList; - } - - public void setGroupAvailabilityList(List<ResourceGroupAvailability> groupAvailabilityList) { - this.groupAvailabilityList = groupAvailabilityList; - } - - public String getAvailabilityJson() { - StringBuilder sb = new StringBuilder("["); - if (null != availabilityList) { - // loop through the avail intervals - for (Availability availability : availabilityList) { - sb.append("{ "availType":"" + availability.getAvailabilityType() + "", "); - sb.append(" "availTypeMessage":"" + availability.getAvailabilityType()+ "", "); - sb.append(" "availStart":" + availability.getStartTime() + ", "); - // last record will be null - long endTime = availability.getEndTime() != null ? availability.getEndTime() : (new Date()).getTime(); - sb.append(" "availEnd":" + endTime + ", "); - - long availDuration = endTime - availability.getStartTime(); - String availDurationString = MeasurementConverterClient.format((double) availDuration, - MeasurementUnits.MILLISECONDS, true); - sb.append(" "availDuration": "" + availDurationString + "" },"); - - } - sb.setLength(sb.length() - 1); - - } else if (null != groupAvailabilityList) { - // loop through the group avail down intervals - for (ResourceGroupAvailability groupAvailability : groupAvailabilityList) { - // allows substitution for situations like WARN=MIXED for easier terminology - String availabilityTypeMessage = (groupAvailability.getGroupAvailabilityType().equals(ResourceGroupComposite.GroupAvailabilityType.WARN)) - ? 
MSG.chart_hover_availability_type_warn() : groupAvailability.getGroupAvailabilityType().name(); - - sb.append("{ "availType":"" + groupAvailability.getGroupAvailabilityType() + "", "); - sb.append(" "availTypeMessage":"" + availabilityTypeMessage + "", "); - sb.append(" "availStart":" + groupAvailability.getStartTime() + ", "); - // last record will be null - long endTime = groupAvailability.getEndTime() != null ? groupAvailability.getEndTime() : (new Date()) - .getTime(); - sb.append(" "availEnd":" + endTime + ", "); - - long availDuration = endTime - groupAvailability.getStartTime(); - String availDurationString = MeasurementConverterClient.format((double) availDuration, - MeasurementUnits.MILLISECONDS, true); - sb.append(" "availDuration": "" + availDurationString + "" },"); - - } - sb.setLength(sb.length() - 1); - } - - sb.append("]"); - Log.debug(sb.toString()); - return sb.toString(); - } - - /** - * The magic JSNI to draw the charts with d3. - */ - public native void drawJsniChart() /*-{ - console.log("Draw Availability chart"); - - var global = this, - // tidy up all of our interactions with java (via JSNI) thru AvailChartContext class - // NOTE: rhq.js has the javascript object constructors in it. 
- availChartContext = new $wnd.AvailChartContext(global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilityLineGraphType::getChartId()(), - global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilityLineGraphType::getAvailabilityJson()(), - global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilityLineGraphType::getChartDateLabel()(), - global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilityLineGraphType::getChartTimeLabel()(), - global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilityLineGraphType::getChartHoverStartLabel()(), - global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilityLineGraphType::getChartHoverEndLabel()(), - global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilityLineGraphType::getChartHoverBarLabel()(), - global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilityLineGraphType::getChartHoverAvailabilityLabel()(), - global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilityLineGraphType::getChartHoverTimeFormat()(), - global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilityLineGraphType::getChartHoverDateFormat()() - ); - - - var availabilityGraph = function () { - "use strict"; - // privates - - var margin = {top: 5, right: 5, bottom: 5, left: 40}, - barOffset = 10, - width = 750 - margin.left - margin.right + barOffset, - height = 20 - margin.top - margin.bottom, - pixelsOffHeight = 0, - svg; - - - function drawBars(availChartContext) { - var xAxisMin = $wnd.d3.min(availChartContext.data, function (d) { - return +d.availStart; - }), - xAxisMax = $wnd.d3.max(availChartContext.data, function (d) { - return +d.availEnd; - }), - - timeScale = $wnd.d3.time.scale() - .range([0, width]) - .domain([xAxisMin, 
xAxisMax]), - - yScale = $wnd.d3.scale.linear() - .clamp(true) - .rangeRound([height, 0]) - .domain([0, 4]), - - svg = $wnd.d3.select(availChartContext.chartSelection).append("g") - .attr("width", width + margin.left + margin.right) - .attr("height", height + margin.top + margin.bottom) - .attr("transform", "translate(" + margin.left + "," + margin.top + ")"); - - - // The gray bars at the bottom leading up - svg.selectAll("rect.availBars") - .data(availChartContext.data) - .enter().append("rect") - .attr("class", "availBars") - .attr("x", function (d) { - return timeScale(+d.availStart); - }) - .attr("y", function (d) { - return yScale(0); - }) - .attr("height", function (d) { - return height - yScale(4) - pixelsOffHeight; - }) - .attr("width", function (d) { - return timeScale(+d.availEnd) - timeScale(+d.availStart); - }) - - .attr("opacity", ".9") - .attr("fill", function (d) { - if (d.availType === 'DOWN') { - return "#FF1919"; // red - } - else if (d.availType === 'DISABLED') { - return "#FF9933"; // orange - } - else if (d.availType === 'UNKNOWN') { - return "#CCC"; // gray - } - else if (d.availType === 'UP') { - return "#198C19"; // green - } - else if (d.availType === 'WARN') { - return "#FFFF00"; // yellow - } - else if (d.availType === 'EMPTY') { - return "#CCC"; // gray - } - else { - // should not ever happen, but... 
- console.warn("AvailabilityType not valid."); - return "#000"; //black - } - }); - } - - function createHovers() { - $wnd.jQuery('svg rect.availBars').tipsy({ - gravity: 'n', - html: true, - trigger: 'hover', - title: function () { - var d = this.__data__; - return formatHovers(d); - }, - show: function (e, el) { - el.css({ 'z-index': '990000'}) - } - }); - } - - function formatHovers(d) { - var hoverString, - timeFormatter = $wnd.d3.time.format(availChartContext.chartHoverTimeFormat), - dateFormatter = $wnd.d3.time.format(availChartContext.chartHoverDateFormat), - availStart = new Date(+d.availStart), - availEnd = new Date(+d.availEnd); - - hoverString = - '<div class="chartHoverEnclosingDiv">' + - '<div class="chartHoverAlignRight"><span >' + availChartContext.hoverBarAvailabilityLabel + ': </span><span style="width:50px;">' + d.availTypeMessage + '</span></div>' + - '<div class="chartHoverAlignRight"><span >' + availChartContext.hoverStartLabel + ': </span><span style="width:50px;">' + timeFormatter(availStart) + '</span></div>' + - '<div class="chartHoverAlignRight"><span >' + ' </span><span style="width:50px;">' + dateFormatter(availStart) + '</span></div>' + - '<div class="chartHoverAlignRight"><span >' + availChartContext.hoverEndLabel + ': </span><span style="width:50px;">' + timeFormatter(availEnd) + '</span></div>' + - '<div class="chartHoverAlignRight"><span >' + ' </span><span style="width:50px;">' + dateFormatter(availEnd) + '</span></div>' + - '<div class="chartHoverAlignRight"><span >' + availChartContext.hoverBarLabel + ': </span><span style="width:50px;">' + d.availDuration + '</span></div>' + - '</div>'; - return hoverString; - - } - - return { - // Public API - draw: function (chartContext) { - "use strict"; - console.info("AvailabilityChart"); - //console.time("availabilityChart"); - - drawBars(availChartContext); - createHovers(); - //console.timeEnd("availabilityChart"); - } - }; // end public closure - - - }(); - - if (availChartContext.data 
!== undefined && availChartContext.data.length > 0) { - availabilityGraph.draw(availChartContext); - } - - }-*/; - - public String getChartId() { - return String.valueOf(entityId); - } - - public String getChartTimeLabel() { - return MSG.chart_time_label(); - } - - public String getChartDateLabel() { - return MSG.chart_date_label(); - } - - public String getChartHoverAvailabilityLabel() { - return MSG.chart_hover_availability_label(); - } - - public String getChartHoverStartLabel() { - return MSG.chart_hover_start_label(); - } - - public String getChartHoverEndLabel() { - return MSG.chart_hover_end_label(); - } - - public String getChartHoverBarLabel() { - return MSG.chart_hover_bar_label(); - } - - public String getChartHoverTimeFormat() { - return MSG.chart_hover_time_format(); - } - - public String getChartHoverDateFormat() { - return MSG.chart_hover_date_format(); - } -} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java index 7c5f80a..811a579 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java @@ -311,13 +311,6 @@ public class AvailabilityOverUnderGraphType implements AvailabilityGraphType { }); }
- function timeFormat(formats) { - return function(date) { - var i = formats.length - 1, f = formats[i]; - while (!f[1](date)) f = formats[--i]; - return f[0](date); - } - }
function formatHovers(d) { var timeFormatter = $wnd.d3.time.format(availChartContext.chartHoverTimeFormat), @@ -345,11 +338,13 @@ public class AvailabilityOverUnderGraphType implements AvailabilityGraphType {
}();
- if (availChartContext.data !== undefined && availChartContext.data.length > 0) { + console.log("Avail Data records: "+availChartContext.data.length); + if (typeof availChartContext.data !== 'undefined' && availChartContext.data.length > 0) { availabilityGraph.draw(availChartContext); + console.log("Availability Chart Drawn"); }
- }-*/; + }-*/;
public String getChartId() { return String.valueOf(entityId); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/LineMetricGraph.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/LineMetricGraph.java deleted file mode 100644 index 079b2ab..0000000 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/LineMetricGraph.java +++ /dev/null @@ -1,158 +0,0 @@ -/* - * RHQ Management Platform - * Copyright (C) 2005-2012 Red Hat, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ -package org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype; - -import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.AbstractMetricGraph; -import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.MetricGraphData; - -/** - * Contains the javascript chart definition for a d3 Line graph chart. - * NOTE: this class isn't used just provided as an example as how to create - * other graph types. - * - * @author Mike Thompson - */ -public final class LineMetricGraph extends AbstractMetricGraph { - - /** - * General constructor for stacked bar graph when you have all the data needed to - * produce the graph. 
(This is true for all cases but the dashboard portlet). - */ - public LineMetricGraph(MetricGraphData metricGraphData) { - setMetricGraphData(metricGraphData); - } - - /** - * The magic JSNI to draw the charts with d3. - */ - public native void drawJsniChart() /*-{ - console.log("Draw Metric Line jsni chart"); - var global = this, - chartId = global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.MetricGraphData::getChartId()(), - chartHandle = "#rChart-"+chartId, - chartSelection = chartHandle + " svg", - json = $wnd.jQuery.parseJSON(global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.MetricGraphData::getJsonMetrics()()), - yAxisLabel = global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.MetricGraphData::getChartTitle()(), - yAxisUnits = global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.MetricGraphData::getYAxisUnits()(), - xAxisLabel = global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.MetricGraphData::getXAxisTitle()(); - - console.log("chart id: "+chartSelection ); - console.log(global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.MetricGraphData::getJsonMetrics()()); - - - function draw(data){ - "use strict"; - - var margin = {top: 10, right: 5, bottom: 30, left: 70}, - width = 400 - margin.left - margin.right, - height = 150 - margin.top - margin.bottom; - - var timeScale = $wnd.d3.time.scale() - .range([0, width]) - .domain($wnd.d3.extent(data, function(d) { return d.x; })); - - var yScale = $wnd.d3.scale.linear() - .rangeRound([height, 0]) - .domain([$wnd.d3.min(data.map(function(x) {return x.low;})), $wnd.d3.max(data.map(function(x){return x.high;}))]); - - var xAxis = $wnd.d3.svg.axis() - .scale(timeScale) - .ticks(5) - .orient("bottom"); - - var yAxis = $wnd.d3.svg.axis() - .scale(yScale) - .ticks(5) - .orient("left"); - - var interpolation = "basis"; - - var line = $wnd.d3.svg.line() - .interpolate(interpolation) - .x(function(d) { return timeScale(d.x); 
}) - .y(function(d) { return yScale(+d.y); }); - - var highLine = $wnd.d3.svg.line() - .interpolate(interpolation) - .x(function(d) { return timeScale(d.x); }) - .y(function(d) { return yScale(+d.high); }); - - var lowLine = $wnd.d3.svg.line() - .interpolate(interpolation) - .x(function(d) { return timeScale(d.x); }) - .y(function(d) { return yScale(+d.low); }); - - var svg = $wnd.d3.select(chartSelection).append("g") - .attr("width", width + margin.left + margin.right) - .attr("height", height + margin.top + margin.bottom) - .attr("transform", "translate(" + margin.left + "," + margin.top + ")"); - - svg.append("g") - .attr("class", "x axis") - .attr("transform", "translate(0," + height + ")") - .call(xAxis); - - - svg.append("g") - .attr("class", "y axis") - .call(yAxis) - .append("text") - .attr("transform", "rotate(-90)") - .attr("y", -60) - .attr("dy", ".71em") - .style("text-anchor", "end") - .text(yAxisUnits === "NONE" ? "" : yAxisUnits); - - console.log("finished axes"); - - svg.append("path") - .datum(data) - .attr("class", "line") - .attr("fill", "none") - .attr("stroke", "steelblue") - .attr("stroke-width", "2") - .attr("d", line); - - svg.append("path") - .datum(data) - .attr("class", "highLine") - .attr("fill", "none") - .attr("stroke", "red") - .attr("stroke-width", "1.5") - //.attr("stroke-dasharray", "20,10,5,5,5,10") - .attr("stroke-dasharray", "5,5") - .attr("stroke-opacity", ".3") - .attr("d", highLine); - - svg.append("path") - .datum(data) - .attr("class", "lowLine") - .attr("fill", "none") - .attr("stroke", "blue") - .attr("stroke-width", "1.5") - .attr("stroke-dasharray", "5,5") - .attr("stroke-opacity", ".3") - .attr("d", lowLine); - - console.log("finished paths"); - }(data); - - }-*/; - -} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/StackedBarMetricGraphImpl.java 
b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/StackedBarMetricGraphImpl.java index 9058ea5..67552d3 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/StackedBarMetricGraphImpl.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/StackedBarMetricGraphImpl.java @@ -73,7 +73,8 @@ public class StackedBarMetricGraphImpl extends AbstractMetricGraph { global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.AbstractMetricGraph::getButtonBarDateTimeFormat()(), global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.AbstractMetricGraph::getChartSingleValueLabel()(), global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.AbstractMetricGraph::getXAxisTimeFormatHours()(), - global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.AbstractMetricGraph::getXAxisTimeFormatHoursMinutes()() + global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.AbstractMetricGraph::getXAxisTimeFormatHoursMinutes()(), + global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.AbstractMetricGraph::isHideLegend()() );
@@ -217,8 +218,8 @@ public class StackedBarMetricGraphImpl extends AbstractMetricGraph { .attr("transform", "translate(" + margin.left + "," + (+titleHeight + titleSpace + margin.top) + ")");
legendUnDefined = (typeof min === 'undefined') || (typeof avg === 'undefined') || (typeof peak === 'undefined'); - if (!useSmallCharts() && !legendUnDefined) { - createMinAvgPeakSidePanel(chartContext.minChartTitle, min, chartContext.avgChartTitle, avg, chartContext.peakChartTitle, peak, chartContext.yAxisUnits); + if (!(chartContext.hideLegend && !useSmallCharts())) { + createMinAvgPeakSidePanel(chartContext.minChartTitle, min, chartContext.avgChartTitle, avg, chartContext.peakChartTitle, peak, chartContext.yAxisUnits); } }
@@ -693,7 +694,7 @@ public class StackedBarMetricGraphImpl extends AbstractMetricGraph { }; // end public closure }();
- if(typeof chartContext.data !== 'undefined' && chartContext.data.length > 0){ + if(typeof chartContext.data !== 'undefined' && chartContext.data !== null && chartContext.data.length > 0){ metricStackedBarGraph.draw(chartContext); }
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/D3GroupGraphListView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/D3GroupGraphListView.java index 59737ac..a9789c6 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/D3GroupGraphListView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/D3GroupGraphListView.java @@ -21,7 +21,6 @@ package org.rhq.enterprise.gui.coregui.client.inventory.groups.detail; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; -import java.util.EnumSet; import java.util.List;
import com.google.gwt.core.client.GWT; @@ -35,19 +34,17 @@ import org.rhq.core.domain.measurement.DataType; import org.rhq.core.domain.measurement.DisplayType; import org.rhq.core.domain.measurement.MeasurementDefinition; import org.rhq.core.domain.measurement.composite.MeasurementDataNumericHighLowComposite; -import org.rhq.core.domain.resource.ResourceType; import org.rhq.core.domain.resource.group.ResourceGroup; import org.rhq.core.domain.resource.group.composite.ResourceGroupAvailability; import org.rhq.enterprise.gui.coregui.client.CoreGUI; import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; import org.rhq.enterprise.gui.coregui.client.inventory.AutoRefresh; import org.rhq.enterprise.gui.coregui.client.inventory.common.AbstractD3GraphListView; -import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilityOverUnderGraphType; import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.MetricGraphData; +import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilityOverUnderGraphType; import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.StackedBarMetricGraphImpl; import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.MetricD3Graph; import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.avail.AvailabilityD3GraphView; -import org.rhq.enterprise.gui.coregui.client.inventory.resource.type.ResourceTypeRepository; import org.rhq.enterprise.gui.coregui.client.util.Log; import org.rhq.enterprise.gui.coregui.client.util.async.CountDownLatch;
@@ -55,7 +52,7 @@ import org.rhq.enterprise.gui.coregui.client.util.async.CountDownLatch; * Build the Group version of the View that shows the individual graph views. * @author Mike Thompson */ -public final class D3GroupGraphListView extends AbstractD3GraphListView implements AutoRefresh{ +public final class D3GroupGraphListView extends AbstractD3GraphListView implements AutoRefresh {
private ResourceGroup resourceGroup; private VLayout graphsVLayout; @@ -75,7 +72,8 @@ public final class D3GroupGraphListView extends AbstractD3GraphListView implemen
addMember(buttonBarDateTimeRangeEditor); if (showAvailabilityGraph) { - availabilityGraph = new AvailabilityD3GraphView<AvailabilityOverUnderGraphType>(new AvailabilityOverUnderGraphType(resourceGroup.getId())); + availabilityGraph = new AvailabilityD3GraphView<AvailabilityOverUnderGraphType>( + new AvailabilityOverUnderGraphType(resourceGroup.getId())); addMember(availabilityGraph); } graphsVLayout = new VLayout(); @@ -89,7 +87,6 @@ public final class D3GroupGraphListView extends AbstractD3GraphListView implemen addMember(graphsVLayout); }
- public void redrawGraphs() { this.onDraw(); } @@ -100,68 +97,60 @@ public final class D3GroupGraphListView extends AbstractD3GraphListView implemen private void buildGraphs() {
queryAvailability(EntityContext.forGroup(resourceGroup), buttonBarDateTimeRangeEditor.getStartTime(), - buttonBarDateTimeRangeEditor.getEndTime(), null); + buttonBarDateTimeRangeEditor.getEndTime(), null);
- ResourceTypeRepository.Cache.getInstance().getResourceTypes(resourceGroup.getResourceType().getId(), - EnumSet.of(ResourceTypeRepository.MetadataType.measurements), - new ResourceTypeRepository.TypeLoadedCallback() { - public void onTypesLoaded(final ResourceType type) { + final ArrayList<MeasurementDefinition> measurementDefinitions = new ArrayList<MeasurementDefinition>();
- final ArrayList<MeasurementDefinition> measurementDefinitions = new ArrayList<MeasurementDefinition>(); + for (MeasurementDefinition def : resourceGroup.getResourceType().getMetricDefinitions()) { + if (def.getDataType() == DataType.MEASUREMENT && def.getDisplayType() == DisplayType.SUMMARY) { + measurementDefinitions.add(def); + } + }
- for (MeasurementDefinition def : type.getMetricDefinitions()) { - if (def.getDataType() == DataType.MEASUREMENT && def.getDisplayType() == DisplayType.SUMMARY) { - measurementDefinitions.add(def); - } - } + Collections.sort(measurementDefinitions, new Comparator<MeasurementDefinition>() { + public int compare(MeasurementDefinition o1, MeasurementDefinition o2) { + return new Integer(o1.getDisplayOrder()).compareTo(o2.getDisplayOrder()); + } + });
- Collections.sort(measurementDefinitions, new Comparator<MeasurementDefinition>() { - public int compare(MeasurementDefinition o1, MeasurementDefinition o2) { - return new Integer(o1.getDisplayOrder()).compareTo(o2.getDisplayOrder()); - } - }); + int[] measDefIdArray = new int[measurementDefinitions.size()]; + for (int i = 0; i < measDefIdArray.length; i++) { + measDefIdArray[i] = measurementDefinitions.get(i).getId(); + }
- int[] measDefIdArray = new int[measurementDefinitions.size()]; - for (int i = 0; i < measDefIdArray.length; i++) { - measDefIdArray[i] = measurementDefinitions.get(i).getId(); - } + GWTServiceLookup.getMeasurementDataService().findDataForCompatibleGroup(resourceGroup.getId(), measDefIdArray, + buttonBarDateTimeRangeEditor.getStartTime(), buttonBarDateTimeRangeEditor.getEndTime(), 60, + new AsyncCallback<List<List<MeasurementDataNumericHighLowComposite>>>() { + @Override + public void onFailure(Throwable caught) { + CoreGUI.getErrorHandler().handleError(MSG.view_resource_monitor_graphs_loadFailed(), caught); + loadingLabel.setContents(MSG.view_resource_monitor_graphs_loadFailed()); + }
- GWTServiceLookup.getMeasurementDataService().findDataForCompatibleGroup(resourceGroup.getId(), - measDefIdArray, buttonBarDateTimeRangeEditor.getStartTime(), buttonBarDateTimeRangeEditor.getEndTime(), 60, - new AsyncCallback<List<List<MeasurementDataNumericHighLowComposite>>>() { - @Override - public void onFailure(Throwable caught) { - CoreGUI.getErrorHandler().handleError(MSG.view_resource_monitor_graphs_loadFailed(), - caught); - loadingLabel.setContents(MSG.view_resource_monitor_graphs_loadFailed()); - } - - @Override - public void onSuccess(List<List<MeasurementDataNumericHighLowComposite>> result) { - if (result.isEmpty()) { - loadingLabel.setContents(MSG.view_resource_monitor_graphs_noneAvailable()); - } else { - loadingLabel.hide(); - int i = 0; - for (List<MeasurementDataNumericHighLowComposite> data : result) { - buildIndividualGraph(measurementDefinitions.get(i++), data); - } - // There is a weird timing case when availabilityGraph can be null - if (availabilityGraph != null) { - availabilityGraph.setGroupAvailabilityList(groupAvailabilityList); - new Timer(){ - @Override - public void run() { - availabilityGraph.drawJsniChart(); - } - }.schedule(150); - } + @Override + public void onSuccess(List<List<MeasurementDataNumericHighLowComposite>> result) { + if (result.isEmpty()) { + loadingLabel.setContents(MSG.view_resource_monitor_graphs_noneAvailable()); + } else { + loadingLabel.hide(); + int i = 0; + for (List<MeasurementDataNumericHighLowComposite> data : result) { + buildIndividualGraph(measurementDefinitions.get(i++), data); + } + // There is a weird timing case when availabilityGraph can be null + if (availabilityGraph != null) { + availabilityGraph.setGroupAvailabilityList(groupAvailabilityList); + new Timer() { + @Override + public void run() { + availabilityGraph.drawJsniChart(); } - } - }); - + }.schedule(150); + } + } } }); + }
protected void queryAvailability(final EntityContext groupContext, Long startTime, Long endTime, @@ -197,7 +186,7 @@ public final class D3GroupGraphListView extends AbstractD3GraphListView implemen List<MeasurementDataNumericHighLowComposite> data) {
MetricGraphData metricGraphData = MetricGraphData.createForResourceGroup(resourceGroup.getId(), - resourceGroup.getName(), measurementDefinition, data ); + resourceGroup.getName(), measurementDefinition, data);
StackedBarMetricGraphImpl graph = GWT.create(StackedBarMetricGraphImpl.class); graph.setMetricGraphData(metricGraphData); @@ -207,11 +196,9 @@ public final class D3GroupGraphListView extends AbstractD3GraphListView implemen graphView.setWidth("95%"); graphView.setHeight(MULTI_CHART_HEIGHT);
- if(graphsVLayout != null){ + if (graphsVLayout != null) { graphsVLayout.addMember(graphView); } }
- - } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupMultiLineGraphListView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupMultiLineGraphListView.java new file mode 100644 index 0000000..101df60 --- /dev/null +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupMultiLineGraphListView.java @@ -0,0 +1,79 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2012 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ +package org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table; + + +import org.rhq.enterprise.gui.coregui.client.CoreGUI; +import org.rhq.enterprise.gui.coregui.client.Messages; + +/** + * A MultiLine version of the Composite group single metric multiple resource charts. 
+ * + * @author Mike Thompson + */ +public final class CompositeGroupMultiLineGraphListView extends CompositeGroupD3GraphListView +{ + private static final Messages MSG = CoreGUI.getMessages(); + + public CompositeGroupMultiLineGraphListView(int groupId, int defId, boolean isAutogroup) + { + super(groupId, defId, isAutogroup); + } + + + + @Override + public native void drawJsniChart() /*-{ + console.log("Draw nvd3 charts for composite multiline graph"); + var chartId = global.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getChartId()(), + chartHandle = "#mChart-"+chartId, + chartSelection = chartHandle + " svg", + yAxisUnits = this.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getYAxisUnits()(), + xAxisLabel = this.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getXAxisTitle()(), + xAxisTimeFormat = this.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupMultiLineGraphListView::getXAxisTimeFormatHoursMinutes()(); + json = eval(this.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getJsonMetrics()()); + + $wnd.nv.addGraph(function() { + var chart = $wnd.nv.models.lineChart(); + + chart.xAxis.axisLabel(xAxisLabel) + .tickFormat(function(d) { return $wnd.d3.time.format(xAxisTimeFormat)(new Date(d)) }); + + chart.yAxis + .axisLabel(yAxisUnits) + .tickFormat($wnd.d3.format('.02f')); + + $wnd.d3.select(chartSelection) + .datum(json) + .transition().duration(300) + .call(chart); + + $wnd.nv.utils.windowResize(chart.update); + + return chart; + }); + + }-*/; + + + + public String getXAxisTimeFormatHoursMinutes() { + return MSG.chart_xaxis_time_format_hours_minutes(); + } +} diff --git 
a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/DashboardLinkUtility.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/DashboardLinkUtility.java new file mode 100644 index 0000000..8ca1ac5 --- /dev/null +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/DashboardLinkUtility.java @@ -0,0 +1,258 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ +package org.rhq.enterprise.gui.coregui.client.inventory.resource.detail; + +import java.util.HashMap; +import java.util.Map; +import java.util.TreeSet; + +import com.google.gwt.http.client.Request; +import com.google.gwt.http.client.RequestBuilder; +import com.google.gwt.http.client.RequestCallback; +import com.google.gwt.http.client.RequestException; +import com.google.gwt.http.client.Response; +import com.google.gwt.user.client.History; +import com.google.gwt.user.client.rpc.AsyncCallback; +import com.smartgwt.client.widgets.menu.Menu; +import com.smartgwt.client.widgets.menu.MenuItem; +import com.smartgwt.client.widgets.menu.events.ClickHandler; +import com.smartgwt.client.widgets.menu.events.MenuItemClickEvent; + +import org.rhq.core.domain.auth.Subject; +import org.rhq.core.domain.configuration.PropertySimple; +import org.rhq.core.domain.criteria.DashboardCriteria; +import org.rhq.core.domain.criteria.SubjectCriteria; +import org.rhq.core.domain.dashboard.Dashboard; +import org.rhq.core.domain.dashboard.DashboardPortlet; +import org.rhq.core.domain.measurement.DataType; +import org.rhq.core.domain.measurement.MeasurementDefinition; +import org.rhq.core.domain.measurement.MeasurementSchedule; +import org.rhq.core.domain.resource.Resource; +import org.rhq.core.domain.resource.ResourceType; +import org.rhq.core.domain.util.PageList; +import org.rhq.enterprise.gui.coregui.client.CoreGUI; +import org.rhq.enterprise.gui.coregui.client.Messages; +import org.rhq.enterprise.gui.coregui.client.UserSessionManager; +import org.rhq.enterprise.gui.coregui.client.dashboard.portlets.inventory.resource.graph.ResourceD3GraphPortlet; +import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; +import org.rhq.enterprise.gui.coregui.client.util.Log; +import org.rhq.enterprise.gui.coregui.client.util.message.Message; +import org.rhq.enterprise.gui.coregui.client.util.preferences.MeasurementUserPreferences; +import 
org.rhq.enterprise.gui.coregui.client.util.preferences.UserPreferences; + +/** + * Utility Class to build menus for linking to the Dashboard. + * @author Jay Shaughnessy + * @author Greg Hinkle + * @author Mike Thompson + */ +public class DashboardLinkUtility { + final static Messages MSG = CoreGUI.getMessages(); + + private DashboardLinkUtility() { + } + + public static MenuItem buildMetricsMenu(final ResourceType resourceType, final Resource resource, String label) { + + MenuItem measurements = new MenuItem(label); + final Menu measurementsSubMenu = new Menu(); + + DashboardCriteria criteria = new DashboardCriteria(); + GWTServiceLookup.getDashboardService().findDashboardsByCriteria(criteria, + new AsyncCallback<PageList<Dashboard>>() { + + public void onFailure(Throwable caught) { + CoreGUI.getErrorHandler().handleError(MSG.view_tree_common_contextMenu_loadFailed_dashboard(), + caught); + } + + public void onSuccess(PageList<Dashboard> result) { + //sort the display items alphabetically + TreeSet<String> ordered = new TreeSet<String>(); + Map<String, MeasurementDefinition> definitionMap = new HashMap<String, MeasurementDefinition>(); + for (MeasurementDefinition m : resourceType.getMetricDefinitions()) { + ordered.add(m.getDisplayName()); + definitionMap.put(m.getDisplayName(), m); + } + + for (String displayName : ordered) { + final MeasurementDefinition def = definitionMap.get(displayName); + //only add menu items for Measurement + if (def.getDataType().equals(DataType.MEASUREMENT)) { + MenuItem defItem = new MenuItem(def.getDisplayName()); + measurementsSubMenu.addItem(defItem); + Menu defSubItem = new Menu(); + defItem.setSubmenu(defSubItem); + + for (final Dashboard d : result) { + MenuItem addToDBItem = new MenuItem(MSG + .view_tree_common_contextMenu_addChartToDashboard(d.getName())); + defSubItem.addItem(addToDBItem); + + addToDBItem.addClickHandler(new ClickHandler() { + + public void onClick(MenuItemClickEvent menuItemClickEvent) { + DashboardPortlet p 
= new DashboardPortlet(MSG + .view_tree_common_contextMenu_resourceGraph(), ResourceD3GraphPortlet.KEY, + 250); + p.getConfiguration() + .put( + new PropertySimple(ResourceD3GraphPortlet.CFG_RESOURCE_ID, resource + .getId())); + p.getConfiguration().put( + new PropertySimple(ResourceD3GraphPortlet.CFG_DEFINITION_ID, def.getId())); + + d.addPortlet(p); + + GWTServiceLookup.getDashboardService().storeDashboard(d, + new AsyncCallback<Dashboard>() { + + public void onFailure(Throwable caught) { + CoreGUI.getErrorHandler().handleError( + MSG.view_tree_common_contextMenu_saveChartToDashboardFailure(), + caught); + } + + public void onSuccess(Dashboard result) { + CoreGUI + .getMessageCenter() + .notify( + new Message( + MSG.view_tree_common_contextMenu_saveChartToDashboardSuccessful(result + .getName()), Message.Severity.Info)); + } + }); + + } + + }); + + } + + //add new menu item for adding current graphable element to view if on Monitor/Graphs tab + String currentViewPath = History.getToken(); + if (currentViewPath.contains("Monitoring/Metrics")) { + MenuItem addGraphItem = new MenuItem(MSG.common_title_add_graph_to_view()); + defSubItem.addItem(addGraphItem); + + addGraphItem.addClickHandler(new ClickHandler() { + public void onClick(MenuItemClickEvent menuItemClickEvent) { + //generate javascript to call out to. + //Ex. 
menuLayers.hide();addMetric('${metric.resourceId},${metric.scheduleId}') + if (getScheduleDefinitionId(resource, def.getName()) > -1) { + final String resourceGraphElements = resource.getId() + "," + + getScheduleDefinitionId(resource, def.getName()); + + //Once, the portal-war will be rewritten to GWT and operations performed + //within the iframe + JSF will update the user preferences, the following + //2 lines could be uncommented and the lines below them refactorized + //MeasurementUserPreferences measurementPreferences = new MeasurementUserPreferences(UserSessionManager.getUserPreferences()); + //String selectedView = measurementPreferences.getSelectedView(String.valueOf(resource.getId())); + + final int sid = UserSessionManager.getSessionSubject().getId(); + SubjectCriteria c = new SubjectCriteria(); + c.addFilterId(sid); + + GWTServiceLookup.getSubjectService().findSubjectsByCriteria(c, + new AsyncCallback<PageList<Subject>>() { + public void onSuccess(PageList<Subject> result) { + if (result.size() > 0) { + UserPreferences uPreferences = new UserPreferences(result + .get(0)); + MeasurementUserPreferences mPreferences = new MeasurementUserPreferences( + uPreferences); + String selectedView = mPreferences.getSelectedView(String + .valueOf(resource.getId())); + + addNewMetric(String.valueOf(resource.getId()), + selectedView, resourceGraphElements); + } else { + Log.warn("DashboardLinkUtility: Error obtaining subject with id:" + sid); + } + } + + public void onFailure(Throwable caught) { + Log.warn("DashboardLinkUtility: Error obtaining subject with id:" + sid, caught); + } + }); + } + } + }); + } + } + } + + } + }); + measurements.setSubmenu(measurementsSubMenu); + return measurements; + } + + /** Locate the specific schedule definition using the definition identifier. 
+ */ + private static int getScheduleDefinitionId(Resource resource, String definitionName) { + int id = -1; + if (resource.getSchedules() != null) { + boolean located = false; + MeasurementSchedule[] schedules = new MeasurementSchedule[resource.getSchedules().size()]; + resource.getSchedules().toArray(schedules); + for (int i = 0; (!located && i < resource.getSchedules().size()); i++) { + MeasurementSchedule schedule = schedules[i]; + MeasurementDefinition definition = schedule.getDefinition(); + if ((definition != null) && definition.getName().equals(definitionName)) { + located = true; + id = schedule.getId(); + } + } + } + return id; + } + + private static void addNewMetric(String id, String selectedView, String resourceGraphElements) { + //construct portal.war url to access + String baseUrl = "/resource/common/monitor/visibility/IndicatorCharts.do"; + baseUrl += "?id=" + id; + baseUrl += "&view=" + selectedView; + baseUrl += "&action=addChart&metric=" + resourceGraphElements; + final String url = baseUrl; + //initiate HTTP request + final RequestBuilder b = new RequestBuilder(RequestBuilder.GET, baseUrl); + + try { + b.setCallback(new RequestCallback() { + public void onResponseReceived(final Request request, final Response response) { + Log.trace("Successfully submitted request to add graph to view:" + url); + + //kick off a page reload. 
+ String currentViewPath = History.getToken(); + CoreGUI.goToView(currentViewPath, true); + } + + @Override + public void onError(Request request, Throwable t) { + Log.trace("Error adding Metric:" + url, t); + } + }); + b.send(); + } catch (RequestException e) { + Log.warn("Error adding Metric:" + url, e); + } + + } +} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceDetailView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceDetailView.java index 2d33d66..ae51195 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceDetailView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceDetailView.java @@ -66,9 +66,8 @@ import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.inventory import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.inventory.ResourceAgentView; import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.CalltimeView; import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.D3GraphListView; -import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.avail.ResourceAvailabilityView; import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.schedules.ResourceSchedulesView; -import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.table.MeasurementTableView; +import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.table.MetricsResourceView; import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.traits.TraitsView; import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.operation.history.ResourceOperationHistoryListView; import 
org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.operation.schedule.ResourceOperationScheduleListView; @@ -129,10 +128,8 @@ public class ResourceDetailView extends
private SubTab summaryActivity; private SubTab summaryTimeline; - private SubTab monitorGraphs; private SubTab monitorMetrics; private SubTab monitorTraits; - private SubTab monitorAvail; private SubTab monitorSched; private SubTab monitorCallTime; private SubTab inventoryChildren; @@ -196,16 +193,11 @@ public class ResourceDetailView extends monitoringTab = new TwoLevelTab(new ViewName("Monitoring", MSG.view_tabs_common_monitoring()), IconEnum.SUSPECT_METRICS);
- monitorGraphs = new SubTab(monitoringTab, new ViewName("Graphs", MSG.view_tabs_common_graphs()), null); - - monitorMetrics = new SubTab(monitoringTab, new ViewName("Metrics", "Metrics"), null); + monitorMetrics = new SubTab(monitoringTab, new ViewName("Metrics", MSG.view_tabs_common_metrics()), null); monitorTraits = new SubTab(monitoringTab, new ViewName("Traits", MSG.view_tabs_common_traits()), null); - monitorAvail = new SubTab(monitoringTab, new ViewName("Availability", MSG.view_tabs_common_availability()), - null); monitorSched = new SubTab(monitoringTab, new ViewName("Schedules", MSG.view_tabs_common_schedules()), null); monitorCallTime = new SubTab(monitoringTab, new ViewName("CallTime", MSG.view_tabs_common_calltime()), null); - monitoringTab.registerSubTabs(monitorGraphs, monitorMetrics, monitorTraits, monitorAvail, - monitorSched, monitorCallTime); + monitoringTab.registerSubTabs( monitorMetrics, monitorTraits, monitorSched, monitorCallTime); tabs.add(monitoringTab);
eventsTab = new TwoLevelTab(new ViewName("Events", MSG.view_tabs_common_events()), IconEnum.EVENTS); @@ -395,22 +387,14 @@ public class ResourceDetailView extends
boolean visibleToIE8 = !BrowserUtility.isBrowserPreIE9();
- viewFactory = (!visibleToIE8) ? null : new ViewFactory() { - @Override - public Canvas createView() { - return createD3GraphListView(); - } - }; - updateSubTab(this.monitoringTab, this.monitorGraphs, visible, visibleToIE8, viewFactory); - // visible = same test as above viewFactory = (!visible) ? null : new ViewFactory() { @Override public Canvas createView() { - return new MeasurementTableView(resource.getId()); + return new MetricsResourceView(resource); } }; - updateSubTab(this.monitoringTab, this.monitorMetrics, visible, true, viewFactory); + updateSubTab(this.monitoringTab, this.monitorMetrics, visible, visibleToIE8, viewFactory);
visible = hasMetricsOfType(this.resourceComposite, DataType.TRAIT); viewFactory = (!visible) ? null : new ViewFactory() { @@ -421,13 +405,6 @@ public class ResourceDetailView extends }; updateSubTab(this.monitoringTab, this.monitorTraits, visible, true, viewFactory);
- updateSubTab(this.monitoringTab, this.monitorAvail, true, true, new ViewFactory() { - @Override - public Canvas createView() { - return new ResourceAvailabilityView(resourceComposite); - } - }); - updateSubTab(this.monitoringTab, this.monitorSched, hasMetricsOfType(this.resourceComposite, null), true, new ViewFactory() { @Override diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceTreeView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceTreeView.java index 071f831..a3990bb 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceTreeView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceTreeView.java @@ -32,11 +32,6 @@ import java.util.Set; import java.util.TreeMap; import java.util.TreeSet;
-import com.google.gwt.http.client.Request; -import com.google.gwt.http.client.RequestBuilder; -import com.google.gwt.http.client.RequestCallback; -import com.google.gwt.http.client.RequestException; -import com.google.gwt.http.client.Response; import com.google.gwt.user.client.History; import com.google.gwt.user.client.rpc.AsyncCallback; import com.smartgwt.client.data.DSCallback; @@ -60,17 +55,8 @@ import com.smartgwt.client.widgets.tree.events.DataArrivedHandler; import com.smartgwt.client.widgets.tree.events.NodeContextClickEvent; import com.smartgwt.client.widgets.tree.events.NodeContextClickHandler;
-import org.rhq.core.domain.auth.Subject; -import org.rhq.core.domain.configuration.PropertySimple; -import org.rhq.core.domain.criteria.DashboardCriteria; import org.rhq.core.domain.criteria.ResourceCriteria; import org.rhq.core.domain.criteria.ResourceGroupCriteria; -import org.rhq.core.domain.criteria.SubjectCriteria; -import org.rhq.core.domain.dashboard.Dashboard; -import org.rhq.core.domain.dashboard.DashboardPortlet; -import org.rhq.core.domain.measurement.DataType; -import org.rhq.core.domain.measurement.MeasurementDefinition; -import org.rhq.core.domain.measurement.MeasurementSchedule; import org.rhq.core.domain.operation.OperationDefinition; import org.rhq.core.domain.resource.Resource; import org.rhq.core.domain.resource.ResourceType; @@ -82,11 +68,9 @@ import org.rhq.core.domain.util.PageList; import org.rhq.enterprise.gui.coregui.client.CoreGUI; import org.rhq.enterprise.gui.coregui.client.ImageManager; import org.rhq.enterprise.gui.coregui.client.LinkManager; -import org.rhq.enterprise.gui.coregui.client.UserSessionManager; import org.rhq.enterprise.gui.coregui.client.ViewId; import org.rhq.enterprise.gui.coregui.client.ViewPath; import org.rhq.enterprise.gui.coregui.client.components.tree.EnhancedTreeNode; -import org.rhq.enterprise.gui.coregui.client.dashboard.portlets.inventory.resource.graph.ResourceD3GraphPortlet; import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; import org.rhq.enterprise.gui.coregui.client.gwt.ResourceGWTServiceAsync; import org.rhq.enterprise.gui.coregui.client.gwt.ResourceGroupGWTServiceAsync; @@ -101,8 +85,6 @@ import org.rhq.enterprise.gui.coregui.client.inventory.resource.type.ResourceTyp import org.rhq.enterprise.gui.coregui.client.util.Log; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; import org.rhq.enterprise.gui.coregui.client.util.message.Message; -import org.rhq.enterprise.gui.coregui.client.util.preferences.MeasurementUserPreferences; -import 
org.rhq.enterprise.gui.coregui.client.util.preferences.UserPreferences;
/** * @author Jay Shaughnessy @@ -562,7 +544,7 @@ public class ResourceTreeView extends EnhancedVLayout { resourceContextMenu.addItem(operations);
// Metric graph addition menu - resourceContextMenu.addItem(buildMetricsMenu(resourceType, resource)); + resourceContextMenu.addItem(DashboardLinkUtility.buildMetricsMenu(resourceType, resource, MSG.view_tree_common_contextMenu_measurements()));
// Create Child Menu and Manual Import Menu final Set<ResourceType> creatableChildTypes = getCreatableChildTypes(resourceType); @@ -725,190 +707,6 @@ public class ResourceTreeView extends EnhancedVLayout { tree.reloadChildren(refreshNode); }
- private MenuItem buildMetricsMenu(final ResourceType type, final Resource resource) { - MenuItem measurements = new MenuItem(MSG.view_tree_common_contextMenu_measurements()); - final Menu measurementsSubMenu = new Menu(); - - DashboardCriteria criteria = new DashboardCriteria(); - GWTServiceLookup.getDashboardService().findDashboardsByCriteria(criteria, - new AsyncCallback<PageList<Dashboard>>() { - - public void onFailure(Throwable caught) { - CoreGUI.getErrorHandler().handleError(MSG.view_tree_common_contextMenu_loadFailed_dashboard(), - caught); - } - - public void onSuccess(PageList<Dashboard> result) { - //sort the display items alphabetically - TreeSet<String> ordered = new TreeSet<String>(); - Map<String, MeasurementDefinition> definitionMap = new HashMap<String, MeasurementDefinition>(); - for (MeasurementDefinition m : type.getMetricDefinitions()) { - ordered.add(m.getDisplayName()); - definitionMap.put(m.getDisplayName(), m); - } - - for (String displayName : ordered) { - final MeasurementDefinition def = definitionMap.get(displayName); - //only add menu items for Measurement - if (def.getDataType().equals(DataType.MEASUREMENT)) { - MenuItem defItem = new MenuItem(def.getDisplayName()); - measurementsSubMenu.addItem(defItem); - Menu defSubItem = new Menu(); - defItem.setSubmenu(defSubItem); - - for (final Dashboard d : result) { - MenuItem addToDBItem = new MenuItem(MSG - .view_tree_common_contextMenu_addChartToDashboard(d.getName())); - defSubItem.addItem(addToDBItem); - - addToDBItem.addClickHandler(new ClickHandler() { - - public void onClick(MenuItemClickEvent menuItemClickEvent) { - DashboardPortlet p = new DashboardPortlet(MSG - .view_tree_common_contextMenu_resourceGraph(), ResourceD3GraphPortlet.KEY, - 250); - p.getConfiguration().put( - new PropertySimple(ResourceD3GraphPortlet.CFG_RESOURCE_ID, resource.getId())); - p.getConfiguration().put( - new PropertySimple(ResourceD3GraphPortlet.CFG_DEFINITION_ID, def.getId())); - - d.addPortlet(p); - - 
GWTServiceLookup.getDashboardService().storeDashboard(d, - new AsyncCallback<Dashboard>() { - - public void onFailure(Throwable caught) { - CoreGUI.getErrorHandler().handleError( - MSG.view_tree_common_contextMenu_saveChartToDashboardFailure(), - caught); - } - - public void onSuccess(Dashboard result) { - CoreGUI - .getMessageCenter() - .notify( - new Message( - MSG.view_tree_common_contextMenu_saveChartToDashboardSuccessful(result - .getName()), Message.Severity.Info)); - } - }); - - } - - }); - - - }//end dashboard iteration - - //add new menu item for adding current graphable element to view if on Monitor/Graphs tab - String currentViewPath = History.getToken(); - if (currentViewPath.contains("Monitoring/NewGraphs")) { - MenuItem addGraphItem = new MenuItem(MSG.common_title_add_graph_to_view()); - defSubItem.addItem(addGraphItem); - - addGraphItem.addClickHandler(new ClickHandler() { - public void onClick(MenuItemClickEvent menuItemClickEvent) { - //generate javascript to call out to. - //Ex. 
menuLayers.hide();addMetric('${metric.resourceId},${metric.scheduleId}') - if (getScheduleDefinitionId(resource, def.getName()) > -1) { - final String resourceGraphElements = resource.getId() + "," - + getScheduleDefinitionId(resource, def.getName()); - - //Once, the portal-war will be rewritten to GWT and operations performed - //within the iframe + JSF will update the user preferences, the following - //2 lines could be uncommented and the lines below them refactorized - //MeasurementUserPreferences measurementPreferences = new MeasurementUserPreferences(UserSessionManager.getUserPreferences()); - //String selectedView = measurementPreferences.getSelectedView(String.valueOf(resource.getId())); - - final int sid = UserSessionManager.getSessionSubject().getId(); - SubjectCriteria c = new SubjectCriteria(); - c.addFilterId(sid); - - GWTServiceLookup.getSubjectService().findSubjectsByCriteria(c, - new AsyncCallback<PageList<Subject>>() { - public void onSuccess(PageList<Subject> result) { - if (result.size() > 0) { - UserPreferences uPreferences = new UserPreferences(result - .get(0)); - MeasurementUserPreferences mPreferences = new MeasurementUserPreferences( - uPreferences); - String selectedView = mPreferences.getSelectedView(String - .valueOf(resource.getId())); - - addNewMetric(String.valueOf(resource.getId()), - selectedView, resourceGraphElements); - } else { - Log.trace("Error obtaining subject with id:" + sid); - } - } - - public void onFailure(Throwable caught) { - Log.trace("Error obtaining subject with id:" + sid, caught); - } - }); - } - } - }); - } // end add the "add to view" menu item - }//end trait exclusion - }//end measurement def iteration - - } - }); - measurements.setSubmenu(measurementsSubMenu); - return measurements; - } - - private void addNewMetric(String id, String selectedView, String resourceGraphElements) { - //construct portal.war url to access - String baseUrl = "/resource/common/monitor/visibility/IndicatorCharts.do"; - baseUrl += 
"?id=" + id; - baseUrl += "&view=" + selectedView; - baseUrl += "&action=addChart&metric=" + resourceGraphElements; - final String url = baseUrl; - //initiate HTTP request - final RequestBuilder b = new RequestBuilder(RequestBuilder.GET, baseUrl); - - try { - b.setCallback(new RequestCallback() { - public void onResponseReceived(final Request request, final Response response) { - Log.trace("Successfully submitted request to add graph to view:" + url); - - //kick off a page reload. - String currentViewPath = History.getToken(); - CoreGUI.goToView(currentViewPath, true); - } - - @Override - public void onError(Request request, Throwable t) { - Log.trace("Error adding Metric:" + url, t); - } - }); - b.send(); - } catch (RequestException e) { - Log.trace("Error adding Metric:" + url, e); - } - } - - /** Locate the specific schedule definition using the definition identifier. - */ - private int getScheduleDefinitionId(Resource resource, String definitionName) { - int id = -1; - if (resource.getSchedules() != null) { - boolean located = false; - MeasurementSchedule[] schedules = new MeasurementSchedule[resource.getSchedules().size()]; - resource.getSchedules().toArray(schedules); - for (int i = 0; (!located && i < resource.getSchedules().size()); i++) { - MeasurementSchedule schedule = schedules[i]; - MeasurementDefinition definition = schedule.getDefinition(); - if ((definition != null) && definition.getName().equals(definitionName)) { - located = true; - id = schedule.getId(); - } - } - } - return id; - }
private void setRootResource(Resource rootResource) { this.rootResource = rootResource; diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/D3GraphListView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/D3GraphListView.java index 65893ce..8b9f327 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/D3GraphListView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/D3GraphListView.java @@ -1,6 +1,6 @@ /* * RHQ Management Platform - * Copyright (C) 2005-2012 Red Hat, Inc. + * Copyright (C) 2005-2013 Red Hat, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify @@ -21,7 +21,6 @@ package org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitori import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; -import java.util.EnumSet; import java.util.List; import java.util.Set; import java.util.TreeSet; @@ -30,7 +29,6 @@ import com.google.gwt.core.client.GWT; import com.google.gwt.user.client.Timer; import com.google.gwt.user.client.rpc.AsyncCallback; import com.smartgwt.client.types.Overflow; -import com.smartgwt.client.widgets.form.fields.events.ClickHandler; import com.smartgwt.client.widgets.layout.VLayout;
import org.rhq.core.domain.common.EntityContext; @@ -41,16 +39,14 @@ import org.rhq.core.domain.measurement.MeasurementDefinition; import org.rhq.core.domain.measurement.composite.MeasurementDataNumericHighLowComposite; import org.rhq.core.domain.measurement.composite.MeasurementOOBComposite; import org.rhq.core.domain.resource.Resource; -import org.rhq.core.domain.resource.ResourceType; import org.rhq.core.domain.util.PageList; import org.rhq.enterprise.gui.coregui.client.CoreGUI; import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; import org.rhq.enterprise.gui.coregui.client.inventory.common.AbstractD3GraphListView; -import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilityOverUnderGraphType; import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.MetricGraphData; +import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilityOverUnderGraphType; import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.StackedBarMetricGraphImpl; import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.avail.AvailabilityD3GraphView; -import org.rhq.enterprise.gui.coregui.client.inventory.resource.type.ResourceTypeRepository; import org.rhq.enterprise.gui.coregui.client.util.Log; import org.rhq.enterprise.gui.coregui.client.util.async.Command; import org.rhq.enterprise.gui.coregui.client.util.async.CountDownLatch; @@ -115,17 +111,17 @@ public class D3GraphListView extends AbstractD3GraphListView { setOverflow(Overflow.HIDDEN); }
- @Override protected void onDraw() { super.onDraw(); - Log.debug("D3GraphListView.onDraw() for: " + resource.getName()+ " id: "+ resource.getId()); + Log.debug("D3GraphListView.onDraw() for: " + resource.getName() + " id: " + resource.getId()); destroyMembers();
addMember(buttonBarDateTimeRangeEditor);
if (showAvailabilityGraph) { - availabilityGraph = new AvailabilityD3GraphView<AvailabilityOverUnderGraphType>(new AvailabilityOverUnderGraphType(resource.getId())); + availabilityGraph = new AvailabilityD3GraphView<AvailabilityOverUnderGraphType>( + new AvailabilityOverUnderGraphType(resource.getId())); addMember(availabilityGraph); }
@@ -163,8 +159,8 @@ public class D3GraphListView extends AbstractD3GraphListView {
@Override public void onSuccess(List<Availability> availList) { - Log.debug("\nSuccessfully queried availability in: " - + (System.currentTimeMillis() - timerStart) + " ms."); + Log.debug("\nSuccessfully queried availability in: " + (System.currentTimeMillis() - timerStart) + + " ms."); availabilityList = availList; if (countDownLatch != null) { countDownLatch.countDown(); @@ -180,193 +176,179 @@ public class D3GraphListView extends AbstractD3GraphListView { private void queryAndBuildGraphs() { final long startTimer = System.currentTimeMillis();
- if(null != availabilityGraph){ + if (null != availabilityGraph) { queryAvailability(EntityContext.forResource(resource.getId()), buttonBarDateTimeRangeEditor.getStartTime(), buttonBarDateTimeRangeEditor.getEndTime(), null); }
- ResourceTypeRepository.Cache.getInstance().getResourceTypes(resource.getResourceType().getId(), - EnumSet.of(ResourceTypeRepository.MetadataType.measurements), - new ResourceTypeRepository.TypeLoadedCallback() { - public void onTypesLoaded(final ResourceType type) { + final ArrayList<MeasurementDefinition> measurementDefinitions = new ArrayList<MeasurementDefinition>(); + final ArrayList<MeasurementDefinition> summaryMeasurementDefinitions = new ArrayList<MeasurementDefinition>();
- final ArrayList<MeasurementDefinition> measurementDefinitions = new ArrayList<MeasurementDefinition>(); - final ArrayList<MeasurementDefinition> summaryMeasurementDefinitions = new ArrayList<MeasurementDefinition>(); + for (MeasurementDefinition def : resource.getResourceType().getMetricDefinitions()) { + if (def.getDataType() == DataType.MEASUREMENT && def.getDisplayType() == DisplayType.SUMMARY) { + summaryMeasurementDefinitions.add(def); + } + measurementDefinitions.add(def); + }
- for (MeasurementDefinition def : type.getMetricDefinitions()) { - if (def.getDataType() == DataType.MEASUREMENT && def.getDisplayType() == DisplayType.SUMMARY) { - summaryMeasurementDefinitions.add(def); - } - measurementDefinitions.add(def); - } + Collections.sort(measurementDefinitions, new Comparator<MeasurementDefinition>() { + @Override + public int compare(MeasurementDefinition o1, MeasurementDefinition o2) { + return new Integer(o1.getDisplayOrder()).compareTo(o2.getDisplayOrder()); + } + }); + Collections.sort(summaryMeasurementDefinitions, new Comparator<MeasurementDefinition>() { + @Override + public int compare(MeasurementDefinition o1, MeasurementDefinition o2) { + return new Integer(o1.getDisplayOrder()).compareTo(o2.getDisplayOrder()); + } + }); + + int[] measDefIdArray = new int[measurementDefinitions.size()]; + for (int i = 0; i < measDefIdArray.length; i++) { + measDefIdArray[i] = measurementDefinitions.get(i).getId(); + }
- Collections.sort(measurementDefinitions, new Comparator<MeasurementDefinition>() { - @Override - public int compare(MeasurementDefinition o1, MeasurementDefinition o2) { - return new Integer(o1.getDisplayOrder()).compareTo(o2.getDisplayOrder()); - } - }); - Collections.sort(summaryMeasurementDefinitions, new Comparator<MeasurementDefinition>() { - @Override - public int compare(MeasurementDefinition o1, MeasurementDefinition o2) { - return new Integer(o1.getDisplayOrder()).compareTo(o2.getDisplayOrder()); - } - }); - - int[] measDefIdArray = new int[measurementDefinitions.size()]; - for (int i = 0; i < measDefIdArray.length; i++) { - measDefIdArray[i] = measurementDefinitions.get(i).getId(); + // setting up a deferred Command to execute after all resource queries have completed (successfully or unsuccessfully) + // we know there are exactly 2 resources + final CountDownLatch countDownLatch = CountDownLatch.create(NUM_ASYNC_CALLS, new Command() { + @Override + /** + * Satisfied only after ALL of the metric queries AND availability have completed + */ + public void execute() { + Log.debug("Total Time for async metrics/avail query: " + (System.currentTimeMillis() - startTimer)); + if (null == metricsDataList || metricsDataList.isEmpty()) { + loadingLabel.setContents(MSG.view_resource_monitor_graphs_noneAvailable()); + } else { + loadingLabel.hide(); + if (useSummaryData) { + buildSummaryGraphs(metricsDataList, summaryMeasurementDefinitions, measurementDefinitions); + } else { + determineGraphsToBuild(metricsDataList, measurementDefinitions, definitionIds); } - - // setting up a deferred Command to execute after all resource queries have completed (successfully or unsuccessfully) - // we know there are exactly 2 resources - final CountDownLatch countDownLatch = CountDownLatch.create(NUM_ASYNC_CALLS, new Command() { - @Override - /** - * Satisfied only after ALL of the metric queries AND availability have completed - */ - public void execute() { - Log.debug("Total 
Time for async metrics/avail query: " - + (System.currentTimeMillis() - startTimer)); - if (null == metricsDataList || metricsDataList.isEmpty()) { - loadingLabel.setContents(MSG.view_resource_monitor_graphs_noneAvailable()); - } else { - loadingLabel.hide(); - if (useSummaryData) { - buildSummaryGraphs(metricsDataList, summaryMeasurementDefinitions, - measurementDefinitions); - } else { - determineGraphsToBuild(metricsDataList, measurementDefinitions, definitionIds); - } - // There is a weird timing case when availabilityGraph can be null - if (null != availabilityGraph) { - // we only need the first metricData since we are only taking the - // availability data set in there for the dropdowns already - availabilityGraph.setAvailabilityList(availabilityList); - new Timer(){ - @Override - public void run() { - availabilityGraph.drawJsniChart(); - } - }.schedule(150); - } + // There is a weird timing case when availabilityGraph can be null + if (null != availabilityGraph) { + // we only need the first metricData since we are only taking the + // availability data set in there for the dropdowns already + availabilityGraph.setAvailabilityList(availabilityList); + new Timer() { + @Override + public void run() { + availabilityGraph.drawJsniChart(); } + }.schedule(150); + } + }
- } - }); + } + });
- queryMetricData(measDefIdArray, countDownLatch); - queryOOBMetrics(resource, countDownLatch); - // now the countDown latch will run sometime asynchronously + queryMetricData(measDefIdArray, countDownLatch); + queryOOBMetrics(resource, countDownLatch); + // now the countDown latch will run sometime asynchronously + }
+ private void queryMetricData(final int[] measDefIdArray, final CountDownLatch countDownLatch) { + GWTServiceLookup.getMeasurementDataService().findDataForResource(resource.getId(), measDefIdArray, + buttonBarDateTimeRangeEditor.getStartTime(), buttonBarDateTimeRangeEditor.getEndTime(), 60, + new AsyncCallback<List<List<MeasurementDataNumericHighLowComposite>>>() { + @Override + public void onFailure(Throwable caught) { + CoreGUI.getErrorHandler().handleError(MSG.view_resource_monitor_graphs_loadFailed(), caught); + loadingLabel.setContents(MSG.view_resource_monitor_graphs_loadFailed()); + countDownLatch.countDown(); }
- private void queryMetricData(final int[] measDefIdArray, final CountDownLatch countDownLatch) { - GWTServiceLookup.getMeasurementDataService().findDataForResource(resource.getId(), measDefIdArray, - buttonBarDateTimeRangeEditor.getStartTime(), buttonBarDateTimeRangeEditor.getEndTime(), 60, - new AsyncCallback<List<List<MeasurementDataNumericHighLowComposite>>>() { - @Override - public void onFailure(Throwable caught) { - CoreGUI.getErrorHandler().handleError(MSG.view_resource_monitor_graphs_loadFailed(), - caught); - loadingLabel.setContents(MSG.view_resource_monitor_graphs_loadFailed()); - countDownLatch.countDown(); - } - - @Override - public void onSuccess(List<List<MeasurementDataNumericHighLowComposite>> metrics) { - metricsDataList = metrics; - Log.debug("Regular Metric graph data queried in: " - + (System.currentTimeMillis() - startTimer + " ms.")); - countDownLatch.countDown(); + @Override + public void onSuccess(List<List<MeasurementDataNumericHighLowComposite>> metrics) { + metricsDataList = metrics; + countDownLatch.countDown();
- } - }); } + }); + }
- private void queryOOBMetrics(final Resource resource, final CountDownLatch countDownLatch) { + private void queryOOBMetrics(final Resource resource, final CountDownLatch countDownLatch) {
- final long startTime = System.currentTimeMillis(); + final long startTime = System.currentTimeMillis();
- GWTServiceLookup.getMeasurementDataService().getHighestNOOBsForResource(resource.getId(), 60, + GWTServiceLookup.getMeasurementDataService().getHighestNOOBsForResource(resource.getId(), 60,
- new AsyncCallback<PageList<MeasurementOOBComposite>>() { - @Override - public void onSuccess(PageList<MeasurementOOBComposite> measurementOOBComposites) { + new AsyncCallback<PageList<MeasurementOOBComposite>>() { + @Override + public void onSuccess(PageList<MeasurementOOBComposite> measurementOOBComposites) {
- measurementOOBCompositeList = measurementOOBComposites; - Log.debug("\nSuccessfully queried "+measurementOOBCompositeList.size() +" OOB records in: " + (System.currentTimeMillis() - startTime) - + " ms."); - countDownLatch.countDown(); - } + measurementOOBCompositeList = measurementOOBComposites; + Log.debug("\nSuccessfully queried " + measurementOOBCompositeList.size() + " OOB records in: " + + (System.currentTimeMillis() - startTime) + " ms."); + countDownLatch.countDown(); + }
- @Override - public void onFailure(Throwable caught) { - Log.debug("Error retrieving out of bound metrics for resource [" + resource.getId() + "]:" - + caught.getMessage()); - countDownLatch.countDown(); - } - }); + @Override + public void onFailure(Throwable caught) { + Log.debug("Error retrieving out of bound metrics for resource [" + resource.getId() + "]:" + + caught.getMessage()); + countDownLatch.countDown(); + } + });
- } + }
- /** - * Spin through the measurement definitions (in order) checking to see if they are in the - * summary measurement definition set and if so build a graph. - * @param measurementData - * @param summaryMeasurementDefinitions - * @param measurementDefinitions - */ - private void buildSummaryGraphs(List<List<MeasurementDataNumericHighLowComposite>> measurementData, - List<MeasurementDefinition> summaryMeasurementDefinitions, - List<MeasurementDefinition> measurementDefinitions) { - Set<Integer> summaryIds = new TreeSet<Integer>(); - for (MeasurementDefinition summaryMeasurementDefinition : summaryMeasurementDefinitions) { - summaryIds.add(summaryMeasurementDefinition.getId()); - } + /** + * Spin through the measurement definitions (in order) checking to see if they are in the + * summary measurement definition set and if so build a graph. + * @param measurementData + * @param summaryMeasurementDefinitions + * @param measurementDefinitions + */ + private void buildSummaryGraphs(List<List<MeasurementDataNumericHighLowComposite>> measurementData, + List<MeasurementDefinition> summaryMeasurementDefinitions, List<MeasurementDefinition> measurementDefinitions) { + Set<Integer> summaryIds = new TreeSet<Integer>(); + for (MeasurementDefinition summaryMeasurementDefinition : summaryMeasurementDefinitions) { + summaryIds.add(summaryMeasurementDefinition.getId()); + }
- int i = 0; - for (MeasurementDefinition measurementDefinition : measurementDefinitions) { - if (summaryIds.contains(measurementDefinition.getId())) { - buildSingleGraph(measurementOOBCompositeList, measurementDefinition, - measurementData.get(i), MULTI_CHART_HEIGHT); - } - i++; - } + int i = 0; + for (MeasurementDefinition measurementDefinition : measurementDefinitions) { + if (summaryIds.contains(measurementDefinition.getId())) { + buildSingleGraph(measurementOOBCompositeList, measurementDefinition, measurementData.get(i), + MULTI_CHART_HEIGHT); + } + i++; + }
- } + }
- private void determineGraphsToBuild(List<List<MeasurementDataNumericHighLowComposite>> measurementData, - List<MeasurementDefinition> measurementDefinitions, Set<Integer> definitionIds) { - int i = 0; - for (List<MeasurementDataNumericHighLowComposite> metric : measurementData) { - - for (Integer selectedDefinitionId : definitionIds) { - final MeasurementDefinition measurementDefinition = measurementDefinitions.get(i); - final int measurementId = measurementDefinition.getId(); - - if (null != selectedDefinitionId) { - // single graph case - if (measurementId == selectedDefinitionId) { - buildSingleGraph(measurementOOBCompositeList, measurementDefinition, metric, SINGLE_CHART_HEIGHT); - } - } else { - // multiple graph case - buildSingleGraph(measurementOOBCompositeList, measurementDefinition, metric, MULTI_CHART_HEIGHT); - } - } - i++; + private void determineGraphsToBuild(List<List<MeasurementDataNumericHighLowComposite>> measurementData, + List<MeasurementDefinition> measurementDefinitions, Set<Integer> definitionIds) { + int i = 0; + for (List<MeasurementDataNumericHighLowComposite> metric : measurementData) { + + for (Integer selectedDefinitionId : definitionIds) { + final MeasurementDefinition measurementDefinition = measurementDefinitions.get(i); + final int measurementId = measurementDefinition.getId(); + + if (null != selectedDefinitionId) { + // single graph case + if (measurementId == selectedDefinitionId) { + buildSingleGraph(measurementOOBCompositeList, measurementDefinition, metric, + SINGLE_CHART_HEIGHT); } + } else { + // multiple graph case + buildSingleGraph(measurementOOBCompositeList, measurementDefinition, metric, MULTI_CHART_HEIGHT); } - }); - + } + i++; + } }
private void buildSingleGraph(PageList<MeasurementOOBComposite> measurementOOBCompositeList, MeasurementDefinition measurementDefinition, List<MeasurementDataNumericHighLowComposite> data, int height) {
MetricGraphData metricGraphData = MetricGraphData.createForResource(resource.getId(), resource.getName(), - measurementDefinition, data, measurementOOBCompositeList ); + measurementDefinition, data, measurementOOBCompositeList); StackedBarMetricGraphImpl graph = GWT.create(StackedBarMetricGraphImpl.class); graph.setMetricGraphData(metricGraphData); graph.setGraphListView(this); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/MetricD3Graph.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/MetricD3Graph.java index 3b4ec1f..65807f9 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/MetricD3Graph.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/MetricD3Graph.java @@ -22,6 +22,7 @@ import com.google.gwt.user.client.Timer; import com.smartgwt.client.widgets.HTMLFlow;
import org.rhq.enterprise.gui.coregui.client.inventory.common.AbstractD3GraphListView; +import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.RedrawGraphs; import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.StackedBarMetricGraphImpl; import org.rhq.enterprise.gui.coregui.client.util.Log; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; @@ -30,7 +31,7 @@ import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; * A D3 graph implementation for graphing Resource metrics. * Just the graph only. No avail graph no buttons just he graph. */ -public class MetricD3Graph<T extends AbstractD3GraphListView> extends EnhancedVLayout { +public class MetricD3Graph<T extends AbstractD3GraphListView> extends EnhancedVLayout implements RedrawGraphs{
protected StackedBarMetricGraphImpl graph; private HTMLFlow graphDiv = null; @@ -59,18 +60,18 @@ public class MetricD3Graph<T extends AbstractD3GraphListView> extends EnhancedVL */ private static String getSvgDefs() { return " <defs>" - + " <linearGradient id="headerGrad" x1="0%" y1="0%" x2="0%" y2="100%">" - + " <stop offset="0%" style="stop-color:#E6E6E6;stop-opacity:1"/>" - + " <stop offset="100%" style="stop-color:#F0F0F0;stop-opacity:1"/>" - + " </linearGradient>" - + " <pattern id="noDataStripes" patternUnits="userSpaceOnUse" x="0" y="0"" - + " width="6" height="3">" - + " <path d="M 0 0 6 0" style="stroke:#CCCCCC; fill:none;"/>" - + " </pattern>" - + " <pattern id="unknownStripes" patternUnits="userSpaceOnUse" x="0" y="0"" - + " width="6" height="3">" - + " <path d="M 0 0 6 0" style="stroke:#2E9EC2; fill:none;"/>" - + " </pattern>" + + " <linearGradient id="headerGrad" x1="0%" y1="0%" x2="0%" y2="100%">" + + " <stop offset="0%" style="stop-color:#E6E6E6;stop-opacity:1"/>" + + " <stop offset="100%" style="stop-color:#F0F0F0;stop-opacity:1"/>" + + " </linearGradient>" + + " <pattern id="noDataStripes" patternUnits="userSpaceOnUse" x="0" y="0"" + + " width="6" height="3">" + + " <path d="M 0 0 6 0" style="stroke:#CCCCCC; fill:none;"/>" + + " </pattern>" + + " <pattern id="unknownStripes" patternUnits="userSpaceOnUse" x="0" y="0"" + + " width="6" height="3">" + + " <path d="M 0 0 6 0" style="stroke:#2E9EC2; fill:none;"/>" + + " </pattern>" + "<pattern id="diagonalHatchFill" patternUnits="userSpaceOnUse" x="0" y="0" width="105" height="105">" + "<g style="fill:none; stroke:black; stroke-width:1">" + "<path d="M0 90 l15,15"/>" diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/avail/ResourceAvailabilityView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/avail/ResourceAvailabilityView.java deleted file 
mode 100644 index 6719070..0000000 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/avail/ResourceAvailabilityView.java +++ /dev/null @@ -1,399 +0,0 @@ -/* - * RHQ Management Platform - * Copyright 2012, Red Hat Middleware LLC, and individual contributors - * as indicated by the @author tags. See the copyright.txt file in the - * distribution for a full listing of individual contributors. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- */ - -package org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.avail; - -import java.util.ArrayList; -import java.util.Date; - -import com.google.gwt.user.client.rpc.AsyncCallback; -import com.smartgwt.client.data.DSRequest; -import com.smartgwt.client.data.DSResponse; -import com.smartgwt.client.data.Record; -import com.smartgwt.client.data.SortSpecifier; -import com.smartgwt.client.rpc.RPCResponse; -import com.smartgwt.client.types.Alignment; -import com.smartgwt.client.types.ListGridFieldType; -import com.smartgwt.client.types.SortDirection; -import com.smartgwt.client.widgets.form.DynamicForm; -import com.smartgwt.client.widgets.form.fields.FormItem; -import com.smartgwt.client.widgets.form.fields.StaticTextItem; -import com.smartgwt.client.widgets.grid.ListGridField; -import com.smartgwt.client.widgets.grid.ListGridRecord; -import com.smartgwt.client.widgets.layout.Layout; - -import org.rhq.core.domain.criteria.AvailabilityCriteria; -import org.rhq.core.domain.measurement.Availability; -import org.rhq.core.domain.measurement.MeasurementUnits; -import org.rhq.core.domain.resource.composite.ResourceAvailabilitySummary; -import org.rhq.core.domain.resource.composite.ResourceComposite; -import org.rhq.core.domain.util.PageControl; -import org.rhq.core.domain.util.PageList; -import org.rhq.core.domain.util.PageOrdering; -import org.rhq.enterprise.gui.coregui.client.CoreGUI; -import org.rhq.enterprise.gui.coregui.client.ImageManager; -import org.rhq.enterprise.gui.coregui.client.components.table.Table; -import org.rhq.enterprise.gui.coregui.client.components.table.TimestampCellFormatter; -import org.rhq.enterprise.gui.coregui.client.gwt.AvailabilityGWTServiceAsync; -import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; -import org.rhq.enterprise.gui.coregui.client.util.MeasurementConverterClient; -import org.rhq.enterprise.gui.coregui.client.util.RPCDataSource; -import 
org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; - -/** - * This shows the availability history for a resource. - * - * @author Jay Shaughnessy - * @author John Mazzitelli - */ -public class ResourceAvailabilityView extends EnhancedVLayout { - - private ResourceComposite resourceComposite; - private StaticTextItem currentField; - private StaticTextItem availField; - private StaticTextItem availTimeField; - private StaticTextItem downField; - private StaticTextItem downTimeField; - private StaticTextItem disabledField; - private StaticTextItem disabledTimeField; - private StaticTextItem failureCountField; - private StaticTextItem disabledCountField; - private StaticTextItem mtbfField; - private StaticTextItem mttrField; - private StaticTextItem unknownField; - private StaticTextItem currentTimeField; - - public ResourceAvailabilityView(ResourceComposite resourceComposite) { - super(); - - this.resourceComposite = resourceComposite; - - setWidth100(); - setHeight100(); - } - - @Override - protected void onInit() { - super.onInit(); - - addMember(createSummaryForm()); - addMember(createListView()); - } - - private DynamicForm createSummaryForm() { - DynamicForm form = new DynamicForm(); - form.setWidth100(); - form.setAutoHeight(); - form.setMargin(10); - form.setNumCols(4); - - // row 1 - currentField = new StaticTextItem("current", MSG.view_resource_monitor_availability_currentStatus()); - currentField.setWrapTitle(false); - currentField.setColSpan(4); - - // row 2 - availField = new StaticTextItem("avail", MSG.view_resource_monitor_availability_availability()); - availField.setWrapTitle(false); - prepareTooltip(availField, MSG.view_resource_monitor_availability_availability_tooltip()); - - availTimeField = new StaticTextItem("availTime", MSG.view_resource_monitor_availability_uptime()); - availTimeField.setWrapTitle(false); - prepareTooltip(availTimeField, MSG.view_resource_monitor_availability_uptime_tooltip()); - - // row 3 - downField = new 
StaticTextItem("down", MSG.view_resource_monitor_availability_down()); - downField.setWrapTitle(false); - prepareTooltip(downField, MSG.view_resource_monitor_availability_down_tooltip()); - - downTimeField = new StaticTextItem("downTime", MSG.view_resource_monitor_availability_downtime()); - downTimeField.setWrapTitle(false); - prepareTooltip(downTimeField, MSG.view_resource_monitor_availability_downtime_tooltip()); - - // row 4 - disabledField = new StaticTextItem("disabled", MSG.view_resource_monitor_availability_disabled()); - disabledField.setWrapTitle(false); - prepareTooltip(disabledField, MSG.view_resource_monitor_availability_disabled_tooltip()); - - disabledTimeField = new StaticTextItem("disabledTime", MSG.view_resource_monitor_availability_disabledTime()); - disabledTimeField.setWrapTitle(false); - prepareTooltip(disabledTimeField, MSG.view_resource_monitor_availability_disabledTime_tooltip()); - - // row 5 - failureCountField = new StaticTextItem("failureCount", MSG.view_resource_monitor_availability_numFailures()); - failureCountField.setWrapTitle(false); - prepareTooltip(failureCountField, MSG.view_resource_monitor_availability_numFailures_tooltip()); - - disabledCountField = new StaticTextItem("disabledCount", MSG.view_resource_monitor_availability_numDisabled()); - disabledCountField.setWrapTitle(false); - prepareTooltip(disabledCountField, MSG.view_resource_monitor_availability_numDisabled_tooltip()); - - // row 6 - mtbfField = new StaticTextItem("mtbf", MSG.view_resource_monitor_availability_mtbf()); - mtbfField.setWrapTitle(false); - prepareTooltip(mtbfField, MSG.view_resource_monitor_availability_mtbf_tooltip()); - - mttrField = new StaticTextItem("mttr", MSG.view_resource_monitor_availability_mttr()); - mttrField.setWrapTitle(false); - prepareTooltip(mttrField, MSG.view_resource_monitor_availability_mttr_tooltip()); - - // row 7 - unknownField = new StaticTextItem("unknown"); - unknownField.setWrapTitle(false); - unknownField.setColSpan(4); - 
unknownField.setShowTitle(false); - - // row 8 - currentTimeField = new StaticTextItem("currentTime"); - currentTimeField.setWrapTitle(false); - currentTimeField.setColSpan(4); - currentTimeField.setShowTitle(false); - - form.setItems(currentField, availField, availTimeField, downField, downTimeField, disabledField, - disabledTimeField, failureCountField, disabledCountField, mtbfField, mttrField, unknownField, - currentTimeField); - - reloadSummaryData(); - - return form; - } - - private void reloadSummaryData() { - GWTServiceLookup.getResourceService().getResourceAvailabilitySummary(resourceComposite.getResource().getId(), - new AsyncCallback<ResourceAvailabilitySummary>() { - - @Override - public void onSuccess(ResourceAvailabilitySummary result) { - - currentField.setValue(MSG.view_resource_monitor_availability_currentStatus_value(result - .getCurrent().getName(), TimestampCellFormatter.format(result.getLastChange().getTime()))); - availField.setValue(MeasurementConverterClient.format(result.getUpPercentage(), - MeasurementUnits.PERCENTAGE, true)); - availTimeField.setValue(MeasurementConverterClient.format((double) result.getUpTime(), - MeasurementUnits.MILLISECONDS, true)); - downField.setValue(MeasurementConverterClient.format(result.getDownPercentage(), - MeasurementUnits.PERCENTAGE, true)); - downTimeField.setValue(MeasurementConverterClient.format((double) result.getDownTime(), - MeasurementUnits.MILLISECONDS, true)); - disabledField.setValue(MeasurementConverterClient.format(result.getDisabledPercentage(), - MeasurementUnits.PERCENTAGE, true)); - disabledTimeField.setValue(MeasurementConverterClient.format((double) result.getDisabledTime(), - MeasurementUnits.MILLISECONDS, true)); - failureCountField.setValue(result.getFailures()); - disabledCountField.setValue(result.getDisabled()); - mtbfField.setValue(MeasurementConverterClient.format((double) result.getMTBF(), - MeasurementUnits.MILLISECONDS, true)); - 
mttrField.setValue(MeasurementConverterClient.format((double) result.getMTTR(), - MeasurementUnits.MILLISECONDS, true)); - - if (result.getUnknownTime() > 0L) { - unknownField.setValue(MSG.view_resource_monitor_availability_unknown(MeasurementConverterClient - .format((double) result.getUnknownTime(), MeasurementUnits.MILLISECONDS, true))); - } else { - unknownField.setValue(""); - } - - currentTimeField.setValue(MSG.view_resource_monitor_availability_currentAsOf(TimestampCellFormatter - .format(result.getCurrentTime()))); - } - - @Override - public void onFailure(Throwable caught) { - currentField.setValue(MSG.common_label_error()); - CoreGUI.getErrorHandler() - .handleError(MSG.view_resource_monitor_availability_summaryError(), caught); - } - }); - } - - private void prepareTooltip(FormItem item, String tooltip) { - item.setHoverWidth(400); - item.setPrompt(tooltip); - } - - private Table<ListView.DS> createListView() { - ListView listView = new ListView(resourceComposite.getResource().getId()); - return listView; - } - - private class ListView extends Table<ListView.DS> { - - private DS dataSource; - private int resourceId; - - public ListView(int resourceId) { - super(null, new SortSpecifier[] { new SortSpecifier("startTime", SortDirection.DESCENDING) }); - - this.resourceId = resourceId; - - setDataSource(getDataSource()); - } - - @Override - public DS getDataSource() { - if (null == this.dataSource) { - this.dataSource = new DS(resourceId); - } - return this.dataSource; - } - - @Override - public void refresh() { - super.refresh(); - reloadSummaryData(); - } - - @Override - protected void configureTableContents(Layout contents) { - super.configureTableContents(contents); - setAutoHeight(); - } - - @Override - protected void configureTable() { - ArrayList<ListGridField> dataSourceFields = getDataSource().getListGridFields(); - getListGrid().setFields(dataSourceFields.toArray(new ListGridField[dataSourceFields.size()])); - - super.configureTable(); - } - - 
private class DS extends RPCDataSource<Availability, AvailabilityCriteria> { - - public static final String ATTR_ID = "id"; - public static final String ATTR_AVAILABILITY = "availabilityType"; - public static final String ATTR_START_TIME = "startTime"; - public static final String ATTR_END_TIME = "endTime"; - - public static final String ATTR_DURATION = "duration"; - - private AvailabilityGWTServiceAsync availService = GWTServiceLookup.getAvailabilityService(); - private int resourceId; - - public DS(int resourceId) { - super(); - this.resourceId = resourceId; - addDataSourceFields(); - } - - /** - * The view that contains the list grid which will display this datasource's data will call this - * method to get the field information which is used to control the display of the data. - * - * @return list grid fields used to display the datasource data - */ - public ArrayList<ListGridField> getListGridFields() { - ArrayList<ListGridField> fields = new ArrayList<ListGridField>(6); - - ListGridField startTimeField = new ListGridField(ATTR_START_TIME, MSG.common_title_start()); - startTimeField.setCellFormatter(new TimestampCellFormatter()); - startTimeField.setShowHover(true); - startTimeField.setHoverCustomizer(TimestampCellFormatter.getHoverCustomizer(ATTR_START_TIME)); - startTimeField.setCanSortClientOnly(true); - fields.add(startTimeField); - - ListGridField endTimeField = new ListGridField(ATTR_END_TIME, MSG.common_title_end()); - endTimeField.setCellFormatter(new TimestampCellFormatter()); - endTimeField.setShowHover(true); - endTimeField.setHoverCustomizer(TimestampCellFormatter.getHoverCustomizer(ATTR_END_TIME)); - endTimeField.setCanSortClientOnly(true); - fields.add(endTimeField); - - ListGridField durationField = new ListGridField(ATTR_DURATION, MSG.common_title_duration()); - durationField.setAlign(Alignment.RIGHT); - fields.add(durationField); - - ListGridField availabilityField = new ListGridField(ATTR_AVAILABILITY, MSG.common_title_availability()); - 
availabilityField.setType(ListGridFieldType.IMAGE); - availabilityField.setAlign(Alignment.CENTER); - fields.add(availabilityField); - - return fields; - } - - @Override - protected AvailabilityCriteria getFetchCriteria(DSRequest request) { - AvailabilityCriteria c = new AvailabilityCriteria(); - c.addFilterResourceId(resourceId); - c.addFilterInitialAvailability(false); - - // This code is unlikely to be necessary as the encompassing view should be using an initial - // sort specifier. But just in case, make sure we set the initial sort. Note that we have to - // manipulate the PageControl directly as per the restrictions on getFetchCriteria() (see jdoc). - PageControl pageControl = getPageControl(request); - if (pageControl.getOrderingFields().isEmpty()) { - pageControl.initDefaultOrderingField("startTime", PageOrdering.DESC); - } - - return c; - } - - @Override - protected void executeFetch(final DSRequest request, final DSResponse response, - AvailabilityCriteria criteria) { - - this.availService.findAvailabilityByCriteria(criteria, new AsyncCallback<PageList<Availability>>() { - public void onFailure(Throwable caught) { - // TODO fix message - CoreGUI.getErrorHandler().handleError(MSG.common_label_error(), caught); - response.setStatus(RPCResponse.STATUS_FAILURE); - processResponse(request.getRequestId(), response); - } - - public void onSuccess(final PageList<Availability> result) { - response.setData(buildRecords(result)); - response.setTotalRows(result.size()); - processResponse(request.getRequestId(), response); - } - }); - } - - @Override - public Availability copyValues(Record from) { - return null; - } - - @Override - public ListGridRecord copyValues(Availability from) { - ListGridRecord record = new ListGridRecord(); - - record.setAttribute(ATTR_ID, from.getId()); - record.setAttribute(ATTR_AVAILABILITY, - ImageManager.getAvailabilityIconFromAvailType(from.getAvailabilityType())); - record.setAttribute(ATTR_START_TIME, new Date(from.getStartTime())); - 
if (null != from.getEndTime()) { - record.setAttribute(ATTR_END_TIME, new Date(from.getEndTime())); - long duration = from.getEndTime() - from.getStartTime(); - record.setAttribute(ATTR_DURATION, - MeasurementConverterClient.format((double) duration, MeasurementUnits.MILLISECONDS, true)); - - } else { - record.setAttribute(ATTR_END_TIME, MSG.common_label_none2()); - long duration = System.currentTimeMillis() - from.getStartTime(); - record.setAttribute(ATTR_DURATION, - MeasurementConverterClient.format((double) duration, MeasurementUnits.MILLISECONDS, true)); - - } - - return record; - } - } - } - -} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/AddToDashboardComponent.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/AddToDashboardComponent.java new file mode 100644 index 0000000..ce4f8ef --- /dev/null +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/AddToDashboardComponent.java @@ -0,0 +1,185 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ +package org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.table; + +import java.util.LinkedHashMap; + +import com.google.gwt.user.client.rpc.AsyncCallback; +import com.smartgwt.client.widgets.IButton; +import com.smartgwt.client.widgets.events.ClickEvent; +import com.smartgwt.client.widgets.form.fields.SelectItem; +import com.smartgwt.client.widgets.form.fields.events.ChangeEvent; +import com.smartgwt.client.widgets.form.fields.events.ChangeHandler; +import com.smartgwt.client.widgets.grid.ListGridRecord; +import com.smartgwt.client.widgets.toolbar.ToolStrip; + +import org.rhq.core.domain.configuration.PropertySimple; +import org.rhq.core.domain.criteria.DashboardCriteria; +import org.rhq.core.domain.dashboard.Dashboard; +import org.rhq.core.domain.dashboard.DashboardPortlet; +import org.rhq.core.domain.measurement.MeasurementDefinition; +import org.rhq.core.domain.resource.Resource; +import org.rhq.core.domain.util.PageList; +import org.rhq.enterprise.gui.coregui.client.CoreGUI; +import org.rhq.enterprise.gui.coregui.client.dashboard.portlets.inventory.resource.graph.ResourceD3GraphPortlet; +import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; +import org.rhq.enterprise.gui.coregui.client.util.Log; +import org.rhq.enterprise.gui.coregui.client.util.enhanced.Enhanced; +import org.rhq.enterprise.gui.coregui.client.util.message.Message; + +/** + * @author Mike Thompson + */ +public class AddToDashboardComponent extends ToolStrip implements Enhanced { + final private Resource resource; + private SelectItem dashboardSelectItem; + private Dashboard selectedDashboard; + private IButton addToDashboardButton; + private LinkedHashMap<String, String> dashboardMenuMap; + private LinkedHashMap<Integer, Dashboard> dashboardMap; + private MetricsTableView.MetricsTableListGrid metricsListGrid; + + public AddToDashboardComponent(Resource resource) { + this.resource = resource; + setPadding(5); + setMembersMargin(15); + setWidth(300); + 
dashboardMenuMap = new LinkedHashMap<String, String>(); + dashboardMap = new LinkedHashMap<Integer, Dashboard>(); + createToolstrip(); + } + + @Override + protected void onDraw() { + super.onDraw(); + removeMembers(getMembers()); + createToolstrip(); + } + + private void createToolstrip() { + addSpacer(15); + dashboardSelectItem = new SelectItem(); + addToDashboardButton = new IButton(MSG.view_metric_addToDashboard()); + addToDashboardButton.disable(); + + dashboardSelectItem = new SelectItem(); + dashboardSelectItem.setTitle("Dashboards"); + dashboardSelectItem.setWidth(300); + dashboardSelectItem.setPickListWidth(210); + populateDashboardMenu(); + addFormItem(dashboardSelectItem); + addMember(addToDashboardButton); + + dashboardSelectItem.addChangeHandler(new ChangeHandler() { + @Override + public void onChange(ChangeEvent changeEvent) { + Integer selectedDashboardId = Integer.valueOf((String) changeEvent.getValue()); + selectedDashboard = dashboardMap.get(selectedDashboardId); + } + }); + addToDashboardButton.addClickHandler(new com.smartgwt.client.widgets.events.ClickHandler() { + @Override + public void onClick(ClickEvent clickEvent) { + ListGridRecord[] selectedRecords = metricsListGrid.getSelectedRecords(); + for (ListGridRecord selectedRecord : selectedRecords) { + for (MeasurementDefinition measurementDefinition : resource.getResourceType() + .getMetricDefinitions()) { + if (measurementDefinition.getId() == selectedRecord + .getAttributeAsInt(MetricsViewDataSource.FIELD_METRIC_DEF_ID)) { + Log.debug("Add to Dashboard -- Storing: " + measurementDefinition.getDisplayName() + + " in " + selectedDashboard.getName()); + storeDashboardMetric(selectedDashboard, resource, measurementDefinition); + break; + } + } + } + } + }); + } + + public void disableAddToDashboardButton(){ + addToDashboardButton.disable(); + } + + public void enableAddToDashboardButton(){ + addToDashboardButton.enable(); + } + + + public void populateDashboardMenu() { + 
dashboardMenuMap.clear(); + dashboardMap.clear(); + + DashboardCriteria criteria = new DashboardCriteria(); + GWTServiceLookup.getDashboardService().findDashboardsByCriteria(criteria, + new AsyncCallback<PageList<Dashboard>>() { + + public void onFailure(Throwable caught) { + CoreGUI.getErrorHandler().handleError(MSG.view_tree_common_contextMenu_loadFailed_dashboard(), + caught); + } + + public void onSuccess(PageList<Dashboard> dashboards) { + for (final Dashboard dashboard : dashboards) { + dashboardMenuMap.put(String.valueOf(dashboard.getId()), + MSG.view_tree_common_contextMenu_addChartToDashboard(dashboard.getName())); + dashboardMap.put(dashboard.getId(), dashboard); + } + selectedDashboard = dashboards.get(0); + dashboardSelectItem.setValueMap(dashboardMenuMap); + dashboardSelectItem.setValue(selectedDashboard.getId()); + } + }); + } + + /** + * The metrics list grid is not available on object creation so we must attach later after it has been initialized. + * @param metricsListGrid + */ + public void setMetricsListGrid(MetricsTableView.MetricsTableListGrid metricsListGrid) { + this.metricsListGrid = metricsListGrid; + } + + + private void storeDashboardMetric(Dashboard dashboard, Resource resource, MeasurementDefinition definition) { + DashboardPortlet dashboardPortlet = new DashboardPortlet(MSG.view_tree_common_contextMenu_resourceGraph(), + ResourceD3GraphPortlet.KEY, 250); + dashboardPortlet.getConfiguration().put( + new PropertySimple(ResourceD3GraphPortlet.CFG_RESOURCE_ID, resource.getId())); + dashboardPortlet.getConfiguration().put( + new PropertySimple(ResourceD3GraphPortlet.CFG_DEFINITION_ID, definition.getId())); + + dashboard.addPortlet(dashboardPortlet); + + GWTServiceLookup.getDashboardService().storeDashboard(dashboard, new AsyncCallback<Dashboard>() { + + public void onFailure(Throwable caught) { + CoreGUI.getErrorHandler().handleError(MSG.view_tree_common_contextMenu_saveChartToDashboardFailure(), + caught); + } + + public void 
onSuccess(Dashboard result) { + CoreGUI.getMessageCenter().notify( + new Message(MSG.view_tree_common_contextMenu_saveChartToDashboardSuccessful(result.getName()), + Message.Severity.Info)); + } + }); + } + +} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MeasurementTableView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MeasurementTableView.java deleted file mode 100644 index 7022648..0000000 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MeasurementTableView.java +++ /dev/null @@ -1,237 +0,0 @@ -/* - * RHQ Management Platform - * Copyright (C) 2005-2011 Red Hat, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- */ - -package org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.table; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Set; -import java.util.TreeSet; - -import com.google.gwt.user.client.rpc.AsyncCallback; -import com.smartgwt.client.types.SelectionStyle; -import com.smartgwt.client.widgets.Window; -import com.smartgwt.client.widgets.events.CloseClickEvent; -import com.smartgwt.client.widgets.events.CloseClickHandler; -import com.smartgwt.client.widgets.grid.ListGrid; -import com.smartgwt.client.widgets.grid.ListGridField; -import com.smartgwt.client.widgets.grid.ListGridRecord; - -import org.rhq.core.domain.criteria.ResourceCriteria; -import org.rhq.core.domain.measurement.MeasurementData; -import org.rhq.core.domain.measurement.MeasurementUnits; -import org.rhq.core.domain.resource.composite.ResourceComposite; -import org.rhq.core.domain.util.PageList; -import org.rhq.enterprise.gui.coregui.client.CoreGUI; -import org.rhq.enterprise.gui.coregui.client.components.measurement.UserPreferencesMeasurementRangeEditor; -import org.rhq.enterprise.gui.coregui.client.components.table.Table; -import org.rhq.enterprise.gui.coregui.client.components.table.TableAction; -import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; -import org.rhq.enterprise.gui.coregui.client.inventory.InventoryView; -import org.rhq.enterprise.gui.coregui.client.inventory.common.detail.summary.AbstractActivityView.ChartViewWindow; -import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.D3GraphListView; -import org.rhq.enterprise.gui.coregui.client.util.MeasurementConverterClient; -import org.rhq.enterprise.gui.coregui.client.util.message.Message; - -/** - * Views a resource's measurements in a tabular view. 
- * - * @author John Mazzitelli - */ -public class MeasurementTableView extends Table<MetricsTableDataSource> { - - private final int resourceId; - - public MeasurementTableView(int resourceId) { - super(); - this.resourceId = resourceId; - setDataSource(new MetricsTableDataSource(resourceId)); - } - - protected void configureTable() { - ArrayList<ListGridField> fields = getDataSource().getListGridFields(); - setListGridFields(fields.toArray(new ListGridField[0])); - addExtraWidget(new UserPreferencesMeasurementRangeEditor(), true); - addTableAction(MSG.view_measureTable_getLive(), new TableAction() { - @Override - public boolean isEnabled(ListGridRecord[] selection) { - return selection != null && selection.length > 0; - } - - @Override - public void executeAction(ListGridRecord[] selection, Object actionValue) { - if (selection == null || selection.length == 0) { - return; - } - // keyed on metric name - string[0] is the metric label, [1] is the units - final HashMap<String, String[]> scheduleNamesAndUnits = new HashMap<String, String[]>(); - int[] definitionIds = new int[selection.length]; - int i = 0; - for (ListGridRecord record : selection) { - Integer defId = record.getAttributeAsInt(MetricsTableDataSource.FIELD_METRIC_DEF_ID); - definitionIds[i++] = defId.intValue(); - - String name = record.getAttribute(MetricsTableDataSource.FIELD_METRIC_NAME); - String label = record.getAttribute(MetricsTableDataSource.FIELD_METRIC_LABEL); - String units = record.getAttribute(MetricsTableDataSource.FIELD_METRIC_UNITS); - if (units == null || units.length() < 1) { - units = MeasurementUnits.NONE.name(); - } - - scheduleNamesAndUnits.put(name, new String[] { label, units }); - } - - // actually go out and ask the agents for the data - GWTServiceLookup.getMeasurementDataService(60000).findLiveData(resourceId, definitionIds, - new AsyncCallback<Set<MeasurementData>>() { - @Override - public void onSuccess(Set<MeasurementData> result) { - if (result == null) { - result = new 
HashSet<MeasurementData>(0); - } - ArrayList<ListGridRecord> records = new ArrayList<ListGridRecord>(result.size()); - for (MeasurementData data : result) { - String[] nameAndUnits = scheduleNamesAndUnits.get(data.getName()); - if (nameAndUnits != null) { - double doubleValue; - if (data.getValue() instanceof Number) { - doubleValue = ((Number) data.getValue()).doubleValue(); - } else { - doubleValue = Double.parseDouble(data.getValue().toString()); - } - String value = MeasurementConverterClient.formatToSignificantPrecision( - new double[] { doubleValue }, MeasurementUnits.valueOf(nameAndUnits[1]), true)[0]; - - ListGridRecord record = new ListGridRecord(); - record.setAttribute("name", nameAndUnits[0]); - record.setAttribute("value", value); - records.add(record); - } - } - Collections.sort(records, new Comparator<ListGridRecord>() { - public int compare(ListGridRecord o1, ListGridRecord o2) { - return o1.getAttribute("name").compareTo(o2.getAttribute("name")); - } - }); - showLiveData(records); - } - - @Override - public void onFailure(Throwable caught) { - CoreGUI.getErrorHandler().handleError(MSG.view_measureTable_getLive_failure(), caught); - } - }); - } - }); - - - addTableAction(MSG.view_measureTable_chartMetricValues(), new TableAction() { - @Override - public boolean isEnabled(ListGridRecord[] selection) { - return selection != null && selection.length > 0; - } - - @Override - public void executeAction(ListGridRecord[] selection, Object actionValue) { - if (selection == null || selection.length == 0) { - return; - } - final TreeSet<Integer> definitionIds = new TreeSet<Integer>(); - for (ListGridRecord record : selection) { - Integer defId = record.getAttributeAsInt(MetricsTableDataSource.FIELD_METRIC_DEF_ID); - definitionIds.add(defId); - } - - ResourceCriteria criteria = new ResourceCriteria(); - criteria.addFilterId(resourceId); - criteria.fetchSchedules(true); - GWTServiceLookup.getResourceService().findResourceCompositesByCriteria(criteria, - new 
AsyncCallback<PageList<ResourceComposite>>() { - - @Override - public void onFailure(Throwable caught) { - CoreGUI.getMessageCenter().notify( - new Message(MSG.view_inventory_resource_loadFailed(String.valueOf(resourceId)), - Message.Severity.Warning)); - - CoreGUI.goToView(InventoryView.VIEW_ID.getName()); - } - - @Override - public void onSuccess(PageList<ResourceComposite> result) { - if (result.isEmpty()) { - onFailure(new Exception(MSG.view_inventory_resource_loadFailed(String - .valueOf(resourceId)))); - } else { - final ResourceComposite resourceComposite = result.get(0); - - ChartViewWindow window = new ChartViewWindow(""); - final D3GraphListView graphListView = D3GraphListView.createMultipleGraphs( - resourceComposite.getResource(), definitionIds, true); - - window.addItem(graphListView); - window.show(); - refreshTableInfo(); - - } - } - }); - - } - }); - } - - private void showLiveData(ArrayList<ListGridRecord> records) { - final Window liveDataWindow = new Window(); - liveDataWindow.setTitle(MSG.view_measureTable_live_title()); - liveDataWindow.setShowModalMask(true); - liveDataWindow.setShowMinimizeButton(false); - liveDataWindow.setShowMaximizeButton(true); - liveDataWindow.setShowCloseButton(true); - liveDataWindow.setShowResizer(true); - liveDataWindow.setCanDragResize(true); - liveDataWindow.setDismissOnEscape(true); - liveDataWindow.setIsModal(true); - liveDataWindow.setWidth(700); - liveDataWindow.setHeight(425); - liveDataWindow.setAutoCenter(true); - liveDataWindow.centerInPage(); - liveDataWindow.addCloseClickHandler(new CloseClickHandler() { - @Override - public void onCloseClick(CloseClickEvent event) { - liveDataWindow.destroy(); - refreshTableInfo(); - } - }); - - ListGrid liveDataGrid = new ListGrid(); - liveDataGrid.setShowAllRecords(true); - liveDataGrid.setData(records.toArray(new ListGridRecord[records.size()])); - liveDataGrid.setSelectionType(SelectionStyle.NONE); - ListGridField name = new ListGridField("name", 
MSG.common_title_metric()); - ListGridField value = new ListGridField("value", MSG.common_title_value()); - liveDataGrid.setFields(name, value); - - liveDataWindow.addItem(liveDataGrid); - liveDataWindow.show(); - } -} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsResourceView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsResourceView.java new file mode 100644 index 0000000..bcea825 --- /dev/null +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsResourceView.java @@ -0,0 +1,167 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ +package org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.table; + +import java.util.List; + +import com.google.gwt.user.client.Timer; +import com.google.gwt.user.client.rpc.AsyncCallback; +import com.smartgwt.client.types.Overflow; +import com.smartgwt.client.types.VerticalAlignment; +import com.smartgwt.client.widgets.Img; +import com.smartgwt.client.widgets.events.ClickEvent; +import com.smartgwt.client.widgets.events.ClickHandler; + +import org.rhq.core.domain.common.EntityContext; +import org.rhq.core.domain.measurement.Availability; +import org.rhq.core.domain.resource.Resource; +import org.rhq.enterprise.gui.coregui.client.CoreGUI; +import org.rhq.enterprise.gui.coregui.client.IconEnum; +import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; +import org.rhq.enterprise.gui.coregui.client.inventory.common.AbstractD3GraphListView; +import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilityOverUnderGraphType; +import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.avail.AvailabilityD3GraphView; +import org.rhq.enterprise.gui.coregui.client.util.BrowserUtility; +import org.rhq.enterprise.gui.coregui.client.util.Log; +import org.rhq.enterprise.gui.coregui.client.util.async.CountDownLatch; +import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedHLayout; + +/** + * The consolidated metrics view showing metric graphs and availability data both in graphical and tabular form. 
+ * + * @author Mike Thompson + */ +public class MetricsResourceView extends AbstractD3GraphListView { + + private static final String COLLAPSED_TOOLTIP = MSG.chart_metrics_collapse_tooltip(); + private static final String EXPANDED_TOOLTIP = MSG.chart_metrics_expand_tooltip(); + + private final Resource resource; + private Img expandCollapseArrow; + private final MetricsTableView metricsTableView; + private final ResourceMetricAvailabilityView availabilityDetails; + + public MetricsResourceView(Resource resource) { + super(); + setOverflow(Overflow.AUTO); + setWidth100(); + setHeight100(); + this.resource = resource; + metricsTableView = new MetricsTableView(resource, this); + availabilityDetails = new ResourceMetricAvailabilityView(resource); + } + + + public void redrawGraphs() { + this.onDraw(); + } + + public void refreshGraphs(){ + new Timer() { + @Override + public void run() { + availabilityGraph.drawJsniChart(); + BrowserUtility.graphSparkLines(); + } + }.schedule(150); + } + + @Override + protected void onDraw() { + super.onDraw(); + Log.debug("MetricResourceView.onDraw() for: " + resource.getName() + " id: " + resource.getId()); + destroyMembers(); + + + addMember(buttonBarDateTimeRangeEditor); + + availabilityGraph = new AvailabilityD3GraphView<AvailabilityOverUnderGraphType>( + new AvailabilityOverUnderGraphType(resource.getId())); + + EnhancedHLayout expandCollapseHLayout = new EnhancedHLayout(); + + //add expand/collapse icon + expandCollapseArrow = new Img(IconEnum.COLLAPSED_ICON.getIcon16x16Path(), 16, 16); + expandCollapseArrow.setTooltip(COLLAPSED_TOOLTIP); + expandCollapseArrow.setLayoutAlign(VerticalAlignment.BOTTOM); + expandCollapseArrow.addClickHandler(new ClickHandler() { + private boolean collapsed = true; + + @Override + public void onClick(ClickEvent event) { + collapsed = !collapsed; + if (collapsed) { + expandCollapseArrow.setSrc(IconEnum.COLLAPSED_ICON.getIcon16x16Path()); + expandCollapseArrow.setTooltip(COLLAPSED_TOOLTIP); + 
availabilityDetails.hide(); + } else { + expandCollapseArrow.setSrc(IconEnum.EXPANDED_ICON.getIcon16x16Path()); + expandCollapseArrow.setTooltip(EXPANDED_TOOLTIP); + availabilityDetails.show(); + + } + refreshGraphs(); + } + }); + + + expandCollapseHLayout.addMember(expandCollapseArrow); + expandCollapseHLayout.addMember(availabilityGraph); + addMember(expandCollapseHLayout); + + availabilityDetails.hide(); + addMember(availabilityDetails); + + metricsTableView.setHeight100(); + addMember(metricsTableView); + + + queryAvailability(EntityContext.forResource(resource.getId()), buttonBarDateTimeRangeEditor.getStartTime(), + buttonBarDateTimeRangeEditor.getEndTime(), null); + } + + @Override + protected void queryAvailability(final EntityContext context, Long startTime, Long endTime, CountDownLatch notUsed ) { + + final long timerStart = System.currentTimeMillis(); + + // now return the availability + GWTServiceLookup.getAvailabilityService().getAvailabilitiesForResource(context.getResourceId(), startTime, + endTime, new AsyncCallback<List<Availability>>() { + @Override + public void onFailure(Throwable caught) { + CoreGUI.getErrorHandler().handleError(MSG.view_resource_monitor_availability_loadFailed(), caught); + } + + @Override + public void onSuccess(List<Availability> availList) { + Log.debug("\nSuccessfully queried availability in: " + (System.currentTimeMillis() - timerStart) + + " ms."); + availabilityGraph.setAvailabilityList(availList); + new Timer() { + @Override + public void run() { + availabilityGraph.drawJsniChart(); + } + }.schedule(150); + } + }); + } + +} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableDataSource.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableDataSource.java index 318166d..57e62ab 100644 --- 
a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableDataSource.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableDataSource.java @@ -1,7 +1,13 @@ package org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.table;
import java.util.ArrayList; +import java.util.Arrays; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.List; +import java.util.Set;
+import com.google.gwt.user.client.Timer; import com.google.gwt.user.client.rpc.AsyncCallback; import com.smartgwt.client.data.DSRequest; import com.smartgwt.client.data.DSResponse; @@ -11,18 +17,28 @@ import com.smartgwt.client.widgets.grid.ListGridField; import com.smartgwt.client.widgets.grid.ListGridRecord;
import org.rhq.core.domain.criteria.Criteria; +import org.rhq.core.domain.criteria.ResourceCriteria; import org.rhq.core.domain.measurement.DataType; +import org.rhq.core.domain.measurement.MeasurementDefinition; import org.rhq.core.domain.measurement.MeasurementSchedule; +import org.rhq.core.domain.measurement.composite.MeasurementDataNumericHighLowComposite; import org.rhq.core.domain.measurement.ui.MetricDisplaySummary; import org.rhq.core.domain.measurement.ui.MetricDisplayValue; +import org.rhq.core.domain.resource.Resource; +import org.rhq.core.domain.resource.ResourceType; +import org.rhq.core.domain.resource.composite.ResourceComposite; +import org.rhq.core.domain.util.PageList; import org.rhq.enterprise.gui.coregui.client.CoreGUI; import org.rhq.enterprise.gui.coregui.client.UserSessionManager; import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; +import org.rhq.enterprise.gui.coregui.client.inventory.resource.type.ResourceTypeRepository; import org.rhq.enterprise.gui.coregui.client.util.BrowserUtility; +import org.rhq.enterprise.gui.coregui.client.util.Log; import org.rhq.enterprise.gui.coregui.client.util.MeasurementUtility; import org.rhq.enterprise.gui.coregui.client.util.RPCDataSource; +import org.rhq.enterprise.gui.coregui.client.util.async.Command; +import org.rhq.enterprise.gui.coregui.client.util.async.CountDownLatch; import org.rhq.enterprise.gui.coregui.client.util.preferences.MeasurementUserPreferences; -import org.rhq.enterprise.gui.coregui.client.util.preferences.UserPreferences;
/** * A simple data source to read in metric data summaries for a resource. @@ -31,9 +47,13 @@ import org.rhq.enterprise.gui.coregui.client.util.preferences.UserPreferences; * we just load them all in at once. * * @author John Mazzitelli + * @author Mike Thompson + * @todo: get rid of this once we have tested the new screen out */ +@Deprecated public class MetricsTableDataSource extends RPCDataSource<MetricDisplaySummary, Criteria> {
+ public static final String FIELD_SPARKLINE = "sparkline"; public static final String FIELD_METRIC_LABEL = "label"; public static final String FIELD_ALERT_COUNT = "alertCount"; public static final String FIELD_MIN_VALUE = "min"; @@ -44,11 +64,15 @@ public class MetricsTableDataSource extends RPCDataSource<MetricDisplaySummary, public static final String FIELD_METRIC_SCHED_ID = "schedId"; public static final String FIELD_METRIC_UNITS = "units"; public static final String FIELD_METRIC_NAME = "name"; - + public static final String FIELD_RESOURCE_ID = "resourceId"; private int resourceId; + private List<MetricDisplaySummary> metricDisplaySummaries; + private List<List<MeasurementDataNumericHighLowComposite>> metricsDataList; + private MeasurementUserPreferences measurementUserPrefs;
public MetricsTableDataSource(int resourceId) { this.resourceId = resourceId; + measurementUserPrefs = new MeasurementUserPreferences(UserSessionManager.getUserPreferences()); }
/** @@ -58,7 +82,25 @@ public class MetricsTableDataSource extends RPCDataSource<MetricDisplaySummary, * @return list grid fields used to display the datasource data */ public ArrayList<ListGridField> getListGridFields() { - ArrayList<ListGridField> fields = new ArrayList<ListGridField>(6); + ArrayList<ListGridField> fields = new ArrayList<ListGridField>(7); + + ListGridField sparklineField = new ListGridField(FIELD_SPARKLINE, "chart"); + sparklineField.setCellFormatter(new CellFormatter() { + @Override + public String format(Object value, ListGridRecord record, int rowNum, int colNum) { + if (value == null) { + return ""; + } + String contents = "<span id='sparkline_" + resourceId + "-" + + record.getAttributeAsInt(FIELD_METRIC_DEF_ID) + "' class='dynamicsparkline' width='70' " + + "values='" + record.getAttribute(FIELD_SPARKLINE) + "'></span>"; + return contents; + + } + }); + + sparklineField.setWidth(80); + fields.add(sparklineField);
ListGridField nameField = new ListGridField(FIELD_METRIC_LABEL, MSG.common_title_name()); nameField.setWidth("30%"); @@ -101,6 +143,7 @@ public class MetricsTableDataSource extends RPCDataSource<MetricDisplaySummary, MeasurementUtility.formatSimpleMetrics(from);
ListGridRecord record = new ListGridRecord(); + record.setAttribute(FIELD_SPARKLINE, getCsvMetricsForSparkline()); record.setAttribute(FIELD_METRIC_LABEL, from.getLabel()); record.setAttribute(FIELD_ALERT_COUNT, String.valueOf(from.getAlertCount())); record.setAttribute(FIELD_MIN_VALUE, getMetricStringValue(from.getMinMetric())); @@ -111,9 +154,32 @@ public class MetricsTableDataSource extends RPCDataSource<MetricDisplaySummary, record.setAttribute(FIELD_METRIC_SCHED_ID, from.getScheduleId()); record.setAttribute(FIELD_METRIC_UNITS, from.getUnits()); record.setAttribute(FIELD_METRIC_NAME, from.getMetricName()); + record.setAttribute(FIELD_RESOURCE_ID, resourceId); return record; }
+ private String getCsvMetricsForSparkline() { + StringBuilder sb = new StringBuilder(); + Log.debug("getCsvMetricsForSparkline.metricsDataList: " + metricsDataList.size()); + for (List<MeasurementDataNumericHighLowComposite> measurementData : metricsDataList) { + for (int i = 0; i < measurementData.size(); i++) { + // take the last 20 values + if (i >= measurementData.size() - 20) { + if (!Double.isNaN(measurementData.get(i).getValue())) { + sb.append((int) measurementData.get(i).getValue()); + sb.append(","); + } + } + } + if (sb.toString().endsWith(",")) { + sb.setLength(sb.length() - 1); + } + } + Log.debug("getCsvMetricsForSparkline: " + sb.toString()); + + return sb.toString(); + } + protected String getMetricStringValue(MetricDisplayValue value) { return (value != null) ? value.toString() : ""; } @@ -127,34 +193,54 @@ public class MetricsTableDataSource extends RPCDataSource<MetricDisplaySummary, @Override protected void executeFetch(final DSRequest request, final DSResponse response, final Criteria unused) {
- // see MetricsTableUIBean for the old JSF class to see where this came from - GWTServiceLookup.getMeasurementScheduleService().findSchedulesForResourceAndType(resourceId, DataType.MEASUREMENT, null, true, new AsyncCallback<ArrayList<MeasurementSchedule>>() { @Override - public void onSuccess(ArrayList<MeasurementSchedule> result) { - int[] schedIds = new int[result.size()]; + public void onSuccess(ArrayList<MeasurementSchedule> measurementSchedules) { + int[] scheduleIds = new int[measurementSchedules.size()]; int i = 0; - for (MeasurementSchedule measurementSchedule : result) { - schedIds[i++] = measurementSchedule.getId(); + for (MeasurementSchedule measurementSchedule : measurementSchedules) { + scheduleIds[i++] = measurementSchedule.getId(); }
- UserPreferences prefs = UserSessionManager.getUserPreferences(); - MeasurementUserPreferences mprefs = new MeasurementUserPreferences(prefs); - ArrayList<Long> range = mprefs.getMetricRangePreferences().getBeginEndTimes(); + final CountDownLatch countDownLatch = CountDownLatch.create(2, new Command() { + + @Override + public void execute() { + response.setData(buildRecords(metricDisplaySummaries)); + processResponse(request.getRequestId(), response); + + new Timer() { + + @Override + public void run() { + BrowserUtility.graphSparkLines(); + } + }.schedule(150); + Log.debug("*** Finished CountdownLatch for metrics loaded: " + metricsDataList.size()); + } + }); + + retrieveResourceMetrics(resourceId, countDownLatch); + GWTServiceLookup.getMeasurementChartsService().getMetricDisplaySummariesForResource(resourceId, - schedIds, range.get(0), range.get(1), new AsyncCallback<ArrayList<MetricDisplaySummary>>() { - @Override - public void onSuccess(ArrayList<MetricDisplaySummary> result) { - response.setData(buildRecords(result)); - processResponse(request.getRequestId(), response); - } + scheduleIds, measurementUserPrefs.getMetricRangePreferences().begin, + measurementUserPrefs.getMetricRangePreferences().end, + new AsyncCallback<ArrayList<MetricDisplaySummary>>() { + @Override + public void onSuccess(ArrayList<MetricDisplaySummary> metricDisplaySummaries) { + setMetricDisplaySummaries(metricDisplaySummaries); + countDownLatch.countDown(); + }
- @Override - public void onFailure(Throwable caught) { - CoreGUI.getErrorHandler().handleError("Cannot load metrics", caught); + @Override + public void onFailure(Throwable caught) { + CoreGUI.getErrorHandler().handleError("Cannot load metrics", caught); + countDownLatch.countDown(); + } } - }); + + ); }
@Override @@ -163,4 +249,87 @@ public class MetricsTableDataSource extends RPCDataSource<MetricDisplaySummary, } }); } + + void setMetricDisplaySummaries(List<MetricDisplaySummary> metricDisplaySummaries) { + this.metricDisplaySummaries = metricDisplaySummaries; + } + + public void retrieveResourceMetrics(final Integer resourceId, final CountDownLatch countDownLatch) { + + ResourceCriteria criteria = new ResourceCriteria(); + criteria.addFilterId(resourceId); + + //locate the resource + GWTServiceLookup.getResourceService().findResourceCompositesByCriteria(criteria, + new AsyncCallback<PageList<ResourceComposite>>() { + @Override + public void onFailure(Throwable caught) { + Log.debug("Error retrieving resource resource composite for resource [" + resourceId + "]:" + + caught.getMessage()); + } + + @Override + public void onSuccess(PageList<ResourceComposite> resourceCompositePageList) { + if (!resourceCompositePageList.isEmpty()) { + final ResourceComposite resourceComposite = resourceCompositePageList.get(0); + final Resource resource = resourceComposite.getResource(); + // Load the fully fetched ResourceType. + ResourceType resourceType = resource.getResourceType(); + ResourceTypeRepository.Cache.getInstance().getResourceTypes(resourceType.getId(), + EnumSet.of(ResourceTypeRepository.MetadataType.measurements), + new ResourceTypeRepository.TypeLoadedCallback() { + public void onTypesLoaded(ResourceType type) { + resource.setResourceType(type); + //metric definitions + Set<MeasurementDefinition> definitions = type.getMetricDefinitions(); + + //build id mapping for measurementDefinition instances Ex. Free Memory -> MeasurementDefinition[100071] + final HashMap<String, MeasurementDefinition> measurementDefMap = new HashMap<String, MeasurementDefinition>(); + for (MeasurementDefinition definition : definitions) { + measurementDefMap.put(definition.getDisplayName(), definition); + } + //bundle definition ids for asynch call. 
+ int[] definitionArrayIds = new int[definitions.size()]; + final String[] displayOrder = new String[definitions.size()]; + measurementDefMap.keySet().toArray(displayOrder); + //sort the charting data ex. Free Memory, Free Swap Space,..System Load + Arrays.sort(displayOrder); + + //organize definitionArrayIds for ordered request on server. + int index = 0; + for (String definitionToDisplay : displayOrder) { + definitionArrayIds[index++] = measurementDefMap.get(definitionToDisplay) + .getId(); + } + + GWTServiceLookup.getMeasurementDataService().findDataForResource(resourceId, + definitionArrayIds, measurementUserPrefs.getMetricRangePreferences().begin, + measurementUserPrefs.getMetricRangePreferences().end, 60, + new AsyncCallback<List<List<MeasurementDataNumericHighLowComposite>>>() { + @Override + public void onFailure(Throwable caught) { + Log.warn("Error retrieving recent metrics charting data for resource [" + + resourceId + "]:" + caught.getMessage()); + } + + @Override + public void onSuccess( + List<List<MeasurementDataNumericHighLowComposite>> measurementDataList) { + + if (!measurementDataList.isEmpty()) { + metricsDataList = measurementDataList; + Log.debug("*** Setting metricsDataList.size: " + + metricsDataList.size()); + countDownLatch.countDown(); + } + } + }); + + } + }); + } + } + }); + + } } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java new file mode 100644 index 0000000..09a7bc3 --- /dev/null +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java @@ -0,0 +1,385 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +package org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.table; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import com.google.gwt.core.client.GWT; +import com.google.gwt.user.client.Timer; +import com.google.gwt.user.client.rpc.AsyncCallback; +import com.smartgwt.client.types.ExpansionMode; +import com.smartgwt.client.types.SelectionStyle; +import com.smartgwt.client.widgets.Canvas; +import com.smartgwt.client.widgets.HTMLFlow; +import com.smartgwt.client.widgets.Window; +import com.smartgwt.client.widgets.events.CloseClickEvent; +import com.smartgwt.client.widgets.events.CloseClickHandler; +import com.smartgwt.client.widgets.grid.ListGrid; +import com.smartgwt.client.widgets.grid.ListGridField; +import com.smartgwt.client.widgets.grid.ListGridRecord; +import com.smartgwt.client.widgets.grid.events.DataArrivedEvent; +import com.smartgwt.client.widgets.grid.events.DataArrivedHandler; +import com.smartgwt.client.widgets.grid.events.RecordCollapseEvent; +import com.smartgwt.client.widgets.grid.events.RecordCollapseHandler; +import com.smartgwt.client.widgets.grid.events.RecordExpandEvent; +import 
com.smartgwt.client.widgets.grid.events.RecordExpandHandler; +import com.smartgwt.client.widgets.grid.events.SelectionChangedHandler; +import com.smartgwt.client.widgets.grid.events.SelectionEvent; +import com.smartgwt.client.widgets.grid.events.SortChangedHandler; +import com.smartgwt.client.widgets.grid.events.SortEvent; +import com.smartgwt.client.widgets.layout.VLayout; + +import org.rhq.core.domain.measurement.MeasurementData; +import org.rhq.core.domain.measurement.MeasurementDefinition; +import org.rhq.core.domain.measurement.MeasurementUnits; +import org.rhq.core.domain.measurement.composite.MeasurementDataNumericHighLowComposite; +import org.rhq.core.domain.resource.Resource; +import org.rhq.enterprise.gui.coregui.client.CoreGUI; +import org.rhq.enterprise.gui.coregui.client.UserSessionManager; +import org.rhq.enterprise.gui.coregui.client.components.table.Table; +import org.rhq.enterprise.gui.coregui.client.components.table.TableAction; +import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; +import org.rhq.enterprise.gui.coregui.client.inventory.common.AbstractD3GraphListView; +import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.MetricGraphData; +import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.RedrawGraphs; +import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.StackedBarMetricGraphImpl; +import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.MetricD3Graph; +import org.rhq.enterprise.gui.coregui.client.util.BrowserUtility; +import org.rhq.enterprise.gui.coregui.client.util.Log; +import org.rhq.enterprise.gui.coregui.client.util.MeasurementConverterClient; +import org.rhq.enterprise.gui.coregui.client.util.preferences.MeasurementUserPreferences; + +/** + * Views a resource's metrics in a tabular view with sparkline graph and optional detailed d3 graph. 
+ * + * @author John Mazzitelli + * @author Mike Thompson + */ +public class MetricsTableView extends Table<MetricsViewDataSource> implements RedrawGraphs { + + private final Resource resource; + private final AbstractD3GraphListView abstractD3GraphListView; + + private final MeasurementUserPreferences measurementUserPrefs; + private final AddToDashboardComponent addToDashboardComponent; + private MetricsTableListGrid metricsTableListGrid; + + Set<Integer> expandedRows = new HashSet<Integer>(); + + public MetricsTableView(Resource resource, AbstractD3GraphListView abstractD3GraphListView) { + super(); + this.resource = resource; + this.abstractD3GraphListView = abstractD3GraphListView; + measurementUserPrefs = new MeasurementUserPreferences(UserSessionManager.getUserPreferences()); + setDataSource(new MetricsViewDataSource(resource)); + addToDashboardComponent = new AddToDashboardComponent(resource); + } + + /** + * Creates this Table's list grid (called by onInit()). Subclasses can override this if they require a custom + * subclass of ListGrid. 
+ * + * @return this Table's list grid (must be an instance of ListGrid) + */ + @Override + protected ListGrid createListGrid() { + metricsTableListGrid = new MetricsTableListGrid(this, resource); + addToDashboardComponent.setMetricsListGrid(metricsTableListGrid); + return metricsTableListGrid; + } + + protected void configureTable() { + ArrayList<ListGridField> fields = getDataSource().getListGridFields(); + setListGridFields(fields.toArray(new ListGridField[0])); + + addTableAction(MSG.view_measureTable_getLive(), new ShowLiveDataTableAction(this)); + addExtraWidget(addToDashboardComponent, false); + addToDashboardComponent.disableAddToDashboardButton(); + metricsTableListGrid.addSelectionChangedHandler(new SelectionChangedHandler() { + @Override + public void onSelectionChanged(SelectionEvent selectionEvent) { + if(metricsTableListGrid.getSelectedRecords().length > 0){ + addToDashboardComponent.enableAddToDashboardButton(); + }else { + addToDashboardComponent.disableAddToDashboardButton(); + } + } + }); + } + + private static class ShowLiveDataTableAction implements TableAction { + private MetricsTableView metricsTableView; + + public ShowLiveDataTableAction(MetricsTableView metricsTableView) { + this.metricsTableView = metricsTableView; + } + + @Override + public boolean isEnabled(ListGridRecord[] selection) { + return selection != null && selection.length > 0; + } + + @Override + public void executeAction(ListGridRecord[] selection, Object actionValue) { + if (selection == null || selection.length == 0) { + return; + } + // keyed on metric name - string[0] is the metric label, [1] is the units + final HashMap<String, String[]> scheduleNamesAndUnits = new HashMap<String, String[]>(); + int[] definitionIds = new int[selection.length]; + int i = 0; + for (ListGridRecord record : selection) { + Integer defId = record.getAttributeAsInt(MetricsViewDataSource.FIELD_METRIC_DEF_ID); + definitionIds[i++] = defId; + + String name = 
record.getAttribute(MetricsViewDataSource.FIELD_METRIC_NAME); + String label = record.getAttribute(MetricsViewDataSource.FIELD_METRIC_LABEL); + String units = record.getAttribute(MetricsViewDataSource.FIELD_METRIC_UNITS); + if (units == null || units.length() < 1) { + units = MeasurementUnits.NONE.name(); + } + + scheduleNamesAndUnits.put(name, new String[] { label, units }); + } + + // actually go out and ask the agents for the data + GWTServiceLookup.getMeasurementDataService(60000).findLiveData(metricsTableView.resource.getId(), + definitionIds, new AsyncCallback<Set<MeasurementData>>() { + @Override + public void onSuccess(Set<MeasurementData> result) { + if (result == null) { + result = new HashSet<MeasurementData>(0); + } + ArrayList<ListGridRecord> records = new ArrayList<ListGridRecord>(result.size()); + for (MeasurementData data : result) { + String[] nameAndUnits = scheduleNamesAndUnits.get(data.getName()); + if (nameAndUnits != null) { + double doubleValue; + if (data.getValue() instanceof Number) { + doubleValue = ((Number) data.getValue()).doubleValue(); + } else { + doubleValue = Double.parseDouble(data.getValue().toString()); + } + String value = MeasurementConverterClient.formatToSignificantPrecision( + new double[] { doubleValue }, MeasurementUnits.valueOf(nameAndUnits[1]), true)[0]; + + ListGridRecord record = new ListGridRecord(); + record.setAttribute("name", nameAndUnits[0]); + record.setAttribute("value", value); + records.add(record); + } + } + Collections.sort(records, new Comparator<ListGridRecord>() { + public int compare(ListGridRecord o1, ListGridRecord o2) { + return o1.getAttribute("name").compareTo(o2.getAttribute("name")); + } + }); + showLiveData(records); + } + + @Override + public void onFailure(Throwable caught) { + CoreGUI.getErrorHandler().handleError(MSG.view_measureTable_getLive_failure(), caught); + } + }); + } + + private void showLiveData(ArrayList<ListGridRecord> records) { + final Window liveDataWindow = new Window(); + 
liveDataWindow.setTitle(MSG.view_measureTable_live_title()); + liveDataWindow.setShowModalMask(true); + liveDataWindow.setShowMinimizeButton(false); + liveDataWindow.setShowMaximizeButton(true); + liveDataWindow.setShowCloseButton(true); + liveDataWindow.setShowResizer(true); + liveDataWindow.setCanDragResize(true); + liveDataWindow.setDismissOnEscape(true); + liveDataWindow.setIsModal(true); + liveDataWindow.setWidth(700); + liveDataWindow.setHeight(425); + liveDataWindow.setAutoCenter(true); + liveDataWindow.centerInPage(); + liveDataWindow.addCloseClickHandler(new CloseClickHandler() { + @Override + public void onCloseClick(CloseClickEvent event) { + liveDataWindow.destroy(); + metricsTableView.refreshTableInfo(); + } + }); + + ListGrid liveDataGrid = new ListGrid(); + liveDataGrid.setShowAllRecords(true); + liveDataGrid.setData(records.toArray(new ListGridRecord[records.size()])); + liveDataGrid.setSelectionType(SelectionStyle.NONE); + ListGridField name = new ListGridField("name", MSG.common_title_metric()); + ListGridField value = new ListGridField("value", MSG.common_title_value()); + liveDataGrid.setFields(name, value); + + liveDataWindow.addItem(liveDataGrid); + liveDataWindow.show(); + } + + } + + @Override + /** + * Redraw Graphs in this context means to refresh the table and redraw open graphs. 
+ */ + public void redrawGraphs() { + Log.debug("MetricsView.redrawGraphs."); + + new Timer() { + + @Override + public void run() { + BrowserUtility.graphSparkLines(); + } + }.schedule(150); + + } + + public class MetricsTableListGrid extends ListGrid { + + private static final int TREEVIEW_DETAIL_CHART_HEIGHT = 205; + private static final int NUM_METRIC_POINTS = 60; + private Resource resource; + final MetricsTableView metricsTableView; + + public MetricsTableListGrid(final MetricsTableView metricsTableView, final Resource resource) { + super(); + this.resource = resource; + this.metricsTableView = metricsTableView; + setCanExpandRecords(true); + setCanExpandMultipleRecords(true); + setExpansionMode(ExpansionMode.DETAIL_FIELD); + + addRecordExpandHandler(new RecordExpandHandler() { + @Override + public void onRecordExpand(RecordExpandEvent recordExpandEvent) { + metricsTableView.expandedRows.add(recordExpandEvent.getRecord().getAttributeAsInt( + MetricsViewDataSource.FIELD_METRIC_DEF_ID)); + redrawGraphs(); + } + + }); + addRecordCollapseHandler(new RecordCollapseHandler() { + @Override + public void onRecordCollapse(RecordCollapseEvent recordCollapseEvent) { + metricsTableView.expandedRows.remove(recordCollapseEvent.getRecord().getAttributeAsInt( + MetricsViewDataSource.FIELD_METRIC_DEF_ID)); + redrawGraphs(); + } + }); + addSortChangedHandler(new SortChangedHandler() { + @Override + public void onSortChanged(SortEvent sortEvent) { + redrawGraphs(); + } + }); + addDataArrivedHandler(new DataArrivedHandler() { + @Override + public void onDataArrived(DataArrivedEvent dataArrivedEvent) { + int startRow = dataArrivedEvent.getStartRow(); + int endRow = dataArrivedEvent.getEndRow(); + for (int i = startRow; i < endRow; i++) { + if (null != metricsTableView.expandedRows + && metricsTableView.expandedRows.contains(getRecord(i).getAttributeAsInt( + MetricsViewDataSource.FIELD_METRIC_DEF_ID))) { + expandRecord(getRecord(i)); + } + } + } + }); + + } + + @Override + 
protected Canvas getExpansionComponent(final ListGridRecord record) { + final Integer definitionId = record.getAttributeAsInt(MetricsViewDataSource.FIELD_METRIC_DEF_ID); + final Integer resourceId = record.getAttributeAsInt(MetricsViewDataSource.FIELD_RESOURCE_ID); + VLayout vLayout = new VLayout(); + vLayout.setPadding(5); + + final String chartId = "rChart-" + resourceId + "-" + definitionId; + HTMLFlow htmlFlow = new HTMLFlow(MetricD3Graph.createGraphMarkerTemplate(chartId, + TREEVIEW_DETAIL_CHART_HEIGHT)); + vLayout.addMember(htmlFlow); + + int[] definitionArrayIds = new int[1]; + definitionArrayIds[0] = definitionId; + GWTServiceLookup.getMeasurementDataService().findDataForResource(resourceId, definitionArrayIds, + measurementUserPrefs.getMetricRangePreferences().begin, + measurementUserPrefs.getMetricRangePreferences().end, NUM_METRIC_POINTS, + new AsyncCallback<List<List<MeasurementDataNumericHighLowComposite>>>() { + @Override + public void onFailure(Throwable caught) { + Log.warn("Error retrieving recent metrics charting data for resource [" + resourceId + "]:" + + caught.getMessage()); + } + + @Override + public void onSuccess(List<List<MeasurementDataNumericHighLowComposite>> results) { + if (!results.isEmpty()) { + + //load the data results for the given metric definition + List<MeasurementDataNumericHighLowComposite> measurementList = results.get(0); + + MeasurementDefinition measurementDefinition = null; + for (MeasurementDefinition definition : resource.getResourceType().getMetricDefinitions()) { + if (definition.getId() == definitionId) { + measurementDefinition = definition; + break; + } + } + + MetricGraphData metricGraphData = MetricGraphData.createForResource(resourceId, + resource.getName(), measurementDefinition, measurementList, null); + metricGraphData.setHideLegend(true); + + StackedBarMetricGraphImpl graph = GWT.create(StackedBarMetricGraphImpl.class); + graph.setMetricGraphData(metricGraphData); + final MetricD3Graph graphView = new 
MetricD3Graph(graph, abstractD3GraphListView); + new Timer() { + @Override + public void run() { + graphView.drawJsniChart(); + BrowserUtility.graphSparkLines(); + + } + }.schedule(150); + + } else { + Log.warn("No chart data retrieving for resource [" + resourceId + "-" + definitionId + "]"); + + } + } + }); + + return vLayout; + } + } + +} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsViewDataSource.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsViewDataSource.java new file mode 100644 index 0000000..5c2fe25 --- /dev/null +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsViewDataSource.java @@ -0,0 +1,307 @@ +package org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.table; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Set; + +import com.google.gwt.user.client.Timer; +import com.google.gwt.user.client.rpc.AsyncCallback; +import com.smartgwt.client.data.DSRequest; +import com.smartgwt.client.data.DSResponse; +import com.smartgwt.client.data.Record; +import com.smartgwt.client.widgets.grid.CellFormatter; +import com.smartgwt.client.widgets.grid.ListGridField; +import com.smartgwt.client.widgets.grid.ListGridRecord; + +import org.rhq.core.domain.criteria.Criteria; +import org.rhq.core.domain.measurement.DataType; +import org.rhq.core.domain.measurement.MeasurementDefinition; +import org.rhq.core.domain.measurement.MeasurementSchedule; +import org.rhq.core.domain.measurement.composite.MeasurementDataNumericHighLowComposite; +import org.rhq.core.domain.measurement.ui.MetricDisplaySummary; +import org.rhq.core.domain.measurement.ui.MetricDisplayValue; +import 
org.rhq.core.domain.resource.Resource; +import org.rhq.enterprise.gui.coregui.client.CoreGUI; +import org.rhq.enterprise.gui.coregui.client.UserSessionManager; +import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; +import org.rhq.enterprise.gui.coregui.client.util.BrowserUtility; +import org.rhq.enterprise.gui.coregui.client.util.Log; +import org.rhq.enterprise.gui.coregui.client.util.MeasurementUtility; +import org.rhq.enterprise.gui.coregui.client.util.RPCDataSource; +import org.rhq.enterprise.gui.coregui.client.util.async.Command; +import org.rhq.enterprise.gui.coregui.client.util.async.CountDownLatch; +import org.rhq.enterprise.gui.coregui.client.util.preferences.MeasurementUserPreferences; + +/** + * A simple data source to read in metric data summaries for a resource. + * This doesn't support paging - everything is returned in one query. Since + * the number of metrics per resource is relatively small (never more than tens of them), + * we just load them all in at once. 
+ * + * @author John Mazzitelli + * @author Mike Thompson + */ +public class MetricsViewDataSource extends RPCDataSource<MetricDisplaySummary, Criteria> { + + private static final int NUMBER_OF_METRIC_POINTS = 60; + public static final String FIELD_SPARKLINE = "sparkline"; + public static final String FIELD_METRIC_LABEL = "label"; + public static final String FIELD_ALERT_COUNT = "alertCount"; + public static final String FIELD_MIN_VALUE = "min"; + public static final String FIELD_MAX_VALUE = "max"; + public static final String FIELD_AVG_VALUE = "avg"; + public static final String FIELD_LAST_VALUE = "last"; + public static final String FIELD_METRIC_DEF_ID = "defId"; + public static final String FIELD_METRIC_SCHED_ID = "schedId"; + public static final String FIELD_METRIC_UNITS = "units"; + public static final String FIELD_METRIC_NAME = "name"; + public static final String FIELD_RESOURCE_ID = "resourceId"; + private final Resource resource; + private List<MetricDisplaySummary> metricDisplaySummaries; + private List<List<MeasurementDataNumericHighLowComposite>> metricsDataList; + private int[] definitionArrayIds; + private final MeasurementUserPreferences measurementUserPrefs; + + public MetricsViewDataSource(Resource resource) { + this.resource = resource; + measurementUserPrefs = new MeasurementUserPreferences(UserSessionManager.getUserPreferences()); + } + + /** + * The view that contains the list grid which will display this datasource's data will call this + * method to get the field information which is used to control the display of the data. 
+ * + * @return list grid fields used to display the datasource data + */ + public ArrayList<ListGridField> getListGridFields() { + ArrayList<ListGridField> fields = new ArrayList<ListGridField>(7); + + ListGridField sparklineField = new ListGridField(FIELD_SPARKLINE, MSG.chart_metrics_sparkline_header()); + sparklineField.setCellFormatter(new CellFormatter() { + @Override + public String format(Object value, ListGridRecord record, int rowNum, int colNum) { + if (value == null) { + return ""; + } + String contents = "<span id='sparkline_" + resource.getId() + "-" + + record.getAttributeAsInt(FIELD_METRIC_DEF_ID) + "' class='dynamicsparkline' width='70' " + + "values='" + record.getAttribute(FIELD_SPARKLINE) + "'></span>"; + return contents; + + } + }); + + sparklineField.setWidth(80); + fields.add(sparklineField); + + ListGridField nameField = new ListGridField(FIELD_METRIC_LABEL, MSG.common_title_name()); + nameField.setWidth("30%"); + fields.add(nameField); + + ListGridField minField = new ListGridField(FIELD_MIN_VALUE, MSG.view_resource_monitor_table_min()); + minField.setWidth("15%"); + fields.add(minField); + + ListGridField maxField = new ListGridField(FIELD_MAX_VALUE, MSG.view_resource_monitor_table_max()); + maxField.setWidth("15%"); + fields.add(maxField); + + ListGridField avgField = new ListGridField(FIELD_AVG_VALUE, MSG.view_resource_monitor_table_avg()); + avgField.setWidth("15%"); + fields.add(avgField); + + ListGridField lastField = new ListGridField(FIELD_LAST_VALUE, MSG.view_resource_monitor_table_last()); + lastField.setWidth("15%"); + fields.add(lastField); + + ListGridField alertsField = new ListGridField(FIELD_ALERT_COUNT, MSG.common_title_alerts()); + alertsField.setWidth("10%"); + fields.add(alertsField); + + return fields; + } + + @Override + public MetricDisplaySummary copyValues(Record from) { + // we should never need this method - we only go in one direction + // if we ever need this, just have copyValues store an "object" attribute 
whose value is "from" + // which this method then just reads out. Since we don't need this now, save memory by not + // keeping the MetricDisplayValue around + return null; + } + + @Override + public ListGridRecord copyValues(MetricDisplaySummary from) { + MeasurementUtility.formatSimpleMetrics(from); + + ListGridRecord record = new ListGridRecord(); + record.setAttribute(FIELD_SPARKLINE, getCsvMetricsForSparkline(from.getDefinitionId())); + record.setAttribute(FIELD_METRIC_LABEL, from.getLabel()); + record.setAttribute(FIELD_ALERT_COUNT, String.valueOf(from.getAlertCount())); + record.setAttribute(FIELD_MIN_VALUE, getMetricStringValue(from.getMinMetric())); + record.setAttribute(FIELD_MAX_VALUE, getMetricStringValue(from.getMaxMetric())); + record.setAttribute(FIELD_AVG_VALUE, getMetricStringValue(from.getAvgMetric())); + record.setAttribute(FIELD_LAST_VALUE, getMetricStringValue(from.getLastMetric())); + record.setAttribute(FIELD_METRIC_DEF_ID, from.getDefinitionId()); + record.setAttribute(FIELD_METRIC_SCHED_ID, from.getScheduleId()); + record.setAttribute(FIELD_METRIC_UNITS, from.getUnits()); + record.setAttribute(FIELD_METRIC_NAME, from.getMetricName()); + record.setAttribute(FIELD_RESOURCE_ID, resource.getId()); + return record; + } + + private String getCsvMetricsForSparkline(int definitionId) { + StringBuilder sb = new StringBuilder(); + List<MeasurementDataNumericHighLowComposite> selectedMetricsList = getMeasurementsForMeasurementDefId(definitionId); + + for (int i = 0; i < selectedMetricsList.size(); i++) { + MeasurementDataNumericHighLowComposite measurementData = selectedMetricsList.get(i); + if (!Double.isNaN(measurementData.getValue())) { + sb.append((int) measurementData.getValue()); + sb.append(","); + } + } + + if (sb.toString().endsWith(",")) { + sb.setLength(sb.length() - 1); + } + + return sb.toString(); + } + + List<MeasurementDataNumericHighLowComposite> getMeasurementsForMeasurementDefId(int definitionId) { + int selectedIndex = 0; + + // 
find the ordinal position as specified when querying the metrics + for (int i = 0; i < definitionArrayIds.length; i++) { + if (definitionArrayIds[i] == definitionId) { + selectedIndex = i; + break; + } + } + + return metricsDataList.get(selectedIndex); + } + + protected String getMetricStringValue(MetricDisplayValue value) { + return (value != null) ? value.toString() : ""; + } + + @Override + protected Criteria getFetchCriteria(DSRequest request) { + // NOTE: we don't use criterias for this datasource, just return null + return null; + } + + @Override + protected void executeFetch(final DSRequest request, final DSResponse response, final Criteria unused) { + + GWTServiceLookup.getMeasurementScheduleService().findSchedulesForResourceAndType(resource.getId(), + DataType.MEASUREMENT, null, true, new AsyncCallback<ArrayList<MeasurementSchedule>>() { + @Override + public void onSuccess(ArrayList<MeasurementSchedule> measurementSchedules) { + int[] scheduleIds = new int[measurementSchedules.size()]; + int i = 0; + for (MeasurementSchedule measurementSchedule : measurementSchedules) { + scheduleIds[i++] = measurementSchedule.getId(); + } + + final CountDownLatch countDownLatch = CountDownLatch.create(2, new Command() { + + @Override + public void execute() { + response.setData(buildRecords(metricDisplaySummaries)); + processResponse(request.getRequestId(), response); + + new Timer() { + + @Override + public void run() { + BrowserUtility.graphSparkLines(); + } + }.schedule(150); + } + }); + + queryResourceMetrics(resource, measurementUserPrefs.getMetricRangePreferences().begin, + measurementUserPrefs.getMetricRangePreferences().end, countDownLatch); + queryMetricDisplaySummaries(scheduleIds, measurementUserPrefs.getMetricRangePreferences().begin, + measurementUserPrefs.getMetricRangePreferences().end, countDownLatch); + } + + @Override + public void onFailure(Throwable caught) { + CoreGUI.getErrorHandler().handleError("Cannot load schedules", caught); + } + }); + } + + 
private void queryMetricDisplaySummaries(int[] scheduleIds, Long startTime, Long endTime, + final CountDownLatch countDownLatch) { + GWTServiceLookup.getMeasurementChartsService().getMetricDisplaySummariesForResource(resource.getId(), + scheduleIds, startTime, endTime, new AsyncCallback<ArrayList<MetricDisplaySummary>>() { + @Override + public void onSuccess(ArrayList<MetricDisplaySummary> metricDisplaySummaries) { + setMetricDisplaySummaries(metricDisplaySummaries); + countDownLatch.countDown(); + } + + @Override + public void onFailure(Throwable caught) { + CoreGUI.getErrorHandler().handleError("Cannot load metrics", caught); + countDownLatch.countDown(); + } + } + + ); + } + + void setMetricDisplaySummaries(List<MetricDisplaySummary> metricDisplaySummaries) { + this.metricDisplaySummaries = metricDisplaySummaries; + } + + public void queryResourceMetrics(final Resource resource, Long startTime, Long endTime, + final CountDownLatch countDownLatch) { + Set<MeasurementDefinition> definitions = resource.getResourceType().getMetricDefinitions(); + + //build id mapping for measurementDefinition instances Ex. Free Memory -> MeasurementDefinition[100071] + final HashMap<String, MeasurementDefinition> measurementDefMap = new HashMap<String, MeasurementDefinition>(); + for (MeasurementDefinition definition : definitions) { + measurementDefMap.put(definition.getDisplayName(), definition); + } + //bundle definition ids for asynch call. + definitionArrayIds = new int[definitions.size()]; + final String[] displayOrder = new String[definitions.size()]; + measurementDefMap.keySet().toArray(displayOrder); + //sort the charting data ex. Free Memory, Free Swap Space,..System Load + Arrays.sort(displayOrder); + + //organize definitionArrayIds for ordered request on server. 
+ int index = 0; + for (String definitionToDisplay : displayOrder) { + definitionArrayIds[index++] = measurementDefMap.get(definitionToDisplay).getId(); + } + + GWTServiceLookup.getMeasurementDataService().findDataForResource(resource.getId(), definitionArrayIds, + startTime, endTime, NUMBER_OF_METRIC_POINTS, + new AsyncCallback<List<List<MeasurementDataNumericHighLowComposite>>>() { + @Override + public void onFailure(Throwable caught) { + Log.warn("Error retrieving recent metrics charting data for resource [" + resource.getId() + "]:" + + caught.getMessage()); + } + + @Override + public void onSuccess(List<List<MeasurementDataNumericHighLowComposite>> measurementDataList) { + + if (null != measurementDataList && !measurementDataList.isEmpty()) { + metricsDataList = measurementDataList; + countDownLatch.countDown(); + } + } + }); + + } +} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/ResourceMetricAvailabilityView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/ResourceMetricAvailabilityView.java new file mode 100644 index 0000000..9d6b892 --- /dev/null +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/ResourceMetricAvailabilityView.java @@ -0,0 +1,208 @@ +/* + * RHQ Management Platform + * Copyright 2012, Red Hat Middleware LLC, and individual contributors + * as indicated by the @author tags. See the copyright.txt file in the + * distribution for a full listing of individual contributors. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +package org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.table; + +import com.google.gwt.user.client.rpc.AsyncCallback; +import com.smartgwt.client.widgets.form.DynamicForm; +import com.smartgwt.client.widgets.form.fields.FormItem; +import com.smartgwt.client.widgets.form.fields.StaticTextItem; + +import org.rhq.core.domain.measurement.MeasurementUnits; +import org.rhq.core.domain.resource.Resource; +import org.rhq.core.domain.resource.composite.ResourceAvailabilitySummary; +import org.rhq.enterprise.gui.coregui.client.CoreGUI; +import org.rhq.enterprise.gui.coregui.client.components.table.TimestampCellFormatter; +import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; +import org.rhq.enterprise.gui.coregui.client.util.MeasurementConverterClient; +import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; + +/** + * This shows the availability history for a resource. 
+ * + * @author Jay Shaughnessy + * @author John Mazzitelli + * @author Mike Thompson + */ +public class ResourceMetricAvailabilityView extends EnhancedVLayout { + + private Resource resource; + private StaticTextItem currentField; + private StaticTextItem availField; + private StaticTextItem availTimeField; + private StaticTextItem downField; + private StaticTextItem downTimeField; + private StaticTextItem disabledField; + private StaticTextItem disabledTimeField; + private StaticTextItem failureCountField; + private StaticTextItem disabledCountField; + private StaticTextItem mtbfField; + private StaticTextItem mttrField; + private StaticTextItem unknownField; + private StaticTextItem currentTimeField; + + public ResourceMetricAvailabilityView(Resource resource) { + super(); + + this.resource = resource; + + setWidth100(); + setHeight(165); + } + + @Override + protected void onInit() { + super.onInit(); + + addMember(createSummaryForm()); + } + + private DynamicForm createSummaryForm() { + DynamicForm form = new DynamicForm(); + form.setWidth100(); + form.setAutoHeight(); + form.setMargin(10); + form.setNumCols(4); + + // row 1 + currentField = new StaticTextItem("current", MSG.view_resource_monitor_availability_currentStatus()); + currentField.setWrapTitle(false); + currentField.setColSpan(4); + + // row 2 + availField = new StaticTextItem("avail", MSG.view_resource_monitor_availability_availability()); + availField.setWrapTitle(false); + prepareTooltip(availField, MSG.view_resource_monitor_availability_availability_tooltip()); + + availTimeField = new StaticTextItem("availTime", MSG.view_resource_monitor_availability_uptime()); + availTimeField.setWrapTitle(false); + prepareTooltip(availTimeField, MSG.view_resource_monitor_availability_uptime_tooltip()); + + // row 3 + downField = new StaticTextItem("down", MSG.view_resource_monitor_availability_down()); + downField.setWrapTitle(false); + prepareTooltip(downField, 
MSG.view_resource_monitor_availability_down_tooltip()); + + downTimeField = new StaticTextItem("downTime", MSG.view_resource_monitor_availability_downtime()); + downTimeField.setWrapTitle(false); + prepareTooltip(downTimeField, MSG.view_resource_monitor_availability_downtime_tooltip()); + + // row 4 + disabledField = new StaticTextItem("disabled", MSG.view_resource_monitor_availability_disabled()); + disabledField.setWrapTitle(false); + prepareTooltip(disabledField, MSG.view_resource_monitor_availability_disabled_tooltip()); + + disabledTimeField = new StaticTextItem("disabledTime", MSG.view_resource_monitor_availability_disabledTime()); + disabledTimeField.setWrapTitle(false); + prepareTooltip(disabledTimeField, MSG.view_resource_monitor_availability_disabledTime_tooltip()); + + // row 5 + failureCountField = new StaticTextItem("failureCount", MSG.view_resource_monitor_availability_numFailures()); + failureCountField.setWrapTitle(false); + prepareTooltip(failureCountField, MSG.view_resource_monitor_availability_numFailures_tooltip()); + + disabledCountField = new StaticTextItem("disabledCount", MSG.view_resource_monitor_availability_numDisabled()); + disabledCountField.setWrapTitle(false); + prepareTooltip(disabledCountField, MSG.view_resource_monitor_availability_numDisabled_tooltip()); + + // row 6 + mtbfField = new StaticTextItem("mtbf", MSG.view_resource_monitor_availability_mtbf()); + mtbfField.setWrapTitle(false); + prepareTooltip(mtbfField, MSG.view_resource_monitor_availability_mtbf_tooltip()); + + mttrField = new StaticTextItem("mttr", MSG.view_resource_monitor_availability_mttr()); + mttrField.setWrapTitle(false); + prepareTooltip(mttrField, MSG.view_resource_monitor_availability_mttr_tooltip()); + + // row 7 + unknownField = new StaticTextItem("unknown"); + unknownField.setWrapTitle(false); + unknownField.setColSpan(4); + unknownField.setShowTitle(false); + + // row 8 + currentTimeField = new StaticTextItem("currentTime"); + 
currentTimeField.setWrapTitle(false); + currentTimeField.setColSpan(4); + currentTimeField.setShowTitle(false); + + form.setItems(currentField, availField, availTimeField, downField, downTimeField, disabledField, + disabledTimeField, failureCountField, disabledCountField, mtbfField, mttrField, unknownField, + currentTimeField); + + reloadSummaryData(); + + return form; + } + + private void reloadSummaryData() { + GWTServiceLookup.getResourceService().getResourceAvailabilitySummary(resource.getId(), + new AsyncCallback<ResourceAvailabilitySummary>() { + + @Override + public void onSuccess(ResourceAvailabilitySummary result) { + + currentField.setValue(MSG.view_resource_monitor_availability_currentStatus_value(result + .getCurrent().getName(), TimestampCellFormatter.format(result.getLastChange().getTime()))); + availField.setValue(MeasurementConverterClient.format(result.getUpPercentage(), + MeasurementUnits.PERCENTAGE, true)); + availTimeField.setValue(MeasurementConverterClient.format((double) result.getUpTime(), + MeasurementUnits.MILLISECONDS, true)); + downField.setValue(MeasurementConverterClient.format(result.getDownPercentage(), + MeasurementUnits.PERCENTAGE, true)); + downTimeField.setValue(MeasurementConverterClient.format((double) result.getDownTime(), + MeasurementUnits.MILLISECONDS, true)); + disabledField.setValue(MeasurementConverterClient.format(result.getDisabledPercentage(), + MeasurementUnits.PERCENTAGE, true)); + disabledTimeField.setValue(MeasurementConverterClient.format((double) result.getDisabledTime(), + MeasurementUnits.MILLISECONDS, true)); + failureCountField.setValue(result.getFailures()); + disabledCountField.setValue(result.getDisabled()); + mtbfField.setValue(MeasurementConverterClient.format((double) result.getMTBF(), + MeasurementUnits.MILLISECONDS, true)); + mttrField.setValue(MeasurementConverterClient.format((double) result.getMTTR(), + MeasurementUnits.MILLISECONDS, true)); + + if (result.getUnknownTime() > 0L) { + 
unknownField.setValue(MSG.view_resource_monitor_availability_unknown(MeasurementConverterClient + .format((double) result.getUnknownTime(), MeasurementUnits.MILLISECONDS, true))); + } else { + unknownField.setValue(""); + } + + currentTimeField.setValue(MSG.view_resource_monitor_availability_currentAsOf(TimestampCellFormatter + .format(result.getCurrentTime()))); + } + + @Override + public void onFailure(Throwable caught) { + currentField.setValue(MSG.common_label_error()); + CoreGUI.getErrorHandler() + .handleError(MSG.view_resource_monitor_availability_summaryError(), caught); + } + }); + } + + private void prepareTooltip(FormItem item, String tooltip) { + item.setHoverWidth(400); + item.setPrompt(tooltip); + } + +} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/searchbar/BasicSearchStrategy.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/searchbar/BasicSearchStrategy.java index 1edd076..94fd1e8 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/searchbar/BasicSearchStrategy.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/searchbar/BasicSearchStrategy.java @@ -26,7 +26,6 @@ import com.smartgwt.client.data.fields.DataSourceTextField; import com.smartgwt.client.widgets.form.fields.events.KeyUpEvent; import com.smartgwt.client.widgets.grid.HoverCustomizer; import com.smartgwt.client.widgets.grid.ListGrid; -import com.smartgwt.client.widgets.grid.ListGridField; import com.smartgwt.client.widgets.grid.ListGridRecord; import com.smartgwt.client.widgets.grid.events.RecordClickEvent;
@@ -89,17 +88,14 @@ public class BasicSearchStrategy extends AbstractSearchStrategy { */ @Override public void onRecordClick(RecordClickEvent event) { - Log.debug("BasicSearchStrategy click");
String kind = event.getRecord().getAttribute(ATTR_KIND); String searchExpression;
if (kind.equals("SAVED") || kind.equals("GLOBAL")) { - Log.debug("Saved or Global Search Click"); searchExpression = event.getRecord().getAttribute(ATTR_PATTERN);
} else { - Log.debug("Regular Search Click"); searchExpression = event.getRecord().getAttribute(ATTR_NAME); }
@@ -112,14 +108,12 @@ public class BasicSearchStrategy extends AbstractSearchStrategy {
@Override public void searchFocusHandler() { - Log.debug("focus in BasicSearchStrategy"); String searchExpression = searchBar.getSearchTextItem().getValueAsString(); doSearch(searchExpression); }
@Override public void searchKeyUpHandler(KeyUpEvent keyUpEvent) { - Log.debug("Keyup in BasicSearchStrategy: " + keyUpEvent.getKeyName()); String searchExpression = searchBar.getSearchTextItem().getValueAsString(); doSearch(searchExpression); } @@ -131,20 +125,16 @@ public class BasicSearchStrategy extends AbstractSearchStrategy {
private void doSearch(String searchExpression) { if (isSearchInProgress) { - Log.debug("Adding pending search [" + searchExpression + "]"); pendingSearchExpression = (null == searchExpression) ? "" : searchExpression; return; }
- Log.debug("Search Start"); isSearchInProgress = true;
if (null == searchExpression || searchExpression.isEmpty()) { - Log.debug("Empty Search expression"); getSearchSuggestions(SearchSubsystem.RESOURCE, null, 0);
} else { - Log.debug("doSearch: " + searchExpression); getSearchSuggestions(SearchSubsystem.RESOURCE, searchBar.getSearchTextItem().getValueAsString(), searchBar .getSearchTextItem().getValueAsString().length()); } @@ -154,7 +144,6 @@ public class BasicSearchStrategy extends AbstractSearchStrategy {
final long suggestStart = System.currentTimeMillis();
- Log.debug("Searching for: " + expression);
searchService.getTabAwareSuggestions(searchSubsystem, expression, caretPosition, null, new AsyncCallback<List<SearchSuggestion>>() { @@ -179,7 +168,6 @@ public class BasicSearchStrategy extends AbstractSearchStrategy { ds.setFields(idField, valueField);
searchBarPickListGrid.setDataSource(ds); - ListGridField[] fields = searchBarPickListGrid.getAllFields(); searchBarPickListGrid.getField(ATTR_VALUE).setShowHover(true); searchBarPickListGrid.getField(ATTR_VALUE).setHoverCustomizer(new HoverCustomizer() {
@@ -223,7 +211,7 @@ public class BasicSearchStrategy extends AbstractSearchStrategy { searchBarPickListGrid.setData(new ListGridRecord[] {}); searchBarPickListGrid.fetchData(); } catch (Exception e) { - Log.debug("Caught exception on fetchData: " + e); + Log.info("Caught exception on fetchData: " + e); }
long suggestFetchTime = System.currentTimeMillis() - suggestStart; @@ -250,7 +238,6 @@ public class BasicSearchStrategy extends AbstractSearchStrategy {
@Override public void onFailure(Throwable caught) { - Log.debug("Search End"); isSearchInProgress = false; pendingSearchExpression = null; CoreGUI.getErrorHandler().handleError(MSG.view_searchBar_suggest_failSuggest(), caught); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/util/enhanced/Enhanced.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/util/enhanced/Enhanced.java index 10505e9..a069a5d 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/util/enhanced/Enhanced.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/util/enhanced/Enhanced.java @@ -31,5 +31,5 @@ import org.rhq.enterprise.gui.coregui.client.Messages; */ public interface Enhanced {
- Messages MSG = CoreGUI.getMessages(); + final static Messages MSG = CoreGUI.getMessages(); } diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties index 5a42502..040537d 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties @@ -20,6 +20,10 @@ chart_hover_period_label = Period chart_hover_start_label = Start chart_hover_time_format = %I:%M:%S %p chart_ie_not_supported = Charting is not available for this browser. +chart_metrics= Metrics +chart_metrics_collapse_tooltip= Click here to see additional tabular availability data. +chart_metrics_expand_tooltip= Click here to collapse additional availability detail. +chart_metrics_sparkline_header= Chart chart_no_data_label = No Data chart_single_value_label = Value chart_slider_button_bar_day = Day @@ -1816,6 +1820,7 @@ view_messageCenter_messageTime = Time view_messageCenter_messageTitle = Message Center view_messageCenter_noRecentMessages = No Recent Messages view_messageCenter_stackTraceFollows = --- STACK TRACE FOLLOWS --- +view_metric_addToDashboard = Add to Dashboard view_metric_traits = Traits view_metric_viewTraitHistory = Value History for Trait [{0}] view_operationHistoryDetails_dateCompleted = Date Completed @@ -2135,7 +2140,7 @@ view_titleBar_common_updateTagsSuccessful = The tags for [{0}] have been updated view_titleBar_group_failInfo = Failed to get general info on group [{0}] with ID [{1}] view_titleBar_group_summary_collapsedTooltip = Click to show more details for this group view_titleBar_group_summary_expandedTooltip = Click to hide details for this group -view_tree_common_contextMenu_addChartToDashboard = Add chart to dashboard [{0}] 
+view_tree_common_contextMenu_addChartToDashboard = Add Graph to Dashboard [{0}] view_tree_common_contextMenu_editPluginConfiguration = Edit [{0}] Plugin Configuration view_tree_common_contextMenu_editResourceConfiguration = Edit [{0}] Resource Configuration view_tree_common_contextMenu_groupGraph = Group Metric Graph diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties index 69c8452..2e23eff 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties @@ -48,6 +48,10 @@ chart_hover_date_format = %d.%m.%y ##chart_hover_start_label = Start chart_hover_time_format = %H:%M:%S ##chart_ie_not_supported = Charting is not available for this browser. +##chart_metrics= Metrics +##chart_metrics_collapse_tooltip= Click here to see additional tabular availability data. +##chart_metrics_expand_tooltip= Click here to collapse additional availability detail. 
+##chart_metrics_sparkline_header= Chart ##chart_no_data_label = No Data ##chart_single_value_label = Value ##chart_slider_button_bar_day = Day @@ -1828,6 +1832,7 @@ view_messageCenter_messageTime = Äas view_messageCenter_messageTitle = Centrum zpráv view_messageCenter_noRecentMessages = Ŝádné nové zprávy view_messageCenter_stackTraceFollows = --- VÃPIS ZÃSOBNÃKU NÃÅœE --- +##view_metric_addToDashboard = Add to Dashboard view_metric_traits = Vlastnosti view_metric_viewTraitHistory = Historie hodnot pro vlastnost [{0}] view_operationHistoryDetails_dateCompleted = Datum ukonÄenà diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties index ddd4686..b4fe981 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties @@ -23,6 +23,10 @@ chart_hover_period_label = Zeitraum chart_hover_start_label = Start chart_hover_time_format = %H:%M:%S chart_ie_not_supported = Charting ist bei diesem Browser nicht unterstÃŒtzt +##chart_metrics= Metrics +##chart_metrics_collapse_tooltip= Click here to see additional tabular availability data. +##chart_metrics_expand_tooltip= Click here to collapse additional availability detail. 
+##chart_metrics_sparkline_header= Chart chart_no_data_label = Keine Daten vorhanden ##chart_single_value_label = Value chart_slider_button_bar_day = Tag @@ -1625,6 +1629,7 @@ view_messageCenter_messageTime = Zeitpukt view_messageCenter_messageTitle = Nachrichtencenter view_messageCenter_noRecentMessages = Keine aktuellen Nachrichten view_messageCenter_stackTraceFollows = --- STACK TRACE FOLGT --- +##view_metric_addToDashboard = Add to Dashboard view_metric_traits = Traits view_metric_viewTraitHistory = Werteverlauf fÃŒr Trait [{0}] ##view_operationCreateWizard_error_scheduleOperationFailure = Failed to schedule operation execution. diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties index 49cc7e6..c1a8521 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties @@ -24,6 +24,10 @@ ##chart_hover_date_format = %m/%d/%y ##chart_hover_time_format = %I:%M:%S %p ##chart_ie_not_supported = Charting is not available for this browser. +##chart_metrics= Metrics +##chart_metrics_collapse_tooltip= Click here to see additional tabular availability data. +##chart_metrics_expand_tooltip= Click here to collapse additional availability detail. 
+##chart_metrics_sparkline_header= Chart ##chart_no_data_label = No Data ##chart_single_value_label = Value ##chart_slider_button_bar_minute = Min @@ -1799,6 +1803,7 @@ view_messageCenter_messageTime = æé view_messageCenter_messageTitle = ã¡ãã»ãŒãžã»ã³ã¿ãŒ view_messageCenter_noRecentMessages = æè¿ã®ã¡ãã»ãŒãžã¯ãããŸãã view_messageCenter_stackTraceFollows = --- ã¹ã¿ãã¯ãã¬ãŒã¹ --- +##view_metric_addToDashboard = Add to Dashboard view_metric_traits = ãã¬ã€ã view_metric_viewTraitHistory = ãã¬ã€ã [{0}] ã®ããã®å€ã®å±¥æŽ view_operationHistoryDetails_dateCompleted = å®äºæ¥ diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties index 89c10c0..2958fbe 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties @@ -22,6 +22,10 @@ ##chart_hover_date_format = %m/%d/%y ##chart_hover_time_format = %I:%M:%S %p ##chart_ie_not_supported = Charting is not available for this browser. +##chart_metrics= Metrics +##chart_metrics_collapse_tooltip= Click here to see additional tabular availability data. +##chart_metrics_expand_tooltip= Click here to collapse additional availability detail. 
+##chart_metrics_sparkline_header= Chart ##chart_no_data_label = No Data ##chart_single_value_label = Value ##chart_slider_button_bar_minute = Min @@ -1539,6 +1543,7 @@ view_messageCenter_messageTime = ìê° view_messageCenter_messageTitle = ë©ìì§ ìŒí° view_messageCenter_noRecentMessages = ìµê·Œ ë©ìì§ë ììµëë€ view_messageCenter_stackTraceFollows = --- ì€í ì¶ì --- +##view_metric_addToDashboard = Add to Dashboard view_metric_traits = í¹ì± view_metric_viewTraitHistory = í¹ì± [{0}]ì ëí ê° êž°ë¡ view_operationHistoryDetails_dateCompleted = ìë£ìŒ diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties index 26546fb..62f31f5 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties @@ -21,6 +21,10 @@ ##chart_hover_date_format = %m/%d/%y ##chart_hover_time_format = %I:%M:%S %p ##chart_ie_not_supported = Charting is not available for this browser. +##chart_metrics= Metrics +##chart_metrics_collapse_tooltip= Click here to see additional tabular availability data. +##chart_metrics_expand_tooltip= Click here to collapse additional availability detail. 
+##chart_metrics_sparkline_header= Chart ##chart_no_data_label = No Data ##chart_single_value_label = Value ##chart_slider_button_bar_minute = Min @@ -1833,6 +1837,7 @@ view_messageCenter_messageTime = Tempo view_messageCenter_messageTitle = Centro de Mensagens view_messageCenter_noRecentMessages = N\u00E3o existem Mensagens Recentes view_messageCenter_stackTraceFollows = --- STACK TRACE FOLLOWS --- +##view_metric_addToDashboard = Add to Dashboard view_metric_traits = Traits view_metric_viewTraitHistory = Value History for Trait [{0}] view_operationCreateWizard_button_execute = Execute diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties index 5692d33..eca8c24 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties @@ -21,6 +21,10 @@ ##chart_hover_date_format = %m/%d/%y ##chart_hover_time_format = %I:%M:%S %p ##chart_ie_not_supported = Charting is not available for this browser. +##chart_metrics= Metrics +##chart_metrics_collapse_tooltip= Click here to see additional tabular availability data. +##chart_metrics_expand_tooltip= Click here to collapse additional availability detail. 
+##chart_metrics_sparkline_header= Chart ##chart_no_data_label = No Data ##chart_single_value_label = Value ##chart_slider_button_bar_minute = Min @@ -1728,6 +1732,7 @@ #view_messageCenter_messageTitle = Message Center #view_messageCenter_noRecentMessages = No Recent Messages #view_messageCenter_stackTraceFollows = --- STACK TRACE FOLLOWS --- +##view_metric_addToDashboard = Add to Dashboard #view_metric_traits = Traits #view_metric_viewTraitHistory = Value History for Trait [{0}] #view_operationHistoryDetails_dateCompleted = Date Completed diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties index f126e51..e71f1fb 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties @@ -21,6 +21,10 @@ ##chart_hover_date_format = %m/%d/%y ##chart_hover_time_format = %I:%M:%S %p ##chart_ie_not_supported = Charting is not available for this browser. +##chart_metrics= Metrics +##chart_metrics_collapse_tooltip= Click here to see additional tabular availability data. +##chart_metrics_expand_tooltip= Click here to collapse additional availability detail. 
+##chart_metrics_sparkline_header= Chart ##chart_no_data_label = No Data ##chart_single_value_label = Value ##chart_slider_button_bar_minute = Min @@ -1802,6 +1806,7 @@ view_messageCenter_messageTime = \u65f6\u95f4 view_messageCenter_messageTitle = \u6d88\u606f\u4e2d\u5fc3 view_messageCenter_noRecentMessages = \u65e0\u8fd1\u671f\u6d88\u606f view_messageCenter_stackTraceFollows = --- \u540e\u9762\u662f\u5f02\u5e38\u4fe1\u606f --- +##view_metric_addToDashboard = Add to Dashboard view_metric_traits = \u7279\u5f81 view_metric_viewTraitHistory = Value History for Trait [{0}] view_operationHistoryDetails_dateCompleted = \u5b8c\u6210\u65e5\u671f diff --git a/modules/enterprise/gui/coregui/src/main/webapp/js/rhq.js b/modules/enterprise/gui/coregui/src/main/webapp/js/rhq.js index 40c7ca8..ef65e69 100644 --- a/modules/enterprise/gui/coregui/src/main/webapp/js/rhq.js +++ b/modules/enterprise/gui/coregui/src/main/webapp/js/rhq.js @@ -38,9 +38,10 @@ if (!window.console.log) window.console.log = function () { * @param singleValueLabel * @param chartXaxisTimeFormatHours * @param chartXaxisTimeFormatHoursMinutes + * @param showLegend * @constructor */ -var ChartContext = function (chartId, chartHeight, metricsData, xAxisLabel, chartTitle, yAxisUnits, minChartTitle, avgChartTitle, peakChartTitle, dateLabel, timeLabel, downLabel, unknownLabel, noDataLabel, hoverStartLabel, hoverEndLabel, hoverPeriodLabel, hoverBarLabel, chartHoverTimeFormat, chartHoverDateFormat, isPortalGraph, portalId, buttonBarDateTimeFormat, singleValueLabel, chartXaxisTimeFormatHours, chartXaxisTimeFormatHoursMinutes) { +var ChartContext = function (chartId, chartHeight, metricsData, xAxisLabel, chartTitle, yAxisUnits, minChartTitle, avgChartTitle, peakChartTitle, dateLabel, timeLabel, downLabel, unknownLabel, noDataLabel, hoverStartLabel, hoverEndLabel, hoverPeriodLabel, hoverBarLabel, chartHoverTimeFormat, chartHoverDateFormat, isPortalGraph, portalId, buttonBarDateTimeFormat, singleValueLabel, 
chartXaxisTimeFormatHours, chartXaxisTimeFormatHoursMinutes, hideLegend) { "use strict"; if (!(this instanceof ChartContext)) { throw new Error("ChartContext function cannot be called as a function.") @@ -78,6 +79,7 @@ var ChartContext = function (chartId, chartHeight, metricsData, xAxisLabel, char this.buttonBarDateTimeFormat = buttonBarDateTimeFormat; this.chartXaxisTimeFormatHours = chartXaxisTimeFormatHours; this.chartXaxisTimeFormatHoursMinutes = chartXaxisTimeFormatHoursMinutes; + this.hideLegend = hideLegend;
}, /**
commit 1d740172121ae51d6a6c183a894710cc58b61619 Author: Thomas Segismont tsegismo@redhat.com Date: Thu Jul 25 18:06:07 2013 +0200
Bug 886126 - Datasource JNDI change takes too long to be propagated to JON server
In EAP5, JNDI name is the key of Datasource and Connection Factory managed components. Consequently, there is no way to change the JNDI name of such components without creating a new managed component, which will in the end be discovered by RHQ.
So in RHQ, jndi-name config property is now marked as read-only (it will still be required when creating a Datasource or a Connection Factory).
diff --git a/modules/plugins/jboss-as-5/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/jboss-as-5/src/main/resources/META-INF/rhq-plugin.xml index 2b14f5b..9a53a80 100644 --- a/modules/plugins/jboss-as-5/src/main/resources/META-INF/rhq-plugin.xml +++ b/modules/plugins/jboss-as-5/src/main/resources/META-INF/rhq-plugin.xml @@ -133,6 +133,7 @@
<!ENTITY datasourceConnectionResourceConfigProps ' <c:simple-property name="jndi-name" + readOnly="true" displayName="JNDI Name" description="The global JNDI Name under which to bind the datasource" type="string"/> @@ -502,7 +503,7 @@ -->
<!ENTITY connectionFactoryConnectionResourceConfigProps ' - <c:simple-property required="true" name="jndi-name" displayName="JNDI Name" + <c:simple-property required="true" name="jndi-name" readOnly="true" displayName="JNDI Name" description="The global JNDI name to bind the connection factory under."/>
<c:simple-property required="true" name="rar-name" displayName="RAR Name">
commit 8fa51829ab0467d8518b095d303528664c7f58a3 Author: Heiko W. Rupp hwr@redhat.com Date: Thu Jul 25 17:17:27 2013 +0200
Make the parser check its input and bail out if it is not valid.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/helper/LinkDeserializer.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/helper/LinkDeserializer.java index e9582d4..f320d09 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/helper/LinkDeserializer.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/helper/LinkDeserializer.java @@ -20,7 +20,11 @@ package org.rhq.enterprise.server.rest.helper;
import java.io.IOException; +import java.util.regex.Matcher; +import java.util.regex.Pattern;
+import org.codehaus.jackson.JsonLocation; +import org.codehaus.jackson.JsonParseException; import org.codehaus.jackson.JsonParser; import org.codehaus.jackson.JsonProcessingException; import org.codehaus.jackson.map.DeserializationContext; @@ -44,25 +48,47 @@ import org.rhq.enterprise.server.rest.domain.Link; */ public class LinkDeserializer extends JsonDeserializer<Link>{
+ Pattern textPattern = Pattern.compile("\\S+"); // Non whitespace; could possibly be narrowed + @Override public Link deserialize(JsonParser jp, DeserializationContext ctxt) throws IOException, JsonProcessingException {
String tmp = jp.getText(); // { + validate(jp, tmp,"{"); jp.nextToken(); // skip over { to the rel String rel = jp.getText(); + validateText(jp, rel); jp.nextToken(); // skip over { tmp = jp.getText(); + validate(jp, tmp,"{"); jp.nextToken(); // skip over "href" tmp = jp.getText(); -// jp.nextToken(); // skip over : + validate(jp, tmp,"href"); jp.nextToken(); // skip to "http:// ... " String href = jp.getText(); + validateText(jp, href); jp.nextToken(); // skip } tmp = jp.getText(); + validate(jp, tmp, "}"); jp.nextToken(); // skip } + tmp = jp.getText(); + validate(jp, tmp, "}");
Link link = new Link(rel,href);
return link; } + + private void validateText(JsonParser jsonParser, String input) throws JsonProcessingException { + Matcher m = textPattern.matcher(input); + if (!m.matches()) { + throw new JsonParseException("Unexpected token: " + input, jsonParser.getTokenLocation()); + } + } + + private void validate(JsonParser jsonParser, String input, String expected) throws JsonProcessingException { + if (!input.equals(expected)) { + throw new JsonParseException("Unexpected token: " + input, jsonParser.getTokenLocation()); + } + } } diff --git a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/OperationsTest.java b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/OperationsTest.java index 3d39181..7789f9e 100644 --- a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/OperationsTest.java +++ b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/OperationsTest.java @@ -18,7 +18,6 @@ */ package org.rhq.modules.integrationTests.restApi;
-import java.util.ArrayList; import java.util.List; import java.util.Map;
@@ -219,10 +218,61 @@ public class OperationsTest extends AbstractBase { .when() .delete("/operation/" + draftId); } + } + + @Test + public void testCatchBadLinkSerialization() throws Exception { + + // Test that when we get Links back in bad format, we + // correctly bail out. + + Operation draft = getADraftOperation(_platformId, discoveryDefinitionId); + + int draftId = draft.getId(); + draft.getParams().put("detailedDiscovery",true); + + String jsonWithBadLinkSer = // + "{\n" + + " "id": " + draftId + ",\n" + + " "name": "discovery",\n" + + " "readyToSubmit": false,\n" + + " "resourceId": " + _platformId + ",\n" + + " "definitionId": " + discoveryDefinitionId + ",\n" + + " "params": {\n" + + " "detailedDiscovery": true\n" + + " },\n" + + " "links": [\n" + + " {\n" + + " "rel": "edit",\n" + + " "href": "http://localhost:7080/rest/operation/" + draftId + ""\n" + + " }\n" + + " ]\n" + + "}"; + + try { + given() + .contentType(ContentType.JSON) + .pathParam("id", draftId) + .body(jsonWithBadLinkSer) + .log().everything() + .expect() + .statusCode(503) + .log().ifError() + .when() + .put("/operation/{id}"); + } finally { + // delete the draft again + expect() + .statusCode(204) + .when() + .delete("/operation/" + draftId); + }
}
+ + @Test public void testCreateDraftOperationAndScheduleExecution() throws Exception {
commit e2e18c6332505611c2b87e81b3369967a0c2a956 Author: Heiko W. Rupp hwr@redhat.com Date: Thu Jul 25 14:57:07 2013 +0200
Add a LinkSerializer to send links in the correct format (one could argue not to send links at all).
diff --git a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/OperationsTest.java b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/OperationsTest.java index 57c1a49..3d39181 100644 --- a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/OperationsTest.java +++ b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/OperationsTest.java @@ -318,8 +318,6 @@ public class OperationsTest extends AbstractBase { assert draft != null; assert draft.getDefinitionId() == definitionId;
- draft.setLinks(new ArrayList<Link>()); // Clean out links TODO - System.out.println("--- Draft created --"); System.out.flush();
diff --git a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/d/Link.java b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/d/Link.java index c3f7b2b..c199729 100644 --- a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/d/Link.java +++ b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/d/Link.java @@ -20,11 +20,13 @@ package org.rhq.modules.integrationTests.restApi.d;
import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonSerialize;
/** * A Link inside objects * @author Heiko W. Rupp */ +@JsonSerialize(using = LinkSerializer.class) @JsonDeserialize(using = LinkDeserializer.class) public class Link {
diff --git a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/d/LinkSerializer.java b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/d/LinkSerializer.java new file mode 100644 index 0000000..759c8dc --- /dev/null +++ b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/d/LinkSerializer.java @@ -0,0 +1,53 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +package org.rhq.modules.integrationTests.restApi.d; + +import java.io.IOException; + +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.JsonSerializer; +import com.fasterxml.jackson.databind.SerializerProvider; + +/** + * Special serializer for Link objects that does not map the classical {rel:abc, href:xyz} scheme, + * but which puts the rel name "at the outside" like { abc : { href : xyz }} to make it easier for + * clients to access the link. + * See also https://bugzilla.redhat.com/show_bug.cgi?id=845244 + * @author Heiko W. 
Rupp + * @see LinkDeserializer + */ +public class LinkSerializer extends JsonSerializer<Link> { + + @Override + public void serialize(Link link, JsonGenerator jsonGenerator, + SerializerProvider serializerProvider) throws IOException, JsonProcessingException { + + jsonGenerator.writeStartObject(); + jsonGenerator.writeFieldName(link.getRel()); + + jsonGenerator.writeStartObject(); + jsonGenerator.writeFieldName("href"); + jsonGenerator.writeString(link.getHref()); + jsonGenerator.writeEndObject(); + + jsonGenerator.writeEndObject(); + } +}
commit 80144152f0d7b919df9cb09de7170250edb26b03 Author: Heiko W. Rupp hwr@redhat.com Date: Thu Jul 25 12:55:54 2013 +0200
BZ 974963 Allow scheduling of operations that have no parameters.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/OperationsHandlerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/OperationsHandlerBean.java index 5e8e68e..070675e 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/OperationsHandlerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/OperationsHandlerBean.java @@ -303,12 +303,15 @@ public class OperationsHandlerBean extends AbstractRestBean {
// Validate parameters ConfigurationDefinition parameterDefinition = opDef.getParametersConfigurationDefinition(); - List<String> errorMessages = ConfigurationHelper.checkConfigurationWrtDefinition(parameters, parameterDefinition); - - if (errorMessages.size()>0) { - // Configuration is not ok - operation.setReadyToSubmit(false); - throw new BadArgumentException("Validation of parameters failed", StringUtils.getListAsString(errorMessages,", ")); + if (parameterDefinition!=null) { + // There are parameters defined, so lets validate them. + List<String> errorMessages = ConfigurationHelper.checkConfigurationWrtDefinition(parameters, parameterDefinition); + + if (errorMessages.size()>0) { + // Configuration is not ok + operation.setReadyToSubmit(false); + throw new BadArgumentException("Validation of parameters failed", StringUtils.getListAsString(errorMessages,", ")); + } } }
diff --git a/modules/integration-tests/rest-api/pom.xml b/modules/integration-tests/rest-api/pom.xml index 47bfe18..5d7bde5 100644 --- a/modules/integration-tests/rest-api/pom.xml +++ b/modules/integration-tests/rest-api/pom.xml @@ -18,7 +18,7 @@
<properties> <surefire-plugin.version>2.10</surefire-plugin.version> - <jackson.version>1.9.5</jackson.version> + <jackson.version>2.0.5</jackson.version> <rest-assured.version>1.8.1</rest-assured.version> </properties>
@@ -53,15 +53,14 @@ <scope>test</scope> </dependency> <dependency> - <groupId>org.codehaus.jackson</groupId> - <artifactId>jackson-core-asl</artifactId> + <groupId>com.fasterxml.jackson.core</groupId> + <artifactId>jackson-annotations</artifactId> <version>${jackson.version}</version> <scope>test</scope> </dependency> - <dependency> - <groupId>org.codehaus.jackson</groupId> - <artifactId>jackson-mapper-asl</artifactId> + <groupId>com.fasterxml.jackson.core</groupId> + <artifactId>jackson-core</artifactId> <version>${jackson.version}</version> <scope>test</scope> </dependency> diff --git a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/OperationsTest.java b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/OperationsTest.java index 7800e5c..57c1a49 100644 --- a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/OperationsTest.java +++ b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/OperationsTest.java @@ -18,6 +18,7 @@ */ package org.rhq.modules.integrationTests.restApi;
+import java.util.ArrayList; import java.util.List; import java.util.Map;
@@ -42,7 +43,8 @@ import static org.hamcrest.core.Is.is; */ public class OperationsTest extends AbstractBase {
- private int definitionId; + private int discoveryDefinitionId; + private int viewPLDefinitionId;
@Before public void setUp() throws Exception { @@ -58,15 +60,20 @@ public class OperationsTest extends AbstractBase { .when() .get("/operation/definitions");
- definitionId = -1; + discoveryDefinitionId = -1; List<Map<String,Object>> list = r.as(List.class); for (Map<String,Object> map : list) { - if (map.get("name").equals("discovery")) { - definitionId = (Integer) map.get("id"); + String name = (String) map.get("name"); + Integer id = (Integer) map.get("id"); + if (name.equals("discovery")) { + discoveryDefinitionId = id; + } + if (name.equals("viewProcessList")) { + viewPLDefinitionId = id; } }
- assert definitionId !=-1 : "No discovery operation found"; + assert discoveryDefinitionId !=-1 : "No discovery operation found"; }
@Test @@ -76,7 +83,7 @@ public class OperationsTest extends AbstractBase {
given() .header(acceptJson) - .pathParam("did",definitionId) + .pathParam("did", discoveryDefinitionId) .expect() .statusCode(200) .body("name",is("discovery")) @@ -92,7 +99,7 @@ public class OperationsTest extends AbstractBase {
given() .header(acceptJson) - .pathParam("did",-42) + .pathParam("did", -42) .expect() .statusCode(404) .when() @@ -107,7 +114,7 @@ public class OperationsTest extends AbstractBase {
given() .header(acceptJson) - .queryParam("resourceId",42) + .queryParam("resourceId", 42) .expect() .statusCode(404) .when() @@ -134,7 +141,7 @@ public class OperationsTest extends AbstractBase {
given() .header(acceptJson) - .pathParam("did",-42) + .pathParam("did", -42) .expect() .statusCode(406) .when() @@ -150,7 +157,7 @@ public class OperationsTest extends AbstractBase { given() .header(acceptJson) .queryParam("resourceId", 42) - .pathParam("definitionId", definitionId) + .pathParam("definitionId", discoveryDefinitionId) .expect() .statusCode(404) .when() @@ -164,7 +171,7 @@ public class OperationsTest extends AbstractBase {
given() .header(acceptJson) - .pathParam("definitionId", definitionId) + .pathParam("definitionId", discoveryDefinitionId) .expect() .statusCode(406) .when() @@ -174,20 +181,7 @@ public class OperationsTest extends AbstractBase { @Test public void testCreateDraftOperation() throws Exception {
- Operation draft = - given() - .header(acceptJson) - .pathParam("definitionId",definitionId) - .queryParam("resourceId",_platformId) - .expect() - .statusCode(200) - .log().ifError() - .when() - .post("/operation/definition/{definitionId}") - .as(Operation.class); - - assert draft != null; - assert draft.getDefinitionId() == definitionId; + Operation draft = getADraftOperation(_platformId, discoveryDefinitionId);
int draftId = draft.getId();
@@ -202,29 +196,17 @@ public class OperationsTest extends AbstractBase { @Test public void testCreateAndUpdateDraftOperation() throws Exception {
- Operation draft = - given() - .header(acceptJson) - .pathParam("definitionId",definitionId) - .queryParam("resourceId",_platformId) - .expect() - .statusCode(200) - .log().ifError() - .when() - .post("/operation/definition/{definitionId}") - .as(Operation.class); - - assert draft != null; - assert draft.getDefinitionId() == definitionId; + Operation draft = getADraftOperation(_platformId, discoveryDefinitionId);
int draftId = draft.getId(); - draft.getParams().put("detailed",true); + draft.getParams().put("detailedDiscovery",true);
try { given() .contentType(ContentType.JSON) .pathParam("id", draftId) .body(draft) + .log().everything() .expect() .statusCode(200) .log().ifError() @@ -246,25 +228,51 @@ public class OperationsTest extends AbstractBase {
int platformId = findIdOfARealPlatform();
- Operation draft = + Operation draft = getADraftOperation(platformId, discoveryDefinitionId); + + int draftId = draft.getId(); + + draft.setReadyToSubmit(true); + draft.getParams().put("detailedDiscovery", false); + + // update to schedule + Operation scheduled = given() - .header(acceptJson) - .pathParam("definitionId",definitionId) - .queryParam("resourceId",platformId) + .contentType(ContentType.JSON) + .pathParam("id",draftId) + .body(draft) .expect() .statusCode(200) .log().ifError() .when() - .post("/operation/definition/{definitionId}") + .put("/operation/{id}") .as(Operation.class);
- assert draft != null; - assert draft.getDefinitionId() == definitionId; + System.out.println(scheduled.getId()); + String history = findHistoryItem(scheduled); + + String historyId = history.substring(history.lastIndexOf("/")+1); + try { + waitAndCheckStatus(platformId, historyId); + + } finally { + + // Wait until the operation has finished and then delete + waitForTerminationAndDelete(historyId); + + } + } + + @Test + public void testCreateDraftOperationNoParamsAndScheduleExecution() throws Exception { + + int platformId = findIdOfARealPlatform(); + + Operation draft = getADraftOperation(platformId, viewPLDefinitionId);
int draftId = draft.getId();
draft.setReadyToSubmit(true); - draft.getParams().put("detailedDiscovery", false);
// update to schedule Operation scheduled = @@ -280,6 +288,45 @@ public class OperationsTest extends AbstractBase { .as(Operation.class);
System.out.println(scheduled.getId()); + String history = findHistoryItem(scheduled); + + String historyId = history.substring(history.lastIndexOf("/")+1); + try { + waitAndCheckStatus(platformId, historyId); + + } finally { + + // Wait until the operation has finished and then delete + waitForTerminationAndDelete(historyId); + + } + } + + private Operation getADraftOperation(int platformId, int definitionId) { + Operation draft = + given() + .header(acceptJson) + .pathParam("definitionId", definitionId) + .queryParam("resourceId",platformId) + .expect() + .statusCode(200) + .log().ifError() + .when() + .post("/operation/definition/{definitionId}") + .as(Operation.class); + + assert draft != null; + assert draft.getDefinitionId() == definitionId; + + draft.setLinks(new ArrayList<Link>()); // Clean out links TODO + + System.out.println("--- Draft created --"); + System.out.flush(); + + return draft; + } + + private String findHistoryItem(Operation scheduled) { String history = null; List<Link> links = scheduled.getLinks(); for (Link link : links) { @@ -288,76 +335,74 @@ public class OperationsTest extends AbstractBase { } } assert history != null; + return history; + }
- String historyId = history.substring(history.lastIndexOf("/")+1); - try { - Thread.sleep(15000); // we need to wait a little as the execution may take time + private void waitAndCheckStatus(int platformId, String historyId) throws InterruptedException { + Thread.sleep(15000); // we need to wait a little as the execution may take time
- given() - .pathParam("hid",historyId) - .expect() - .statusCode(200) - .log().ifError() - .when() - .get("/operation/history/{hid}"); + given() + .pathParam("hid", historyId) + .expect() + .statusCode(200) + .log().ifError() + .when() + .get("/operation/history/{hid}"); + + // See if we also find it when we are looking for histories on the resource + Response response = + given() + .queryParam("resourceId", platformId) + .header(acceptJson) + .expect() + .statusCode(200) + .log().ifError() + .when() + .get("/operation/history");
- // See if we also find it when we are looking for histories on the resource + // compare + List<Map<String,Object>> list = response.as(List.class); + boolean found = false; + for (Map<String,Object> map : list) { + if (map.get("jobId").equals(historyId)) { + found = true; + } + } + assert found; + } + + private void waitForTerminationAndDelete(String historyId) throws InterruptedException { + boolean done = false; + int count = 0; + while (!done) { Response response = given() - .queryParam("resourceId",platformId) .header(acceptJson) - .expect() - .statusCode(200) - .log().ifError() + .pathParam("hid", historyId) .when() - .get("/operation/history"); - - // compare - List<Map<String,Object>> list = response.as(List.class); - boolean found = false; - for (Map<String,Object> map : list) { - if (map.get("jobId").equals(historyId)) { - found = true; - } - } - assert found; + .get("/operation/history/{hid}");
- } finally { + JsonPath jsonPath = response.jsonPath(); + String status= jsonPath.getString("status"); + int code = response.statusCode();
- // Wait until the operation has finished and then delete - boolean done = false; - int count = 0; - while (!done) { - Response response = - given() - .header(acceptJson) - .pathParam("hid", historyId) - .when() - .get("/operation/history/{hid}"); - - JsonPath jsonPath = response.jsonPath(); - String status= jsonPath.getString("status"); - int code = response.statusCode(); - - if (code==200 && (status.equals("Success") || status.equals("Failed"))) { - done = true; - } else { - Thread.sleep(2000); - } - count ++; - assert count < 10 :"Waited for 20sec -- something is wrong"; + if (code==200 && (status.equals("Success") || status.equals("Failed"))) { + done = true; + } else { + Thread.sleep(2000); } - - // Delete the history item - given() - .pathParam("hid",historyId) - .expect() - .statusCode(204) - .log().ifError() - .when() - .delete("/operation/history/{hid}"); - + count ++; + assert count < 10 :"Waited for 20sec -- something is wrong"; } + + // Delete the history item + given() + .pathParam("hid", historyId) + .expect() + .statusCode(204) + .log().ifError() + .when() + .delete("/operation/history/{hid}"); }
@Test @@ -365,20 +410,7 @@ public class OperationsTest extends AbstractBase {
int platformId = findIdOfARealPlatform();
- Operation draft = - given() - .header(acceptJson) - .pathParam("definitionId",definitionId) - .queryParam("resourceId",platformId) - .expect() - .statusCode(200) - .log().ifError() - .when() - .post("/operation/definition/{definitionId}") - .as(Operation.class); - - assert draft != null; - assert draft.getDefinitionId() == definitionId; + Operation draft = getADraftOperation(platformId, discoveryDefinitionId);
int draftId = draft.getId();
@@ -423,20 +455,7 @@ public class OperationsTest extends AbstractBase {
int platformId = findIdOfARealPlatform();
- Operation draft = - given() - .header(acceptJson) - .pathParam("definitionId",definitionId) - .queryParam("resourceId",platformId) - .expect() - .statusCode(200) - .log().ifError() - .when() - .post("/operation/definition/{definitionId}") - .as(Operation.class); - - assert draft != null; - assert draft.getDefinitionId() == definitionId; + Operation draft = getADraftOperation(platformId, discoveryDefinitionId);
int draftId = draft.getId();
diff --git a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/d/Link.java b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/d/Link.java index e16213c..c3f7b2b 100644 --- a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/d/Link.java +++ b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/d/Link.java @@ -19,7 +19,7 @@
package org.rhq.modules.integrationTests.restApi.d;
-import org.codehaus.jackson.map.annotate.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
/** * A Link inside objects diff --git a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/d/LinkDeserializer.java b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/d/LinkDeserializer.java index 5d45724..c138761 100644 --- a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/d/LinkDeserializer.java +++ b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/d/LinkDeserializer.java @@ -21,19 +21,19 @@ package org.rhq.modules.integrationTests.restApi.d;
import java.io.IOException;
-import org.codehaus.jackson.JsonNode; -import org.codehaus.jackson.JsonParser; -import org.codehaus.jackson.JsonProcessingException; -import org.codehaus.jackson.ObjectCodec; -import org.codehaus.jackson.map.DeserializationContext; -import org.codehaus.jackson.map.JsonDeserializer; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.core.ObjectCodec; +import com.fasterxml.jackson.databind.DeserializationContext; +import com.fasterxml.jackson.databind.JsonDeserializer; +import com.fasterxml.jackson.databind.JsonNode;
/** * Custom deserializer for link object that are now in the form { relname : { "href": link}} * * @author Heiko W. Rupp */ -public class LinkDeserializer extends JsonDeserializer<Link>{ +public class LinkDeserializer extends JsonDeserializer<Link> {
@Override public Link deserialize(JsonParser jsonParser, @@ -41,8 +41,8 @@ public class LinkDeserializer extends JsonDeserializer<Link>{
ObjectCodec oc = jsonParser.getCodec(); JsonNode node = oc.readTree(jsonParser); - String rel = node.getFieldNames().next(); - String href = node.getElements().next().get("href").getTextValue(); + String rel = node.fieldNames().next(); + String href = node.elements().next().get("href").textValue();
return new Link(rel,href);
commit 9e805285971e53b4483db2a6f15e5ae9c81d884f Author: Thomas Segismont tsegismo@redhat.com Date: Thu Jul 25 16:50:06 2013 +0200
Make CassandraNodeComponent shutdown operation wait for the server to go down (unless running on OSX)
diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java index 37aa425..3e55a93 100644 --- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java +++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java @@ -68,6 +68,7 @@ import org.rhq.core.pluginapi.util.ProcessExecutionUtility; import org.rhq.core.system.ProcessExecution; import org.rhq.core.system.ProcessExecutionResults; import org.rhq.core.system.ProcessInfo; +import org.rhq.core.system.ProcessInfo.ProcessInfoSnapshot; import org.rhq.core.system.SystemInfo; import org.rhq.core.util.StringUtil; import org.rhq.core.util.exception.ThrowableUtil; @@ -140,7 +141,6 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent if (isStorageServiceReachable()) { return AvailabilityType.UP; } - return AvailabilityType.DOWN; } finally { long totalTimeMillis = NANOSECONDS.toMillis(System.nanoTime() - start); @@ -190,6 +190,7 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent public OperationResult invokeOperation(String name, Configuration parameters) throws Exception { if (name.equals("shutdown")) { OperationResult operationResult = shutdownNode(); + waitForNodeToGoDown(); return operationResult; } else if (name.equals("start")) { return startNode(); @@ -238,7 +239,7 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent protected OperationResult stopNode() { ProcessInfo process = getResourceContext().getNativeProcess();
- if (processInfo == null) { + if (process == null) { LOG.warn("Failed to obtain process info. It appears Cassandra is already shutdown."); return new OperationResult("Failed to obtain process info. It appears Cassandra is already shutdown."); } @@ -265,6 +266,39 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent } }
+ private void waitForNodeToGoDown() throws InterruptedException { + if (OperatingSystem.getInstance().getName().equals(OperatingSystem.NAME_MACOSX)) { + // See this thread on VMWare forum: http://communities.vmware.com/message/2187972#2187972 + // Unfortunately there is no work around for this failure on Mac OSX so the method will silently return on + // this platform. + return; + } + for (ProcessInfoSnapshot processInfoSnapshot = getProcessInfoSnapshot();; processInfoSnapshot = getProcessInfoSnapshot()) { + if (processInfoSnapshot == null || !processInfoSnapshot.isRunning()) { + // Process not found, so it died, that's fine + // OR + // Process info says process is no longer running, that's fine as well + break; + } + if (getResourceContext().getComponentInvocationContext().isInterrupted()) { + // Operation canceled or timed out + throw new InterruptedException(); + } + // Process is still running, wait a second and check again + Thread.sleep(SECONDS.toMillis(2)); + } + } + + private ProcessInfoSnapshot getProcessInfoSnapshot() { + ProcessInfoSnapshot processInfoSnapshot = (processInfo == null) ? null : processInfo.freshSnapshot(); + if (processInfoSnapshot == null || !processInfoSnapshot.isRunning()) { + processInfo = getResourceContext().getNativeProcess(); + // Safe to get prior snapshot here, we've just recreated the process info instance + processInfoSnapshot = (processInfo == null) ? null : processInfo.priorSnaphot(); + } + return processInfoSnapshot; + } + protected OperationResult startNode() { Configuration pluginConfig = getResourceContext().getPluginConfiguration(); String baseDir = pluginConfig.getSimpleValue("baseDir");
commit 127c679b6d152398d4645f3e6dee488f3d043841 Author: Stefan Negrea snegrea@redhat.com Date: Thu Jul 25 08:51:47 2013 -0500
Add support for updating the JMX Port. The process sends a restart to the resource last to avoid putting the Server in maintenance mode.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 1441785..7a7eda4 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -103,7 +103,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN private static final String RHQ_STORAGE_ADDRESS_PROPERTY = "host";
private static final int OPERATION_QUERY_TIMEOUT = 20000; - private static final int MAX_ITERATIONS = 6; + private static final int MAX_ITERATIONS = 10; private static final String UPDATE_CONFIGURATION_OPERATION = "updateConfiguration"; private static final String RESTART_OPERATION = "restart";
@@ -639,16 +639,45 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN StorageNode storageNode = findStorageNodeByAddress(storageNodeConfiguration.getStorageNode().getAddress());
if (storageNode != null && storageNode.getResource() != null) { - Resource storageNodeResource = storageNode.getResource(); Configuration parameters = new Configuration(); parameters.setSimpleValue("jmxPort", storageNodeConfiguration.getJmxPort() + ""); - parameters.setSimpleValue("heapSize", storageNodeConfiguration.getHeapSize() + ""); - parameters.setSimpleValue("heapNewSize", storageNodeConfiguration.getHeapNewSize() + ""); - parameters.setSimpleValue("threadStackSize", storageNodeConfiguration.getThreadStackSize() + ""); - parameters.setSimpleValue("restartIfRequired", "true"); + if (storageNodeConfiguration.getHeapSize() != null) { + parameters.setSimpleValue("heapSize", storageNodeConfiguration.getHeapSize() + ""); + } + if (storageNodeConfiguration.getHeapNewSize() != null) { + parameters.setSimpleValue("heapNewSize", storageNodeConfiguration.getHeapNewSize() + ""); + } + if (storageNodeConfiguration.getThreadStackSize() != null) { + parameters.setSimpleValue("threadStackSize", storageNodeConfiguration.getThreadStackSize() + ""); + } + parameters.setSimpleValue("restartIfRequired", "false");
- return runOperationAndWaitForResult(subject, storageNodeResource, UPDATE_CONFIGURATION_OPERATION, + Resource storageNodeResource = storageNode.getResource(); + + boolean result = runOperationAndWaitForResult(subject, storageNodeResource, UPDATE_CONFIGURATION_OPERATION, parameters); + + if (result) { + Configuration storageNodePluginConfig = configurationManager.getPluginConfiguration(subject, + storageNodeResource.getId()); + + String existingJMXPort = storageNodePluginConfig.getSimpleValue("jmxPort"); + storageNodePluginConfig.setSimpleValue("jmxPort", storageNodeConfiguration.getJmxPort() + ""); + + String existingConnectionURL = storageNodePluginConfig.getSimpleValue("connectorAddress"); + String newConnectionURL = existingConnectionURL.replace(":" + existingJMXPort + "/", ":" + + storageNodeConfiguration.getJmxPort() + "/"); + storageNodePluginConfig.setSimpleValue("connectorAddress", newConnectionURL); + + configurationManager.updatePluginConfiguration(subject, storageNodeResource.getId(), + storageNodePluginConfig); + + storageNode.setJmxPort(storageNodeConfiguration.getJmxPort()); + entityManager.merge(storageNode); + entityManager.flush(); + + return runOperationAndWaitForResult(subject, storageNodeResource, RESTART_OPERATION, null); + } }
return false; diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java index 41a0c42..125f4d2 100644 --- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java @@ -117,6 +117,7 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper
//update storage node jvm settings Configuration config = new Configuration(); + config.put(new PropertySimple("jmxPort", params.getSimpleValue("jmxPort"))); config.put(new PropertySimple("minHeapSize", params.getSimpleValue("heapSize"))); config.put(new PropertySimple("maxHeapSize", params.getSimpleValue("heapSize"))); config.put(new PropertySimple("heapNewSize", params.getSimpleValue("heapNewSize"))); diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java index 1667877..dd5b8a2 100644 --- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java @@ -148,11 +148,17 @@ public class StorageNodeConfigDelegate implements ConfigurationFacet { } }
- private void updateCassandraJvmProps(Configuration config) throws IOException { + private void updateCassandraJvmProps(Configuration newConfig) throws IOException { PropertiesFileUpdate propertiesUpdater = new PropertiesFileUpdate(jvmOptsFile.getAbsolutePath()); Properties properties = propertiesUpdater.loadExistingProperties();
- String maxHeapSize = config.getSimpleValue("maxHeapSize"); + String jmxPort = newConfig.getSimpleValue("jmxPort"); + if (!StringUtil.isEmpty(jmxPort)) { + validateIntegerArg("jmx_port", jmxPort); + properties.setProperty("jmx_port", jmxPort); + } + + String maxHeapSize = newConfig.getSimpleValue("maxHeapSize"); if (!StringUtil.isEmpty(maxHeapSize)) { validateHeapArg("maxHeapSize", maxHeapSize); // We want min and max heap to be the same @@ -160,19 +166,19 @@ public class StorageNodeConfigDelegate implements ConfigurationFacet { properties.setProperty("heap_max", "-Xmx" + maxHeapSize); }
- String heapNewSize = config.getSimpleValue("heapNewSize"); + String heapNewSize = newConfig.getSimpleValue("heapNewSize"); if (!StringUtil.isEmpty(heapNewSize)) { validateHeapArg("heapNewSize", heapNewSize); properties.setProperty("heap_new", "-Xmn" + heapNewSize); }
- String threadStackSize = config.getSimpleValue("threadStackSize"); + String threadStackSize = newConfig.getSimpleValue("threadStackSize"); if (!StringUtil.isEmpty(threadStackSize)) { - validateStackArg(threadStackSize); + validateIntegerArg("threadStackSize", threadStackSize); properties.setProperty("thread_stack_size", "-Xss" + threadStackSize + "k"); }
- PropertySimple heapDumpOnOMMError = config.getSimple("heapDumpOnOOMError"); + PropertySimple heapDumpOnOMMError = newConfig.getSimple("heapDumpOnOOMError"); if (heapDumpOnOMMError != null) { if (heapDumpOnOMMError.getBooleanValue()) { properties.setProperty("heap_dump_on_OOMError", "-XX:+HeapDumpOnOutOfMemoryError"); @@ -181,7 +187,7 @@ public class StorageNodeConfigDelegate implements ConfigurationFacet { } }
- String heapDumpDir = useForwardSlash(config.getSimpleValue("heapDumpDir")); + String heapDumpDir = useForwardSlash(newConfig.getSimpleValue("heapDumpDir")); if (!StringUtil.isEmpty(heapDumpDir)) { properties.setProperty("heap_dump_dir", heapDumpDir); } @@ -209,7 +215,7 @@ public class StorageNodeConfigDelegate implements ConfigurationFacet {
String threadStackSize = config.getSimpleValue("threadStackSize"); if (!StringUtil.isEmpty(threadStackSize)) { - validateStackArg(threadStackSize); + validateIntegerArg("threadStackSize", threadStackSize); properties.setProperty("set.thread_stack_size", "-Xss" + threadStackSize + "k"); }
@@ -248,11 +254,11 @@ public class StorageNodeConfigDelegate implements ConfigurationFacet { } }
- private void validateStackArg(String value) { + private void validateIntegerArg(String name, String value) { try { Integer.parseInt(value); } catch (NumberFormatException e) { - throw new IllegalArgumentException(value + " is not a legal value for the property [threadStackSize]"); + throw new IllegalArgumentException(value + " is not a legal value for the property [" + name + "]"); } }
diff --git a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml index aca3ba2..a42040d 100644 --- a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml +++ b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml @@ -32,6 +32,7 @@ <c:simple-property name="baseDir" displayName="Base Directory" description="The base directory from which the Cassandra Daemon was launched." required="false"/> <c:simple-property name="yamlConfiguration" displayName="YAML Configuration File" description="YAML Configuration File"/> <c:simple-property name="nativeTransportPort" description="The port on which Cassandra listens for CQL client connections." default="9042" type="integer"/> + <c:simple-property name="jmxPort" description="The JMX port for Cassandra" default="7299" type="integer" readOnly="true"/> <c:simple-property name="host" description="The host on which cassandra listens to CQL client connections" default="localhost"/> <c:simple-property name="clusterName" description="Cluster name" default="localhost"/> <c:simple-property name="authenticator" required="true" default="org.apache.cassandra.auth.AllowAllAuthenticator" description="Cassandra client authenticator"> @@ -131,11 +132,11 @@
<operation name="updateConfiguration" description="Updates the node configuration. Will require a separate server restart for the settings to take effect."> <parameters> + <c:simple-property name="jmxPort" type="integer" description="JMX port JVM option."/> <c:simple-property name="heapSize" type="string" description="The heap size to be used for both -Xms and -Xmx JVM options."/> <c:simple-property name="heapNewSize" type="string" description="The heap new size to be used be used with -Xmn JVM option."/> <c:simple-property name="threadStackSize" type="integer" description="The thread stack size. This memory is allocated to each thread off heap. The value should be an integer that will be interpreted in kilobytes."/> - <c:simple-property name="jmxPort" type="integer" description="JMX port JVM option."/> <c:simple-property name="restartIfRequired" type="boolean" required="true" default="false" description="If [true] then restart the server at the end of the update if and if only the updates made require a restart. If [false] the server will not be restarted regardless of the updates made."/> </parameters> <results>
commit 925e8c881487e9e27c7b0801890ef6f95f48a352 Author: Stefan Negrea snegrea@redhat.com Date: Tue Jul 23 16:43:29 2013 -0500
Move the restart operation to the plugin rather than server bean. This simplifies the interface between the server bean and storage node plugin.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 9416c67..1441785 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -645,21 +645,10 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN parameters.setSimpleValue("heapSize", storageNodeConfiguration.getHeapSize() + ""); parameters.setSimpleValue("heapNewSize", storageNodeConfiguration.getHeapNewSize() + ""); parameters.setSimpleValue("threadStackSize", storageNodeConfiguration.getThreadStackSize() + ""); + parameters.setSimpleValue("restartIfRequired", "true");
- boolean updateConfigurationResult = runOperationAndWaitForResult(subject, storageNodeResource, - UPDATE_CONFIGURATION_OPERATION, parameters); - - if (updateConfigurationResult) { - boolean restartResult = runOperationAndWaitForResult(subject, storageNodeResource, RESTART_OPERATION, - null); - - if (restartResult) { - storageNode.setJmxPort(storageNodeConfiguration.getJmxPort()); - entityManager.persist(storageNode); - - return true; - } - } + return runOperationAndWaitForResult(subject, storageNodeResource, UPDATE_CONFIGURATION_OPERATION, + parameters); }
return false; diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java index d9b35b9..41a0c42 100644 --- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java @@ -111,6 +111,8 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper }
private OperationResult updateConfiguration(Configuration params) { + boolean restartIsRequired = false; + OperationResult result = new OperationResult("Configuration updated.");
//update storage node jvm settings @@ -125,6 +127,31 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper
if (!configurationUpdate.getStatus().equals(ConfigurationUpdateStatus.SUCCESS)) { result.setErrorMessage(configurationUpdate.getErrorMessage()); + } else { + if (params.getSimpleValue("heapSize") != null + || params.getSimpleValue("heapNewSize") != null + || params.getSimpleValue("threadStackSize") != null) { + restartIsRequired = true; + } + } + + //restart the server if: + //- requested by the user + //- the updates done require restart + boolean restartIfRequiredConfig = false; + if (params.getSimpleValue("restartIfRequired") != null) { + restartIfRequiredConfig = Boolean.parseBoolean(params.getSimpleValue("restartIfRequired")); + } + + if (restartIfRequiredConfig && restartIsRequired) { + try { + OperationResult restartResult = this.invokeOperation("restart", null); + if (restartResult.getErrorMessage() != null) { + result.setErrorMessage(restartResult.getErrorMessage()); + } + } catch (Exception e) { + result.setErrorMessage(e.getMessage()); + } }
return result; diff --git a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml index cd84de6..aca3ba2 100644 --- a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml +++ b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml @@ -136,6 +136,7 @@ <c:simple-property name="threadStackSize" type="integer" description="The thread stack size. This memory is allocated to each thread off heap. The value should be an integer that will be interpreted in kilobytes."/> <c:simple-property name="jmxPort" type="integer" description="JMX port JVM option."/> + <c:simple-property name="restartIfRequired" type="boolean" required="true" default="false" description="If [true] then restart the server at the end of the update if and if only the updates made require a restart. If [false] the server will not be restarted regardless of the updates made."/> </parameters> <results> <c:simple-property name="operationResult" description="Outcome of updating the configuration."/>
commit bddfb48623718d3eb99fad6cd1a67bae56a82572 Author: Thomas Segismont tsegismo@redhat.com Date: Thu Jul 25 12:16:39 2013 +0200
CassandraNodeComponent#startNode now starts C* with a short CLASSPATH to allow later discovery on Linux
diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java index 70ba4b2..37aa425 100644 --- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java +++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java @@ -47,6 +47,7 @@ import com.datastax.driver.core.Session;
import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.hyperic.sigar.OperatingSystem; import org.hyperic.sigar.SigarException; import org.mc4j.ems.connection.EmsConnection; import org.mc4j.ems.connection.bean.EmsBean; @@ -67,7 +68,6 @@ import org.rhq.core.pluginapi.util.ProcessExecutionUtility; import org.rhq.core.system.ProcessExecution; import org.rhq.core.system.ProcessExecutionResults; import org.rhq.core.system.ProcessInfo; -import org.rhq.core.system.ProcessInfo.ProcessInfoSnapshot; import org.rhq.core.system.SystemInfo; import org.rhq.core.util.StringUtil; import org.rhq.core.util.exception.ThrowableUtil; @@ -186,16 +186,6 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent } }
- private ProcessInfoSnapshot getProcessInfoSnapshot() { - ProcessInfoSnapshot processInfoSnapshot = (processInfo == null) ? null : processInfo.freshSnapshot(); - if (processInfoSnapshot == null || !processInfoSnapshot.isRunning()) { - processInfo = getResourceContext().getNativeProcess(); - // Safe to get prior snapshot here, we've just recreated the process info instance - processInfoSnapshot = (processInfo == null) ? null : processInfo.priorSnaphot(); - } - return processInfoSnapshot; - } - @Override public OperationResult invokeOperation(String name, Configuration parameters) throws Exception { if (name.equals("shutdown")) { @@ -212,23 +202,6 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent return null; }
- private void waitForNodeToGoDown() throws InterruptedException { - for (ProcessInfoSnapshot processInfoSnapshot = getProcessInfoSnapshot();; processInfoSnapshot = getProcessInfoSnapshot()) { - if (processInfoSnapshot == null || !processInfoSnapshot.isRunning()) { - // Process not found, so it died, that's fine - // OR - // Process info says process is no longer running, that's fine as well - break; - } - if (getResourceContext().getComponentInvocationContext().isInterrupted()) { - // Operation canceled or timed out - throw new InterruptedException(); - } - // Process is still running, wait a second and check again - Thread.sleep(SECONDS.toMillis(1)); - } - } - @SuppressWarnings("rawtypes") protected OperationResult shutdownNode() { ResourceContext<?> context = getResourceContext(); @@ -292,21 +265,19 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent } }
- protected OperationResult startNode() { - ResourceContext<?> context = getResourceContext(); - Configuration pluginConfig = context.getPluginConfiguration(); + Configuration pluginConfig = getResourceContext().getPluginConfiguration(); String baseDir = pluginConfig.getSimpleValue("baseDir"); File binDir = new File(baseDir, "bin"); - File startScript = new File(binDir, getStartScript()); - File pidFile = new File(binDir, "cassandra.pid"); - - ProcessExecution scriptExe = ProcessExecutionUtility.createProcessExecution(startScript); - scriptExe.addArguments(asList("-p", pidFile.getAbsolutePath())); - SystemInfo systemInfo = context.getSystemInformation(); + if (!startScriptExists(binDir)) { + OperationResult failure = new OperationResult("Failed to start Cassandra daemon"); + failure.setErrorMessage("Start script does not exists"); + return failure; + } + ProcessExecution scriptExe = getProcessExecution(binDir); + SystemInfo systemInfo = getResourceContext().getSystemInformation(); ProcessExecutionResults results = systemInfo.executeProcess(scriptExe); - - if (results.getError() == null) { + if (results.getError() == null) { return new OperationResult("Successfully started Cassandra daemon"); } else { OperationResult failure = new OperationResult("Failed to start Cassandra daemon"); @@ -315,6 +286,30 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent } }
+ private boolean startScriptExists(File binDir) { + File file = new File(binDir, getStartScript()); + return file.exists() && !file.isDirectory(); + } + + private ProcessExecution getProcessExecution(File binDir) { + ProcessExecution scriptExe; + if (OperatingSystem.getInstance().getName().equals(OperatingSystem.NAME_WIN32)) { + File startScript = new File(binDir, getStartScript()); + scriptExe = ProcessExecutionUtility.createProcessExecution(startScript); + } else { + // On Linux, when Cassandra is started with an absolute path, the command line is too long and is truncated + // in /proc/pid/cmdline (beacuse of a long CLASSPATH made of absolute paths) + // This prevents the process from being later discovered because the process query argument criteria + // expects org.apache.cassandra.service.CassandraDaemon to be found + File startScript = new File("./" + getStartScript()); + scriptExe = ProcessExecutionUtility.createProcessExecution(startScript); + scriptExe.setCheckExecutableExists(false); + } + scriptExe.setWorkingDirectory(binDir.getAbsolutePath()); + scriptExe.addArguments(asList("-p", "cassandra.pid")); + return scriptExe; + } + protected OperationResult restartNode() { OperationResult result = shutdownNode();
diff --git a/modules/plugins/rhq-storage/pom.xml b/modules/plugins/rhq-storage/pom.xml index b1d50b8..ab97902 100644 --- a/modules/plugins/rhq-storage/pom.xml +++ b/modules/plugins/rhq-storage/pom.xml @@ -121,21 +121,27 @@ <plugin> <artifactId>maven-failsafe-plugin</artifactId> <version>2.13</version> + <configuration> + <includes> + <include>**/*ITest.java</include> + </includes> + <argLine>-Djava.library.path=${pc.lib.dir}</argLine> + <systemPropertyVariables> + <pc.plugins.dir>${pc.plugins.dir}</pc.plugins.dir> + </systemPropertyVariables> + <properties> + <property> + <name>listener</name> + <value>org.rhq.test.testng.StdoutReporter</value> + </property> + </properties> + </configuration> <executions> <execution> <id>integration-test</id> <goals> <goal>integration-test</goal> </goals> - <configuration> - <includes> - <include>**/*ITest.java</include> - </includes> - <argLine>-Djava.library.path=${pc.lib.dir}</argLine> - <systemPropertyVariables> - <pc.plugins.dir>${pc.plugins.dir}</pc.plugins.dir> - </systemPropertyVariables> - </configuration> </execution> <execution> <id>verify</id> @@ -155,6 +161,12 @@ <excludes> <exclude>**/*ITest.java</exclude> </excludes> + <properties> + <property> + <name>listener</name> + <value>org.rhq.test.testng.StdoutReporter</value> + </property> + </properties> </configuration> </plugin> </plugins> diff --git a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java index 5bc8b31..f0744a4 100644 --- a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java +++ b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java @@ -31,6 +31,7 @@ import java.util.Set;
import com.google.common.collect.Sets;
+import org.hyperic.sigar.OperatingSystem; import org.testng.annotations.AfterSuite; import org.testng.annotations.BeforeSuite; import org.testng.annotations.Test; @@ -113,14 +114,8 @@ public class StorageNodeComponentITest { File binDir = new File(basedir, "bin"); SystemInfo systemInfo = SystemInfoFactory.createSystemInfo();
- File startScript = new File("./cassandra"); - ProcessExecution startScriptExe = ProcessExecutionUtility.createProcessExecution(startScript); - startScriptExe.setWorkingDirectory(binDir.getAbsolutePath()); - startScriptExe.setCheckExecutableExists(false); - - startScriptExe.addArguments(asList("-p", "cassandra.pid")); - startScriptExe.setCaptureOutput(true); - ProcessExecutionResults results = systemInfo.executeProcess(startScriptExe); + ProcessExecution processExecution = getProcessExecution(binDir); + ProcessExecutionResults results = systemInfo.executeProcess(processExecution);
assertEquals(results.getExitCode(), (Integer) 0, "Cassandra failed to start: " + results.getCapturedOutput());
@@ -135,6 +130,22 @@ public class StorageNodeComponentITest { schemaManager.updateTopology(true); }
+ private ProcessExecution getProcessExecution(File binDir) { + ProcessExecution startScriptExe; + if (OperatingSystem.getInstance().getName().equals(OperatingSystem.NAME_WIN32)) { + File startScript = new File(binDir, "cassandra.bat"); + startScriptExe = ProcessExecutionUtility.createProcessExecution(startScript); + } else { + File startScript = new File("./cassandra"); + startScriptExe = ProcessExecutionUtility.createProcessExecution(startScript); + startScriptExe.setCheckExecutableExists(false); + } + startScriptExe.setWorkingDirectory(binDir.getAbsolutePath()); + startScriptExe.addArguments(asList("-p", "cassandra.pid")); + startScriptExe.setCaptureOutput(true); + return startScriptExe; + } + private void initPluginContainer() { PluginContainerConfiguration pcConfig = new PluginContainerConfiguration(); File pluginsDir = new File(System.getProperty("pc.plugins.dir"));
commit c1bfeb61b38892c39ac89d813e4a50687a1f56f1 Author: Thomas Segismont tsegismo@redhat.com Date: Thu Jul 25 11:04:44 2013 +0200
Update intentional changes file (methods added in StorageNodeManagerRemote)
diff --git a/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml index 26e9e95..44886b4 100644 --- a/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml +++ b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml @@ -32,6 +32,20 @@ <difference> <className>org/rhq/enterprise/server/cloud/StorageNodeManagerRemote</className> <differenceType>7012</differenceType> <!-- method added to an interface --> + <method>org.rhq.core.domain.cloud.StorageNodeConfigurationComposite retrieveConfiguration(org.rhq.core.domain.auth.Subject, org.rhq.core.domain.cloud.StorageNode)</method> + <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> + </difference> + + <difference> + <className>org/rhq/enterprise/server/cloud/StorageNodeManagerRemote</className> + <differenceType>7012</differenceType> <!-- method added to an interface --> + <method>boolean updateConfiguration(org.rhq.core.domain.auth.Subject, org.rhq.core.domain.cloud.StorageNodeConfigurationComposite)</method> + <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> + </difference> + + <difference> + <className>org/rhq/enterprise/server/cloud/StorageNodeManagerRemote</className> + <differenceType>7012</differenceType> <!-- method added to an interface --> <method>org.rhq.core.domain.util.PageList findNotAcknowledgedStorageNodeAlerts(org.rhq.core.domain.auth.Subject, org.rhq.core.domain.cloud.StorageNode)</method> <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> </difference>
commit 79b44e371d1c1c9464ec0660ac0ad26a33a82c4b Author: Thomas Segismont tsegismo@redhat.com Date: Thu Jul 25 09:44:45 2013 +0200
Update intentional changes file (methods added in StorageNodeManagerRemote)
diff --git a/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml index 58c4eda..26e9e95 100644 --- a/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml +++ b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml @@ -29,4 +29,18 @@ <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> </difference>
+ <difference> + <className>org/rhq/enterprise/server/cloud/StorageNodeManagerRemote</className> + <differenceType>7012</differenceType> <!-- method added to an interface --> + <method>org.rhq.core.domain.util.PageList findNotAcknowledgedStorageNodeAlerts(org.rhq.core.domain.auth.Subject, org.rhq.core.domain.cloud.StorageNode)</method> + <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> + </difference> + + <difference> + <className>org/rhq/enterprise/server/cloud/StorageNodeManagerRemote</className> + <differenceType>7012</differenceType> <!-- method added to an interface --> + <method>org.rhq.core.domain.util.PageList findAllStorageNodeAlerts(org.rhq.core.domain.auth.Subject, org.rhq.core.domain.cloud.StorageNode)</method> + <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> + </difference> + </differences>
commit 00326c0b70787a56e3cbc30e8a8951a22e99c681 Author: John Sanda jsanda@redhat.com Date: Wed Jul 24 23:07:53 2013 -0400
do availability check via jmx
CassandraNodeComponent previously was doing availability checks at the process level. Results were sometimes inconsistent in large part due to different behaviors on different platforms. Doing a JMX check is a more representative check since we perform most of the management via JMX. It also eliminates any platform-specific issues.
The check is done via a direct JMX connection instead of EMS to avoid the caching issues with EMS that could yield inaccurate results.
diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java index c41e8e7..70ba4b2 100644 --- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java +++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java @@ -35,6 +35,12 @@ import java.util.Date; import java.util.List; import java.util.Map;
+import javax.management.MBeanServerConnection; +import javax.management.ObjectName; +import javax.management.remote.JMXConnector; +import javax.management.remote.JMXConnectorFactory; +import javax.management.remote.JMXServiceURL; + import com.datastax.driver.core.Cluster; import com.datastax.driver.core.Cluster.Builder; import com.datastax.driver.core.Session; @@ -131,10 +137,11 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent public AvailabilityType getAvailability() { long start = System.nanoTime(); try { - // Get a fresh snapshot of the process - ProcessInfoSnapshot processInfoSnapshot = getProcessInfoSnapshot(); - return (processInfoSnapshot != null && processInfoSnapshot.isRunning()) ? AvailabilityType.UP - : AvailabilityType.DOWN; + if (isStorageServiceReachable()) { + return AvailabilityType.UP; + } + + return AvailabilityType.DOWN; } finally { long totalTimeMillis = NANOSECONDS.toMillis(System.nanoTime() - start); if (LOG.isDebugEnabled()) { @@ -146,6 +153,39 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent } }
+ private boolean isStorageServiceReachable() { + JMXConnector connector = null; + try { + Configuration pluginConfig = getResourceContext().getPluginConfiguration(); + String url = pluginConfig.getSimpleValue("connectorAddress"); + JMXServiceURL serviceURL = new JMXServiceURL(url); + connector = JMXConnectorFactory.connect(serviceURL, null); + + MBeanServerConnection serverConnection = connector.getMBeanServerConnection(); + ObjectName storageService = new ObjectName("org.apache.cassandra.db:type=StorageService"); + + // query an attribute to make sure it is in fact available + serverConnection.getAttribute(storageService, "NativeTransportRunning"); + + return true; + } catch (Exception e) { + if (LOG.isDebugEnabled()) { + LOG.debug("Failed to make JMX connection to StorageService", e); + } + return false; + } finally { + if (connector != null) { + try { + connector.close(); + } catch (IOException e) { + if (LOG.isDebugEnabled()) { + LOG.debug("An error occurred closing the JMX connector", e); + } + } + } + } + } + private ProcessInfoSnapshot getProcessInfoSnapshot() { ProcessInfoSnapshot processInfoSnapshot = (processInfo == null) ? null : processInfo.freshSnapshot(); if (processInfoSnapshot == null || !processInfoSnapshot.isRunning()) { @@ -158,10 +198,8 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent
@Override public OperationResult invokeOperation(String name, Configuration parameters) throws Exception { - if (name.equals("shutdown")) { OperationResult operationResult = shutdownNode(); - waitForNodeToGoDown(); return operationResult; } else if (name.equals("start")) { return startNode(); @@ -234,6 +272,8 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent
long pid = process.getPid(); try { + getEmsConnection().close(); + process.kill("KILL");
Configuration pluginConfig = getResourceContext().getPluginConfiguration();
commit c2bb725d469642b7985eba579f6b50c30a1a776f Author: Thomas Segismont tsegismo@redhat.com Date: Wed Jul 24 18:55:25 2013 +0200
Fix StorageNodeComponentITest.shutdownStorageNode
Fixed storage node module pom typo Made test Cassandra server start with relative paths in classpath (otherwise the command line is too long and gets truncated in /proc/pid/cmdline, thus preventing the process query from finding the server) Made CassandraNodeComponent shutdown operation wait for the server to go down
diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java index 8d74ccc..c41e8e7 100644 --- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java +++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java @@ -132,12 +132,7 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent long start = System.nanoTime(); try { // Get a fresh snapshot of the process - ProcessInfoSnapshot processInfoSnapshot = (processInfo == null) ? null : processInfo.freshSnapshot(); - if (processInfoSnapshot == null || !processInfoSnapshot.isRunning()) { - processInfo = getResourceContext().getNativeProcess(); - // Safe to get prior snapshot here, we've just recreated the process info instance - processInfoSnapshot = (processInfo == null) ? null : processInfo.priorSnaphot(); - } + ProcessInfoSnapshot processInfoSnapshot = getProcessInfoSnapshot(); return (processInfoSnapshot != null && processInfoSnapshot.isRunning()) ? AvailabilityType.UP : AvailabilityType.DOWN; } finally { @@ -151,11 +146,23 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent } }
+ private ProcessInfoSnapshot getProcessInfoSnapshot() { + ProcessInfoSnapshot processInfoSnapshot = (processInfo == null) ? null : processInfo.freshSnapshot(); + if (processInfoSnapshot == null || !processInfoSnapshot.isRunning()) { + processInfo = getResourceContext().getNativeProcess(); + // Safe to get prior snapshot here, we've just recreated the process info instance + processInfoSnapshot = (processInfo == null) ? null : processInfo.priorSnaphot(); + } + return processInfoSnapshot; + } + @Override public OperationResult invokeOperation(String name, Configuration parameters) throws Exception {
if (name.equals("shutdown")) { - return shutdownNode(); + OperationResult operationResult = shutdownNode(); + waitForNodeToGoDown(); + return operationResult; } else if (name.equals("start")) { return startNode(); } else if (name.equals("restart")) { @@ -167,6 +174,23 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent return null; }
+ private void waitForNodeToGoDown() throws InterruptedException { + for (ProcessInfoSnapshot processInfoSnapshot = getProcessInfoSnapshot();; processInfoSnapshot = getProcessInfoSnapshot()) { + if (processInfoSnapshot == null || !processInfoSnapshot.isRunning()) { + // Process not found, so it died, that's fine + // OR + // Process info says process is no longer running, that's fine as well + break; + } + if (getResourceContext().getComponentInvocationContext().isInterrupted()) { + // Operation canceled or timed out + throw new InterruptedException(); + } + // Process is still running, wait a second and check again + Thread.sleep(SECONDS.toMillis(1)); + } + } + @SuppressWarnings("rawtypes") protected OperationResult shutdownNode() { ResourceContext<?> context = getResourceContext(); diff --git a/modules/plugins/rhq-storage/pom.xml b/modules/plugins/rhq-storage/pom.xml index df79e40..b1d50b8 100644 --- a/modules/plugins/rhq-storage/pom.xml +++ b/modules/plugins/rhq-storage/pom.xml @@ -55,6 +55,20 @@ </dependency>
<dependency> + <groupId>${rhq.groupId}</groupId> + <artifactId>test-utils</artifactId> + <version>${project.version}</version> + <scope>test</scope> + <exclusions> + <exclusion> + <groupId>org.slf4j</groupId> + <artifactId>slf4j-nop</artifactId> + </exclusion> + </exclusions> + </dependency> + + + <dependency> <groupId>${project.groupId}</groupId> <artifactId>rhq-cassandra-schema</artifactId> <version>${project.version}</version> @@ -70,7 +84,7 @@ <phase>pre-integration-test</phase> <configuration> <target> - <property name="sigar.dir" value="${project.build.directory/sigar}"/> + <property name="sigar.dir" value="${project.build.directory}/sigar"/>
<mkdir dir="${pc.basedir}"/> <mkdir dir="${pc.lib.dir}"/> diff --git a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java index b668073..5bc8b31 100644 --- a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java +++ b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java @@ -1,3 +1,22 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + package org.rhq.plugins.storage;
import static java.util.Arrays.asList; @@ -94,8 +113,10 @@ public class StorageNodeComponentITest { File binDir = new File(basedir, "bin"); SystemInfo systemInfo = SystemInfoFactory.createSystemInfo();
- File startScript = new File(binDir, "cassandra"); + File startScript = new File("./cassandra"); ProcessExecution startScriptExe = ProcessExecutionUtility.createProcessExecution(startScript); + startScriptExe.setWorkingDirectory(binDir.getAbsolutePath()); + startScriptExe.setCheckExecutableExists(false);
startScriptExe.addArguments(asList("-p", "cassandra.pid")); startScriptExe.setCaptureOutput(true); @@ -176,8 +197,7 @@ public class StorageNodeComponentITest {
assertFalse(pidFile.exists(), pidFile + " should be deleted when the storage node is shutdown.");
- // TODO why is this failing? - //assertNodeIsDown("Expected " + storageNode + " to be DOWN after shutting it down"); + assertNodeIsDown("Expected " + storageNode + " to be DOWN after shutting it down"); }
@Test(dependsOnMethods = "shutdownStorageNode") diff --git a/modules/plugins/rhq-storage/src/test/resources/log4j.properties b/modules/plugins/rhq-storage/src/test/resources/log4j.properties deleted file mode 100644 index 67db049..0000000 --- a/modules/plugins/rhq-storage/src/test/resources/log4j.properties +++ /dev/null @@ -1,42 +0,0 @@ -# -# /* -# * RHQ Management Platform -# * Copyright (C) 2005-2012 Red Hat, Inc. -# * All rights reserved. -# * -# * This program is free software; you can redistribute it and/or modify -# * it under the terms of the GNU General Public License, version 2, as -# * published by the Free Software Foundation, and/or the GNU Lesser -# * General Public License, version 2.1, also as published by the Free -# * Software Foundation. -# * -# * This program is distributed in the hope that it will be useful, -# * but WITHOUT ANY WARRANTY; without even the implied warranty of -# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# * GNU General Public License and the GNU Lesser General Public License -# * for more details. -# * -# * You should have received a copy of the GNU General Public License -# * and the GNU Lesser General Public License along with this program; -# * if not, write to the Free Software Foundation, Inc., -# * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
-# */ -# - -log4j.rootCategory=WARN, FILE, CONSOLE - -log4j.appender.FILE=org.apache.log4j.DailyRollingFileAppender -log4j.appender.FILE.DatePattern='.'yyyy-MM-dd -log4j.appender.FILE.File=./target/test.log -log4j.appender.FILE.layout=org.apache.log4j.PatternLayout -log4j.appender.FILE.layout.ConversionPattern=%d{ABSOLUTE} %-5p %c %m%n -#log4j.appender.A1.layout.ConversionPattern=%d [%t] %-5p %c - %m%n -log4j.appender.FILE.Append=false - -log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender -log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout -#log4j.appender.CONSOLE.layout.ConversionPattern=%5p %d{HH:mm:ss,SSS} %m%n -log4j.appender.CONSOLE.layout.ConversionPattern=%d{ABSOLUTE} %-5p %c %m%n - -log4j.logger.org.rhq=DEBUG -log4j.logger.com.datastax=DEBUG diff --git a/modules/plugins/rhq-storage/src/test/resources/log4j.xml b/modules/plugins/rhq-storage/src/test/resources/log4j.xml new file mode 100644 index 0000000..ec3cd98 --- /dev/null +++ b/modules/plugins/rhq-storage/src/test/resources/log4j.xml @@ -0,0 +1,40 @@ +<?xml version="1.0" encoding="UTF-8"?> + +<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd"> + +<!-- | For more configuration information and examples, see the Jakarta Log4j | website: http://jakarta.apache.org/log4j --> + +<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/%22%3E + + <appender name="CONSOLE" class="org.apache.log4j.ConsoleAppender"> + <param name="Target" value="System.out" /> + <param name="Threshold" value="WARN" /> + <layout class="org.apache.log4j.PatternLayout"> + <param name="ConversionPattern" value="%-5p %d{dd-MM HH:mm:ss,SSS} (%F:%M:%L) - %m%n" /> + </layout> + </appender> + + <appender name="FILE" class="org.apache.log4j.RollingFileAppender"> + <param name="File" value="target/test.log" /> + <param name="Append" value="false" /> + <param name="Threshold" value="DEBUG" /> + <layout class="org.apache.log4j.PatternLayout"> + <param name="ConversionPattern" value="%-5p %d{dd-MM HH:mm:ss,SSS} (%F:%M:%L) - 
%m%n" /> + </layout> + </appender> + + <logger name="org.rhq"> + <level value="DEBUG" /> + </logger> + + <logger name="com.datastax"> + <level value="DEBUG" /> + </logger> + + <root> + <level value="WARN" /> + <appender-ref ref="CONSOLE" /> + <appender-ref ref="FILE" /> + </root> + +</log4j:configuration>
commit 280733b7eb74489f2f31695b3536e804447eb349 Author: John Sanda jsanda@redhat.com Date: Wed Jul 24 07:47:24 2013 -0400
[BZ 987899] remove and create pid file during shutdown and start operations
diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java index cf24fcd..8d74ccc 100644 --- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java +++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java @@ -18,6 +18,7 @@ */ package org.rhq.plugins.cassandra;
+import static java.util.Arrays.asList; import static java.util.concurrent.TimeUnit.NANOSECONDS; import static java.util.concurrent.TimeUnit.SECONDS; import static org.rhq.core.system.OperatingSystemType.WINDOWS; @@ -210,6 +211,14 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent long pid = process.getPid(); try { process.kill("KILL"); + + Configuration pluginConfig = getResourceContext().getPluginConfiguration(); + File basedir = new File(pluginConfig.getSimpleValue("baseDir")); + File binDir = new File(basedir, "bin"); + File pidFile = new File(binDir, "cassandra.pid"); + + pidFile.delete(); + return new OperationResult("Successfully shut down Cassandra daemon with pid " + pid); } catch (SigarException e) { LOG.warn("Failed to shut down Cassandra node with pid " + pid, e); @@ -226,8 +235,10 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent String baseDir = pluginConfig.getSimpleValue("baseDir"); File binDir = new File(baseDir, "bin"); File startScript = new File(binDir, getStartScript()); + File pidFile = new File(binDir, "cassandra.pid");
ProcessExecution scriptExe = ProcessExecutionUtility.createProcessExecution(startScript); + scriptExe.addArguments(asList("-p", pidFile.getAbsolutePath())); SystemInfo systemInfo = context.getSystemInformation(); ProcessExecutionResults results = systemInfo.executeProcess(scriptExe);
diff --git a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java index cd9f148..b668073 100644 --- a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java +++ b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java @@ -2,7 +2,9 @@ package org.rhq.plugins.storage;
import static java.util.Arrays.asList; import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertNotNull; +import static org.testng.Assert.assertTrue;
import java.io.File; import java.net.InetAddress; @@ -168,8 +170,34 @@ public class StorageNodeComponentITest { new Configuration(), timeout);
assertEquals(result.getResultCode(), OperationServicesResultCode.SUCCESS, "The shutdown operation failed"); + + File binDir = new File(basedir, "bin"); + File pidFile = new File(binDir, "cassandra.pid"); + + assertFalse(pidFile.exists(), pidFile + " should be deleted when the storage node is shutdown."); + // TODO why is this failing? - assertNodeIsDown("Expected " + storageNode + " to be DOWN after shutting it down"); + //assertNodeIsDown("Expected " + storageNode + " to be DOWN after shutting it down"); + } + + @Test(dependsOnMethods = "shutdownStorageNode") + public void restartStorageNode() { + OperationManager operationManager = PluginContainer.getInstance().getOperationManager(); + OperationServicesAdapter operationsService = new OperationServicesAdapter(operationManager); + + long timeout = 1000 * 60; + OperationContextImpl operationContext = new OperationContextImpl(storageNode.getId()); + OperationServicesResult result = operationsService.invokeOperation(operationContext, "start", + new Configuration(), timeout); + + assertEquals(result.getResultCode(), OperationServicesResultCode.SUCCESS, "The start operation failed."); + + File binDir = new File(basedir, "bin"); + File pidFile = new File(binDir, "cassandra.pid"); + + assertTrue(pidFile.exists(), pidFile + " should be created when starting the storage node."); + + assertNodeIsUp("Expected " + storageNode + " to be up after restarting it."); }
private void assertNodeIsUp(String msg) { @@ -192,7 +220,8 @@ public class StorageNodeComponentITest {
private Availability getAvailability() { InventoryManager inventoryManager = PluginContainer.getInstance().getInventoryManager(); - return inventoryManager.getAvailabilityIfKnown(storageNode); +// return inventoryManager.getAvailabilityIfKnown(storageNode); + return inventoryManager.getCurrentAvailability(storageNode); }
private void executeAvailabilityScan() {
commit 00162805c6037034f445c90390d46e36fd004a3a Author: John Sanda jsanda@redhat.com Date: Wed Jul 24 07:08:09 2013 -0400
uncommented code that was done while debugging tests
diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java index f76da22..cf24fcd 100644 --- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java +++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java @@ -111,7 +111,7 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent clusterBuilder = clusterBuilder.withCredentials(username, password); }
-// this.cassandraSession = clusterBuilder.build().connect(clusterName); + this.cassandraSession = clusterBuilder.build().connect(clusterName); } catch (Exception e) { LOG.error("Connect to Cassandra " + host + ":" + nativePort, e); throw e;
commit eda94d462cb324316e19486c13308b2bd75dad2c Author: Heiko W. Rupp hwr@redhat.com Date: Wed Jul 24 11:54:53 2013 +0200
BZ 976786 Add a bit more wait time and an additional check if SUCCESS really means it. Return IN_PROGRESS otherwise.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/ResourceHandlerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/ResourceHandlerBean.java index 4bfbb7c..7a6fb33 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/ResourceHandlerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/ResourceHandlerBean.java @@ -721,6 +721,9 @@ public class ResourceHandlerBean extends AbstractRestBean {
@POST @Path("/") + @ApiErrors({ + @ApiError(code = 302, reason = "Creation is still happening. Check back with a GET on the Location.") + }) @ApiOperation(value = "Create a new resource as a child of an existing resource. ", notes= "If a handle is given, a content based resource is created; the content identified by the handle is not removed from the content store." + "If no handle is given, a resource is created from the data of the passed 'resource' object.") @@ -824,6 +827,11 @@ public class ResourceHandlerBean extends AbstractRestBean {
CreateResourceStatus status = history.getStatus();
+ try { + Thread.sleep(2000L); // give the agent time to do the work + } catch (InterruptedException e) { + ; // nothing + }
MediaType mediaType = headers.getAcceptableMediaTypes().get(0);
@@ -832,11 +840,16 @@ public class ResourceHandlerBean extends AbstractRestBean { if ( status == CreateResourceStatus.SUCCESS) {
ResourceWithType rwt = findCreatedResource(history.getParentResource().getId(),history.getCreatedResourceName(),uriInfo); - - builder = Response.ok(); - builder.entity(rwt); + if (rwt!=null) { + builder = Response.ok(); + builder.entity(rwt); + } else { + // History says we had success but due to internal timing + // the resource is not yet visible, so switch to in_progress + status = CreateResourceStatus.IN_PROGRESS; + } } - else if (status==CreateResourceStatus.IN_PROGRESS) { + if (status==CreateResourceStatus.IN_PROGRESS) {
try { Thread.sleep(2000L); // give the agent time to do the work @@ -865,6 +878,7 @@ public class ResourceHandlerBean extends AbstractRestBean { @GET @Path("/creationStatus/{id}") @ApiOperation("Get the status of a resource creation for content based resources.") + @ApiError(code = 302, reason = "Creation is still going on. Check back later with the same URL.") public Response getHistoryItem(@PathParam("id") int historyId, @Context HttpHeaders headers, @Context UriInfo uriInfo) {
CreateResourceHistory history; @@ -888,13 +902,17 @@ public class ResourceHandlerBean extends AbstractRestBean { if (status== CreateResourceStatus.SUCCESS) {
ResourceWithType rwt = findCreatedResource(history.getParentResource().getId(),history.getCreatedResourceName(),uriInfo); - - builder = Response.ok(); - setCachingHeader(builder, 600); - builder.entity(rwt); - + if (rwt!=null) { + builder = Response.ok(); + setCachingHeader(builder, 600); + builder.entity(rwt); + } else { + // History says we had success but due to internal timing + // the resource is not yet visible, so switch to in_progress + status = CreateResourceStatus.IN_PROGRESS; + } } - else if (status==CreateResourceStatus.IN_PROGRESS) { + if (status==CreateResourceStatus.IN_PROGRESS) {
UriBuilder uriBuilder = uriInfo.getRequestUriBuilder(); @@ -913,6 +931,14 @@ public class ResourceHandlerBean extends AbstractRestBean {
}
+ /** + * Find the created resource by its name and parent. Will only return it + * if the resource is already committed. + * @param parentId Id of the parent + * @param name Name of the resource to find + * @param uriInfo UriInfo object to fill links in the returned resource + * @return A ResourceWithType if found, null otherwise. + */ private ResourceWithType findCreatedResource(int parentId, String name, UriInfo uriInfo) { ResourceCriteria criteria = new ResourceCriteria(); criteria.setStrict(true); @@ -920,6 +946,9 @@ public class ResourceHandlerBean extends AbstractRestBean { criteria.addFilterName(name); criteria.addFilterInventoryStatus(InventoryStatus.COMMITTED); List<Resource> resources = resMgr.findResourcesByCriteria(caller,criteria); + if (resources.size()==0) { + return null; + } Resource res = resources.get(0); return fillRWT(res,uriInfo); } diff --git a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/ContentTest.java b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/ContentTest.java index 8303513..b12eea3 100644 --- a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/ContentTest.java +++ b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/ContentTest.java @@ -222,8 +222,10 @@ public class ContentTest extends AbstractBase { int status = response.getStatusCode(); String location = response.getHeader("Location");
- System.out.println("\nLocation " + location + "\n\n"); - assert location!=null; + if (status!=200) { + System.out.println("\nLocation " + location + "\n\n"); + assert location!=null; + }
// We need to check what we got. A 302 means the deploy is still // in progress, so we need to wait a little longer @@ -244,6 +246,7 @@ public class ContentTest extends AbstractBase {
createdResourceId = response.jsonPath().getInt("resourceId");
+ System.out.flush(); System.out.println("\n Deploy is done, resource Id = " + createdResourceId + " \n"); System.out.flush();
@@ -254,6 +257,7 @@ public class ContentTest extends AbstractBase { // Remove the uploaded content removeContent(handle, false);
+ System.out.flush(); System.out.println("\n Content removed \n"); System.out.flush();
commit 6c63ae75f9d688d9cbe3c62406c58242a574b789 Author: John Sanda jsanda@redhat.com Date: Tue Jul 23 22:23:06 2013 -0400
initial commit for StorageNodeComponentITest
This is a first stab at some integration tests for the storage plugin.
diff --git a/modules/plugins/rhq-storage/pom.xml b/modules/plugins/rhq-storage/pom.xml index a88f56e..df79e40 100644 --- a/modules/plugins/rhq-storage/pom.xml +++ b/modules/plugins/rhq-storage/pom.xml @@ -10,11 +10,16 @@
<groupId>org.rhq</groupId> <artifactId>rhq-rhqstorage-plugin</artifactId> - <packaging>jar</packaging>
<name>RHQ Storage Plugin</name> <description>A plugin for managing RHQ Storage Nodes</description>
+ <properties> + <pc.basedir>${project.build.directory}/plugin-container</pc.basedir> + <pc.plugins.dir>${pc.basedir}/plugins</pc.plugins.dir> + <pc.lib.dir>${pc.basedir}/lib</pc.lib.dir> + </properties> + <dependencies> <dependency> <groupId>${rhq.groupId}</groupId> @@ -27,7 +32,6 @@ <groupId>${rhq.groupId}</groupId> <artifactId>rhq-cassandra-plugin</artifactId> <version>${project.version}</version> - <!--<scope>provided</scope>--> </dependency>
<dependency> @@ -35,8 +39,113 @@ <artifactId>org-mc4j-ems</artifactId> <scope>provided</scope> </dependency> + + <dependency> + <groupId>${project.groupId}</groupId> + <artifactId>rhq-cassandra-ccm-core</artifactId> + <version>${project.version}</version> + <scope>test</scope> + </dependency> + + <dependency> + <groupId>${project.groupId}</groupId> + <artifactId>rhq-platform-plugin</artifactId> + <version>${project.version}</version> + <scope>test</scope> + </dependency> + + <dependency> + <groupId>${project.groupId}</groupId> + <artifactId>rhq-cassandra-schema</artifactId> + <version>${project.version}</version> + </dependency> </dependencies>
+ <build> + <plugins> + <plugin> + <artifactId>maven-antrun-plugin</artifactId> + <executions> + <execution> + <phase>pre-integration-test</phase> + <configuration> + <target> + <property name="sigar.dir" value="${project.build.directory/sigar}"/> + + <mkdir dir="${pc.basedir}"/> + <mkdir dir="${pc.lib.dir}"/> + <mkdir dir="${pc.plugins.dir}"/> + + <copy file="${org.rhq:rhq-platform-plugin:jar}" todir="${pc.plugins.dir}"/> + <copy file="${org.rhq:rhq-jmx-plugin:jar}" todir="${pc.plugins.dir}"/> + <copy file="${org.rhq:rhq-cassandra-plugin:jar}" todir="${pc.plugins.dir}"/> + <copy file="${project.build.directory}/${project.build.finalName}.jar" todir="${pc.plugins.dir}"/> + + <unzip src="${org.hyperic:sigar-dist:zip}" dest="${sigar.dir}"> + <patternset> + <include name="**/lib/sigar.jar" /> + <include name="**/lib/bcel*.jar" /> + <include name="**/lib/*.so" /> + <include name="**/lib/*.sl" /> + <include name="**/lib/*.dll" /> + <include name="**/lib/*.dylib" /> + </patternset> + </unzip> + <move todir="${pc.lib.dir}" flatten="true"> + <fileset dir="${sigar.dir}" includes="**/lib/*"/> + </move> + <delete dir="${sigar.dir}"/> + </target> + </configuration> + <goals> + <goal>run</goal> + </goals> + </execution> + </executions> + </plugin> + + <plugin> + <artifactId>maven-failsafe-plugin</artifactId> + <version>2.13</version> + <executions> + <execution> + <id>integration-test</id> + <goals> + <goal>integration-test</goal> + </goals> + <configuration> + <includes> + <include>**/*ITest.java</include> + </includes> + <argLine>-Djava.library.path=${pc.lib.dir}</argLine> + <systemPropertyVariables> + <pc.plugins.dir>${pc.plugins.dir}</pc.plugins.dir> + </systemPropertyVariables> + </configuration> + </execution> + <execution> + <id>verify</id> + <goals> + <goal>verify</goal> + </goals> + <configuration> + <testFailureIgnore>false</testFailureIgnore> + </configuration> + </execution> + </executions> + </plugin> + + <plugin> + <artifactId>maven-surefire-plugin</artifactId> + 
<configuration> + <excludes> + <exclude>**/*ITest.java</exclude> + </excludes> + </configuration> + </plugin> + </plugins> + </build> + <profiles> <profile> <id>dev</id> diff --git a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java new file mode 100644 index 0000000..cd9f148 --- /dev/null +++ b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java @@ -0,0 +1,216 @@ +package org.rhq.plugins.storage; + +import static java.util.Arrays.asList; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertNotNull; + +import java.io.File; +import java.net.InetAddress; +import java.util.Set; + +import com.google.common.collect.Sets; + +import org.testng.annotations.AfterSuite; +import org.testng.annotations.BeforeSuite; +import org.testng.annotations.Test; + +import org.rhq.cassandra.CassandraClusterManager; +import org.rhq.cassandra.ClusterInitService; +import org.rhq.cassandra.Deployer; +import org.rhq.cassandra.DeploymentOptions; +import org.rhq.cassandra.DeploymentOptionsFactory; +import org.rhq.cassandra.schema.SchemaManager; +import org.rhq.core.clientapi.server.discovery.InventoryReport; +import org.rhq.core.domain.cloud.StorageNode; +import org.rhq.core.domain.configuration.Configuration; +import org.rhq.core.domain.measurement.Availability; +import org.rhq.core.domain.measurement.AvailabilityType; +import org.rhq.core.domain.resource.Resource; +import org.rhq.core.domain.resource.ResourceType; +import org.rhq.core.pc.PluginContainer; +import org.rhq.core.pc.PluginContainerConfiguration; +import org.rhq.core.pc.inventory.InventoryManager; +import org.rhq.core.pc.operation.OperationContextImpl; +import org.rhq.core.pc.operation.OperationManager; +import org.rhq.core.pc.operation.OperationServicesAdapter; +import 
org.rhq.core.pc.plugin.FileSystemPluginFinder; +import org.rhq.core.pluginapi.operation.OperationServicesResult; +import org.rhq.core.pluginapi.operation.OperationServicesResultCode; +import org.rhq.core.pluginapi.util.ProcessExecutionUtility; +import org.rhq.core.system.ProcessExecution; +import org.rhq.core.system.ProcessExecutionResults; +import org.rhq.core.system.SystemInfo; +import org.rhq.core.system.SystemInfoFactory; + +/** + * @author John Sanda + */ +public class StorageNodeComponentITest { + + private File basedir; + + private Resource storageNode; + + @BeforeSuite + public void deployStorageNodeAndPluginContainer() throws Exception { + basedir = new File("target", "rhq-storage"); + + deployStorageNode(); + + initPluginContainer(); + } + + private void deployStorageNode() throws Exception { + DeploymentOptionsFactory factory = new DeploymentOptionsFactory(); + DeploymentOptions deploymentOptions = factory.newDeploymentOptions(); + String address = "127.0.0.1"; + + deploymentOptions.setSeeds(address); + deploymentOptions.setListenAddress(address); + deploymentOptions.setRpcAddress(address); + deploymentOptions.setBasedir(basedir.getAbsolutePath()); + deploymentOptions.setCommitLogDir(new File(basedir, "commit_log").getAbsolutePath()); + deploymentOptions.setDataDir(new File(basedir, "data").getAbsolutePath()); + deploymentOptions.setSavedCachesDir(new File(basedir, "saved_caches").getAbsolutePath()); + deploymentOptions.setCommitLogDir(new File(basedir, "logs").getAbsolutePath()); + deploymentOptions.setLoggingLevel("DEBUG"); + deploymentOptions.setNativeTransportPort(9142); + deploymentOptions.setJmxPort(7399); + deploymentOptions.setHeapSize("256M"); + deploymentOptions.setHeapNewSize("64M"); + + deploymentOptions.load(); + + Deployer deployer = new Deployer(); + deployer.setDeploymentOptions(deploymentOptions); + + deployer.unzipDistro(); + deployer.applyConfigChanges(); + deployer.updateFilePerms(); + 
deployer.updateStorageAuthConf(Sets.newHashSet(InetAddress.getByName(address))); + + File binDir = new File(basedir, "bin"); + SystemInfo systemInfo = SystemInfoFactory.createSystemInfo(); + + File startScript = new File(binDir, "cassandra"); + ProcessExecution startScriptExe = ProcessExecutionUtility.createProcessExecution(startScript); + + startScriptExe.addArguments(asList("-p", "cassandra.pid")); + startScriptExe.setCaptureOutput(true); + ProcessExecutionResults results = systemInfo.executeProcess(startScriptExe); + + assertEquals(results.getExitCode(), (Integer) 0, "Cassandra failed to start: " + results.getCapturedOutput()); + + StorageNode storageNode = new StorageNode(); + storageNode.parseNodeInformation("127.0.0.1|7399|9142"); + + ClusterInitService clusterInitService = new ClusterInitService(); + clusterInitService.waitForClusterToStart(asList(storageNode)); + + SchemaManager schemaManager = new SchemaManager("rhqadmin", "rhqadmin", "127.0.0.1|7399|9142"); + schemaManager.install(); + schemaManager.updateTopology(true); + } + + private void initPluginContainer() { + PluginContainerConfiguration pcConfig = new PluginContainerConfiguration(); + File pluginsDir = new File(System.getProperty("pc.plugins.dir")); + pcConfig.setPluginDirectory(pluginsDir); + pcConfig.setPluginFinder(new FileSystemPluginFinder(pluginsDir)); + + pcConfig.setInsideAgent(false); + PluginContainer.getInstance().setConfiguration(pcConfig); + PluginContainer.getInstance().initialize(); + } + + @AfterSuite + public void ShutdownPluginContainerAndStorageNode() throws Exception { + PluginContainer.getInstance().shutdown(); + shutdownStorageNodeIfNecessary(); + } + + private void shutdownStorageNodeIfNecessary() throws Exception { + File binDir = new File(basedir, "bin"); + File pidFile = new File(binDir, "cassandra.pid"); + + if (pidFile.exists()) { + CassandraClusterManager ccm = new CassandraClusterManager(); + ccm.killNode(basedir); + } + } + + @Test + public void 
discoverStorageNode() { + InventoryManager inventoryManager = PluginContainer.getInstance().getInventoryManager(); + InventoryReport inventoryReport = inventoryManager.executeServerScanImmediately(); + + if (inventoryReport.getAddedRoots().isEmpty()) { + // could be empty if the storage node is already in inventory from + // a prior discovery scan. + Resource platform = inventoryManager.getPlatform(); + storageNode = findCassandraNode(platform.getChildResources()); + } else { + storageNode = findCassandraNode(inventoryReport.getAddedRoots()); + } + + assertNotNull(storageNode, "Failed to discover Storage Node instance"); + assertNodeIsUp("Expected " + storageNode + " to be UP after discovery"); + } + + @Test(dependsOnMethods = "discoverStorageNode") + public void shutdownStorageNode() throws Exception { + OperationManager operationManager = PluginContainer.getInstance().getOperationManager(); + OperationServicesAdapter operationsService = new OperationServicesAdapter(operationManager); + + long timeout = 1000 * 60; + OperationContextImpl operationContext = new OperationContextImpl(storageNode.getId()); + OperationServicesResult result = operationsService.invokeOperation(operationContext, "shutdown", + new Configuration(), timeout); + + assertEquals(result.getResultCode(), OperationServicesResultCode.SUCCESS, "The shutdown operation failed"); + // TODO why is this failing? 
+ assertNodeIsDown("Expected " + storageNode + " to be DOWN after shutting it down"); + } + + private void assertNodeIsUp(String msg) { + executeAvailabilityScan(); + + Availability availability = getAvailability(); + + assertNotNull(availability, "Unable to determine availability for " + storageNode); + assertEquals(availability.getAvailabilityType(), AvailabilityType.UP, msg); + } + + private void assertNodeIsDown(String msg) { + executeAvailabilityScan(); + + Availability availability = getAvailability(); + + assertNotNull(availability, "Unable to determine availability for " + storageNode); + assertEquals(availability.getAvailabilityType(), AvailabilityType.DOWN, msg); + } + + private Availability getAvailability() { + InventoryManager inventoryManager = PluginContainer.getInstance().getInventoryManager(); + return inventoryManager.getAvailabilityIfKnown(storageNode); + } + + private void executeAvailabilityScan() { + InventoryManager inventoryManager = PluginContainer.getInstance().getInventoryManager(); + inventoryManager.executeAvailabilityScanImmediately(false, true); + } + + private Resource findCassandraNode(Set<Resource> resources) { + for (Resource resource : resources) { + if (isCassandraNode(resource.getResourceType())) { + return resource; + } + } + return null; + } + + private boolean isCassandraNode(ResourceType type) { + return type.getPlugin().equals("RHQStorage") && type.getName().equals("RHQ Storage Node"); + } + +} diff --git a/modules/plugins/rhq-storage/src/test/resources/log4j.properties b/modules/plugins/rhq-storage/src/test/resources/log4j.properties new file mode 100644 index 0000000..67db049 --- /dev/null +++ b/modules/plugins/rhq-storage/src/test/resources/log4j.properties @@ -0,0 +1,42 @@ +# +# /* +# * RHQ Management Platform +# * Copyright (C) 2005-2012 Red Hat, Inc. +# * All rights reserved. 
+# * +# * This program is free software; you can redistribute it and/or modify +# * it under the terms of the GNU General Public License, version 2, as +# * published by the Free Software Foundation, and/or the GNU Lesser +# * General Public License, version 2.1, also as published by the Free +# * Software Foundation. +# * +# * This program is distributed in the hope that it will be useful, +# * but WITHOUT ANY WARRANTY; without even the implied warranty of +# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# * GNU General Public License and the GNU Lesser General Public License +# * for more details. +# * +# * You should have received a copy of the GNU General Public License +# * and the GNU Lesser General Public License along with this program; +# * if not, write to the Free Software Foundation, Inc., +# * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# */ +# + +log4j.rootCategory=WARN, FILE, CONSOLE + +log4j.appender.FILE=org.apache.log4j.DailyRollingFileAppender +log4j.appender.FILE.DatePattern='.'yyyy-MM-dd +log4j.appender.FILE.File=./target/test.log +log4j.appender.FILE.layout=org.apache.log4j.PatternLayout +log4j.appender.FILE.layout.ConversionPattern=%d{ABSOLUTE} %-5p %c %m%n +#log4j.appender.A1.layout.ConversionPattern=%d [%t] %-5p %c - %m%n +log4j.appender.FILE.Append=false + +log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender +log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout +#log4j.appender.CONSOLE.layout.ConversionPattern=%5p %d{HH:mm:ss,SSS} %m%n +log4j.appender.CONSOLE.layout.ConversionPattern=%d{ABSOLUTE} %-5p %c %m%n + +log4j.logger.org.rhq=DEBUG +log4j.logger.com.datastax=DEBUG
commit 51830a390be2244d6fc6cbd529a5857bb8fb9cba Author: John Sanda jsanda@redhat.com Date: Tue Jul 23 22:16:41 2013 -0400
first stab at prepareForBootstrap operation (which is currently broken)
This is clearly broken from some manual testing I did. Given that the implementation is a bit sloppy at the moment, this is a good time to get some automated tests in place. The operation will perform the following steps in the order specified:
1) shut down the storage node 2) update cassandra.yaml 3) update rhq-storage-auth.conf 4) restart the node
diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java index 0037bfe..f76da22 100644 --- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java +++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java @@ -111,7 +111,7 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent clusterBuilder = clusterBuilder.withCredentials(username, password); }
- this.cassandraSession = clusterBuilder.build().connect(clusterName); +// this.cassandraSession = clusterBuilder.build().connect(clusterName); } catch (Exception e) { LOG.error("Connect to Cassandra " + host + ":" + nativePort, e); throw e; @@ -196,7 +196,17 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent operation = storageService.getOperation("drain", emptyParams); operation.invoke((Object[]) emptyParams);
- ProcessInfo process = context.getNativeProcess(); + return stopNode(); + } + + protected OperationResult stopNode() { + ProcessInfo process = getResourceContext().getNativeProcess(); + + if (processInfo == null) { + LOG.warn("Failed to obtain process info. It appears Cassandra is already shutdown."); + return new OperationResult("Failed to obtain process info. It appears Cassandra is already shutdown."); + } + long pid = process.getPid(); try { process.kill("KILL"); @@ -209,6 +219,7 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent } }
+ protected OperationResult startNode() { ResourceContext<?> context = getResourceContext(); Configuration pluginConfig = context.getPluginConfiguration(); diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java index 3b0aa5b..d9b35b9 100644 --- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java @@ -26,11 +26,15 @@ package org.rhq.plugins.storage;
import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; import java.io.FileWriter; import java.io.IOException; import java.io.StringReader; +import java.util.ArrayList; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set;
import org.apache.commons.logging.Log; @@ -39,6 +43,8 @@ import org.mc4j.ems.connection.EmsConnection; import org.mc4j.ems.connection.bean.EmsBean; import org.mc4j.ems.connection.bean.attribute.EmsAttribute; import org.mc4j.ems.connection.bean.operation.EmsOperation; +import org.yaml.snakeyaml.DumperOptions; +import org.yaml.snakeyaml.Yaml;
import org.rhq.core.domain.configuration.Configuration; import org.rhq.core.domain.configuration.ConfigurationUpdateStatus; @@ -48,6 +54,7 @@ import org.rhq.core.domain.configuration.PropertyMap; import org.rhq.core.domain.configuration.PropertySimple; import org.rhq.core.pluginapi.configuration.ConfigurationFacet; import org.rhq.core.pluginapi.configuration.ConfigurationUpdateReport; +import org.rhq.core.pluginapi.inventory.ResourceContext; import org.rhq.core.pluginapi.operation.OperationFacet; import org.rhq.core.pluginapi.operation.OperationResult; import org.rhq.core.util.StringUtil; @@ -96,6 +103,8 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper return updateConfiguration(parameters); } else if (name.equals("updateKnownNodes")) { return updateKnownNodes(parameters); + } else if (name.equals("prepareForBootstrap")) { + return prepareForBootstrap(parameters); } else { return super.invokeOperation(name, parameters); } @@ -132,6 +141,18 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper ipAddresses.add(propertySimple.getStringValue()); }
+ if (updateAuthFile(result, ipAddresses)) return result; + + EmsBean authBean = getEmsConnection().getBean("org.rhq.cassandra.auth:type=RhqInternodeAuthenticator"); + EmsOperation emsOperation = authBean.getOperation("reloadConfiguration"); + emsOperation.invoke(); + + result.setSimpleResult("Successfully updated the set of known nodes."); + + return result; + } + + private boolean updateAuthFile(OperationResult result, Set<String> ipAddresses) { log.info("Updating known nodes to " + ipAddresses);
File confDir = new File(getBasedir(), "conf"); @@ -150,7 +171,7 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper log.error(msg); result.setErrorMessage(msg);
- return result; + return true; } }
@@ -161,7 +182,7 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper "to unexpected error"; log.error(msg, e); result.setErrorMessage(msg + ": " + ThrowableUtil.getRootMessage(e)); - return result; + return true; }
try { @@ -176,18 +197,127 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper } result.setErrorMessage("There was an unexpected error while updating " + authFile + ". Make sure that " + "it matches " + authBackupFile + " and then reschedule the operation."); + return true; + } + return false; + } + + private OperationResult prepareForBootstrap(Configuration params) { + log.info("Preparing " + this + " for bootstrap..."); + + ResourceContext context = getResourceContext(); + OperationResult result = new OperationResult(); + + log.info("Stopping storage node"); + OperationResult stopNodeResult = stopNode(); + if (stopNodeResult.getErrorMessage() != null) { + log.error("Failed to stop storage node " + this + ". Cannot prepare the node for bootstrap which means " + + "that the storage node cannot join the cluster. Make sure the storage node is not running and retry " + + "the operation."); + result.setErrorMessage("Failed to stop storage node. Cannot prepare the node for bootstrap which means " + + "that it cannot join the cluster. Make sure that the node is not running and retry the operation. " + + "Stopping the storage node failed with this error: " + stopNodeResult.getErrorMessage()); return result; }
- EmsBean authBean = getEmsConnection().getBean("org.rhq.cassandra.auth:type=RhqInternodeAuthenticator"); - EmsOperation emsOperation = authBean.getOperation("reloadConfiguration"); - emsOperation.invoke(); + Configuration pluginConfig = context.getPluginConfiguration(); + String yamlProp = pluginConfig.getSimpleValue("yamlConfiguration"); + File yamlFile = new File(yamlProp);
- result.setSimpleResult("Successfully updated the set of known nodes."); + DumperOptions options = new DumperOptions(); + options.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK); + Yaml yaml = new Yaml(options); + + Map yamlConfig = null; + try { + yamlConfig = (Map) yaml.load(new FileInputStream(yamlFile)); + } catch (FileNotFoundException e) { + log.error("Failed to load " + yamlFile, e); + log.error("Cannot prepare " + this + " for bootstrap. " + yamlFile + " must exist in order to make the " + + "necessary configuration changes."); + result.setErrorMessage("Cannot prepare storage node for bootstrap. It appears that " + yamlFile + + " does not exist. Make sure that it exists so that the necessary configuration changes can be made."); + + return result; + } + + purgeDir(getCommitLogDir(yamlConfig)); + for (File dataDir : getDataDirs(yamlConfig)) { + purgeDir(dataDir); + } + purgeDir(getSavedCachesDir(yamlConfig)); + + log.info("Updating cluster settings"); + + String address = pluginConfig.getSimpleValue("host"); + List<String> seeds = getAddresses(params.getList("storageNodeIPAddresses")); + // Make sure this node's address is not in the list; otherwise, it + // won't bootstrap properly. 
+ seeds.remove(address); + try { + updateSeedsList(seeds); + } catch (IOException e) { + log.error("Failed to update seeds property in " + yamlFile, e); + result.setErrorMessage("Failed to prepare node for bootstrap due to unexpected error that occurred " + + "while updating seeds property in " + yamlFile + ":\n" + ThrowableUtil.getAllMessages(e)); + return result; + } + + if (updateAuthFile(result, new HashSet<String>(seeds))) { + return result; + } + + int cqlPort = Integer.parseInt(params.getSimpleValue("cqlPort")); + int gossipPort = Integer.parseInt(params.getSimpleValue("gossipPort")); + + yamlConfig.put("native_transport_port", cqlPort); + yamlConfig.put("storage_port", gossipPort); + + try { + yaml.dump(yamlConfig, new FileWriter(yamlFile)); + } catch (IOException e) { + log.error("Could not update cluster settings in " + yamlFile, e); + result.setErrorMessage("Could not update cluster settings in " + yamlFile + ":\n" + + ThrowableUtil.getAllMessages(e)); + return result; + } + + log.info(this + " is ready to bootstrap. Restarting storage node..."); + OperationResult startResult = startNode(); + if (startResult.getErrorMessage() != null) { + log.error("Failed to restart storage node:\n" + startResult.getErrorMessage()); + result.setErrorMessage("Failed to restart storage node:\n" + startResult.getErrorMessage()); + } else { + result.setSimpleResult("The storage node was successfully updated and is now bootstrapping into the cluster."); + }
return result; }
+ private void purgeDir(File dir) { + log.info("Purging " + dir); + FileUtil.purge(dir, true); + } + + private File getCommitLogDir(Map yamlConfig) { + return new File((String) yamlConfig.get("commitlog_directory")); + } + + private List<File> getDataDirs(Map yamlConfig) { + List<File> dirs = new ArrayList<File>(); + List<String> dirNames = (List<String>) yamlConfig.get("data_file_directories"); + + for (String dirName : dirNames) { + dirs.add(new File(dirName)); + } + + return dirs; + } + + private File getSavedCachesDir(Map yamlConfig) { + return new File((String) yamlConfig.get("saved_caches_directory")); + } + private OperationResult nodeAdded(Configuration params) { boolean runRepair = params.getSimple("runRepair").getBooleanValue(); boolean updateSeedsList = params.getSimple("updateSeedsList").getBooleanValue(); @@ -405,4 +535,10 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper boolean succeeded; String details; } + + @Override + public String toString() { + return StorageNodeComponent.class.getSimpleName() + "[resourceKey: " + getResourceContext().getResourceKey() + + "]"; + } } diff --git a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml index 1e39d6c..cd84de6 100644 --- a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml +++ b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml @@ -101,6 +101,16 @@ </parameters> </operation>
+ <operation name="prepareForBootstrap"> + <parameters> + <c:simple-property name="cqlPort" type="integer" displayName="CQL Port"/> + <c:simple-property name="gossipPort" type="integer"/> + <c:list-property name="storageNodeIPAddresses" displayName="Storage Node IP Addresses"> + <c:simple-property name="storageNodeIPAddress"/> + </c:list-property> + </parameters> + </operation> + <operation name="prepareForUpgrade" description="Prepares the storage node for upgrade (this operation consists of following steps: 1) turning off the RPC server, 2) turning off the gossiper, 3) taking the snapshot (backuping the data), 4) invoking the drain operation"> <parameters> <c:simple-property name="snapshotName" required="false" type="string" displayName="Snapshot Name"
commit bfd21b6b424c85e12b9290be16ba59b4ed1f31bb Author: Simeon Pinder spinder@fulliautomatix.conchfritter.com Date: Tue Jul 23 15:02:09 2013 -0400
[BZ 984649] fix module metadata.
diff --git a/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml b/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml index 145e3af..82ff294 100644 --- a/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml +++ b/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml @@ -290,7 +290,7 @@ <delete file="${jboss.home}/modules/system/layers/base/org/jgroups/main/jgroups-${jgroups.initial.version}.jar.index" /> <!-- Update the module metadata to the patched version --> <replace file="${jboss.home}/modules/system/layers/base/org/jgroups/main/module.xml" - token="jgroups-${jgroups.initial.version}.jar" value="${jgroups.patch.version}"/> + token="jgroups-${jgroups.initial.version}.jar" value="jgroups-${jgroups.patch.version}.jar"/> <!-- Copy in patched version --> <copy file="${settings.localRepository}/org/jgroups/jgroups/${jgroups.patch.version}/jgroups-${jgroups.patch.version}.jar" toDir="${jboss.home}/modules/system/layers/base/org/jgroups/main" verbose="true"/>
commit d4f78a42e9433975671b4a125db97ba475ba533c Author: Simeon Pinder spinder@fulliautomatix.conchfritter.com Date: Tue Jul 23 13:37:32 2013 -0400
Upgrading richfaces to latest patched version.
diff --git a/pom.xml b/pom.xml index 3662bc7..f909033 100644 --- a/pom.xml +++ b/pom.xml @@ -135,7 +135,7 @@ <postgresql.version>9.2-1002.jdbc4</postgresql.version> <h2.version>1.2.139</h2.version> <jtds.version>1.2.2</jtds.version> - <richfaces.version>3.3.3.Final</richfaces.version> + <richfaces.version>3.3.4.Final</richfaces.version> <jline.version>0.9.94</jline.version> <sigar.version>1.6.5.132-5</sigar.version> <sigar.zip.version>1.6.5</sigar.zip.version>
commit 1af62df790bbb17fa0ee80d5ab287e264ff4e5e1 Author: Simeon Pinder spinder@fulliautomatix.conchfritter.com Date: Tue Jul 23 12:28:52 2013 -0400
[BZ 984649] update jgroups usage to latest patched version.
diff --git a/modules/enterprise/server/appserver/pom.xml b/modules/enterprise/server/appserver/pom.xml index 0a61138..f1a4c7b 100644 --- a/modules/enterprise/server/appserver/pom.xml +++ b/modules/enterprise/server/appserver/pom.xml @@ -19,6 +19,8 @@
<properties> <rhq.dev.data.dir>${rhq.rootDir}/rhq-data</rhq.dev.data.dir> + <jgroups.initial.version>3.2.7.Final</jgroups.initial.version> + <jgroups.patch.version>3.2.10.Final</jgroups.patch.version> </properties>
<dependencies> @@ -72,6 +74,13 @@ <groupId>org.codehaus.groovy</groupId> <artifactId>groovy-all</artifactId> </dependency> + + <!-- Pull down the patched version of JGroups. See CVE 2013-4112 and BZ 984365 --> + <dependency> + <groupId>org.jgroups</groupId> + <artifactId>jgroups</artifactId> + <version>${jgroups.patch.version}</version> + </dependency> </dependencies>
<build> @@ -157,6 +166,8 @@ <property name="rhq.server.http.port" value="${rhq.server.http.port}" /> <property name="rhq.server.https.port" value="${rhq.server.https.port}" /> <property name="rhq.sync.endpoint-address" value="${rhq.sync.endpoint-address}" /> + <property name="jgroups.initial.version" value="${jgroups.initial.version}" /> + <property name="jgroups.patch.version" value="${jgroups.patch.version}" /> </ant> </target> </configuration> diff --git a/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml b/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml index a81b6cd..145e3af 100644 --- a/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml +++ b/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml @@ -283,6 +283,17 @@ </resources> </module> ]]></echo> + + <echo>Updating JGroups module component for EAP to ${jgroups.patch.version}</echo> + <!-- Remove the unpatched version --> + <delete file="${jboss.home}/modules/system/layers/base/org/jgroups/main/jgroups-${jgroups.initial.version}.jar" /> + <delete file="${jboss.home}/modules/system/layers/base/org/jgroups/main/jgroups-${jgroups.initial.version}.jar.index" /> + <!-- Update the module metadata to the patched version --> + <replace file="${jboss.home}/modules/system/layers/base/org/jgroups/main/module.xml" + token="jgroups-${jgroups.initial.version}.jar" value="${jgroups.patch.version}"/> + <!-- Copy in patched version --> + <copy file="${settings.localRepository}/org/jgroups/jgroups/${jgroups.patch.version}/jgroups-${jgroups.patch.version}.jar" + toDir="${jboss.home}/modules/system/layers/base/org/jgroups/main" verbose="true"/>
<echo>Generate SSL key for RHQ server - 128-bit key that expires in 20 years</echo> <property name="jboss.conf.dir" location="${jboss.home}/standalone/configuration" />
commit 300fe8599d7ad8f442e4c3c779f2bc847c454f49 Author: mtho11 mikecthompson@gmail.com Date: Wed Jul 31 12:09:39 2013 -0700
[BZ 990200] Static Analysis cleanup of lower risk items in coregui.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/LoginView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/LoginView.java index d896e63..0a80e6a 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/LoginView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/LoginView.java @@ -80,7 +80,7 @@ public class LoginView extends Canvas {
private static boolean loginShowing = false;
- private Messages MSG = CoreGUI.getMessages(); + private static final Messages MSG = CoreGUI.getMessages();
private Window window; private DynamicForm form; diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/UserSessionManager.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/UserSessionManager.java index ee36243..e1d29ee 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/UserSessionManager.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/UserSessionManager.java @@ -158,7 +158,7 @@ public class UserSessionManager { }
// If a session is active it will return valid session strings - if (sessionIdString != null && sessionIdString.length() > 0) { + if (sessionIdString.length() > 0) {
String[] parts = sessionIdString.split(":"); final int subjectId = Integer.parseInt(parts[0]); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/form/NumberWithUnitsValidator.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/form/NumberWithUnitsValidator.java index 37e5589..33b72c3 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/form/NumberWithUnitsValidator.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/form/NumberWithUnitsValidator.java @@ -91,11 +91,7 @@ public class NumberWithUnitsValidator extends CustomValidator {
if (unitsToUse != null) { try { - if (MeasurementParser.parse(value.toString(), unitsToUse) != null) { - return true; - } else { - return false; - } + return MeasurementParser.parse(value.toString(), unitsToUse) != null; } catch (Exception e) { return false; } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/form/SimpleEditableFormItem.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/form/SimpleEditableFormItem.java index 494e22c..98bb4af 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/form/SimpleEditableFormItem.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/form/SimpleEditableFormItem.java @@ -48,7 +48,7 @@ import org.rhq.enterprise.gui.coregui.client.Messages; */ public class SimpleEditableFormItem extends CanvasItem {
- protected static Messages MSG = CoreGUI.getMessages(); + protected static final Messages MSG = CoreGUI.getMessages();
protected FormItem staticItem; protected FormItem editItem; diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/measurement/AbstractMeasurementRangeEditor.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/measurement/AbstractMeasurementRangeEditor.java index 328112f..b4abddc 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/measurement/AbstractMeasurementRangeEditor.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/measurement/AbstractMeasurementRangeEditor.java @@ -54,12 +54,12 @@ import org.rhq.enterprise.gui.coregui.client.util.message.Message.Severity; */ public abstract class AbstractMeasurementRangeEditor extends DynamicForm implements TableWidget {
- protected Messages MSG = CoreGUI.getMessages(); + protected static final Messages MSG = CoreGUI.getMessages();
//keyed map of translated date units Ex. minutes,hours,days protected static LinkedHashMap<String, String> lastUnits; //array of values available for displaying/selecting 'last N hours|minutes|days'. - protected static String[] lastValues; + protected static final String[] lastValues;
protected boolean advanced; private ButtonItem advancedSimpleButton; @@ -74,13 +74,13 @@ public abstract class AbstractMeasurementRangeEditor extends DynamicForm impleme private SpacerItem space;
private ButtonItem setButton; - public static String ENABLE_RANGE_ITEM = "ENABLE_RANGE_ITEM"; - public static String ADVANCED_BUTTON_ITEM = "advanced"; - public static String SIMPLE_VALUE_ITEM = "lastValues"; - public static String SIMPLE_UNIT_ITEM = "lastUnits"; - public static String ADVANCED_START_ITEM = "start"; - public static String ADVANCED_END_ITEM = "end"; - public static String SET_ITEM = "set"; + public static final String ENABLE_RANGE_ITEM = "ENABLE_RANGE_ITEM"; + public static final String ADVANCED_BUTTON_ITEM = "advanced"; + public static final String SIMPLE_VALUE_ITEM = "lastValues"; + public static final String SIMPLE_UNIT_ITEM = "lastUnits"; + public static final String ADVANCED_START_ITEM = "start"; + public static final String ADVANCED_END_ITEM = "end"; + public static final String SET_ITEM = "set";
static { Messages MSG = CoreGUI.getMessages(); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/FavoriteGroupsPortlet.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/FavoriteGroupsPortlet.java index e9ab2c5..bf082b2 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/FavoriteGroupsPortlet.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/FavoriteGroupsPortlet.java @@ -118,7 +118,7 @@ public class FavoriteGroupsPortlet extends ResourceGroupListView implements Auto }
public static final class Factory implements PortletViewFactory { - public static PortletViewFactory INSTANCE = new Factory(); + public static final PortletViewFactory INSTANCE = new Factory();
public final Portlet getInstance(EntityContext context) {
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/GroupBundleDeploymentsPortlet.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/GroupBundleDeploymentsPortlet.java index 6432aa3..0ba8c26 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/GroupBundleDeploymentsPortlet.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/GroupBundleDeploymentsPortlet.java @@ -133,7 +133,7 @@ public class GroupBundleDeploymentsPortlet extends EnhancedVLayout implements Cu }
public static final class Factory implements PortletViewFactory { - public static PortletViewFactory INSTANCE = new Factory(); + public static final PortletViewFactory INSTANCE = new Factory();
public final Portlet getInstance(EntityContext context) {
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/GroupConfigurationUpdatesPortlet.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/GroupConfigurationUpdatesPortlet.java index 7c3bbb3..57bfa25 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/GroupConfigurationUpdatesPortlet.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/GroupConfigurationUpdatesPortlet.java @@ -222,7 +222,7 @@ public class GroupConfigurationUpdatesPortlet extends EnhancedVLayout implements }
public static final class Factory implements PortletViewFactory { - public static PortletViewFactory INSTANCE = new Factory(); + public static final PortletViewFactory INSTANCE = new Factory();
public final Portlet getInstance(EntityContext context) {
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/GroupEventsPortlet.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/GroupEventsPortlet.java index 596d5e3..50747a6 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/GroupEventsPortlet.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/GroupEventsPortlet.java @@ -138,7 +138,7 @@ public class GroupEventsPortlet extends EnhancedVLayout implements CustomSetting }
public static final class Factory implements PortletViewFactory { - public static PortletViewFactory INSTANCE = new Factory(); + public static final PortletViewFactory INSTANCE = new Factory();
public final Portlet getInstance(EntityContext context) {
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/GroupMetricsPortlet.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/GroupMetricsPortlet.java index 5ac5e9e..ca07fe8 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/GroupMetricsPortlet.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/GroupMetricsPortlet.java @@ -162,7 +162,7 @@ public class GroupMetricsPortlet extends EnhancedVLayout implements CustomSettin }
public static final class Factory implements PortletViewFactory { - public static PortletViewFactory INSTANCE = new Factory(); + public static final PortletViewFactory INSTANCE = new Factory();
public final Portlet getInstance(EntityContext context) {
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/GroupOobsPortlet.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/GroupOobsPortlet.java index 2be2613..0641b68 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/GroupOobsPortlet.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/GroupOobsPortlet.java @@ -132,7 +132,7 @@ public class GroupOobsPortlet extends EnhancedVLayout implements CustomSettingsP }
public static final class Factory implements PortletViewFactory { - public static PortletViewFactory INSTANCE = new Factory(); + public static final PortletViewFactory INSTANCE = new Factory();
public final Portlet getInstance(EntityContext context) {
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/GroupOperationsPortlet.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/GroupOperationsPortlet.java index e65e90c..1236778 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/GroupOperationsPortlet.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/GroupOperationsPortlet.java @@ -294,7 +294,7 @@ public class GroupOperationsPortlet extends EnhancedVLayout implements CustomSet }
public static final class Factory implements PortletViewFactory { - public static PortletViewFactory INSTANCE = new Factory(); + public static final PortletViewFactory INSTANCE = new Factory();
public final Portlet getInstance(EntityContext context) {
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/GroupPkgHistoryPortlet.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/GroupPkgHistoryPortlet.java index f6a89c3..1ced7e3 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/GroupPkgHistoryPortlet.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/GroupPkgHistoryPortlet.java @@ -130,7 +130,7 @@ public class GroupPkgHistoryPortlet extends EnhancedVLayout implements CustomSet }
public static final class Factory implements PortletViewFactory { - public static PortletViewFactory INSTANCE = new Factory(); + public static final PortletViewFactory INSTANCE = new Factory();
public final Portlet getInstance(EntityContext context) {
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/inventory/groups/graph/ResourceGroupD3GraphPortlet.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/inventory/groups/graph/ResourceGroupD3GraphPortlet.java index b19d761..89dfaca 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/inventory/groups/graph/ResourceGroupD3GraphPortlet.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/inventory/groups/graph/ResourceGroupD3GraphPortlet.java @@ -344,7 +344,7 @@ public class ResourceGroupD3GraphPortlet extends MetricD3Graph implements AutoRe }
public static final class Factory implements PortletViewFactory { - public static PortletViewFactory INSTANCE = new Factory(); + public static final PortletViewFactory INSTANCE = new Factory();
public final Portlet getInstance(EntityContext context) {
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/inventory/queue/AutodiscoveryPortlet.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/inventory/queue/AutodiscoveryPortlet.java index 3c8b127..e81c3cd 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/inventory/queue/AutodiscoveryPortlet.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/inventory/queue/AutodiscoveryPortlet.java @@ -191,7 +191,7 @@ public class AutodiscoveryPortlet extends ResourceAutodiscoveryView implements C }
public static final class Factory implements PortletViewFactory { - public static PortletViewFactory INSTANCE = new Factory(); + public static final PortletViewFactory INSTANCE = new Factory();
public final Portlet getInstance(EntityContext context) {
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/inventory/resource/FavoriteResourcesPortlet.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/inventory/resource/FavoriteResourcesPortlet.java index 2aa99c9..1783dec 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/inventory/resource/FavoriteResourcesPortlet.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/inventory/resource/FavoriteResourcesPortlet.java @@ -121,7 +121,7 @@ public class FavoriteResourcesPortlet extends ResourceSearchView implements Auto }
public static final class Factory implements PortletViewFactory { - public static PortletViewFactory INSTANCE = new Factory(); + public static final PortletViewFactory INSTANCE = new Factory();
public final Portlet getInstance(EntityContext context) {
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/inventory/resource/graph/ResourceD3GraphPortlet.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/inventory/resource/graph/ResourceD3GraphPortlet.java index 1854f9f..1913a6e 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/inventory/resource/graph/ResourceD3GraphPortlet.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/inventory/resource/graph/ResourceD3GraphPortlet.java @@ -324,7 +324,7 @@ public class ResourceD3GraphPortlet extends MetricD3Graph implements AutoRefresh }
public static final class Factory implements PortletViewFactory { - public static PortletViewFactory INSTANCE = new Factory(); + public static final PortletViewFactory INSTANCE = new Factory();
@Override public final Portlet getInstance(EntityContext context) { diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/platform/PlatformSummaryPortlet.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/platform/PlatformSummaryPortlet.java index f839d07..785ccbb 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/platform/PlatformSummaryPortlet.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/platform/PlatformSummaryPortlet.java @@ -263,7 +263,7 @@ public class PlatformSummaryPortlet extends Table<PlatformMetricDataSource> impl }
public static final class Factory implements PortletViewFactory { - public static PortletViewFactory INSTANCE = new Factory(); + public static final PortletViewFactory INSTANCE = new Factory();
public final Portlet getInstance(EntityContext context) { return new PlatformSummaryPortlet(); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/recent/alerts/RecentAlertsPortlet.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/recent/alerts/RecentAlertsPortlet.java index e04c8ef..4edf8b2 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/recent/alerts/RecentAlertsPortlet.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/recent/alerts/RecentAlertsPortlet.java @@ -37,7 +37,7 @@ public class RecentAlertsPortlet extends AbstractRecentAlertsPortlet { }
public static final class Factory implements PortletViewFactory { - public static PortletViewFactory INSTANCE = new Factory(); + public static final PortletViewFactory INSTANCE = new Factory();
public final Portlet getInstance(EntityContext context) {
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/recent/operations/OperationHistoryPortlet.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/recent/operations/OperationHistoryPortlet.java index 3a27b54..497c2c8 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/recent/operations/OperationHistoryPortlet.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/recent/operations/OperationHistoryPortlet.java @@ -38,7 +38,7 @@ public class OperationHistoryPortlet extends AbstractOperationHistoryPortlet { }
public static final class Factory implements PortletViewFactory { - public static PortletViewFactory INSTANCE = new Factory(); + public static final PortletViewFactory INSTANCE = new Factory();
public final Portlet getInstance(EntityContext context) {
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/recent/operations/OperationSchedulePortlet.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/recent/operations/OperationSchedulePortlet.java index 949df1a..74c0f14 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/recent/operations/OperationSchedulePortlet.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/recent/operations/OperationSchedulePortlet.java @@ -71,9 +71,9 @@ public class OperationSchedulePortlet extends EnhancedVLayout implements CustomS
public static final String OPERATIONS_RANGE_SCHEDULED_ENABLED = "operations-scheduled-enabled"; public static final String OPERATIONS_RANGE_SCHEDULED = "operations-range-scheduled"; - public static String RANGE_DISABLED_MESSAGE = MSG.view_portlet_operations_disabled(); + public static final String RANGE_DISABLED_MESSAGE = MSG.view_portlet_operations_disabled(); //TODO: change this to use the Smart GWT default value. - public static String RANGE_DISABLED_MESSAGE_DEFAULT = MSG.common_msg_noItemsToShow(); + public static final String RANGE_DISABLED_MESSAGE_DEFAULT = MSG.common_msg_noItemsToShow();
private static final int WIDTH_RECENT_TIME = 150; private static final int WIDTH_RECENT_STATUS = 50; @@ -86,10 +86,10 @@ public class OperationSchedulePortlet extends EnhancedVLayout implements CustomS private ListGrid scheduledOperationsGrid = null;
private ScheduledOperationsDataSource dataSourceScheduled; - public static String unlimited = "-1"; + public static final String unlimited = "-1"; public static final String unlimitedString = MSG.common_label_unlimited(); - public static String defaultValue = "5"; - public static boolean defaultEnabled = true; + public static final String defaultValue = "5"; + public static final boolean defaultEnabled = true;
private Timer refreshTimer;
@@ -348,7 +348,7 @@ public class OperationSchedulePortlet extends EnhancedVLayout implements CustomS }
public static final class Factory implements PortletViewFactory { - public static PortletViewFactory INSTANCE = new Factory(); + public static final PortletViewFactory INSTANCE = new Factory();
public final Portlet getInstance(EntityContext context) {
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/recent/problems/ProblemResourcesPortlet.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/recent/problems/ProblemResourcesPortlet.java index 518ac29..3b45e54 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/recent/problems/ProblemResourcesPortlet.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/recent/problems/ProblemResourcesPortlet.java @@ -288,7 +288,7 @@ public class ProblemResourcesPortlet extends Table<ProblemResourcesDataSource> i }
public static final class Factory implements PortletViewFactory { - public static PortletViewFactory INSTANCE = new Factory(); + public static final PortletViewFactory INSTANCE = new Factory();
public final Portlet getInstance(EntityContext context) {
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/resource/ResourceAlertsPortlet.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/resource/ResourceAlertsPortlet.java index 3e3054f..44b4ebb 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/resource/ResourceAlertsPortlet.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/resource/ResourceAlertsPortlet.java @@ -48,7 +48,7 @@ public class ResourceAlertsPortlet extends AbstractRecentAlertsPortlet { }
public static final class Factory implements PortletViewFactory { - public static PortletViewFactory INSTANCE = new Factory(); + public static final PortletViewFactory INSTANCE = new Factory();
public final Portlet getInstance(EntityContext context) {
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/resource/ResourceBundleDeploymentsPortlet.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/resource/ResourceBundleDeploymentsPortlet.java index c2f21b5..4ac4fb2 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/resource/ResourceBundleDeploymentsPortlet.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/resource/ResourceBundleDeploymentsPortlet.java @@ -63,7 +63,7 @@ public class ResourceBundleDeploymentsPortlet extends GroupBundleDeploymentsPort }
public static final class Factory implements PortletViewFactory { - public static PortletViewFactory INSTANCE = new Factory(); + public static final PortletViewFactory INSTANCE = new Factory();
public final Portlet getInstance(EntityContext context) {
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/resource/ResourceConfigurationUpdatesPortlet.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/resource/ResourceConfigurationUpdatesPortlet.java index 4bad15f..f895fed 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/resource/ResourceConfigurationUpdatesPortlet.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/resource/ResourceConfigurationUpdatesPortlet.java @@ -74,7 +74,7 @@ public class ResourceConfigurationUpdatesPortlet extends GroupConfigurationUpdat }
public static final class Factory implements PortletViewFactory { - public static PortletViewFactory INSTANCE = new Factory(); + public static final PortletViewFactory INSTANCE = new Factory();
public final Portlet getInstance(EntityContext context) {
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/resource/ResourceEventsPortlet.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/resource/ResourceEventsPortlet.java index fc835bf..9f11f84 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/resource/ResourceEventsPortlet.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/resource/ResourceEventsPortlet.java @@ -64,7 +64,7 @@ public class ResourceEventsPortlet extends GroupEventsPortlet { }
public static final class Factory implements PortletViewFactory { - public static PortletViewFactory INSTANCE = new Factory(); + public static final PortletViewFactory INSTANCE = new Factory();
public final Portlet getInstance(EntityContext context) {
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/resource/ResourceMetricsPortlet.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/resource/ResourceMetricsPortlet.java index 3a0c67f..6a80252 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/resource/ResourceMetricsPortlet.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/resource/ResourceMetricsPortlet.java @@ -87,7 +87,7 @@ public class ResourceMetricsPortlet extends GroupMetricsPortlet { }
public static final class Factory implements PortletViewFactory { - public static PortletViewFactory INSTANCE = new Factory(); + public static final PortletViewFactory INSTANCE = new Factory();
public final Portlet getInstance(EntityContext context) {
@@ -120,7 +120,7 @@ public class ResourceMetricsPortlet extends GroupMetricsPortlet {
//result timeframe if enabled PropertySimple property = portletConfig.getSimple(Constant.METRIC_RANGE_ENABLE); - if (null != property && Boolean.valueOf(property.getBooleanValue())) {//then proceed setting + if (null != property && property.getBooleanValue()) {//then proceed setting
boolean isAdvanced = Boolean.valueOf(portletConfig.getSimpleValue(Constant.METRIC_RANGE_BEGIN_END_FLAG, Constant.METRIC_RANGE_BEGIN_END_FLAG_DEFAULT)); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/resource/ResourceOobsPortlet.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/resource/ResourceOobsPortlet.java index 58560ea..51ab846 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/resource/ResourceOobsPortlet.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/resource/ResourceOobsPortlet.java @@ -62,7 +62,7 @@ public class ResourceOobsPortlet extends GroupOobsPortlet { }
public static final class Factory implements PortletViewFactory { - public static PortletViewFactory INSTANCE = new Factory(); + public static final PortletViewFactory INSTANCE = new Factory();
public final Portlet getInstance(EntityContext context) {
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/resource/ResourcePkgHistoryPortlet.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/resource/ResourcePkgHistoryPortlet.java index 7a58867..b619e83 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/resource/ResourcePkgHistoryPortlet.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/resource/ResourcePkgHistoryPortlet.java @@ -60,7 +60,7 @@ public class ResourcePkgHistoryPortlet extends GroupPkgHistoryPortlet { }
public static final class Factory implements PortletViewFactory { - public static PortletViewFactory INSTANCE = new Factory(); + public static final PortletViewFactory INSTANCE = new Factory();
public final Portlet getInstance(EntityContext context) {
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/summary/InventorySummaryPortlet.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/summary/InventorySummaryPortlet.java index 6c05c9b..c42a96e 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/summary/InventorySummaryPortlet.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/summary/InventorySummaryPortlet.java @@ -184,7 +184,7 @@ public class InventorySummaryPortlet extends EnhancedVLayout implements AutoRefr }
public static final class Factory implements PortletViewFactory { - public static PortletViewFactory INSTANCE = new Factory(); + public static final PortletViewFactory INSTANCE = new Factory();
public final Portlet getInstance(EntityContext context) { return new InventorySummaryPortlet(); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/summary/TagCloudPortlet.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/summary/TagCloudPortlet.java index c08319c..1b20d64 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/summary/TagCloudPortlet.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/summary/TagCloudPortlet.java @@ -55,7 +55,7 @@ public class TagCloudPortlet extends TagCloudView implements Portlet { }
public static final class Factory implements PortletViewFactory { - public static PortletViewFactory INSTANCE = new Factory(); + public static final PortletViewFactory INSTANCE = new Factory();
public final Portlet getInstance(EntityContext context) {
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/util/MashupPortlet.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/util/MashupPortlet.java index 69cb503..0a5f574 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/util/MashupPortlet.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/util/MashupPortlet.java @@ -85,7 +85,7 @@ public class MashupPortlet extends HTMLPane implements ConfigurablePortlet { }
public static final class Factory implements PortletViewFactory { - public static PortletViewFactory INSTANCE = new Factory(); + public static final PortletViewFactory INSTANCE = new Factory();
public final Portlet getInstance(EntityContext context) {
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/util/MessagePortlet.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/util/MessagePortlet.java index 335eaac..65b8baf 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/util/MessagePortlet.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/util/MessagePortlet.java @@ -103,7 +103,7 @@ public class MessagePortlet extends HTMLPane implements ConfigurablePortlet { }
public static final class Factory implements PortletViewFactory { - public static PortletViewFactory INSTANCE = new Factory(); + public static final PortletViewFactory INSTANCE = new Factory();
public final Portlet getInstance(EntityContext context) { return new MessagePortlet(); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/detail/summary/AbstractActivityView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/detail/summary/AbstractActivityView.java index b22907e..b657345 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/detail/summary/AbstractActivityView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/detail/summary/AbstractActivityView.java @@ -93,19 +93,19 @@ public abstract class AbstractActivityView extends EnhancedVLayout implements Re protected Canvas recentBundleDeployContent = new Canvas();
//retrieve localized text - public static String RECENT_MEASUREMENTS = MSG.common_title_recent_measurements(); - public static String RECENT_MEASUREMENTS_NONE = MSG.view_resource_inventory_activity_no_recent_metrics(); - public static String RECENT_MEASUREMENTS_GROUP_NONE = MSG.view_group_inventory_activity_no_recent_metrics(); - public static String RECENT_ALERTS = MSG.common_title_recent_alerts(); - public static String RECENT_OOB = MSG.common_title_recent_oob_metrics(); - public static String RECENT_OOB_NONE = MSG.view_resource_inventory_activity_no_recent_oob(); - public static String RECENT_CONFIGURATIONS = MSG.common_title_recent_configuration_updates(); - public static String RECENT_OPERATIONS = MSG.common_title_recent_operations(); - public static String RECENT_EVENTS = MSG.common_title_recent_event_counts(); - public static String RECENT_PKG_HISTORY = MSG.common_title_recent_pkg_history(); - public static String RECENT_BUNDLE_DEPLOY = MSG.common_title_recent_bundle_deployments(); - public static String SEE_MORE = MSG.common_msg_see_more(); - public static String RECENT_CRITERIA_EVENTS_NONE = MSG.view_resource_inventory_activity_criteria_no_recent_events(); + public static final String RECENT_MEASUREMENTS = MSG.common_title_recent_measurements(); + public static final String RECENT_MEASUREMENTS_NONE = MSG.view_resource_inventory_activity_no_recent_metrics(); + public static final String RECENT_MEASUREMENTS_GROUP_NONE = MSG.view_group_inventory_activity_no_recent_metrics(); + public static final String RECENT_ALERTS = MSG.common_title_recent_alerts(); + public static final String RECENT_OOB = MSG.common_title_recent_oob_metrics(); + public static final String RECENT_OOB_NONE = MSG.view_resource_inventory_activity_no_recent_oob(); + public static final String RECENT_CONFIGURATIONS = MSG.common_title_recent_configuration_updates(); + public static final String RECENT_OPERATIONS = MSG.common_title_recent_operations(); + public static final String RECENT_EVENTS 
= MSG.common_title_recent_event_counts(); + public static final String RECENT_PKG_HISTORY = MSG.common_title_recent_pkg_history(); + public static final String RECENT_BUNDLE_DEPLOY = MSG.common_title_recent_bundle_deployments(); + public static final String SEE_MORE = MSG.common_msg_see_more(); + public static final String RECENT_CRITERIA_EVENTS_NONE = MSG.view_resource_inventory_activity_criteria_no_recent_events(); public static final String CHART_TITLE = MSG.common_title_metric_chart();
private ResourceGroupComposite groupComposite = null; diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/OverviewForm.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/OverviewForm.java index 5f76555..5ae201c 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/OverviewForm.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/OverviewForm.java @@ -131,7 +131,7 @@ public class OverviewForm extends EnhancedDynamicForm {
private void buildForm(ResourceType type) { StringLengthValidator notEmptyOrNullValidator = new StringLengthValidator(1, null, false); - StringLengthValidator notNullValidator = new StringLengthValidator(null, null, false); + //StringLengthValidator notNullValidator = new StringLengthValidator(null, null, false);
List<FormItem> formItems = new ArrayList<FormItem>();
@@ -320,7 +320,7 @@ public class OverviewForm extends EnhancedDynamicForm {
Collections.sort(summaryTraitDefs, new Comparator<MeasurementDefinition>() { public int compare(MeasurementDefinition md1, MeasurementDefinition md2) { - return new Integer(md1.getDisplayOrder()).compareTo(md2.getDisplayOrder()); + return Integer.valueOf(md1.getDisplayOrder()).compareTo(md2.getDisplayOrder()); } });
commit c0adb33418b870b168d72d682225744664bea1b7 Author: Jay Shaughnessy jshaughn@redhat.com Date: Wed Jul 31 14:19:18 2013 -0400
Fine-grained bundle perms : checkpoint check-in - finished initial work-through of remote/local API methods and the associated coding to support the updated permission scheme - Global.MANAGE_BUNDLE is no longer used as an explicit permission but now only as a convenience for setting all of the new permissions. - merged the "FindXxxByCriteriaWithDestinationFilter" local methods into their respective "FindXxxByCriteria" methods. The base methods should be handling this filtering, otherwise they basically have a security hole.
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/bundle/Bundle.java b/modules/core/domain/src/main/java/org/rhq/core/domain/bundle/Bundle.java index 3a8569f..5af51af 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/bundle/Bundle.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/bundle/Bundle.java @@ -57,7 +57,7 @@ import org.rhq.core.domain.tagging.Tag; * @author John Mazzitelli */ @Entity -@NamedQueries( { +@NamedQueries({ // Below queries primarily used for domain test code. @NamedQuery(name = Bundle.QUERY_FIND_ALL, query = "SELECT b FROM Bundle b"), // @NamedQuery(name = Bundle.QUERY_FIND_BY_NAME, query = "SELECT b FROM Bundle b WHERE :name = b.name"), @@ -182,6 +182,29 @@ public class Bundle implements Serializable { this.bundleVersions = bundleVersions; }
+ public Set<BundleGroup> getBundleGroups() { + return bundleGroups; + } + + public void setBundleGroups(Set<BundleGroup> bundleGroups) { + this.bundleGroups = bundleGroups; + } + + public void addBundleGroup(BundleGroup bundleGroup) { + if (this.bundleGroups == null) { + this.bundleGroups = new HashSet<BundleGroup>(); + } + this.bundleGroups.add(bundleGroup); + } + + public boolean removeBundleGroup(BundleGroup bundleGroup) { + if (this.bundleGroups != null) { + return this.bundleGroups.remove(bundleGroup); + } else { + return false; + } + } + public List<BundleDestination> getDestinations() { return destinations; } diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/BundleCriteria.java b/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/BundleCriteria.java index 5e6f8c1..32f2f9d 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/BundleCriteria.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/BundleCriteria.java @@ -145,6 +145,12 @@ public class BundleCriteria extends TaggedCriteria { this.fetchBundleVersions = fetchBundleVersions; }
+ /** + * Unless called by an InventoryManager the destinations will be filtered to those the caller can view, based + * on the resource groups associated with his roles and the group associated wuth the destination. + * + * @param fetchDestinations + */ public void fetchDestinations(boolean fetchDestinations) { this.fetchDestinations = fetchDestinations; } @@ -166,4 +172,9 @@ public class BundleCriteria extends TaggedCriteria { addSortField("description"); this.sortDescription = sortDescription; } + + public boolean isInventoryManagerRequired() { + return fetchDestinations; + } + } diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/BundleResourceDeploymentCriteria.java b/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/BundleResourceDeploymentCriteria.java index 7ec26d7..4924d6f 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/BundleResourceDeploymentCriteria.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/BundleResourceDeploymentCriteria.java @@ -18,15 +18,12 @@ */ package org.rhq.core.domain.criteria;
-import java.util.List; - import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement;
import org.rhq.core.domain.bundle.BundleDeploymentStatus; import org.rhq.core.domain.bundle.BundleResourceDeployment; -import org.rhq.core.domain.util.CriteriaUtils;
/** * @author Jay Shaughnessy @@ -85,11 +82,7 @@ public class BundleResourceDeploymentCriteria extends Criteria {
/** * By setting this fetch to true, it will cause {@link #isInventoryManagerRequired()} to return true as well. - * - * In practice, however, MANAGE_INVENTORY is too restrictive; a bundle manager who has MANAGE_BUNDLE but not - * MANAGE_INVENTORY would not be able to see any resource deployments. So, until it is possible to handle granular - * authorization checks on optionally fetched criteria data, a bundle manager will be allowed to see resource - * deployments to any platform. + * Without Inventory Manager permission results will be limited to only the resources viewable to the caller. * * @see org.rhq.enterprise.server.bundle.BundleManagerBean#findBundleResourceDeploymentsByCriteria(org.rhq.core.domain.auth.Subject, BundleResourceDeploymentCriteria) */ diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/BundleVersionCriteria.java b/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/BundleVersionCriteria.java index 0bc999f..b5dd0e4 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/BundleVersionCriteria.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/BundleVersionCriteria.java @@ -79,6 +79,12 @@ public class BundleVersionCriteria extends TaggedCriteria { this.fetchBundle = fetchBundle; }
+ /** + * Unless called by an InventoryManager the deployments will be filtered to those the caller can view, based + * on the resource groups associated with his roles and the destination associated with the deployments. + * + * @param fetchDestinations + */ public void fetchBundleDeployments(boolean fetchBundleDeployments) { this.fetchBundleDeployments = fetchBundleDeployments; } @@ -91,4 +97,8 @@ public class BundleVersionCriteria extends TaggedCriteria { this.fetchConfigurationDefinition = fetchConfigurationDefinition; }
+ public boolean isInventoryManagerRequired() { + return fetchBundleDeployments; + } + } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/bundle/deployment/BundleDeploymentDataSource.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/bundle/deployment/BundleDeploymentDataSource.java index 3f71fb1..576f8a8 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/bundle/deployment/BundleDeploymentDataSource.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/bundle/deployment/BundleDeploymentDataSource.java @@ -99,7 +99,7 @@ public class BundleDeploymentDataSource extends RPCDataSource<BundleDeployment, @Override protected void executeFetch(final DSRequest request, final DSResponse response, final BundleDeploymentCriteria criteria) { - bundleService.findBundleDeploymentsByCriteriaWithDestinationFilter(criteria, new AsyncCallback<PageList<BundleDeployment>>() { + bundleService.findBundleDeploymentsByCriteria(criteria, new AsyncCallback<PageList<BundleDeployment>>() { public void onFailure(Throwable caught) { CoreGUI.getErrorHandler().handleError(MSG.view_bundle_deploy_loadDeployFailure(), caught); } @@ -109,7 +109,6 @@ public class BundleDeploymentDataSource extends RPCDataSource<BundleDeployment, processResponse(request.getRequestId(), response); } }); - }
@Override diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/bundle/list/BundleView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/bundle/list/BundleView.java index bd54a07..a3bc0d8 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/bundle/list/BundleView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/bundle/list/BundleView.java @@ -62,9 +62,9 @@ import org.rhq.enterprise.gui.coregui.client.components.tagging.TagsChangedCallb import org.rhq.enterprise.gui.coregui.client.gwt.BundleGWTServiceAsync; import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; import org.rhq.enterprise.gui.coregui.client.util.StringUtility; -import org.rhq.enterprise.gui.coregui.client.util.message.Message; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedIButton; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; +import org.rhq.enterprise.gui.coregui.client.util.message.Message;
public class BundleView extends EnhancedVLayout implements BookmarkableView { private DynamicForm form; @@ -277,7 +277,7 @@ public class BundleView extends EnhancedVLayout implements BookmarkableView { criteria.fetchDestinations(true); criteria.fetchTags(true);
- GWTServiceLookup.getBundleService().findBundlesByCriteriaWithDestinationFilter(criteria, + GWTServiceLookup.getBundleService().findBundlesByCriteria(criteria, new AsyncCallback<PageList<Bundle>>() { public void onFailure(Throwable caught) { CoreGUI.getErrorHandler().handleError(MSG.view_bundle_list_error3(), caught); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/bundle/version/BundleVersionView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/bundle/version/BundleVersionView.java index 52f73f3..3ac67dd 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/bundle/version/BundleVersionView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/bundle/version/BundleVersionView.java @@ -58,9 +58,9 @@ import org.rhq.enterprise.gui.coregui.client.components.tagging.TagEditorView; import org.rhq.enterprise.gui.coregui.client.components.tagging.TagsChangedCallback; import org.rhq.enterprise.gui.coregui.client.gwt.BundleGWTServiceAsync; import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; -import org.rhq.enterprise.gui.coregui.client.util.message.Message; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedIButton; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; +import org.rhq.enterprise.gui.coregui.client.util.message.Message;
/** * @author Greg Hinkle @@ -268,7 +268,7 @@ public class BundleVersionView extends EnhancedVLayout implements BookmarkableVi criteria.fetchConfigurationDefinition(true); criteria.fetchTags(true);
- bundleManager.findBundleVersionsByCriteriaWithDestinationFilter(criteria, + bundleManager.findBundleVersionsByCriteria(criteria, new AsyncCallback<PageList<BundleVersion>>() { public void onFailure(Throwable caught) { CoreGUI.getErrorHandler().handleError(MSG.view_bundle_version_loadFailure(), caught); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/BundleGWTService.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/BundleGWTService.java index cd108b7..2a45f65 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/BundleGWTService.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/BundleGWTService.java @@ -47,8 +47,6 @@ public interface BundleGWTService extends RemoteService {
ResourceTypeBundleConfiguration getResourceTypeBundleConfiguration(int compatGroupId) throws RuntimeException;
- BundleVersion createBundleVersion(int bundleId, String name, String version, String recipe) throws RuntimeException; - BundleVersion createBundleVersionViaURL(String url, String username, String password) throws RuntimeException;
BundleVersion createBundleVersionViaRecipe(String recipe) throws RuntimeException; @@ -74,14 +72,9 @@ public interface BundleGWTService extends RemoteService {
PageList<BundleGroup> findBundleGroupsByCriteria(BundleGroupCriteria criteria) throws RuntimeException;
- PageList<Bundle> findBundlesByCriteriaWithDestinationFilter(BundleCriteria criteria) throws RuntimeException; - PageList<BundleDeployment> findBundleDeploymentsByCriteria(BundleDeploymentCriteria criteria) throws RuntimeException;
- PageList<BundleDeployment> findBundleDeploymentsByCriteriaWithDestinationFilter(BundleDeploymentCriteria criteria) - throws RuntimeException; - PageList<BundleDestination> findBundleDestinationsByCriteria(BundleDestinationCriteria criteria) throws RuntimeException;
@@ -92,8 +85,6 @@ public interface BundleGWTService extends RemoteService {
PageList<BundleVersion> findBundleVersionsByCriteria(BundleVersionCriteria criteria) throws RuntimeException;
- PageList<BundleVersion> findBundleVersionsByCriteriaWithDestinationFilter(BundleVersionCriteria criteria) throws RuntimeException; - PageList<BundleWithLatestVersionComposite> findBundlesWithLatestVersionCompositesByCriteria(BundleCriteria criteria) throws RuntimeException;
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/BundleGWTServiceImpl.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/BundleGWTServiceImpl.java index 88458b8..01b17a1 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/BundleGWTServiceImpl.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/BundleGWTServiceImpl.java @@ -110,18 +110,6 @@ public class BundleGWTServiceImpl extends AbstractGWTServiceImpl implements Bund }
@Override - public BundleVersion createBundleVersion(int bundleId, String name, String version, String recipe) - throws RuntimeException { - try { - BundleVersion results = bundleManager.createBundleVersion(getSessionSubject(), bundleId, name, null, - version, recipe); - return SerialUtility.prepare(results, "createBundleVersion"); - } catch (Throwable t) { - throw getExceptionToThrowToClient(t); - } - } - - @Override public void deleteBundles(int[] bundleIds) throws RuntimeException { try { bundleManager.deleteBundles(getSessionSubject(), bundleIds); @@ -246,16 +234,6 @@ public class BundleGWTServiceImpl extends AbstractGWTServiceImpl implements Bund }
@Override - public PageList<Bundle> findBundlesByCriteriaWithDestinationFilter(BundleCriteria criteria) throws RuntimeException { - try { - PageList<Bundle> results = bundleManager.findBundlesByCriteriaWithDestinationFilter(getSessionSubject(), criteria); - return SerialUtility.prepare(results, "findBundlesByCriteriaWithDestinationFilter"); - } catch (Throwable t) { - throw getExceptionToThrowToClient(t); - } - } - - @Override public PageList<BundleDeployment> findBundleDeploymentsByCriteria(BundleDeploymentCriteria criteria) throws RuntimeException { try { @@ -268,18 +246,6 @@ public class BundleGWTServiceImpl extends AbstractGWTServiceImpl implements Bund }
@Override - public PageList<BundleDeployment> findBundleDeploymentsByCriteriaWithDestinationFilter(BundleDeploymentCriteria criteria) - throws RuntimeException { - try { - PageList<BundleDeployment> result = bundleManager.findBundleDeploymentsByCriteriaWithDestinationFilter(getSessionSubject(), - criteria); - return SerialUtility.prepare(result, "BundleService.findBundleDeploymentsByCriteriaWithDestinationFilter"); - } catch (Throwable t) { - throw getExceptionToThrowToClient(t); - } - } - - @Override public PageList<BundleDestination> findBundleDestinationsByCriteria(BundleDestinationCriteria criteria) throws RuntimeException { try { @@ -324,16 +290,6 @@ public class BundleGWTServiceImpl extends AbstractGWTServiceImpl implements Bund }
@Override - public PageList<BundleVersion> findBundleVersionsByCriteriaWithDestinationFilter(BundleVersionCriteria criteria) throws RuntimeException { - try { - PageList<BundleVersion> results = bundleManager.findBundleVersionsByCriteriaWithDestinationFilter(getSessionSubject(), criteria); - return SerialUtility.prepare(results, "findBundleVersionsByCriteriaWithDestinationFilter"); - } catch (Throwable t) { - throw getExceptionToThrowToClient(t); - } - } - - @Override public PageList<BundleWithLatestVersionComposite> findBundlesWithLatestVersionCompositesByCriteria( BundleCriteria criteria) throws RuntimeException { try { diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/BundleManagerBeanTest.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/BundleManagerBeanTest.java index f60d426..c4d9a79 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/BundleManagerBeanTest.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/BundleManagerBeanTest.java @@ -860,7 +860,7 @@ public class BundleManagerBeanTest extends AbstractEJB3Test { int size = brd.getBundleResourceDeploymentHistories().size(); assertTrue(size > 0); String auditMessage = "BundleTest-Message"; - bundleManager.addBundleResourceDeploymentHistory(overlord, brd.getId(), new BundleResourceDeploymentHistory( + bundleManager.addBundleResourceDeploymentHistoryInNewTrans(overlord, brd.getId(), new BundleResourceDeploymentHistory( overlord.getName(), auditMessage, auditMessage, BundleResourceDeploymentHistory.Category.DEPLOY_STEP, BundleResourceDeploymentHistory.Status.SUCCESS, auditMessage, auditMessage));
@@ -1242,7 +1242,7 @@ public class BundleManagerBeanTest extends AbstractEJB3Test { final BundleType type = createBundleType(name); final String recipe = "deploy -f " + TEST_PREFIX + ".zip -d @@ test.path @@"; final BundleVersion bundleVerison = bundleManager.createBundleAndBundleVersion(overlord, fullName, - "description", type.getId(), fullName, fullName + "-desc", "3.0", recipe); + "description", type.getId(), 0, fullName, fullName + "-desc", "3.0", recipe); assertNotNull(bundleVerison);
// find the previously created bundle @@ -1272,7 +1272,7 @@ public class BundleManagerBeanTest extends AbstractEJB3Test { final String fullName = TEST_PREFIX + "-bundle-" + name; final String recipe = "deploy -f " + TEST_PREFIX + ".zip -d @@ test.path @@"; final BundleVersion bundleVerison = bundleManager.createBundleAndBundleVersion(overlord, fullName, - "description", bundle.getBundleType().getId(), fullName, fullName + "-desc", "3.0", recipe); + "description", bundle.getBundleType().getId(), 0, fullName, fullName + "-desc", "3.0", recipe);
// find the newly created bundle BundleCriteria c = new BundleCriteria(); @@ -1284,6 +1284,12 @@ public class BundleManagerBeanTest extends AbstractEJB3Test { assertEquals(1, bundles.size()); }
+ @Test(enabled = false) + public void testNoAuthz() throws Exception { + // create + + } + // helper methods private BundleType createBundleType(String name) throws Exception { final String fullName = TEST_PREFIX + "-type-" + name; @@ -1302,7 +1308,7 @@ public class BundleManagerBeanTest extends AbstractEJB3Test {
private Bundle createBundle(String name, BundleType bt) throws Exception { final String fullName = TEST_PREFIX + "-bundle-" + name; - Bundle b = bundleManager.createBundle(overlord, fullName, fullName + "-desc", bt.getId()); + Bundle b = bundleManager.createBundle(overlord, fullName, fullName + "-desc", bt.getId(), 0);
assert b.getId() > 0; assert b.getName().endsWith(fullName); diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/resource/metadata/ResourceMetadataManagerBeanTest.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/resource/metadata/ResourceMetadataManagerBeanTest.java index 48649d2..46f2bef 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/resource/metadata/ResourceMetadataManagerBeanTest.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/resource/metadata/ResourceMetadataManagerBeanTest.java @@ -684,7 +684,7 @@ public class ResourceMetadataManagerBeanTest extends MetadataBeanTest { BundleType bundleType = bundleMgr.getBundleType(subjectMgr.getOverlord(), bundleTypeName); assertNotNull("Cannot create bundle. Unable to find bundle type for [name: " + bundleTypeName + "]", bundleType); Bundle bundle = bundleMgr.createBundle(subjectMgr.getOverlord(), bundleName, "test bundle: " + bundleName, - bundleType.getId()); + bundleType.getId(), 0);
assertNotNull("Failed create bundle for [name: " + bundleName + "]", bundle); } diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/tagging/TagManagerBeanTest.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/tagging/TagManagerBeanTest.java index 83a5dfe..bef8921 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/tagging/TagManagerBeanTest.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/tagging/TagManagerBeanTest.java @@ -259,7 +259,7 @@ public class TagManagerBeanTest extends AbstractEJB3Test { private Bundle createBundle() throws Exception { ResourceType resourceType = SessionTestHelper.createNewResourceType(em); BundleType bundleType = bundleManager.createBundleType(overlord, getRandomString(), resourceType.getId()); - return bundleManager.createBundle(overlord, getRandomString(), getRandomString(), bundleType.getId()); + return bundleManager.createBundle(overlord, getRandomString(), getRandomString(), bundleType.getId(), 0); }
private BundleVersion createBundleVersion() throws Exception { diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerBean.java index 51e59ff..a9882c4 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerBean.java @@ -30,6 +30,7 @@ import java.net.URL; import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; @@ -197,9 +198,8 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot
@Override @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) - @RequiredPermission(Permission.MANAGE_BUNDLE) - public BundleResourceDeploymentHistory addBundleResourceDeploymentHistory(Subject subject, int bundleDeploymentId, - BundleResourceDeploymentHistory history) throws Exception { + public BundleResourceDeploymentHistory addBundleResourceDeploymentHistoryInNewTrans(Subject subject, + int bundleDeploymentId, BundleResourceDeploymentHistory history) throws Exception {
BundleResourceDeployment resourceDeployment = entityManager.find(BundleResourceDeployment.class, bundleDeploymentId); @@ -214,8 +214,8 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
@Override - @RequiredPermission(Permission.MANAGE_BUNDLE) - public Bundle createBundle(Subject subject, String name, String description, int bundleTypeId) throws Exception { + public Bundle createBundle(Subject subject, String name, String description, int bundleTypeId, int bundleGroupId) + throws Exception { if (null == name || "".equals(name.trim())) { throw new IllegalArgumentException("Invalid bundleName: " + name); } @@ -225,8 +225,15 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot throw new IllegalArgumentException("Invalid bundleTypeId: " + bundleTypeId); }
- // create and add the required Repo. the Repo is a detached object which helps in its eventual - // removal. + BundleGroup bundleGroup = null; + if (bundleGroupId > 0) { + bundleGroup = entityManager.find(BundleGroup.class, bundleGroupId); + if (null == bundleGroup) { + throw new IllegalArgumentException("Invalid bundleGroupId: " + bundleGroupId); + } + } + + // create and add the required Repo. the Repo is a detached object which helps in its eventual removal. Repo repo = new Repo(name); repo.setCandidate(false); repo.setSyncSchedule(null); @@ -249,6 +256,9 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot Bundle bundle = new Bundle(name, bundleType, repo, packageType); bundle.setDescription(description); bundle.setPackageType(packageType); + if (null != bundleGroup) { + bundle.addBundleGroup(bundleGroup); + }
log.info("Creating bundle: " + bundle); entityManager.persist(bundle); @@ -258,7 +268,6 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot
@Override @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) - @RequiredPermission(Permission.MANAGE_BUNDLE) public BundleDeployment createBundleDeploymentInNewTrans(Subject subject, int bundleVersionId, int bundleDestinationId, String name, String description, Configuration configuration) throws Exception {
@@ -287,7 +296,7 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot throw new IllegalArgumentException("Invalid bundleDestinationId: " + bundleDestinationId); }
- checkBundleDeploymentAuthz(subject, bundleVersion.getBundle().getId(), bundleDestination.getGroup().getId()); + checkDeployBundleAuthz(subject, bundleVersion.getBundle().getId(), bundleDestination.getGroup().getId());
String name = getBundleDeploymentNameImpl(subject, bundleDestination, bundleVersion, null); return this.createBundleDeploymentImpl(subject, bundleVersion, bundleDestination, name, description, @@ -348,7 +357,7 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot } ResourceGroup group = entityManager.find(ResourceGroup.class, groups.get(0).getId());
- checkBundleDeploymentAuthz(subject, bundle.getId(), groupId); + checkDeployBundleAuthz(subject, bundle.getId(), groupId);
BundleDestination dest = new BundleDestination(bundle, name, group, destBaseDirName, deployDir); dest.setDescription(description); @@ -383,6 +392,8 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot + "] or prevDeploymentId [" + prevDeploymentId + "]"); }
+ checkDeployBundleAuthz(subject, bundleVersion.getBundle().getId(), bundleDestination.getGroup().getId()); + return getBundleDeploymentNameImpl(subject, bundleDestination, bundleVersion, prevDeployment); }
@@ -442,7 +453,7 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
@Override - @RequiredPermission(Permission.MANAGE_BUNDLE) + @RequiredPermission(Permission.CREATE_BUNDLES) public BundleType createBundleType(Subject subject, String name, int resourceTypeId) throws Exception { if (null == name || "".equals(name.trim())) { throw new IllegalArgumentException("Invalid bundleTypeName: " + name); @@ -459,22 +470,21 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
@Override - @RequiredPermission(Permission.MANAGE_BUNDLE) public BundleVersion createBundleAndBundleVersion(Subject subject, String bundleName, String bundleDescription, - int bundleTypeId, String bundleVersionName, String bundleVersionDescription, String version, String recipe) - throws Exception { + int bundleTypeId, int bundleGroupId, String bundleVersionName, String bundleVersionDescription, String version, + String recipe) throws Exception {
// first see if the bundle exists or not; if not, create one BundleCriteria criteria = new BundleCriteria(); criteria.setStrict(true); criteria.addFilterBundleTypeId(Integer.valueOf(bundleTypeId)); criteria.addFilterName(bundleName); - criteria.clearPaging();//disable paging as the code assumes all the results will be returned. + criteria.clearPaging(); //disable paging as the code assumes all the results will be returned.
PageList<Bundle> bundles = findBundlesByCriteria(subject, criteria); Bundle bundle; if (bundles.getTotalSize() == 0) { - bundle = createBundle(subject, bundleName, bundleDescription, bundleTypeId); + bundle = createBundle(subject, bundleName, bundleDescription, bundleTypeId, bundleGroupId); } else { bundle = bundles.get(0); } @@ -487,7 +497,6 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot
@Override @SuppressWarnings("unchecked") - @RequiredPermission(Permission.MANAGE_BUNDLE) public BundleVersion createBundleVersion(Subject subject, int bundleId, String name, String description, String version, String recipe) throws Exception { if (null == name || "".equals(name.trim())) { @@ -533,7 +542,7 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot versionOrder = ((Number) bv[1]).intValue(); needToUpdateOrder = true; } else { - break; // comparision > 0, means our new version is higher than what's in the DB, because we DESC ordered, we can stop + break; // Comparison > 0, means our new version is higher than what's in the DB, because we DESC ordered, we can stop } }
@@ -753,7 +762,8 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot
if (isInitialVersion) { checkCreateInitialBundleVersionAuthz(subject, initialBundleGroupId); - bundle = bundleManager.createBundle(subject, bundleName, bundleDescription, bundleType.getId()); + bundle = bundleManager.createBundle(subject, bundleName, bundleDescription, bundleType.getId(), + initialBundleGroupId); createdBundle = true; } else { bundle = bundles.get(0); @@ -762,8 +772,8 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
// now create the bundle version with the bundle we either found or created - BundleVersion bundleVersion = bundleManager.createBundleVersion(subject, bundle.getId(), name, description, - version, recipe); + BundleVersion bundleVersion = bundleManager.createBundleVersion(subjectManager.getOverlord(), bundle.getId(), + name, description, version, recipe);
// now that we have the bundle version we can actually create the bundle files that were provided in // the bundle distribution @@ -979,7 +989,6 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
@Override - @RequiredPermission(Permission.MANAGE_BUNDLE) public BundleFile addBundleFileViaPackageVersion(Subject subject, int bundleVersionId, String name, int packageVersionId) throws Exception {
@@ -1010,7 +1019,6 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
@Override - @RequiredPermission(Permission.MANAGE_BUNDLE) @TransactionAttribute(TransactionAttributeType.NEVER) public void purgeBundleDestination(final Subject subject, int bundleDestinationId) throws Exception { // find the live bundle deployment for this destination, and get all the resource deployments for that live deployment @@ -1031,6 +1039,13 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot return; // nothing to do }
+ // Although they likely will, the actual resource deployments may no longer match the members of the destination's + // resource group (if group membership has changed). We still use the group for authz because that is the more + // intuitive, manageable way to do this. Otherwise the subject would need view perms on each of the previously + // deployed-to resources, and that may be tricky to provide. + checkDeployBundleAuthz(subject, liveDeployment.getBundleVersion().getBundle().getId(), liveDeployment + .getDestination().getGroup().getId()); + // we need to obtain the bundle type (the remote plugin container needs it). our first criteria can't fetch this deep, we have to do another query. BundleVersionCriteria bvc = new BundleVersionCriteria(); bvc.addFilterId(liveDeployment.getBundleVersion().getId()); @@ -1082,7 +1097,8 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot BundleResourceDeploymentHistory history = new BundleResourceDeploymentHistory(subject.getName(), "Purge Requested", "User [" + subject.getName() + "] requested to purge this deployment", null, BundleResourceDeploymentHistory.Status.SUCCESS, null, null); - bundleManager.addBundleResourceDeploymentHistory(subject, resourceDeploy.getId(), history); + bundleManager.addBundleResourceDeploymentHistoryInNewTrans(subjectManager.getOverlord(), + resourceDeploy.getId(), history);
// get a connection to the agent and tell it to purge the bundle from the file system Subject overlord = subjectManager.getOverlord(); @@ -1116,8 +1132,6 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
@Override - @RequiredPermission(Permission.MANAGE_SECURITY) - // no one should be calling us except overlord public void _finalizePurge(Subject subject, BundleDeployment bundleDeployment, Map<BundleResourceDeployment, String> failedToPurge) throws Exception {
@@ -1154,14 +1168,12 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
@Override - @RequiredPermission(Permission.MANAGE_BUNDLE) public BundleDeployment scheduleBundleDeployment(Subject subject, int bundleDeploymentId, boolean isCleanDeployment) throws Exception { return scheduleBundleDeploymentImpl(subject, bundleDeploymentId, isCleanDeployment, false, null); }
@Override - @RequiredPermission(Permission.MANAGE_BUNDLE) public BundleDeployment scheduleRevertBundleDeployment(Subject subject, int bundleDestinationId, String deploymentDescription, boolean isCleanDeployment) throws Exception {
@@ -1189,6 +1201,9 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot + "] can not be reverted. There is no prior deployment for destinationId [" + bundleDestinationId + "]"); }
+ checkDeployBundleAuthz(subject, liveDeployment.getBundleVersion().getBundle().getId(), liveDeployment + .getDestination().getGroup().getId()); + // A revert is done by deploying a new deployment that mirrors "prevDeployment". It uses the same // bundleVersion, destination and config as prevDeployment. It can have a new name and new desc, and // may opt to clean the deploy dir. It must be a new deployment so that all status/auditing/history starts @@ -1225,6 +1240,8 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot + "] group has no members. Invalid deployment destination"); }
+ checkDeployBundleAuthz(subject, newDeployment.getBundleVersion().getBundle().getId(), group.getId()); + for (Resource groupMember : groupMembers) { try { scheduleBundleResourceDeployment(subject, newDeployment, groupMember, isCleanDeployment, isRevert); @@ -1277,8 +1294,8 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot
// The BundleResourceDeployment record must exist in the db before the agent request because the agent may try // to add History to it during immediate deployments. So, create and persist it (requires a new trans). - BundleResourceDeployment resourceDeployment = bundleManager.createBundleResourceDeployment(subject, - deployment.getId(), bundleTargetResourceId); + BundleResourceDeployment resourceDeployment = bundleManager.createBundleResourceDeploymentInNewTrans( + subjectManager.getOverlord(), deployment.getId(), bundleTargetResourceId);
if (null != bundleTarget.getResourceType().getResourceTypeBundleConfiguration()) {
@@ -1292,7 +1309,8 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot AUDIT_ACTION_DEPLOYMENT_REQUESTED, deployment.getName(), null, BundleResourceDeploymentHistory.Status.SUCCESS, "Requested deployment time: " + request.getRequestedDeployTimeAsString(), null); - bundleManager.addBundleResourceDeploymentHistory(subject, resourceDeployment.getId(), history); + bundleManager.addBundleResourceDeploymentHistoryInNewTrans(subjectManager.getOverlord(), + resourceDeployment.getId(), history);
BundleScheduleResponse response = bundleAgentService.schedule(request);
@@ -1303,7 +1321,8 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot history = new BundleResourceDeploymentHistory(subject.getName(), AUDIT_ACTION_DEPLOYMENT, deployment.getName(), null, BundleResourceDeploymentHistory.Status.FAILURE, response.getErrorMessage(), null); - bundleManager.addBundleResourceDeploymentHistory(subject, resourceDeployment.getId(), history); + bundleManager.addBundleResourceDeploymentHistoryInNewTrans(subject, resourceDeployment.getId(), + history); } } catch (Throwable t) { // fail the unlaunched resource deployment @@ -1311,7 +1330,8 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot this.AUDIT_ACTION_DEPLOYMENT, deployment.getName(), null, BundleResourceDeploymentHistory.Status.FAILURE, "Failed to schedule, agent on [" + bundleTarget + "] may be down: " + t, null); - bundleManager.addBundleResourceDeploymentHistory(subject, resourceDeployment.getId(), failureHistory); + bundleManager.addBundleResourceDeploymentHistoryInNewTrans(subject, resourceDeployment.getId(), + failureHistory); bundleManager.setBundleResourceDeploymentStatus(subject, resourceDeployment.getId(), BundleDeploymentStatus.FAILURE); } @@ -1324,7 +1344,7 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot "Target resource is not of a type that can have bundles deployed to it [resource=" + bundleTarget.getName() + "; id=" + bundleTarget.getId() + "]. Fix target group for destination [" + deployment.getDestination().getName() + "]", null); - bundleManager.addBundleResourceDeploymentHistory(subject, resourceDeployment.getId(), history); + bundleManager.addBundleResourceDeploymentHistoryInNewTrans(subject, resourceDeployment.getId(), history); }
return resourceDeployment; @@ -1332,7 +1352,6 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot
@Override @TransactionAttribute(TransactionAttributeType.NOT_SUPPORTED) - @RequiredPermission(Permission.MANAGE_BUNDLE) public BundleScheduleRequest getScheduleRequest(Subject subject, int resourceDeploymentId, boolean isCleanDeployment, boolean isRevert) throws Exception {
@@ -1391,8 +1410,7 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot
@Override @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) - @RequiredPermission(Permission.MANAGE_BUNDLE) - public BundleResourceDeployment createBundleResourceDeployment(Subject subject, int bundleDeploymentId, + public BundleResourceDeployment createBundleResourceDeploymentInNewTrans(Subject subject, int bundleDeploymentId, int resourceId) throws Exception {
BundleDeployment deployment = entityManager.find(BundleDeployment.class, bundleDeploymentId); @@ -1411,7 +1429,6 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
@Override - @RequiredPermission(Permission.MANAGE_BUNDLE) public BundleResourceDeployment setBundleResourceDeploymentStatus(Subject subject, int resourceDeploymentId, BundleDeploymentStatus status) throws Exception {
@@ -1456,14 +1473,7 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot return resourceDeployment; }
- // @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) - // public BundleGroupDeployment createBundleGroupDeployment(BundleGroupDeployment groupDeployment) throws Exception { - // entityManager.persist(groupDeployment); - // return groupDeployment; - //} - @Override - @RequiredPermission(Permission.MANAGE_BUNDLE) public Set<String> getBundleVersionFilenames(Subject subject, int bundleVersionId, boolean withoutBundleFileOnly) throws Exception {
@@ -1472,6 +1482,8 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot throw new IllegalArgumentException("Invalid bundleVersionId: " + bundleVersionId); }
+ checkCreateBundleVersionAuthz(subject, bundleVersion.getBundle().getId()); + // parse the recipe (validation occurs here) and get the config def and list of files BundleType bundleType = bundleVersion.getBundle().getBundleType(); RecipeParseResults parseResults = BundleManagerHelper.getPluginContainer().getBundleServerPluginManager() @@ -1499,11 +1511,9 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
return result; - }
@Override - @RequiredPermission(Permission.MANAGE_BUNDLE) public HashMap<String, Boolean> getAllBundleVersionFilenames(Subject subject, int bundleVersionId) throws Exception {
BundleVersion bundleVersion = entityManager.find(BundleVersion.class, bundleVersionId); @@ -1511,6 +1521,8 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot throw new IllegalArgumentException("Invalid bundleVersionId: " + bundleVersionId); }
+ checkCreateBundleVersionAuthz(subject, bundleVersion.getBundle().getId()); + // parse the recipe (validation occurs here) and get the config def and list of files BundleType bundleType = bundleVersion.getBundle().getBundleType(); RecipeParseResults parseResults = BundleManagerHelper.getPluginContainer().getBundleServerPluginManager() @@ -1533,7 +1545,6 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
return result; - }
@Override @@ -1557,44 +1568,40 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot @Override public PageList<BundleDeployment> findBundleDeploymentsByCriteria(Subject subject, BundleDeploymentCriteria criteria) { CriteriaQueryGenerator generator = new CriteriaQueryGenerator(subject, criteria); - CriteriaQueryRunner<BundleDeployment> queryRunner = new CriteriaQueryRunner<BundleDeployment>(criteria, - generator, entityManager); - return queryRunner.execute(); - } - - /** - * Fetch bundle deployments by criteria and then filter on destinations on the result objects to limit what the user can see - * @param subject Caller - * @param criteria criteria to fetch the deployments - * @return List of deployments with destinations filtered. - */ - @Override - public PageList<BundleDeployment> findBundleDeploymentsByCriteriaWithDestinationFilter(Subject subject, - BundleDeploymentCriteria criteria) { - - PageList<BundleDeployment> deployments = findBundleDeploymentsByCriteria(subject, criteria); - if (authorizationManager.isInventoryManager(subject)) - return deployments;
- PageList<BundleDeployment> resultingDeployments = new PageList<BundleDeployment>(deployments.getPageControl()); - // We now have visible destinations - go over the resultingDeployments and only include the ones with vis. destinations - for (BundleDeployment deployment : deployments) { + // filter by bundles that are viewable + if (!authorizationManager.hasGlobalPermission(subject, Permission.VIEW_BUNDLES)) { + generator.setAuthorizationResourceFragment(CriteriaQueryGenerator.AuthorizationTokenType.BUNDLE, null, + subject.getId()); + }
- int bundleId = deployment.getBundleVersion().getBundle().getId(); - BundleDestinationCriteria destinationCriteria = new BundleDestinationCriteria(); - destinationCriteria.addFilterBundleId(bundleId); - List<BundleDestination> destinations = findBundleDestinationsByCriteria(subject, destinationCriteria); - if (destinationsContains(destinations, deployment.getDestination())) - resultingDeployments.add(deployment); + CriteriaQueryRunner<BundleDeployment> queryRunner = new CriteriaQueryRunner<BundleDeployment>(criteria, + generator, entityManager); + PageList<BundleDeployment> result = queryRunner.execute(); + + // The result currently holds bundle deployments for bundles viewable by the caller, but we must remove + // those bundle deployments for destinations not viewable by the user. In essence we wanted two authz tokens, + // one for bundle and one for resource group, but we can only supply one. + if (!(result.isEmpty() || authorizationManager.isInventoryManager(subject))) { + + for (Iterator<BundleDeployment> i = result.iterator(); i.hasNext();) { + BundleDeployment bd = i.next(); + int groupId = bd.getDestination().getGroup().getId(); + if (!authorizationManager.canViewGroup(subject, groupId)) { + i.remove(); + } + } } - return resultingDeployments; + + return result; }
@Override public PageList<BundleDestination> findBundleDestinationsByCriteria(Subject subject, BundleDestinationCriteria criteria) { CriteriaQueryGenerator generator = new CriteriaQueryGenerator(subject, criteria); - // Filter by destinations that are viewable + + // Filter by destinations (resource groups) that are viewable if (!authorizationManager.isInventoryManager(subject)) { generator.setAuthorizationResourceFragment(CriteriaQueryGenerator.AuthorizationTokenType.GROUP, subject.getId()); @@ -1610,74 +1617,95 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot
CriteriaQueryGenerator generator = new CriteriaQueryGenerator(subject, criteria);
- if (!authorizationManager.isInventoryManager(subject)) { - if (criteria.isInventoryManagerRequired()) { - // TODO: MANAGE_INVENTORY was too restrictive as a bundle manager could not then - // see his resource deployments. Until we can handle granular authorization checks on - // optionally fetched resource member data, allow a bundle manager to see - // resource deployments to any resource. - if (!authorizationManager.hasGlobalPermission(subject, Permission.MANAGE_BUNDLE)) { - throw new PermissionException("Subject [" + subject.getName() - + "] requires InventoryManager or BundleManager permission for requested query criteria."); - } - } - // TODO limit target groups according to visibility + // filter by bundles that are viewable + if (!authorizationManager.hasGlobalPermission(subject, Permission.VIEW_BUNDLES)) { + generator.setAuthorizationResourceFragment(CriteriaQueryGenerator.AuthorizationTokenType.BUNDLE, null, + subject.getId()); }
CriteriaQueryRunner<BundleResourceDeployment> queryRunner = new CriteriaQueryRunner<BundleResourceDeployment>( criteria, generator, entityManager); + PageList<BundleResourceDeployment> result = queryRunner.execute();
- return queryRunner.execute(); + // if necessary, filter results to resources that are viewable + if (!result.isEmpty() && criteria.isInventoryManagerRequired() + && !authorizationManager.isInventoryManager(subject)) { + + // try to authorize in one call, if that fails then go (slow) fine-grained + List<Integer> resourceIds = new ArrayList<Integer>(result.size()); + for (BundleResourceDeployment brd : result) { + int resourceId = brd.getResource().getId(); + resourceIds.add(resourceId); + } + + if (!authorizationManager.canViewResources(subject, resourceIds)) { + for (Integer resourceId : resourceIds) { + if (!authorizationManager.canViewResource(subject, resourceId)) { + for (Iterator<BundleResourceDeployment> i = result.iterator(); i.hasNext();) { + BundleResourceDeployment brd = i.next(); + if (brd.getResource().getId() == resourceId.intValue()) { + i.remove(); + } + } + } + } + } + } + + return result; }
@Override public PageList<BundleVersion> findBundleVersionsByCriteria(Subject subject, BundleVersionCriteria criteria) { CriteriaQueryGenerator generator = new CriteriaQueryGenerator(subject, criteria); - CriteriaQueryRunner<BundleVersion> queryRunner = new CriteriaQueryRunner<BundleVersion>(criteria, generator, - entityManager); - return queryRunner.execute(); - } - - /** - * Fetch bundle versions by criteria and then filter destination on the result objects to limit what the user can see - * @param subject Caller - * @param criteria criteria to fetch the bundles - * @return List of versions with destinations filtered. - */ - @Override - public PageList<BundleVersion> findBundleVersionsByCriteriaWithDestinationFilter(Subject subject, - BundleVersionCriteria criteria) {
- PageList<BundleVersion> versions = findBundleVersionsByCriteria(subject, criteria); - if (authorizationManager.isInventoryManager(subject)) { - return versions; + if (!authorizationManager.hasGlobalPermission(subject, Permission.VIEW_BUNDLES)) { + generator.setAuthorizationResourceFragment(CriteriaQueryGenerator.AuthorizationTokenType.BUNDLE, null, + subject.getId()); } - // Not inv manager -> restrict visible deployments by visible destinations
- for (BundleVersion version : versions) { + CriteriaQueryRunner<BundleVersion> queryRunner = new CriteriaQueryRunner<BundleVersion>(criteria, generator, + entityManager); + PageList<BundleVersion> result = queryRunner.execute(); + + // If asking for optional data that the subject may not be able to see then ensure that the optional + // data is filtered appropriately. In this case only deployments to destinations viewable by the subject + // can be returned. The result currently holds bundle versions viewable by the caller, but the bundle version + // may have been deployed to destinations for which the user does not have access to the destination's + // resource group. (BZ 694741) + if (!result.isEmpty() && criteria.isInventoryManagerRequired() + && !authorizationManager.isInventoryManager(subject)) { + + // this works because findBundleDestinationsByCriteria() authorizes against resource group associations + + for (BundleVersion bundleVersion : result) { + int numDeployments = bundleVersion.getBundleDeployments().size(); + if (0 == numDeployments) { + continue; + }
- Bundle bundle = version.getBundle(); - BundleDestinationCriteria destinationCriteria = new BundleDestinationCriteria(); - destinationCriteria.addFilterBundleId(bundle.getId()); - destinationCriteria.clearPaging();//disable paging as the code assumes all the results will be returned. + Bundle bundle = bundleVersion.getBundle(); + BundleDestinationCriteria destinationCriteria = new BundleDestinationCriteria(); + destinationCriteria.clearPaging(); //disable paging as the code assumes all the results will be returned. + destinationCriteria.addFilterBundleId(bundle.getId()); + + // get the viewable destinations and use to filter the deployments + List<BundleDestination> destinations = findBundleDestinationsByCriteria(subject, destinationCriteria); + List<BundleDeployment> filteredDeployments = new ArrayList<BundleDeployment>(numDeployments); + entityManager.detach(bundleVersion); // make sure we don't persist the filtered data + for (BundleDeployment deployment : bundleVersion.getBundleDeployments()) { + if (containsDestination(destinations, deployment.getDestination())) + filteredDeployments.add(deployment); + }
- List<BundleDestination> destinations = findBundleDestinationsByCriteria(subject, destinationCriteria); - List<BundleDeployment> resultingDeployments = new ArrayList<BundleDeployment>(version - .getBundleDeployments().size()); - // We now have visible destinations - go over the resultingDeployments and only include the ones with vis. destinations - for (BundleDeployment deployment : version.getBundleDeployments()) { - if (destinationsContains(destinations, deployment.getDestination())) - resultingDeployments.add(deployment); + bundleVersion.setBundleDeployments(filteredDeployments); } - - version.setBundleDeployments(resultingDeployments); }
- return versions; - + return result; }
- private boolean destinationsContains(List<BundleDestination> list, BundleDestination dest) { + private boolean containsDestination(List<BundleDestination> list, BundleDestination dest) { int id = dest.getId(); for (BundleDestination destination : list) { if (destination.getId() == id) @@ -1689,6 +1717,13 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot @Override public PageList<BundleFile> findBundleFilesByCriteria(Subject subject, BundleFileCriteria criteria) { CriteriaQueryGenerator generator = new CriteriaQueryGenerator(subject, criteria); + + // filter by bundles that are viewable + if (!authorizationManager.hasGlobalPermission(subject, Permission.VIEW_BUNDLES)) { + generator.setAuthorizationResourceFragment(CriteriaQueryGenerator.AuthorizationTokenType.BUNDLE, null, + subject.getId()); + } + CriteriaQueryRunner<BundleFile> queryRunner = new CriteriaQueryRunner<BundleFile>(criteria, generator, entityManager); return queryRunner.execute(); @@ -1700,40 +1735,35 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot CriteriaQueryGenerator generator = new CriteriaQueryGenerator(subject, criteria);
if (!authorizationManager.hasGlobalPermission(subject, Permission.VIEW_BUNDLES)) { - generator.setAuthorizationResourceFragment(CriteriaQueryGenerator.AuthorizationTokenType.BUNDLE, null, subject.getId()); }
CriteriaQueryRunner<Bundle> queryRunner = new CriteriaQueryRunner<Bundle>(criteria, generator, entityManager); - return queryRunner.execute(); - } - - /** - * Fetch bundles by criteria and then filter destination on the result objects to limit what the user can see - * @param subject Caller - * @param criteria criteria to fetch the bundles - * @return List of bundles with destinations filtered. - */ - @Override - public PageList<Bundle> findBundlesByCriteriaWithDestinationFilter(Subject subject, BundleCriteria criteria) { - // First get the bundles - PageList<Bundle> bundles = findBundlesByCriteria(subject, criteria); - if (authorizationManager.isInventoryManager(subject)) { - return bundles; - } - // Not inv manager -> restrict visible destinations - PageList<Bundle> result = new PageList<Bundle>(bundles.size(), bundles.getPageControl()); - for (Bundle bundle : bundles.getValues()) { // TODO clone the bundle and return the modified clones - BundleDestinationCriteria destinationCriteria = new BundleDestinationCriteria(); - destinationCriteria.addFilterBundleId(bundle.getId()); - destinationCriteria.clearPaging();//disable paging as the code assumes all the results will be returned. - - List<BundleDestination> destinations = findBundleDestinationsByCriteria(subject, destinationCriteria); - bundle.setDestinations(destinations); + PageList<Bundle> result = queryRunner.execute(); + + // If asking for optional data that the subject may not be able to see then ensure that the optional + // data is filtered appropriately. In this case only destinations viewable by the subject can be returned. + // The result currently holds bundles viewable by the caller, but the bundle may have been deployed to + // destinations for which the user does not have access to the destination's resource group. 
(BZ 694741) + if (!result.isEmpty() && criteria.isInventoryManagerRequired() + && !authorizationManager.isInventoryManager(subject)) { + + // this works because findBundleDestinationsByCriteria() authorizes against resource group associations + for (Bundle bundle : result) { + if (bundle.getDestinations().isEmpty()) { + continue; + } + BundleDestinationCriteria destinationCriteria = new BundleDestinationCriteria(); + destinationCriteria.clearPaging(); //disable paging as the code assumes all the results will be returned. + destinationCriteria.addFilterBundleId(bundle.getId()); + List<BundleDestination> destinations = findBundleDestinationsByCriteria(subject, destinationCriteria); + entityManager.detach(bundle); // make sure the narrowed set of destinations does not get persisted + bundle.setDestinations(destinations); + } }
- return bundles; + return result; }
@Override @@ -1750,6 +1780,11 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot + " ( SELECT COUNT(bv3) FROM bundle.bundleVersions bv3 WHERE bv3.bundle.id = bundle.id) AS deploymentCount ) "; generator.alterProjection(replacementSelectList);
+ if (!authorizationManager.hasGlobalPermission(subject, Permission.VIEW_BUNDLES)) { + generator.setAuthorizationResourceFragment(CriteriaQueryGenerator.AuthorizationTokenType.BUNDLE, null, + subject.getId()); + } + CriteriaQueryRunner<BundleWithLatestVersionComposite> queryRunner = new CriteriaQueryRunner<BundleWithLatestVersionComposite>( criteria, generator, entityManager); PageList<BundleWithLatestVersionComposite> results = queryRunner.execute(); @@ -1771,13 +1806,14 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot
@Override @SuppressWarnings("unchecked") - @RequiredPermission(Permission.MANAGE_BUNDLE) public void deleteBundle(Subject subject, int bundleId) throws Exception { Bundle bundle = this.entityManager.find(Bundle.class, bundleId); if (null == bundle) { return; }
+ checkDeleteBundleAuthz(subject, bundleId); + Query q = entityManager.createNamedQuery(BundleVersion.QUERY_FIND_BY_BUNDLE_ID); q.setParameter("bundleId", bundleId); List<BundleVersion> bvs = q.getResultList(); @@ -1797,12 +1833,15 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
@Override - @RequiredPermission(Permission.MANAGE_BUNDLE) public void deleteBundleDeployment(Subject subject, int bundleDeploymentId) throws Exception { BundleDeployment doomed = this.entityManager.find(BundleDeployment.class, bundleDeploymentId); if (null == doomed) { return; } + + checkDeployBundleAuthz(subject, doomed.getBundleVersion().getBundle().getId(), doomed.getDestination() + .getGroup().getId()); + // only allow deployments to be deleted if they are finished if (BundleDeploymentStatus.SUCCESS == doomed.getStatus() || BundleDeploymentStatus.FAILURE == doomed.getStatus() @@ -1814,13 +1853,14 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
@Override - @RequiredPermission(Permission.MANAGE_BUNDLE) public void deleteBundleDestination(Subject subject, int destinationId) throws Exception { BundleDestination doomed = this.entityManager.find(BundleDestination.class, destinationId); if (null == doomed) { return; }
+ checkDeployBundleAuthz(subject, doomed.getBundle().getId(), doomed.getGroup().getId()); + // deployments replace other deployments and have a self-referring FK. The deployments // need to be removed in a way that will ensure that a replaced deployment is not removed // prior to the replacer. To do this we'll just blanket update all the doomed deployments @@ -1834,26 +1874,26 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
@Override - @RequiredPermission(Permission.MANAGE_BUNDLE) public void deleteBundleVersion(Subject subject, int bundleVersionId, boolean deleteBundleIfEmpty) throws Exception { BundleVersion bundleVersion = this.entityManager.find(BundleVersion.class, bundleVersionId); if (null == bundleVersion) { return; }
+ int bundleId = bundleVersion.getBundle().getId(); + + checkDeleteBundleAuthz(subject, bundleId); + // After we delete this bundle version, this is the version order value that is being removed. // Later we need to re-order the other bundles that are newer than this so their version orders are readjusted. int doomedBundleVersionOrder = bundleVersion.getVersionOrder();
- int bundleId = bundleVersion.getBundle().getId(); - // deployments replace other deployments and have a self-referring FK. The deployments // need to be removed in a way that will ensure that a replaced deployment is not removed // prior to the replacer. To do this we'll just blanket update all the doomed deployments // to break the FK dependency with nulls. Query q = entityManager.createNamedQuery(BundleDeployment.QUERY_UPDATE_FOR_VERSION_REMOVE); q.setParameter("bundleVersionId", bundleVersionId); - @SuppressWarnings("unused") int rowsUpdated = q.executeUpdate(); entityManager.flush();
@@ -1970,10 +2010,10 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot public PageList<BundleGroup> findBundleGroupsByCriteria(Subject subject, BundleGroupCriteria criteria) { CriteriaQueryGenerator generator = new CriteriaQueryGenerator(subject, criteria);
- if (!authorizationManager.hasGlobalPermission(subject, Permission.VIEW_BUNDLES)) { - - generator.setAuthorizationResourceFragment(CriteriaQueryGenerator.AuthorizationTokenType.BUNDLE, null, - subject.getId()); + // filter by bundle groups that are viewable + if (!authorizationManager.hasGlobalPermission(subject, Permission.MANAGE_BUNDLE_GROUPS)) { + generator.setAuthorizationResourceFragment(CriteriaQueryGenerator.AuthorizationTokenType.BUNDLE_GROUP, + null, subject.getId()); }
CriteriaQueryRunner<BundleGroup> queryRunner = new CriteriaQueryRunner<BundleGroup>(criteria, generator, @@ -1983,11 +2023,29 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot
@Override public void unassignBundlesFromBundleGroup(Subject subject, int bundleGroupId, int[] bundleIds) { - // TODO Auto-generated method stub + BundleGroup bundleGroup = entityManager.find(BundleGroup.class, bundleGroupId); + if (null == bundleGroup) { + throw new IllegalArgumentException("BundleGroup does not exist for bundleGroupId [" + bundleGroupId + "]"); + } + + checkAssignBundleGroupAuthz(subject, bundleGroupId, bundleIds);
+ for (int bundleId : bundleIds) { + Bundle bundle = entityManager.find(Bundle.class, bundleId); + if (null == bundle) { + throw new IllegalArgumentException("Bundle does not exist for bundleId [" + bundleId + "]"); + } + + bundleGroup.removeBundle(bundle); + } }
/** + * Required Permissions: Either: + * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES + * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG + * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG + * * @param subject * @param bundleGroupId null or 0 for unassigned initial bundle version creation * @throws PermissionException @@ -2026,6 +2084,11 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
/** + * Required Permissions: Either: + * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES + * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * * @param subject * @param bundleId required, bundleId of bundle in which bundle version is being created/updated * @throws PermissionException @@ -2062,6 +2125,10 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
/** + * Requires VIEW permission for the relevant bundle and either: + * - Global.CREATE_BUNDLES + * - BundleGroup.CREATE_BUNDLES_IN_GROUP or BundleGroup.ASSIGN_BUNDLES_TO_GROUP for the relevant bundle group + * * @param subject * @param bundleGroupId an existing bundle group * @param bundleIds existing bundles @@ -2109,8 +2176,12 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot return; }
- private void checkBundleDeploymentAuthz(Subject subject, int bundleId, int resourceGroupId) - throws PermissionException { + /** + * Required Permissions: Either: + * - Global.DEPLOY_BUNDLES and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) + * - Resource.DEPLOY_BUNDLES_TO_GROUP and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) + */ + private void checkDeployBundleAuthz(Subject subject, int bundleId, int resourceGroupId) throws PermissionException {
boolean hasResourceGroupView = authorizationManager.hasGroupPermission(subject, Permission.VIEW_RESOURCE, resourceGroupId); @@ -2143,4 +2214,46 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot
return; } + + /** + * Required Permissions: Either: + * - Global.DELETE_BUNDLES and Global.VIEW_BUNDLES + * - Global.DELETE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * - BundleGroup.DELETE_BUNDLES_FROM_GROUP for bundle group BG and the relevant bundle is assigned to BG + * + * @param subject + * @param bundleId required, bundleId of bundle, or the bundle version, being deleted + * @throws PermissionException + */ + private void checkDeleteBundleAuthz(Subject subject, int bundleId) throws PermissionException { + + if (bundleId <= 0) { + throw new IllegalArgumentException( + "Must supply valid bundleId for bundle version being deleted. BundleId specified [" + bundleId + "]"); + } + + Set<Permission> globalPerms = authorizationManager.getExplicitGlobalPermissions(subject); + boolean hasGlobalDeleteBundles = globalPerms.contains(Permission.DELETE_BUNDLES); + + if (hasGlobalDeleteBundles && globalPerms.contains(Permission.VIEW_BUNDLES)) { + return; + } + + if (hasGlobalDeleteBundles) { + if (authorizationManager.hasBundlePermission(subject, Permission.VIEW_BUNDLES_IN_GROUP, bundleId)) { + return; + } + } else { + if (authorizationManager.hasBundlePermission(subject, Permission.DELETE_BUNDLES_FROM_GROUP, bundleId)) { + return; + } + } + + String msg = "Subject [" + + subject.getName() + + "] requires either Global.DELETE_BUNDLES + BundleGroup.VIEW_BUNDLES_IN_GROUP, or BundleGroup.DELETE_BUNDLES_FROM_GROUP, to delete bundle [" + + bundleId + "]."; + throw new PermissionException(msg); + } + } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerLocal.java index b4d7d1b..01ca620 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerLocal.java +++ 
b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerLocal.java @@ -33,10 +33,6 @@ import org.rhq.core.domain.bundle.BundleResourceDeploymentHistory; import org.rhq.core.domain.bundle.BundleType; import org.rhq.core.domain.bundle.BundleVersion; import org.rhq.core.domain.configuration.Configuration; -import org.rhq.core.domain.criteria.BundleCriteria; -import org.rhq.core.domain.criteria.BundleDeploymentCriteria; -import org.rhq.core.domain.criteria.BundleVersionCriteria; -import org.rhq.core.domain.util.PageList;
/** * Local interface to the manager responsible for creating and managing bundles. @@ -55,51 +51,64 @@ public interface BundleManagerLocal extends BundleManagerRemote { // - legacy reasons
/** + * Internal use only + * </p> * Called internally to add history when action is taken against a deployment. This executes * in a New Transaction and supports deployBundle and Agent requests. - * + * </p> + * This method performs NO AUTHZ! + * </p> * @param subject * @param resourceDeploymentId id of the deployment appending the history record * @param history * @return the persisted history */ - BundleResourceDeploymentHistory addBundleResourceDeploymentHistory(Subject subject, int resourceDeploymentId, - BundleResourceDeploymentHistory history) throws Exception; + BundleResourceDeploymentHistory addBundleResourceDeploymentHistoryInNewTrans(Subject subject, + int resourceDeploymentId, BundleResourceDeploymentHistory history) throws Exception;
/** - * Mainly Used For Testing - * + * Internal use only, and test entry point. + * </p> + * This method performs NO AUTHZ! + * </p> * @param subject user that must have proper permissions * @param name not null or empty * @param description optional long description of the bundle * @param bundleTypeId valid bundleType + * @param bundleGroupId bundle group, existing bundle group for bundle assignment, or 0 for unassigned * @return the persisted Bundle (id is assigned) */ - Bundle createBundle(Subject subject, String name, String description, int bundleTypeId) throws Exception; + Bundle createBundle(Subject subject, String name, String description, int bundleTypeId, int bundleGroupId) + throws Exception;
/** - * Mainly Used For Testing + * Internal use only and test entry point. * - * Convienence method that combines {@link #createBundle(Subject, String, int)} and {@link #createBundleVersion(Subject, int, String, String, String)}. + * Convenience method that combines {@link #createBundle(Subject, String, String, int, int)} and {@link #createBundleVersion(Subject, int, String, String, String)}. * This will first check to see if a bundle with the given type/name exists - if it doesn't, it will be created. If it does, it will be reused. * This will then create the bundle version that will be associated with the bundle that was created or found. - * + * </p> + * This method performs NO AUTHZ! + * </p> * @param subject user that must have proper permissions * @param bundleName name of the bundle to use (if not found, it will be created) * @param bundleDescription optional long description of the bundle * @param bundleTypeId the bundle type for the new bundle (if it is created) for which this will be the first version + * @param bundleGroupId the bundle group for the new bundle (if it is created) for which this will be the first version. 0 to leave unassigned. * @param bundleVersionName name of the bundle version * @param bundleVersionDescription optional long description of the bundle version * @param version optional. If not supplied set to 1.0 for first version, or incremented (as best as possible) for subsequent version * @return the persisted BundleVersion (id is assigned) */ BundleVersion createBundleAndBundleVersion(Subject subject, String bundleName, String bundleDescription, - int bundleTypeId, String bundleVersionName, String bundleVersionDescription, String version, String recipe) - throws Exception; + int bundleTypeId, int bundleGroupId, String bundleVersionName, String bundleVersionDescription, String version, + String recipe) throws Exception;
/** - * Mainly Used For Testing - * + * Internal use only, test entry point + * </p> + * This method performs NO AUTHZ! + * </p> * @param subject user that must have proper permissions * @param bundleId the bundle for which this will be the next version * @param name not null or empty @@ -112,8 +121,11 @@ public interface BundleManagerLocal extends BundleManagerRemote {
/** * Not generally called. For use by Server Side Plugins when registering a Bundle Plugin. - * - * @param subject must be InventoryManager + * </p> + * Required Permissions: + * - Global.CREATE_BUNDLES + * </p> + * @param subject * @param name not null or empty * @param resourceTypeId id of the ResourceType that handles this BundleType * @return the persisted BundleType (id is assigned) @@ -121,26 +133,50 @@ public interface BundleManagerLocal extends BundleManagerRemote { BundleType createBundleType(Subject subject, String name, int resourceTypeId) throws Exception;
/** - * This is typically not called directly, typically scheduleBundleResourceDeployment() is called externally. This executes - * in a New Transaction and supports scheduleBundleResourceDeployment. + * This is typically not called directly, typically scheduleBundleResourceDeployment() is called externally. + * This executes in a New Transaction and supports scheduleBundleResourceDeployment. + * </p> + * This method performs NO AUTHZ! + * </p> */ - BundleResourceDeployment createBundleResourceDeployment(Subject subject, int bundleDeploymentId, int resourceId) - throws Exception; + BundleResourceDeployment createBundleResourceDeploymentInNewTrans(Subject subject, int bundleDeploymentId, + int resourceId) throws Exception;
/** * Similar to {@link BundleManagerRemote#createBundleDeployment(Subject, int, int, String, Configuration)} but * supplies the internally generated deploymentName and has different transaction semantics. Useful when an * slsb method needs to both create a deployment and schedules it prior to returning to an external caller. + * </p> + * This method performs NO AUTHZ! + * </p> */ public BundleDeployment createBundleDeploymentInNewTrans(Subject subject, int bundleVersionId, int bundleDestinationId, String name, String description, Configuration configuration) throws Exception;
- // added here because the same method in @Remote was commented out to bypass a WSProvide issue + /** + * Used by GUI + * </p> + * Required Permissions: Either: + * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES + * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * </p> + * + * @param subject + * @param bundleVersionId + * @return Map, filename to foundInBundleVersion + * @throws Exception + */ HashMap<String, Boolean> getAllBundleVersionFilenames(Subject subject, int bundleVersionId) throws Exception;
/** - * Needed by the Bundle Deploy and Revert wizards GUI to generate a deployment name for display. - * + * Used by GUI. Needed by the Bundle Deploy and Revert wizards GUI to generate a deployment name for display. + * <pre> + * Required Permissions: Either: + * - Global.DEPLOY_BUNDLES and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) + * - Resource.DEPLOY_BUNDLES_TO_GROUP and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) + * </pre> + * * @param subject * @param bundleDestinationId required * @param bundleVersionId required for progressive deployment, -1 for revert @@ -151,18 +187,26 @@ public interface BundleManagerLocal extends BundleManagerRemote { int prevDeploymentId);
/** - * Not for general consumption. A special case method to build the pojo that can be sent to the agent to + * Internal use only. A special case method to build the pojo that can be sent to the agent to * schedule the deployment request. Uses NOT_SUPPORTED transaction attribute to avoid having the cleaned pojo * affect the persistence context. + * </p> + * This method performs NO AUTHZ! + * </p> + * * @throws Exception */ public BundleScheduleRequest getScheduleRequest(Subject subject, int resourceDeploymentId, boolean isCleanDeployment, boolean isRevert) throws Exception;
/** - * This is a simple attempt at delete, typically used for removing a poorly defined deployment before it is - * actually scheduled for deployment. The status must be PENDING. It will - * fail if anything actually refers to it. + * Used by GUI. This is a simple attempt at delete, typically used for removing a poorly defined deployment before it is + * actually scheduled for deployment. The status must be PENDING. It will fail if anything actually refers to it. + * <pre> + * Required Permissions: Either: + * - Global.DEPLOY_BUNDLES and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) + * - Resource.DEPLOY_BUNDLES_TO_GROUP and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) + * </pre> * * @param subject * @param bundleDeploymentId @@ -171,8 +215,13 @@ public interface BundleManagerLocal extends BundleManagerRemote { void deleteBundleDeployment(Subject subject, int bundleDeploymentId) throws Exception;
/** - * This is a simple attempt at delete, typically used for removing a poorly defined destination. It will + * Used by GUI. This is a simple attempt at delete, typically used for removing a poorly defined destination. It will * fail if any actual deployments are referring to the destination. + * <pre> + * Required Permissions: Either: + * - Global.DEPLOY_BUNDLES and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) + * - Resource.DEPLOY_BUNDLES_TO_GROUP and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) + * </pre> * * @param subject * @param bundleDestinationId @@ -181,7 +230,11 @@ public interface BundleManagerLocal extends BundleManagerRemote { void deleteBundleDestination(Subject subject, int bundleDestinationId) throws Exception;
/** - * Called internally to set deployment status. Typically to a completion status when deployment ends. + * Internal use only. Called internally to set deployment status. Typically to a completion status when deployment + * ends. + * </p> + * This method performs NO AUTHZ! + * </p> * * @param subject * @param resourceDeploymentId id of the resource deployment appending the history record @@ -192,9 +245,14 @@ public interface BundleManagerLocal extends BundleManagerRemote { BundleDeploymentStatus status) throws Exception;
/** - * This is for internal use only - when {@link #purgeBundleDestination(Subject, int)} is done, it + * Internal use only + * </p> + * When {@link #purgeBundleDestination(Subject, int)} is done, it * calls this so the purge can be finalized. This is required because this method is called with * a transactional context, as opposed to the main purge method. + * </p> + * This method performs NO AUTHZ! + * </p> * * @param subject * @param bundleDeployment @@ -204,29 +262,4 @@ public interface BundleManagerLocal extends BundleManagerRemote { void _finalizePurge(Subject subject, BundleDeployment bundleDeployment, Map<BundleResourceDeployment, String> failedToPurge) throws Exception;
- /** - * Fetch bundles by criteria and then on the result objects - * @param subject Caller - * @param criteria criteria to fetch the bundles - * @return List of bundles with destinations filtered. - */ - PageList<Bundle> findBundlesByCriteriaWithDestinationFilter(Subject subject, BundleCriteria criteria); - - /** - * Fetch bundle versions by criteria and then filter destination on the result objects to limit what the user can see - * @param subject Caller - * @param criteria criteria to fetch the bundles - * @return List of bundles with destinations filtered. - */ - PageList<BundleVersion> findBundleVersionsByCriteriaWithDestinationFilter(Subject subject, - BundleVersionCriteria criteria); - - /** - * Fetch bundle deployments by criteria and then filter on destinations on the result objects to limit what the user can see - * @param subject Caller - * @param criteria criteria to fetch the deployments - * @return List of deployments with destinations filtered. - */ - PageList<BundleDeployment> findBundleDeploymentsByCriteriaWithDestinationFilter(Subject subject, - BundleDeploymentCriteria criteria); } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerRemote.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerRemote.java index 5ff63d0..c1b9eef 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerRemote.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerRemote.java @@ -72,12 +72,13 @@ public interface BundleManagerRemote { /** * Adds a BundleFile to the BundleVersion and implicitly creates the backing PackageVersion. 
If the PackageVersion * already exists use {@link #addBundleFileViaPackageVersion(Subject, int, String, int)} - * </p> + * <pre> * Required Permissions: Either: * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG - * + * </pre> + * * @param subject user that must have proper permissions * @param bundleVersionId id of the BundleVersion incorporating this BundleFile * @param name name of the BundleFile (and the resulting Package) @@ -94,12 +95,13 @@ public interface BundleManagerRemote { * A convenience method taking a byte array as opposed to a stream for the file bits. * WARNING: obviously, this requires the entire bundle file to have been loaded fully in memory. * For very large files, this could cause OutOfMemoryErrors. - * </p> + * <pre> * Required Permissions: Either: * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG - * + * </pre> + * * @see {@link #addBundleFile(Subject, int, String, String, Architecture, InputStream)} */ BundleFile addBundleFileViaByteArray(Subject subject, int bundleVersionId, String name, String version, @@ -107,12 +109,13 @@ public interface BundleManagerRemote {
/** * A convenience method taking a URL String whose content will be streamed to the server and used for the file bits. - * </p> + * <pre> * Required Permissions: Either: * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG - * + * </pre> + * * @see #addBundleFile(Subject, int, String, String, Architecture, InputStream) */ BundleFile addBundleFileViaURL(Subject subject, int bundleVersionId, String name, String version, @@ -121,12 +124,13 @@ public interface BundleManagerRemote { /** * A variant of {@link #addBundleFileViaURL(Subject, int, String, String, Architecture, String)} supporting the * HTTP basic authentication. - * </p> + * <pre> * Required Permissions: Either: * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG - * + * </pre> + * * @see #addBundleFileViaURL(Subject, int, String, String, Architecture, String) */ BundleFile addBundleFileViaURL(Subject subject, int bundleVersionId, String name, String version, @@ -134,12 +138,13 @@ public interface BundleManagerRemote {
/** * A convenience method taking an existing PackageVersion as opposed to a stream for the file bits. - * </p> + * <pre> * Required Permissions: Either: * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG - * + * </pre> + * * @see {@link #addBundleFile(Subject, int, String, String, Architecture, InputStream)} */ BundleFile addBundleFileViaPackageVersion(Subject subject, int bundleVersionId, String name, int packageVersionId) @@ -147,11 +152,12 @@ public interface BundleManagerRemote {
/** * Assign the specified bundles to the specified bundle group. - * </p> + * <pre> * Requires VIEW permission for the relevant bundle and either: * - Global.CREATE_BUNDLE * - BundleGroup.CREATE_BUNDLES_IN_GROUP or BundleGroup.ASSIGN_BUNDLES_TO_GROUP for the relevant bundle group - * + * </pre> + * * @param subject * @param bundleGroupId * @param bundleIds @@ -163,11 +169,12 @@ public interface BundleManagerRemote { * call. This provides useful, uniform naming for display. An optional, custom description * can be added. This call defines a deployment. The defined deployment can then be * scheduled in a separate call. - * </p> + * <pre> * Required Permissions: Either: * - Global.DEPLOY_BUNDLES and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) * - Resource.DEPLOY_BUNDLES_TO_GROUP and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) - * + * </pre> + * * @param subject user that must have proper permissions * @param bundleVersionId the BundleVersion being deployed by this deployment * @param bundleDestinationId the BundleDestination for the deployment @@ -182,11 +189,12 @@ public interface BundleManagerRemote {
/** * Creates a bundle destination that describes a target for the bundle deployments. - * </p> + * <pre> * Required Permissions: Either: * - Global.DEPLOY_BUNDLES and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) * - Resource.DEPLOY_BUNDLES_TO_GROUP and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) - * + * </pre> + * * @param subject user must have MANAGE_INVENTORY permission * @param bundleId the Bundle to be deployed to this Destination * @param name a name for this destination. not null or empty @@ -225,12 +233,13 @@ public interface BundleManagerRemote { * </p> * If this bundle version is the initial version of a new bundle that needs to be created, the subject must * have Global.VIEW_BUNDLES because the new bundle will not be associated with any bundle group. - * </p> + * <pre> * Required Permissions: Either: * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG - * + * </pre> + * * @param subject user that must have proper permissions * @param recipe the recipe that defines the bundle version to be created * @return the persisted BundleVersion with alot of the internal relationships filled in to help the caller @@ -244,19 +253,21 @@ public interface BundleManagerRemote { * should be assigned to. * If bundleGroupId is 0, then the new bundle will not be associated with any bundle group - this is only * allowed if the caller has the permission Global.VIEW_BUNDLES. 
- * </p> + * <pre> * Required Permissions: Either: * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG - * + * </pre> + * * @param subject user that must have proper permissions * @param bundleGroupId identifies the bundle group that the new bundle will be associated with; 0 if no group * @param recipe the recipe that defines the bundle version to be created * @return the persisted BundleVersion with alot of the internal relationships filled in to help the caller * understand all that this method did. */ - BundleVersion createInitialBundleVersionViaRecipe(Subject subject, int bundleGroupId, String recipe) throws Exception; + BundleVersion createInitialBundleVersionViaRecipe(Subject subject, int bundleGroupId, String recipe) + throws Exception;
/** * Creates a bundle version based on a Bundle Distribution file. Typically a zip file, the bundle distribution @@ -267,12 +278,13 @@ public interface BundleManagerRemote { * </p> * If this bundle version is the initial version of a new bundle that needs to be created, the subject must * have Global.VIEW_BUNDLES because the new bundle will not be associated with any bundle group. - * </p> + * <pre> * Required Permissions: Either: * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG - * + * </pre> + * * @param subject user that must have proper permissions * @param distributionFile a local Bundle Distribution file. It must be read accessible by the RHQ server process. * @return the persisted BundleVersion with alot of the internal relationships filled in to help the caller @@ -286,12 +298,13 @@ public interface BundleManagerRemote { * should be assigned to. * If bundleGroupId is 0, then the new bundle will not be associated with any bundle group - this is only * allowed if the caller has the permission Global.VIEW_BUNDLES. - * </p> + * <pre> * Required Permissions: Either: * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG - * + * </pre> + * * @param subject user that must have proper permissions * @param bundleGroupId identifies the bundle group that the new bundle will be associated with; 0 if no group * @param distributionFile a local Bundle Distribution file. It must be read accessible by the RHQ server process. @@ -299,7 +312,8 @@ public interface BundleManagerRemote { * understand all that this method did. Bundle files specifically are returned. 
* @throws Exception */ - BundleVersion createInitialBundleVersionViaFile(Subject subject, int bundleGroupId, File distributionFile) throws Exception; + BundleVersion createInitialBundleVersionViaFile(Subject subject, int bundleGroupId, File distributionFile) + throws Exception;
/** * Creates a bundle version based on the actual bytes of a Bundle Distribution file. This is essentially @@ -310,12 +324,13 @@ public interface BundleManagerRemote { * </p> * If this bundle version is the initial version of a new bundle that needs to be created, the subject must * have Global.VIEW_BUNDLES because the new bundle will not be associated with any bundle group. - * </p> + * <pre> * Required Permissions: Either: * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG - * + * </pre> + * * @param subject user that must have proper permissions * @param fileBytes the file bits that make up the entire bundle distribution file * @return the persisted BundleVersion with a lot of the internal relationships filled in to help the caller @@ -329,12 +344,13 @@ public interface BundleManagerRemote { * should be assigned to. * If bundleGroupId is 0, then the new bundle will not be associated with any bundle group - this is only * allowed if the caller has the permission Global.VIEW_BUNDLES. - * </p> + * <pre> * Required Permissions: Either: * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG - * + * </pre> + * * @param subject user that must have proper permissions * @param bundleGroupId identifies the bundle group that the new bundle will be associated with; 0 if no group * @param fileBytes the file bits that make up the entire bundle distribution file @@ -342,7 +358,8 @@ public interface BundleManagerRemote { * understand all that this method did. Bundle files specifically are returned. 
* @throws Exception */ - BundleVersion createInitialBundleVersionViaByteArray(Subject subject, int bundleGroupId, byte[] fileBytes) throws Exception; + BundleVersion createInitialBundleVersionViaByteArray(Subject subject, int bundleGroupId, byte[] fileBytes) + throws Exception;
/** * Creates a bundle version based on a Bundle Distribution file. Typically a zip file, the bundle distribution @@ -355,12 +372,13 @@ public interface BundleManagerRemote { * </p> * If this bundle version is the initial version of a new bundle that needs to be created, the subject must * have Global.VIEW_BUNDLES because the new bundle will not be associated with any bundle group. - * </p> + * <pre> * Required Permissions: Either: * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG - * + * </pre> + * * @param subject user that must have proper permissions * @param distributionFileUrl a URL String to the Bundle Distribution file. It must be live, resolvable and read accessible * by the RHQ server process. @@ -376,12 +394,13 @@ public interface BundleManagerRemote { * should be assigned to. * If bundleGroupId is 0, then the new bundle will not be associated with any bundle group - this is only * allowed if the caller has the permission Global.VIEW_BUNDLES. - * </p> + * <pre> * Required Permissions: Either: * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG - * + * </pre> + * * @param subject user that must have proper permissions * @param bundleGroupId identifies the bundle group that the new bundle will be associated with; 0 if no group * @param distributionFileUrl a URL String to the Bundle Distribution file. It must be live, resolvable and read accessible @@ -390,7 +409,8 @@ public interface BundleManagerRemote { * understand all that this method did. Bundle files specifically are returned. 
* @throws Exception */ - BundleVersion createInitialBundleVersionViaURL(Subject subject, int bundleGroupId, String distributionFileUrl) throws Exception; + BundleVersion createInitialBundleVersionViaURL(Subject subject, int bundleGroupId, String distributionFileUrl) + throws Exception;
/** * A version of the {@link #createBundleVersionViaURL(org.rhq.core.domain.auth.Subject, String)} that accepts a @@ -398,12 +418,13 @@ public interface BundleManagerRemote { * </p> * If this bundle version is the initial version of a new bundle that needs to be created, the subject must * have Global.VIEW_BUNDLES because the new bundle will not be associated with any bundle group. - * </p> + * <pre> * Required Permissions: Either: * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG - * + * </pre> + * * @see #createBundleVersionViaURL(org.rhq.core.domain.auth.Subject, String) */ BundleVersion createBundleVersionViaURL(Subject subject, String distributionFileUrl, String username, @@ -415,27 +436,30 @@ public interface BundleManagerRemote { * should be assigned to. * If bundleGroupId is 0, then the new bundle will not be associated with any bundle group - this is only * allowed if the caller has the permission Global.VIEW_BUNDLES. - * </p> + * <pre> * Required Permissions: Either: * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG - * + * </pre> + * * @see #createBundleVersionViaURL(org.rhq.core.domain.auth.Subject, String) */ - BundleVersion createInitialBundleVersionViaURL(Subject subject, int bundleGroupId, String distributionFileUrl, String username, String password) throws Exception; + BundleVersion createInitialBundleVersionViaURL(Subject subject, int bundleGroupId, String distributionFileUrl, + String username, String password) throws Exception;
/** * Remove everything associated with the Bundles with the exception of files laid down by related deployments. * Deployed files are left as is on the deployment platforms but the bundle mechanism will no longer track * the deployment of all bundles that have been deleted. * The bundles that are deleted will be removed from all bundle groups that it was a member of. - * </p> + * <pre> * Required Permissions: Either: * - Global.DELETE_BUNDLES and Global.VIEW_BUNDLES * - Global.DELETE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * - BundleGroup.DELETE_BUNDLES_FROM_GROUP for bundle group BG and the relevant bundle is assigned to BG - * + * </pre> + * * @param subject user that must have proper permissions * @param bundleIds IDs of all bundles to be deleted * @throws Exception if any part of the removal fails. @@ -447,12 +471,13 @@ public interface BundleManagerRemote { * Deployed files are left as is on the deployment platforms but the bundle mechanism will no longer track * the deployment. * The bundles that are deleted will be removed from all bundle groups that it was a member of. - * </p> + * <pre> * Required Permissions: Either: * - Global.DELETE_BUNDLES and Global.VIEW_BUNDLES * - Global.DELETE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * - BundleGroup.DELETE_BUNDLES_FROM_GROUP for bundle group BG and the relevant bundle is assigned to BG - * + * </pre> + * * @param subject user that must have proper permissions * @param bundleId the id of the bundle to remove * @throws Exception if any part of the removal fails. @@ -476,12 +501,13 @@ public interface BundleManagerRemote { * Deployed files are left as is on the deployment platforms but the bundle mechanism will no longer track * the deployment. * The deleted bundle version will no longer exist in any bundle group. 
- * </p> + * <pre> * Required Permissions: Either: * - Global.DELETE_BUNDLES and Global.VIEW_BUNDLES * - Global.DELETE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * - BundleGroup.DELETE_BUNDLES_FROM_GROUP for bundle group BG and the relevant bundle is assigned to BG - * + * </pre> + * * @param subject user that must have proper permissions * @param bundleVersionId the id of the bundle version to remove * @param deleteBundleIfEmpty if <code>true</code> and if this method deletes the last bundle version for its @@ -490,14 +516,40 @@ public interface BundleManagerRemote { */ void deleteBundleVersion(Subject subject, int bundleVersionId, boolean deleteBundleIfEmpty) throws Exception;
+ /** + * Return the <code>Bundles</code> narrowed by the supplied Criteria. The results are implicitly + * narrowed to those bundles viewable by the <code>subject</code>. + */ PageList<Bundle> findBundlesByCriteria(Subject subject, BundleCriteria criteria);
+ /** + * Return the <code>BundleGroups</code> narrowed by the supplied Criteria. The results are implicitly + * narrowed to those bundle groups viewable by the <code>subject</code>. + */ PageList<BundleGroup> findBundleGroupsByCriteria(Subject subject, BundleGroupCriteria criteria);
+ /** + * Return the <code>BundleDeployments</code> narrowed by the supplied Criteria. The results are implicitly + * narrowed to those for bundles and destination groups viewable by the <code>subject</code>. + */ PageList<BundleDeployment> findBundleDeploymentsByCriteria(Subject subject, BundleDeploymentCriteria criteria);
+ /** + * Return the <code>BundleDestinations</code> narrowed by the supplied Criteria. The results are implicitly + * narrowed to those with destination resource groups viewable by the <code>subject</code>. + */ PageList<BundleDestination> findBundleDestinationsByCriteria(Subject subject, BundleDestinationCriteria criteria);
+ /** + * Note that this can involve permissions on bundles and resources. Results will always be narrowed to the bundles + * viewable by <code>subject</code>. If optionally requesting the relevant Resources + * via <code>BundleResourceDeploymentCriteria.fetchResources(true)</code> the results will be further narrowed to + * the viewable resources. + * + * @param subject + * @param criteria + * @return + */ PageList<BundleResourceDeployment> findBundleResourceDeploymentsByCriteria(Subject subject, BundleResourceDeploymentCriteria criteria);
@@ -516,12 +568,13 @@ public interface BundleManagerRemote { * Determine the files required for a BundleVersion and return all of the filenames or optionally, just those * that lack BundleFiles for the BundleVersion. The recipe may be parsed as part of this call. * This is needed as part of the bundle creation workflow, hence why creation permissions are needed. - * </p> + * <pre> * Required Permissions: Either: * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG - * + * </pre> + * * @param subject user that must have proper permissions * @param bundleVersionId the BundleVersion being queried * @param withoutBundleFileOnly if true omit any filenames that already have a corresponding BundleFile for @@ -534,7 +587,7 @@ public interface BundleManagerRemote {
/** * Purges the destination's live deployment content from the remote platforms. - * </p> + * </pre> * Required Permissions: Either: * - Global.DEPLOY_BUNDLES and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) * - Resource.DEPLOY_BUNDLES_TO_GROUP and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) @@ -550,7 +603,7 @@ public interface BundleManagerRemote { * complete. The returned BundleDeployment can be used to track the history of the individual deployments. * <br/><br/> * TODO: Add the scheduling capability, currently it's Immediate. - * </p> + * </pre> * Required Permissions: Either: * - Global.DEPLOY_BUNDLES and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) * - Resource.DEPLOY_BUNDLES_TO_GROUP and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) @@ -574,7 +627,7 @@ public interface BundleManagerRemote { * call. This provides useful, uniform naming for display. An optional, custom description can be added. * <br/><br/> * TODO: Add the scheduling capability, currently it's Immediate. - * </p> + * </pre> * Required Permissions: Either: * - Global.DEPLOY_BUNDLES and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) * - Resource.DEPLOY_BUNDLES_TO_GROUP and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) @@ -592,7 +645,7 @@ public interface BundleManagerRemote {
/** * Unassign the specified bundles from the specified bundle group. - * </p> + * </pre> * Requires VIEW permission for the relevant bundles and either: * - Global.DELETE_BUNDLE * - BundleGroup.DELETE_BUNDLES_FROM_GROUP or BundleGroup.UNASSIGN_BUNDLES_FROM_GROUP for the relevant bundle group diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleServerServiceImpl.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleServerServiceImpl.java index a78d7e9..4e73171 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleServerServiceImpl.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleServerServiceImpl.java @@ -56,7 +56,7 @@ public class BundleServerServiceImpl implements BundleServerService { public void addDeploymentHistory(int bundleDeploymentId, BundleResourceDeploymentHistory history) { try { BundleManagerLocal bm = LookupUtil.getBundleManager(); - bm.addBundleResourceDeploymentHistory(LookupUtil.getSubjectManager().getOverlord(), bundleDeploymentId, + bm.addBundleResourceDeploymentHistoryInNewTrans(LookupUtil.getSubjectManager().getOverlord(), bundleDeploymentId, history); } catch (Exception e) { log.error("Failed to add history to deployment id: " + bundleDeploymentId, e); diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/CriteriaQueryGenerator.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/CriteriaQueryGenerator.java index e65d9f3..0a1060d 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/CriteriaQueryGenerator.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/CriteriaQueryGenerator.java @@ -69,7 +69,8 @@ public final class CriteriaQueryGenerator { public enum AuthorizationTokenType { RESOURCE, // specifies the resource alias to join on for standard 
res-group-role-subject authorization checking GROUP, // specifies the group alias to join on for standard group-role-subject authorization checking - BUNDLE; // specifies the bundle alias to join on for standard bundle-bundleGroup-role-subject authorization checking + BUNDLE, // specifies the bundle alias to join on for standard bundle-bundleGroup-role-subject authorization checking + BUNDLE_GROUP; // specifies the bundle group alias to join on for standard bundleGroup-role-subject authorization checking }
private Criteria criteria; @@ -125,6 +126,8 @@ public final class CriteriaQueryGenerator { setAuthorizationResourceFragment(type, defaultFragment, subjectId); } else if (type == AuthorizationTokenType.BUNDLE) { setAuthorizationBundleFragment(subjectId); + } else if (type == AuthorizationTokenType.BUNDLE_GROUP) { + setAuthorizationBundleGroupFragment(subjectId); } }
@@ -249,6 +252,39 @@ public final class CriteriaQueryGenerator { customAuthzFragment = customAuthzFragment.replace("%aliasWithFragment%", aliasReplacement); customAuthzFragment = customAuthzFragment.replace("%innerAlias%", innerAliasReplacement); customAuthzFragment = customAuthzFragment.replace("%subjectId%", String.valueOf(subjectId)); + this.authorizationCustomConditionFragment = customAuthzFragment; + + // If the query results are narrowed by requiredPerms generate the fragment now. It's done + // here for two reasons. First, it seems to make sense to apply this only when an authFragment is + // being used. Second, because one day the query may be less brute force and may modify or + // leverage the joinFragment above. But, after extensive trying a more elegant + // query could not be constructed due to Hibernate limitations. So, for now, here it is... + List<Permission> requiredPerms = this.criteria.getRequiredPermissions(); + if (!(null == requiredPerms || requiredPerms.isEmpty())) { + this.authorizationPermsFragment = "" // + + "( SELECT COUNT(DISTINCT p)" + NL // + + " FROM Subject innerSubject" + NL // + + " JOIN innerSubject.roles r" + NL // + + " JOIN r.permissions p" + NL // + + " WHERE innerSubject.id = " + this.authorizationSubjectId + NL // + + " AND p IN ( :requiredPerms ) ) = :requiredPermsSize" + NL; + } + } + + public void setAuthorizationBundleGroupFragment(int subjectId) { + String fragment = "bundleGroup"; + String customAuthzFragment = "" // + + "( %aliasWithFragment%.id IN ( SELECT %innerAlias%.id " + NL // + + " FROM %alias% innerAlias " + NL // + + " JOIN %innerAlias%.roles r JOIN r.subjects s " + NL // + + " WHERE s.id = %subjectId% ) ) " + NL; + String aliasReplacement = criteria.getAlias() + (fragment != null ? "." + fragment : ""); + String innerAliasReplacement = "innerAlias" + (fragment != null ? "." 
+ fragment : ""); + customAuthzFragment = customAuthzFragment.replace("%alias%", criteria.getAlias()); + customAuthzFragment = customAuthzFragment.replace("%aliasWithFragment%", aliasReplacement); + customAuthzFragment = customAuthzFragment.replace("%innerAlias%", innerAliasReplacement); + customAuthzFragment = customAuthzFragment.replace("%subjectId%", String.valueOf(subjectId)); + this.authorizationCustomConditionFragment = customAuthzFragment;
// If the query results are narrowed by requiredPerms generate the fragment now. It's done // here for two reasons. First, it seems to make sense to apply this only when an authFragment is @@ -724,9 +760,9 @@ public final class CriteriaQueryGenerator { private void addPersistentBag(String fieldName) { Field f = findField(fieldName); if (f == null) { - LOG.warn( - "Failed to add persistent bag collection [" + fieldName + "] on class [" + criteria.getPersistentClass().getName() + - "]. There doesn't seem to be a field of that name on the class or any of its superclasses."); + LOG.warn("Failed to add persistent bag collection [" + fieldName + "] on class [" + + criteria.getPersistentClass().getName() + + "]. There doesn't seem to be a field of that name on the class or any of its superclasses."); } else { persistentBagFields.add(f); } @@ -735,9 +771,9 @@ public final class CriteriaQueryGenerator { private void addJoinFetch(String fieldName) { Field f = findField(fieldName); if (f == null) { - LOG.warn( - "Failed to add join fetch field [" + fieldName + "] on class [" + criteria.getPersistentClass().getName() + - "]. There doesn't seem to be a field of that name on the class or any of its superclasses."); + LOG.warn("Failed to add join fetch field [" + fieldName + "] on class [" + + criteria.getPersistentClass().getName() + + "]. There doesn't seem to be a field of that name on the class or any of its superclasses."); } else { joinFetchFields.add(f); }
commit b2098e8b9b670709054aa796dfa4312a38c58b7a Author: John Sanda jsanda@redhat.com Date: Wed Jul 31 13:36:33 2013 -0400
fix check for determining whether or not read repair is needed when adding nodes
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index ee0f406..1c96e27 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -866,19 +866,21 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN List<StorageNode> storageNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, StorageNode.class).setParameter("operationMode", OperationMode.NORMAL).getResultList();
- int clusterSize = storageNodes.size(); + // The previous cluster size will be the current size - 1 since we currently only + // support deploying one node at a time. + int previousClusterSize = storageNodes.size() - 1; boolean isReadRepairNeeded;
- if (clusterSize >= 4) { + if (previousClusterSize >= 4) { // At 4 nodes we increase the RF to 3. We are not increasing the RF beyond // that for additional nodes; so, there is no need to run repair if we are // expanding from a 4 node cluster since the RF remains the same. isReadRepairNeeded = false; - } else if (clusterSize == 1) { + } else if (previousClusterSize == 1) { // The RF will increase since we are going from a single to a multi-node // cluster; therefore, we want to run repair. isReadRepairNeeded = true; - } else if (clusterSize == 2) { + } else if (previousClusterSize == 2) { if (storageNodes.size() > 3) { // If we go from 2 to > 3 nodes we will increase the RF to 3; therefore // we want to run repair. @@ -888,7 +890,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN // to run repair. isReadRepairNeeded = false; } - } else if (clusterSize == 3) { + } else if (previousClusterSize == 3) { // We are increasing the cluster size > 3 which means the RF will be // updated to 3; therefore, we want to run repair. isReadRepairNeeded = true;
commit cee13fdf4fe40573568616c2729a0e6f97f6fe5f Author: John Sanda jsanda@redhat.com Date: Tue Jul 30 16:41:18 2013 -0400
update itest
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra-jvm.properties b/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra-jvm.properties index 774a831..588fb3d 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra-jvm.properties +++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra-jvm.properties @@ -13,7 +13,7 @@ heap_new=-Xmn${HEAP_NEWSIZE}
heap_dump_on_OOMError="-XX:+HeapDumpOnOutOfMemoryError"
-heap_dump_dir="" +heap_dump_dir=
thread_stack_size=-Xss180k
commit 83a5f3ddbaf7ea7467a0d8a50bc531efe10fd70b Author: Simeon Pinder spinder@redhat.com Date: Wed Jul 31 11:05:43 2013 -0400
i)ensure jdk 6+ versions and ii)reuse userDN between the steps.
diff --git a/modules/helpers/ldap-tool/pom.xml b/modules/helpers/ldap-tool/pom.xml index 770f22c..91416d5 100644 --- a/modules/helpers/ldap-tool/pom.xml +++ b/modules/helpers/ldap-tool/pom.xml @@ -60,6 +60,16 @@ </execution> </executions> </plugin> + <!-- Build for JDK 1.6 and later. --> + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-compiler-plugin</artifactId> + <version>2.0.2</version> + <configuration> + <source>1.6</source> + <target>1.6</target> + </configuration> + </plugin>
<plugin> <artifactId>maven-release-plugin</artifactId> diff --git a/modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java b/modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java index 89b0698..bc322ab 100644 --- a/modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java +++ b/modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java @@ -57,10 +57,10 @@ import javax.swing.border.TitledBorder; * LDAP calls during auth/authz operations. * * The specific LDAP logic below needs to mirror the latest RHQ code and allow the user - * to test our their configuration without requring a specific RHQ/JON build as a dependency. + * to test our their configuration without requiring a specific RHQ/JON build as a dependency. * * NOTE: To avoid a runtime dependency on specific versions of RHQ or JON, the small implementation - * methods were copied into this class with minimatl changes for logging and ui messaging. The + * methods were copied into this class with minimal changes for logging and ui messaging. The * definitive implementation for each 'copied' method can be found in LDAPGroupManagerBean. * * @author Simeon Pinder @@ -85,6 +85,7 @@ public class TestLdapSettings extends JFrame { private JMenuBar menuBar; private String advdb = "**Verbose:debug ----"; private static final String BASEDN_DELIMITER = ";"; + private String userDN;
private static final long serialVersionUID = 1L; int textBoxWidth = 20; @@ -419,7 +420,7 @@ public class TestLdapSettings extends JFrame { } // testing a valid user involves a filtered ldap search // using the loginProperty, and optionally searchFilter - String userDN = ""; + userDN = ""; if (proceed) { // default loginProperty to cn if it's not set if (loginProperty.isEmpty()) { @@ -895,9 +896,8 @@ public class TestLdapSettings extends JFrame { // Load any search filter String groupSearchFilter = (String) systemConfig.get(SystemSetting.LDAP_GROUP_FILTER.getInternalName()); String groupMemberFilter = (String) systemConfig.get(SystemSetting.LDAP_GROUP_MEMBER.getInternalName()); - String userDn = (String) systemConfig.get(Context.SECURITY_PRINCIPAL);
- String testUserDN = userDn; + String testUserDN = userDN; String ldapServer = (String) systemConfig.get(Context.PROVIDER_URL);
Properties env = getProperties(ldapServer);
commit af02d59186c22bd6fce00804fdccee5a6c2d8903 Author: Simeon Pinder spinder@redhat.com Date: Wed Jul 31 07:02:29 2013 -0400
i)Fix issue with authz validation in TestLdapSettings ii)Remove old TestLdapSettings source.
diff --git a/etc/dev-utils/TestLdapSettings.java b/etc/dev-utils/TestLdapSettings.java deleted file mode 100644 index 2e29b3d..0000000 --- a/etc/dev-utils/TestLdapSettings.java +++ /dev/null @@ -1,738 +0,0 @@ -package com.test; - -import java.awt.BorderLayout; -import java.awt.Color; -import java.awt.Dimension; -import java.awt.FlowLayout; -import java.awt.event.ActionEvent; -import java.awt.event.ActionListener; -import java.awt.event.WindowAdapter; -import java.awt.event.WindowEvent; -import java.io.PrintWriter; -import java.io.StringWriter; -import java.io.UnsupportedEncodingException; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Properties; -import java.util.Set; - -import javax.naming.CompositeName; -import javax.naming.Context; -import javax.naming.NamingEnumeration; -import javax.naming.directory.Attribute; -import javax.naming.directory.Attributes; -import javax.naming.directory.SearchControls; -import javax.naming.directory.SearchResult; -import javax.naming.ldap.InitialLdapContext; -import javax.swing.Box; -import javax.swing.BoxLayout; -import javax.swing.JButton; -import javax.swing.JCheckBox; -import javax.swing.JFrame; -import javax.swing.JLabel; -import javax.swing.JPanel; -import javax.swing.JScrollPane; -import javax.swing.JTextArea; -import javax.swing.JTextField; -import javax.swing.border.LineBorder; -import javax.swing.border.TitledBorder; - -//import org.rhq.enterprise.server.util.security.UntrustedSSLSocketFactory; - -/* Is a development test tool that allows the user to simulate the RHQ server side - * LDAP calls during auth/authz operations. 
- * - * @author Simeon Pinder - */ -public class TestLdapSettings extends JFrame { - //shared fields - private JTextArea testResults; - private JCheckBox ssl; - private JTextField testUserNameValue; - private JTextField testUserPasswordValue; - private HashMap<String, JTextField> fieldMappings; - private String[] keys; - private JCheckBox enableLdapReferral; - private JCheckBox enableVerboseDebugging; - private JCheckBox enableVerboseGroupParsing; - private String advdb = "**Verbose:debug ----"; - - private static final long serialVersionUID = 1L; - int textBoxWidth = 20; - - public static void main(String args[]) { - new TestLdapSettings(); - } - - // Configure window properties - private TestLdapSettings() { - - setTitle("Check LDAP Settings: Simulates LDAP checks/queries of RHQ LDAP integration"); - getContentPane().setLayout(new BorderLayout()); - // top panel definition - JPanel top = new JPanel(); - top.setLayout(new BoxLayout(top, BoxLayout.Y_AXIS)); - top.setBorder(LineBorder.createGrayLineBorder()); - keys = new String[] { "URL:", "Search Filter:", - "Search Base:","Login Property", - "Username:", "Group Search Filter:", - "Password:", "Group Member Filter:", - }; - fieldMappings = loadUiFields(top, keys); - - //add the two checkboxes for additiona debugging options - enableLdapReferral= new JCheckBox("[follow] ldap referrals"); - enableLdapReferral.setSelected(false); - enableVerboseDebugging= new JCheckBox("more verbose logging"); - enableVerboseDebugging.setSelected(false); - enableVerboseGroupParsing= new JCheckBox("more detailed group parsing"); - enableVerboseGroupParsing.setSelected(false); - //put into row display - JPanel advancedDebugRegion = new JPanel(); - advancedDebugRegion.setLayout(new FlowLayout(FlowLayout.LEFT)); - LineBorder advancedBorder = new LineBorder(Color.BLACK, 2); - TitledBorder debugBorder = new TitledBorder(advancedBorder, "Debug:"); - advancedDebugRegion.setBorder(debugBorder); - advancedDebugRegion.add(enableLdapReferral); - 
advancedDebugRegion.add(enableVerboseDebugging); - advancedDebugRegion.add(enableVerboseGroupParsing); - top.add(advancedDebugRegion); - - ssl = new JCheckBox("SSL:"); - ssl.setEnabled(false); - top.add(ssl); - // test user auth region - JPanel testUserRegion = new JPanel(); - testUserRegion.setLayout(new FlowLayout(FlowLayout.LEFT)); - LineBorder border = new LineBorder(Color.BLUE, 2); - TitledBorder tBorder = new TitledBorder(border, "Authentication/Authorization Check Credentials: (insert valid ldap user assigned to group)"); - testUserRegion.setBorder(tBorder); - JLabel testUserName = new JLabel("Test UserName:"); - testUserNameValue = new JTextField(textBoxWidth); - JLabel testUserPassword = new JLabel("Test Password:"); - testUserPasswordValue = new JTextField(textBoxWidth); - testUserRegion.add(testUserName); - testUserRegion.add(testUserNameValue); - testUserRegion.add(testUserPassword); - testUserRegion.add(testUserPasswordValue); - top.add(testUserRegion); - - // center - JPanel center = new JPanel(); - center.setLayout(new BoxLayout(center, BoxLayout.X_AXIS)); - // build center panel - buildCenterPanel(center); - - // final component layout - getContentPane().add(top, BorderLayout.NORTH); - getContentPane().add(center, BorderLayout.CENTER); - this.setSize(720, 700); - addWindowListener(new WindowAdapter() { - public void windowClosing(WindowEvent e) { - System.exit(0); - } - }); - setVisible(true); - } - - // define the center display panel. 
- private void buildCenterPanel(JPanel center) { - // First element is Test Button - JButton test = new JButton("Test Settings"); - center.add(test); - // second is large text box that display ldap queries - testResults = new JTextArea("(click button to test settings values: simulates 4 separate checks showing ldap filters used)", - 40, 40); - JScrollPane jsp = new JScrollPane(testResults); - center.add(jsp); - test.addActionListener(new ActionListener() { - public void actionPerformed(ActionEvent e) { - testResults.setText("");//clear out empty msg - //trim spaces from all fields - String ldapServer = fieldMappings.get(keys[0]).getText().trim(); - String searchFilter = fieldMappings.get(keys[1]).getText().trim(); - String searchBase = fieldMappings.get(keys[2]).getText().trim(); - String loginProperty = fieldMappings.get(keys[3]).getText().trim(); - String bindUserName = fieldMappings.get(keys[4]).getText().trim(); - String groupSearchFilter = fieldMappings.get(keys[5]).getText().trim(); - String bindPassword = fieldMappings.get(keys[6]).getText().trim(); - String groupMemberFilter = fieldMappings.get(keys[7]).getText().trim(); - String testUserName = testUserNameValue.getText().trim(); - String testUserPassword = testUserPasswordValue.getText().trim(); - // validate initial required elements - String msg = null; - boolean proceed = true; - //valid required details set. 
- if (ldapServer.isEmpty() || bindUserName.isEmpty() - || bindPassword.isEmpty() || searchBase.isEmpty()) { - msg ="STEP-1:FAIL: "+ keys[0] + ", " + keys[2] + ", " + keys[4] + ", " - + keys[6] + " cannot be empty to proceed."; - log(msg); - proceed = false; - } - Properties env; - InitialLdapContext ctx = null; - if (proceed) {// attempt initial ldap bind from RHQ server - msg = "STEP-1:TESTING: Attempting to bind to server:" + ldapServer - + "\n with user '" + bindUserName - + "' and password entered."; - log(msg); - env = getProperties(ldapServer); - env.setProperty(Context.SECURITY_PRINCIPAL, bindUserName); - env.setProperty(Context.SECURITY_CREDENTIALS, bindPassword); - env.setProperty(Context.SECURITY_AUTHENTICATION, "simple"); - try { - ctx = new InitialLdapContext(env, null); - msg = "STEP-1:PASS: LDAP bind credentials are correct. Successfully connected to '" - + ldapServer - + "'.\n This means the LDAP Bind credentials for the RHQ Server authentication/authorization requests to ldap server " - + "are correct."; - if(enableVerboseDebugging.isSelected()){ - msg+="\n"+advdb+" LDAP simple authentication bind successful."; - } - log(msg); - proceed = true; - } catch (Exception ex) { - msg = "STEP-1:FAIL: Unable to connect to the LDAP server with credentials specified.\n"; - msg+="Exception:"+ex.getMessage(); - if(enableVerboseDebugging.isSelected()){ - msg = appendStacktraceToMsg(msg, ex); - } - log(msg); - proceed = false; - } - } - if (proceed) {// retrieve test credentials to test run auth - // load search controls - SearchControls searchControls = getSearchControls(); - // validating searchFilter and test user/pass creds - proceed = true; - if (testUserName.isEmpty() || (testUserPassword.isEmpty())) { - msg = "STEP-2:FAIL: Test Username/Password fields cannot be empty for this step."; - log(msg); - proceed = false; - } - // testing a valid user involves a filtered ldap search - // using the loginProperty, and optionally searchFilter - String userDN = ""; - 
if (proceed) { - // default loginProperty to cn if it's not set - if (loginProperty.isEmpty()) { - loginProperty = "cn"; - if(enableVerboseDebugging.isSelected()){ - String mesg = "As you have not specified a login property, defaulting to 'cn'"; - log(advdb+" "+msg); - } - } - String filter; - if (!searchFilter.isEmpty()) { - filter = "(&(" + loginProperty + "=" + testUserName - + ")" + "(" + searchFilter + "))"; - } else { - filter = "(" + loginProperty + "=" + testUserName - + ")"; - } - if(enableVerboseDebugging.isSelected()){ - log(advdb+" The searchfilter is optionally appended to login property for additional shared attribute across users."); - } - msg = "STEP-2:TESTING: To validate the test user the following LDAP filtered component will be used to find matching users:\n"; - msg += filter; - log(msg); - // test out the search on the target ldap server - try { - String[] baseDNs = searchBase.split(";"); - for (int x = 0; x < baseDNs.length; x++) { - NamingEnumeration answer = ctx.search( - baseDNs[x], filter, searchControls); - if(enableVerboseDebugging.isSelected()){ - log(advdb+" this search was excuted against DN component '"+baseDNs[x]+"'."); - } - // boolean ldapApiNpeFound = false; - if (!answer.hasMoreElements()) { - msg="STEP-2:WARN Unable to locate a matching users for the filter'"+filter+ - "'. Please check your loginProperty. 
Usually 'cn' or 'uid'"; - log(msg); - continue; - } - // Going with the first match - SearchResult si = (SearchResult) answer.next(); - - // Construct the UserDN - userDN = null; - - try { - userDN = si.getNameInNamespace(); - } catch (UnsupportedOperationException use) { - userDN = new CompositeName(si.getName()).get(0); - if (si.isRelative()) { - userDN += "," + baseDNs[x]; - } - } - - msg = "STEP-2:PASS: The test user '" - + testUserName - + "' was succesfully located, and the following userDN will be used in authorization check:\n"; - msg += userDN; - log(msg); - - ctx.addToEnvironment(Context.SECURITY_PRINCIPAL, userDN); - ctx.addToEnvironment(Context.SECURITY_CREDENTIALS,testUserPassword); - ctx.addToEnvironment(Context.SECURITY_AUTHENTICATION,"simple"); - - // if successful then verified that user and pw - // are valid ldap credentials - ctx.reconnect(null); - msg = "STEP-2:PASS: The user '" - + testUserName - + "' was succesfully authenticated using userDN '" - + userDN + "' and password provided.\n" - +"*Note: the loginProperty must match the loginProperty listed in dn: for the user. 
It is the DN that RHQ will lookup and use."; - log(msg); - } - } catch (Exception ex) { - msg = "STEP-2:FAIL: There was an error while searching for or authenticating the user '" - + testUserName + "'\n"; - msg += ex.getMessage(); - if(enableVerboseDebugging.isSelected()){ - msg = appendStacktraceToMsg(msg, ex); - } - log(msg); - proceed=false; - } - try { - ctx.addToEnvironment(Context.SECURITY_PRINCIPAL, bindUserName); - ctx.addToEnvironment(Context.SECURITY_CREDENTIALS, bindPassword); - ctx.addToEnvironment(Context.SECURITY_AUTHENTICATION, "simple"); - ctx.reconnect(null); - } catch (Exception ex) { - msg = "STEP-2:WARN: There was an error when switching back to the bind user '" - + bindUserName + "'\n"; - msg += ex.getMessage(); - if(enableVerboseDebugging.isSelected()){ - msg = appendStacktraceToMsg(msg, ex); - } - log(msg); - } - - } - // with authentication completed, now check authorization. - // validate filter components to list all available groups - proceed = false; - if (!groupSearchFilter.isEmpty()) { - Set<Map<String, String>> ret = new HashSet<Map<String, String>>(); - String filter = String - .format("(%s)", groupSearchFilter); - msg = "STEP-3:TESTING: This ldap filter " - + filter - + " will be used to locate ALL available LDAP groups"; - log(msg); - try { - String[] baseDNs = searchBase.split(";"); - for (int x = 0; x < baseDNs.length; x++) { - if(enableVerboseDebugging.isSelected()){ - log(advdb+" this search was excuted against DN component '"+baseDNs[x]+"'."); - } - NamingEnumeration answer = ctx.search( - baseDNs[x], filter, searchControls); - - if(enableVerboseGroupParsing.isSelected()){//in this mode report initial state of Enumeration results - log(advdb+" LDAP Group Search/Enumeration found "+((answer.hasMore())? 
" SOME ":" NO ")+" matching group(s)."); - } - - boolean ldapApiEnumerationBugEncountered = false; - while ((!ldapApiEnumerationBugEncountered) - && answer.hasMoreElements()) { - // We use the first match - SearchResult si = null; - try { - si = (SearchResult) answer.next(); - } catch (NullPointerException npe) { - if(enableVerboseDebugging.isSelected()){ - log(advdb+" NullPtr exception detected. If known LDAP api enum npe ignore: "+npe.getMessage()+"."); - } - ldapApiEnumerationBugEncountered = true; - break; - } - Map<String, String> entry = new HashMap<String, String>(); - if(enableVerboseDebugging.isSelected()||enableVerboseGroupParsing.isSelected()){ - Attributes attributeContainer = si.getAttributes(); - NamingEnumeration<? extends Attribute> attributes = attributeContainer.getAll(); - String attributesReturned = " "; - while(attributes.hasMore()){ - attributesReturned+=attributes.next().getID()+","; - } - String dbugMesg="\n"+advdb+" Group search LDAP ("+attributeContainer.size()+") attributes located for group '"+si.getName()+"' are ["+ - attributesReturned.substring(0, attributesReturned.length()-1)+"]."; - //directly update here to shorten messages for lots of groups - testResults.setText(testResults.getText() + dbugMesg); - - //additionally parse attribute ids and values for illegal ldap characters - if(enableVerboseGroupParsing.isSelected()){ - attributes = attributeContainer.getAll(); - String currentAttributeId =""; - String currentValue =""; - //spinder: 3/17/11: should we bail on first bad data or display them all? 
- while(attributes.hasMore()){ - boolean badData = false; - Attribute att = attributes.next(); - currentAttributeId =att.getID(); - if(containsIllegalLdap(currentAttributeId)){ - log(advdb+" LDAP Group: bad atrribute data detected for group '"+si.getName()+"' for attribute '"+currentAttributeId+"'."); - badData=true; - } - if(att.getAll()!=null){ - NamingEnumeration<?> enumer = att.getAll(); - while(enumer.hasMore()){ - currentValue = enumer.next()+""; - if(containsIllegalLdap(currentValue)){ - log(advdb+" LDAP Group: bad data detected for group '"+si.getName()+"' with attribute '"+currentAttributeId+"' and value:"+currentValue); - badData=true; - } - } - } - if(badData){ - log(advdb+"** LDAP Group: Some bad LDAP data detected for group '"+si.getName()+"'."); - } - } - } - } - - Attribute commonNameAttr = si.getAttributes() - .get("cn"); - if(commonNameAttr!=null){ - String name = (String) commonNameAttr.get(); - name = name.trim(); - Attribute desc = si.getAttributes().get( - "description"); - String description = desc != null ? (String) desc - .get() - : ""; - description = description.trim(); - entry.put("id", name); - entry.put("name", name); - entry.put("description", description); - ret.add(entry); - }else{//unable to retrieve details for specific group. - log(advdb+" There was an error retrieving 'cn' attribute for group '"+si.getName()+"'. Not adding to returned list of groups. "); - } - } - } - msg = "STEP-3:TESTING: Using Group Search Filter '" + filter - + "', " + ret.size() - + " ldap group(s) were located.\n"; - if (ret.size() > 0) { - HashMap<String, String>[] ldapLists = new HashMap[ret.size()]; - ret.toArray(ldapLists); - if(enableVerboseGroupParsing.isSelected()){//in this mode go beyond the first ten results. 
- msg += "STEP-3:PASS: Listing 'all' of the ldap groups located: \n"; - for (int i = 0; i < ret.size(); i++) { - msg += ldapLists[i] + "\n"; - } - }else{//otherwise only show first 10[subset of available groups] - msg += "STEP-3:PASS: Listing a few(<=10) of the ldap groups located: \n"; - for (int i = 0; (i < ret.size() && i < 10); i++) { - msg += ldapLists[i] + "\n"; - } - } - proceed = true;// then can proceed to next step. - } - log(msg); - } catch (Exception ex) { - msg = "STEP-3:FAIL: There was an error searching with the groupFilter supplied: " - + groupSearchFilter + "'\n"; - msg += ex.getMessage(); - if(enableVerboseDebugging.isSelected()){ - msg = appendStacktraceToMsg(msg, ex); - } - log(msg); - proceed=false; - } - } else { - msg = "STEP-3:FAIL: Group Search Filter: cannot be empty to proceed."; - log(msg); - proceed=false; - } - // retrieve lists of authorized groups available for the - if (proceed) { - // check groupMember - if (!groupMemberFilter.isEmpty()) { - Set<Map<String, String>> ret = new HashSet<Map<String, String>>(); - String filter = String.format("(&(%s)(%s=%s))", - groupSearchFilter, groupMemberFilter, - LDAPStringUtil.encodeForFilter(userDN)); - msg = "STEP-4:TESTING: about to do ldap search with filter \n'" - + filter - + "'\n to locate groups that test user IS authorized to access."; - log(msg); - try { - String[] baseDNs = searchBase.split(";"); - for (int x = 0; x < baseDNs.length; x++) { - NamingEnumeration answer = ctx.search( - baseDNs[x], filter, searchControls); - boolean ldapApiEnumerationBugEncountered = false; - //BZ:582471- ldap api bug change - while ((!ldapApiEnumerationBugEncountered) - && answer.hasMoreElements()) { - // We use the first match - SearchResult si = null; - try { - si = (SearchResult) answer.next(); - } catch (NullPointerException npe) { - ldapApiEnumerationBugEncountered = true; - break; - } - Map<String, String> entry = new HashMap<String, String>(); - String name = (String) si - 
.getAttributes().get("cn") - .get(); - name = name.trim(); - Attribute desc = si.getAttributes() - .get("description"); - String description = desc != null ? (String) desc - .get() - : ""; - description = description.trim(); - entry.put("id", name); - entry.put("name", name); - entry.put("description", description); - ret.add(entry); - } - } - msg = "STEP-4:TESTING: Using Group Search Filter '" - + filter + "', " + ret.size() - + " ldap group(s) were located.\n"; - if (ret.size() > 0) { - HashMap<String, String>[] ldapLists = new HashMap[ret - .size()]; - ret.toArray(ldapLists); - msg += "STEP-4:PASS: Listing a few of the ldap groups located: \n"; - // iterate over first ten or less to demonstrate retrieve - for (int i = 0; (i < ret.size() && i < 10); i++) { - msg += ldapLists[i] + "\n"; - } - proceed = true;// then can proceed to next - // step. - }else{ - msg+="STEP-4:WARN: With current settings, test user is not authorized for any groups. Is this correct?"; - } - log(msg); - } catch (Exception ex) { - msg = "STEP-4:FAIL: There was an error searching with the groupFilter supplied: " - + groupSearchFilter + "'\n"; - msg += ex.getMessage(); - if(enableVerboseDebugging.isSelected()){ - msg = appendStacktraceToMsg(msg, ex); - } - log(msg); - proceed=false; - } - - } else { - msg = "STEP-4:FAIL: Group Member Filter must be non-empty to proceed with simulating authorization check for test user."; - log(msg); - } - } - if(proceed){ - msg="COMPLETED:PASS: The current settings, for successful steps, should be correct to enter into your RHQ server."; - msg+="\n\n\n\n When you encounter failures, warnings or other unexpected results you should use an external "; - msg+="LDAP search utility to check that the generated filters return the expected LDAP results."; - log(msg); - } - } - } - - private boolean containsIllegalLdap(String currentValue) { - boolean invalidData = false; - if((currentValue!=null)&&(!currentValue.trim().isEmpty())){ - //TODO: spinder 3/17/11: need to 
figure out regex to filter/detect bad data in returned ldap. Giving up for now. -// String regex = "(?<=(?:[^\]|^)(\\)+|[^\]|^)[/,+"><;=#]|(?<=(?:[^\]|^)(\\)+|[^\]|^)\(?!\|[/,+"><;=#]| $|(?<=^\) )|^"; -// regex = "(?<=(?:[^\\]|^)(\\\\)+|[^\\]|^)[/,+\"><;=#]|(?<=(?:[^\\]|^)(\\\\)+|[^\\]|^)\\(?!\\|[/,+\"><;=#]| $|(?<=^\\) )|^"; -// System.out.println("++++++++ CURR VAL:"+currentValue+":INV-CHeck:"+currentValue.matches(",+"\<;\n=/")+":NEWCHECK:"+(currentValue.matches(regex))); -// if(currentValue.matches(",+"\<;\n=/")){ -// invalidData=true; -// } -// String badList = ",+"\<;\n="; - String badList = "+"\<;\n"; - for(char car :currentValue.toCharArray()){ - for(char c :badList.toCharArray()){ - if(car == c){ - invalidData=true; - } - } - } - - } - return invalidData; - } - - private String appendStacktraceToMsg(String msg, Exception ex) { - String moreVerbose = ""; - moreVerbose+=advdb+" Exception type:"+ex.getClass()+"\n"; - moreVerbose+=advdb+" Exception stack trace reference:"+ex.getStackTrace()+"\n"; - if(ex.getStackTrace()!=null){ - StringWriter sw = new StringWriter(); - PrintWriter pw = new PrintWriter(sw); - ex.printStackTrace(pw); - moreVerbose+=advdb+" stack trace reference:"+sw.toString(); - } - msg+="\n"+moreVerbose; - return msg; - } - }); - } - - // throw the label and fields together, two to a row. 
- private HashMap<String, JTextField> loadUiFields(JPanel top, - String[] componentKeys) { - HashMap<String, JTextField> mappings = new HashMap<String, JTextField>(); - for (int i = 0; i < componentKeys.length; i++) { - String firstLabelKey = componentKeys[i]; - String secondLabelKey = componentKeys[++i]; - // locate second key - JPanel row = new JPanel(); - row.setLayout(new FlowLayout(FlowLayout.LEFT)); - JLabel label1 = new JLabel(firstLabelKey); - label1.setSize(textBoxWidth, 5); - JTextField value1 = new JTextField(textBoxWidth); - JLabel label2 = new JLabel(secondLabelKey); - JTextField value2 = new JTextField(textBoxWidth); - row.add(label1); - row.add(value1); - row.add(Box.createRigidArea(new Dimension(0, 5))); - row.add(label2); - row.add(value2); - mappings.put(firstLabelKey, value1); - mappings.put(secondLabelKey, value2); - top.add(row); - } - - return mappings; - } - - private Properties getProperties(String contentProvider) { - Properties env = new Properties(); - env.setProperty(Context.INITIAL_CONTEXT_FACTORY, - "com.sun.jndi.ldap.LdapCtxFactory"); - env.setProperty(Context.PROVIDER_URL, contentProvider); - if(!enableLdapReferral.isSelected()){ - env.setProperty(Context.REFERRAL, "ignore"); - }else{ - String msg="**---- You have chosen to tell LDAP servers to [FOLLOW] context referrals. Default is [IGNORE] referrals. --**"; - log(msg); - env.setProperty(Context.REFERRAL, "follow"); - } - -// // Setup SSL if requested -// String protocol = ssl.isSelected()? 
"ssl":""; -// if ((protocol != null) && protocol.equals("ssl")) { -// String ldapSocketFactory = env -// .getProperty("java.naming.ldap.factory.socket"); -// if (ldapSocketFactory == null) { -// env.put("java.naming.ldap.factory.socket", -// UntrustedSSLSocketFactory.class.getName()); -// } -// env.put(Context.SECURITY_PROTOCOL, "ssl"); -// } - - return env; - } - - private String delineate() { - String line = "-"; - for (int i = 0; i < 30; i++) { - line += "-"; - } - return line; - } - - private void log(String msg) { - String message = "\n" + delineate() + "\n"; - message += msg; - message += "\n" + delineate() + "\n\n"; - testResults.setText(testResults.getText() + message); - } - - private SearchControls getSearchControls() { - int scope = SearchControls.SUBTREE_SCOPE; - int timeLimit = 0; - long countLimit = 0; - String[] returnedAttributes = null; - boolean returnObject = false; - boolean deference = false; - SearchControls constraints = new SearchControls(scope, countLimit, - timeLimit, returnedAttributes, returnObject, deference); - return constraints; - } -} - -class LDAPStringUtil { - - /** - * <p>Encode a string so that it can be used in an LDAP search filter.</p> - * - * <p>The following table shows the characters that are encoded and their - * encoded version.</p> - * - * <table> - * <tr><th align="center">Character</th><th>Encoded As</th></tr> - * <tr><td align="center">*</td><td>\2a</td></tr> - * <tr><td align="center">(</td><td>\28</td></tr> - * <tr><td align="center">)</td><td>\29</td></tr> - * <tr><td align="center"></td><td>\5c</td></tr> - * <tr><td align="center"><code>null</code></td><td>\00</td></tr> - * </table> - * - * <p>In addition to encoding the above characters, any non-ASCII character - * (any character with a hex value greater then <code>0x7f</code>) is also - * encoded and rewritten as a UTF-8 character or sequence of characters in - * hex notation.</p> - * - * @param filterString a string that is to be encoded - * @return the 
encoded version of <code>filterString</code> suitable for use - * in a LDAP search filter - * @see <a href="http://tools.ietf.org/html/rfc4515">RFC 4515</a> - */ - public static String encodeForFilter(final String filterString) { - if (filterString != null && filterString.length() > 0) { - StringBuilder encString = new StringBuilder(filterString.length()); - for (int i = 0; i < filterString.length(); i++) { - char ch = filterString.charAt(i); - switch (ch) { - case '*': // encode a wildcard * character - encString.append("\2a"); - break; - case '(': // encode a open parenthesis ( character - encString.append("\28"); - break; - case ')': // encode a close parenthesis ) character - encString.append("\29"); - break; - case '\': // encode a backslash \ character - encString.append("\5c"); - break; - case '\u0000': // encode a null character - encString.append("\00"); - break; - default: - if (ch <= 0x7f) { // an ASCII character - encString.append(ch); - } else if (ch >= 0x80) { // encode to UTF-8 - try { - byte[] utf8bytes = String.valueOf(ch).getBytes("UTF8"); - for (byte b : utf8bytes) { - encString.append(String.format("\%02x", b)); - } - } catch (UnsupportedEncodingException e) { - // ignore - } - } - } - } - return encString.toString(); - } - return filterString; - } - -} - diff --git a/modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java b/modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java index f3aee15..89b0698 100644 --- a/modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java +++ b/modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java @@ -895,7 +895,9 @@ public class TestLdapSettings extends JFrame { // Load any search filter String groupSearchFilter = (String) systemConfig.get(SystemSetting.LDAP_GROUP_FILTER.getInternalName()); String groupMemberFilter = (String) systemConfig.get(SystemSetting.LDAP_GROUP_MEMBER.getInternalName()); - String testUserDN=userName; + String userDn = (String) 
systemConfig.get(Context.SECURITY_PRINCIPAL); + + String testUserDN = userDn; String ldapServer = (String) systemConfig.get(Context.PROVIDER_URL);
Properties env = getProperties(ldapServer);
commit 2c1c633a863bde145a8372efdc5b9d4c408da62b Author: Simeon Pinder spinder@redhat.com Date: Tue Jul 30 19:07:57 2013 -0400
Make warning to users more prominent and accessible.
diff --git a/modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java b/modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java index b9667ea..f3aee15 100644 --- a/modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java +++ b/modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java @@ -39,6 +39,9 @@ import javax.swing.JButton; import javax.swing.JCheckBox; import javax.swing.JFrame; import javax.swing.JLabel; +import javax.swing.JMenu; +import javax.swing.JMenuBar; +import javax.swing.JMenuItem; import javax.swing.JPanel; import javax.swing.JPasswordField; import javax.swing.JScrollPane; @@ -79,6 +82,7 @@ public class TestLdapSettings extends JFrame { private JCheckBox iterativeVerboseLogging; private JCheckBox enablePosixGroups; private JCheckBox enable32xFeatures; + private JMenuBar menuBar; private String advdb = "**Verbose:debug ----"; private static final String BASEDN_DELIMITER = ";";
@@ -95,10 +99,10 @@ public class TestLdapSettings extends JFrame { final String warnMessage = "<html>***WARNING: Depending upon<br>" + "i)how the ldap server is configured <br>" + "ii)client query paging settings <br>" + - " enabling <b>'more detailed logging'</b>,<br>" + - " <b>'more group parsing'</b> AND/OR <b>'also log to console'</b> may cause the console to hang/freeze <br>" + + " enabling <b>'more verbose logging'</b>,<br>" + + " <b>'more detailed group parsing'</b> AND/OR <b>'also log to console'</b> may cause the console to hang/freeze <br>" + " as the LDAP tool continues to parse large query results. If that occurs it is <br>" + - " suggested that you kill tool and re-run with 'also log to console' so that the console logs<br>" + + " suggested that you stop this tool and re-run your queries with <b>'also log to console'</b> so that the console logs<br>" + " will show which dataset is causing the delay and then you should modify your search|group|member<br>" + " filters accordingly to <b>return smaller results</b> and/or <b>consume larger payloads</b>.<br>" + "***WARNING</html>"; @@ -108,6 +112,12 @@ public class TestLdapSettings extends JFrame {
setTitle("Check LDAP Settings: Simulates LDAP checks/queries of RHQ LDAP integration"); getContentPane().setLayout(new BorderLayout()); + menuBar = new JMenuBar(); + JMenu menu = new JMenu("View ***Warning"); + JMenuItem menuItem = new JMenuItem(warnMessage); + menu.add(menuItem); + menuBar.add(menu); + setJMenuBar(menuBar); // top panel definition top = new JPanel(); top.setLayout(new BoxLayout(top, BoxLayout.Y_AXIS));
commit 7c68b4a80c77e822743fdcac857a9364dbaa02d7 Author: Simeon Pinder spinder@redhat.com Date: Tue Jul 30 17:27:46 2013 -0400
Move under org.rhq like other components.
diff --git a/.classpath b/.classpath index e5e7a21..386316a 100644 --- a/.classpath +++ b/.classpath @@ -215,6 +215,7 @@ <classpathentry kind="src" path="modules/plugins/rhq-storage/src/main/java"/> <classpathentry kind="src" path="modules/enterprise/server/data-migration/src/main/java"/> <classpathentry kind="src" path="modules/enterprise/server/data-migration/src/test/java"/> + <classpathentry kind="src" path="modules/helpers/ldap-tool/src/main/java"/> <classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.7"/> <classpathentry exported="true" kind="var" path="M2_REPO/org/apache/httpcomponents/httpclient/4.2.3/httpclient-4.2.3.jar" sourcepath="M2_REPO/org/apache/httpcomponents/httpclient/4.2.3/httpclient-4.2.3-sources.jar"/> <classpathentry exported="true" kind="var" path="M2_REPO/commons-io/commons-io/2.1/commons-io-2.1.jar" sourcepath="M2_REPO/commons-io/commons-io/2.1/commons-io-2.1-sources.jar"/> diff --git a/modules/helpers/ldap-tool/pom.xml b/modules/helpers/ldap-tool/pom.xml index 28f8678..770f22c 100644 --- a/modules/helpers/ldap-tool/pom.xml +++ b/modules/helpers/ldap-tool/pom.xml @@ -19,6 +19,7 @@ <properties> <executable.name>TestLdapSettings</executable.name> <tool.version>1.0.1</tool.version> + <test.package>org.rhq</test.package> </properties>
<name>RHQ ldap test tool</name> @@ -33,8 +34,8 @@ <configuration> <archive> <manifest> - <packageName>com.test</packageName> - <mainClass>com.test.TestLdapSettings</mainClass> + <packageName>${test.package}</packageName> + <mainClass>${test.package}.TestLdapSettings</mainClass> </manifest> </archive> </configuration> diff --git a/modules/helpers/ldap-tool/src/main/java/com/test/TestLdapSettings.java b/modules/helpers/ldap-tool/src/main/java/com/test/TestLdapSettings.java deleted file mode 100644 index 75ff277..0000000 --- a/modules/helpers/ldap-tool/src/main/java/com/test/TestLdapSettings.java +++ /dev/null @@ -1,1285 +0,0 @@ -package com.test; - -import java.awt.BorderLayout; -import java.awt.Color; -import java.awt.Component; -import java.awt.Dimension; -import java.awt.FlowLayout; -import java.awt.event.ActionEvent; -import java.awt.event.ActionListener; -import java.awt.event.ItemEvent; -import java.awt.event.ItemListener; -import java.awt.event.WindowAdapter; -import java.awt.event.WindowEvent; -import java.io.IOException; -import java.io.PrintWriter; -import java.io.StringWriter; -import java.io.UnsupportedEncodingException; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Properties; -import java.util.Set; - -import javax.naming.Context; -import javax.naming.NamingEnumeration; -import javax.naming.NamingException; -import javax.naming.directory.Attribute; -import javax.naming.directory.Attributes; -import javax.naming.directory.InvalidSearchFilterException; -import javax.naming.directory.SearchControls; -import javax.naming.directory.SearchResult; -import javax.naming.ldap.Control; -import javax.naming.ldap.InitialLdapContext; -import javax.naming.ldap.PagedResultsControl; -import javax.naming.ldap.PagedResultsResponseControl; -import javax.swing.Box; -import javax.swing.BoxLayout; -import javax.swing.JButton; -import javax.swing.JCheckBox; -import javax.swing.JFrame; -import javax.swing.JLabel; -import 
javax.swing.JPanel; -import javax.swing.JPasswordField; -import javax.swing.JScrollPane; -import javax.swing.JTextArea; -import javax.swing.JTextField; -import javax.swing.SwingUtilities; -import javax.swing.UIManager; -import javax.swing.UnsupportedLookAndFeelException; -import javax.swing.border.LineBorder; -import javax.swing.border.TitledBorder; -import javax.swing.event.ChangeEvent; -import javax.swing.event.ChangeListener; - -//import org.rhq.enterprise.server.util.security.UntrustedSSLSocketFactory; - -/* Is a development test tool that allows the user to simulate the RHQ server side - * LDAP calls during auth/authz operations. - * - * The specific LDAP logic below needs to mirror the latest RHQ code and allow the user - * to test our their configuration without requring a specific RHQ/JON build as a dependency. - * - * NOTE: To avoid a runtime dependency on specific versions of RHQ or JON, the small implementation - * methods were copied into this class with minimatl changes for logging and ui messaging. The - * definitive implementation for each 'copied' method can be found in LDAPGroupManagerBean. 
- * - * @author Simeon Pinder - */ -public class TestLdapSettings extends JFrame { - //shared fields - private JTextArea testResults; - private JCheckBox showPasswords; - private JCheckBox ssl; - private JLabel groupPageSizeName; - private JTextField groupMemberQueryValue; - private JTextField testUserNameValue; - private JTextField testUserPasswordValue; - private HashMap<String, JTextField> fieldMappings; - private String[] keys; - private JCheckBox enableLdapReferral; - private JCheckBox enableVerboseDebugging; - private JCheckBox enableVerboseGroupParsing; - private JCheckBox iterativeVerboseLogging; - private JCheckBox enablePosixGroups; - private JCheckBox enable32xFeatures; - private String advdb = "**Verbose:debug ----"; - private static final String BASEDN_DELIMITER = ";"; - - private static final long serialVersionUID = 1L; - int textBoxWidth = 20; - private static JPanel top = null; - private static JPanel testUserRegion = null; - private static Properties env=null; - - public static void main(String args[]) { - new TestLdapSettings(); - } - //After enabling support for Query parsing, we need to warn users of the effects. - final String warnMessage = "<html>***WARNING: Depending upon<br>" + - "i)how the ldap server is configured <br>" + - "ii)client query paging settings <br>" + - " enabling <b>'more detailed logging'</b>,<br>" + - " <b>'more group parsing'</b> AND/OR <b>'also log to console'</b> may cause the console to hang/freeze <br>" + - " as the LDAP tool continues to parse large query results. 
If that occurs it is <br>" + - " suggested that you kill tool and re-run with 'also log to console' so that the console logs<br>" + - " will show which dataset is causing the delay and then you should modify your search|group|member<br>" + - " filters accordingly to <b>return smaller results</b> and/or <b>consume larger payloads</b>.<br>" + - "***WARNING</html>"; - - // Configure window properties - private TestLdapSettings() { - - setTitle("Check LDAP Settings: Simulates LDAP checks/queries of RHQ LDAP integration"); - getContentPane().setLayout(new BorderLayout()); - // top panel definition - top = new JPanel(); - top.setLayout(new BoxLayout(top, BoxLayout.Y_AXIS)); - top.setBorder(LineBorder.createGrayLineBorder()); - //define checkbox here as it's checked when generating UI. - showPasswords = new JCheckBox("show passwords:"); - showPasswords.setSelected(false); - - keys = new String[] { "URL:", "Search Filter:", - "Search Base:","Login Property", - "Username:", "Group Search Filter:", - "Password:", "Group Member Filter:", - }; - fieldMappings = loadUiFields(top, keys); - - //add the two checkboxes for additiona debugging options - enableLdapReferral= new JCheckBox("[follow] ldap referrals"); - enableLdapReferral.setSelected(false); - enableVerboseDebugging= new JCheckBox("more verbose logging"); - enableVerboseDebugging.setSelected(false); - enableVerboseDebugging.setToolTipText(warnMessage); - enableVerboseGroupParsing= new JCheckBox("more detailed group parsing"); - enableVerboseGroupParsing.setSelected(false); - enableVerboseGroupParsing.setToolTipText("*Take care when using this mode with a large number of groups* Every group discovered is parsed/listed."); - iterativeVerboseLogging= new JCheckBox("also log to console"); - iterativeVerboseLogging.setSelected(false); - iterativeVerboseLogging.setToolTipText("This mode is useful when the test tool is having difficulty returning results from large queries."); - 
iterativeVerboseLogging.setToolTipText(warnMessage); - enablePosixGroups= new JCheckBox("is Posix Group"); - enablePosixGroups.setSelected(false); - enablePosixGroups.setEnabled(false); - - //put into 3.2.x functionality row - JPanel jon32xRegion = new JPanel(); - jon32xRegion.setLayout(new FlowLayout(FlowLayout.LEFT)); - LineBorder jon32xLineBorder = new LineBorder(Color.BLACK, 2); - TitledBorder jon32xBorder = new TitledBorder(jon32xLineBorder, "JON 3.2.x/RHQ 4.8.x specific features:"); - jon32xRegion.setBorder(jon32xBorder); - enable32xFeatures= new JCheckBox("enable JON 3.2.x/RHQ 4.8.x features"); - enable32xFeatures.setToolTipText("This enables features not available before RHQ 4.8.x/JON 3.2.x."); - enable32xFeatures.setSelected(false); - enable32xFeatures.addItemListener(new ItemListener() { - @Override - public void itemStateChanged(ItemEvent e) { - if(enable32xFeatures.isSelected()){ - groupPageSizeName.setEnabled(true); - groupMemberQueryValue.setEnabled(true); - groupMemberQueryValue.setEditable(true); - groupMemberQueryValue.setText("1000"); - enablePosixGroups.setEnabled(true); - }else{ - groupMemberQueryValue.setText(""); - groupPageSizeName.setEnabled(false); - groupMemberQueryValue.setEnabled(false); - groupMemberQueryValue.setEditable(false); - enablePosixGroups.setEnabled(false); - enablePosixGroups.setSelected(false); - } - } - }); - - jon32xRegion.add(enable32xFeatures); - groupPageSizeName = new JLabel("Group Query Page Size:"); - groupPageSizeName.setEnabled(false); - groupMemberQueryValue = new JTextField(10); - groupMemberQueryValue.setText("1000"); - groupMemberQueryValue.setEditable(false); - jon32xRegion.add(groupPageSizeName); - jon32xRegion.add(groupMemberQueryValue); - jon32xRegion.add(enablePosixGroups); - top.add(jon32xRegion); - - //put into row display - JPanel advancedDebugRegion = new JPanel(); - advancedDebugRegion.setLayout(new FlowLayout(FlowLayout.LEFT)); - LineBorder advancedBorder = new LineBorder(Color.BLACK, 2); - 
TitledBorder debugBorder = new TitledBorder(advancedBorder, "Debug: **Warning --<hover HERE>**"); - advancedDebugRegion.setBorder(debugBorder); - advancedDebugRegion.add(enableLdapReferral); - advancedDebugRegion.add(enableVerboseDebugging); - advancedDebugRegion.add(enableVerboseGroupParsing); - advancedDebugRegion.add(iterativeVerboseLogging); - advancedDebugRegion.setToolTipText(warnMessage); - top.add(advancedDebugRegion); - - JPanel securityPanel = new JPanel(); - securityPanel.setLayout(new FlowLayout(FlowLayout.LEFT)); - showPasswords.addItemListener(new ItemListener() { - @Override - public void itemStateChanged(ItemEvent e) { - SwingUtilities.invokeLater(new Runnable() { - @Override - public void run() { - //store off existing value - String existingValue = ""; - String existingTestUserPass = ""; - JTextField current = fieldMappings.get("Password:"); - if(current instanceof JPasswordField){ - JPasswordField pass = ((JPasswordField)current); - if(pass!=null){ - char[] charArray = pass.getPassword(); - if(charArray.length>0){ - existingValue = new String(charArray); - } - } - }else{ - existingValue = current.getText(); - } - //save off test user password as well - if(testUserPasswordValue instanceof JPasswordField){ - JPasswordField pass = ((JPasswordField)testUserPasswordValue); - if(pass!=null){ - char[] charArray = pass.getPassword(); - if(charArray.length>0){ - existingTestUserPass = new String(charArray); - } - } - }else{ - existingTestUserPass=testUserPasswordValue.getText(); - } - - JTextField updatedContainer = null; - if(showPasswords.isSelected()){ - updatedContainer = new JTextField(textBoxWidth); - updatedContainer.setText(existingValue); - testUserPasswordValue = new JTextField(textBoxWidth); - testUserPasswordValue.setText(existingTestUserPass); - }else{ - updatedContainer = new JPasswordField(textBoxWidth); - updatedContainer.setText(existingValue); - testUserPasswordValue = new JPasswordField(textBoxWidth); - 
testUserPasswordValue.setText(existingTestUserPass); - } - //locate the JPanel and rebuild it Should be at index 3 - JPanel passwordRow = (JPanel) top.getComponent(3); -// JTextField jf = (JTextField) passwordRow.getComponent(1); - //store off existing components - Component[] existing = new Component[passwordRow.getComponentCount()]; - for(int i=0; i<passwordRow.getComponentCount();i++){ - existing[i] = passwordRow.getComponent(i); - } - passwordRow.removeAll(); - for(int j=0;j<existing.length;j++){ - if(j==1){//insert new JTextField instead - passwordRow.add(updatedContainer); - }else{ - passwordRow.add(existing[j]); - } - } - //reload testUserRegion - //store off existing components - Component[] existingTest = new Component[testUserRegion.getComponentCount()]; - for(int i=0; i<testUserRegion.getComponentCount();i++){ - existingTest[i] = testUserRegion.getComponent(i); - } - testUserRegion.removeAll(); - for(int j=0;j<existingTest.length;j++){ - if(j==3){//insert new JTextField instead - testUserRegion.add(testUserPasswordValue); - }else{ - testUserRegion.add(existingTest[j]); - } - } - - top.revalidate(); - top.repaint(); - } - }); - } - }); - securityPanel.add(showPasswords); - ssl = new JCheckBox("SSL:"); - ssl.setEnabled(false); - securityPanel.add(ssl); - top.add(securityPanel); - - // test user auth region - testUserRegion = new JPanel(); - testUserRegion.setLayout(new FlowLayout(FlowLayout.LEFT)); - LineBorder border = new LineBorder(Color.BLUE, 2); - TitledBorder tBorder = new TitledBorder(border, "Authentication/Authorization Check Credentials: (insert valid ldap user assigned to group)"); - testUserRegion.setBorder(tBorder); - JLabel testUserName = new JLabel("Test UserName:"); - testUserNameValue = new JTextField(textBoxWidth); - JLabel testUserPassword = new JLabel("Test Password:"); -// testUserPasswordValue = new JTextField(textBoxWidth); - testUserPasswordValue = new JPasswordField(textBoxWidth); - testUserRegion.add(testUserName); - 
testUserRegion.add(testUserNameValue); - testUserRegion.add(testUserPassword); - testUserRegion.add(testUserPasswordValue); - top.add(testUserRegion); - - // center - JPanel center = new JPanel(); - center.setLayout(new BoxLayout(center, BoxLayout.X_AXIS)); - // build center panel - buildCenterPanel(center); - - // final component layout - getContentPane().add(top, BorderLayout.NORTH); - getContentPane().add(center, BorderLayout.CENTER); - this.setSize(720, 700); - addWindowListener(new WindowAdapter() { - public void windowClosing(WindowEvent e) { - System.exit(0); - } - }); - setVisible(true); - } - - // define the center display panel. - private void buildCenterPanel(JPanel center) { - // First element is Test Button - JButton test = new JButton("Test Settings"); - center.add(test); - // second is large text box that display ldap queries - testResults = new JTextArea("(click button to test settings values: simulates 4 separate checks showing ldap filters used)", - 40, 40); - JScrollPane jsp = new JScrollPane(testResults); - center.add(jsp); - test.addActionListener(new ActionListener() { - public void actionPerformed(ActionEvent e) { - testResults.setText("");//clear out empty msg - //trim spaces from all fields - String ldapServer = fieldMappings.get(keys[0]).getText().trim(); - String searchFilter = fieldMappings.get(keys[1]).getText().trim(); - String searchBase = fieldMappings.get(keys[2]).getText().trim(); - String loginProperty = fieldMappings.get(keys[3]).getText().trim(); - String bindUserName = fieldMappings.get(keys[4]).getText().trim(); - String groupSearchFilter = fieldMappings.get(keys[5]).getText().trim(); - String bindPassword = fieldMappings.get(keys[6]).getText().trim(); - String groupMemberFilter = fieldMappings.get(keys[7]).getText().trim(); - String groupMemberQuerySize = groupMemberQueryValue.getText().trim(); - String testUserName = testUserNameValue.getText().trim(); - String testUserPassword = testUserPasswordValue.getText().trim(); - // 
validate initial required elements - String msg = null; - boolean proceed = true; - //valid required details set. - if (ldapServer.isEmpty() || bindUserName.isEmpty() - || bindPassword.isEmpty() || searchBase.isEmpty()) { - msg ="STEP-1:FAIL: "+ keys[0] + ", " + keys[2] + ", " + keys[4] + ", " - + keys[6] + " cannot be empty to proceed."; - log(msg); - proceed = false; - } - env = null; - InitialLdapContext ctx = null; - if (proceed) {// attempt initial ldap bind from RHQ server - msg = "STEP-1:TESTING: Attempting to bind to server:" + ldapServer - + "\n with user '" + bindUserName - + "' and password entered."; - log(msg); - env = getProperties(ldapServer); - env.setProperty(Context.SECURITY_PRINCIPAL, bindUserName); - env.setProperty(Context.SECURITY_CREDENTIALS, bindPassword); - env.setProperty(Context.SECURITY_AUTHENTICATION, "simple"); - //put the rest of the LDAP properties into the Properties instance for use later. - //there still needs to be separate variables since some are for UI validation. - env.setProperty(SystemSetting.LDAP_GROUP_FILTER.getInternalName(), groupSearchFilter); - env.setProperty(SystemSetting.LDAP_GROUP_MEMBER.getInternalName(), groupMemberFilter); - env.setProperty(SystemSetting.LDAP_BASE_DN.getInternalName(), searchBase); - env.setProperty(SystemSetting.LDAP_LOGIN_PROPERTY.getInternalName(), loginProperty); - env.setProperty(SystemSetting.LDAP_BIND_DN.getInternalName(), bindUserName); - env.setProperty(SystemSetting.LDAP_BIND_PW.getInternalName(), bindPassword); - env.setProperty(SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.getInternalName(), groupMemberQuerySize); - - for(Object key :env.keySet()){ - System.out.println(key+"="+env.getProperty(key+"")); - } - - try { - ctx = new InitialLdapContext(env, null); - msg = "STEP-1:PASS: LDAP bind credentials are correct. 
Successfully connected to '" - + ldapServer - + "'.\n This means the LDAP Bind credentials for the RHQ Server authentication/authorization requests to ldap server " - + "are correct."; - if(enableVerboseDebugging.isSelected()){ - msg+="\n"+advdb+" LDAP simple authentication bind successful."; - } - log(msg); - proceed = true; - } catch (Exception ex) { - msg = "STEP-1:FAIL: Unable to connect to the LDAP server with credentials specified.\n"; - msg+="Exception:"+ex.getMessage(); - if(enableVerboseDebugging.isSelected()){ - msg = appendStacktraceToMsg(msg, ex); - } - log(msg); - proceed = false; - } - } - if (proceed) {// retrieve test credentials to test run auth - // load search controls - SearchControls searchControls = getSearchControls(); - // validating searchFilter and test user/pass creds - proceed = true; - if (testUserName.isEmpty() || (testUserPassword.isEmpty())) { - msg = "STEP-2:FAIL: Test Username/Password fields cannot be empty for this step."; - log(msg); - proceed = false; - } - // testing a valid user involves a filtered ldap search - // using the loginProperty, and optionally searchFilter - String userDN = ""; - if (proceed) { - // default loginProperty to cn if it's not set - if (loginProperty.isEmpty()) { - loginProperty = "cn"; - if(enableVerboseDebugging.isSelected()){ - String mesg = "As you have not specified a login property, defaulting to 'cn'"; - log(advdb+" "+msg); - } - } - String filter; - if (!searchFilter.isEmpty()) { - filter = "(&(" + loginProperty + "=" + testUserName - + ")" + "(" + searchFilter + "))"; - } else { - filter = "(" + loginProperty + "=" + testUserName - + ")"; - } - if(enableVerboseDebugging.isSelected()){ - log(advdb+" The searchfilter is optionally appended to login property for additional shared attribute across users."); - } - msg = "STEP-2:TESTING: To validate the test user the following LDAP filtered component will be used to find matching users:\n"; - msg += filter; - log(msg); - // test out the search on the 
target ldap server - try { - String[] baseDNs = searchBase.split(";"); - for (int x = 0; x < baseDNs.length; x++) { - NamingEnumeration answer = ctx.search( - baseDNs[x], filter, searchControls); - if(enableVerboseDebugging.isSelected()){ - log(advdb+" this search was excuted against DN component '"+baseDNs[x]+"'."); - } - // boolean ldapApiNpeFound = false; - if (!answer.hasMoreElements()) { - msg="STEP-2:WARN Unable to locate a matching users for the filter'"+filter+ - "'. Please check your loginProperty. Usually 'cn' or 'uid'"; - log(msg); - continue; - } - // Going with the first match - SearchResult si = (SearchResult) answer.next(); - - // Construct the UserDN - userDN = si.getName() + "," + baseDNs[x]; - msg = "STEP-2:PASS: The test user '" - + testUserName - + "' was succesfully located, and the following userDN will be used in authorization check:\n"; - msg += userDN; - log(msg); - - ctx.addToEnvironment(Context.SECURITY_PRINCIPAL, userDN); - ctx.addToEnvironment(Context.SECURITY_CREDENTIALS,testUserPassword); - ctx.addToEnvironment(Context.SECURITY_AUTHENTICATION,"simple"); - - // if successful then verified that user and pw - // are valid ldap credentials - ctx.reconnect(null); - msg = "STEP-2:PASS: The user '" - + testUserName - + "' was succesfully authenticated using userDN '" - + userDN + "' and password provided.\n" - +"*Note: the loginProperty must match the loginProperty listed in dn: for the user. It is the DN that RHQ will lookup and use."; - log(msg); - } - } catch (Exception ex) { - msg = "STEP-2:FAIL: There was an error while searching for or authenticating the user '" - + testUserName + "'\n"; - msg += ex.getMessage(); - if(enableVerboseDebugging.isSelected()){ - msg = appendStacktraceToMsg(msg, ex); - } - log(msg); - proceed=false; - } - } - // with authentication completed, now check authorization. 
- // validate filter components to list all available groups - proceed = false; - if (!groupSearchFilter.isEmpty()) { - Set<Map<String, String>> ret = new HashSet<Map<String, String>>(); - String filter = null; - - if (groupSearchFilter.startsWith("(") && groupSearchFilter.endsWith(")")){ - filter = groupSearchFilter; // RFC 2254 does not allow for ((expression)) - }else{ - filter = String - .format("(%s)", groupSearchFilter); - } - msg = "STEP-3:TESTING: This ldap filter " - + filter - + " will be used to locate ALL available LDAP groups"; - log(msg); - - Properties systemConfig = populateProperties(env); - - ret = buildGroup(systemConfig, filter); - msg = "STEP-3:TESTING: Using Group Search Filter '" - + filter + "', " + ret.size() - + " ldap group(s) were located.\n"; - if (ret.size() > 0) { - HashMap<String, String>[] ldapLists = new HashMap[ret - .size()]; - ret.toArray(ldapLists); - // in this mode go beyond the first ten results. - if (enableVerboseGroupParsing.isSelected()) { - msg += "STEP-3:PASS: Listing 'all' of the ldap groups located: \n"; - for (int i = 0; i < ret.size(); i++) { - msg += ldapLists[i] + "\n"; - } - } else {// otherwise only show first 10[subset of - // available groups] - msg += "STEP-3:PASS: Listing a few(<=10) of the ldap groups located: \n"; - for (int i = 0; (i < ret.size() && i < 10); i++) { - msg += ldapLists[i] + "\n"; - } - } - proceed = true;// then can proceed to next step. 
- } - log(msg); - } else { - msg = "STEP-3:FAIL: Group Search Filter: cannot be empty to proceed."; - log(msg); - proceed=false; - } - // retrieve lists of authorized groups available for the - if (proceed) { - // check groupMember - if (!groupMemberFilter.isEmpty()) { -// Map<String, String> userDetails = new HashMap<String, String>(); -// userDetails = findLdapUserDetails(userDN); - Set<String> userDetails = findAvailableGroupsFor(testUserName); - - if(!userDetails.isEmpty()){ - proceed=true; - } - } else { - msg = "STEP-4:FAIL: Group Member Filter must be non-empty to proceed with simulating authorization check for test user."; - log(msg); - } - } - if(proceed){ - msg="COMPLETED:PASS: The current settings, for successful steps, should be correct to enter into your RHQ server."; - msg+="\n\n\n\n When you encounter failures, warnings or other unexpected results you should use an external "; - msg+="LDAP search utility to check that the generated filters return the expected LDAP results."; - log(msg); - } - } - } - }); - } - - private String appendStacktraceToMsg(String msg, Exception ex) { - String moreVerbose = ""; - moreVerbose+=advdb+" Exception type:"+ex.getClass()+"\n"; - moreVerbose+=advdb+" Exception stack trace reference:"+ex.getStackTrace()+"\n"; - if(ex.getStackTrace()!=null){ - StringWriter sw = new StringWriter(); - PrintWriter pw = new PrintWriter(sw); - ex.printStackTrace(pw); - moreVerbose+=advdb+" stack trace reference:"+sw.toString(); - } - msg+="\n"+moreVerbose; - return msg; - } - - private boolean containsIllegalLdap(String currentValue) { - boolean invalidData = false; - if((currentValue!=null)&&(!currentValue.trim().isEmpty())){ - //TODO: spinder 3/17/11: need to figure out regex to filter/detect bad data in returned ldap. Giving up for now. 
-// String regex = "(?<=(?:[^\]|^)(\\)+|[^\]|^)[/,+"><;=#]|(?<=(?:[^\]|^)(\\)+|[^\]|^)\(?!\|[/,+"><;=#]| $|(?<=^\) )|^"; -// regex = "(?<=(?:[^\\]|^)(\\\\)+|[^\\]|^)[/,+\"><;=#]|(?<=(?:[^\\]|^)(\\\\)+|[^\\]|^)\\(?!\\|[/,+\"><;=#]| $|(?<=^\\) )|^"; -// System.out.println("++++++++ CURR VAL:"+currentValue+":INV-CHeck:"+currentValue.matches(",+"\<;\n=/")+":NEWCHECK:"+(currentValue.matches(regex))); -// if(currentValue.matches(",+"\<;\n=/")){ -// invalidData=true; -// } -// String badList = ",+"\<;\n="; - String badList = "+"\<;\n"; - for(char car :currentValue.toCharArray()){ - for(char c :badList.toCharArray()){ - if(car == c){ - invalidData=true; - } - } - } - - } - return invalidData; - } - /** - * @throws NamingException - * @see org.jboss.security.auth.spi.UsernamePasswordLoginModule#validatePassword(java.lang.String,java.lang.String) - */ - protected Set<Map<String, String>> buildGroup(Properties systemConfig, String filter) { - Set<Map<String, String>> groupDetailsMap = new HashSet<Map<String, String>>(); - // Load our LDAP specific properties - // Load the BaseDN - String baseDN = (String) systemConfig.get(SystemSetting.LDAP_BASE_DN.getInternalName()); - - // Load the LoginProperty - String loginProperty = (String) systemConfig.get(SystemSetting.LDAP_LOGIN_PROPERTY.getInternalName()); - if (loginProperty == null) { - // Use the default - loginProperty = "cn"; - } - // Load any information we may need to bind - String bindDN = (String) systemConfig.get(SystemSetting.LDAP_BIND_DN.getInternalName()); - String bindPW = (String) systemConfig.get(SystemSetting.LDAP_BIND_PW.getInternalName()); - if (bindDN != null) { - systemConfig.setProperty(Context.SECURITY_PRINCIPAL, bindDN); - systemConfig.setProperty(Context.SECURITY_CREDENTIALS, bindPW); - systemConfig.setProperty(Context.SECURITY_AUTHENTICATION, "simple"); - } - try { - InitialLdapContext ctx = new InitialLdapContext(systemConfig, null); - SearchControls searchControls = getSearchControls(); - /*String filter 
= "(&(objectclass=groupOfUniqueNames)(uniqueMember=uid=" + userName - + ",ou=People, dc=rhndev, dc=redhat, dc=com))";*/ - - //modify the search control to only include the attributes we will use - String[] attributes = { "cn", "description" }; - searchControls.setReturningAttributes(attributes); - - //BZ:964250: add rfc 2696 - //default to 1000 results. System setting page size from UI should be non-negative integer > 0. - //additionally as system settings are modifiable via CLI which may not have param checking enabled do some - //more checking. - int defaultPageSize = 1000; - // only if they're enabled in the UI. - if (enable32xFeatures.isSelected()) { - String groupPageSize = systemConfig.getProperty( - SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE - .getInternalName(), "" + defaultPageSize); - if ((groupPageSize != null) - && (!groupPageSize.trim().isEmpty())) { - int passedInPageSize = -1; - try { - passedInPageSize = Integer - .valueOf(groupPageSize.trim()); - if (passedInPageSize > 0) { - defaultPageSize = passedInPageSize; - if(enableVerboseDebugging.isSelected()){ - log(advdb - + " LDAP Group Query Page Sizing of '"+defaultPageSize+"' is being requested from server."); - } - } - } catch (NumberFormatException nfe) { - // log issue and do nothing. Go with the default. - String msg = "LDAP Group Page Size passed in '" - + groupPageSize - + "' in is invalid. Defaulting to 1000 results." - + nfe.getMessage(); - log(msg); - } - } - ctx.setRequestControls(new Control[] { new PagedResultsControl( - defaultPageSize, Control.CRITICAL) }); - } - // Loop through each configured base DN. It may be useful - // in the future to allow for a filter to be configured for - // each BaseDN, but for now the filter will apply to all. 
- String[] baseDNs = baseDN.split(BASEDN_DELIMITER); - - for (int x = 0; x < baseDNs.length; x++) { - if (enableVerboseDebugging.isSelected()) { - log(advdb - + " this search was excuted against DN component '" - + baseDNs[x] + "'."); - } - executeGroupSearch(filter, groupDetailsMap, ctx, searchControls, baseDNs, x); - - // continually parsing pages of results until we're done. - // only if they're enabled in the UI. - if (enable32xFeatures.isSelected()) { - // handle paged results if they're being used here - byte[] cookie = null; - Control[] controls = ctx.getResponseControls(); - if (controls != null) { - for (Control control : controls) { - if (control instanceof PagedResultsResponseControl) { - PagedResultsResponseControl pagedResult = (PagedResultsResponseControl) control; - cookie = pagedResult.getCookie(); - } - } - } - - while (cookie != null) { - String msg = "RFC 2696 is supported by the server and we are paging through the results. "+ - groupDetailsMap.size()+" results returned so far."; - if(enableVerboseGroupParsing.isSelected()){ - log(advdb - + msg); - } - // ensure the next requests contains the session/cookie - // details - ctx.setRequestControls(new Control[] { new PagedResultsControl( - defaultPageSize, cookie, Control.CRITICAL) }); - executeGroupSearch(filter, groupDetailsMap, ctx, - searchControls, baseDNs, x); - // empty out cookie - cookie = null; - // test for further iterations - controls = ctx.getResponseControls(); - if (controls != null) { - for (Control control : controls) { - if (control instanceof PagedResultsResponseControl) { - PagedResultsResponseControl pagedResult = (PagedResultsResponseControl) control; - cookie = pagedResult.getCookie(); - } - } - } - } - } - }//end of for loop - } catch (NamingException e) { - if (e instanceof InvalidSearchFilterException) { - InvalidSearchFilterException fException = (InvalidSearchFilterException) e; - String message = "The ldap group filter defined is invalid "; - log(message); - } - 
//TODO: check for ldap connection/unavailable/etc. exceptions. - else { - String mesg = "LDAP communication error: " + e.getMessage(); - log(mesg); - } - } catch (IOException iex) { - String msg = "Unexpected LDAP communciation error:" + iex.getMessage(); - log(msg); - } - - return groupDetailsMap; - } - - /** Executes the LDAP group query using the filters, context and search controls, etc. parameters passed in. - * The matching groups located during processing this pages of results are added as new entries to the - * groupDetailsMap passed in. - * - * @param filter - * @param groupDetailsMap - * @param ctx - * @param searchControls - * @param baseDNs - * @param x - * @throws NamingException - */ - private void executeGroupSearch(String filter, Set<Map<String, String>> groupDetailsMap, InitialLdapContext ctx, - SearchControls searchControls, String[] baseDNs, int x) throws NamingException { - //execute search based on controls and context passed in. - NamingEnumeration<SearchResult> answer = ctx.search(baseDNs[x], filter, searchControls); - boolean ldapApiEnumerationBugEncountered = false; - while ((!ldapApiEnumerationBugEncountered) && answer.hasMoreElements()) {//BZ:582471- ldap api bug change - // We use the first match - SearchResult si = null; - try { - si = answer.next(); - } catch (NullPointerException npe) { - if (enableVerboseDebugging.isSelected()) { - log(advdb - + " NullPtr exception detected. If known LDAP api enum npe ignore: " - + npe.getMessage() + "."); - } - ldapApiEnumerationBugEncountered = true; - break; - } - - if (enableVerboseDebugging.isSelected() - || enableVerboseGroupParsing.isSelected()) { - Attributes attributeContainer = si.getAttributes(); - NamingEnumeration<? 
extends Attribute> attributes = attributeContainer - .getAll(); - String attributesReturned = " "; - while (attributes.hasMore()) { - attributesReturned += attributes.next().getID() + ","; - } - String dbugMesg = "\n" - + advdb - + " Group search LDAP (" - + attributeContainer.size() - + ") attributes located for group '" - + si.getName() - + "' are [" - + attributesReturned.substring(0, - attributesReturned.length() - 1) + "]."; - // directly update here to shorten messages for lots of groups - testResults.setText(testResults.getText() + dbugMesg); - //This flag can be used in the unlikely case that the UI hangs during a test operation.: - if(iterativeVerboseLogging.isSelected()){ - System.out.println(dbugMesg); - } - - // additionally parse attribute ids and values for illegal ldap - // characters - if (enableVerboseGroupParsing.isSelected()) { - attributes = attributeContainer.getAll(); - String currentAttributeId = ""; - String currentValue = ""; - // spinder: 3/17/11: should we bail on first bad data or - // display them all? 
- while (attributes.hasMore()) { - boolean badData = false; - Attribute att = attributes.next(); - currentAttributeId = att.getID(); - if (containsIllegalLdap(currentAttributeId)) { - log(advdb - + " LDAP Group: bad atrribute data detected for group '" - + si.getName() + "' for attribute '" - + currentAttributeId + "'."); - badData = true; - } - if (att.getAll() != null) { - NamingEnumeration<?> enumer = att.getAll(); - while (enumer.hasMore()) { - currentValue = enumer.next() + ""; - if (containsIllegalLdap(currentValue)) { - log(advdb - + " LDAP Group: bad data detected for group '" - + si.getName() - + "' with attribute '" - + currentAttributeId - + "' and value:" + currentValue); - badData = true; - } - } - } - if (badData) { - log(advdb - + "** LDAP Group: Some bad LDAP data detected for group '" - + si.getName() + "'."); - } - } - } - } - - Map<String, String> entry = new HashMap<String, String>(); - // String name = (String) si.getAttributes().get("cn").get(); - Attribute commonNameAttr = si.getAttributes().get("cn"); - if (commonNameAttr != null) { - String name = (String) commonNameAttr.get(); - name = name.trim(); - Attribute desc = si.getAttributes().get("description"); - String description = desc != null ? (String) desc.get() : ""; - description = description.trim(); - entry.put("id", name); - entry.put("name", name); - entry.put("description", description); - groupDetailsMap.add(entry); - } else {// unable to retrieve details for specific group. - log(advdb - + " There was an error retrieving 'cn' attribute for group '" - + si.getName() - + "'. Not adding to returned list of groups. 
"); - } - } - } - - public Map<String, String> findLdapUserDetails(String userName) { - // Load our LDAP specific properties - Properties systemConfig = env; - HashMap<String, String> userDetails = new HashMap<String, String>(); - - // Load the BaseDN - String baseDN = (String) systemConfig.get(SystemSetting.LDAP_BASE_DN.getInternalName()); - - // Load the LoginProperty - String loginProperty = (String) systemConfig.get(SystemSetting.LDAP_LOGIN_PROPERTY.getInternalName()); - if (loginProperty == null) { - // Use the default - loginProperty = "cn"; - } - // Load any information we may need to bind - String bindDN = (String) systemConfig.get(SystemSetting.LDAP_BIND_DN.getInternalName()); - String bindPW = (String) systemConfig.get(SystemSetting.LDAP_BIND_PW.getInternalName()); - - // Load any search filter - String groupSearchFilter = (String) systemConfig.get(SystemSetting.LDAP_GROUP_FILTER.getInternalName()); - String groupMemberFilter = (String) systemConfig.get(SystemSetting.LDAP_GROUP_MEMBER.getInternalName()); - String testUserDN=userName; - String ldapServer = (String) systemConfig.get(Context.PROVIDER_URL); - - Properties env = getProperties(ldapServer); - - if (bindDN != null) { - env.setProperty(Context.SECURITY_PRINCIPAL, bindDN); - env.setProperty(Context.SECURITY_CREDENTIALS, bindPW); - env.setProperty(Context.SECURITY_AUTHENTICATION, "simple"); - } - - try { - InitialLdapContext ctx = new InitialLdapContext(env, null); - SearchControls searchControls = getSearchControls(); - - String filter = String.format("(&(%s)(%s=%s))", - groupSearchFilter, groupMemberFilter, - testUserDN); - - generateUiLoggingForStep4LdapFilter(userName, filter); - - // Loop through each configured base DN. It may be useful - // in the future to allow for a filter to be configured for - // each BaseDN, but for now the filter will apply to all. 
- String[] baseDNs = baseDN.split(BASEDN_DELIMITER);
- for (int x = 0; x < baseDNs.length; x++) {
- NamingEnumeration<SearchResult> answer = ctx.search(baseDNs[x], filter, searchControls);
- if (!answer.hasMoreElements()) { //BZ:582471- ldap api bug change
- // Nothing found for this DN, move to the next one if we have one.
- continue;
- }
- 
- // We use the first match
- SearchResult si = answer.next();
- //generate the DN
- String userDN = null;
- try {
- userDN = si.getNameInNamespace();
- } catch (UnsupportedOperationException use) {
- userDN = si.getName();
- if (userDN.startsWith("\"")) {
- userDN = userDN.substring(1, userDN.length());
- }
- if (userDN.endsWith("\"")) {
- userDN = userDN.substring(0, userDN.length() - 1);
- }
- userDN = userDN + "," + baseDNs[x];
- }
- userDetails.put("dn", userDN);
- 
- // Construct the UserDN
- NamingEnumeration<String> keys = si.getAttributes().getIDs();
- while (keys.hasMore()) {
- String key = keys.next();
- Attribute value = si.getAttributes().get(key);
- if ((value != null) && (value.get() != null)) {
- userDetails.put(key, value.get().toString());
- }
- }
-// return userDetails;
- }//end of for loop
- generateUiLoggingStep4Authz(filter);
- return userDetails;
- } catch (Exception ex) {
- generateUiLoggingStep4Exception(ex);
- }
- return userDetails;
- }
- 
- public Set<String> findAvailableGroupsFor(String userName) {
- // Load our LDAP specific properties
- Properties options = env;
- String groupFilter = options.getProperty(SystemSetting.LDAP_GROUP_FILTER.getInternalName(), "");
- String groupMember = options.getProperty(SystemSetting.LDAP_GROUP_MEMBER.getInternalName(), "");
- String groupUsePosix = options.getProperty(SystemSetting.LDAP_GROUP_USE_POSIX.getInternalName(), "false");
- if (groupUsePosix == null) {
- groupUsePosix = Boolean.toString(false);//default to false
- }
- boolean usePosixGroups = Boolean.valueOf(groupUsePosix);
- String userAttribute = getUserAttribute(options, userName, usePosixGroups);
- 
Set<String> ldapSet = new HashSet<String>(); - - if (userAttribute != null && userAttribute.trim().length() > 0) { - //TODO: spinder 4/21/10 put in error/debug logging messages for badly formatted filter combinations - String filter = ""; - //form assumes examples where groupFilter is like 'objectclass=groupOfNames' and groupMember is 'member' - // to produce ldap filter like (&(objectclass=groupOfNames)(member=cn=Administrator,ou=People,dc=test,dc=com)) - // or like (&(objectclass=groupOfNames)(memberUid=Administrator)) for posixGroups. - filter = String.format("(&(%s)(%s=%s))", groupFilter, groupMember, encodeForFilter(userAttribute)); - - Set<Map<String, String>> matched = buildGroup(options, filter); -// log.trace("Located '" + matched.size() + "' LDAP groups for user '" + userName -// + "' using following ldap filter '" + filter + "'."); - - //iterate to extract just the group names. - for (Map<String, String> match : matched) { - ldapSet.add(match.get("id")); - } - } else { -// log.debug("Group lookup will not be performed due to no UserDN found for user " + userName); - } - - return ldapSet; - } - - private void generateUiLoggingStep4Exception(Exception ex) { - String groupSearchFilter = env - .getProperty(SystemSetting.LDAP_GROUP_FILTER - .getInternalName()); - String msg = "STEP-4:FAIL: There was an error searching with the groupFilter supplied: " - + groupSearchFilter + "'\n"; - msg += ex.getMessage(); - if (enableVerboseDebugging.isSelected()) { - msg = appendStacktraceToMsg(msg, ex); - } - log(msg); - } - - private void generateUiLoggingStep4Authz(String filter) { - Set<Map<String, String>> groups = buildGroup(env, filter); - String msg = "STEP-4:TESTING: Using Group Search Filter '" - + filter + "', " + groups.size() - + " ldap group(s) were located.\n"; - if (groups.size() > 0) { - HashMap<String, String>[] ldapLists = new HashMap[groups - .size()]; - groups.toArray(ldapLists); - msg += "STEP-4:PASS: Listing a few of the ldap groups located: \n"; - // 
iterate over first ten or less to demonstrate retrieve - for (int i = 0; (i < groups.size() && i < 10); i++) { - msg += ldapLists[i] + "\n"; - } - }else{ - msg+="STEP-4:WARN: With current settings, test user is not authorized for any groups. Is this correct?"; - } - log(msg); - } - - private void generateUiLoggingForStep4LdapFilter(String userName, - String filter) { - String msg = "STEP-4:TESTING: about to do ldap search with filter \n'" - + filter - + "'\n to locate groups that test user '"+userName+"' IS authorized to access."; - log(msg); - } - - - // throw the label and fields together, two to a row. - private HashMap<String, JTextField> loadUiFields(JPanel top, - String[] componentKeys) { - HashMap<String, JTextField> mappings = new HashMap<String, JTextField>(); - for (int i = 0; i < componentKeys.length; i++) { - String firstLabelKey = componentKeys[i]; - String secondLabelKey = componentKeys[++i]; - // locate second key - JPanel row = new JPanel(); - row.setLayout(new FlowLayout(FlowLayout.LEFT)); - JLabel label1 = new JLabel(firstLabelKey); - label1.setSize(textBoxWidth, 5); -// JTextField value1 = new JTextField(textBoxWidth); - JTextField value1 = null; - if (firstLabelKey.equalsIgnoreCase("Password:")&&(!showPasswords.isSelected())) { - value1 = new JPasswordField(textBoxWidth); - } else { - value1 = new JTextField(textBoxWidth); - } - JLabel label2 = new JLabel(secondLabelKey); - JTextField value2 = new JTextField(textBoxWidth); - row.add(label1); - row.add(value1); - row.add(Box.createRigidArea(new Dimension(0, 5))); - row.add(label2); - row.add(value2); - mappings.put(firstLabelKey, value1); - mappings.put(secondLabelKey, value2); - top.add(row); - } - - return mappings; - } - - private Properties getProperties(String contentProvider) { - Properties env = new Properties(); - env.setProperty(Context.INITIAL_CONTEXT_FACTORY, - "com.sun.jndi.ldap.LdapCtxFactory"); - env.setProperty(Context.PROVIDER_URL, contentProvider); - 
if(!enableLdapReferral.isSelected()){ - env.setProperty(Context.REFERRAL, "ignore"); - }else{ - String msg="**---- You have chosen to tell LDAP servers to [FOLLOW] context referrals. Default is [IGNORE] referrals. --**"; - log(msg); - env.setProperty(Context.REFERRAL, "follow"); - } - -// // Setup SSL if requested -// String protocol = ssl.isSelected()? "ssl":""; -// if ((protocol != null) && protocol.equals("ssl")) { -// String ldapSocketFactory = env -// .getProperty("java.naming.ldap.factory.socket"); -// if (ldapSocketFactory == null) { -// env.put("java.naming.ldap.factory.socket", -// UntrustedSSLSocketFactory.class.getName()); -// } -// env.put(Context.SECURITY_PROTOCOL, "ssl"); -// } - - return env; - } - - private String delineate() { - String line = "-"; - for (int i = 0; i < 30; i++) { - line += "-"; - } - return line; - } - - /** Takes care of delineating messages and conditional logging contents passed in. - * @param msg - */ - private void log(String msg) { - String message = "\n" + delineate() + "\n"; - message += msg; - message += "\n" + delineate() + "\n\n"; - //This flag can be used in the unlikely case that the UI hangs during a test operation.: - if(iterativeVerboseLogging.isSelected()){ - System.out.println(message); - } - testResults.setText(testResults.getText() + message); - } - - private SearchControls getSearchControls() { - int scope = SearchControls.SUBTREE_SCOPE; - int timeLimit = 0; - long countLimit = 0; - String[] returnedAttributes = null; - boolean returnObject = false; - boolean deference = false; - SearchControls constraints = new SearchControls(scope, countLimit, - timeLimit, returnedAttributes, returnObject, deference); - return constraints; - } - - /** Translate SystemSettings to familiar Properties instance since we're - * passing not one but multiple values. 
- * - * @param systemSettings - * @return - */ - private Properties populateProperties(Properties existing) { - Properties properties = new Properties(); - if(existing!=null){ - properties = existing; - } - for (SystemSetting entry : SystemSetting.values()) { - if(entry!=null){ - switch(entry){ - case LDAP_BASED_JAAS_PROVIDER: - properties.put(entry.getInternalName(), ""); - break; - } - } - } - return properties; - } - - /**Build/retrieve the user DN. Not usually a property. - * - * @param options - * @param userName - * @param usePosixGroups boolean indicating whether we search for groups with posixGroup format - * @return - */ - private String getUserAttribute(Properties options, String userName, boolean usePosixGroups) { - Map<String, String> details = findLdapUserDetails(userName); - String userAttribute = null; - if (usePosixGroups) {//return just the username as posixGroup member search uses (&(%s)(memberUid=username)) - userAttribute = userName; - } else {//this is the default where group search uses (&(%s)(uniqueMember={userDn})) - userAttribute = details.get("dn"); - } - - return userAttribute; - } - - /** See LDAPStringUtil.encodeForFilter() for original code/source/author/etc. 
- * <p>Encode a string so that it can be used in an LDAP search filter.</p>
- *
- * <p>The following table shows the characters that are encoded and their
- * encoded version.</p>
- *
- * <table>
- * <tr><th align="center">Character</th><th>Encoded As</th></tr>
- * <tr><td align="center">*</td><td>\2a</td></tr>
- * <tr><td align="center">(</td><td>\28</td></tr>
- * <tr><td align="center">)</td><td>\29</td></tr>
- * <tr><td align="center">\</td><td>\5c</td></tr>
- * <tr><td align="center"><code>null</code></td><td>\00</td></tr>
- * </table>
- *
- * <p>In addition to encoding the above characters, any non-ASCII character
- * (any character with a hex value greater then <code>0x7f</code>) is also
- * encoded and rewritten as a UTF-8 character or sequence of characters in
- * hex notation.</p>
- *
- * @param filterString a string that is to be encoded
- * @return the encoded version of <code>filterString</code> suitable for use
- * in a LDAP search filter
- * @see <a href="http://tools.ietf.org/html/rfc4515">RFC 4515</a>
- */
- public static String encodeForFilter(final String filterString) {
- if (filterString != null && filterString.length() > 0) {
- StringBuilder encString = new StringBuilder(filterString.length());
- for (int i = 0; i < filterString.length(); i++) {
- char ch = filterString.charAt(i);
- switch (ch) {
- case '*': // encode a wildcard * character
- encString.append("\\2a");
- break;
- case '(': // encode a open parenthesis ( character
- encString.append("\\28");
- break;
- case ')': // encode a close parenthesis ) character
- encString.append("\\29");
- break;
- case '\\': // encode a backslash \ character
- encString.append("\\5c");
- break;
- case '\u0000': // encode a null character
- encString.append("\\00");
- break;
- default:
- if (ch <= 0x7f) { // an ASCII character
- encString.append(ch);
- } else if (ch >= 0x80) { // encode to UTF-8
- try {
- byte[] utf8bytes = String.valueOf(ch).getBytes("UTF8");
- for (byte b : utf8bytes) {
- 
encString.append(String.format("\%02x", b)); - } - } catch (UnsupportedEncodingException e) { - // ignore - } - } - } - } - return encString.toString(); - } - return filterString; - } -} - -//Mock up the upgraded system properties approach to use SystemSetting -enum SystemSetting { - LDAP_BASED_JAAS_PROVIDER("CAM_JAAS_PROVIDER"), - LDAP_NAMING_PROVIDER_URL("CAM_LDAP_NAMING_PROVIDER_URL"), - USE_SSL_FOR_LDAP("CAM_LDAP_PROTOCOL"), - LDAP_LOGIN_PROPERTY("CAM_LDAP_LOGIN_PROPERTY"), - LDAP_FILTER("CAM_LDAP_FILTER"), - LDAP_GROUP_FILTER("CAM_LDAP_GROUP_FILTER"), - LDAP_GROUP_MEMBER("CAM_LDAP_GROUP_MEMBER"), - LDAP_GROUP_QUERY_PAGE_SIZE("CAM_LDAP_GROUP_QUERY_PAGE_SIZE"), - LDAP_BASE_DN("CAM_LDAP_BASE_DN"), - LDAP_BIND_DN("CAM_LDAP_BIND_DN"), - LDAP_BIND_PW("CAM_LDAP_BIND_PW"), - LDAP_NAMING_FACTORY("CAM_LDAP_NAMING_FACTORY_INITIAL"), - LDAP_GROUP_USE_POSIX("CAM_LDAP_GROUP_USE_POSIX"), - ; - - private String internalName; - - private SystemSetting(String name) { - this.internalName = name; - } - - public String getInternalName() { - return internalName; - } - - public static SystemSetting getByInternalName(String internalName) { - for (SystemSetting p : SystemSetting.values()) { - if (p.internalName.equals(internalName)) { - return p; - } - } - return null; - } -} - - diff --git a/modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java b/modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java new file mode 100644 index 0000000..b9667ea --- /dev/null +++ b/modules/helpers/ldap-tool/src/main/java/org/rhq/TestLdapSettings.java @@ -0,0 +1,1277 @@ +package org.rhq; + +import java.awt.BorderLayout; +import java.awt.Color; +import java.awt.Component; +import java.awt.Dimension; +import java.awt.FlowLayout; +import java.awt.event.ActionEvent; +import java.awt.event.ActionListener; +import java.awt.event.ItemEvent; +import java.awt.event.ItemListener; +import java.awt.event.WindowAdapter; +import java.awt.event.WindowEvent; +import java.io.IOException; 
+import java.io.PrintWriter; +import java.io.StringWriter; +import java.io.UnsupportedEncodingException; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Properties; +import java.util.Set; + +import javax.naming.Context; +import javax.naming.NamingEnumeration; +import javax.naming.NamingException; +import javax.naming.directory.Attribute; +import javax.naming.directory.Attributes; +import javax.naming.directory.InvalidSearchFilterException; +import javax.naming.directory.SearchControls; +import javax.naming.directory.SearchResult; +import javax.naming.ldap.Control; +import javax.naming.ldap.InitialLdapContext; +import javax.naming.ldap.PagedResultsControl; +import javax.naming.ldap.PagedResultsResponseControl; +import javax.swing.Box; +import javax.swing.BoxLayout; +import javax.swing.JButton; +import javax.swing.JCheckBox; +import javax.swing.JFrame; +import javax.swing.JLabel; +import javax.swing.JPanel; +import javax.swing.JPasswordField; +import javax.swing.JScrollPane; +import javax.swing.JTextArea; +import javax.swing.JTextField; +import javax.swing.SwingUtilities; +import javax.swing.border.LineBorder; +import javax.swing.border.TitledBorder; + +//import org.rhq.enterprise.server.util.security.UntrustedSSLSocketFactory; + +/* Is a development test tool that allows the user to simulate the RHQ server side + * LDAP calls during auth/authz operations. + * + * The specific LDAP logic below needs to mirror the latest RHQ code and allow the user + * to test our their configuration without requring a specific RHQ/JON build as a dependency. + * + * NOTE: To avoid a runtime dependency on specific versions of RHQ or JON, the small implementation + * methods were copied into this class with minimatl changes for logging and ui messaging. The + * definitive implementation for each 'copied' method can be found in LDAPGroupManagerBean. 
+ * + * @author Simeon Pinder + */ +public class TestLdapSettings extends JFrame { + //shared fields + private JTextArea testResults; + private JCheckBox showPasswords; + private JCheckBox ssl; + private JLabel groupPageSizeName; + private JTextField groupMemberQueryValue; + private JTextField testUserNameValue; + private JTextField testUserPasswordValue; + private HashMap<String, JTextField> fieldMappings; + private String[] keys; + private JCheckBox enableLdapReferral; + private JCheckBox enableVerboseDebugging; + private JCheckBox enableVerboseGroupParsing; + private JCheckBox iterativeVerboseLogging; + private JCheckBox enablePosixGroups; + private JCheckBox enable32xFeatures; + private String advdb = "**Verbose:debug ----"; + private static final String BASEDN_DELIMITER = ";"; + + private static final long serialVersionUID = 1L; + int textBoxWidth = 20; + private static JPanel top = null; + private static JPanel testUserRegion = null; + private static Properties env=null; + + public static void main(String args[]) { + new TestLdapSettings(); + } + //After enabling support for Query parsing, we need to warn users of the effects. + final String warnMessage = "<html>***WARNING: Depending upon<br>" + + "i)how the ldap server is configured <br>" + + "ii)client query paging settings <br>" + + " enabling <b>'more detailed logging'</b>,<br>" + + " <b>'more group parsing'</b> AND/OR <b>'also log to console'</b> may cause the console to hang/freeze <br>" + + " as the LDAP tool continues to parse large query results. 
If that occurs it is <br>" + + " suggested that you kill tool and re-run with 'also log to console' so that the console logs<br>" + + " will show which dataset is causing the delay and then you should modify your search|group|member<br>" + + " filters accordingly to <b>return smaller results</b> and/or <b>consume larger payloads</b>.<br>" + + "***WARNING</html>"; + + // Configure window properties + private TestLdapSettings() { + + setTitle("Check LDAP Settings: Simulates LDAP checks/queries of RHQ LDAP integration"); + getContentPane().setLayout(new BorderLayout()); + // top panel definition + top = new JPanel(); + top.setLayout(new BoxLayout(top, BoxLayout.Y_AXIS)); + top.setBorder(LineBorder.createGrayLineBorder()); + //define checkbox here as it's checked when generating UI. + showPasswords = new JCheckBox("show passwords:"); + showPasswords.setSelected(false); + + keys = new String[] { "URL:", "Search Filter:", + "Search Base:","Login Property", + "Username:", "Group Search Filter:", + "Password:", "Group Member Filter:", + }; + fieldMappings = loadUiFields(top, keys); + + //add the two checkboxes for additiona debugging options + enableLdapReferral= new JCheckBox("[follow] ldap referrals"); + enableLdapReferral.setSelected(false); + enableVerboseDebugging= new JCheckBox("more verbose logging"); + enableVerboseDebugging.setSelected(false); + enableVerboseDebugging.setToolTipText(warnMessage); + enableVerboseGroupParsing= new JCheckBox("more detailed group parsing"); + enableVerboseGroupParsing.setSelected(false); + enableVerboseGroupParsing.setToolTipText("*Take care when using this mode with a large number of groups* Every group discovered is parsed/listed."); + iterativeVerboseLogging= new JCheckBox("also log to console"); + iterativeVerboseLogging.setSelected(false); + iterativeVerboseLogging.setToolTipText("This mode is useful when the test tool is having difficulty returning results from large queries."); + 
iterativeVerboseLogging.setToolTipText(warnMessage); + enablePosixGroups= new JCheckBox("is Posix Group"); + enablePosixGroups.setSelected(false); + enablePosixGroups.setEnabled(false); + + //put into 3.2.x functionality row + JPanel jon32xRegion = new JPanel(); + jon32xRegion.setLayout(new FlowLayout(FlowLayout.LEFT)); + LineBorder jon32xLineBorder = new LineBorder(Color.BLACK, 2); + TitledBorder jon32xBorder = new TitledBorder(jon32xLineBorder, "JON 3.2.x/RHQ 4.8.x specific features:"); + jon32xRegion.setBorder(jon32xBorder); + enable32xFeatures= new JCheckBox("enable JON 3.2.x/RHQ 4.8.x features"); + enable32xFeatures.setToolTipText("This enables features not available before RHQ 4.8.x/JON 3.2.x."); + enable32xFeatures.setSelected(false); + enable32xFeatures.addItemListener(new ItemListener() { + @Override + public void itemStateChanged(ItemEvent e) { + if(enable32xFeatures.isSelected()){ + groupPageSizeName.setEnabled(true); + groupMemberQueryValue.setEnabled(true); + groupMemberQueryValue.setEditable(true); + groupMemberQueryValue.setText("1000"); + enablePosixGroups.setEnabled(true); + }else{ + groupMemberQueryValue.setText(""); + groupPageSizeName.setEnabled(false); + groupMemberQueryValue.setEnabled(false); + groupMemberQueryValue.setEditable(false); + enablePosixGroups.setEnabled(false); + enablePosixGroups.setSelected(false); + } + } + }); + + jon32xRegion.add(enable32xFeatures); + groupPageSizeName = new JLabel("Group Query Page Size:"); + groupPageSizeName.setEnabled(false); + groupMemberQueryValue = new JTextField(10); + groupMemberQueryValue.setText("1000"); + groupMemberQueryValue.setEditable(false); + jon32xRegion.add(groupPageSizeName); + jon32xRegion.add(groupMemberQueryValue); + jon32xRegion.add(enablePosixGroups); + top.add(jon32xRegion); + + //put into row display + JPanel advancedDebugRegion = new JPanel(); + advancedDebugRegion.setLayout(new FlowLayout(FlowLayout.LEFT)); + LineBorder advancedBorder = new LineBorder(Color.BLACK, 2); + 
TitledBorder debugBorder = new TitledBorder(advancedBorder, "Debug: **Warning --<hover HERE>**"); + advancedDebugRegion.setBorder(debugBorder); + advancedDebugRegion.add(enableLdapReferral); + advancedDebugRegion.add(enableVerboseDebugging); + advancedDebugRegion.add(enableVerboseGroupParsing); + advancedDebugRegion.add(iterativeVerboseLogging); + advancedDebugRegion.setToolTipText(warnMessage); + top.add(advancedDebugRegion); + + JPanel securityPanel = new JPanel(); + securityPanel.setLayout(new FlowLayout(FlowLayout.LEFT)); + showPasswords.addItemListener(new ItemListener() { + @Override + public void itemStateChanged(ItemEvent e) { + SwingUtilities.invokeLater(new Runnable() { + @Override + public void run() { + //store off existing value + String existingValue = ""; + String existingTestUserPass = ""; + JTextField current = fieldMappings.get("Password:"); + if(current instanceof JPasswordField){ + JPasswordField pass = ((JPasswordField)current); + if(pass!=null){ + char[] charArray = pass.getPassword(); + if(charArray.length>0){ + existingValue = new String(charArray); + } + } + }else{ + existingValue = current.getText(); + } + //save off test user password as well + if(testUserPasswordValue instanceof JPasswordField){ + JPasswordField pass = ((JPasswordField)testUserPasswordValue); + if(pass!=null){ + char[] charArray = pass.getPassword(); + if(charArray.length>0){ + existingTestUserPass = new String(charArray); + } + } + }else{ + existingTestUserPass=testUserPasswordValue.getText(); + } + + JTextField updatedContainer = null; + if(showPasswords.isSelected()){ + updatedContainer = new JTextField(textBoxWidth); + updatedContainer.setText(existingValue); + testUserPasswordValue = new JTextField(textBoxWidth); + testUserPasswordValue.setText(existingTestUserPass); + }else{ + updatedContainer = new JPasswordField(textBoxWidth); + updatedContainer.setText(existingValue); + testUserPasswordValue = new JPasswordField(textBoxWidth); + 
testUserPasswordValue.setText(existingTestUserPass); + } + //locate the JPanel and rebuild it Should be at index 3 + JPanel passwordRow = (JPanel) top.getComponent(3); +// JTextField jf = (JTextField) passwordRow.getComponent(1); + //store off existing components + Component[] existing = new Component[passwordRow.getComponentCount()]; + for(int i=0; i<passwordRow.getComponentCount();i++){ + existing[i] = passwordRow.getComponent(i); + } + passwordRow.removeAll(); + for(int j=0;j<existing.length;j++){ + if(j==1){//insert new JTextField instead + passwordRow.add(updatedContainer); + }else{ + passwordRow.add(existing[j]); + } + } + //reload testUserRegion + //store off existing components + Component[] existingTest = new Component[testUserRegion.getComponentCount()]; + for(int i=0; i<testUserRegion.getComponentCount();i++){ + existingTest[i] = testUserRegion.getComponent(i); + } + testUserRegion.removeAll(); + for(int j=0;j<existingTest.length;j++){ + if(j==3){//insert new JTextField instead + testUserRegion.add(testUserPasswordValue); + }else{ + testUserRegion.add(existingTest[j]); + } + } + + top.revalidate(); + top.repaint(); + } + }); + } + }); + securityPanel.add(showPasswords); + ssl = new JCheckBox("SSL:"); + ssl.setEnabled(false); + securityPanel.add(ssl); + top.add(securityPanel); + + // test user auth region + testUserRegion = new JPanel(); + testUserRegion.setLayout(new FlowLayout(FlowLayout.LEFT)); + LineBorder border = new LineBorder(Color.BLUE, 2); + TitledBorder tBorder = new TitledBorder(border, "Authentication/Authorization Check Credentials: (insert valid ldap user assigned to group)"); + testUserRegion.setBorder(tBorder); + JLabel testUserName = new JLabel("Test UserName:"); + testUserNameValue = new JTextField(textBoxWidth); + JLabel testUserPassword = new JLabel("Test Password:"); +// testUserPasswordValue = new JTextField(textBoxWidth); + testUserPasswordValue = new JPasswordField(textBoxWidth); + testUserRegion.add(testUserName); + 
testUserRegion.add(testUserNameValue); + testUserRegion.add(testUserPassword); + testUserRegion.add(testUserPasswordValue); + top.add(testUserRegion); + + // center + JPanel center = new JPanel(); + center.setLayout(new BoxLayout(center, BoxLayout.X_AXIS)); + // build center panel + buildCenterPanel(center); + + // final component layout + getContentPane().add(top, BorderLayout.NORTH); + getContentPane().add(center, BorderLayout.CENTER); + this.setSize(720, 700); + addWindowListener(new WindowAdapter() { + public void windowClosing(WindowEvent e) { + System.exit(0); + } + }); + setVisible(true); + } + + // define the center display panel. + private void buildCenterPanel(JPanel center) { + // First element is Test Button + JButton test = new JButton("Test Settings"); + center.add(test); + // second is large text box that display ldap queries + testResults = new JTextArea("(click button to test settings values: simulates 4 separate checks showing ldap filters used)", + 40, 40); + JScrollPane jsp = new JScrollPane(testResults); + center.add(jsp); + test.addActionListener(new ActionListener() { + public void actionPerformed(ActionEvent e) { + testResults.setText("");//clear out empty msg + //trim spaces from all fields + String ldapServer = fieldMappings.get(keys[0]).getText().trim(); + String searchFilter = fieldMappings.get(keys[1]).getText().trim(); + String searchBase = fieldMappings.get(keys[2]).getText().trim(); + String loginProperty = fieldMappings.get(keys[3]).getText().trim(); + String bindUserName = fieldMappings.get(keys[4]).getText().trim(); + String groupSearchFilter = fieldMappings.get(keys[5]).getText().trim(); + String bindPassword = fieldMappings.get(keys[6]).getText().trim(); + String groupMemberFilter = fieldMappings.get(keys[7]).getText().trim(); + String groupMemberQuerySize = groupMemberQueryValue.getText().trim(); + String testUserName = testUserNameValue.getText().trim(); + String testUserPassword = testUserPasswordValue.getText().trim(); + // 
validate initial required elements + String msg = null; + boolean proceed = true; + //valid required details set. + if (ldapServer.isEmpty() || bindUserName.isEmpty() + || bindPassword.isEmpty() || searchBase.isEmpty()) { + msg ="STEP-1:FAIL: "+ keys[0] + ", " + keys[2] + ", " + keys[4] + ", " + + keys[6] + " cannot be empty to proceed."; + log(msg); + proceed = false; + } + env = null; + InitialLdapContext ctx = null; + if (proceed) {// attempt initial ldap bind from RHQ server + msg = "STEP-1:TESTING: Attempting to bind to server:" + ldapServer + + "\n with user '" + bindUserName + + "' and password entered."; + log(msg); + env = getProperties(ldapServer); + env.setProperty(Context.SECURITY_PRINCIPAL, bindUserName); + env.setProperty(Context.SECURITY_CREDENTIALS, bindPassword); + env.setProperty(Context.SECURITY_AUTHENTICATION, "simple"); + //put the rest of the LDAP properties into the Properties instance for use later. + //there still needs to be separate variables since some are for UI validation. + env.setProperty(SystemSetting.LDAP_GROUP_FILTER.getInternalName(), groupSearchFilter); + env.setProperty(SystemSetting.LDAP_GROUP_MEMBER.getInternalName(), groupMemberFilter); + env.setProperty(SystemSetting.LDAP_BASE_DN.getInternalName(), searchBase); + env.setProperty(SystemSetting.LDAP_LOGIN_PROPERTY.getInternalName(), loginProperty); + env.setProperty(SystemSetting.LDAP_BIND_DN.getInternalName(), bindUserName); + env.setProperty(SystemSetting.LDAP_BIND_PW.getInternalName(), bindPassword); + env.setProperty(SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.getInternalName(), groupMemberQuerySize); + + try { + ctx = new InitialLdapContext(env, null); + msg = "STEP-1:PASS: LDAP bind credentials are correct. 
Successfully connected to '" + + ldapServer + + "'.\n This means the LDAP Bind credentials for the RHQ Server authentication/authorization requests to ldap server " + + "are correct."; + if(enableVerboseDebugging.isSelected()){ + msg+="\n"+advdb+" LDAP simple authentication bind successful."; + } + log(msg); + proceed = true; + } catch (Exception ex) { + msg = "STEP-1:FAIL: Unable to connect to the LDAP server with credentials specified.\n"; + msg+="Exception:"+ex.getMessage(); + if(enableVerboseDebugging.isSelected()){ + msg = appendStacktraceToMsg(msg, ex); + } + log(msg); + proceed = false; + } + } + if (proceed) {// retrieve test credentials to test run auth + // load search controls + SearchControls searchControls = getSearchControls(); + // validating searchFilter and test user/pass creds + proceed = true; + if (testUserName.isEmpty() || (testUserPassword.isEmpty())) { + msg = "STEP-2:FAIL: Test Username/Password fields cannot be empty for this step."; + log(msg); + proceed = false; + } + // testing a valid user involves a filtered ldap search + // using the loginProperty, and optionally searchFilter + String userDN = ""; + if (proceed) { + // default loginProperty to cn if it's not set + if (loginProperty.isEmpty()) { + loginProperty = "cn"; + if(enableVerboseDebugging.isSelected()){ + String mesg = "As you have not specified a login property, defaulting to 'cn'"; + log(advdb+" "+msg); + } + } + String filter; + if (!searchFilter.isEmpty()) { + filter = "(&(" + loginProperty + "=" + testUserName + + ")" + "(" + searchFilter + "))"; + } else { + filter = "(" + loginProperty + "=" + testUserName + + ")"; + } + if(enableVerboseDebugging.isSelected()){ + log(advdb+" The searchfilter is optionally appended to login property for additional shared attribute across users."); + } + msg = "STEP-2:TESTING: To validate the test user the following LDAP filtered component will be used to find matching users:\n"; + msg += filter; + log(msg); + // test out the search on the 
target ldap server + try { + String[] baseDNs = searchBase.split(";"); + for (int x = 0; x < baseDNs.length; x++) { + NamingEnumeration answer = ctx.search( + baseDNs[x], filter, searchControls); + if(enableVerboseDebugging.isSelected()){ + log(advdb+" this search was excuted against DN component '"+baseDNs[x]+"'."); + } + // boolean ldapApiNpeFound = false; + if (!answer.hasMoreElements()) { + msg="STEP-2:WARN Unable to locate a matching users for the filter'"+filter+ + "'. Please check your loginProperty. Usually 'cn' or 'uid'"; + log(msg); + continue; + } + // Going with the first match + SearchResult si = (SearchResult) answer.next(); + + // Construct the UserDN + userDN = si.getName() + "," + baseDNs[x]; + msg = "STEP-2:PASS: The test user '" + + testUserName + + "' was succesfully located, and the following userDN will be used in authorization check:\n"; + msg += userDN; + log(msg); + + ctx.addToEnvironment(Context.SECURITY_PRINCIPAL, userDN); + ctx.addToEnvironment(Context.SECURITY_CREDENTIALS,testUserPassword); + ctx.addToEnvironment(Context.SECURITY_AUTHENTICATION,"simple"); + + // if successful then verified that user and pw + // are valid ldap credentials + ctx.reconnect(null); + msg = "STEP-2:PASS: The user '" + + testUserName + + "' was succesfully authenticated using userDN '" + + userDN + "' and password provided.\n" + +"*Note: the loginProperty must match the loginProperty listed in dn: for the user. It is the DN that RHQ will lookup and use."; + log(msg); + } + } catch (Exception ex) { + msg = "STEP-2:FAIL: There was an error while searching for or authenticating the user '" + + testUserName + "'\n"; + msg += ex.getMessage(); + if(enableVerboseDebugging.isSelected()){ + msg = appendStacktraceToMsg(msg, ex); + } + log(msg); + proceed=false; + } + } + // with authentication completed, now check authorization. 
+ // validate filter components to list all available groups + proceed = false; + if (!groupSearchFilter.isEmpty()) { + Set<Map<String, String>> ret = new HashSet<Map<String, String>>(); + String filter = null; + + if (groupSearchFilter.startsWith("(") && groupSearchFilter.endsWith(")")){ + filter = groupSearchFilter; // RFC 2254 does not allow for ((expression)) + }else{ + filter = String + .format("(%s)", groupSearchFilter); + } + msg = "STEP-3:TESTING: This ldap filter " + + filter + + " will be used to locate ALL available LDAP groups"; + log(msg); + + Properties systemConfig = populateProperties(env); + + ret = buildGroup(systemConfig, filter); + msg = "STEP-3:TESTING: Using Group Search Filter '" + + filter + "', " + ret.size() + + " ldap group(s) were located.\n"; + if (ret.size() > 0) { + HashMap<String, String>[] ldapLists = new HashMap[ret + .size()]; + ret.toArray(ldapLists); + // in this mode go beyond the first ten results. + if (enableVerboseGroupParsing.isSelected()) { + msg += "STEP-3:PASS: Listing 'all' of the ldap groups located: \n"; + for (int i = 0; i < ret.size(); i++) { + msg += ldapLists[i] + "\n"; + } + } else {// otherwise only show first 10[subset of + // available groups] + msg += "STEP-3:PASS: Listing a few(<=10) of the ldap groups located: \n"; + for (int i = 0; (i < ret.size() && i < 10); i++) { + msg += ldapLists[i] + "\n"; + } + } + proceed = true;// then can proceed to next step. 
+ } + log(msg); + } else { + msg = "STEP-3:FAIL: Group Search Filter: cannot be empty to proceed."; + log(msg); + proceed=false; + } + // retrieve lists of authorized groups available for the + if (proceed) { + // check groupMember + if (!groupMemberFilter.isEmpty()) { +// Map<String, String> userDetails = new HashMap<String, String>(); +// userDetails = findLdapUserDetails(userDN); + Set<String> userDetails = findAvailableGroupsFor(testUserName); + + if(!userDetails.isEmpty()){ + proceed=true; + } + } else { + msg = "STEP-4:FAIL: Group Member Filter must be non-empty to proceed with simulating authorization check for test user."; + log(msg); + } + } + if(proceed){ + msg="COMPLETED:PASS: The current settings, for successful steps, should be correct to enter into your RHQ server."; + msg+="\n\n\n\n When you encounter failures, warnings or other unexpected results you should use an external "; + msg+="LDAP search utility to check that the generated filters return the expected LDAP results."; + log(msg); + } + } + } + }); + } + + private String appendStacktraceToMsg(String msg, Exception ex) { + String moreVerbose = ""; + moreVerbose+=advdb+" Exception type:"+ex.getClass()+"\n"; + moreVerbose+=advdb+" Exception stack trace reference:"+ex.getStackTrace()+"\n"; + if(ex.getStackTrace()!=null){ + StringWriter sw = new StringWriter(); + PrintWriter pw = new PrintWriter(sw); + ex.printStackTrace(pw); + moreVerbose+=advdb+" stack trace reference:"+sw.toString(); + } + msg+="\n"+moreVerbose; + return msg; + } + + private boolean containsIllegalLdap(String currentValue) { + boolean invalidData = false; + if((currentValue!=null)&&(!currentValue.trim().isEmpty())){ + //TODO: spinder 3/17/11: need to figure out regex to filter/detect bad data in returned ldap. Giving up for now. 
+// String regex = "(?<=(?:[^\]|^)(\\)+|[^\]|^)[/,+"><;=#]|(?<=(?:[^\]|^)(\\)+|[^\]|^)\(?!\|[/,+"><;=#]| $|(?<=^\) )|^"; +// regex = "(?<=(?:[^\\]|^)(\\\\)+|[^\\]|^)[/,+\"><;=#]|(?<=(?:[^\\]|^)(\\\\)+|[^\\]|^)\\(?!\\|[/,+\"><;=#]| $|(?<=^\\) )|^"; +// System.out.println("++++++++ CURR VAL:"+currentValue+":INV-CHeck:"+currentValue.matches(",+"\<;\n=/")+":NEWCHECK:"+(currentValue.matches(regex))); +// if(currentValue.matches(",+"\<;\n=/")){ +// invalidData=true; +// } +// String badList = ",+"\<;\n="; + String badList = "+"\<;\n"; + for(char car :currentValue.toCharArray()){ + for(char c :badList.toCharArray()){ + if(car == c){ + invalidData=true; + } + } + } + + } + return invalidData; + } + /** + * @throws NamingException + * @see org.jboss.security.auth.spi.UsernamePasswordLoginModule#validatePassword(java.lang.String,java.lang.String) + */ + protected Set<Map<String, String>> buildGroup(Properties systemConfig, String filter) { + Set<Map<String, String>> groupDetailsMap = new HashSet<Map<String, String>>(); + // Load our LDAP specific properties + // Load the BaseDN + String baseDN = (String) systemConfig.get(SystemSetting.LDAP_BASE_DN.getInternalName()); + + // Load the LoginProperty + String loginProperty = (String) systemConfig.get(SystemSetting.LDAP_LOGIN_PROPERTY.getInternalName()); + if (loginProperty == null) { + // Use the default + loginProperty = "cn"; + } + // Load any information we may need to bind + String bindDN = (String) systemConfig.get(SystemSetting.LDAP_BIND_DN.getInternalName()); + String bindPW = (String) systemConfig.get(SystemSetting.LDAP_BIND_PW.getInternalName()); + if (bindDN != null) { + systemConfig.setProperty(Context.SECURITY_PRINCIPAL, bindDN); + systemConfig.setProperty(Context.SECURITY_CREDENTIALS, bindPW); + systemConfig.setProperty(Context.SECURITY_AUTHENTICATION, "simple"); + } + try { + InitialLdapContext ctx = new InitialLdapContext(systemConfig, null); + SearchControls searchControls = getSearchControls(); + /*String filter 
= "(&(objectclass=groupOfUniqueNames)(uniqueMember=uid=" + userName + + ",ou=People, dc=rhndev, dc=redhat, dc=com))";*/ + + //modify the search control to only include the attributes we will use + String[] attributes = { "cn", "description" }; + searchControls.setReturningAttributes(attributes); + + //BZ:964250: add rfc 2696 + //default to 1000 results. System setting page size from UI should be non-negative integer > 0. + //additionally as system settings are modifiable via CLI which may not have param checking enabled do some + //more checking. + int defaultPageSize = 1000; + // only if they're enabled in the UI. + if (enable32xFeatures.isSelected()) { + String groupPageSize = systemConfig.getProperty( + SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE + .getInternalName(), "" + defaultPageSize); + if ((groupPageSize != null) + && (!groupPageSize.trim().isEmpty())) { + int passedInPageSize = -1; + try { + passedInPageSize = Integer + .valueOf(groupPageSize.trim()); + if (passedInPageSize > 0) { + defaultPageSize = passedInPageSize; + if(enableVerboseDebugging.isSelected()){ + log(advdb + + " LDAP Group Query Page Sizing of '"+defaultPageSize+"' is being requested from server."); + } + } + } catch (NumberFormatException nfe) { + // log issue and do nothing. Go with the default. + String msg = "LDAP Group Page Size passed in '" + + groupPageSize + + "' in is invalid. Defaulting to 1000 results." + + nfe.getMessage(); + log(msg); + } + } + ctx.setRequestControls(new Control[] { new PagedResultsControl( + defaultPageSize, Control.CRITICAL) }); + } + // Loop through each configured base DN. It may be useful + // in the future to allow for a filter to be configured for + // each BaseDN, but for now the filter will apply to all. 
+ String[] baseDNs = baseDN.split(BASEDN_DELIMITER); + + for (int x = 0; x < baseDNs.length; x++) { + if (enableVerboseDebugging.isSelected()) { + log(advdb + + " this search was excuted against DN component '" + + baseDNs[x] + "'."); + } + executeGroupSearch(filter, groupDetailsMap, ctx, searchControls, baseDNs, x); + + // continually parsing pages of results until we're done. + // only if they're enabled in the UI. + if (enable32xFeatures.isSelected()) { + // handle paged results if they're being used here + byte[] cookie = null; + Control[] controls = ctx.getResponseControls(); + if (controls != null) { + for (Control control : controls) { + if (control instanceof PagedResultsResponseControl) { + PagedResultsResponseControl pagedResult = (PagedResultsResponseControl) control; + cookie = pagedResult.getCookie(); + } + } + } + + while (cookie != null) { + String msg = "RFC 2696 is supported by the server and we are paging through the results. "+ + groupDetailsMap.size()+" results returned so far."; + if(enableVerboseGroupParsing.isSelected()){ + log(advdb + + msg); + } + // ensure the next requests contains the session/cookie + // details + ctx.setRequestControls(new Control[] { new PagedResultsControl( + defaultPageSize, cookie, Control.CRITICAL) }); + executeGroupSearch(filter, groupDetailsMap, ctx, + searchControls, baseDNs, x); + // empty out cookie + cookie = null; + // test for further iterations + controls = ctx.getResponseControls(); + if (controls != null) { + for (Control control : controls) { + if (control instanceof PagedResultsResponseControl) { + PagedResultsResponseControl pagedResult = (PagedResultsResponseControl) control; + cookie = pagedResult.getCookie(); + } + } + } + } + } + }//end of for loop + } catch (NamingException e) { + if (e instanceof InvalidSearchFilterException) { + InvalidSearchFilterException fException = (InvalidSearchFilterException) e; + String message = "The ldap group filter defined is invalid "; + log(message); + } + 
//TODO: check for ldap connection/unavailable/etc. exceptions. + else { + String mesg = "LDAP communication error: " + e.getMessage(); + log(mesg); + } + } catch (IOException iex) { + String msg = "Unexpected LDAP communciation error:" + iex.getMessage(); + log(msg); + } + + return groupDetailsMap; + } + + /** Executes the LDAP group query using the filters, context and search controls, etc. parameters passed in. + * The matching groups located during processing this pages of results are added as new entries to the + * groupDetailsMap passed in. + * + * @param filter + * @param groupDetailsMap + * @param ctx + * @param searchControls + * @param baseDNs + * @param x + * @throws NamingException + */ + private void executeGroupSearch(String filter, Set<Map<String, String>> groupDetailsMap, InitialLdapContext ctx, + SearchControls searchControls, String[] baseDNs, int x) throws NamingException { + //execute search based on controls and context passed in. + NamingEnumeration<SearchResult> answer = ctx.search(baseDNs[x], filter, searchControls); + boolean ldapApiEnumerationBugEncountered = false; + while ((!ldapApiEnumerationBugEncountered) && answer.hasMoreElements()) {//BZ:582471- ldap api bug change + // We use the first match + SearchResult si = null; + try { + si = answer.next(); + } catch (NullPointerException npe) { + if (enableVerboseDebugging.isSelected()) { + log(advdb + + " NullPtr exception detected. If known LDAP api enum npe ignore: " + + npe.getMessage() + "."); + } + ldapApiEnumerationBugEncountered = true; + break; + } + + if (enableVerboseDebugging.isSelected() + || enableVerboseGroupParsing.isSelected()) { + Attributes attributeContainer = si.getAttributes(); + NamingEnumeration<? 
extends Attribute> attributes = attributeContainer + .getAll(); + String attributesReturned = " "; + while (attributes.hasMore()) { + attributesReturned += attributes.next().getID() + ","; + } + String dbugMesg = "\n" + + advdb + + " Group search LDAP (" + + attributeContainer.size() + + ") attributes located for group '" + + si.getName() + + "' are [" + + attributesReturned.substring(0, + attributesReturned.length() - 1) + "]."; + // directly update here to shorten messages for lots of groups + testResults.setText(testResults.getText() + dbugMesg); + //This flag can be used in the unlikely case that the UI hangs during a test operation.: + if(iterativeVerboseLogging.isSelected()){ + System.out.println(dbugMesg); + } + + // additionally parse attribute ids and values for illegal ldap + // characters + if (enableVerboseGroupParsing.isSelected()) { + attributes = attributeContainer.getAll(); + String currentAttributeId = ""; + String currentValue = ""; + // spinder: 3/17/11: should we bail on first bad data or + // display them all? 
+ while (attributes.hasMore()) { + boolean badData = false; + Attribute att = attributes.next(); + currentAttributeId = att.getID(); + if (containsIllegalLdap(currentAttributeId)) { + log(advdb + + " LDAP Group: bad atrribute data detected for group '" + + si.getName() + "' for attribute '" + + currentAttributeId + "'."); + badData = true; + } + if (att.getAll() != null) { + NamingEnumeration<?> enumer = att.getAll(); + while (enumer.hasMore()) { + currentValue = enumer.next() + ""; + if (containsIllegalLdap(currentValue)) { + log(advdb + + " LDAP Group: bad data detected for group '" + + si.getName() + + "' with attribute '" + + currentAttributeId + + "' and value:" + currentValue); + badData = true; + } + } + } + if (badData) { + log(advdb + + "** LDAP Group: Some bad LDAP data detected for group '" + + si.getName() + "'."); + } + } + } + } + + Map<String, String> entry = new HashMap<String, String>(); + // String name = (String) si.getAttributes().get("cn").get(); + Attribute commonNameAttr = si.getAttributes().get("cn"); + if (commonNameAttr != null) { + String name = (String) commonNameAttr.get(); + name = name.trim(); + Attribute desc = si.getAttributes().get("description"); + String description = desc != null ? (String) desc.get() : ""; + description = description.trim(); + entry.put("id", name); + entry.put("name", name); + entry.put("description", description); + groupDetailsMap.add(entry); + } else {// unable to retrieve details for specific group. + log(advdb + + " There was an error retrieving 'cn' attribute for group '" + + si.getName() + + "'. Not adding to returned list of groups. 
"); + } + } + } + + public Map<String, String> findLdapUserDetails(String userName) { + // Load our LDAP specific properties + Properties systemConfig = env; + HashMap<String, String> userDetails = new HashMap<String, String>(); + + // Load the BaseDN + String baseDN = (String) systemConfig.get(SystemSetting.LDAP_BASE_DN.getInternalName()); + + // Load the LoginProperty + String loginProperty = (String) systemConfig.get(SystemSetting.LDAP_LOGIN_PROPERTY.getInternalName()); + if (loginProperty == null) { + // Use the default + loginProperty = "cn"; + } + // Load any information we may need to bind + String bindDN = (String) systemConfig.get(SystemSetting.LDAP_BIND_DN.getInternalName()); + String bindPW = (String) systemConfig.get(SystemSetting.LDAP_BIND_PW.getInternalName()); + + // Load any search filter + String groupSearchFilter = (String) systemConfig.get(SystemSetting.LDAP_GROUP_FILTER.getInternalName()); + String groupMemberFilter = (String) systemConfig.get(SystemSetting.LDAP_GROUP_MEMBER.getInternalName()); + String testUserDN=userName; + String ldapServer = (String) systemConfig.get(Context.PROVIDER_URL); + + Properties env = getProperties(ldapServer); + + if (bindDN != null) { + env.setProperty(Context.SECURITY_PRINCIPAL, bindDN); + env.setProperty(Context.SECURITY_CREDENTIALS, bindPW); + env.setProperty(Context.SECURITY_AUTHENTICATION, "simple"); + } + + try { + InitialLdapContext ctx = new InitialLdapContext(env, null); + SearchControls searchControls = getSearchControls(); + + String filter = String.format("(&(%s)(%s=%s))", + groupSearchFilter, groupMemberFilter, + testUserDN); + + generateUiLoggingForStep4LdapFilter(userName, filter); + + // Loop through each configured base DN. It may be useful + // in the future to allow for a filter to be configured for + // each BaseDN, but for now the filter will apply to all. 
+ String[] baseDNs = baseDN.split(BASEDN_DELIMITER); + for (int x = 0; x < baseDNs.length; x++) { + NamingEnumeration<SearchResult> answer = ctx.search(baseDNs[x], filter, searchControls); + if (!answer.hasMoreElements()) { //BZ:582471- ldap api bug change + // Nothing found for this DN, move to the next one if we have one. + continue; + } + + // We use the first match + SearchResult si = answer.next(); + //generate the DN + String userDN = null; + try { + userDN = si.getNameInNamespace(); + } catch (UnsupportedOperationException use) { + userDN = si.getName(); + if (userDN.startsWith("\"")) { + userDN = userDN.substring(1, userDN.length()); + } + if (userDN.endsWith("\"")) { + userDN = userDN.substring(0, userDN.length() - 1); + } + userDN = userDN + "," + baseDNs[x]; + } + userDetails.put("dn", userDN); + + // Construct the UserDN + NamingEnumeration<String> keys = si.getAttributes().getIDs(); + while (keys.hasMore()) { + String key = keys.next(); + Attribute value = si.getAttributes().get(key); + if ((value != null) && (value.get() != null)) { + userDetails.put(key, value.get().toString()); + } + } +// return userDetails; + }//end of for loop + generateUiLoggingStep4Authz(filter); + return userDetails; + } catch (Exception ex) { + generateUiLoggingStep4Exception(ex); + } + return userDetails; + } + + public Set<String> findAvailableGroupsFor(String userName) { + // Load our LDAP specific properties + Properties options = env; + String groupFilter = options.getProperty(SystemSetting.LDAP_GROUP_FILTER.getInternalName(), ""); + String groupMember = options.getProperty(SystemSetting.LDAP_GROUP_MEMBER.getInternalName(), ""); + String groupUsePosix = options.getProperty(SystemSetting.LDAP_GROUP_USE_POSIX.getInternalName(), "false"); + if (groupUsePosix == null) { + groupUsePosix = Boolean.toString(false);//default to false + } + boolean usePosixGroups = Boolean.valueOf(groupUsePosix); + String userAttribute = getUserAttribute(options, userName, usePosixGroups); + 
Set<String> ldapSet = new HashSet<String>(); + + if (userAttribute != null && userAttribute.trim().length() > 0) { + //TODO: spinder 4/21/10 put in error/debug logging messages for badly formatted filter combinations + String filter = ""; + //form assumes examples where groupFilter is like 'objectclass=groupOfNames' and groupMember is 'member' + // to produce ldap filter like (&(objectclass=groupOfNames)(member=cn=Administrator,ou=People,dc=test,dc=com)) + // or like (&(objectclass=groupOfNames)(memberUid=Administrator)) for posixGroups. + filter = String.format("(&(%s)(%s=%s))", groupFilter, groupMember, encodeForFilter(userAttribute)); + + Set<Map<String, String>> matched = buildGroup(options, filter); +// log.trace("Located '" + matched.size() + "' LDAP groups for user '" + userName +// + "' using following ldap filter '" + filter + "'."); + + //iterate to extract just the group names. + for (Map<String, String> match : matched) { + ldapSet.add(match.get("id")); + } + } else { +// log.debug("Group lookup will not be performed due to no UserDN found for user " + userName); + } + + return ldapSet; + } + + private void generateUiLoggingStep4Exception(Exception ex) { + String groupSearchFilter = env + .getProperty(SystemSetting.LDAP_GROUP_FILTER + .getInternalName()); + String msg = "STEP-4:FAIL: There was an error searching with the groupFilter supplied: " + + groupSearchFilter + "'\n"; + msg += ex.getMessage(); + if (enableVerboseDebugging.isSelected()) { + msg = appendStacktraceToMsg(msg, ex); + } + log(msg); + } + + private void generateUiLoggingStep4Authz(String filter) { + Set<Map<String, String>> groups = buildGroup(env, filter); + String msg = "STEP-4:TESTING: Using Group Search Filter '" + + filter + "', " + groups.size() + + " ldap group(s) were located.\n"; + if (groups.size() > 0) { + HashMap<String, String>[] ldapLists = new HashMap[groups + .size()]; + groups.toArray(ldapLists); + msg += "STEP-4:PASS: Listing a few of the ldap groups located: \n"; + // 
iterate over first ten or less to demonstrate retrieve + for (int i = 0; (i < groups.size() && i < 10); i++) { + msg += ldapLists[i] + "\n"; + } + }else{ + msg+="STEP-4:WARN: With current settings, test user is not authorized for any groups. Is this correct?"; + } + log(msg); + } + + private void generateUiLoggingForStep4LdapFilter(String userName, + String filter) { + String msg = "STEP-4:TESTING: about to do ldap search with filter \n'" + + filter + + "'\n to locate groups that test user '"+userName+"' IS authorized to access."; + log(msg); + } + + + // throw the label and fields together, two to a row. + private HashMap<String, JTextField> loadUiFields(JPanel top, + String[] componentKeys) { + HashMap<String, JTextField> mappings = new HashMap<String, JTextField>(); + for (int i = 0; i < componentKeys.length; i++) { + String firstLabelKey = componentKeys[i]; + String secondLabelKey = componentKeys[++i]; + // locate second key + JPanel row = new JPanel(); + row.setLayout(new FlowLayout(FlowLayout.LEFT)); + JLabel label1 = new JLabel(firstLabelKey); + label1.setSize(textBoxWidth, 5); +// JTextField value1 = new JTextField(textBoxWidth); + JTextField value1 = null; + if (firstLabelKey.equalsIgnoreCase("Password:")&&(!showPasswords.isSelected())) { + value1 = new JPasswordField(textBoxWidth); + } else { + value1 = new JTextField(textBoxWidth); + } + JLabel label2 = new JLabel(secondLabelKey); + JTextField value2 = new JTextField(textBoxWidth); + row.add(label1); + row.add(value1); + row.add(Box.createRigidArea(new Dimension(0, 5))); + row.add(label2); + row.add(value2); + mappings.put(firstLabelKey, value1); + mappings.put(secondLabelKey, value2); + top.add(row); + } + + return mappings; + } + + private Properties getProperties(String contentProvider) { + Properties env = new Properties(); + env.setProperty(Context.INITIAL_CONTEXT_FACTORY, + "com.sun.jndi.ldap.LdapCtxFactory"); + env.setProperty(Context.PROVIDER_URL, contentProvider); + 
if(!enableLdapReferral.isSelected()){ + env.setProperty(Context.REFERRAL, "ignore"); + }else{ + String msg="**---- You have chosen to tell LDAP servers to [FOLLOW] context referrals. Default is [IGNORE] referrals. --**"; + log(msg); + env.setProperty(Context.REFERRAL, "follow"); + } + +// // Setup SSL if requested +// String protocol = ssl.isSelected()? "ssl":""; +// if ((protocol != null) && protocol.equals("ssl")) { +// String ldapSocketFactory = env +// .getProperty("java.naming.ldap.factory.socket"); +// if (ldapSocketFactory == null) { +// env.put("java.naming.ldap.factory.socket", +// UntrustedSSLSocketFactory.class.getName()); +// } +// env.put(Context.SECURITY_PROTOCOL, "ssl"); +// } + + return env; + } + + private String delineate() { + String line = "-"; + for (int i = 0; i < 30; i++) { + line += "-"; + } + return line; + } + + /** Takes care of delineating messages and conditional logging contents passed in. + * @param msg + */ + private void log(String msg) { + String message = "\n" + delineate() + "\n"; + message += msg; + message += "\n" + delineate() + "\n\n"; + //This flag can be used in the unlikely case that the UI hangs during a test operation.: + if(iterativeVerboseLogging.isSelected()){ + System.out.println(message); + } + testResults.setText(testResults.getText() + message); + } + + private SearchControls getSearchControls() { + int scope = SearchControls.SUBTREE_SCOPE; + int timeLimit = 0; + long countLimit = 0; + String[] returnedAttributes = null; + boolean returnObject = false; + boolean deference = false; + SearchControls constraints = new SearchControls(scope, countLimit, + timeLimit, returnedAttributes, returnObject, deference); + return constraints; + } + + /** Translate SystemSettings to familiar Properties instance since we're + * passing not one but multiple values. 
+ * + * @param systemSettings + * @return + */ + private Properties populateProperties(Properties existing) { + Properties properties = new Properties(); + if(existing!=null){ + properties = existing; + } + for (SystemSetting entry : SystemSetting.values()) { + if(entry!=null){ + switch(entry){ + case LDAP_BASED_JAAS_PROVIDER: + properties.put(entry.getInternalName(), ""); + break; + } + } + } + return properties; + } + + /**Build/retrieve the user DN. Not usually a property. + * + * @param options + * @param userName + * @param usePosixGroups boolean indicating whether we search for groups with posixGroup format + * @return + */ + private String getUserAttribute(Properties options, String userName, boolean usePosixGroups) { + Map<String, String> details = findLdapUserDetails(userName); + String userAttribute = null; + if (usePosixGroups) {//return just the username as posixGroup member search uses (&(%s)(memberUid=username)) + userAttribute = userName; + } else {//this is the default where group search uses (&(%s)(uniqueMember={userDn})) + userAttribute = details.get("dn"); + } + + return userAttribute; + } + + /** See LDAPStringUtil.encodeForFilter() for original code/source/author/etc. 
+ * <p>Encode a string so that it can be used in an LDAP search filter.</p> + * + * <p>The following table shows the characters that are encoded and their + * encoded version.</p> + * + * <table> + * <tr><th align="center">Character</th><th>Encoded As</th></tr> + * <tr><td align="center">*</td><td>\2a</td></tr> + * <tr><td align="center">(</td><td>\28</td></tr> + * <tr><td align="center">)</td><td>\29</td></tr> + * <tr><td align="center">\</td><td>\5c</td></tr> + * <tr><td align="center"><code>null</code></td><td>\00</td></tr> + * </table> + * + * <p>In addition to encoding the above characters, any non-ASCII character + * (any character with a hex value greater than <code>0x7f</code>) is also + * encoded and rewritten as a UTF-8 character or sequence of characters in + * hex notation.</p> + * + * @param filterString a string that is to be encoded + * @return the encoded version of <code>filterString</code> suitable for use + * in a LDAP search filter + * @see <a href="http://tools.ietf.org/html/rfc4515">RFC 4515</a> + */ + public static String encodeForFilter(final String filterString) { + if (filterString != null && filterString.length() > 0) { + StringBuilder encString = new StringBuilder(filterString.length()); + for (int i = 0; i < filterString.length(); i++) { + char ch = filterString.charAt(i); + switch (ch) { + case '*': // encode a wildcard * character + encString.append("\\2a"); + break; + case '(': // encode a open parenthesis ( character + encString.append("\\28"); + break; + case ')': // encode a close parenthesis ) character + encString.append("\\29"); + break; + case '\\': // encode a backslash \ character + encString.append("\\5c"); + break; + case '\u0000': // encode a null character + encString.append("\\00"); + break; + default: + if (ch <= 0x7f) { // an ASCII character + encString.append(ch); + } else if (ch >= 0x80) { // encode to UTF-8 + try { + byte[] utf8bytes = String.valueOf(ch).getBytes("UTF8"); + for (byte b : utf8bytes) { + 
encString.append(String.format("\\%02x", b)); + } + } catch (UnsupportedEncodingException e) { + // ignore + } + } + } + } + return encString.toString(); + } + return filterString; + } +} + +//Mock up the upgraded system properties approach to use SystemSetting +enum SystemSetting { + LDAP_BASED_JAAS_PROVIDER("CAM_JAAS_PROVIDER"), + LDAP_NAMING_PROVIDER_URL("CAM_LDAP_NAMING_PROVIDER_URL"), + USE_SSL_FOR_LDAP("CAM_LDAP_PROTOCOL"), + LDAP_LOGIN_PROPERTY("CAM_LDAP_LOGIN_PROPERTY"), + LDAP_FILTER("CAM_LDAP_FILTER"), + LDAP_GROUP_FILTER("CAM_LDAP_GROUP_FILTER"), + LDAP_GROUP_MEMBER("CAM_LDAP_GROUP_MEMBER"), + LDAP_GROUP_QUERY_PAGE_SIZE("CAM_LDAP_GROUP_QUERY_PAGE_SIZE"), + LDAP_BASE_DN("CAM_LDAP_BASE_DN"), + LDAP_BIND_DN("CAM_LDAP_BIND_DN"), + LDAP_BIND_PW("CAM_LDAP_BIND_PW"), + LDAP_NAMING_FACTORY("CAM_LDAP_NAMING_FACTORY_INITIAL"), + LDAP_GROUP_USE_POSIX("CAM_LDAP_GROUP_USE_POSIX"), + ; + + private String internalName; + + private SystemSetting(String name) { + this.internalName = name; + } + + public String getInternalName() { + return internalName; + } + + public static SystemSetting getByInternalName(String internalName) { + for (SystemSetting p : SystemSetting.values()) { + if (p.internalName.equals(internalName)) { + return p; + } + } + return null; + } +} + +
commit 251234a787556629f0a9efcc82c1206079680e4e Author: Simeon Pinder spinder@redhat.com Date: Tue Jul 30 17:11:49 2013 -0400
Move TestLdapSettings to its own module.
diff --git a/modules/helpers/ldap-tool/pom.xml b/modules/helpers/ldap-tool/pom.xml new file mode 100644 index 0000000..28f8678 --- /dev/null +++ b/modules/helpers/ldap-tool/pom.xml @@ -0,0 +1,74 @@ +<?xml version="1.0" encoding="UTF-8"?> + +<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> + + <modelVersion>4.0.0</modelVersion> + + <parent> + <groupId>org.rhq</groupId> + <artifactId>rhq-parent</artifactId> + <version>4.9.0-SNAPSHOT</version> + <relativePath>../../../pom.xml</relativePath> + </parent> + + <groupId>org.rhq.helpers</groupId> + <artifactId>ldap-tool</artifactId> + <packaging>jar</packaging> + <version>4.9.0-SNAPSHOT</version> + + <properties> + <executable.name>TestLdapSettings</executable.name> + <tool.version>1.0.1</tool.version> + </properties> + + <name>RHQ ldap test tool</name> + <description>Executable jar to exercise LDAP settings used by RHQ with external LDAP server.</description> + + <build> + + <plugins> + + <plugin> + <artifactId>maven-jar-plugin</artifactId> + <configuration> + <archive> + <manifest> + <packageName>com.test</packageName> + <mainClass>com.test.TestLdapSettings</mainClass> + </manifest> + </archive> + </configuration> + </plugin> + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-antrun-plugin</artifactId> + <version>1.7</version> + <executions> + <execution> + <id>tool-finalize</id> + <phase>verify</phase> + <configuration> + <target> + <copy file="${project.build.directory}/${project.build.finalName}.jar" + tofile="${project.build.directory}/${executable.name}.jar"/> + </target> + </configuration> + <goals> + <goal>run</goal> + </goals> + </execution> + </executions> + </plugin> + + <plugin> + <artifactId>maven-release-plugin</artifactId> + <version>2.1</version> + </plugin> + + </plugins> + + </build> + +</project> + + diff --git 
a/modules/helpers/ldap-tool/src/main/java/com/test/TestLdapSettings.java b/modules/helpers/ldap-tool/src/main/java/com/test/TestLdapSettings.java new file mode 100644 index 0000000..75ff277 --- /dev/null +++ b/modules/helpers/ldap-tool/src/main/java/com/test/TestLdapSettings.java @@ -0,0 +1,1285 @@ +package com.test; + +import java.awt.BorderLayout; +import java.awt.Color; +import java.awt.Component; +import java.awt.Dimension; +import java.awt.FlowLayout; +import java.awt.event.ActionEvent; +import java.awt.event.ActionListener; +import java.awt.event.ItemEvent; +import java.awt.event.ItemListener; +import java.awt.event.WindowAdapter; +import java.awt.event.WindowEvent; +import java.io.IOException; +import java.io.PrintWriter; +import java.io.StringWriter; +import java.io.UnsupportedEncodingException; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Properties; +import java.util.Set; + +import javax.naming.Context; +import javax.naming.NamingEnumeration; +import javax.naming.NamingException; +import javax.naming.directory.Attribute; +import javax.naming.directory.Attributes; +import javax.naming.directory.InvalidSearchFilterException; +import javax.naming.directory.SearchControls; +import javax.naming.directory.SearchResult; +import javax.naming.ldap.Control; +import javax.naming.ldap.InitialLdapContext; +import javax.naming.ldap.PagedResultsControl; +import javax.naming.ldap.PagedResultsResponseControl; +import javax.swing.Box; +import javax.swing.BoxLayout; +import javax.swing.JButton; +import javax.swing.JCheckBox; +import javax.swing.JFrame; +import javax.swing.JLabel; +import javax.swing.JPanel; +import javax.swing.JPasswordField; +import javax.swing.JScrollPane; +import javax.swing.JTextArea; +import javax.swing.JTextField; +import javax.swing.SwingUtilities; +import javax.swing.UIManager; +import javax.swing.UnsupportedLookAndFeelException; +import javax.swing.border.LineBorder; +import 
javax.swing.border.TitledBorder; +import javax.swing.event.ChangeEvent; +import javax.swing.event.ChangeListener; + +//import org.rhq.enterprise.server.util.security.UntrustedSSLSocketFactory; + +/* Is a development test tool that allows the user to simulate the RHQ server side + * LDAP calls during auth/authz operations. + * + * The specific LDAP logic below needs to mirror the latest RHQ code and allow the user + * to test our their configuration without requring a specific RHQ/JON build as a dependency. + * + * NOTE: To avoid a runtime dependency on specific versions of RHQ or JON, the small implementation + * methods were copied into this class with minimatl changes for logging and ui messaging. The + * definitive implementation for each 'copied' method can be found in LDAPGroupManagerBean. + * + * @author Simeon Pinder + */ +public class TestLdapSettings extends JFrame { + //shared fields + private JTextArea testResults; + private JCheckBox showPasswords; + private JCheckBox ssl; + private JLabel groupPageSizeName; + private JTextField groupMemberQueryValue; + private JTextField testUserNameValue; + private JTextField testUserPasswordValue; + private HashMap<String, JTextField> fieldMappings; + private String[] keys; + private JCheckBox enableLdapReferral; + private JCheckBox enableVerboseDebugging; + private JCheckBox enableVerboseGroupParsing; + private JCheckBox iterativeVerboseLogging; + private JCheckBox enablePosixGroups; + private JCheckBox enable32xFeatures; + private String advdb = "**Verbose:debug ----"; + private static final String BASEDN_DELIMITER = ";"; + + private static final long serialVersionUID = 1L; + int textBoxWidth = 20; + private static JPanel top = null; + private static JPanel testUserRegion = null; + private static Properties env=null; + + public static void main(String args[]) { + new TestLdapSettings(); + } + //After enabling support for Query parsing, we need to warn users of the effects. 
+ final String warnMessage = "<html>***WARNING: Depending upon<br>" + + "i)how the ldap server is configured <br>" + + "ii)client query paging settings <br>" + + " enabling <b>'more detailed logging'</b>,<br>" + + " <b>'more group parsing'</b> AND/OR <b>'also log to console'</b> may cause the console to hang/freeze <br>" + + " as the LDAP tool continues to parse large query results. If that occurs it is <br>" + + " suggested that you kill tool and re-run with 'also log to console' so that the console logs<br>" + + " will show which dataset is causing the delay and then you should modify your search|group|member<br>" + + " filters accordingly to <b>return smaller results</b> and/or <b>consume larger payloads</b>.<br>" + + "***WARNING</html>"; + + // Configure window properties + private TestLdapSettings() { + + setTitle("Check LDAP Settings: Simulates LDAP checks/queries of RHQ LDAP integration"); + getContentPane().setLayout(new BorderLayout()); + // top panel definition + top = new JPanel(); + top.setLayout(new BoxLayout(top, BoxLayout.Y_AXIS)); + top.setBorder(LineBorder.createGrayLineBorder()); + //define checkbox here as it's checked when generating UI. 
+ showPasswords = new JCheckBox("show passwords:"); + showPasswords.setSelected(false); + + keys = new String[] { "URL:", "Search Filter:", + "Search Base:","Login Property", + "Username:", "Group Search Filter:", + "Password:", "Group Member Filter:", + }; + fieldMappings = loadUiFields(top, keys); + + //add the two checkboxes for additiona debugging options + enableLdapReferral= new JCheckBox("[follow] ldap referrals"); + enableLdapReferral.setSelected(false); + enableVerboseDebugging= new JCheckBox("more verbose logging"); + enableVerboseDebugging.setSelected(false); + enableVerboseDebugging.setToolTipText(warnMessage); + enableVerboseGroupParsing= new JCheckBox("more detailed group parsing"); + enableVerboseGroupParsing.setSelected(false); + enableVerboseGroupParsing.setToolTipText("*Take care when using this mode with a large number of groups* Every group discovered is parsed/listed."); + iterativeVerboseLogging= new JCheckBox("also log to console"); + iterativeVerboseLogging.setSelected(false); + iterativeVerboseLogging.setToolTipText("This mode is useful when the test tool is having difficulty returning results from large queries."); + iterativeVerboseLogging.setToolTipText(warnMessage); + enablePosixGroups= new JCheckBox("is Posix Group"); + enablePosixGroups.setSelected(false); + enablePosixGroups.setEnabled(false); + + //put into 3.2.x functionality row + JPanel jon32xRegion = new JPanel(); + jon32xRegion.setLayout(new FlowLayout(FlowLayout.LEFT)); + LineBorder jon32xLineBorder = new LineBorder(Color.BLACK, 2); + TitledBorder jon32xBorder = new TitledBorder(jon32xLineBorder, "JON 3.2.x/RHQ 4.8.x specific features:"); + jon32xRegion.setBorder(jon32xBorder); + enable32xFeatures= new JCheckBox("enable JON 3.2.x/RHQ 4.8.x features"); + enable32xFeatures.setToolTipText("This enables features not available before RHQ 4.8.x/JON 3.2.x."); + enable32xFeatures.setSelected(false); + enable32xFeatures.addItemListener(new ItemListener() { + @Override + public void 
itemStateChanged(ItemEvent e) { + if(enable32xFeatures.isSelected()){ + groupPageSizeName.setEnabled(true); + groupMemberQueryValue.setEnabled(true); + groupMemberQueryValue.setEditable(true); + groupMemberQueryValue.setText("1000"); + enablePosixGroups.setEnabled(true); + }else{ + groupMemberQueryValue.setText(""); + groupPageSizeName.setEnabled(false); + groupMemberQueryValue.setEnabled(false); + groupMemberQueryValue.setEditable(false); + enablePosixGroups.setEnabled(false); + enablePosixGroups.setSelected(false); + } + } + }); + + jon32xRegion.add(enable32xFeatures); + groupPageSizeName = new JLabel("Group Query Page Size:"); + groupPageSizeName.setEnabled(false); + groupMemberQueryValue = new JTextField(10); + groupMemberQueryValue.setText("1000"); + groupMemberQueryValue.setEditable(false); + jon32xRegion.add(groupPageSizeName); + jon32xRegion.add(groupMemberQueryValue); + jon32xRegion.add(enablePosixGroups); + top.add(jon32xRegion); + + //put into row display + JPanel advancedDebugRegion = new JPanel(); + advancedDebugRegion.setLayout(new FlowLayout(FlowLayout.LEFT)); + LineBorder advancedBorder = new LineBorder(Color.BLACK, 2); + TitledBorder debugBorder = new TitledBorder(advancedBorder, "Debug: **Warning --<hover HERE>**"); + advancedDebugRegion.setBorder(debugBorder); + advancedDebugRegion.add(enableLdapReferral); + advancedDebugRegion.add(enableVerboseDebugging); + advancedDebugRegion.add(enableVerboseGroupParsing); + advancedDebugRegion.add(iterativeVerboseLogging); + advancedDebugRegion.setToolTipText(warnMessage); + top.add(advancedDebugRegion); + + JPanel securityPanel = new JPanel(); + securityPanel.setLayout(new FlowLayout(FlowLayout.LEFT)); + showPasswords.addItemListener(new ItemListener() { + @Override + public void itemStateChanged(ItemEvent e) { + SwingUtilities.invokeLater(new Runnable() { + @Override + public void run() { + //store off existing value + String existingValue = ""; + String existingTestUserPass = ""; + JTextField current = 
fieldMappings.get("Password:"); + if(current instanceof JPasswordField){ + JPasswordField pass = ((JPasswordField)current); + if(pass!=null){ + char[] charArray = pass.getPassword(); + if(charArray.length>0){ + existingValue = new String(charArray); + } + } + }else{ + existingValue = current.getText(); + } + //save off test user password as well + if(testUserPasswordValue instanceof JPasswordField){ + JPasswordField pass = ((JPasswordField)testUserPasswordValue); + if(pass!=null){ + char[] charArray = pass.getPassword(); + if(charArray.length>0){ + existingTestUserPass = new String(charArray); + } + } + }else{ + existingTestUserPass=testUserPasswordValue.getText(); + } + + JTextField updatedContainer = null; + if(showPasswords.isSelected()){ + updatedContainer = new JTextField(textBoxWidth); + updatedContainer.setText(existingValue); + testUserPasswordValue = new JTextField(textBoxWidth); + testUserPasswordValue.setText(existingTestUserPass); + }else{ + updatedContainer = new JPasswordField(textBoxWidth); + updatedContainer.setText(existingValue); + testUserPasswordValue = new JPasswordField(textBoxWidth); + testUserPasswordValue.setText(existingTestUserPass); + } + //locate the JPanel and rebuild it Should be at index 3 + JPanel passwordRow = (JPanel) top.getComponent(3); +// JTextField jf = (JTextField) passwordRow.getComponent(1); + //store off existing components + Component[] existing = new Component[passwordRow.getComponentCount()]; + for(int i=0; i<passwordRow.getComponentCount();i++){ + existing[i] = passwordRow.getComponent(i); + } + passwordRow.removeAll(); + for(int j=0;j<existing.length;j++){ + if(j==1){//insert new JTextField instead + passwordRow.add(updatedContainer); + }else{ + passwordRow.add(existing[j]); + } + } + //reload testUserRegion + //store off existing components + Component[] existingTest = new Component[testUserRegion.getComponentCount()]; + for(int i=0; i<testUserRegion.getComponentCount();i++){ + existingTest[i] = 
testUserRegion.getComponent(i); + } + testUserRegion.removeAll(); + for(int j=0;j<existingTest.length;j++){ + if(j==3){//insert new JTextField instead + testUserRegion.add(testUserPasswordValue); + }else{ + testUserRegion.add(existingTest[j]); + } + } + + top.revalidate(); + top.repaint(); + } + }); + } + }); + securityPanel.add(showPasswords); + ssl = new JCheckBox("SSL:"); + ssl.setEnabled(false); + securityPanel.add(ssl); + top.add(securityPanel); + + // test user auth region + testUserRegion = new JPanel(); + testUserRegion.setLayout(new FlowLayout(FlowLayout.LEFT)); + LineBorder border = new LineBorder(Color.BLUE, 2); + TitledBorder tBorder = new TitledBorder(border, "Authentication/Authorization Check Credentials: (insert valid ldap user assigned to group)"); + testUserRegion.setBorder(tBorder); + JLabel testUserName = new JLabel("Test UserName:"); + testUserNameValue = new JTextField(textBoxWidth); + JLabel testUserPassword = new JLabel("Test Password:"); +// testUserPasswordValue = new JTextField(textBoxWidth); + testUserPasswordValue = new JPasswordField(textBoxWidth); + testUserRegion.add(testUserName); + testUserRegion.add(testUserNameValue); + testUserRegion.add(testUserPassword); + testUserRegion.add(testUserPasswordValue); + top.add(testUserRegion); + + // center + JPanel center = new JPanel(); + center.setLayout(new BoxLayout(center, BoxLayout.X_AXIS)); + // build center panel + buildCenterPanel(center); + + // final component layout + getContentPane().add(top, BorderLayout.NORTH); + getContentPane().add(center, BorderLayout.CENTER); + this.setSize(720, 700); + addWindowListener(new WindowAdapter() { + public void windowClosing(WindowEvent e) { + System.exit(0); + } + }); + setVisible(true); + } + + // define the center display panel. 
+ private void buildCenterPanel(JPanel center) { + // First element is Test Button + JButton test = new JButton("Test Settings"); + center.add(test); + // second is large text box that display ldap queries + testResults = new JTextArea("(click button to test settings values: simulates 4 separate checks showing ldap filters used)", + 40, 40); + JScrollPane jsp = new JScrollPane(testResults); + center.add(jsp); + test.addActionListener(new ActionListener() { + public void actionPerformed(ActionEvent e) { + testResults.setText("");//clear out empty msg + //trim spaces from all fields + String ldapServer = fieldMappings.get(keys[0]).getText().trim(); + String searchFilter = fieldMappings.get(keys[1]).getText().trim(); + String searchBase = fieldMappings.get(keys[2]).getText().trim(); + String loginProperty = fieldMappings.get(keys[3]).getText().trim(); + String bindUserName = fieldMappings.get(keys[4]).getText().trim(); + String groupSearchFilter = fieldMappings.get(keys[5]).getText().trim(); + String bindPassword = fieldMappings.get(keys[6]).getText().trim(); + String groupMemberFilter = fieldMappings.get(keys[7]).getText().trim(); + String groupMemberQuerySize = groupMemberQueryValue.getText().trim(); + String testUserName = testUserNameValue.getText().trim(); + String testUserPassword = testUserPasswordValue.getText().trim(); + // validate initial required elements + String msg = null; + boolean proceed = true; + //valid required details set. 
+ if (ldapServer.isEmpty() || bindUserName.isEmpty() + || bindPassword.isEmpty() || searchBase.isEmpty()) { + msg ="STEP-1:FAIL: "+ keys[0] + ", " + keys[2] + ", " + keys[4] + ", " + + keys[6] + " cannot be empty to proceed."; + log(msg); + proceed = false; + } + env = null; + InitialLdapContext ctx = null; + if (proceed) {// attempt initial ldap bind from RHQ server + msg = "STEP-1:TESTING: Attempting to bind to server:" + ldapServer + + "\n with user '" + bindUserName + + "' and password entered."; + log(msg); + env = getProperties(ldapServer); + env.setProperty(Context.SECURITY_PRINCIPAL, bindUserName); + env.setProperty(Context.SECURITY_CREDENTIALS, bindPassword); + env.setProperty(Context.SECURITY_AUTHENTICATION, "simple"); + //put the rest of the LDAP properties into the Properties instance for use later. + //there still needs to be separate variables since some are for UI validation. + env.setProperty(SystemSetting.LDAP_GROUP_FILTER.getInternalName(), groupSearchFilter); + env.setProperty(SystemSetting.LDAP_GROUP_MEMBER.getInternalName(), groupMemberFilter); + env.setProperty(SystemSetting.LDAP_BASE_DN.getInternalName(), searchBase); + env.setProperty(SystemSetting.LDAP_LOGIN_PROPERTY.getInternalName(), loginProperty); + env.setProperty(SystemSetting.LDAP_BIND_DN.getInternalName(), bindUserName); + env.setProperty(SystemSetting.LDAP_BIND_PW.getInternalName(), bindPassword); + env.setProperty(SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.getInternalName(), groupMemberQuerySize); + + for(Object key :env.keySet()){ + System.out.println(key+"="+env.getProperty(key+"")); + } + + try { + ctx = new InitialLdapContext(env, null); + msg = "STEP-1:PASS: LDAP bind credentials are correct. 
Successfully connected to '" + + ldapServer + + "'.\n This means the LDAP Bind credentials for the RHQ Server authentication/authorization requests to ldap server " + + "are correct."; + if(enableVerboseDebugging.isSelected()){ + msg+="\n"+advdb+" LDAP simple authentication bind successful."; + } + log(msg); + proceed = true; + } catch (Exception ex) { + msg = "STEP-1:FAIL: Unable to connect to the LDAP server with credentials specified.\n"; + msg+="Exception:"+ex.getMessage(); + if(enableVerboseDebugging.isSelected()){ + msg = appendStacktraceToMsg(msg, ex); + } + log(msg); + proceed = false; + } + } + if (proceed) {// retrieve test credentials to test run auth + // load search controls + SearchControls searchControls = getSearchControls(); + // validating searchFilter and test user/pass creds + proceed = true; + if (testUserName.isEmpty() || (testUserPassword.isEmpty())) { + msg = "STEP-2:FAIL: Test Username/Password fields cannot be empty for this step."; + log(msg); + proceed = false; + } + // testing a valid user involves a filtered ldap search + // using the loginProperty, and optionally searchFilter + String userDN = ""; + if (proceed) { + // default loginProperty to cn if it's not set + if (loginProperty.isEmpty()) { + loginProperty = "cn"; + if(enableVerboseDebugging.isSelected()){ + String mesg = "As you have not specified a login property, defaulting to 'cn'"; + log(advdb+" "+msg); + } + } + String filter; + if (!searchFilter.isEmpty()) { + filter = "(&(" + loginProperty + "=" + testUserName + + ")" + "(" + searchFilter + "))"; + } else { + filter = "(" + loginProperty + "=" + testUserName + + ")"; + } + if(enableVerboseDebugging.isSelected()){ + log(advdb+" The searchfilter is optionally appended to login property for additional shared attribute across users."); + } + msg = "STEP-2:TESTING: To validate the test user the following LDAP filtered component will be used to find matching users:\n"; + msg += filter; + log(msg); + // test out the search on the 
target ldap server + try { + String[] baseDNs = searchBase.split(";"); + for (int x = 0; x < baseDNs.length; x++) { + NamingEnumeration answer = ctx.search( + baseDNs[x], filter, searchControls); + if(enableVerboseDebugging.isSelected()){ + log(advdb+" this search was excuted against DN component '"+baseDNs[x]+"'."); + } + // boolean ldapApiNpeFound = false; + if (!answer.hasMoreElements()) { + msg="STEP-2:WARN Unable to locate a matching users for the filter'"+filter+ + "'. Please check your loginProperty. Usually 'cn' or 'uid'"; + log(msg); + continue; + } + // Going with the first match + SearchResult si = (SearchResult) answer.next(); + + // Construct the UserDN + userDN = si.getName() + "," + baseDNs[x]; + msg = "STEP-2:PASS: The test user '" + + testUserName + + "' was succesfully located, and the following userDN will be used in authorization check:\n"; + msg += userDN; + log(msg); + + ctx.addToEnvironment(Context.SECURITY_PRINCIPAL, userDN); + ctx.addToEnvironment(Context.SECURITY_CREDENTIALS,testUserPassword); + ctx.addToEnvironment(Context.SECURITY_AUTHENTICATION,"simple"); + + // if successful then verified that user and pw + // are valid ldap credentials + ctx.reconnect(null); + msg = "STEP-2:PASS: The user '" + + testUserName + + "' was succesfully authenticated using userDN '" + + userDN + "' and password provided.\n" + +"*Note: the loginProperty must match the loginProperty listed in dn: for the user. It is the DN that RHQ will lookup and use."; + log(msg); + } + } catch (Exception ex) { + msg = "STEP-2:FAIL: There was an error while searching for or authenticating the user '" + + testUserName + "'\n"; + msg += ex.getMessage(); + if(enableVerboseDebugging.isSelected()){ + msg = appendStacktraceToMsg(msg, ex); + } + log(msg); + proceed=false; + } + } + // with authentication completed, now check authorization. 
+ // validate filter components to list all available groups + proceed = false; + if (!groupSearchFilter.isEmpty()) { + Set<Map<String, String>> ret = new HashSet<Map<String, String>>(); + String filter = null; + + if (groupSearchFilter.startsWith("(") && groupSearchFilter.endsWith(")")){ + filter = groupSearchFilter; // RFC 2254 does not allow for ((expression)) + }else{ + filter = String + .format("(%s)", groupSearchFilter); + } + msg = "STEP-3:TESTING: This ldap filter " + + filter + + " will be used to locate ALL available LDAP groups"; + log(msg); + + Properties systemConfig = populateProperties(env); + + ret = buildGroup(systemConfig, filter); + msg = "STEP-3:TESTING: Using Group Search Filter '" + + filter + "', " + ret.size() + + " ldap group(s) were located.\n"; + if (ret.size() > 0) { + HashMap<String, String>[] ldapLists = new HashMap[ret + .size()]; + ret.toArray(ldapLists); + // in this mode go beyond the first ten results. + if (enableVerboseGroupParsing.isSelected()) { + msg += "STEP-3:PASS: Listing 'all' of the ldap groups located: \n"; + for (int i = 0; i < ret.size(); i++) { + msg += ldapLists[i] + "\n"; + } + } else {// otherwise only show first 10[subset of + // available groups] + msg += "STEP-3:PASS: Listing a few(<=10) of the ldap groups located: \n"; + for (int i = 0; (i < ret.size() && i < 10); i++) { + msg += ldapLists[i] + "\n"; + } + } + proceed = true;// then can proceed to next step. 
+ } + log(msg); + } else { + msg = "STEP-3:FAIL: Group Search Filter: cannot be empty to proceed."; + log(msg); + proceed=false; + } + // retrieve lists of authorized groups available for the + if (proceed) { + // check groupMember + if (!groupMemberFilter.isEmpty()) { +// Map<String, String> userDetails = new HashMap<String, String>(); +// userDetails = findLdapUserDetails(userDN); + Set<String> userDetails = findAvailableGroupsFor(testUserName); + + if(!userDetails.isEmpty()){ + proceed=true; + } + } else { + msg = "STEP-4:FAIL: Group Member Filter must be non-empty to proceed with simulating authorization check for test user."; + log(msg); + } + } + if(proceed){ + msg="COMPLETED:PASS: The current settings, for successful steps, should be correct to enter into your RHQ server."; + msg+="\n\n\n\n When you encounter failures, warnings or other unexpected results you should use an external "; + msg+="LDAP search utility to check that the generated filters return the expected LDAP results."; + log(msg); + } + } + } + }); + } + + private String appendStacktraceToMsg(String msg, Exception ex) { + String moreVerbose = ""; + moreVerbose+=advdb+" Exception type:"+ex.getClass()+"\n"; + moreVerbose+=advdb+" Exception stack trace reference:"+ex.getStackTrace()+"\n"; + if(ex.getStackTrace()!=null){ + StringWriter sw = new StringWriter(); + PrintWriter pw = new PrintWriter(sw); + ex.printStackTrace(pw); + moreVerbose+=advdb+" stack trace reference:"+sw.toString(); + } + msg+="\n"+moreVerbose; + return msg; + } + + private boolean containsIllegalLdap(String currentValue) { + boolean invalidData = false; + if((currentValue!=null)&&(!currentValue.trim().isEmpty())){ + //TODO: spinder 3/17/11: need to figure out regex to filter/detect bad data in returned ldap. Giving up for now. 
+// String regex = "(?<=(?:[^\]|^)(\\)+|[^\]|^)[/,+"><;=#]|(?<=(?:[^\]|^)(\\)+|[^\]|^)\(?!\|[/,+"><;=#]| $|(?<=^\) )|^"; +// regex = "(?<=(?:[^\\]|^)(\\\\)+|[^\\]|^)[/,+\"><;=#]|(?<=(?:[^\\]|^)(\\\\)+|[^\\]|^)\\(?!\\|[/,+\"><;=#]| $|(?<=^\\) )|^"; +// System.out.println("++++++++ CURR VAL:"+currentValue+":INV-CHeck:"+currentValue.matches(",+"\<;\n=/")+":NEWCHECK:"+(currentValue.matches(regex))); +// if(currentValue.matches(",+"\<;\n=/")){ +// invalidData=true; +// } +// String badList = ",+"\<;\n="; + String badList = "+"\<;\n"; + for(char car :currentValue.toCharArray()){ + for(char c :badList.toCharArray()){ + if(car == c){ + invalidData=true; + } + } + } + + } + return invalidData; + } + /** + * @throws NamingException + * @see org.jboss.security.auth.spi.UsernamePasswordLoginModule#validatePassword(java.lang.String,java.lang.String) + */ + protected Set<Map<String, String>> buildGroup(Properties systemConfig, String filter) { + Set<Map<String, String>> groupDetailsMap = new HashSet<Map<String, String>>(); + // Load our LDAP specific properties + // Load the BaseDN + String baseDN = (String) systemConfig.get(SystemSetting.LDAP_BASE_DN.getInternalName()); + + // Load the LoginProperty + String loginProperty = (String) systemConfig.get(SystemSetting.LDAP_LOGIN_PROPERTY.getInternalName()); + if (loginProperty == null) { + // Use the default + loginProperty = "cn"; + } + // Load any information we may need to bind + String bindDN = (String) systemConfig.get(SystemSetting.LDAP_BIND_DN.getInternalName()); + String bindPW = (String) systemConfig.get(SystemSetting.LDAP_BIND_PW.getInternalName()); + if (bindDN != null) { + systemConfig.setProperty(Context.SECURITY_PRINCIPAL, bindDN); + systemConfig.setProperty(Context.SECURITY_CREDENTIALS, bindPW); + systemConfig.setProperty(Context.SECURITY_AUTHENTICATION, "simple"); + } + try { + InitialLdapContext ctx = new InitialLdapContext(systemConfig, null); + SearchControls searchControls = getSearchControls(); + /*String filter 
= "(&(objectclass=groupOfUniqueNames)(uniqueMember=uid=" + userName + + ",ou=People, dc=rhndev, dc=redhat, dc=com))";*/ + + //modify the search control to only include the attributes we will use + String[] attributes = { "cn", "description" }; + searchControls.setReturningAttributes(attributes); + + //BZ:964250: add rfc 2696 + //default to 1000 results. System setting page size from UI should be non-negative integer > 0. + //additionally as system settings are modifiable via CLI which may not have param checking enabled do some + //more checking. + int defaultPageSize = 1000; + // only if they're enabled in the UI. + if (enable32xFeatures.isSelected()) { + String groupPageSize = systemConfig.getProperty( + SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE + .getInternalName(), "" + defaultPageSize); + if ((groupPageSize != null) + && (!groupPageSize.trim().isEmpty())) { + int passedInPageSize = -1; + try { + passedInPageSize = Integer + .valueOf(groupPageSize.trim()); + if (passedInPageSize > 0) { + defaultPageSize = passedInPageSize; + if(enableVerboseDebugging.isSelected()){ + log(advdb + + " LDAP Group Query Page Sizing of '"+defaultPageSize+"' is being requested from server."); + } + } + } catch (NumberFormatException nfe) { + // log issue and do nothing. Go with the default. + String msg = "LDAP Group Page Size passed in '" + + groupPageSize + + "' in is invalid. Defaulting to 1000 results." + + nfe.getMessage(); + log(msg); + } + } + ctx.setRequestControls(new Control[] { new PagedResultsControl( + defaultPageSize, Control.CRITICAL) }); + } + // Loop through each configured base DN. It may be useful + // in the future to allow for a filter to be configured for + // each BaseDN, but for now the filter will apply to all. 
+ String[] baseDNs = baseDN.split(BASEDN_DELIMITER); + + for (int x = 0; x < baseDNs.length; x++) { + if (enableVerboseDebugging.isSelected()) { + log(advdb + + " this search was excuted against DN component '" + + baseDNs[x] + "'."); + } + executeGroupSearch(filter, groupDetailsMap, ctx, searchControls, baseDNs, x); + + // continually parsing pages of results until we're done. + // only if they're enabled in the UI. + if (enable32xFeatures.isSelected()) { + // handle paged results if they're being used here + byte[] cookie = null; + Control[] controls = ctx.getResponseControls(); + if (controls != null) { + for (Control control : controls) { + if (control instanceof PagedResultsResponseControl) { + PagedResultsResponseControl pagedResult = (PagedResultsResponseControl) control; + cookie = pagedResult.getCookie(); + } + } + } + + while (cookie != null) { + String msg = "RFC 2696 is supported by the server and we are paging through the results. "+ + groupDetailsMap.size()+" results returned so far."; + if(enableVerboseGroupParsing.isSelected()){ + log(advdb + + msg); + } + // ensure the next requests contains the session/cookie + // details + ctx.setRequestControls(new Control[] { new PagedResultsControl( + defaultPageSize, cookie, Control.CRITICAL) }); + executeGroupSearch(filter, groupDetailsMap, ctx, + searchControls, baseDNs, x); + // empty out cookie + cookie = null; + // test for further iterations + controls = ctx.getResponseControls(); + if (controls != null) { + for (Control control : controls) { + if (control instanceof PagedResultsResponseControl) { + PagedResultsResponseControl pagedResult = (PagedResultsResponseControl) control; + cookie = pagedResult.getCookie(); + } + } + } + } + } + }//end of for loop + } catch (NamingException e) { + if (e instanceof InvalidSearchFilterException) { + InvalidSearchFilterException fException = (InvalidSearchFilterException) e; + String message = "The ldap group filter defined is invalid "; + log(message); + } + 
//TODO: check for ldap connection/unavailable/etc. exceptions. + else { + String mesg = "LDAP communication error: " + e.getMessage(); + log(mesg); + } + } catch (IOException iex) { + String msg = "Unexpected LDAP communciation error:" + iex.getMessage(); + log(msg); + } + + return groupDetailsMap; + } + + /** Executes the LDAP group query using the filters, context and search controls, etc. parameters passed in. + * The matching groups located during processing this pages of results are added as new entries to the + * groupDetailsMap passed in. + * + * @param filter + * @param groupDetailsMap + * @param ctx + * @param searchControls + * @param baseDNs + * @param x + * @throws NamingException + */ + private void executeGroupSearch(String filter, Set<Map<String, String>> groupDetailsMap, InitialLdapContext ctx, + SearchControls searchControls, String[] baseDNs, int x) throws NamingException { + //execute search based on controls and context passed in. + NamingEnumeration<SearchResult> answer = ctx.search(baseDNs[x], filter, searchControls); + boolean ldapApiEnumerationBugEncountered = false; + while ((!ldapApiEnumerationBugEncountered) && answer.hasMoreElements()) {//BZ:582471- ldap api bug change + // We use the first match + SearchResult si = null; + try { + si = answer.next(); + } catch (NullPointerException npe) { + if (enableVerboseDebugging.isSelected()) { + log(advdb + + " NullPtr exception detected. If known LDAP api enum npe ignore: " + + npe.getMessage() + "."); + } + ldapApiEnumerationBugEncountered = true; + break; + } + + if (enableVerboseDebugging.isSelected() + || enableVerboseGroupParsing.isSelected()) { + Attributes attributeContainer = si.getAttributes(); + NamingEnumeration<? 
extends Attribute> attributes = attributeContainer + .getAll(); + String attributesReturned = " "; + while (attributes.hasMore()) { + attributesReturned += attributes.next().getID() + ","; + } + String dbugMesg = "\n" + + advdb + + " Group search LDAP (" + + attributeContainer.size() + + ") attributes located for group '" + + si.getName() + + "' are [" + + attributesReturned.substring(0, + attributesReturned.length() - 1) + "]."; + // directly update here to shorten messages for lots of groups + testResults.setText(testResults.getText() + dbugMesg); + //This flag can be used in the unlikely case that the UI hangs during a test operation.: + if(iterativeVerboseLogging.isSelected()){ + System.out.println(dbugMesg); + } + + // additionally parse attribute ids and values for illegal ldap + // characters + if (enableVerboseGroupParsing.isSelected()) { + attributes = attributeContainer.getAll(); + String currentAttributeId = ""; + String currentValue = ""; + // spinder: 3/17/11: should we bail on first bad data or + // display them all? 
+ while (attributes.hasMore()) { + boolean badData = false; + Attribute att = attributes.next(); + currentAttributeId = att.getID(); + if (containsIllegalLdap(currentAttributeId)) { + log(advdb + + " LDAP Group: bad atrribute data detected for group '" + + si.getName() + "' for attribute '" + + currentAttributeId + "'."); + badData = true; + } + if (att.getAll() != null) { + NamingEnumeration<?> enumer = att.getAll(); + while (enumer.hasMore()) { + currentValue = enumer.next() + ""; + if (containsIllegalLdap(currentValue)) { + log(advdb + + " LDAP Group: bad data detected for group '" + + si.getName() + + "' with attribute '" + + currentAttributeId + + "' and value:" + currentValue); + badData = true; + } + } + } + if (badData) { + log(advdb + + "** LDAP Group: Some bad LDAP data detected for group '" + + si.getName() + "'."); + } + } + } + } + + Map<String, String> entry = new HashMap<String, String>(); + // String name = (String) si.getAttributes().get("cn").get(); + Attribute commonNameAttr = si.getAttributes().get("cn"); + if (commonNameAttr != null) { + String name = (String) commonNameAttr.get(); + name = name.trim(); + Attribute desc = si.getAttributes().get("description"); + String description = desc != null ? (String) desc.get() : ""; + description = description.trim(); + entry.put("id", name); + entry.put("name", name); + entry.put("description", description); + groupDetailsMap.add(entry); + } else {// unable to retrieve details for specific group. + log(advdb + + " There was an error retrieving 'cn' attribute for group '" + + si.getName() + + "'. Not adding to returned list of groups. 
"); + } + } + } + + public Map<String, String> findLdapUserDetails(String userName) { + // Load our LDAP specific properties + Properties systemConfig = env; + HashMap<String, String> userDetails = new HashMap<String, String>(); + + // Load the BaseDN + String baseDN = (String) systemConfig.get(SystemSetting.LDAP_BASE_DN.getInternalName()); + + // Load the LoginProperty + String loginProperty = (String) systemConfig.get(SystemSetting.LDAP_LOGIN_PROPERTY.getInternalName()); + if (loginProperty == null) { + // Use the default + loginProperty = "cn"; + } + // Load any information we may need to bind + String bindDN = (String) systemConfig.get(SystemSetting.LDAP_BIND_DN.getInternalName()); + String bindPW = (String) systemConfig.get(SystemSetting.LDAP_BIND_PW.getInternalName()); + + // Load any search filter + String groupSearchFilter = (String) systemConfig.get(SystemSetting.LDAP_GROUP_FILTER.getInternalName()); + String groupMemberFilter = (String) systemConfig.get(SystemSetting.LDAP_GROUP_MEMBER.getInternalName()); + String testUserDN=userName; + String ldapServer = (String) systemConfig.get(Context.PROVIDER_URL); + + Properties env = getProperties(ldapServer); + + if (bindDN != null) { + env.setProperty(Context.SECURITY_PRINCIPAL, bindDN); + env.setProperty(Context.SECURITY_CREDENTIALS, bindPW); + env.setProperty(Context.SECURITY_AUTHENTICATION, "simple"); + } + + try { + InitialLdapContext ctx = new InitialLdapContext(env, null); + SearchControls searchControls = getSearchControls(); + + String filter = String.format("(&(%s)(%s=%s))", + groupSearchFilter, groupMemberFilter, + testUserDN); + + generateUiLoggingForStep4LdapFilter(userName, filter); + + // Loop through each configured base DN. It may be useful + // in the future to allow for a filter to be configured for + // each BaseDN, but for now the filter will apply to all. 
+ String[] baseDNs = baseDN.split(BASEDN_DELIMITER); + for (int x = 0; x < baseDNs.length; x++) { + NamingEnumeration<SearchResult> answer = ctx.search(baseDNs[x], filter, searchControls); + if (!answer.hasMoreElements()) { //BZ:582471- ldap api bug change + // Nothing found for this DN, move to the next one if we have one. + continue; + } + + // We use the first match + SearchResult si = answer.next(); + //generate the DN + String userDN = null; + try { + userDN = si.getNameInNamespace(); + } catch (UnsupportedOperationException use) { + userDN = si.getName(); + if (userDN.startsWith(""")) { + userDN = userDN.substring(1, userDN.length()); + } + if (userDN.endsWith(""")) { + userDN = userDN.substring(0, userDN.length() - 1); + } + userDN = userDN + "," + baseDNs[x]; + } + userDetails.put("dn", userDN); + + // Construct the UserDN + NamingEnumeration<String> keys = si.getAttributes().getIDs(); + while (keys.hasMore()) { + String key = keys.next(); + Attribute value = si.getAttributes().get(key); + if ((value != null) && (value.get() != null)) { + userDetails.put(key, value.get().toString()); + } + } +// return userDetails; + }//end of for loop + generateUiLoggingStep4Authz(filter); + return userDetails; + } catch (Exception ex) { + generateUiLoggingStep4Exception(ex); + } + return userDetails; + } + + public Set<String> findAvailableGroupsFor(String userName) { + // Load our LDAP specific properties + Properties options = env; + String groupFilter = options.getProperty(SystemSetting.LDAP_GROUP_FILTER.getInternalName(), ""); + String groupMember = options.getProperty(SystemSetting.LDAP_GROUP_MEMBER.getInternalName(), ""); + String groupUsePosix = options.getProperty(SystemSetting.LDAP_GROUP_USE_POSIX.getInternalName(), "false"); + if (groupUsePosix == null) { + groupUsePosix = Boolean.toString(false);//default to false + } + boolean usePosixGroups = Boolean.valueOf(groupUsePosix); + String userAttribute = getUserAttribute(options, userName, usePosixGroups); + 
Set<String> ldapSet = new HashSet<String>(); + + if (userAttribute != null && userAttribute.trim().length() > 0) { + //TODO: spinder 4/21/10 put in error/debug logging messages for badly formatted filter combinations + String filter = ""; + //form assumes examples where groupFilter is like 'objectclass=groupOfNames' and groupMember is 'member' + // to produce ldap filter like (&(objectclass=groupOfNames)(member=cn=Administrator,ou=People,dc=test,dc=com)) + // or like (&(objectclass=groupOfNames)(memberUid=Administrator)) for posixGroups. + filter = String.format("(&(%s)(%s=%s))", groupFilter, groupMember, encodeForFilter(userAttribute)); + + Set<Map<String, String>> matched = buildGroup(options, filter); +// log.trace("Located '" + matched.size() + "' LDAP groups for user '" + userName +// + "' using following ldap filter '" + filter + "'."); + + //iterate to extract just the group names. + for (Map<String, String> match : matched) { + ldapSet.add(match.get("id")); + } + } else { +// log.debug("Group lookup will not be performed due to no UserDN found for user " + userName); + } + + return ldapSet; + } + + private void generateUiLoggingStep4Exception(Exception ex) { + String groupSearchFilter = env + .getProperty(SystemSetting.LDAP_GROUP_FILTER + .getInternalName()); + String msg = "STEP-4:FAIL: There was an error searching with the groupFilter supplied: " + + groupSearchFilter + "'\n"; + msg += ex.getMessage(); + if (enableVerboseDebugging.isSelected()) { + msg = appendStacktraceToMsg(msg, ex); + } + log(msg); + } + + private void generateUiLoggingStep4Authz(String filter) { + Set<Map<String, String>> groups = buildGroup(env, filter); + String msg = "STEP-4:TESTING: Using Group Search Filter '" + + filter + "', " + groups.size() + + " ldap group(s) were located.\n"; + if (groups.size() > 0) { + HashMap<String, String>[] ldapLists = new HashMap[groups + .size()]; + groups.toArray(ldapLists); + msg += "STEP-4:PASS: Listing a few of the ldap groups located: \n"; + // 
iterate over first ten or less to demonstrate retrieve + for (int i = 0; (i < groups.size() && i < 10); i++) { + msg += ldapLists[i] + "\n"; + } + }else{ + msg+="STEP-4:WARN: With current settings, test user is not authorized for any groups. Is this correct?"; + } + log(msg); + } + + private void generateUiLoggingForStep4LdapFilter(String userName, + String filter) { + String msg = "STEP-4:TESTING: about to do ldap search with filter \n'" + + filter + + "'\n to locate groups that test user '"+userName+"' IS authorized to access."; + log(msg); + } + + + // throw the label and fields together, two to a row. + private HashMap<String, JTextField> loadUiFields(JPanel top, + String[] componentKeys) { + HashMap<String, JTextField> mappings = new HashMap<String, JTextField>(); + for (int i = 0; i < componentKeys.length; i++) { + String firstLabelKey = componentKeys[i]; + String secondLabelKey = componentKeys[++i]; + // locate second key + JPanel row = new JPanel(); + row.setLayout(new FlowLayout(FlowLayout.LEFT)); + JLabel label1 = new JLabel(firstLabelKey); + label1.setSize(textBoxWidth, 5); +// JTextField value1 = new JTextField(textBoxWidth); + JTextField value1 = null; + if (firstLabelKey.equalsIgnoreCase("Password:")&&(!showPasswords.isSelected())) { + value1 = new JPasswordField(textBoxWidth); + } else { + value1 = new JTextField(textBoxWidth); + } + JLabel label2 = new JLabel(secondLabelKey); + JTextField value2 = new JTextField(textBoxWidth); + row.add(label1); + row.add(value1); + row.add(Box.createRigidArea(new Dimension(0, 5))); + row.add(label2); + row.add(value2); + mappings.put(firstLabelKey, value1); + mappings.put(secondLabelKey, value2); + top.add(row); + } + + return mappings; + } + + private Properties getProperties(String contentProvider) { + Properties env = new Properties(); + env.setProperty(Context.INITIAL_CONTEXT_FACTORY, + "com.sun.jndi.ldap.LdapCtxFactory"); + env.setProperty(Context.PROVIDER_URL, contentProvider); + 
if(!enableLdapReferral.isSelected()){ + env.setProperty(Context.REFERRAL, "ignore"); + }else{ + String msg="**---- You have chosen to tell LDAP servers to [FOLLOW] context referrals. Default is [IGNORE] referrals. --**"; + log(msg); + env.setProperty(Context.REFERRAL, "follow"); + } + +// // Setup SSL if requested +// String protocol = ssl.isSelected()? "ssl":""; +// if ((protocol != null) && protocol.equals("ssl")) { +// String ldapSocketFactory = env +// .getProperty("java.naming.ldap.factory.socket"); +// if (ldapSocketFactory == null) { +// env.put("java.naming.ldap.factory.socket", +// UntrustedSSLSocketFactory.class.getName()); +// } +// env.put(Context.SECURITY_PROTOCOL, "ssl"); +// } + + return env; + } + + private String delineate() { + String line = "-"; + for (int i = 0; i < 30; i++) { + line += "-"; + } + return line; + } + + /** Takes care of delineating messages and conditional logging contents passed in. + * @param msg + */ + private void log(String msg) { + String message = "\n" + delineate() + "\n"; + message += msg; + message += "\n" + delineate() + "\n\n"; + //This flag can be used in the unlikely case that the UI hangs during a test operation.: + if(iterativeVerboseLogging.isSelected()){ + System.out.println(message); + } + testResults.setText(testResults.getText() + message); + } + + private SearchControls getSearchControls() { + int scope = SearchControls.SUBTREE_SCOPE; + int timeLimit = 0; + long countLimit = 0; + String[] returnedAttributes = null; + boolean returnObject = false; + boolean deference = false; + SearchControls constraints = new SearchControls(scope, countLimit, + timeLimit, returnedAttributes, returnObject, deference); + return constraints; + } + + /** Translate SystemSettings to familiar Properties instance since we're + * passing not one but multiple values. 
+ * + * @param systemSettings + * @return + */ + private Properties populateProperties(Properties existing) { + Properties properties = new Properties(); + if(existing!=null){ + properties = existing; + } + for (SystemSetting entry : SystemSetting.values()) { + if(entry!=null){ + switch(entry){ + case LDAP_BASED_JAAS_PROVIDER: + properties.put(entry.getInternalName(), ""); + break; + } + } + } + return properties; + } + + /**Build/retrieve the user DN. Not usually a property. + * + * @param options + * @param userName + * @param usePosixGroups boolean indicating whether we search for groups with posixGroup format + * @return + */ + private String getUserAttribute(Properties options, String userName, boolean usePosixGroups) { + Map<String, String> details = findLdapUserDetails(userName); + String userAttribute = null; + if (usePosixGroups) {//return just the username as posixGroup member search uses (&(%s)(memberUid=username)) + userAttribute = userName; + } else {//this is the default where group search uses (&(%s)(uniqueMember={userDn})) + userAttribute = details.get("dn"); + } + + return userAttribute; + } + + /** See LDAPStringUtil.encodeForFilter() for original code/source/author/etc. 
+ * <p>Encode a string so that it can be used in an LDAP search filter.</p> + * + * <p>The following table shows the characters that are encoded and their + * encoded version.</p> + * + * <table> + * <tr><th align="center">Character</th><th>Encoded As</th></tr> + * <tr><td align="center">*</td><td>\2a</td></tr> + * <tr><td align="center">(</td><td>\28</td></tr> + * <tr><td align="center">)</td><td>\29</td></tr> + * <tr><td align="center"></td><td>\5c</td></tr> + * <tr><td align="center"><code>null</code></td><td>\00</td></tr> + * </table> + * + * <p>In addition to encoding the above characters, any non-ASCII character + * (any character with a hex value greater then <code>0x7f</code>) is also + * encoded and rewritten as a UTF-8 character or sequence of characters in + * hex notation.</p> + * + * @param filterString a string that is to be encoded + * @return the encoded version of <code>filterString</code> suitable for use + * in a LDAP search filter + * @see <a href="http://tools.ietf.org/html/rfc4515">RFC 4515</a> + */ + public static String encodeForFilter(final String filterString) { + if (filterString != null && filterString.length() > 0) { + StringBuilder encString = new StringBuilder(filterString.length()); + for (int i = 0; i < filterString.length(); i++) { + char ch = filterString.charAt(i); + switch (ch) { + case '*': // encode a wildcard * character + encString.append("\2a"); + break; + case '(': // encode a open parenthesis ( character + encString.append("\28"); + break; + case ')': // encode a close parenthesis ) character + encString.append("\29"); + break; + case '\': // encode a backslash \ character + encString.append("\5c"); + break; + case '\u0000': // encode a null character + encString.append("\00"); + break; + default: + if (ch <= 0x7f) { // an ASCII character + encString.append(ch); + } else if (ch >= 0x80) { // encode to UTF-8 + try { + byte[] utf8bytes = String.valueOf(ch).getBytes("UTF8"); + for (byte b : utf8bytes) { + 
encString.append(String.format("\%02x", b)); + } + } catch (UnsupportedEncodingException e) { + // ignore + } + } + } + } + return encString.toString(); + } + return filterString; + } +} + +//Mock up the upgraded system properties approach to use SystemSetting +enum SystemSetting { + LDAP_BASED_JAAS_PROVIDER("CAM_JAAS_PROVIDER"), + LDAP_NAMING_PROVIDER_URL("CAM_LDAP_NAMING_PROVIDER_URL"), + USE_SSL_FOR_LDAP("CAM_LDAP_PROTOCOL"), + LDAP_LOGIN_PROPERTY("CAM_LDAP_LOGIN_PROPERTY"), + LDAP_FILTER("CAM_LDAP_FILTER"), + LDAP_GROUP_FILTER("CAM_LDAP_GROUP_FILTER"), + LDAP_GROUP_MEMBER("CAM_LDAP_GROUP_MEMBER"), + LDAP_GROUP_QUERY_PAGE_SIZE("CAM_LDAP_GROUP_QUERY_PAGE_SIZE"), + LDAP_BASE_DN("CAM_LDAP_BASE_DN"), + LDAP_BIND_DN("CAM_LDAP_BIND_DN"), + LDAP_BIND_PW("CAM_LDAP_BIND_PW"), + LDAP_NAMING_FACTORY("CAM_LDAP_NAMING_FACTORY_INITIAL"), + LDAP_GROUP_USE_POSIX("CAM_LDAP_GROUP_USE_POSIX"), + ; + + private String internalName; + + private SystemSetting(String name) { + this.internalName = name; + } + + public String getInternalName() { + return internalName; + } + + public static SystemSetting getByInternalName(String internalName) { + for (SystemSetting p : SystemSetting.values()) { + if (p.internalName.equals(internalName)) { + return p; + } + } + return null; + } +} + +
commit 78f4063d9d6dd5a6be04bc28ea67f16a10973cf6 Author: Simeon Pinder spinder@redhat.com Date: Tue Jul 30 17:07:58 2013 -0400
Load LDAP properties in the same way as before the refactor, and deprecate the old property-load mechanism, as it does not work well with reusing existing properties.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java index 097b993..8a0e321 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java @@ -586,7 +586,34 @@ SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.name(), "" } } //now load default/shared LDAP properties as we always have - properties = getProperties(properties); + // Set our default factory name if one is not given + String factoryName = properties.getProperty(SystemSetting.LDAP_NAMING_FACTORY.name()); + properties.setProperty(Context.INITIAL_CONTEXT_FACTORY, factoryName); + + // Setup SSL if requested + String value = properties.getProperty(SystemSetting.USE_SSL_FOR_LDAP.name()); + boolean ldapSsl = "ssl".equalsIgnoreCase(value); + if (ldapSsl) { + String ldapSocketFactory = properties.getProperty("java.naming.ldap.factory.socket"); + if (ldapSocketFactory == null) { + properties.put("java.naming.ldap.factory.socket", UntrustedSSLSocketFactory.class.getName()); + } + properties.put(Context.SECURITY_PROTOCOL, "ssl"); + } + + // Set the LDAP url + String providerUrl = properties.getProperty(SystemSetting.LDAP_NAMING_PROVIDER_URL.name()); + if (providerUrl == null) { + int port = (ldapSsl) ? 
636 : 389; + providerUrl = "ldap://localhost:" + port; + } + + properties.setProperty(Context.PROVIDER_URL, providerUrl); + + // Follow referrals automatically + properties.setProperty(Context.REFERRAL, "ignore"); //BZ:582471- active directory query change + + // properties = getProperties(properties); } return properties; } @@ -598,6 +625,7 @@ SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.name(), "" * * @return properties that are to be used when connecting to LDAP server */ + @Deprecated private Properties getProperties(Properties systemConfig) { Properties env = new Properties(systemConfig); // Set our default factory name if one is not given
commit 13ee16caf7ff65fe5af0033c1583b906c36c1288 Author: Simeon Pinder spinder@redhat.com Date: Mon Jul 29 12:02:56 2013 -0400
Include default ldap property settings in initialization as well.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java index cd27c32..097b993 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java @@ -585,6 +585,8 @@ SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.name(), "" } } } + //now load default/shared LDAP properties as we always have + properties = getProperties(properties); } return properties; }
commit 9460e0244be9494d7a7168322c91dac6c1c20fa9 Author: Simeon Pinder spinder@redhat.com Date: Mon Jul 29 01:32:10 2013 -0400
Fix some deprecated property references.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java index 02de558..cd27c32 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java @@ -98,7 +98,7 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { Properties systemConfig = populateProperties(systemManager.getSystemSettings(subjectManager.getOverlord()));
//retrieve the filters. - String groupFilter = (String) systemConfig.get(SystemSetting.LDAP_GROUP_FILTER.getInternalName()); + String groupFilter = (String) systemConfig.get(SystemSetting.LDAP_GROUP_FILTER.name()); if ((groupFilter != null) && (!groupFilter.trim().isEmpty())) { String filter; if (groupFilter.startsWith("(") && groupFilter.endsWith(")")) { @@ -116,9 +116,9 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal {
public Set<String> findAvailableGroupsFor(String userName) { Properties options = populateProperties(systemManager.getSystemSettings(subjectManager.getOverlord())); - String groupFilter = options.getProperty(SystemSetting.LDAP_GROUP_FILTER.getInternalName(), ""); - String groupMember = options.getProperty(SystemSetting.LDAP_GROUP_MEMBER.getInternalName(), ""); - String groupUsePosix = options.getProperty(SystemSetting.LDAP_GROUP_USE_POSIX.getInternalName(), "false"); + String groupFilter = options.getProperty(SystemSetting.LDAP_GROUP_FILTER.name(), ""); + String groupMember = options.getProperty(SystemSetting.LDAP_GROUP_MEMBER.name(), ""); + String groupUsePosix = options.getProperty(SystemSetting.LDAP_GROUP_USE_POSIX.name(), "false"); if (groupUsePosix == null) { groupUsePosix = Boolean.toString(false);//default to false } @@ -299,20 +299,20 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { HashMap<String, String> userDetails = new HashMap<String, String>();
// Load the BaseDN - String baseDN = (String) systemConfig.get(SystemSetting.LDAP_BASE_DN.getInternalName()); + String baseDN = (String) systemConfig.get(SystemSetting.LDAP_BASE_DN.name());
// Load the LoginProperty - String loginProperty = (String) systemConfig.get(SystemSetting.LDAP_LOGIN_PROPERTY.getInternalName()); + String loginProperty = (String) systemConfig.get(SystemSetting.LDAP_LOGIN_PROPERTY.name()); if (loginProperty == null) { // Use the default loginProperty = "cn"; } // Load any information we may need to bind - String bindDN = (String) systemConfig.get(SystemSetting.LDAP_BASE_DN.getInternalName()); - String bindPW = (String) systemConfig.get(SystemSetting.LDAP_BIND_PW.getInternalName()); + String bindDN = (String) systemConfig.get(SystemSetting.LDAP_BIND_DN.name()); + String bindPW = (String) systemConfig.get(SystemSetting.LDAP_BIND_PW.name());
// Load any search filter - String searchFilter = (String) systemConfig.get(SystemSetting.LDAP_FILTER.getInternalName()); + String searchFilter = (String) systemConfig.get(SystemSetting.LDAP_FILTER.name()); if (bindDN != null) { systemConfig.setProperty(Context.SECURITY_PRINCIPAL, bindDN); systemConfig.setProperty(Context.SECURITY_CREDENTIALS, bindPW); @@ -384,17 +384,17 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { Set<Map<String, String>> groupDetailsMap = new HashSet<Map<String, String>>(); //Load our LDAP specific properties // Load the BaseDN - String baseDN = (String) systemConfig.get(SystemSetting.LDAP_BASE_DN.getInternalName()); + String baseDN = (String) systemConfig.get(SystemSetting.LDAP_BASE_DN.name());
// Load the LoginProperty - String loginProperty = (String) systemConfig.get(SystemSetting.LDAP_LOGIN_PROPERTY.getInternalName()); + String loginProperty = (String) systemConfig.get(SystemSetting.LDAP_LOGIN_PROPERTY.name()); if (loginProperty == null) { // Use the default loginProperty = "cn"; } // Load any information we may need to bind - String bindDN = (String) systemConfig.get(SystemSetting.LDAP_BIND_DN.getInternalName()); - String bindPW = (String) systemConfig.get(SystemSetting.LDAP_BIND_PW.getInternalName()); + String bindDN = (String) systemConfig.get(SystemSetting.LDAP_BIND_DN.name()); + String bindPW = (String) systemConfig.get(SystemSetting.LDAP_BIND_PW.name()); if (bindDN != null) { systemConfig.setProperty(Context.SECURITY_PRINCIPAL, bindDN); systemConfig.setProperty(Context.SECURITY_CREDENTIALS, bindPW); @@ -411,7 +411,7 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { searchControls.setReturningAttributes(attributes);
//detect whether to use Query Page Control - String groupUseQueryPaging = systemConfig.getProperty(SystemSetting.LDAP_GROUP_PAGING.getInternalName(), + String groupUseQueryPaging = systemConfig.getProperty(SystemSetting.LDAP_GROUP_PAGING.name(), "false"); if (groupUseQueryPaging == null) { groupUseQueryPaging = Boolean.toString(false);//default to false @@ -426,7 +426,7 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { // only if they're enabled in the UI. if (useQueryPaging) { String groupPageSize = systemConfig.getProperty( - SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.getInternalName(), "" +SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.name(), "" + defaultPageSize); if ((groupPageSize != null) && (!groupPageSize.trim().isEmpty())) { int passedInPageSize = -1; @@ -599,7 +599,7 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { private Properties getProperties(Properties systemConfig) { Properties env = new Properties(systemConfig); // Set our default factory name if one is not given - String factoryName = env.getProperty(SystemSetting.LDAP_NAMING_FACTORY.getInternalName()); + String factoryName = env.getProperty(SystemSetting.LDAP_NAMING_FACTORY.name()); env.setProperty(Context.INITIAL_CONTEXT_FACTORY, factoryName);
// Setup SSL if requested @@ -614,7 +614,7 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { }
// Set the LDAP url - String providerUrl = env.getProperty(RHQConstants.LDAPUrl); + String providerUrl = env.getProperty(SystemSetting.LDAP_NAMING_PROVIDER_URL.name()); if (providerUrl == null) { int port = (ldapSsl) ? 636 : 389; providerUrl = "ldap://localhost:" + port;
commit b5897cf41d3b3a0a5c337b70fe6211380a9e73a7 Author: Simeon Pinder spinder@redhat.com Date: Wed Jun 19 13:51:17 2013 -0400
Some more doc and cleanup/refactor.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java index 4c0c2fc..02de558 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java @@ -156,6 +156,7 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { throw new IllegalArgumentException("Role with id [" + roleId + "] does not exist."); }
+ //add some code to synch up the current list of ldap groups. Set<LdapGroup> currentGroups = role.getLdapGroups(); List<String> currentGroupNames = new ArrayList<String>(currentGroups.size()); for (LdapGroup group : currentGroups) { @@ -167,10 +168,12 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { newGroupNames.add(group.getName()); }
+ //figure out which ones are new then add them. List<String> namesOfGroupsToBeAdded = new ArrayList<String>(newGroupNames); namesOfGroupsToBeAdded.removeAll(currentGroupNames); addLdapGroupsToRole(subject, roleId, namesOfGroupsToBeAdded);
+ //figure out which ones need to be removed. then remove them. List<String> namesOfGroupsToBeRemoved = new ArrayList<String>(currentGroupNames); namesOfGroupsToBeRemoved.removeAll(newGroupNames); int[] idsOfGroupsToBeRemoved = new int[namesOfGroupsToBeRemoved.size()]; @@ -290,11 +293,10 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { }
public Map<String, String> findLdapUserDetails(String userName) { + // Load our LDAP specific properties Properties systemConfig = populateProperties(systemManager.getSystemSettings(subjectManager.getOverlord()));
HashMap<String, String> userDetails = new HashMap<String, String>(); - // Load our LDAP specific properties - Properties env = getProperties(systemConfig);
// Load the BaseDN String baseDN = (String) systemConfig.get(SystemSetting.LDAP_BASE_DN.getInternalName()); @@ -312,13 +314,13 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { // Load any search filter String searchFilter = (String) systemConfig.get(SystemSetting.LDAP_FILTER.getInternalName()); if (bindDN != null) { - env.setProperty(Context.SECURITY_PRINCIPAL, bindDN); - env.setProperty(Context.SECURITY_CREDENTIALS, bindPW); - env.setProperty(Context.SECURITY_AUTHENTICATION, "simple"); + systemConfig.setProperty(Context.SECURITY_PRINCIPAL, bindDN); + systemConfig.setProperty(Context.SECURITY_CREDENTIALS, bindPW); + systemConfig.setProperty(Context.SECURITY_AUTHENTICATION, "simple"); }
try { - InitialLdapContext ctx = new InitialLdapContext(env, null); + InitialLdapContext ctx = new InitialLdapContext(systemConfig, null); SearchControls searchControls = getSearchControls();
// Add the search filter if specified. This only allows for a single search filter.. i.e. foo=bar.
commit 8ebda9f14429ffc5f74ce4d83f0ebf952fb8bf63 Author: Simeon Pinder spinder@redhat.com Date: Wed Jun 19 13:36:52 2013 -0400
Use property internal name.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java index d80732e..4c0c2fc 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java @@ -380,7 +380,7 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { */ protected Set<Map<String, String>> buildGroup(Properties systemConfig, String filter) { Set<Map<String, String>> groupDetailsMap = new HashSet<Map<String, String>>(); - + //Load our LDAP specific properties // Load the BaseDN String baseDN = (String) systemConfig.get(SystemSetting.LDAP_BASE_DN.getInternalName());
@@ -423,7 +423,8 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { int defaultPageSize = 1000; // only if they're enabled in the UI. if (useQueryPaging) { - String groupPageSize = systemConfig.getProperty(SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.name(), "" + String groupPageSize = systemConfig.getProperty( + SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.getInternalName(), "" + defaultPageSize); if ((groupPageSize != null) && (!groupPageSize.trim().isEmpty())) { int passedInPageSize = -1;
commit e205af70c89490329d8a3e0d157af05758f77cee Author: Simeon Pinder spinder@redhat.com Date: Mon Jun 17 12:26:59 2013 -0400
Clean up some deprecated usage of Properties by LDAP, and small refactoring.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java index 7473321..d80732e 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java @@ -94,27 +94,30 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { private SystemManagerLocal systemManager;
public Set<Map<String, String>> findAvailableGroups() { - Properties systemConfig = systemManager.getSystemConfiguration(subjectManager.getOverlord()); - Set<Map<String, String>> emptyAvailableGroups = new HashSet<Map<String, String>>(); + //load current system properties + Properties systemConfig = populateProperties(systemManager.getSystemSettings(subjectManager.getOverlord()));
//retrieve the filters. - String groupFilter = (String) systemConfig.get(RHQConstants.LDAPGroupFilter); + String groupFilter = (String) systemConfig.get(SystemSetting.LDAP_GROUP_FILTER.getInternalName()); if ((groupFilter != null) && (!groupFilter.trim().isEmpty())) { String filter; - if (groupFilter.startsWith("(") && groupFilter.endsWith(")")) + if (groupFilter.startsWith("(") && groupFilter.endsWith(")")) { filter = groupFilter; // RFC 2254 does not allow for ((expression)) - else + } else { filter = String.format("(%s)", groupFilter); // not wrapped in (), wrap it + }
return buildGroup(systemConfig, filter); } + + Set<Map<String, String>> emptyAvailableGroups = new HashSet<Map<String, String>>(); return emptyAvailableGroups; }
public Set<String> findAvailableGroupsFor(String userName) { - Properties options = systemManager.getSystemConfiguration(subjectManager.getOverlord()); - String groupFilter = options.getProperty(RHQConstants.LDAPGroupFilter, ""); - String groupMember = options.getProperty(RHQConstants.LDAPGroupMember, ""); + Properties options = populateProperties(systemManager.getSystemSettings(subjectManager.getOverlord())); + String groupFilter = options.getProperty(SystemSetting.LDAP_GROUP_FILTER.getInternalName(), ""); + String groupMember = options.getProperty(SystemSetting.LDAP_GROUP_MEMBER.getInternalName(), ""); String groupUsePosix = options.getProperty(SystemSetting.LDAP_GROUP_USE_POSIX.getInternalName(), "false"); if (groupUsePosix == null) { groupUsePosix = Boolean.toString(false);//default to false @@ -287,26 +290,27 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { }
public Map<String, String> findLdapUserDetails(String userName) { - Properties systemConfig = systemManager.getSystemConfiguration(subjectManager.getOverlord()); + Properties systemConfig = populateProperties(systemManager.getSystemSettings(subjectManager.getOverlord())); + HashMap<String, String> userDetails = new HashMap<String, String>(); // Load our LDAP specific properties Properties env = getProperties(systemConfig);
// Load the BaseDN - String baseDN = (String) systemConfig.get(RHQConstants.LDAPBaseDN); + String baseDN = (String) systemConfig.get(SystemSetting.LDAP_BASE_DN.getInternalName());
// Load the LoginProperty - String loginProperty = (String) systemConfig.get(RHQConstants.LDAPLoginProperty); + String loginProperty = (String) systemConfig.get(SystemSetting.LDAP_LOGIN_PROPERTY.getInternalName()); if (loginProperty == null) { // Use the default loginProperty = "cn"; } // Load any information we may need to bind - String bindDN = (String) systemConfig.get(RHQConstants.LDAPBindDN); - String bindPW = (String) systemConfig.get(RHQConstants.LDAPBindPW); + String bindDN = (String) systemConfig.get(SystemSetting.LDAP_BASE_DN.getInternalName()); + String bindPW = (String) systemConfig.get(SystemSetting.LDAP_BIND_PW.getInternalName());
// Load any search filter - String searchFilter = (String) systemConfig.get(RHQConstants.LDAPFilter); + String searchFilter = (String) systemConfig.get(SystemSetting.LDAP_FILTER.getInternalName()); if (bindDN != null) { env.setProperty(Context.SECURITY_PRINCIPAL, bindDN); env.setProperty(Context.SECURITY_CREDENTIALS, bindPW); @@ -375,29 +379,27 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { * @see org.jboss.security.auth.spi.UsernamePasswordLoginModule#validatePassword(java.lang.String,java.lang.String) */ protected Set<Map<String, String>> buildGroup(Properties systemConfig, String filter) { - Set<Map<String, String>> ret = new HashSet<Map<String, String>>(); - // Load our LDAP specific properties - Properties env = getProperties(systemConfig); + Set<Map<String, String>> groupDetailsMap = new HashSet<Map<String, String>>();
// Load the BaseDN - String baseDN = (String) systemConfig.get(RHQConstants.LDAPBaseDN); + String baseDN = (String) systemConfig.get(SystemSetting.LDAP_BASE_DN.getInternalName());
// Load the LoginProperty - String loginProperty = (String) systemConfig.get(RHQConstants.LDAPLoginProperty); + String loginProperty = (String) systemConfig.get(SystemSetting.LDAP_LOGIN_PROPERTY.getInternalName()); if (loginProperty == null) { // Use the default loginProperty = "cn"; } // Load any information we may need to bind - String bindDN = (String) systemConfig.get(RHQConstants.LDAPBindDN); - String bindPW = (String) systemConfig.get(RHQConstants.LDAPBindPW); + String bindDN = (String) systemConfig.get(SystemSetting.LDAP_BIND_DN.getInternalName()); + String bindPW = (String) systemConfig.get(SystemSetting.LDAP_BIND_PW.getInternalName()); if (bindDN != null) { - env.setProperty(Context.SECURITY_PRINCIPAL, bindDN); - env.setProperty(Context.SECURITY_CREDENTIALS, bindPW); - env.setProperty(Context.SECURITY_AUTHENTICATION, "simple"); + systemConfig.setProperty(Context.SECURITY_PRINCIPAL, bindDN); + systemConfig.setProperty(Context.SECURITY_CREDENTIALS, bindPW); + systemConfig.setProperty(Context.SECURITY_AUTHENTICATION, "simple"); } try { - InitialLdapContext ctx = new InitialLdapContext(env, null); + InitialLdapContext ctx = new InitialLdapContext(systemConfig, null); SearchControls searchControls = getSearchControls(); /*String filter = "(&(objectclass=groupOfUniqueNames)(uniqueMember=uid=" + userName + ",ou=People, dc=rhndev, dc=redhat, dc=com))";*/ @@ -421,8 +423,7 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { int defaultPageSize = 1000; // only if they're enabled in the UI. 
if (useQueryPaging) { - Properties options = populateProperties(systemManager.getSystemSettings(subjectManager.getOverlord())); - String groupPageSize = options.getProperty(SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.name(), "" + String groupPageSize = systemConfig.getProperty(SystemSetting.LDAP_GROUP_QUERY_PAGE_SIZE.name(), "" + defaultPageSize); if ((groupPageSize != null) && (!groupPageSize.trim().isEmpty())) { int passedInPageSize = -1; @@ -445,11 +446,12 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { String[] baseDNs = baseDN.split(BASEDN_DELIMITER);
for (int x = 0; x < baseDNs.length; x++) { - executeGroupSearch(filter, ret, ctx, searchControls, baseDNs, x); + executeGroupSearch(filter, groupDetailsMap, ctx, searchControls, baseDNs, x);
// continually parsing pages of results until we're done. // only if they're enabled in the UI. if (useQueryPaging) { + //handle paged results if they're being used here byte[] cookie = null; Control[] controls = ctx.getResponseControls(); @@ -466,7 +468,7 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { //ensure the next requests contains the session/cookie details ctx.setRequestControls(new Control[] { new PagedResultsControl(defaultPageSize, cookie, Control.CRITICAL) }); - executeGroupSearch(filter, ret, ctx, searchControls, baseDNs, x); + executeGroupSearch(filter, groupDetailsMap, ctx, searchControls, baseDNs, x); //empty out cookie cookie = null; //test for further iterations @@ -479,6 +481,25 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { } } } + //continually parsing pages of results until we're done. + while (cookie != null) { + //ensure the next requests contains the session/cookie details + ctx.setRequestControls(new Control[] { new PagedResultsControl(defaultPageSize, cookie, + Control.CRITICAL) }); + executeGroupSearch(filter, groupDetailsMap, ctx, searchControls, baseDNs, x); + //empty out cookie + cookie = null; + //test for further iterations + controls = ctx.getResponseControls(); + if (controls != null) { + for (Control control : controls) { + if (control instanceof PagedResultsResponseControl) { + PagedResultsResponseControl pagedResult = (PagedResultsResponseControl) control; + cookie = pagedResult.getCookie(); + } + } + } + } } } } @@ -498,32 +519,7 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { log.error("Unexpected LDAP communciation error:" + iex.getMessage(), iex); throw new LdapCommunicationException(iex); } - - return ret; - } - - /** Translate SystemSettings to familiar Properties instance since we're - * passing not one but multiple values. 
- * - * @param systemSettings - * @return - */ - private Properties populateProperties(SystemSettings systemSettings) { - Properties properties = null; - if (systemSettings != null) { - properties = new Properties(); - Set<Entry<SystemSetting, String>> entries = systemSettings.entrySet(); - for (Entry<SystemSetting, String> entry : entries) { - SystemSetting key = entry.getKey(); - if (key != null) { - String value = entry.getValue(); - if (value != null) { - properties.put(key.name(), value); - } - } - } - } - return properties; + return groupDetailsMap; }
/** Executes the LDAP group query using the filters, context and search controls, etc. parameters passed in. @@ -566,6 +562,30 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { } }
+ /** Translate SystemSettings to familiar Properties instance since we're + * passing not one but multiple values. + * + * @param systemSettings + * @return + */ + private Properties populateProperties(SystemSettings systemSettings) { + Properties properties = null; + if (systemSettings != null) { + properties = new Properties(); + Set<Entry<SystemSetting, String>> entries = systemSettings.entrySet(); + for (Entry<SystemSetting, String> entry : entries) { + SystemSetting key = entry.getKey(); + if (key != null) { + String value = entry.getValue(); + if (value != null) { + properties.put(key.name(), value); + } + } + } + } + return properties; + } + /** * Load a default set of properties to use when connecting to the LDAP server. If basic authentication is needed, * the caller must set Context.SECURITY_PRINCIPAL, Context.SECURITY_CREDENTIALS and Context.SECURITY_AUTHENTICATION @@ -576,7 +596,7 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { private Properties getProperties(Properties systemConfig) { Properties env = new Properties(systemConfig); // Set our default factory name if one is not given - String factoryName = env.getProperty(RHQConstants.LDAPFactory); + String factoryName = env.getProperty(SystemSetting.LDAP_NAMING_FACTORY.getInternalName()); env.setProperty(Context.INITIAL_CONTEXT_FACTORY, factoryName);
// Setup SSL if requested
commit 0db28524acf4160725c7b0101b7d923903ea93b0 Author: Jirka Kremser jkremser@redhat.com Date: Wed Jul 31 15:09:45 2013 +0200
[BZ 976827] - Description text area in General properties tab under Alerts -> Definition is not getting set to blank once it has been edited. Instead it displays the previous value that was stored - setting an empty string if descriptionField.getValue() == null
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/definitions/GeneralPropertiesAlertDefinitionForm.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/definitions/GeneralPropertiesAlertDefinitionForm.java index 1f47a81..d501787 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/definitions/GeneralPropertiesAlertDefinitionForm.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/definitions/GeneralPropertiesAlertDefinitionForm.java @@ -175,9 +175,7 @@ public class GeneralPropertiesAlertDefinitionForm extends DynamicForm implements public void saveAlertDefinition() { alertDefinition.setName(nameField.getValue().toString()); Object description = descriptionField.getValue(); - if (null != description) { - alertDefinition.setDescription(description.toString()); - } + alertDefinition.setDescription(null == description ? "" : description.toString());
String prioritySelected = prioritySelection.getValue().toString(); alertDefinition.setPriority(AlertPriority.valueOf(prioritySelected));
commit 0a006570d7b5fd255a6124f388a3023fe1df010e Author: Mike Thompson mithomps@redhat.com Date: Tue Jul 30 17:43:16 2013 -0700
i18n availability summary pie graph.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java index 057a77f..fb8e096 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java @@ -18,14 +18,15 @@ */ package org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype;
+import java.util.ArrayList; +import java.util.List; + import com.smartgwt.client.widgets.HTMLFlow; + import org.rhq.core.domain.measurement.MeasurementUnits; import org.rhq.enterprise.gui.coregui.client.util.Log; import org.rhq.enterprise.gui.coregui.client.util.MeasurementConverterClient;
-import java.util.ArrayList; -import java.util.List; - /** * Contains the javascript chart definition for an implementation of the d3 availability chart. This implementation is * just a line that changes color based on availability type: up=green, down=red, orange=disabled, unknown=grey, @@ -48,7 +49,8 @@ public class AvailabilitySummaryPieGraphType {
StringBuilder divAndSvgDefs = new StringBuilder(); divAndSvgDefs.append("<div id="availSummaryChart" >"); - divAndSvgDefs.append("<svg xmlns="http://www.w3.org/2000/svg%5C" version="1.1" style="height:"+HEIGHT+"px;" ></svg>"); + divAndSvgDefs.append("<svg xmlns="http://www.w3.org/2000/svg%5C" version="1.1" style="height:" + HEIGHT + + "px;" ></svg>"); divAndSvgDefs.append("</div>"); HTMLFlow graphFlow = new HTMLFlow(divAndSvgDefs.toString()); graphFlow.setWidth(WIDTH); @@ -56,7 +58,8 @@ public class AvailabilitySummaryPieGraphType { return graphFlow; }
- public void setAvailabilityData(String upLabel, double upPercent, String downLabel, double downPercent, String disabledLabel, double disabledPercent ){ + public void setAvailabilityData(String upLabel, double upPercent, String downLabel, double downPercent, + String disabledLabel, double disabledPercent) { availabilitySummaries = new ArrayList<AvailabilitySummary>(); availabilitySummaries.add(new AvailabilitySummary(upLabel, upPercent)); availabilitySummaries.add(new AvailabilitySummary(downLabel, downPercent)); @@ -70,8 +73,9 @@ public class AvailabilitySummaryPieGraphType { // loop through the avail intervals for (AvailabilitySummary availabilitySummary : availabilitySummaries) { sb.append("{ "label":"" + availabilitySummary.getLabel() + "", "); - sb.append(" "value": "" + MeasurementConverterClient.format(availabilitySummary.getValue(), - MeasurementUnits.PERCENTAGE, true) + "" },"); + sb.append(" "value": "" + + MeasurementConverterClient.format(availabilitySummary.getValue(), MeasurementUnits.PERCENTAGE, + true) + "" },"); } sb.setLength(sb.length() - 1); } @@ -87,47 +91,50 @@ public class AvailabilitySummaryPieGraphType { public native void drawJsniChart() /*-{ console.log("Draw Availability Summary Pie Chart");
- var w = 100, - h = 100, - r = h/2, - color = $wnd.d3.scale.category10(), - data = this.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilitySummaryPieGraphType::getAvailabilitySummaryJson()(), - vis = $wnd.d3.select("#availSummaryChart svg") - .append("g") - .data(data) - .attr("width", w) - .attr("height", h) - .attr("transform", "translate(" + r + "," + r + ")"), - arc = $wnd.d3.svg.arc() - .outerRadius(r), - pie = $wnd.d3.layout.pie(), - arcs = vis.selectAll("g.slice") - .data(pie) - .enter() - .append("g") - .attr("class", "slice"); + var global = this, + w = 100, + h = 100, + r = h / 2, + color = $wnd.d3.scale.category10(), + data = global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilitySummaryPieGraphType::getAvailabilitySummaryJson()(), + vis = $wnd.d3.select("#availSummaryChart svg") + .append("g") + .data(data) + .attr("width", w) + .attr("height", h) + .attr("transform", "translate(" + r + "," + r + ")"), + arc = $wnd.d3.svg.arc() + .outerRadius(r), + pie = $wnd.d3.layout.pie(), + arcs = vis.selectAll("g.slice") + .data(pie) + .enter() + .append("g") + .attr("class", "slice");
arcs.append("path") - .attr("fill", function (d, i) { - return color(i); - }) - .attr("d", arc); + .attr("fill", function (d, i) { + return color(i); + }) + .attr("d", arc);
arcs.append("text") - .attr("transform", function (d) { - d.innerRadius = 0; - d.outerRadius = r; - return "translate(" + arc.centroid(d) + ")"; - }) - .attr("text-anchor", "middle") - .text(function (d, i) { - return data[i].value; - }); + .attr("transform", function (d) { + d.innerRadius = 0; + d.outerRadius = r; + return "translate(" + arc.centroid(d) + ")"; + }) + .attr("text-anchor", "middle") + .style("font-size", "9px") + .style("font-family", "Arial, Verdana, sans-serif;") + .attr("fill", "#000") + .text(function (d, i) { + return data[i].value; + }); console.log("done with avail summary pie graph");
}-*/;
- private static class AvailabilitySummary { final private String label; final private double value; diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/AddToDashboardComponent.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/AddToDashboardComponent.java index ce4f8ef..52b0d86 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/AddToDashboardComponent.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/AddToDashboardComponent.java @@ -23,6 +23,7 @@ import java.util.LinkedHashMap; import com.google.gwt.user.client.rpc.AsyncCallback; import com.smartgwt.client.widgets.IButton; import com.smartgwt.client.widgets.events.ClickEvent; +import com.smartgwt.client.widgets.form.DynamicForm; import com.smartgwt.client.widgets.form.fields.SelectItem; import com.smartgwt.client.widgets.form.fields.events.ChangeEvent; import com.smartgwt.client.widgets.form.fields.events.ChangeHandler; @@ -79,7 +80,7 @@ public class AddToDashboardComponent extends ToolStrip implements Enhanced { addToDashboardButton.disable();
dashboardSelectItem = new SelectItem(); - dashboardSelectItem.setTitle("Dashboards"); + dashboardSelectItem.setTitle(MSG.chart_metrics_add_to_dashboard_label()); dashboardSelectItem.setWidth(300); dashboardSelectItem.setPickListWidth(210); populateDashboardMenu(); @@ -102,7 +103,7 @@ public class AddToDashboardComponent extends ToolStrip implements Enhanced { .getMetricDefinitions()) { if (measurementDefinition.getId() == selectedRecord .getAttributeAsInt(MetricsViewDataSource.FIELD_METRIC_DEF_ID)) { - Log.debug("Add to Dashboard -- Storing: " + measurementDefinition.getDisplayName() + Log.info("Add to Dashboard -- Storing: " + measurementDefinition.getDisplayName() + " in " + selectedDashboard.getName()); storeDashboardMetric(selectedDashboard, resource, measurementDefinition); break; diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/ResourceMetricAvailabilityView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/ResourceMetricAvailabilityView.java index cffcfd7..5835f61 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/ResourceMetricAvailabilityView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/ResourceMetricAvailabilityView.java @@ -40,7 +40,7 @@ import org.rhq.enterprise.gui.coregui.client.util.MeasurementConverterClient; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout;
/** - * This shows the availability history for a resource. + * This view shows the detail availability data in tabular form and a pie chart for available %. * * @author Jay Shaughnessy * @author John Mazzitelli @@ -49,6 +49,7 @@ import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; public class ResourceMetricAvailabilityView extends EnhancedVLayout {
private Resource resource; + private DynamicForm form; private StaticTextItem currentField; private StaticTextItem availTimeField; private StaticTextItem downTimeField; @@ -81,7 +82,7 @@ public class ResourceMetricAvailabilityView extends EnhancedVLayout {
private DynamicForm createSummaryForm() { - DynamicForm form = new DynamicForm(); + form = new DynamicForm(); form.setWidth100(); form.setAutoHeight(); form.setMargin(10); @@ -138,14 +139,13 @@ public class ResourceMetricAvailabilityView extends EnhancedVLayout { currentTimeField.setShowTitle(false);
CanvasItem availPieChartItem = new CanvasItem(); - //@todo: i18n - availPieChartItem.setTitle("Availability"); + availPieChartItem.setTitle(MSG.pie_chart_availability_summary_label()); availPieChartItem.setCanvas(availabilitySummaryPieGraph.createGraphMarker()); availPieChartItem.setRowSpan(3); availPieChartItem.setVAlign(VerticalAlignment.TOP); availPieChartItem.setTitleVAlign(VerticalAlignment.TOP); - availPieChartItem.setHeight(60); - availPieChartItem.setWidth(60); + availPieChartItem.setHeight(AvailabilitySummaryPieGraphType.HEIGHT); + availPieChartItem.setWidth(AvailabilitySummaryPieGraphType.WIDTH);
form.setItems(currentField, availPieChartItem, availTimeField, downTimeField, disabledTimeField, failureCountField, disabledCountField, mtbfField, mttrField, unknownField, @@ -164,17 +164,15 @@ public class ResourceMetricAvailabilityView extends EnhancedVLayout { public void onSuccess(ResourceAvailabilitySummary result) { Log.debug("reloadSummaryData");
- //@todo: i18n availabilitySummaryPieGraph.setAvailabilityData( - "Up", result.getUpPercentage(), - "Down", result.getDownPercentage(), - "Disabled" ,result.getDisabledPercentage() + MSG.pie_chart_availability_summary_up(), result.getUpPercentage(), + MSG.pie_chart_availability_summary_down(), result.getDownPercentage(), + MSG.pie_chart_availability_summary_disabled() ,result.getDisabledPercentage() ); new Timer(){
@Override public void run() { - Log.debug("Run Avail Graph"); availabilitySummaryPieGraph.drawJsniChart(); } }.schedule(150); diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties index abcfa62..7f2e7d4 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties @@ -21,6 +21,7 @@ chart_hover_start_label = Start chart_hover_time_format = %I:%M:%S %p chart_ie_not_supported = Charting is not available for this browser. chart_metrics= Metrics +chart_metrics_add_to_dashboard_label= Dashboards chart_metrics_collapse_tooltip= Click here to see additional tabular availability data. chart_metrics_expand_tooltip= Click here to collapse additional availability detail. chart_metrics_sparkline_header= Chart @@ -459,6 +460,10 @@ filter_from_date = From filter_to_date = To group_tree_groupOfResourceType = Group of [{0}] group_tree_partialClusterTooltip = {0} out of {1} group members have a ''{2}'' resource +pie_chart_availability_summary_disabled = Disabled +pie_chart_availability_summary_down = Down +pie_chart_availability_summary_label = Availability +pie_chart_availability_summary_up = Up util_ancestry_parentAncestry = Parent Ancestry for: util_errorHandler_nullException = exception was null util_monitoringRequestCallback_error_checkServerStatusFailure = Unable to determine login status - check Server status. 
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties index 3a79eeb..941eb60 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties @@ -49,6 +49,7 @@ chart_hover_date_format = %d.%m.%y chart_hover_time_format = %H:%M:%S ##chart_ie_not_supported = Charting is not available for this browser. ##chart_metrics= Metrics +##chart_metrics_add_to_dashboard_label= Dashboards ##chart_metrics_collapse_tooltip= Click here to see additional tabular availability data. ##chart_metrics_expand_tooltip= Click here to collapse additional availability detail. ##chart_metrics_sparkline_header= Chart @@ -478,6 +479,10 @@ filter_from_date = Od filter_to_date = Do group_tree_groupOfResourceType = Skupina: [{0}] group_tree_partialClusterTooltip = {0} z {1} Älenů skupiny má "{2}" zdroj +##pie_chart_availability_summary_disabled = Disabled +##pie_chart_availability_summary_down = Down +##pie_chart_availability_summary_label = Availability +##pie_chart_availability_summary_up = Up util_ancestry_parentAncestry = Původ rodiÄe pro: util_errorHandler_nullException = vÃœjimka byla null util_monitoringRequestCallback_error_checkServerStatusFailure = NepodaÅilo se urÄit stav pÅihlaÅ¡ovánà - zkontrolujte stav serveru. 
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties index e79504c..e30a19e 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties @@ -27,6 +27,7 @@ chart_ie_not_supported = Charting ist bei diesem Browser nicht unterstÃŒtzt ##chart_metrics_collapse_tooltip= Click here to see additional tabular availability data. ##chart_metrics_expand_tooltip= Click here to collapse additional availability detail. ##chart_metrics_sparkline_header= Chart +##chart_metrics_add_to_dashboard_label= Dashboards chart_no_data_label = Keine Daten vorhanden ##chart_single_value_label = Value chart_slider_button_bar_day = Tag @@ -457,6 +458,10 @@ filter_from_date = Von filter_to_date = Bis group_tree_groupOfResourceType = Gruppe von [{0}] group_tree_partialClusterTooltip = {0} der {1} Gruppenmitglieder haben eine ''{2}'' Ressource +##pie_chart_availability_summary_disabled = Disabled +##pie_chart_availability_summary_down = Down +##pie_chart_availability_summary_label = Availability +##pie_chart_availability_summary_up = Up util_disambiguationReportDecorator_pluginSuffix = ({0} Plugin) util_errorHandler_nullException = Exception war null ##util_monitoringRequestCallback_error_checkServerStatusFailure = Unable to determine login status - check Server status. 
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties index 3f1f701..bed024b 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties @@ -28,6 +28,7 @@ ##chart_metrics_collapse_tooltip= Click here to see additional tabular availability data. ##chart_metrics_expand_tooltip= Click here to collapse additional availability detail. ##chart_metrics_sparkline_header= Chart +##chart_metrics_add_to_dashboard_label= Dashboards ##chart_no_data_label = No Data ##chart_single_value_label = Value ##chart_slider_button_bar_minute = Min @@ -455,6 +456,10 @@ filter_from_date = éå§ filter_to_date = çµäº ##group_tree_groupOfResourceType = Group of [{0}] group_tree_partialClusterTooltip = {1} ã®äžã® {0} ã°ã«ãŒãã¡ã³ããŒã ''{2}'' ãªãœãŒã¹ãæã£ãŠããŸã +##pie_chart_availability_summary_disabled = Disabled +##pie_chart_availability_summary_down = Down +##pie_chart_availability_summary_label = Availability +##pie_chart_availability_summary_up = Up util_ancestry_parentAncestry = 芪ã®å ç¥ : util_disambiguationReportDecorator_pluginSuffix = ({0} ãã©ã°ã€ã³) util_errorHandler_nullException = äŸå€ã¯ null ã§ãã diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties index 3d40d83..416f4b4 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties @@ -26,6 +26,7 @@ 
##chart_metrics_collapse_tooltip= Click here to see additional tabular availability data. ##chart_metrics_expand_tooltip= Click here to collapse additional availability detail. ##chart_metrics_sparkline_header= Chart +##chart_metrics_add_to_dashboard_label= Dashboards ##chart_no_data_label = No Data ##chart_single_value_label = Value ##chart_slider_button_bar_minute = Min @@ -412,6 +413,10 @@ favorites_resources = ìŠê²šì°Ÿêž° 늬ìì€ filter_from_date = ìì group_tree_groupOfResourceType = Group of [{0}] group_tree_partialClusterTooltip = {1} ì€ {0} 귞룹 ë©€ë²ê° ''{2}'' ììì ê°ì§ê³ ììµëë€. +##pie_chart_availability_summary_disabled = Disabled +##pie_chart_availability_summary_down = Down +##pie_chart_availability_summary_label = Availability +##pie_chart_availability_summary_up = Up util_ancestry_parentAncestry = ë¶ëªšì ì¡°ì: util_errorHandler_nullException = ììžë nullìŽììµëë€. util_monitoringRequestCallback_error_checkServerStatusFailure = ë¡ê·žìž ìí륌 íìží ì ììµëë€ - ìë²ì ìí륌 íìžíììì diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties index de43fa4..47cac36 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties @@ -25,6 +25,7 @@ ##chart_metrics_collapse_tooltip= Click here to see additional tabular availability data. ##chart_metrics_expand_tooltip= Click here to collapse additional availability detail. 
##chart_metrics_sparkline_header= Chart +##chart_metrics_add_to_dashboard_label= Dashboards ##chart_no_data_label = No Data ##chart_single_value_label = Value ##chart_slider_button_bar_minute = Min @@ -460,6 +461,10 @@ favorites_resources = Recursos Favoritos ##filter_to_date = To ##group_tree_groupOfResourceType = Group of [{0}] group_tree_partialClusterTooltip = {0} out of {1} group members have a ''{2}'' resource +##pie_chart_availability_summary_disabled = Disabled +##pie_chart_availability_summary_down = Down +##pie_chart_availability_summary_label = Availability +##pie_chart_availability_summary_up = Up util_ancestry_parentAncestry = Ancestral para: util_errorHandler_nullException = Exce\u00E7\u00E3o nula util_monitoringRequestCallback_error_checkServerStatusFailure = Imposs\u00EDvel verificar o status do login - verifique o status do Servidor. diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties index 669d8d6..99f34f9 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties @@ -25,6 +25,7 @@ ##chart_metrics_collapse_tooltip= Click here to see additional tabular availability data. ##chart_metrics_expand_tooltip= Click here to collapse additional availability detail. 
##chart_metrics_sparkline_header= Chart +##chart_metrics_add_to_dashboard_label= Dashboards ##chart_no_data_label = No Data ##chart_single_value_label = Value ##chart_slider_button_bar_minute = Min @@ -2549,6 +2550,10 @@ favorites_resources = ÐзбÑаММÑе ÑеÑÑÑÑÑ filter_from_date = ÐÑ filter_to_date = ÐП group_tree_partialClusterTooltip = {0} Оз {1} ÑлеМПв гÑÑÐ¿Ð¿Ñ ÐžÐŒÐµÑÑ ÑеÑÑÑÑ ''{2}'' +##pie_chart_availability_summary_disabled = Disabled +##pie_chart_availability_summary_down = Down +##pie_chart_availability_summary_label = Availability +##pie_chart_availability_summary_up = Up util_errorHandler_nullException = ÐПзМОклП null ОÑклÑÑеМОе util_monitoringRequestCallback_error_checkServerStatusFailure = Ðе ÑЎаеÑÑÑ ÐŸÐ¿ÑеЎелОÑÑ ÑÑаÑÑÑ Ð°Ð²ÑПÑОзаÑОО - пÑПвеÑÑÑе ÑПÑÑПÑМОе ÑеÑвеÑа. util_userPerm_loadFailGlobal = Ðе ÑЎаеÑÑÑ Ð·Ð°Ð³ÑÑзОÑÑ Ð²Ð°ÑО глПбалÑÐœÑе пПлМПЌПÑÐžÑ - Ме пÑеЎПÑÑавлеМÑ. diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties index b15023f..69ca0f6 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties @@ -25,6 +25,7 @@ ##chart_metrics_collapse_tooltip= Click here to see additional tabular availability data. ##chart_metrics_expand_tooltip= Click here to collapse additional availability detail. 
##chart_metrics_sparkline_header= Chart +##chart_metrics_add_to_dashboard_label= Dashboards ##chart_no_data_label = No Data ##chart_single_value_label = Value ##chart_slider_button_bar_minute = Min @@ -450,6 +451,10 @@ favorites_resources = \u8d44\u6e90\u6536\u85cf\u5939 ##filter_to_date = To ##group_tree_groupOfResourceType = Group of [{0}] group_tree_partialClusterTooltip = \u9664\u53bb {1},{0}\u7684\u7ec4\u6210\u5458\u62e5\u6709 \u4e00\u4e2a''{2}''\u8d44\u6e90 +##pie_chart_availability_summary_disabled = Disabled +##pie_chart_availability_summary_down = Down +##pie_chart_availability_summary_label = Availability +##pie_chart_availability_summary_up = Up util_ancestry_parentAncestry = Parent Ancestry for: util_errorHandler_nullException = \u5f02\u5e38\u662f null util_monitoringRequestCallback_error_checkServerStatusFailure = \u65e0\u6cd5\u786e\u5b9a\u767b\u5f55\u72b6\u6001- \u68c0\u67e5\u670d\u52a1\u5668\u72b6\u6001.
commit 5fc0c3dcb14fa1406e24e57d213a0cbda0205255 Author: Mike Thompson mithomps@redhat.com Date: Tue Jul 30 11:42:37 2013 -0700
Fix bug with new metrics database and not much history results in NPE on metrics page.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java index 82a9d98..bb67587 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java @@ -121,9 +121,9 @@ public class MetricsTableView extends Table<MetricsViewDataSource> implements Re metricsTableListGrid.addSelectionChangedHandler(new SelectionChangedHandler() { @Override public void onSelectionChanged(SelectionEvent selectionEvent) { - if(metricsTableListGrid.getSelectedRecords().length > 0){ + if (metricsTableListGrid.getSelectedRecords().length > 0) { addToDashboardComponent.enableAddToDashboardButton(); - }else { + } else { addToDashboardComponent.disableAddToDashboardButton(); } } @@ -304,11 +304,15 @@ public class MetricsTableView extends Table<MetricsViewDataSource> implements Re public void onDataArrived(DataArrivedEvent dataArrivedEvent) { int startRow = dataArrivedEvent.getStartRow(); int endRow = dataArrivedEvent.getEndRow(); + for (int i = startRow; i < endRow; i++) { - if (null != metricsTableView && null != metricsTableView.expandedRows - && metricsTableView.expandedRows.contains(getRecord(i).getAttributeAsInt( - MetricsViewDataSource.FIELD_METRIC_DEF_ID))) { - expandRecord(getRecord(i)); + ListGridRecord listGridRecord = getRecord(i); + if (null != listGridRecord) { + int metricDefinitionId = listGridRecord + .getAttributeAsInt(MetricsViewDataSource.FIELD_METRIC_DEF_ID); + if (null != metricsTableView && metricsTableView.expandedRows.contains(metricDefinitionId)) { + 
expandRecord(listGridRecord); + } } } }
commit 49886f1a8f52cbf5166498280d4ee28e679621f7 Author: mtho11 mikecthompson@gmail.com Date: Tue Jul 30 10:59:02 2013 -0700
Change layout of fields in availability detail form of metric page.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java index ee06724..3e573dd 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java @@ -230,8 +230,6 @@ public class AvailabilityOverUnderGraphType implements AvailabilityGraphType { .attr("height", height + margin.top + margin.bottom) .attr("transform", "translate(" + margin.left + "," + margin.top + ")");
- console.error("avail.chartSelection: "+ availChartContext.chartSelection); -
svg.selectAll("rect.availBars") .data(availChartContext.data) diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java index 23effb0..057a77f 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java @@ -18,7 +18,10 @@ */ package org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype;
+import com.smartgwt.client.widgets.HTMLFlow; +import org.rhq.core.domain.measurement.MeasurementUnits; import org.rhq.enterprise.gui.coregui.client.util.Log; +import org.rhq.enterprise.gui.coregui.client.util.MeasurementConverterClient;
import java.util.ArrayList; import java.util.List; @@ -32,11 +35,27 @@ import java.util.List; */ public class AvailabilitySummaryPieGraphType {
+ public static final int HEIGHT = 100; + public static final int WIDTH = 100; + private List<AvailabilitySummary> availabilitySummaries;
public AvailabilitySummaryPieGraphType() { }
+ public HTMLFlow createGraphMarker() { + Log.debug("drawGraph marker in AvailabilitySummaryPieGraph"); + + StringBuilder divAndSvgDefs = new StringBuilder(); + divAndSvgDefs.append("<div id="availSummaryChart" >"); + divAndSvgDefs.append("<svg xmlns="http://www.w3.org/2000/svg%5C" version="1.1" style="height:"+HEIGHT+"px;" ></svg>"); + divAndSvgDefs.append("</div>"); + HTMLFlow graphFlow = new HTMLFlow(divAndSvgDefs.toString()); + graphFlow.setWidth(WIDTH); + graphFlow.setHeight(HEIGHT); + return graphFlow; + } + public void setAvailabilityData(String upLabel, double upPercent, String downLabel, double downPercent, String disabledLabel, double disabledPercent ){ availabilitySummaries = new ArrayList<AvailabilitySummary>(); availabilitySummaries.add(new AvailabilitySummary(upLabel, upPercent)); @@ -51,7 +70,8 @@ public class AvailabilitySummaryPieGraphType { // loop through the avail intervals for (AvailabilitySummary availabilitySummary : availabilitySummaries) { sb.append("{ "label":"" + availabilitySummary.getLabel() + "", "); - sb.append(" "value": "" + availabilitySummary.getValue() * 100 + "" },"); + sb.append(" "value": "" + MeasurementConverterClient.format(availabilitySummary.getValue(), + MeasurementUnits.PERCENTAGE, true) + "" },"); } sb.setLength(sb.length() - 1); } @@ -67,26 +87,21 @@ public class AvailabilitySummaryPieGraphType { public native void drawJsniChart() /*-{ console.log("Draw Availability Summary Pie Chart");
- var data = this.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilitySummaryPieGraphType::getAvailabilitySummaryJson()(); - var w = 100, h = 100, - r = 30, - color = $wnd.d3.scale.category10(); - - var vis = $wnd.d3.select("#availSummaryChart svg") + r = h/2, + color = $wnd.d3.scale.category10(), + data = this.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilitySummaryPieGraphType::getAvailabilitySummaryJson()(), + vis = $wnd.d3.select("#availSummaryChart svg") .append("g") .data(data) .attr("width", w) .attr("height", h) - .attr("transform", "translate(" + r + "," + r + ")"); - - var arc = $wnd.d3.svg.arc() - .outerRadius(r); - - var pie = $wnd.d3.layout.pie(); - - var arcs = vis.selectAll("g.slice") + .attr("transform", "translate(" + r + "," + r + ")"), + arc = $wnd.d3.svg.arc() + .outerRadius(r), + pie = $wnd.d3.layout.pie(), + arcs = vis.selectAll("g.slice") .data(pie) .enter() .append("g") @@ -108,7 +123,7 @@ public class AvailabilitySummaryPieGraphType { .text(function (d, i) { return data[i].value; }); - console.warn("done with avail summary pie graph"); + console.log("done with avail summary pie graph");
}-*/;
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/ResourceMetricAvailabilityView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/ResourceMetricAvailabilityView.java index 961ea5a..cffcfd7 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/ResourceMetricAvailabilityView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/ResourceMetricAvailabilityView.java @@ -22,8 +22,9 @@ package org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitori
import com.google.gwt.user.client.Timer; import com.google.gwt.user.client.rpc.AsyncCallback; -import com.smartgwt.client.widgets.HTMLFlow; +import com.smartgwt.client.types.VerticalAlignment; import com.smartgwt.client.widgets.form.DynamicForm; +import com.smartgwt.client.widgets.form.fields.CanvasItem; import com.smartgwt.client.widgets.form.fields.FormItem; import com.smartgwt.client.widgets.form.fields.StaticTextItem;
@@ -49,11 +50,8 @@ public class ResourceMetricAvailabilityView extends EnhancedVLayout {
private Resource resource; private StaticTextItem currentField; - private StaticTextItem availField; private StaticTextItem availTimeField; - private StaticTextItem downField; private StaticTextItem downTimeField; - private StaticTextItem disabledField; private StaticTextItem disabledTimeField; private StaticTextItem failureCountField; private StaticTextItem disabledCountField; @@ -71,29 +69,16 @@ public class ResourceMetricAvailabilityView extends EnhancedVLayout { availabilitySummaryPieGraph = new AvailabilitySummaryPieGraphType();
setWidth100(); - setHeight(265); + setHeight(165); }
@Override protected void onInit() { super.onInit(); - addMember(createGraphMarker()); addMember(createSummaryForm()); }
- public HTMLFlow createGraphMarker() { - Log.debug("drawGraph marker in AvailabilitySummaryPieGraph"); - - StringBuilder divAndSvgDefs = new StringBuilder(); - divAndSvgDefs.append("<div id="availSummaryChart" - + "" ><svg xmlns="http://www.w3.org/2000/svg%5C" version="1.1" style="height:100px;">"); - divAndSvgDefs.append("</svg></div>"); - HTMLFlow graph = new HTMLFlow(divAndSvgDefs.toString()); - graph.setWidth100(); - graph.setHeight(100); - //addMember(graph); - return graph; - } +
private DynamicForm createSummaryForm() { DynamicForm form = new DynamicForm(); @@ -108,28 +93,16 @@ public class ResourceMetricAvailabilityView extends EnhancedVLayout { currentField.setColSpan(4);
// row 2 - availField = new StaticTextItem("avail", MSG.view_resource_monitor_availability_availability()); - availField.setWrapTitle(false); - prepareTooltip(availField, MSG.view_resource_monitor_availability_availability_tooltip()); - availTimeField = new StaticTextItem("availTime", MSG.view_resource_monitor_availability_uptime()); availTimeField.setWrapTitle(false); prepareTooltip(availTimeField, MSG.view_resource_monitor_availability_uptime_tooltip());
// row 3 - downField = new StaticTextItem("down", MSG.view_resource_monitor_availability_down()); - downField.setWrapTitle(false); - prepareTooltip(downField, MSG.view_resource_monitor_availability_down_tooltip()); - downTimeField = new StaticTextItem("downTime", MSG.view_resource_monitor_availability_downtime()); downTimeField.setWrapTitle(false); prepareTooltip(downTimeField, MSG.view_resource_monitor_availability_downtime_tooltip());
// row 4 - disabledField = new StaticTextItem("disabled", MSG.view_resource_monitor_availability_disabled()); - disabledField.setWrapTitle(false); - prepareTooltip(disabledField, MSG.view_resource_monitor_availability_disabled_tooltip()); - disabledTimeField = new StaticTextItem("disabledTime", MSG.view_resource_monitor_availability_disabledTime()); disabledTimeField.setWrapTitle(false); prepareTooltip(disabledTimeField, MSG.view_resource_monitor_availability_disabledTime_tooltip()); @@ -164,7 +137,17 @@ public class ResourceMetricAvailabilityView extends EnhancedVLayout { currentTimeField.setColSpan(4); currentTimeField.setShowTitle(false);
- form.setItems(currentField, availField, availTimeField, downField, downTimeField, disabledField, + CanvasItem availPieChartItem = new CanvasItem(); + //@todo: i18n + availPieChartItem.setTitle("Availability"); + availPieChartItem.setCanvas(availabilitySummaryPieGraph.createGraphMarker()); + availPieChartItem.setRowSpan(3); + availPieChartItem.setVAlign(VerticalAlignment.TOP); + availPieChartItem.setTitleVAlign(VerticalAlignment.TOP); + availPieChartItem.setHeight(60); + availPieChartItem.setWidth(60); + + form.setItems(currentField, availPieChartItem, availTimeField, downTimeField, disabledTimeField, failureCountField, disabledCountField, mtbfField, mttrField, unknownField, currentTimeField);
@@ -181,6 +164,7 @@ public class ResourceMetricAvailabilityView extends EnhancedVLayout { public void onSuccess(ResourceAvailabilitySummary result) { Log.debug("reloadSummaryData");
+ //@todo: i18n availabilitySummaryPieGraph.setAvailabilityData( "Up", result.getUpPercentage(), "Down", result.getDownPercentage(), @@ -197,16 +181,10 @@ public class ResourceMetricAvailabilityView extends EnhancedVLayout {
currentField.setValue(MSG.view_resource_monitor_availability_currentStatus_value(result .getCurrent().getName(), TimestampCellFormatter.format(result.getLastChange().getTime()))); - availField.setValue(MeasurementConverterClient.format(result.getUpPercentage(), - MeasurementUnits.PERCENTAGE, true)); availTimeField.setValue(MeasurementConverterClient.format((double) result.getUpTime(), MeasurementUnits.MILLISECONDS, true)); - downField.setValue(MeasurementConverterClient.format(result.getDownPercentage(), - MeasurementUnits.PERCENTAGE, true)); downTimeField.setValue(MeasurementConverterClient.format((double) result.getDownTime(), MeasurementUnits.MILLISECONDS, true)); - disabledField.setValue(MeasurementConverterClient.format(result.getDisabledPercentage(), - MeasurementUnits.PERCENTAGE, true)); disabledTimeField.setValue(MeasurementConverterClient.format((double) result.getDisabledTime(), MeasurementUnits.MILLISECONDS, true)); failureCountField.setValue(result.getFailures());
commit 3d310c939d5018a9878a8fd836a220a5b2f018f4 Author: mtho11 mikecthompson@gmail.com Date: Mon Jul 29 20:07:11 2013 -0700
Availability pie chart for metrics page.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java index 811a579..ee06724 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java @@ -230,6 +230,8 @@ public class AvailabilityOverUnderGraphType implements AvailabilityGraphType { .attr("height", height + margin.top + margin.bottom) .attr("transform", "translate(" + margin.left + "," + margin.top + ")");
+ console.error("avail.chartSelection: "+ availChartContext.chartSelection); +
svg.selectAll("rect.availBars") .data(availChartContext.data) @@ -329,7 +331,6 @@ public class AvailabilityOverUnderGraphType implements AvailabilityGraphType { // Public API draw: function (availChartContext) { "use strict"; - console.log("AvailabilityChart"); drawBars(availChartContext); createHovers(); } @@ -338,7 +339,6 @@ public class AvailabilityOverUnderGraphType implements AvailabilityGraphType {
}();
- console.log("Avail Data records: "+availChartContext.data.length); if (typeof availChartContext.data !== 'undefined' && availChartContext.data.length > 0) { availabilityGraph.draw(availChartContext); console.log("Availability Chart Drawn"); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java new file mode 100644 index 0000000..23effb0 --- /dev/null +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java @@ -0,0 +1,133 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2012 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ +package org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype; + +import org.rhq.enterprise.gui.coregui.client.util.Log; + +import java.util.ArrayList; +import java.util.List; + +/** + * Contains the javascript chart definition for an implementation of the d3 availability chart. 
This implementation is + * just a line that changes color based on availability type: up=green, down=red, orange=disabled, unknown=grey, + * empty=grey, warn=yellow. This version of the availability graph shows continuous intervals. + * + * @author Mike Thompson + */ +public class AvailabilitySummaryPieGraphType { + + private List<AvailabilitySummary> availabilitySummaries; + + public AvailabilitySummaryPieGraphType() { + } + + public void setAvailabilityData(String upLabel, double upPercent, String downLabel, double downPercent, String disabledLabel, double disabledPercent ){ + availabilitySummaries = new ArrayList<AvailabilitySummary>(); + availabilitySummaries.add(new AvailabilitySummary(upLabel, upPercent)); + availabilitySummaries.add(new AvailabilitySummary(downLabel, downPercent)); + availabilitySummaries.add(new AvailabilitySummary(disabledLabel, disabledPercent)); + + } + + public String getAvailabilitySummaryJson() { + StringBuilder sb = new StringBuilder("["); + if (null != availabilitySummaries) { + // loop through the avail intervals + for (AvailabilitySummary availabilitySummary : availabilitySummaries) { + sb.append("{ "label":"" + availabilitySummary.getLabel() + "", "); + sb.append(" "value": "" + availabilitySummary.getValue() * 100 + "" },"); + } + sb.setLength(sb.length() - 1); + } + + sb.append("]"); + Log.debug(sb.toString()); + return sb.toString(); + } + + /** + * The magic JSNI to draw the charts with d3. 
+ */ + public native void drawJsniChart() /*-{ + console.log("Draw Availability Summary Pie Chart"); + + var data = this.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilitySummaryPieGraphType::getAvailabilitySummaryJson()(); + + var w = 100, + h = 100, + r = 30, + color = $wnd.d3.scale.category10(); + + var vis = $wnd.d3.select("#availSummaryChart svg") + .append("g") + .data(data) + .attr("width", w) + .attr("height", h) + .attr("transform", "translate(" + r + "," + r + ")"); + + var arc = $wnd.d3.svg.arc() + .outerRadius(r); + + var pie = $wnd.d3.layout.pie(); + + var arcs = vis.selectAll("g.slice") + .data(pie) + .enter() + .append("g") + .attr("class", "slice"); + + arcs.append("path") + .attr("fill", function (d, i) { + return color(i); + }) + .attr("d", arc); + + arcs.append("text") + .attr("transform", function (d) { + d.innerRadius = 0; + d.outerRadius = r; + return "translate(" + arc.centroid(d) + ")"; + }) + .attr("text-anchor", "middle") + .text(function (d, i) { + return data[i].value; + }); + console.warn("done with avail summary pie graph"); + + }-*/; + + + private static class AvailabilitySummary { + final private String label; + final private double value; + + private AvailabilitySummary(String label, double value) { + this.label = label; + this.value = value; + } + + private String getLabel() { + return label; + } + + private double getValue() { + return value; + } + } +} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java index 09a7bc3..82a9d98 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java +++ 
b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java @@ -305,7 +305,7 @@ public class MetricsTableView extends Table<MetricsViewDataSource> implements Re int startRow = dataArrivedEvent.getStartRow(); int endRow = dataArrivedEvent.getEndRow(); for (int i = startRow; i < endRow; i++) { - if (null != metricsTableView.expandedRows + if (null != metricsTableView && null != metricsTableView.expandedRows && metricsTableView.expandedRows.contains(getRecord(i).getAttributeAsInt( MetricsViewDataSource.FIELD_METRIC_DEF_ID))) { expandRecord(getRecord(i)); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/ResourceMetricAvailabilityView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/ResourceMetricAvailabilityView.java index 9d6b892..961ea5a 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/ResourceMetricAvailabilityView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/ResourceMetricAvailabilityView.java @@ -20,7 +20,9 @@
package org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.table;
+import com.google.gwt.user.client.Timer; import com.google.gwt.user.client.rpc.AsyncCallback; +import com.smartgwt.client.widgets.HTMLFlow; import com.smartgwt.client.widgets.form.DynamicForm; import com.smartgwt.client.widgets.form.fields.FormItem; import com.smartgwt.client.widgets.form.fields.StaticTextItem; @@ -31,6 +33,8 @@ import org.rhq.core.domain.resource.composite.ResourceAvailabilitySummary; import org.rhq.enterprise.gui.coregui.client.CoreGUI; import org.rhq.enterprise.gui.coregui.client.components.table.TimestampCellFormatter; import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; +import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilitySummaryPieGraphType; +import org.rhq.enterprise.gui.coregui.client.util.Log; import org.rhq.enterprise.gui.coregui.client.util.MeasurementConverterClient; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout;
@@ -58,22 +62,39 @@ public class ResourceMetricAvailabilityView extends EnhancedVLayout { private StaticTextItem unknownField; private StaticTextItem currentTimeField;
+ private AvailabilitySummaryPieGraphType availabilitySummaryPieGraph; + public ResourceMetricAvailabilityView(Resource resource) { super();
this.resource = resource; + availabilitySummaryPieGraph = new AvailabilitySummaryPieGraphType();
setWidth100(); - setHeight(165); + setHeight(265); }
@Override protected void onInit() { super.onInit(); - + addMember(createGraphMarker()); addMember(createSummaryForm()); }
+ public HTMLFlow createGraphMarker() { + Log.debug("drawGraph marker in AvailabilitySummaryPieGraph"); + + StringBuilder divAndSvgDefs = new StringBuilder(); + divAndSvgDefs.append("<div id="availSummaryChart" + + "" ><svg xmlns="http://www.w3.org/2000/svg%5C" version="1.1" style="height:100px;">"); + divAndSvgDefs.append("</svg></div>"); + HTMLFlow graph = new HTMLFlow(divAndSvgDefs.toString()); + graph.setWidth100(); + graph.setHeight(100); + //addMember(graph); + return graph; + } + private DynamicForm createSummaryForm() { DynamicForm form = new DynamicForm(); form.setWidth100(); @@ -158,6 +179,21 @@ public class ResourceMetricAvailabilityView extends EnhancedVLayout {
@Override public void onSuccess(ResourceAvailabilitySummary result) { + Log.debug("reloadSummaryData"); + + availabilitySummaryPieGraph.setAvailabilityData( + "Up", result.getUpPercentage(), + "Down", result.getDownPercentage(), + "Disabled" ,result.getDisabledPercentage() + ); + new Timer(){ + + @Override + public void run() { + Log.debug("Run Avail Graph"); + availabilitySummaryPieGraph.drawJsniChart(); + } + }.schedule(150);
currentField.setValue(MSG.view_resource_monitor_availability_currentStatus_value(result .getCurrent().getName(), TimestampCellFormatter.format(result.getLastChange().getTime()))); @@ -189,6 +225,8 @@ public class ResourceMetricAvailabilityView extends EnhancedVLayout {
currentTimeField.setValue(MSG.view_resource_monitor_availability_currentAsOf(TimestampCellFormatter .format(result.getCurrentTime()))); + + }
@Override
commit b8bb84b69f992e3fd06fdd2c70b7482f421c6909 Author: John Sanda jsanda@redhat.com Date: Tue Jul 30 16:13:09 2013 -0400
forgot to add test resource file in previous commit
diff --git a/modules/plugins/rhq-storage/src/test/resources/cassandra.yaml b/modules/plugins/rhq-storage/src/test/resources/cassandra.yaml new file mode 100644 index 0000000..fd7973b --- /dev/null +++ b/modules/plugins/rhq-storage/src/test/resources/cassandra.yaml @@ -0,0 +1,690 @@ +# Cassandra storage config YAML + +# NOTE: +# See http://wiki.apache.org/cassandra/StorageConfiguration for +# full explanations of configuration directives +# /NOTE + +# The name of the cluster. This is mainly used to prevent machines in +# one logical cluster from joining another. +cluster_name: rhq + +# This defines the number of tokens randomly assigned to this node on the ring +# The more tokens, relative to other nodes, the larger the proportion of data +# that this node will store. You probably want all nodes to have the same number +# of tokens assuming they have equal hardware capability. +# +# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, +# and will use the initial_token as described below. +# +# Specifying initial_token will override this setting. +# +# If you already have a cluster with 1 token per node, and wish to migrate to +# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations +num_tokens: 256 + +# If you haven't specified num_tokens, or have set it to the default of 1 then +# you should always specify InitialToken when setting up a production +# cluster for the first time, and often when adding capacity later. +# The principle is that each node should be given an equal slice of +# the token ring; see http://wiki.apache.org/cassandra/Operations +# for more details. +# +# If blank, Cassandra will request a token bisecting the range of +# the heaviest-loaded existing node. If there is no load information +# available, such as is the case with a new cluster, it will pick +# a random token, which will lead to hot spots. 
+#initial_token: + +# See http://wiki.apache.org/cassandra/HintedHandoff +hinted_handoff_enabled: true +# this defines the maximum amount of time a dead host will have hints +# generated. After it has been dead this long, new hints for it will not be +# created until it has been seen alive and gone down again. +max_hint_window_in_ms: 10800000 # 3 hours +# throttle in KB's per second, per delivery thread +hinted_handoff_throttle_in_kb: 1024 +# Number of threads with which to deliver hints; +# Consider increasing this number when you have multi-dc deployments, since +# cross-dc handoff tends to be slower +max_hints_delivery_threads: 2 + +# The following setting populates the page cache on memtable flush and compaction +# WARNING: Enable this setting only when the whole node's data fits in memory. +# Defaults to: false +# populate_io_cache_on_flush: false + +# Authentication backend, implementing IAuthenticator; used to identify users +# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, +# PasswordAuthenticator}. +# +# - AllowAllAuthenticator performs no checks - set it to disable authentication. +# - PasswordAuthenticator relies on username/password pairs to authenticate +# users. It keeps usernames and hashed passwords in system_auth.credentials table. +# Please increase system_auth keyspace replication factor if you use this authenticator. +authenticator: org.apache.cassandra.auth.PasswordAuthenticator + +# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions +# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, +# CassandraAuthorizer}. +# +# - AllowAllAuthorizer allows any action to any user - set it to disable authorization. +# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please +# increase system_auth keyspace replication factor if you use this authorizer. 
+authorizer: org.apache.cassandra.auth.CassandraAuthorizer + +# Validity period for permissions cache (fetching permissions can be an +# expensive operation depending on the authorizer, CassandraAuthorizer is +# one example). Defaults to 2000, set to 0 to disable. +# Will be disabled automatically for AllowAllAuthorizer. +permissions_validity_in_ms: 2000 + +# The partitioner is responsible for distributing rows (by key) across +# nodes in the cluster. Any IPartitioner may be used, including your +# own as long as it is on the classpath. Out of the box, Cassandra +# provides org.apache.cassandra.dht.{Murmur3Partitioner, RandomPartitioner +# ByteOrderedPartitioner, OrderPreservingPartitioner (deprecated)}. +# +# - RandomPartitioner distributes rows across the cluster evenly by md5. +# This is the default prior to 1.2 and is retained for compatibility. +# - Murmur3Partitioner is similar to RandomPartioner but uses Murmur3_128 +# Hash Function instead of md5. When in doubt, this is the best option. +# - ByteOrderedPartitioner orders rows lexically by key bytes. BOP allows +# scanning rows in key order, but the ordering can generate hot spots +# for sequential insertion workloads. +# - OrderPreservingPartitioner is an obsolete form of BOP, that stores +# - keys in a less-efficient format and only works with keys that are +# UTF8-encoded Strings. +# - CollatingOPP colates according to EN,US rules rather than lexical byte +# ordering. Use this as an example if you need custom collation. +# +# See http://wiki.apache.org/cassandra/Operations for more on +# partitioners and token selection. +partitioner: org.apache.cassandra.dht.Murmur3Partitioner + +# directories where Cassandra should store data on disk. 
+data_file_directories: + - /Users/jsanda/Development/redhat/rhq/rhq-data/storage-1/data + +# commit log +commitlog_directory: /Users/jsanda/Development/redhat/rhq/rhq-data/storage-1/commit_log + +# policy for data disk failures: +# stop: shut down gossip and Thrift, leaving the node effectively dead, but +# still inspectable via JMX. +# best_effort: stop using the failed disk and respond to requests based on +# remaining available sstables. This means you WILL see obsolete +# data at CL.ONE! +# ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra +disk_failure_policy: stop + +# Maximum size of the key cache in memory. +# +# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the +# minimum, sometimes more. The key cache is fairly tiny for the amount of +# time it saves, so it's worthwhile to use it at large numbers. +# The row cache saves even more time, but must store the whole values of +# its rows, so it is extremely space-intensive. It's best to only use the +# row cache if you have hot rows or static rows. +# +# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. +# +# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache. +key_cache_size_in_mb: + +# Duration in seconds after which Cassandra should +# safe the keys cache. Caches are saved to saved_caches_directory as +# specified in this configuration file. +# +# Saved caches greatly improve cold-start speeds, and is relatively cheap in +# terms of I/O for the key cache. Row cache saving is much more expensive and +# has limited use. +# +# Default is 14400 or 4 hours. +key_cache_save_period: 14400 + +# Number of keys from the key cache to save +# Disabled by default, meaning all keys are going to be saved +# key_cache_keys_to_save: 100 + +# Maximum size of the row cache in memory. +# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. 
+# +# Default value is 0, to disable row caching. +row_cache_size_in_mb: 0 + +# Duration in seconds after which Cassandra should +# safe the row cache. Caches are saved to saved_caches_directory as specified +# in this configuration file. +# +# Saved caches greatly improve cold-start speeds, and is relatively cheap in +# terms of I/O for the key cache. Row cache saving is much more expensive and +# has limited use. +# +# Default is 0 to disable saving the row cache. +row_cache_save_period: 0 + +# Number of keys from the row cache to save +# Disabled by default, meaning all keys are going to be saved +# row_cache_keys_to_save: 100 + +# The provider for the row cache to use. +# +# Supported values are: ConcurrentLinkedHashCacheProvider, SerializingCacheProvider +# +# SerializingCacheProvider serialises the contents of the row and stores +# it in native memory, i.e., off the JVM Heap. Serialized rows take +# significantly less memory than "live" rows in the JVM, so you can cache +# more rows in a given memory footprint. And storing the cache off-heap +# means you can use smaller heap sizes, reducing the impact of GC pauses. +# +# It is also valid to specify the fully-qualified class name to a class +# that implements org.apache.cassandra.cache.IRowCacheProvider. +# +# Defaults to SerializingCacheProvider +row_cache_provider: SerializingCacheProvider + +# saved caches +saved_caches_directory: /Users/jsanda/Development/redhat/rhq/rhq-data/storage-1/saved_caches + +# commitlog_sync may be either "periodic" or "batch." +# When in batch mode, Cassandra won't ack writes until the commit log +# has been fsynced to disk. It will wait up to +# commitlog_sync_batch_window_in_ms milliseconds for other writes, before +# performing the sync. +# +# commitlog_sync: batch +# commitlog_sync_batch_window_in_ms: 50 +# +# the other option is "periodic" where writes may be acked immediately +# and the CommitLog is simply synced every commitlog_sync_period_in_ms +# milliseconds. 
+commitlog_sync: periodic +commitlog_sync_period_in_ms: 10000 + +# The size of the individual commitlog file segments. A commitlog +# segment may be archived, deleted, or recycled once all the data +# in it (potentally from each columnfamily in the system) has been +# flushed to sstables. +# +# The default size is 32, which is almost always fine, but if you are +# archiving commitlog segments (see commitlog_archiving.properties), +# then you probably want a finer granularity of archiving; 8 or 16 MB +# is reasonable. +commitlog_segment_size_in_mb: 32 + +# any class that implements the SeedProvider interface and has a +# constructor that takes a Map<String, String> of parameters will do. +seed_provider: + # Addresses of hosts that are deemed contact points. + # Cassandra nodes use this list of hosts to find each other and learn + # the topology of the ring. You must change this if you are running + # multiple nodes! + - class_name: org.apache.cassandra.locator.SimpleSeedProvider + parameters: + # seeds is actually a comma-delimited list of addresses. + # Ex: "<ip1>,<ip2>,<ip3>" + - seeds: "127.0.0.1" + +# emergency pressure valve: each time heap usage after a full (CMS) +# garbage collection is above this fraction of the max, Cassandra will +# flush the largest memtables. +# +# Set to 1.0 to disable. Setting this lower than +# CMSInitiatingOccupancyFraction is not likely to be useful. +# +# RELYING ON THIS AS YOUR PRIMARY TUNING MECHANISM WILL WORK POORLY: +# it is most effective under light to moderate load, or read-heavy +# workloads; under truly massive write load, it will often be too +# little, too late. +flush_largest_memtables_at: 0.75 + +# emergency pressure valve #2: the first time heap usage after a full +# (CMS) garbage collection is above this fraction of the max, +# Cassandra will reduce cache maximum _capacity_ to the given fraction +# of the current _size_. 
Should usually be set substantially above +# flush_largest_memtables_at, since that will have less long-term +# impact on the system. +# +# Set to 1.0 to disable. Setting this lower than +# CMSInitiatingOccupancyFraction is not likely to be useful. +reduce_cache_sizes_at: 0.85 +reduce_cache_capacity_to: 0.6 + +# For workloads with more data than can fit in memory, Cassandra's +# bottleneck will be reads that need to fetch data from +# disk. "concurrent_reads" should be set to (16 * number_of_drives) in +# order to allow the operations to enqueue low enough in the stack +# that the OS and drives can reorder them. +# +# On the other hand, since writes are almost never IO bound, the ideal +# number of "concurrent_writes" is dependent on the number of cores in +# your system; (8 * number_of_cores) is a good rule of thumb. +concurrent_reads: 32 +concurrent_writes: 32 + +# Total memory to use for memtables. Cassandra will flush the largest +# memtable when this much memory is used. +# If omitted, Cassandra will set it to 1/3 of the heap. +# memtable_total_space_in_mb: 2048 + +# Total space to use for commitlogs. Since commitlog segments are +# mmapped, and hence use up address space, the default size is 32 +# on 32-bit JVMs, and 1024 on 64-bit JVMs. +# +# If space gets above this value (it will round up to the next nearest +# segment multiple), Cassandra will flush every dirty CF in the oldest +# segment and remove it. So a small total commitlog space will tend +# to cause more flush activity on less-active columnfamilies. +# commitlog_total_space_in_mb: 4096 + +# This sets the amount of memtable flush writer threads. These will +# be blocked by disk io, and each one will hold a memtable in memory +# while blocked. If you have a large heap and many data directories, +# you can increase this value for better flush performance. +# By default this will be set to the amount of data directories defined. 
+#memtable_flush_writers: 1 + +# the number of full memtables to allow pending flush, that is, +# waiting for a writer thread. At a minimum, this should be set to +# the maximum number of secondary indexes created on a single CF. +memtable_flush_queue_size: 4 + +# Whether to, when doing sequential writing, fsync() at intervals in +# order to force the operating system to flush the dirty +# buffers. Enable this to avoid sudden dirty buffer flushing from +# impacting read latencies. Almost always a good idea on SSD:s; not +# necessarily on platters. +trickle_fsync: false +trickle_fsync_interval_in_kb: 10240 + +# TCP port, for commands and data +storage_port: 7100 + +# SSL port, for encrypted communication. Unused unless enabled in +# encryption_options +ssl_storage_port: 7101 + +# Address to bind to and tell other Cassandra nodes to connect to. You +# _must_ change this if you want multiple nodes to be able to +# communicate! +# +# Leaving it blank leaves it up to InetAddress.getLocalHost(). This +# will always do the Right Thing *if* the node is properly configured +# (hostname, name resolution, etc), and the Right Thing is to use the +# address associated with the hostname (it might not be). +# +# Setting this to 0.0.0.0 is always wrong. +listen_address: 127.0.0.1 + +# Address to broadcast to other Cassandra nodes +# Leaving this blank will set it to the same value as listen_address +# broadcast_address: 1.2.3.4 + +# Internode authentication backend, implementing IInternodeAuthenticator; +# used to allow/disallow connections from peer nodes. +internode_authenticator: org.rhq.cassandra.auth.RhqInternodeAuthenticator + +# Whether to start the native transport server. +# Currently, only the thrift server is started by default because the native +# transport is considered beta. +# Please note that the address on which the native transport is bound is the +# same as the rpc_address. The port however is different and specified below. 
+start_native_transport: true +# port for the CQL native transport to listen for clients on +native_transport_port: 9142 +# The minimum and maximum threads for handling requests when the native +# transport is used. The meaning is those is similar to the one of +# rpc_min_threads and rpc_max_threads, though the default differ slightly and +# are the ones below: +# native_transport_min_threads: 16 +native_transport_max_threads: 128 + + +# Whether to start the thrift rpc server. +start_rpc: false +# The address to bind the Thrift RPC service to -- clients connect +# here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if +# you want Thrift to listen on all interfaces. +# +# Leaving this blank has the same effect it does for ListenAddress, +# (i.e. it will be based on the configured hostname of the node). +rpc_address: 127.0.0.1 +# port for Thrift to listen for clients on +rpc_port: 9160 + +# enable or disable keepalive on rpc connections +rpc_keepalive: true + +# Cassandra provides three out-of-the-box options for the RPC Server: +# +# sync -> One thread per thrift connection. For a very large number of clients, memory +# will be your limiting factor. On a 64 bit JVM, 128KB is the minimum stack size +# per thread, and that will correspond to your use of virtual memory (but physical memory +# may be limited depending on use of stack space). +# +# hsha -> Stands for "half synchronous, half asynchronous." All thrift clients are handled +# asynchronously using a small number of threads that does not vary with the amount +# of thrift clients (and thus scales well to many clients). The rpc requests are still +# synchronous (one thread per active request). +# +# The default is sync because on Windows hsha is about 30% slower. On Linux, +# sync/hsha performance is about the same, with hsha of course using less memory. 
+# +# Alternatively, can provide your own RPC server by providing the fully-qualified class name +# of an o.a.c.t.TServerFactory that can create an instance of it. +rpc_server_type: sync + +# Uncomment rpc_min|max_thread to set request pool size limits. +# +# Regardless of your choice of RPC server (see above), the number of maximum requests in the +# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync +# RPC server, it also dictates the number of clients that can be connected at all). +# +# The default is unlimited and thus provide no protection against clients overwhelming the server. You are +# encouraged to set a maximum that makes sense for you in production, but do keep in mind that +# rpc_max_threads represents the maximum number of client requests this server may execute concurrently. +# +# rpc_min_threads: 16 +# rpc_max_threads: 2048 + +# uncomment to set socket buffer sizes on rpc connections +# rpc_send_buff_size_in_bytes: +# rpc_recv_buff_size_in_bytes: + +# uncomment to set socket buffer size for internode communication +# Note that when setting this, the buffer size is limited by net.core.wmem_max +# and when not setting it it is defined by net.ipv4.tcp_wmem +# See: +# /proc/sys/net/core/wmem_max +# /proc/sys/net/core/rmem_max +# /proc/sys/net/ipv4/tcp_wmem +# /proc/sys/net/ipv4/tcp_wmem +# and: man tcp +# internode_send_buff_size_in_bytes: +# internode_recv_buff_size_in_bytes: + +# Frame size for thrift (maximum field length). +thrift_framed_transport_size_in_mb: 15 + +# The max length of a thrift message, including all fields and +# internal thrift overhead. +thrift_max_message_length_in_mb: 16 + +# Set to true to have Cassandra create a hard link to each sstable +# flushed or streamed locally in a backups/ subdirectory of the +# Keyspace data. Removing these links is the operator's +# responsibility. +incremental_backups: false + +# Whether or not to take a snapshot before each compaction. 
Be +# careful using this option, since Cassandra won't clean up the +# snapshots for you. Mostly useful if you're paranoid when there +# is a data format change. +snapshot_before_compaction: false + +# Whether or not a snapshot is taken of the data before keyspace truncation +# or dropping of column families. The STRONGLY advised default of true +# should be used to provide data safety. If you set this flag to false, you will +# lose data on truncation or drop. +auto_snapshot: true + +# Add column indexes to a row after its contents reach this size. +# Increase if your column values are large, or if you have a very large +# number of columns. The competing causes are, Cassandra has to +# deserialize this much of the row to read a single column, so you want +# it to be small - at least if you do many partial-row reads - but all +# the index data is read for each access, so you don't want to generate +# that wastefully either. +column_index_size_in_kb: 64 + +# Size limit for rows being compacted in memory. Larger rows will spill +# over to disk and use a slower two-pass compaction process. A message +# will be logged specifying the row key. +in_memory_compaction_limit_in_mb: 64 + +# Number of simultaneous compactions to allow, NOT including +# validation "compactions" for anti-entropy repair. Simultaneous +# compactions can help preserve read performance in a mixed read/write +# workload, by mitigating the tendency of small sstables to accumulate +# during a single long running compactions. The default is usually +# fine and if you experience problems with compaction running too +# slowly or too fast, you should look at +# compaction_throughput_mb_per_sec first. +# +# concurrent_compactors defaults to the number of cores. +# Uncomment to make compaction mono-threaded, the pre-0.8 default. +#concurrent_compactors: 1 + +# Multi-threaded compaction. When enabled, each compaction will use +# up to one thread per core, plus one thread per sstable being merged. 
+# This is usually only useful for SSD-based hardware: otherwise, +# your concern is usually to get compaction to do LESS i/o (see: +# compaction_throughput_mb_per_sec), not more. +multithreaded_compaction: false + +# Throttles compaction to the given total throughput across the entire +# system. The faster you insert data, the faster you need to compact in +# order to keep the sstable count down, but in general, setting this to +# 16 to 32 times the rate you are inserting data is more than sufficient. +# Setting this to 0 disables throttling. Note that this accounts for all types +# of compaction, including validation compaction. +compaction_throughput_mb_per_sec: 16 + +# Track cached row keys during compaction, and re-cache their new +# positions in the compacted sstable. Disable if you use really large +# key caches. +compaction_preheat_key_cache: true + +# Throttles all outbound streaming file transfers on this node to the +# given total throughput in Mbps. This is necessary because Cassandra does +# mostly sequential IO when streaming data during bootstrap or repair, which +# can lead to saturating the network connection and degrading rpc performance. +# When unset, the default is 200 Mbps or 25 MB/s. +# stream_throughput_outbound_megabits_per_sec: 200 + +# How long the coordinator should wait for read operations to complete +read_request_timeout_in_ms: 20000 +# How long the coordinator should wait for seq or index scans to complete +range_request_timeout_in_ms: 20000 +# How long the coordinator should wait for writes to complete +write_request_timeout_in_ms: 20000 +# How long the coordinator should wait for truncates to complete +# (This can be much longer, because we need to flush all CFs +# to make sure we can clear out anything in the commitlog that could +# cause truncated data to reappear.) 
+truncate_request_timeout_in_ms: 60000 +# The default timeout for other, miscellaneous operations +request_timeout_in_ms: 20000 + +# Enable operation timeout information exchange between nodes to accurately +# measure request timeouts, If disabled cassandra will assuming the request +# was forwarded to the replica instantly by the coordinator +# +# Warning: before enabling this property make sure to ntp is installed +# and the times are synchronized between the nodes. +cross_node_timeout: false + +# Enable socket timeout for streaming operation. +# When a timeout occurs during streaming, streaming is retried from the start +# of the current file. This *can* involve re-streaming an important amount of +# data, so you should avoid setting the value too low. +# Default value is 0, which never timeout streams. +# streaming_socket_timeout_in_ms: 0 + +# phi value that must be reached for a host to be marked down. +# most users should never need to adjust this. +# phi_convict_threshold: 8 + +# endpoint_snitch -- Set this to a class that implements +# IEndpointSnitch. The snitch has two functions: +# - it teaches Cassandra enough about your network topology to route +# requests efficiently +# - it allows Cassandra to spread replicas around your cluster to avoid +# correlated failures. It does this by grouping machines into +# "datacenters" and "racks." Cassandra will do its best not to have +# more than one replica on the same "rack" (which may not actually +# be a physical location) +# +# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER, +# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS +# ARE PLACED. +# +# Out of the box, Cassandra provides +# - SimpleSnitch: +# Treats Strategy order as proximity. This improves cache locality +# when disabling read repair, which can further improve throughput. +# Only appropriate for single-datacenter deployments. 
+# - PropertyFileSnitch: +# Proximity is determined by rack and data center, which are +# explicitly configured in cassandra-topology.properties. +# - GossipingPropertyFileSnitch +# The rack and datacenter for the local node are defined in +# cassandra-rackdc.properties and propagated to other nodes via gossip. If +# cassandra-topology.properties exists, it is used as a fallback, allowing +# migration from the PropertyFileSnitch. +# - RackInferringSnitch: +# Proximity is determined by rack and data center, which are +# assumed to correspond to the 3rd and 2nd octet of each node's +# IP address, respectively. Unless this happens to match your +# deployment conventions (as it did Facebook's), this is best used +# as an example of writing a custom Snitch class. +# - Ec2Snitch: +# Appropriate for EC2 deployments in a single Region. Loads Region +# and Availability Zone information from the EC2 API. The Region is +# treated as the Datacenter, and the Availability Zone as the rack. +# Only private IPs are used, so this will not work across multiple +# Regions. +# - Ec2MultiRegionSnitch: +# Uses public IPs as broadcast_address to allow cross-region +# connectivity. (Thus, you should set seed addresses to the public +# IP as well.) You will need to open the storage_port or +# ssl_storage_port on the public IP firewall. (For intra-Region +# traffic, Cassandra will switch to the private IP after +# establishing a connection.) +# +# You can use a custom Snitch by setting this to the full class name +# of the snitch, which will be assumed to be on your classpath. 
+endpoint_snitch: SimpleSnitch + +# controls how often to perform the more expensive part of host score +# calculation +dynamic_snitch_update_interval_in_ms: 100 +# controls how often to reset all host scores, allowing a bad host to +# possibly recover +dynamic_snitch_reset_interval_in_ms: 600000 +# if set greater than zero and read_repair_chance is < 1.0, this will allow +# 'pinning' of replicas to hosts in order to increase cache capacity. +# The badness threshold will control how much worse the pinned host has to be +# before the dynamic snitch will prefer other replicas over it. This is +# expressed as a double which represents a percentage. Thus, a value of +# 0.2 means Cassandra would continue to prefer the static snitch values +# until the pinned host was 20% worse than the fastest. +dynamic_snitch_badness_threshold: 0.1 + +# request_scheduler -- Set this to a class that implements +# RequestScheduler, which will schedule incoming client requests +# according to the specific policy. This is useful for multi-tenancy +# with a single Cassandra cluster. +# NOTE: This is specifically for requests from the client and does +# not affect inter node communication. +# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place +# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of +# client requests to a node with a separate queue for each +# request_scheduler_id. The scheduler is further customized by +# request_scheduler_options as described below. +request_scheduler: org.apache.cassandra.scheduler.NoScheduler + +# Scheduler Options vary based on the type of scheduler +# NoScheduler - Has no options +# RoundRobin +# - throttle_limit -- The throttle_limit is the number of in-flight +# requests per client. Requests beyond +# that limit are queued up until +# running requests can complete. +# The value of 80 here is twice the number of +# concurrent_reads + concurrent_writes. 
+# - default_weight -- default_weight is optional and allows for +# overriding the default which is 1. +# - weights -- Weights are optional and will default to 1 or the +# overridden default_weight. The weight translates into how +# many requests are handled during each turn of the +# RoundRobin, based on the scheduler id. +# +# request_scheduler_options: +# throttle_limit: 80 +# default_weight: 5 +# weights: +# Keyspace1: 1 +# Keyspace2: 5 + +# request_scheduler_id -- An identifier based on which to perform +# the request scheduling. Currently the only valid option is keyspace. +# request_scheduler_id: keyspace + +# index_interval controls the sampling of entries from the primary +# row index in terms of space versus time. The larger the interval, +# the smaller and less effective the sampling will be. In technical +# terms, the interval corresponds to the number of index entries that +# are skipped between taking each sample. All the sampled entries +# must fit in memory. Generally, a value between 128 and 512 here +# coupled with a large key cache size on CFs results in the best trade +# offs. This value is not often changed, however if you have many +# very small rows (many to an OS page), then increasing this will +# often lower memory usage without an impact on performance. +index_interval: 128 + +# Enable or disable inter-node encryption +# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that +# users generate their own keys) TLS_RSA_WITH_AES_128_CBC_SHA as the cipher +# suite for authentication, key exchange and encryption of the actual data transfers. +# NOTE: No custom encryption options are enabled at the moment +# The available internode options are : all, none, dc, rack +# +# If set to dc cassandra will encrypt the traffic between the DCs +# If set to rack cassandra will encrypt the traffic between the racks +# +# The passwords used in these options must match the passwords used when generating +# the keystore and truststore. 
For instructions on generating these files, see: +# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSE... +# +server_encryption_options: + internode_encryption: none + keystore: conf/.keystore + keystore_password: cassandra + truststore: conf/.truststore + truststore_password: cassandra + # More advanced defaults below: + # protocol: TLS + # algorithm: SunX509 + # store_type: JKS + # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA] + # require_client_auth: false + +# enable or disable client/server encryption. +client_encryption_options: + enabled: false + keystore: conf/.keystore + keystore_password: cassandra + # require_client_auth: false + # Set trustore and truststore_password if require_client_auth is true + # truststore: conf/.truststore + # truststore_password: cassandra + # More advanced defaults below: + # protocol: TLS + # algorithm: SunX509 + # store_type: JKS + # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA] + +# internode_compression controls whether traffic between nodes is +# compressed. +# can be: all - all traffic is compressed +# dc - traffic between different datacenters is compressed +# none - nothing is compressed. +internode_compression: none + +# Enable or disable tcp_nodelay for inter-dc communication. +# Disabling it will result in larger (but fewer) network packets being sent, +# reducing overhead from the TCP protocol itself, at the cost of increasing +# latency if you block for cross-datacenter responses. +inter_dc_tcp_nodelay: true
commit 13371f91328e26f66d687196acbc6b7e5aab4bd0 Author: John Sanda jsanda@redhat.com Date: Tue Jul 30 14:28:38 2013 -0400
add resource config support for cql and gossip ports in rhq-storage plugin
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/cassandra-jvm.properties b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/cassandra-jvm.properties index 1faee9d..2eb5ab5 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/cassandra-jvm.properties +++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/cassandra-jvm.properties @@ -13,7 +13,7 @@ heap_new="-Xmn64M"
heap_dump_on_OOMError="-XX:+HeapDumpOnOutOfMemoryError"
-heap_dump_dir="" +heap_dump_dir=
thread_stack_size="-Xss180k"
diff --git a/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java b/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java index 5276c84..c53c19b 100644 --- a/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java +++ b/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java @@ -108,7 +108,7 @@ public class ConfigEditor { return (Integer) config.get("native_transport_port"); }
- public void setNativeTransportPort(int port) { + public void setNativeTransportPort(Integer port) { config.put("native_transport_port", port); }
@@ -116,7 +116,7 @@ public class ConfigEditor { return (Integer) config.get("storage_port"); }
- public void setStoragePort(int port) { + public void setStoragePort(Integer port) { config.put("storage_port", port); }
diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java index dd5b8a2..edaf9f9 100644 --- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java @@ -5,6 +5,12 @@ import java.io.FileInputStream; import java.io.IOException; import java.util.Properties;
+import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.yaml.snakeyaml.error.YAMLException; + +import org.rhq.cassandra.util.ConfigEditor; +import org.rhq.cassandra.util.ConfigEditorException; import org.rhq.core.domain.configuration.Configuration; import org.rhq.core.domain.configuration.ConfigurationUpdateStatus; import org.rhq.core.domain.configuration.PropertySimple; @@ -18,12 +24,16 @@ import org.rhq.core.util.StringUtil; */ public class StorageNodeConfigDelegate implements ConfigurationFacet {
+ private Log log = LogFactory.getLog(StorageNodeConfigDelegate.class); + private File jvmOptsFile; private File wrapperEnvFile; + private File cassandraYamlFile;
public StorageNodeConfigDelegate(File basedir) { File confDir = new File(basedir, "conf"); jvmOptsFile = new File(confDir, "cassandra-jvm.properties"); + cassandraYamlFile = new File(confDir, "cassandra.yaml");
// for windows, config props also get propagated to the wrapper env if (isWindows()) { @@ -60,6 +70,11 @@ public class StorageNodeConfigDelegate implements ConfigurationFacet { config.put(new PropertySimple("heapDumpDir", new File(basedir, "bin").getAbsolutePath())); }
+ ConfigEditor yamlEditor = new ConfigEditor(cassandraYamlFile); + yamlEditor.load(); + config.put(new PropertySimple("cqlPort", yamlEditor.getNativeTransportPort())); + config.put(new PropertySimple("gossipPort", yamlEditor.getStoragePort())); + return config; }
@@ -135,6 +150,7 @@ public class StorageNodeConfigDelegate implements ConfigurationFacet { Configuration config = configurationUpdateReport.getConfiguration();
updateCassandraJvmProps(config); + updateCassandraYaml(config);
if (isWindows()) { updateWrapperEnv(config); @@ -145,6 +161,8 @@ public class StorageNodeConfigDelegate implements ConfigurationFacet { configurationUpdateReport.setErrorMessage("No configuration update was applied: " + e.getMessage()); } catch (IOException e) { configurationUpdateReport.setErrorMessageFromThrowable(e); + } catch (ConfigEditorException e) { + configurationUpdateReport.setErrorMessageFromThrowable(e); } }
@@ -195,6 +213,43 @@ public class StorageNodeConfigDelegate implements ConfigurationFacet { propertiesUpdater.update(properties); }
+ private void updateCassandraYaml(Configuration newConfig) { + ConfigEditor editor = new ConfigEditor(cassandraYamlFile); + try { + editor.load(); + + PropertySimple cqlPortProperty = newConfig.getSimple("cqlPort"); + if (cqlPortProperty != null) { + editor.setNativeTransportPort(cqlPortProperty.getIntegerValue()); + } + + PropertySimple gossipPortProperty = newConfig.getSimple("gossipPort"); + if (gossipPortProperty != null) { + editor.setStoragePort(gossipPortProperty.getIntegerValue()); + } + + editor.save(); + } catch (ConfigEditorException e) { + if (e.getCause() instanceof YAMLException) { + log.error("Failed to update " + cassandraYamlFile); + log.info("Attempting to restore " + cassandraYamlFile); + try { + editor.restore(); + throw e; + } catch (ConfigEditorException e1) { + log.error("Failed to restore " + cassandraYamlFile + ". A copy of the file prior to any " + + "modifications can be found at " + editor.getBackupFile()); + throw new ConfigEditorException("There was an error updating " + cassandraYamlFile + " and " + + "undoing the changes failed. A copy of the file can be found at " + editor.getBackupFile() + + ". 
See the agent logs for more details.", e); + } + } else { + log.error("No updates were made to " + cassandraYamlFile + " due to an unexpected error", e); + throw e; + } + } + } + private void updateWrapperEnv(Configuration config) throws IOException { PropertiesFileUpdate propertiesUpdater = new PropertiesFileUpdate(wrapperEnvFile.getAbsolutePath()); Properties properties = propertiesUpdater.loadExistingProperties(); diff --git a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml index f5a4f6d..84bb832 100644 --- a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml +++ b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml @@ -175,6 +175,17 @@ <c:simple-property name="heapDumpDir" displayName="Heap Dump Directory" required="false" description="The directory in which heap dumps will be written."/> </c:group> + <c:group name="Ports"> + <c:simple-property name="cqlPort" + displayName="CQL Port" + type="integer" + description="The port on which to listen for client requests over the native, CQL protocol. + This is a shared, cluster-wide setting."/> + <c:simple-property name="gossipPort" + type="integer" + description="The port used for internode communication. This is a shared, cluster-wide setting."/> + + </c:group> </resource-configuration>
<server name="Cassandra Server JVM" diff --git a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeConfigDelegateTest.java b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeConfigDelegateTest.java index 64d14b2..15861c1 100644 --- a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeConfigDelegateTest.java +++ b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeConfigDelegateTest.java @@ -6,17 +6,20 @@ import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; +import java.io.InputStream; import java.lang.reflect.Method; import java.util.Properties;
import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test;
+import org.rhq.cassandra.util.ConfigEditor; import org.rhq.core.domain.configuration.Configuration; import org.rhq.core.domain.configuration.ConfigurationUpdateStatus; import org.rhq.core.domain.configuration.PropertySimple; import org.rhq.core.pluginapi.configuration.ConfigurationUpdateReport; import org.rhq.core.util.file.FileUtil; +import org.rhq.core.util.stream.StreamUtil;
/** * @author John Sanda @@ -25,6 +28,8 @@ public class StorageNodeConfigDelegateTest {
private File basedir;
+ private File cassandraYamlFile; + private StorageNodeConfigDelegate configDelegate;
@BeforeMethod @@ -33,6 +38,11 @@ public class StorageNodeConfigDelegateTest { basedir = new File(dir, getClass().getSimpleName() + "/" + test.getName()); FileUtil.purge(basedir, true); configDelegate = new StorageNodeConfigDelegate(basedir); + + cassandraYamlFile = new File(confDir(), "cassandra.yaml"); + InputStream inputStream = getClass().getResourceAsStream("/cassandra.yaml"); + FileOutputStream outputStream = new FileOutputStream(cassandraYamlFile); + StreamUtil.copy(inputStream, outputStream); }
@@ -55,13 +65,24 @@ public class StorageNodeConfigDelegateTest { public void updateValidConfig() throws Exception { createDefaultConfig();
- Configuration config = new Configuration(); - config.put(new PropertySimple("minHeapSize", "1024M")); - config.put(new PropertySimple("maxHeapSize", "1024M")); - config.put(new PropertySimple("heapNewSize", "256M")); - config.put(new PropertySimple("threadStackSize", "240")); - config.put(new PropertySimple("heapDumpOnOOMError", true)); - config.put(new PropertySimple("heapDumpDir", confDir())); + Configuration config = Configuration.builder() + .addSimple("minHeapSize", "1024M") + .addSimple("maxHeapSize", "1024M") + .addSimple("heapNewSize", "256M") + .addSimple("threadStackSize", "240") + .addSimple("heapDumpOnOOMError", true) + .addSimple("heapDumpDir", confDir()) + .addSimple("cqlPort", 9595) + .addSimple("gossipPort", 9696) + .build(); +// config.put(new PropertySimple("minHeapSize", "1024M")); +// config.put(new PropertySimple("maxHeapSize", "1024M")); +// config.put(new PropertySimple("heapNewSize", "256M")); +// config.put(new PropertySimple("threadStackSize", "240")); +// config.put(new PropertySimple("heapDumpOnOOMError", true)); +// config.put(new PropertySimple("heapDumpDir", confDir())); +// config.put(new PropertySimple("cqlPort", 9595)); +// config.put(new PropertySimple("gossipPort", 9696));
ConfigurationUpdateReport report = new ConfigurationUpdateReport(config);
@@ -78,6 +99,12 @@ public class StorageNodeConfigDelegateTest { "Failed to update property [heap_dump_on_OOMError]"); assertEquals(properties.getProperty("heap_dump_dir"), confDir().getAbsolutePath(), "Failed to update property [heap_dump_dir]"); + + ConfigEditor yamlEditor = new ConfigEditor(cassandraYamlFile); + yamlEditor.load(); + + assertEquals(yamlEditor.getNativeTransportPort(), (Integer) 9595, "Failed to update native_transport_port"); + assertEquals(yamlEditor.getStoragePort(), (Integer) 9696, "Failed to update storage_port"); }
@Test
commit 6de86c41792464bdab2197227ba32df34e77e16d Author: John Sanda jsanda@redhat.com Date: Tue Jul 30 13:17:15 2013 -0400
make dataFileDirectories property read/write
diff --git a/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java b/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java index 9e1f86b..5276c84 100644 --- a/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java +++ b/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java @@ -88,6 +88,10 @@ public class ConfigEditor { return (List<String>) config.get("data_file_directories"); }
+ public void setDataFileDirectories(List<String> dirs) { + config.put("data_file_directories", dirs); + } + public String getSavedCachesDirectory() { return (String) config.get("saved_caches_directory"); } diff --git a/modules/common/cassandra-util/src/test/java/org/rhq/cassandra/util/ConfigEditorTest.java b/modules/common/cassandra-util/src/test/java/org/rhq/cassandra/util/ConfigEditorTest.java index f339b48..9c3cc16 100644 --- a/modules/common/cassandra-util/src/test/java/org/rhq/cassandra/util/ConfigEditorTest.java +++ b/modules/common/cassandra-util/src/test/java/org/rhq/cassandra/util/ConfigEditorTest.java @@ -1,5 +1,6 @@ package org.rhq.cassandra.util;
+import static java.util.Arrays.asList; import static org.testng.Assert.assertEquals;
import java.io.File; @@ -88,6 +89,24 @@ public class ConfigEditorTest { assertEquals(editor.getStoragePort(), config.storage_port, "Failed to fetch storage_port"); }
+ @Test + public void updateDataFilesDirectories() throws Exception { + ConfigEditor editor = new ConfigEditor(configFile); + editor.load(); + editor.setDataFileDirectories(asList("/data/dir1", "/data/dir2", "data/dir3")); + editor.save(); + + Config config = loadConfig(); + + assertEquals(config.data_file_directories, new String[] {"/data/dir1", "/data/dir2", "data/dir3"}, + "Failed to update data_file_directories"); + + editor.load(); + + assertEquals(editor.getDataFileDirectories().toArray(new String[3]), config.data_file_directories, + "Failed to fetch data_file_directories"); + } + private Config loadConfig() throws Exception { FileInputStream inputStream = new FileInputStream(configFile); org.yaml.snakeyaml.constructor.Constructor constructor =
commit 8affd85fb953f7b32e1439cefc436285546cb662 Author: John Sanda jsanda@redhat.com Date: Tue Jul 30 12:57:25 2013 -0400
make storagePort and nativeTransportPort read/write properties
diff --git a/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java b/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java index 0b4a127..9e1f86b 100644 --- a/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java +++ b/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java @@ -100,10 +100,18 @@ public class ConfigEditor { params.put("seeds", StringUtil.arrayToString(seeds)); }
+ public Integer getNativeTransportPort() { + return (Integer) config.get("native_transport_port"); + } + public void setNativeTransportPort(int port) { config.put("native_transport_port", port); }
+ public Integer getStoragePort() { + return (Integer) config.get("storage_port"); + } + public void setStoragePort(int port) { config.put("storage_port", port); } diff --git a/modules/common/cassandra-util/src/test/java/org/rhq/cassandra/util/ConfigEditorTest.java b/modules/common/cassandra-util/src/test/java/org/rhq/cassandra/util/ConfigEditorTest.java index cf344e2..f339b48 100644 --- a/modules/common/cassandra-util/src/test/java/org/rhq/cassandra/util/ConfigEditorTest.java +++ b/modules/common/cassandra-util/src/test/java/org/rhq/cassandra/util/ConfigEditorTest.java @@ -65,6 +65,11 @@ public class ConfigEditorTest { Config config = loadConfig();
assertEquals(config.native_transport_port, (Integer) 9393, "Failed to update native_transport_port"); + + editor.load(); + + assertEquals(editor.getNativeTransportPort(), config.native_transport_port, + "Failed to fetch native_transport_port"); }
@Test @@ -77,6 +82,10 @@ public class ConfigEditorTest { Config config = loadConfig();
assertEquals(config.storage_port, (Integer) 6767, "Failed to update storage_port"); + + editor.load(); + + assertEquals(editor.getStoragePort(), config.storage_port, "Failed to fetch storage_port"); }
private Config loadConfig() throws Exception {
commit 490af59ea3141e94f865475df51ba061db82a716 Author: John Sanda jsanda@redhat.com Date: Tue Jul 30 10:53:24 2013 -0400
adding back support for deploying additional nodes with internode authentication
When the user installs a new storage node the following steps occur,
* The node is committed into inventory (but not part of storage cluster) * StorageNode entity is created with mode set to INSTALLED * New node is announced to the cluster * Server schedules operation for cluster nodes to update internode auth conf file to include the new node * Server prepares new node for bootstrap * Schedules operation on new node to * Apply cluster settings * Update internode auth conf settings * Restart the node so it bootstraps into cluster * Token ranges will be assigned to node * New node will stream data from other nodes * Server is notified that new node is up and part of the cluster * The operation mode is set to NORMAL * addNodeMaintenance operation is run on each cluster node * Updates replication_factor if necessary * Runs repair if necessary * Deletes old keys * Updates seeds property
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java index 575edc74..f4d3934 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java @@ -56,6 +56,8 @@ import org.rhq.core.domain.resource.Resource; + " FROM StorageNode s " // + "LEFT JOIN FETCH s.resource r " // + " WHERE s.address = :address"), + @NamedQuery(name = StorageNode.QUERY_FIND_ALL_BY_MODE, query = + "SELECT s FROM StorageNode s WHERE s.operationMode = :operationMode"), @NamedQuery(name = StorageNode.QUERY_FIND_ALL_NOT_INSTALLED, query = "SELECT s FROM StorageNode s WHERE NOT s.operationMode = 'INSTALLED'"), @NamedQuery(name = StorageNode.QUERY_FIND_ALL_NORMAL, query = "SELECT s FROM StorageNode s WHERE s.operationMode = 'NORMAL'"), @NamedQuery(name = StorageNode.QUERY_DELETE_BY_ID, query = "" // @@ -91,6 +93,7 @@ public class StorageNode implements Serializable {
public static final String QUERY_FIND_ALL = "StorageNode.findAll"; public static final String QUERY_FIND_BY_ADDRESS = "StorageNode.findByAddress"; + public static final String QUERY_FIND_ALL_BY_MODE = "StorageNode.findAllByMode"; public static final String QUERY_FIND_ALL_NOT_INSTALLED = "StorageNode.findAllCloudMembers"; public static final String QUERY_DELETE_BY_ID = "StorageNode.deleteById"; public static final String QUERY_FIND_ALL_NORMAL = "StorageNode.findAllNormalCloudMembers"; diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 049cf42..ee0f406 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -24,6 +24,8 @@ */ package org.rhq.enterprise.server.cloud;
+import java.net.InetAddress; +import java.net.UnknownHostException; import java.util.ArrayList; import java.util.Arrays; import java.util.Date; @@ -47,6 +49,7 @@ import org.quartz.JobDataMap; import org.quartz.SimpleTrigger; import org.quartz.Trigger;
+import org.rhq.cassandra.schema.SchemaManager; import org.rhq.core.domain.alert.Alert; import org.rhq.core.domain.auth.Subject; import org.rhq.core.domain.authz.Permission; @@ -57,6 +60,8 @@ import org.rhq.core.domain.cloud.StorageNodeConfigurationComposite; import org.rhq.core.domain.cloud.StorageNodeLoadComposite; import org.rhq.core.domain.common.JobTrigger; import org.rhq.core.domain.configuration.Configuration; +import org.rhq.core.domain.configuration.PropertyList; +import org.rhq.core.domain.configuration.PropertySimple; import org.rhq.core.domain.criteria.AlertCriteria; import org.rhq.core.domain.criteria.ResourceGroupCriteria; import org.rhq.core.domain.criteria.ResourceOperationHistoryCriteria; @@ -73,6 +78,7 @@ import org.rhq.core.domain.resource.group.ResourceGroup; import org.rhq.core.domain.util.PageControl; import org.rhq.core.domain.util.PageList; import org.rhq.core.domain.util.PageOrdering; +import org.rhq.core.util.StringUtil; import org.rhq.enterprise.server.RHQConstants; import org.rhq.enterprise.server.alert.AlertManagerLocal; import org.rhq.enterprise.server.auth.SubjectManagerLocal; @@ -82,6 +88,7 @@ import org.rhq.enterprise.server.cloud.instance.ServerManagerLocal; import org.rhq.enterprise.server.configuration.ConfigurationManagerLocal; import org.rhq.enterprise.server.measurement.MeasurementDataManagerLocal; import org.rhq.enterprise.server.operation.OperationManagerLocal; +import org.rhq.enterprise.server.resource.ResourceManagerLocal; import org.rhq.enterprise.server.resource.ResourceTypeManagerLocal; import org.rhq.enterprise.server.resource.group.ResourceGroupManagerLocal; import org.rhq.enterprise.server.rest.reporting.MeasurementConverter; @@ -100,6 +107,14 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
private final Log log = LogFactory.getLog(StorageNodeManagerBean.class);
+ private static final String USERNAME_PROPERTY = "rhq.cassandra.username"; + private static final String PASSWORD_PROPERTY = "rhq.cassandra.password"; + private final static String MAINTENANCE_OPERATION = "addNodeMaintenance"; + private final static String MAINTENANCE_OPERATION_NOTE = "Topology change maintenance."; + private final static String RUN_REPAIR_PROPERTY = "runRepair"; + private final static String UPDATE_SEEDS_LIST = "updateSeedsList"; + private final static String SEEDS_LIST = "seedsList"; + private static final String RHQ_STORAGE_CQL_PORT_PROPERTY = "nativeTransportPort"; private static final String RHQ_STORAGE_JMX_PORT_PROPERTY = "jmxPort"; private static final String RHQ_STORAGE_ADDRESS_PROPERTY = "host"; @@ -139,46 +154,86 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN @EJB private StorageNodeManagerLocal storageNodeManger;
+ @EJB + private ResourceManagerLocal resourceManager; + @Override public void linkResource(Resource resource) { - List<StorageNode> storageNodes = this.getStorageNodes(); - Configuration resourceConfig = resource.getPluginConfiguration(); - String configAddress = resourceConfig.getSimpleValue(RHQ_STORAGE_ADDRESS_PROPERTY); - - if (configAddress != null) { - // TODO Do not add the node to the group until we have verified it has joined the cluster - // StorageNodeMaintenanceJob currently determines if a new node has successfully joined the cluster. - addStorageNodeToGroup(resource); - - boolean storageNodeFound = false; - if (storageNodes != null) { - for (StorageNode storageNode : storageNodes) { - if (configAddress.equals(storageNode.getAddress())) { - storageNode.setResource(resource); - storageNode.setOperationMode(OperationMode.NORMAL); - storageNodeFound = true; - break; - } - } - } + String address = resourceConfig.getSimpleValue(RHQ_STORAGE_ADDRESS_PROPERTY);
- if (!storageNodeFound) { - int cqlPort = Integer.parseInt(resourceConfig.getSimpleValue(RHQ_STORAGE_CQL_PORT_PROPERTY)); - int jmxPort = Integer.parseInt(resourceConfig.getSimpleValue(RHQ_STORAGE_JMX_PORT_PROPERTY)); + if (log.isInfoEnabled()) { + log.info("Linking " + resource + " to storage node at " + address); + } + try { + StorageNode storageNode = findStorageNodeByAddress(InetAddress.getByName(address));
- StorageNode storageNode = new StorageNode(); - storageNode.setAddress(configAddress); - storageNode.setCqlPort(cqlPort); - storageNode.setJmxPort(jmxPort); + if (storageNode != null) { + if (log.isInfoEnabled()) { + log.info(storageNode + " is an existing storage node. No cluster maintenance is necessary."); + } storageNode.setResource(resource); storageNode.setOperationMode(OperationMode.NORMAL); + addStorageNodeToGroup(resource); + } else { + storageNode = new StorageNode(); + storageNode.setAddress(address); + storageNode.setCqlPort(Integer.parseInt(resourceConfig.getSimpleValue(RHQ_STORAGE_CQL_PORT_PROPERTY))); + storageNode.setJmxPort(Integer.parseInt(resourceConfig.getSimpleValue(RHQ_STORAGE_JMX_PORT_PROPERTY))); + storageNode.setResource(resource); + storageNode.setOperationMode(OperationMode.INSTALLED);
entityManager.persist(storageNode);
-// scheduleQuartzJob(storageNodes.size()); + if (log.isInfoEnabled()) { + log.info(storageNode + " is a new storage node and not part of the storage node cluster."); + log.info("Scheduling maintenance operations to bring " + storageNode + " into the cluster..."); + } + + announceNewNode(storageNode); } + } catch (UnknownHostException e) { + throw new RuntimeException("Could not resolve address [" + address + "]. The resource " + resource + + " cannot be linked to a storage node", e); + } + } + + private void announceNewNode(StorageNode newStorageNode) { + if (log.isInfoEnabled()) { + log.info("Announcing " + newStorageNode + " to storage node cluster."); + } + + ResourceGroup storageNodeGroup = getStorageNodeGroup(); + + GroupOperationSchedule schedule = new GroupOperationSchedule(); + schedule.setGroup(storageNodeGroup); + schedule.setHaltOnFailure(false); + schedule.setExecutionOrder(new ArrayList<Resource>(storageNodeGroup.getExplicitResources())); + schedule.setJobTrigger(JobTrigger.createNowTrigger()); + schedule.setSubject(subjectManager.getOverlord()); + schedule.setOperationName("updateKnownNodes"); + + Configuration parameters = new Configuration(); + parameters.put(createPropertyListOfAddresses("ipAddresses", combine(getStorageNodes(), newStorageNode))); + schedule.setParameters(parameters); + + operationManager.scheduleGroupOperation(subjectManager.getOverlord(), schedule); + } + + private List<StorageNode> combine(List<StorageNode> storageNodes, StorageNode storageNode) { + List<StorageNode> newList = new ArrayList<StorageNode>(storageNodes.size() + 1); + newList.addAll(storageNodes); + newList.add(storageNode); + + return newList; + } + + private PropertyList createPropertyListOfAddresses(String propertyName, List<StorageNode> nodes) { + PropertyList list = new PropertyList(propertyName); + for (StorageNode storageNode : nodes) { + list.add(new PropertySimple("address", storageNode.getAddress())); } + return list; }
@Override @@ -214,7 +269,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
ResourceGroup group = getStorageNodeGroup(); resourceGroupManager.addResourcesToGroup(subjectManager.getOverlord(), group.getId(), - new int[] {resource.getId()}); + new int[]{resource.getId()}); }
@Override @@ -232,6 +287,13 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN }
@Override + public void addToStorageNodeGroup(StorageNode storageNode) { + storageNode.setOperationMode(OperationMode.NORMAL); + entityManager.merge(storageNode); + addStorageNodeToGroup(storageNode.getResource()); + } + + @Override public ResourceGroup getStorageNodeGroup() { Subject overlord = subjectManager.getOverlord();
@@ -398,6 +460,19 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN return runner.execute(); }
+ public StorageNode findStorageNodeByAddress(InetAddress address) { + TypedQuery<StorageNode> query = entityManager.<StorageNode> createNamedQuery(StorageNode.QUERY_FIND_BY_ADDRESS, + StorageNode.class); + query.setParameter("address", address.getHostAddress()); + List<StorageNode> result = query.getResultList(); + + if (result != null && result.size() > 0) { + return result.get(0); + } + + return null; + } + @Override @RequiredPermissions({ @RequiredPermission(Permission.MANAGE_SETTINGS), @RequiredPermission(Permission.MANAGE_INVENTORY) }) @@ -472,19 +547,6 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN entityManager.flush(); }
- private StorageNode findStorageNodeByAddress(String address) { - TypedQuery<StorageNode> query = entityManager.<StorageNode> createNamedQuery(StorageNode.QUERY_FIND_BY_ADDRESS, - StorageNode.class); - query.setParameter("address", address); - List<StorageNode> result = query.getResultList(); - - if (result != null && result.size() > 0) { - return result.get(0); - } - - return null; - } - private StorageNodeLoadComposite.MeasurementAggregateWithUnits getMeasurementAggregateWithUnits(Subject subject, int schedId, MeasurementUnits units, long beginTime, long endTime) { MeasurementAggregate measurementAggregate = measurementManager.getAggregate(subject, schedId, beginTime, @@ -641,63 +703,68 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
@Override public boolean updateConfiguration(Subject subject, StorageNodeConfigurationComposite storageNodeConfiguration) { - StorageNode storageNode = findStorageNodeByAddress(storageNodeConfiguration.getStorageNode().getAddress()); + try { + StorageNode storageNode = findStorageNodeByAddress(InetAddress.getByName( + storageNodeConfiguration.getStorageNode().getAddress())); + + if (storageNode != null && storageNode.getResource() != null) { + Configuration parameters = new Configuration(); + parameters.setSimpleValue("jmxPort", storageNodeConfiguration.getJmxPort() + ""); + if (storageNodeConfiguration.getHeapSize() != null) { + parameters.setSimpleValue("heapSize", storageNodeConfiguration.getHeapSize() + ""); + } + if (storageNodeConfiguration.getHeapNewSize() != null) { + parameters.setSimpleValue("heapNewSize", storageNodeConfiguration.getHeapNewSize() + ""); + } + if (storageNodeConfiguration.getThreadStackSize() != null) { + parameters.setSimpleValue("threadStackSize", storageNodeConfiguration.getThreadStackSize() + ""); + } + parameters.setSimpleValue("restartIfRequired", "false");
- if (storageNode != null && storageNode.getResource() != null) { - Configuration parameters = new Configuration(); - parameters.setSimpleValue("jmxPort", storageNodeConfiguration.getJmxPort() + ""); - if (storageNodeConfiguration.getHeapSize() != null) { - parameters.setSimpleValue("heapSize", storageNodeConfiguration.getHeapSize() + ""); - } - if (storageNodeConfiguration.getHeapNewSize() != null) { - parameters.setSimpleValue("heapNewSize", storageNodeConfiguration.getHeapNewSize() + ""); - } - if (storageNodeConfiguration.getThreadStackSize() != null) { - parameters.setSimpleValue("threadStackSize", storageNodeConfiguration.getThreadStackSize() + ""); - } - parameters.setSimpleValue("restartIfRequired", "false"); + Resource storageNodeResource = storageNode.getResource();
- Resource storageNodeResource = storageNode.getResource(); + boolean result = runOperationAndWaitForResult(subject, storageNodeResource, UPDATE_CONFIGURATION_OPERATION, + parameters);
- boolean result = runOperationAndWaitForResult(subject, storageNodeResource, UPDATE_CONFIGURATION_OPERATION, - parameters); + if (result) { + //2. Update the JMX port + //this is a fast operation compared to the restart + storageNode.setJmxPort(storageNodeConfiguration.getJmxPort()); + entityManager.merge(storageNode);
- if (result) { - //2. Update the JMX port - //this is a fast operation compared to the restart - storageNode.setJmxPort(storageNodeConfiguration.getJmxPort()); - entityManager.merge(storageNode); + //3. Restart the storage node + result = runOperationAndWaitForResult(subject, storageNodeResource, RESTART_OPERATION, + new Configuration());
- //3. Restart the storage node - result = runOperationAndWaitForResult(subject, storageNodeResource, RESTART_OPERATION, - new Configuration()); + //4. Update the plugin configuration to talk with the new server + //Up to this point communication with the storage node should not have been affected by the intermediate + //changes + Configuration storageNodePluginConfig = configurationManager.getPluginConfiguration(subject, + storageNodeResource.getId());
- //4. Update the plugin configuration to talk with the new server - //Up to this point communication with the storage node should not have been affected by the intermediate - //changes - Configuration storageNodePluginConfig = configurationManager.getPluginConfiguration(subject, - storageNodeResource.getId()); + String existingJMXPort = storageNodePluginConfig.getSimpleValue("jmxPort"); + String newJMXPort = storageNodeConfiguration.getJmxPort() + "";
- String existingJMXPort = storageNodePluginConfig.getSimpleValue("jmxPort"); - String newJMXPort = storageNodeConfiguration.getJmxPort() + ""; + if (!existingJMXPort.equals(newJMXPort)) { + storageNodePluginConfig.setSimpleValue("jmxPort", newJMXPort);
- if (!existingJMXPort.equals(newJMXPort)) { - storageNodePluginConfig.setSimpleValue("jmxPort", newJMXPort); + String existingConnectionURL = storageNodePluginConfig.getSimpleValue("connectorAddress"); + String newConnectionURL = existingConnectionURL.replace(":" + existingJMXPort + "/", ":" + + storageNodeConfiguration.getJmxPort() + "/"); + storageNodePluginConfig.setSimpleValue("connectorAddress", newConnectionURL);
- String existingConnectionURL = storageNodePluginConfig.getSimpleValue("connectorAddress"); - String newConnectionURL = existingConnectionURL.replace(":" + existingJMXPort + "/", ":" - + storageNodeConfiguration.getJmxPort() + "/"); - storageNodePluginConfig.setSimpleValue("connectorAddress", newConnectionURL); + configurationManager.updatePluginConfiguration(subject, storageNodeResource.getId(), + storageNodePluginConfig); + }
- configurationManager.updatePluginConfiguration(subject, storageNodeResource.getId(), - storageNodePluginConfig); + return result; } - - return result; } - }
- return false; + return false; + } catch (UnknownHostException e) { + throw new RuntimeException("Failed to resolve address for " + storageNodeConfiguration, e); + } }
@Override @@ -754,4 +821,129 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
return successResultFound; } -} + + @Override + public void prepareNewNodesForBootstrap() { + List<StorageNode> newStorageNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE) + .setParameter("operationMode", OperationMode.INSTALLED).getResultList(); + if (newStorageNodes.isEmpty()) { + throw new RuntimeException("Failed to find storage node to bootstrap into cluster."); + } + // Right now, without some user input, we can only reliably bootstrap one node at a + // time. To support bootstrapping multiple nodes concurrently, a mechanism will have + // to be put in place for the user to declare in advance the nodes that are coming + // online. Then we can wait until all declared nodes have been committed into + // inventory and announced to the cluster + StorageNode storageNode = newStorageNodes.get(0); + + if (log.isInfoEnabled()) { + log.info("Preparing to bootstrap " + storageNode + " into cluster..."); + } + + List<StorageNode> existingStorageNodes = getStorageNodes(); + + ResourceOperationSchedule schedule = new ResourceOperationSchedule(); + schedule.setResource(storageNode.getResource()); + schedule.setJobTrigger(JobTrigger.createNowTrigger()); + schedule.setSubject(subjectManager.getOverlord()); + schedule.setOperationName("prepareForBootstrap"); + + Configuration parameters = new Configuration(); + parameters.put(new PropertySimple("cqlPort", existingStorageNodes.get(0).getCqlPort())); + // TODO need to add support for storage_port in cassandra/storage plugins + parameters.put(new PropertySimple("gossipPort", 7100)); + parameters.put(createPropertyListOfAddresses("storageNodeIPAddresses", getStorageNodes())); + + schedule.setParameters(parameters); + + operationManager.scheduleResourceOperation(subjectManager.getOverlord(), schedule); + } + + @Override + public void runAddNodeMaintenance() { + log.info("Preparing to schedule addNodeMaintenance on the storage cluster..."); + + List<StorageNode> storageNodes = 
entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, + StorageNode.class).setParameter("operationMode", OperationMode.NORMAL).getResultList(); + + int clusterSize = storageNodes.size(); + boolean isReadRepairNeeded; + + if (clusterSize >= 4) { + // At 4 nodes we increase the RF to 3. We are not increasing the RF beyond + // that for additional nodes; so, there is no need to run repair if we are + // expanding from a 4 node cluster since the RF remains the same. + isReadRepairNeeded = false; + } else if (clusterSize == 1) { + // The RF will increase since we are going from a single to a multi-node + // cluster; therefore, we want to run repair. + isReadRepairNeeded = true; + } else if (clusterSize == 2) { + if (storageNodes.size() > 3) { + // If we go from 2 to > 3 nodes we will increase the RF to 3; therefore + // we want to run repair. + isReadRepairNeeded = true; + } else { + // If we go from 2 to 3 nodes, we keep the RF at 2 so there is no need + // to run repair. + isReadRepairNeeded = false; + } + } else if (clusterSize == 3) { + // We are increasing the cluster size > 3 which means the RF will be + // updated to 3; therefore, we want to run repair. + isReadRepairNeeded = true; + } else { + // If we cluster size of zero, then something is really screwed up. It + // should always be > 0. 
+ isReadRepairNeeded = storageNodes.size() > 1; + } + + if (isReadRepairNeeded) { + updateTopology(storageNodes); + } + + ResourceGroup storageNodeGroup = getStorageNodeGroup(); + + GroupOperationSchedule schedule = new GroupOperationSchedule(); + schedule.setGroup(storageNodeGroup); + schedule.setHaltOnFailure(false); + schedule.setExecutionOrder(new ArrayList<Resource>(storageNodeGroup.getExplicitResources())); + schedule.setJobTrigger(JobTrigger.createNowTrigger()); + schedule.setSubject(subjectManager.getOverlord()); + schedule.setOperationName(MAINTENANCE_OPERATION); + schedule.setDescription(MAINTENANCE_OPERATION_NOTE); + + Configuration config = new Configuration(); + config.put(createPropertyListOfAddresses(SEEDS_LIST, storageNodes)); + config.put(new PropertySimple(RUN_REPAIR_PROPERTY, isReadRepairNeeded)); + config.put(new PropertySimple(UPDATE_SEEDS_LIST, Boolean.TRUE)); + + schedule.setParameters(config); + + operationManager.scheduleGroupOperation(subjectManager.getOverlord(), schedule); + } + + private boolean updateTopology(List<StorageNode> storageNodes) { + String username = getRequiredStorageProperty(USERNAME_PROPERTY); + String password = getRequiredStorageProperty(PASSWORD_PROPERTY); + SchemaManager schemaManager = new SchemaManager(username, password, storageNodes); + try{ + return schemaManager.updateTopology(false); + } catch (Exception e) { + log.error("An error occurred while applying schema topology changes", e); + } + + return false; + } + + private String getRequiredStorageProperty(String property) { + String value = System.getProperty(property); + if (StringUtil.isEmpty(property)) { + throw new IllegalStateException("The system property [" + property + "] is not set. The RHQ " + + "server will not be able connect to the RHQ storage node(s). 
This property should be defined " + + "in rhq-server.properties."); + } + return value; + } + +} \ No newline at end of file diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java index 15fa85c..00ba9e7 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java @@ -18,6 +18,7 @@ */ package org.rhq.enterprise.server.cloud;
+import java.net.InetAddress; import java.util.List;
import javax.ejb.Local; @@ -106,6 +107,8 @@ public interface StorageNodeManagerLocal { */ PageList<Alert> findAllStorageNodeAlerts(Subject subject, StorageNode storageNode);
+ StorageNode findStorageNodeByAddress(InetAddress address); +
/** * Find ids for all resources and sub-resources of Storage Nodes that @@ -170,6 +173,8 @@ public interface StorageNodeManagerLocal { */ boolean storageNodeGroupExists();
+ void addToStorageNodeGroup(StorageNode storageNode); + /** * This method assumes the storage node resource group already exists; as such, it should only be called from places * in the code that are after the point(s) where the group has been created. @@ -181,4 +186,8 @@ public interface StorageNodeManagerLocal {
void scheduleOperationInNewTransaction(Subject subject, ResourceOperationSchedule schedule);
+ void prepareNewNodesForBootstrap(); + + void runAddNodeMaintenance(); + } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/operation/OperationManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/operation/OperationManagerBean.java index 088f13e..5d3b1ae 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/operation/OperationManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/operation/OperationManagerBean.java @@ -96,6 +96,7 @@ import org.rhq.enterprise.server.resource.ResourceNotFoundException; import org.rhq.enterprise.server.resource.group.ResourceGroupManagerLocal; import org.rhq.enterprise.server.resource.group.ResourceGroupNotFoundException; import org.rhq.enterprise.server.scheduler.SchedulerLocal; +import org.rhq.enterprise.server.storage.StorageNodeOperationsHandler; import org.rhq.enterprise.server.util.CriteriaQueryGenerator; import org.rhq.enterprise.server.util.CriteriaQueryRunner;
@@ -123,6 +124,9 @@ public class OperationManagerBean implements OperationManagerLocal, OperationMan @EJB private SubjectManagerLocal subjectManager;
+ @EJB + private StorageNodeOperationsHandler storageNodeOperationsHandler; + @SuppressWarnings("unchecked") public List<IntegerOptionItem> getResourceNameOptionItems(int groupId) { String queryName = ResourceGroup.QUERY_FIND_RESOURCE_NAMES_BY_GROUP_ID; @@ -917,7 +921,7 @@ public class OperationManagerBean implements OperationManagerLocal, OperationMan if (history.getParameters() != null) { history.getParameters().getId(); // eagerly reload the parameters } - + storageNodeOperationsHandler.handleOperationUpdateIfNecessary(history); notifyAlertConditionCacheManager("updateOperationHistory", history); return history; } @@ -1723,6 +1727,7 @@ public class OperationManagerBean implements OperationManagerLocal, OperationMan if (!stillInProgress) { groupHistory.setErrorMessage((groupErrorMessage == null) ? null : groupErrorMessage.toString()); groupHistory.setStatus(groupStatus); + storageNodeOperationsHandler.handleGroupOperationUpdateIfNecessary(groupHistory); notifyAlertConditionCacheManager("checkForCompletedGroupOperation", groupHistory); } } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java index ec28888..0b404bb 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java @@ -9,7 +9,9 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory;
import org.rhq.core.domain.cloud.Server; +import org.rhq.core.domain.cloud.StorageNode; import org.rhq.enterprise.server.auth.SubjectManagerLocal; +import org.rhq.enterprise.server.cloud.StorageNodeManagerLocal; import org.rhq.enterprise.server.cloud.TopologyManagerLocal; import org.rhq.enterprise.server.cloud.instance.ServerManagerLocal; import org.rhq.enterprise.server.util.LookupUtil; @@ -36,6 +38,18 @@ public class StorageClusterMonitor implements StorageStateListener { log.info("Taking server out of maintenance mode"); updateServerMode(Server.OperationMode.NORMAL); } + + StorageNodeManagerLocal storageNodeManager = LookupUtil.getStorageNodeManager(); + StorageNode newClusterNode = storageNodeManager.findStorageNodeByAddress(address); + + if (newClusterNode == null) { + log.error("Did not find storage node with address [" + address.getHostAddress() + "]. This should not " + + "happen."); + } else { + log.info("Adding " + newClusterNode + " to storage cluster and scheduling cluster maintenance..."); + storageNodeManager.addToStorageNodeGroup(newClusterNode); + storageNodeManager.runAddNodeMaintenance(); + } }
@Override diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandler.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandler.java new file mode 100644 index 0000000..96e8de8 --- /dev/null +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandler.java @@ -0,0 +1,17 @@ +package org.rhq.enterprise.server.storage; + +import javax.ejb.Asynchronous; + +import org.rhq.core.domain.operation.GroupOperationHistory; +import org.rhq.core.domain.operation.OperationHistory; + +/** + * @author John Sanda + */ +public interface StorageNodeOperationsHandler { + @Asynchronous + void handleOperationUpdateIfNecessary(OperationHistory operationHistory); + + @Asynchronous + void handleGroupOperationUpdateIfNecessary(GroupOperationHistory operationHistory); +} diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java new file mode 100644 index 0000000..6da5cca --- /dev/null +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java @@ -0,0 +1,90 @@ +package org.rhq.enterprise.server.storage; + +import javax.ejb.Asynchronous; +import javax.ejb.EJB; +import javax.ejb.Stateless; +import javax.persistence.EntityManager; +import javax.persistence.PersistenceContext; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import org.rhq.core.domain.cloud.StorageNode; +import org.rhq.core.domain.operation.GroupOperationHistory; +import org.rhq.core.domain.operation.OperationDefinition; +import org.rhq.core.domain.operation.OperationHistory; +import org.rhq.core.domain.operation.OperationRequestStatus; +import 
org.rhq.core.domain.resource.Resource; +import org.rhq.core.domain.resource.ResourceType; +import org.rhq.enterprise.server.RHQConstants; +import org.rhq.enterprise.server.cloud.StorageNodeManagerLocal; + +/** + * @author John Sanda + */ +@Stateless +public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHandler { + + private final Log log = LogFactory.getLog(StorageNodeOperationsHandlerBean.class); + + private static final String STORAGE_NODE_TYPE_NAME = "RHQ Storage Node"; + private static final String STORAGE_NODE_PLUGIN_NAME = "RHQStorage"; + + @PersistenceContext(unitName = RHQConstants.PERSISTENCE_UNIT_NAME) + private EntityManager entityManager; + + @EJB + private StorageNodeManagerLocal storageNodeManager; + + @Override + @Asynchronous + public void handleOperationUpdateIfNecessary(OperationHistory operationHistory) { +// if (isStorageNodeOperation(operationHistory.getOperationDefinition())) { +// if (operationHistory.getOperationDefinition().getName().equals("prepareForBootstrap")) { +// ResourceOperationHistory resourceOperationHistory = entityManager.find(ResourceOperationHistory.class, +// operationHistory.getId()); +// if (resourceOperationHistory.getStatus() == OperationRequestStatus.SUCCESS) { +// +// } +// StorageNode storageNode = findStorageNode(resourceOperationHistory.getResource()); +// storageNode.setOperationMode(StorageNode.OperationMode.NORMAL); +// } +// } + } + + private StorageNode findStorageNode(Resource resource) { + for (StorageNode storageNode : storageNodeManager.getStorageNodes()) { + if (storageNode.getResource().getId() == resource.getId()) { + return storageNode; + } + } + return null; + } + + @Override + @Asynchronous + public void handleGroupOperationUpdateIfNecessary(GroupOperationHistory groupOperationHistory) { + if (isStorageNodeOperation(groupOperationHistory.getOperationDefinition())) { + if (groupOperationHistory.getOperationDefinition().getName().equals("updateKnownNodes")) { + if 
(groupOperationHistory.getStatus() == OperationRequestStatus.SUCCESS) { + log.info("New storage has been successfully announced to the storage node cluster."); + storageNodeManager.prepareNewNodesForBootstrap(); + } else if (groupOperationHistory.getStatus() == OperationRequestStatus.FAILURE) { + log.warn("Failed to announce new storage node to the cluster. It cannot join the cluster until " + + "it has been announced to existing cluster nodes."); + } else if (groupOperationHistory.getStatus() == OperationRequestStatus.CANCELED) { + log.warn("New storage node has not been announced to the cluster. The group operation " + + groupOperationHistory.getOperationDefinition().getName() + " has been canceled. The new node " + + "cannot join the cluster until it has been announced to existing cluster nodes."); + } + } + } + } + + private boolean isStorageNodeOperation(OperationDefinition operationDefinition) { + ResourceType resourceType = operationDefinition.getResourceType(); + return resourceType.getName().equals(STORAGE_NODE_TYPE_NAME) && + resourceType.getPlugin().equals(STORAGE_NODE_PLUGIN_NAME); + } + +}
commit b36674c735aea47d668f840e4cd53560de3e80d8 Author: Jay Shaughnessy jshaughn@redhat.com Date: Mon Jul 29 17:22:29 2013 -0400
In progress: working through the bundle remote API and adding fine-grained authz...
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/auth/Subject.java b/modules/core/domain/src/main/java/org/rhq/core/domain/auth/Subject.java index 27ac704..5f86d8e 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/auth/Subject.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/auth/Subject.java @@ -58,7 +58,7 @@ import org.rhq.core.domain.resource.group.ResourceGroup; * @author Greg Hinkle */ @Entity -@NamedQueries( { +@NamedQueries({
@NamedQuery(name = Subject.QUERY_GET_SUBJECTS_ASSIGNED_TO_ROLE, query = "" // + "SELECT s " // @@ -139,6 +139,16 @@ import org.rhq.core.domain.resource.group.ResourceGroup; + " JOIN r.permissions p " // + " JOIN r.subjects s " // + " WHERE s.id = :subjectId and p = :permission ) ) "), + + @NamedQuery(name = Subject.QUERY_HAS_BUNDLE_PERMISSION, query = "SELECT COUNT(b) " + + "FROM Bundle b, IN (b.bundleGroups) bg, IN (bg.roles) r, IN (r.subjects) s, IN (r.permissions) p " + + "WHERE s = :subject AND b.id = :bundleId AND p = :permission"), + + @NamedQuery(name = Subject.QUERY_HAS_BUNDLE_GROUP_PERMISSION, query = "SELECT count(r) " + + "FROM Role r JOIN r.subjects s JOIN r.permissions p " + + "WHERE r in (SELECT r2 from BundleGroup bg JOIN bg.roles r2 WHERE bg.id = :bundleGroupId) " + + " AND s = :subject " + " AND p = :permission"), + @NamedQuery(name = Subject.QUERY_CAN_VIEW_RESOURCE, query = "SELECT COUNT(res) " + "FROM Resource res, IN (res.implicitGroups) g, IN (g.roles) r, IN (r.subjects) s " + "WHERE s = :subject AND res.id = :resourceId"), @@ -228,6 +238,8 @@ public class Subject implements Serializable { public static final String QUERY_HAS_PRIVATE_GROUP_PERMISSION = "Subject.hasPrivateGroupPermission"; public static final String QUERY_HAS_RESOURCE_PERMISSION = "Subject.hasResourcePermission"; public static final String QUERY_HAS_AUTO_GROUP_PERMISSION = "Subject.hasAutoGroupPermission"; + public static final String QUERY_HAS_BUNDLE_PERMISSION = "Subject.hasBundlePermission"; + public static final String QUERY_HAS_BUNDLE_GROUP_PERMISSION = "Subject.hasBundleGroupPermission";
/** This query can return more than 1 if the resource is accessible via separate groups */ public static final String QUERY_CAN_VIEW_RESOURCE = "Subject.canViewResource"; @@ -314,7 +326,8 @@ public class Subject implements Serializable { init(); }
- public Subject(@NotNull String name, boolean factive, boolean fsystem) { + public Subject(@NotNull + String name, boolean factive, boolean fsystem) { init(); this.name = name; this.factive = factive; @@ -353,7 +366,8 @@ public class Subject implements Serializable { return this.name; }
- public void setName(@NotNull String name) { + public void setName(@NotNull + String name) { this.name = name; }
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/authz/Permission.java b/modules/core/domain/src/main/java/org/rhq/core/domain/authz/Permission.java index 4ca733c..77f66fc 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/authz/Permission.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/authz/Permission.java @@ -104,10 +104,8 @@ public enum Permission { CONFIGURE_WRITE(Target.RESOURCE), // 11
/** - * can C/U/D provisioning bundles + * can perform any bundle action, assigns all other bundle permissions */ - // NOTE: This is a GLOBAL permission, but is defined down here so as to maintain the ordinal indexes of the other - // pre-existing permissions. MANAGE_BUNDLE(Target.GLOBAL), // 12
/** @@ -124,7 +122,6 @@ public enum Permission { /** * Can C/U/D repositories and content sources */ - // NOTE: This is a GLOBAL permission but defined here to maintain the ordinal indexes MANAGE_REPOSITORIES(Target.GLOBAL), // 15
/** @@ -135,7 +132,6 @@ public enum Permission { /** * Can view other RHQ users, except for their assigned roles */ - // NOTE: This is a GLOBAL permission but defined here to maintain the ordinal indexes VIEW_USERS(Target.GLOBAL), // 17
/** @@ -146,54 +142,59 @@ public enum Permission { /** * Can create Bundle [Versions]s * Can assign to viewable bundle groups - * Can create global Bundle [Versions] if holding Global.VIEW_BUNDLES + * Can create unassigned Bundle [Versions] if holding Global.VIEW_BUNDLES */ CREATE_BUNDLES(Target.GLOBAL), // 19
/** * Can delete viewable bundle [Versions]s * Can unassign from viewable bundle groups - * Can delete global bundles if holding Global.VIEW_BUNDLES + * Can delete unassigned bundles if holding Global.VIEW_BUNDLES */ DELETE_BUNDLES(Target.GLOBAL), // 20
/** - * Can view any bundle, including global bundles + * Can view any bundle, including unassigned bundles */ VIEW_BUNDLES(Target.GLOBAL), // 21
/** - * Can deploy any viewable bundle version to any viewable (deployable, compatible) resource group + * Can deploy any viewable bundle version to any viewable [deployable, compatible] resource group */ DEPLOY_BUNDLES(Target.GLOBAL), // 22
/** - * Can assign viewable bundles to the bundle group + * Can assign viewable bundles to the bundle groups associated with the role. * - this can be a copy from another viewable bundle group - * - this can be a global bundle if holding Global.VIEW_BUNDLES + * - this can be an unassigned bundle if holding Global.VIEW_BUNDLES */ ASSIGN_BUNDLES_TO_GROUP(Target.BUNDLE), // 23
/** - * Can unassign bundles from the bundle group - * - the bundle is not deleted and becomes a global bundle if assigned to no other bundle group + * Can unassign bundles assigned to bundle groups associated with the role. + * - the bundle is not deleted and becomes an unassigned bundle if assigned to no other bundle group */ UNASSIGN_BUNDLES_FROM_GROUP(Target.BUNDLE), // 24
/** - * Can create bundle [version]s for this bundle group + * Can create [implicitly assigned] bundle [version]s for bundle groups associated with the role. */ CREATE_BUNDLES_IN_GROUP(Target.BUNDLE), // 25
/** - * Can delete bundle [version]s from the bundle group + * Can delete assigned bundle [version]s from the bundle groups associated with the role. */ DELETE_BUNDLES_FROM_GROUP(Target.BUNDLE), // 26
/** - * Implied - Can view bundles in the bundle group + * Implied - Can view the bundles assigned to the bundle groups associated with the role. */ - VIEW_BUNDLES_IN_GROUP(Target.BUNDLE) // 27 + VIEW_BUNDLES_IN_GROUP(Target.BUNDLE), // 27 + + /** + * Can deploy viewable bundles to the [compatible, deployable] resource groups associated with the role. + */ + DEPLOY_BUNDLES_TO_GROUP(Target.RESOURCE) // 28
;
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/bundle/Bundle.java b/modules/core/domain/src/main/java/org/rhq/core/domain/bundle/Bundle.java index 7b5499f..3a8569f 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/bundle/Bundle.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/bundle/Bundle.java @@ -105,6 +105,9 @@ public class Bundle implements Serializable { private List<BundleVersion> bundleVersions = new ArrayList<BundleVersion>();
@ManyToMany(mappedBy = "bundles", fetch = FetchType.LAZY, cascade = CascadeType.REMOVE) + private Set<BundleGroup> bundleGroups; + + @ManyToMany(mappedBy = "bundles", fetch = FetchType.LAZY, cascade = CascadeType.REMOVE) private Set<Tag> tags;
public Bundle() { diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/PermissionsEditor.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/PermissionsEditor.java index 5e80d82..77beefb 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/PermissionsEditor.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/PermissionsEditor.java @@ -431,6 +431,7 @@ public class PermissionsEditor extends EnhancedVStack { String permissionName = record.getAttribute(nameField); Permission permission = Permission.valueOf(permissionName); String permissionDisplayName = record.getAttribute("displayName"); + if (permission == Permission.VIEW_RESOURCE) { String messageString = MSG.view_adminRoles_permissions_readAccessImplied(permissionDisplayName); handleIllegalPermissionSelection(event, messageString); @@ -449,6 +450,11 @@ public class PermissionsEditor extends EnhancedVStack { String messageString = MSG .view_adminRoles_permissions_illegalDeselectionDueToCorrespondingWritePermSelection(permissionDisplayName); handleIllegalPermissionSelection(event, messageString); + } else if (!authorized && selectedPermissions.contains(Permission.MANAGE_BUNDLE) + && Permission.BUNDLE_ALL.contains(permission)) { + String messageString = MSG + .view_adminRoles_permissions_illegalDeselectionDueToManageBundleSelection(permissionDisplayName); + handleIllegalPermissionSelection(event, messageString); } else { updatePermissions(authorized, permission);
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties index 25f5171..5a42502 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties @@ -509,6 +509,7 @@ view_adminRoles_permissions_autoselecting_manageSecurity_implied = Autoselected view_adminRoles_permissions_bundleGroupPermissions = Bundle Group Permissions view_adminRoles_permissions_globalPermissions = Global Permissions view_adminRoles_permissions_illegalDeselectionDueToCorrespondingWritePermSelection = {0} read permission cannot be deselected, unless the {0} write permission, which implies the read permission, is deselected first. +view_adminRoles_permissions_illegalDeselectionDueToManageBundleSelection = {0} permission cannot be deselected, unless Manage Bundle, which implies all Bundle permissions, is deselected first. view_adminRoles_permissions_illegalDeselectionDueToManageInventorySelection = {0} permission cannot be deselected, unless Manage Inventory, which implies all Resource permissions, is deselected first. view_adminRoles_permissions_illegalDeselectionDueToManageSecuritySelection = {0} permission cannot be deselected, unless the Manage Security permission, which implies all other permissions, is deselected first. view_adminRoles_permissions_isAuthorized = Authorized? 
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties index 8982a54..69c8452 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties @@ -528,6 +528,7 @@ view_adminRoles_permissions_autoselecting_manageSecurity_implied = Automaticky o view_adminRoles_permissions_bundleGroupPermissions = Bundle Group Permissions view_adminRoles_permissions_globalPermissions = Globálnà povolenà ##view_adminRoles_permissions_illegalDeselectionDueToCorrespondingWritePermSelection = {0} read permission cannot be deselected, unless the {0} write permission, which implies the read permission, is deselected first. +##view_adminRoles_permissions_illegalDeselectionDueToManageBundleSelection = {0} permission cannot be deselected, unless Manage Bundle, which implies all Bundle permissions, is deselected first. ##view_adminRoles_permissions_illegalDeselectionDueToManageInventorySelection = {0} permission cannot be deselected, unless Manage Inventory, which implies all Resource permissions, is deselected first. ##view_adminRoles_permissions_illegalDeselectionDueToManageSecuritySelection = {0} permission cannot be deselected, unless the Manage Security permission, which implies all other permissions, is deselected first. view_adminRoles_permissions_isAuthorized = Autorizován? 
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties index 8b086a7..ddd4686 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties @@ -507,6 +507,7 @@ view_adminRoles_noLdap = Die LDAP-Integration ist nicht konfiguriert. Um LDAP zu view_adminRoles_permissions_bundleGroupPermissions = Bundle Group Permissions view_adminRoles_permissions_globalPermissions = Globale Rechte ##view_adminRoles_permissions_illegalDeselectionDueToCorrespondingWritePermSelection = {0} read permission cannot be deselected, unless the {0} write permission, which implies the read permission, is deselected first. +##view_adminRoles_permissions_illegalDeselectionDueToManageBundleSelection = {0} permission cannot be deselected, unless Manage Bundle, which implies all Bundle permissions, is deselected first. ##view_adminRoles_permissions_illegalDeselectionDueToManageInventorySelection = {0} permission cannot be deselected, unless Manage Inventory, which implies all Resource permissions, is deselected first. ##view_adminRoles_permissions_illegalDeselectionDueToManageSecuritySelection = {0} permission cannot be deselected, unless the Manage Security permission, which implies all other permissions, is deselected first. view_adminRoles_permissions_isAuthorized = Berechtigt? 
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties index a78ab58..49cc7e6 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties @@ -506,6 +506,7 @@ view_adminRoles_permissions_autoselecting_manageSecurity_implied = æªéžæã® view_adminRoles_permissions_bundleGroupPermissions = Bundle Group Permissions view_adminRoles_permissions_globalPermissions = ã°ããŒãã«æš©é view_adminRoles_permissions_illegalDeselectionDueToCorrespondingWritePermSelection = {0} èªã¿åãæš©éã¯éžæ解é€ã§ããŸããã§ãããèªã¿åãæš©éãæ瀺ãã {0} æžã蟌ã¿æš©éãæåã«éžæ解é€ãããªããã°ããã¯ã§ããŸããã +##view_adminRoles_permissions_illegalDeselectionDueToManageBundleSelection = {0} permission cannot be deselected, unless Manage Bundle, which implies all Bundle permissions, is deselected first. view_adminRoles_permissions_illegalDeselectionDueToManageInventorySelection = {0} æš©éã¯éžæ解é€ã§ããŸããã§ãããä»ã®ãã¹ãŠã®ãªãœãŒã¹ãæ瀺ãã管çã€ã³ãã³ããªãæåã«éžæ解é€ãããªããã°ããã¯ã§ããŸããã view_adminRoles_permissions_illegalDeselectionDueToManageSecuritySelection = {0} æš©éã¯éžæ解é€ã§ããŸããã§ãããä»ã®ãã¹ãŠã®æš©éãæ瀺ãã管çã»ãã¥ãªãã£æš©éãæåã«éžæ解é€ãããªããã°ããã¯ã§ããŸããã view_adminRoles_permissions_isAuthorized = æš©éãããã? 
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties index a00f560..89c10c0 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties @@ -457,6 +457,7 @@ view_adminRoles_noLdap = LDAP 볎ì íµí©ìŽ ì€ì ëì§ ìììµëë€. LDA view_adminRoles_permissions_bundleGroupPermissions = Bundle Group Permissions view_adminRoles_permissions_globalPermissions = êžë¡ë² ê¶í view_adminRoles_permissions_illegalDeselectionDueToCorrespondingWritePermSelection = {0} ìœêž° ê¶íì ì í íŽì í ì ììµëë€. ìœêž° ê¶íì ììíë {0} ì°êž° ê¶íìŽ ëšŒì ì í íŽì ëìŽìŒ ê·žê²ì ìíí ì ììµëë€. +##view_adminRoles_permissions_illegalDeselectionDueToManageBundleSelection = {0} permission cannot be deselected, unless Manage Bundle, which implies all Bundle permissions, is deselected first. view_adminRoles_permissions_illegalDeselectionDueToManageInventorySelection = {0} ê¶íì ì í íŽì í ì ììµëë€. ë€ë¥ž 몚ë ììì ììíë êŽëŠ¬ ìžë²€í ëŠ¬ê° ëšŒì ì í íŽì ëìŽìŒ ê·žê²ì ìíí ì ììµëë€. view_adminRoles_permissions_illegalDeselectionDueToManageSecuritySelection = {0} ê¶íì ì í íŽì í ì ììµëë€. ë€ë¥ž 몚ë ê¶íì ììíë êŽëŠ¬ 볎ì ê¶íìŽ ëšŒì ì í íŽì ëìŽìŒ ê·žê²ì ìíí ì ììµëë€. view_adminRoles_permissions_isAuthorized = ê¶íìŽ ììµëê¹? 
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties index e6a7864..26546fb 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties @@ -511,6 +511,7 @@ view_adminRoles_permissions_autoselecting_manageSecurity_implied = Autoselected view_adminRoles_permissions_bundleGroupPermissions = Bundle Group Permissions view_adminRoles_permissions_globalPermissions = Permiss\u00F5es Globais view_adminRoles_permissions_illegalDeselectionDueToCorrespondingWritePermSelection = {0} permiss\u00E3o de leitura n\u00E3 pode ser desmarcada, a menos que {0} permiss\u00E3o de escrita, que implica na permiss\u00E3o de leitura, seja desmarcada primeiro. +##view_adminRoles_permissions_illegalDeselectionDueToManageBundleSelection = {0} permission cannot be deselected, unless Manage Bundle, which implies all Bundle permissions, is deselected first. view_adminRoles_permissions_illegalDeselectionDueToManageInventorySelection = {0} permiss\u00E3o n\u00E3o pode ser desmarcada, a menos que Gerenciar Invent\u00E1rio, que implica todas as permiss\u00F5es de Recurso, seja desmarcada primeiro. view_adminRoles_permissions_illegalDeselectionDueToManageSecuritySelection = {0} permiss\u00E3o n\u00E3o pode ser desmarcada, a menos que a permiss\u00E3 Gerenciar SeguranÃa, que implica em todas outras permissıes, seja desmarcada primeiro. view_adminRoles_permissions_isAuthorized = Autorizado? 
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties index 4f9a97e..5692d33 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties @@ -2591,6 +2591,7 @@ view_adminRoles_permissions_autoselecting_manageSecurity_implied = ÐвÑПЌа view_adminRoles_permissions_bundleGroupPermissions = Bundle Group Permissions view_adminRoles_permissions_globalPermissions = ÐлПбалÑÐœÑе пПлМПЌÑÐžÑ view_adminRoles_permissions_illegalDeselectionDueToCorrespondingWritePermSelection = {0} пПлМПЌПÑÐžÑ ÐœÐ° ÑÑеМОе Ме ЌПгÑÑ Ð±ÑÑÑ ÐŸÑклÑÑеМÑ, пПка пÑеЎваÑОÑелÑМП {0} пПлМПЌПÑÐžÑ Ð·Ð°Ð¿ÐžÑО, кПÑПÑÑе вклÑÑаÑÑ Ð¿ÐŸÐ»ÐœÐŸÐŒÐŸÑÐžÑ ÐœÐ° ÑÑеМОе, Ме бÑÐŽÑÑ ÐŸÑклÑÑеМÑ. +##view_adminRoles_permissions_illegalDeselectionDueToManageBundleSelection = {0} permission cannot be deselected, unless Manage Bundle, which implies all Bundle permissions, is deselected first. view_adminRoles_permissions_illegalDeselectionDueToManageInventorySelection = {0} пПлМПЌПÑÐžÑ ÐœÐµ ЌПгÑÑ Ð±ÑÑÑ ÐŸÑклÑÑеМÑ, пПка пÑеЎваÑОÑелÑМП Manage Inventory, кПÑПÑÐ°Ñ Ð²ÐºÐ»ÑÑÐ°ÐµÑ Ð²Ñе пПлМПЌПÑÐžÑ ÑеÑÑÑÑа, Ме бÑÐŽÐµÑ ÐŸÑклÑÑеМП. view_adminRoles_permissions_illegalDeselectionDueToManageSecuritySelection = {0} пПлМПЌПÑÐžÑ ÐœÐµ ЌПгÑÑ Ð±ÑÑÑ ÐŸÑклÑÑеМÑ, пПка пÑеЎваÑОлÑМП Manage Security пПлМПЌПÑОе, кПÑПÑПе вклÑÑÐ°ÐµÑ Ð²Ñе ÐŽÑÑгОе пПлМПЌПÑОÑ, Ме бÑÐŽÐµÑ ÐŸÑклÑÑеМП. view_adminRoles_permissions_isAuthorized = ÐвÑПÑОзПваМÑ? 
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties index 547ebd5..f126e51 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties @@ -500,6 +500,7 @@ view_adminRoles_noLdap = \u6ca1\u6709\u96c6\u6210LDAP\u5b89\u5168, \u5230<a {0}> view_adminRoles_permissions_bundleGroupPermissions = Bundle Group Permissions view_adminRoles_permissions_globalPermissions = \u5168\u5c40\u6388\u6743 ##view_adminRoles_permissions_illegalDeselectionDueToCorrespondingWritePermSelection = {0} read permission cannot be deselected, unless the {0} write permission, which implies the read permission, is deselected first. +##view_adminRoles_permissions_illegalDeselectionDueToManageBundleSelection = {0} permission cannot be deselected, unless Manage Bundle, which implies all Bundle permissions, is deselected first. ##view_adminRoles_permissions_illegalDeselectionDueToManageInventorySelection = {0} permission cannot be deselected, unless Manage Inventory, which implies all Resource permissions, is deselected first. ##view_adminRoles_permissions_illegalDeselectionDueToManageSecuritySelection = {0} permission cannot be deselected, unless the Manage Security permission, which implies all other permissions, is deselected first. view_adminRoles_permissions_isAuthorized = \u6388\u6743? 
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/AuthorizationManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/AuthorizationManagerBean.java index 2fda53e..d71095f 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/AuthorizationManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/AuthorizationManagerBean.java @@ -51,6 +51,7 @@ public class AuthorizationManagerBean implements AuthorizationManagerLocal { @PersistenceContext(unitName = RHQConstants.PERSISTENCE_UNIT_NAME) private EntityManager entityManager;
+ @Override @SuppressWarnings("unchecked") public Set<Permission> getExplicitGlobalPermissions(Subject subject) { Query query = entityManager.createNamedQuery(Subject.QUERY_GET_GLOBAL_PERMISSIONS); @@ -66,6 +67,7 @@ public class AuthorizationManagerBean implements AuthorizationManagerLocal { return results; }
+ @Override @SuppressWarnings("unchecked") public Set<Permission> getExplicitGroupPermissions(Subject subject, int groupId) { Set<Permission> result = new HashSet<Permission>(); @@ -99,12 +101,14 @@ public class AuthorizationManagerBean implements AuthorizationManagerLocal { return result; }
+ @Override public Set<Permission> getImplicitGroupPermissions(Subject subject, int groupId) { Set<Permission> permissions = isInventoryManager(subject) ? Permission.RESOURCE_ALL : getExplicitGroupPermissions(subject, groupId); return permissions; }
+ @Override @SuppressWarnings("unchecked") public Set<Permission> getExplicitResourcePermissions(Subject subject, int resourceId) { Query query = entityManager.createNamedQuery(Subject.QUERY_GET_PERMISSIONS_BY_RESOURCE_ID); @@ -119,12 +123,14 @@ public class AuthorizationManagerBean implements AuthorizationManagerLocal { return results; }
+ @Override public Set<Permission> getImplicitResourcePermissions(Subject subject, int resourceId) { Set<Permission> permissions = isInventoryManager(subject) ? Permission.RESOURCE_ALL : getExplicitResourcePermissions(subject, resourceId); return permissions; }
+ @Override public boolean hasGlobalPermission(Subject subject, Permission permission) { if (isOverlord(subject)) { return true; @@ -137,6 +143,7 @@ public class AuthorizationManagerBean implements AuthorizationManagerLocal { return (count != 0); }
+ @Override @SuppressWarnings("unchecked") public boolean hasGroupPermission(Subject subject, Permission permission, int groupId) { if (isInventoryManager(subject)) { @@ -170,6 +177,7 @@ public class AuthorizationManagerBean implements AuthorizationManagerLocal { } }
+ @Override public boolean hasResourcePermission(Subject subject, Permission permission, int resourceId) { if (isInventoryManager(subject)) { return true; @@ -183,6 +191,31 @@ public class AuthorizationManagerBean implements AuthorizationManagerLocal { return (count != 0); }
+ @SuppressWarnings("unchecked") + @Override + public boolean hasBundlePermission(Subject subject, Permission permission, int bundleId) { + + Query query = entityManager.createNamedQuery(Subject.QUERY_HAS_BUNDLE_PERMISSION); + query.setParameter("subject", subject); + query.setParameter("permission", permission); + query.setParameter("bundleId", bundleId); + long count = (Long) query.getSingleResult(); + return (count != 0); + } + + @SuppressWarnings("unchecked") + @Override + public boolean hasBundleGroupPermission(Subject subject, Permission permission, int bundleGroupId) { + + Query query = entityManager.createNamedQuery(Subject.QUERY_HAS_BUNDLE_GROUP_PERMISSION); + query.setParameter("subject", subject); + query.setParameter("permission", permission); + query.setParameter("bundleGroupId", bundleGroupId); + long count = (Long) query.getSingleResult(); + return (count != 0); + } + + @Override public boolean hasAutoGroupPermission(Subject subject, Permission permission, int parentResourceId, int resourceTypeId) { if (isInventoryManager(subject)) { @@ -207,6 +240,7 @@ public class AuthorizationManagerBean implements AuthorizationManagerLocal { return (baseCount == subjectCount); }
+ @Override public boolean canViewResource(Subject subject, int resourceId) { if (isInventoryManager(subject)) { return true; @@ -219,6 +253,7 @@ public class AuthorizationManagerBean implements AuthorizationManagerLocal { return (count != 0); }
+ @Override public boolean canViewResources(Subject subject, List<Integer> resourceIds) { if (isInventoryManager(subject)) { return true; @@ -232,6 +267,7 @@ public class AuthorizationManagerBean implements AuthorizationManagerLocal { return count == resourceIds.size(); }
+ @Override public boolean canViewGroup(Subject subject, int groupId) { if (isInventoryManager(subject)) { return true; @@ -244,6 +280,7 @@ public class AuthorizationManagerBean implements AuthorizationManagerLocal { return (count != 0); }
+ @Override public boolean canViewAutoGroup(Subject subject, int parentResourceId, int resourceTypeId) { if (isInventoryManager(subject)) { return true; @@ -266,10 +303,12 @@ public class AuthorizationManagerBean implements AuthorizationManagerLocal { return (baseCount == subjectCount); }
+ @Override public boolean isInventoryManager(Subject subject) { return hasGlobalPermission(subject, Permission.MANAGE_INVENTORY); }
+ @Override @SuppressWarnings("unchecked") public boolean hasResourcePermission(Subject subject, Permission permission, Collection<Integer> resourceIds) { if (isInventoryManager(subject)) { @@ -284,16 +323,20 @@ public class AuthorizationManagerBean implements AuthorizationManagerLocal { return results.containsAll(resourceIds); }
+ @Override public boolean isSystemSuperuser(Subject subject) { // We know that our overlord is always id=1 and the rhqadmin user is always id=2. - return (subject != null) && ((subject.getId() == SUBJECT_ID_OVERLORD) || (subject.getId() == SUBJECT_ID_RHQADMIN)); + return (subject != null) + && ((subject.getId() == SUBJECT_ID_OVERLORD) || (subject.getId() == SUBJECT_ID_RHQADMIN)); }
+ @Override public boolean isOverlord(Subject subject) { // We know that our overlord is always id=1. return (subject != null) && (subject.getId() == SUBJECT_ID_OVERLORD); }
+ @Override public boolean canUpdateRepo(Subject subject, int repoId) { if (hasGlobalPermission(subject, Permission.MANAGE_REPOSITORIES)) { return true; @@ -301,11 +344,12 @@ public class AuthorizationManagerBean implements AuthorizationManagerLocal { Query q = entityManager.createNamedQuery(Repo.QUERY_CHECK_REPO_OWNED_BY_SUBJECT_ID); q.setParameter("repoId", repoId); q.setParameter("subjectId", subject.getId()); - + Long num = (Long) q.getSingleResult(); return num > 0; } - + + @Override public boolean canViewRepo(Subject subject, int repoId) { if (hasGlobalPermission(subject, Permission.MANAGE_REPOSITORIES)) { return true; @@ -314,7 +358,7 @@ public class AuthorizationManagerBean implements AuthorizationManagerLocal { Query q = entityManager.createNamedQuery(Repo.QUERY_CHECK_REPO_VISIBLE_BY_SUBJECT_ID); q.setParameter("repoId", repoId); q.setParameter("subjectId", subject.getId()); - + Long num = (Long) q.getSingleResult(); return num > 0; } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/AuthorizationManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/AuthorizationManagerLocal.java index 194e345..8872d61 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/AuthorizationManagerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/authz/AuthorizationManagerLocal.java @@ -20,7 +20,6 @@ package org.rhq.enterprise.server.authz;
import java.util.Collection; import java.util.List; -import java.util.Properties; import java.util.Set;
import javax.ejb.Local; @@ -135,6 +134,30 @@ public interface AuthorizationManagerLocal { boolean hasAutoGroupPermission(Subject subject, Permission permission, int parentResourceId, int resourceTypeId);
/** + * Returns true if the current user possesses the specified bundle permission for the specified bundle. + * + * @param subject the current subject or caller + * @param permission a bundle permission (i.e. permission.getTarget() == Permission.Target.BUNDLE) + * @param bundleId the id of the bundle to check permissions against + * + * @return true if the current user possesses the specified resource permission for the specified resource + */ + + boolean hasBundlePermission(Subject subject, Permission permission, int bundleId); + + /** + * Returns true if the current user possesses the specified bundle permission for the specified bundle group. + * + * @param subject the current subject or caller + * @param permission a bundle permission (i.e. permission.getTarget() == Permission.Target.BUNDLE) + * @param bundleGroupId the id of the bundle group to check permissions against + * + * @return true if the current user possesses the specified resource permission for the specified resource + */ + + boolean hasBundleGroupPermission(Subject subject, Permission permission, int bundleGroupId); + + /** * Returns true if the current user possesses the specified global permission. * * @param subject the current subject or caller diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerBean.java index 139a26e..51e59ff 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerBean.java @@ -214,12 +214,6 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
@Override - public void assignBundlesToBundleGroup(Subject subject, int bundleGroupId, int... bundleIds) { - // TODO Auto-generated method stub - - } - - @Override @RequiredPermission(Permission.MANAGE_BUNDLE) public Bundle createBundle(Subject subject, String name, String description, int bundleTypeId) throws Exception { if (null == name || "".equals(name.trim())) { @@ -281,7 +275,6 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
@Override - @RequiredPermission(Permission.MANAGE_BUNDLE) public BundleDeployment createBundleDeployment(Subject subject, int bundleVersionId, int bundleDestinationId, String description, Configuration configuration) throws Exception {
@@ -294,6 +287,8 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot throw new IllegalArgumentException("Invalid bundleDestinationId: " + bundleDestinationId); }
+ checkBundleDeploymentAuthz(subject, bundleVersion.getBundle().getId(), bundleDestination.getGroup().getId()); + String name = getBundleDeploymentNameImpl(subject, bundleDestination, bundleVersion, null); return this.createBundleDeploymentImpl(subject, bundleVersion, bundleDestination, name, description, configuration); @@ -326,7 +321,6 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
@Override - @RequiredPermission(Permission.MANAGE_BUNDLE) public BundleDestination createBundleDestination(Subject subject, int bundleId, String name, String description, String destBaseDirName, String deployDir, Integer groupId) throws Exception {
@@ -354,6 +348,8 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot } ResourceGroup group = entityManager.find(ResourceGroup.class, groups.get(0).getId());
+ checkBundleDeploymentAuthz(subject, bundle.getId(), groupId); + BundleDestination dest = new BundleDestination(bundle, name, group, destBaseDirName, deployDir); dest.setDescription(description); entityManager.persist(dest); @@ -561,37 +557,78 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
@Override - @RequiredPermission(Permission.MANAGE_BUNDLE) public BundleVersion createBundleVersionViaRecipe(Subject subject, String recipe) throws Exception {
+ return createBundleVersionViaRecipeImpl(subject, recipe, false, 0); + } + + @Override + public BundleVersion createInitialBundleVersionViaRecipe(Subject subject, int bundleGroupId, String recipe) + throws Exception { + + return createBundleVersionViaRecipeImpl(subject, recipe, true, bundleGroupId); + } + + private BundleVersion createBundleVersionViaRecipeImpl(Subject subject, String recipe, + boolean mustBeInitialVersion, int initialBundleGroupId) throws Exception { + BundleServerPluginManager manager = BundleManagerHelper.getPluginContainer().getBundleServerPluginManager(); BundleDistributionInfo info = manager.parseRecipe(recipe); - BundleVersion bundleVersion = createBundleVersionViaDistributionInfo(subject, info); + BundleVersion bundleVersion = createBundleVersionViaDistributionInfo(subject, info, mustBeInitialVersion, + initialBundleGroupId);
return bundleVersion; }
@Override - @RequiredPermission(Permission.MANAGE_BUNDLE) @TransactionAttribute(TransactionAttributeType.NEVER) public BundleVersion createBundleVersionViaFile(Subject subject, File distributionFile) throws Exception {
+ return createBundleVersionViaFileImpl(subject, distributionFile, false, 0); + } + + @Override + @TransactionAttribute(TransactionAttributeType.NEVER) + public BundleVersion createInitialBundleVersionViaFile(Subject subject, int bundleGroupId, File distributionFile) + throws Exception { + + return createBundleVersionViaFileImpl(subject, distributionFile, true, bundleGroupId); + } + + private BundleVersion createBundleVersionViaFileImpl(Subject subject, File distributionFile, + boolean mustBeInitialVersion, int initialBundleGroupId) throws Exception { + BundleServerPluginManager manager = BundleManagerHelper.getPluginContainer().getBundleServerPluginManager(); BundleDistributionInfo info = manager.processBundleDistributionFile(distributionFile); - BundleVersion bundleVersion = createBundleVersionViaDistributionInfo(subject, info); + BundleVersion bundleVersion = createBundleVersionViaDistributionInfo(subject, info, mustBeInitialVersion, + initialBundleGroupId);
return bundleVersion; }
@Override - @RequiredPermission(Permission.MANAGE_BUNDLE) @TransactionAttribute(TransactionAttributeType.NEVER) public BundleVersion createBundleVersionViaByteArray(Subject subject, byte[] fileBytes) throws Exception {
+ return createBundleVersionViaByteArrayImpl(subject, fileBytes, false, 0); + } + + @Override + @TransactionAttribute(TransactionAttributeType.NEVER) + public BundleVersion createInitialBundleVersionViaByteArray(Subject subject, int bundleGroupId, byte[] fileBytes) + throws Exception { + + return createBundleVersionViaByteArrayImpl(subject, fileBytes, true, bundleGroupId); + } + + private BundleVersion createBundleVersionViaByteArrayImpl(Subject subject, byte[] fileBytes, + boolean mustBeInitialVersion, int bundleGroupId) throws Exception { + File tmpFile = File.createTempFile("bundleDistroBits", ".zip"); try { StreamUtil.copy(new ByteArrayInputStream(fileBytes), new FileOutputStream(tmpFile)); - BundleVersion bundleVersion = createBundleVersionViaFile(subject, tmpFile); + BundleVersion bundleVersion = createBundleVersionViaFileImpl(subject, tmpFile, mustBeInitialVersion, + bundleGroupId); return bundleVersion; } finally { if (tmpFile != null) { @@ -601,17 +638,36 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
@Override - @RequiredPermission(Permission.MANAGE_BUNDLE) @TransactionAttribute(TransactionAttributeType.NEVER) public BundleVersion createBundleVersionViaURL(Subject subject, String distributionFileUrl) throws Exception { + return createBundleVersionViaURL(subject, distributionFileUrl, null, null); }
@Override - @RequiredPermission(Permission.MANAGE_BUNDLE) @TransactionAttribute(TransactionAttributeType.NEVER) public BundleVersion createBundleVersionViaURL(Subject subject, String distributionFileUrl, String username, String password) throws Exception { + + return createBundleVersionViaURLImpl(subject, distributionFileUrl, username, password, false, 0); + } + + @Override + public BundleVersion createInitialBundleVersionViaURL(Subject subject, int bundleGroupId, String distributionFileUrl) + throws Exception { + + return createInitialBundleVersionViaURL(subject, bundleGroupId, distributionFileUrl, null, null); + } + + @Override + public BundleVersion createInitialBundleVersionViaURL(Subject subject, int bundleGroupId, + String distributionFileUrl, String username, String password) throws Exception { + + return createBundleVersionViaURLImpl(subject, distributionFileUrl, username, password, true, bundleGroupId); + } + + public BundleVersion createBundleVersionViaURLImpl(Subject subject, String distributionFileUrl, String username, + String password, boolean mustBeInitialVersion, int initialBundleGroupId) throws Exception { File file = null; try { file = downloadFile(distributionFileUrl, username, password); @@ -619,7 +675,7 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot log.debug("Copied [" + file.length() + "] bytes from [" + distributionFileUrl + "] into [" + file.getPath() + "]");
- return createBundleVersionViaFile(subject, file); + return createBundleVersionViaFileImpl(subject, file, mustBeInitialVersion, initialBundleGroupId); } finally { if (file != null) { file.delete(); @@ -669,8 +725,8 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot return file; }
- private BundleVersion createBundleVersionViaDistributionInfo(Subject subject, BundleDistributionInfo info) - throws Exception { + private BundleVersion createBundleVersionViaDistributionInfo(Subject subject, BundleDistributionInfo info, + boolean mustBeInitialVersion, Integer initialBundleGroupId) throws Exception {
BundleType bundleType = bundleManager.getBundleType(subject, info.getBundleTypeName()); String bundleName = info.getRecipeParseResults().getBundleMetadata().getBundleName(); @@ -680,7 +736,7 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot String version = info.getRecipeParseResults().getBundleMetadata().getBundleVersion(); String recipe = info.getRecipe();
- // first see if the bundle exists or not; if not, create one + // first see if the bundle exists or not boolean createdBundle; BundleCriteria criteria = new BundleCriteria(); criteria.setStrict(true); @@ -689,11 +745,19 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot
PageList<Bundle> bundles = bundleManager.findBundlesByCriteria(subject, criteria); Bundle bundle; - if (bundles.getTotalSize() == 0) { + boolean isInitialVersion = (bundles.getTotalSize() == 0); + + if (!isInitialVersion && mustBeInitialVersion) { + throw new PermissionException("This must be the initial version of a new Bundle."); + } + + if (isInitialVersion) { + checkCreateInitialBundleVersionAuthz(subject, initialBundleGroupId); bundle = bundleManager.createBundle(subject, bundleName, bundleDescription, bundleType.getId()); createdBundle = true; } else { bundle = bundles.get(0); + checkCreateBundleVersionAuthz(subject, bundle.getId()); createdBundle = false; }
@@ -808,7 +872,6 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
@Override - @RequiredPermission(Permission.MANAGE_BUNDLE) public BundleFile addBundleFile(Subject subject, int bundleVersionId, String name, String version, Architecture architecture, InputStream fileStream) throws Exception {
@@ -826,6 +889,9 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot throw new IllegalArgumentException("Invalid bundleVersionId: " + bundleVersionId); }
+ // Check authorization + checkCreateBundleVersionAuthz(subject, bundleVersion.getBundle().getId()); + // Create the PackageVersion the BundleFile is tied to. This implicitly creates the // Package for the PackageVersion. Bundle bundle = bundleVersion.getBundle(); @@ -866,7 +932,6 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
@Override - @RequiredPermission(Permission.MANAGE_BUNDLE) public BundleFile addBundleFileViaByteArray(Subject subject, int bundleVersionId, String name, String version, Architecture architecture, byte[] fileBytes) throws Exception {
@@ -874,7 +939,6 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
@Override - @RequiredPermission(Permission.MANAGE_BUNDLE) public BundleFile addBundleFileViaURL(Subject subject, int bundleVersionId, String name, String version, Architecture architecture, String bundleFileUrl) throws Exception {
@@ -885,11 +949,17 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
@Override - @RequiredPermission(Permission.MANAGE_BUNDLE) @TransactionAttribute(TransactionAttributeType.NEVER) public BundleFile addBundleFileViaURL(Subject subject, int bundleVersionId, String name, String version, Architecture architecture, String bundleFileUrl, String userName, String password) throws Exception {
+ // Check authorization prior to performing any file download + BundleVersion bundleVersion = entityManager.find(BundleVersion.class, bundleVersionId); + if (null == bundleVersion) { + throw new IllegalArgumentException("Invalid bundleVersionId: " + bundleVersionId); + } + checkCreateBundleVersionAuthz(subject, bundleVersion.getBundle().getId()); + File file = null; FileInputStream fis = null; try { @@ -925,8 +995,11 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot throw new IllegalArgumentException("Invalid packageVersionId: " + packageVersionId); }
+ // Check authorization + checkCreateBundleVersionAuthz(subject, bundleVersion.getBundle().getId()); + // With all the plumbing in place, create and persist the BundleFile. Tie it to the Package if the caller - // wants this BundleFile pinned to themost recent version. + // wants this BundleFile pinned to the most recent version. BundleFile bundleFile = new BundleFile(); bundleFile.setBundleVersion(bundleVersion); bundleFile.setPackageVersion(packageVersion); @@ -1317,31 +1390,6 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
@Override - @RequiredPermission(Permission.MANAGE_BUNDLE_GROUPS) - public BundleGroup createBundleGroup(Subject subject, String name, String description) throws Exception { - if (null == name || "".equals(name.trim())) { - throw new IllegalArgumentException("Invalid bundleGroupName: " + name); - } - - BundleGroupCriteria c = new BundleGroupCriteria(); - c.addFilterName(name); - c.setStrict(true); - if (!bundleManager.findBundleGroupsByCriteria(subject, c).isEmpty()) { - throw new IllegalArgumentException("Invalid bundleGroupName, bundle group already exists with name: " - + name); - } - - // create and add the required Repo. the Repo is a detached object which helps in its eventual - // removal. - BundleGroup bg = new BundleGroup(name); - bg.setDescription(description); - - entityManager.persist(bg); - - return bg; - } - - @Override @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) @RequiredPermission(Permission.MANAGE_BUNDLE) public BundleResourceDeployment createBundleResourceDeployment(Subject subject, int bundleDeploymentId, @@ -1661,14 +1709,6 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot return queryRunner.execute(); }
- @Override - public PageList<BundleGroup> findBundleGroupsByCriteria(Subject subject, BundleGroupCriteria criteria) { - CriteriaQueryGenerator generator = new CriteriaQueryGenerator(subject, criteria); - CriteriaQueryRunner<BundleGroup> queryRunner = new CriteriaQueryRunner<BundleGroup>(criteria, generator, - entityManager); - return queryRunner.execute(); - } - /** * Fetch bundles by criteria and then filter destination on the result objects to limit what the user can see * @param subject Caller @@ -1697,27 +1737,6 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
@Override - @RequiredPermission(Permission.MANAGE_BUNDLE_GROUPS) - public void deleteBundleGroups(Subject subject, int... bundleGroupIds) throws Exception { - - for (int bundleGroupId : bundleGroupIds) { - BundleGroup bundleGroup = this.entityManager.find(BundleGroup.class, bundleGroupIds); - if (null == bundleGroup) { - return; - } - - // unassign any bundles assigned to the bundle group - for (Bundle b : bundleGroup.getBundles()) { - bundleGroup.removeBundle(b); - } - bundleGroup = entityManager.merge(bundleGroup); - - // now remove the bundle group - entityManager.remove(bundleGroup); - } - } - - @Override public PageList<BundleWithLatestVersionComposite> findBundlesWithLatestVersionCompositesByCriteria(Subject subject, BundleCriteria criteria) {
@@ -1862,12 +1881,6 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot return; }
- @Override - public void unassignBundlesFromBundleGroup(Subject subject, int bundleGroupId, int... bundleIds) { - // TODO Auto-generated method stub - - } - private void safeClose(InputStream is) { if (null != is) { try { @@ -1888,4 +1901,246 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot } }
+ @Override + public void assignBundlesToBundleGroup(Subject subject, int bundleGroupId, int[] bundleIds) { + BundleGroup bundleGroup = entityManager.find(BundleGroup.class, bundleGroupId); + if (null == bundleGroup) { + throw new IllegalArgumentException("BundleGroup does not exist for bundleGroupId [" + bundleGroupId + "]"); + } + + checkAssignBundleGroupAuthz(subject, bundleGroupId, bundleIds); + + for (int bundleId : bundleIds) { + Bundle bundle = entityManager.find(Bundle.class, bundleId); + if (null == bundle) { + throw new IllegalArgumentException("Bundle does not exist for bundleId [" + bundleId + "]"); + } + + bundleGroup.addBundle(bundle); + } + } + + @Override + @RequiredPermission(Permission.MANAGE_BUNDLE_GROUPS) + public BundleGroup createBundleGroup(Subject subject, String name, String description) throws Exception { + if (null == name || "".equals(name.trim())) { + throw new IllegalArgumentException("Invalid bundleGroupName: " + name); + } + + BundleGroupCriteria c = new BundleGroupCriteria(); + c.addFilterName(name); + c.setStrict(true); + if (!bundleManager.findBundleGroupsByCriteria(subject, c).isEmpty()) { + throw new IllegalArgumentException("Invalid bundleGroupName, bundle group already exists with name: " + + name); + } + + // create and add the required Repo. the Repo is a detached object which helps in its eventual + // removal. 
+ BundleGroup bg = new BundleGroup(name); + bg.setDescription(description); + + entityManager.persist(bg); + + return bg; + } + + @Override + @RequiredPermission(Permission.MANAGE_BUNDLE_GROUPS) + public void deleteBundleGroups(Subject subject, int[] bundleGroupIds) throws Exception { + + for (int bundleGroupId : bundleGroupIds) { + BundleGroup bundleGroup = this.entityManager.find(BundleGroup.class, bundleGroupIds); + if (null == bundleGroup) { + return; + } + + // unassign any bundles assigned to the bundle group + for (Bundle b : bundleGroup.getBundles()) { + bundleGroup.removeBundle(b); + } + bundleGroup = entityManager.merge(bundleGroup); + + // now remove the bundle group + entityManager.remove(bundleGroup); + } + } + + @Override + public PageList<BundleGroup> findBundleGroupsByCriteria(Subject subject, BundleGroupCriteria criteria) { + CriteriaQueryGenerator generator = new CriteriaQueryGenerator(subject, criteria); + + if (!authorizationManager.hasGlobalPermission(subject, Permission.VIEW_BUNDLES)) { + + generator.setAuthorizationResourceFragment(CriteriaQueryGenerator.AuthorizationTokenType.BUNDLE, null, + subject.getId()); + } + + CriteriaQueryRunner<BundleGroup> queryRunner = new CriteriaQueryRunner<BundleGroup>(criteria, generator, + entityManager); + return queryRunner.execute(); + } + + @Override + public void unassignBundlesFromBundleGroup(Subject subject, int bundleGroupId, int[] bundleIds) { + // TODO Auto-generated method stub + + } + + /** + * @param subject + * @param bundleGroupId null or 0 for unassigned initial bundle version creation + * @throws PermissionException + */ + private void checkCreateInitialBundleVersionAuthz(Subject subject, Integer bundleGroupId) + throws PermissionException { + Set<Permission> globalPerms = authorizationManager.getExplicitGlobalPermissions(subject); + boolean hasGlobalCreateBundles = globalPerms.contains(Permission.CREATE_BUNDLES); + + if (hasGlobalCreateBundles && 
globalPerms.contains(Permission.VIEW_BUNDLES)) { + return; + } + + if (null == bundleGroupId || bundleGroupId.intValue() <= 0) { + String msg = "Subject [" + subject.getName() + + "] requires Global CREATE_BUNDLES and VIEW_BUNDLES to create unsassigned initial bundle version."; + throw new PermissionException(msg); + } + + if (hasGlobalCreateBundles) { + if (authorizationManager.hasBundleGroupPermission(subject, Permission.VIEW_BUNDLES_IN_GROUP, bundleGroupId)) { + return; + } + } else { + if (authorizationManager.hasBundleGroupPermission(subject, Permission.CREATE_BUNDLES_IN_GROUP, + bundleGroupId)) { + return; + } + } + + String msg = "Subject [" + + subject.getName() + + "] requires either Global.CREATE_BUNDLES + BundleGroup.VIEW_BUNDLES_IN_GROUP, or BundleGroup.CREATE_BUNDLES_IN_GROUP, to create or update a bundle in bundle group [" + + bundleGroupId + "]."; + throw new PermissionException(msg); + } + + /** + * @param subject + * @param bundleId required, bundleId of bundle in which bundle version is being created/updated + * @throws PermissionException + */ + private void checkCreateBundleVersionAuthz(Subject subject, int bundleId) throws PermissionException { + + if (bundleId <= 0) { + throw new IllegalArgumentException( + "Must supply valid bundleId for bundle version being created. 
BundleId specified [" + bundleId + "]"); + } + + Set<Permission> globalPerms = authorizationManager.getExplicitGlobalPermissions(subject); + boolean hasGlobalCreateBundles = globalPerms.contains(Permission.CREATE_BUNDLES); + + if (hasGlobalCreateBundles && globalPerms.contains(Permission.VIEW_BUNDLES)) { + return; + } + + if (hasGlobalCreateBundles) { + if (authorizationManager.hasBundlePermission(subject, Permission.VIEW_BUNDLES_IN_GROUP, bundleId)) { + return; + } + } else { + if (authorizationManager.hasBundlePermission(subject, Permission.CREATE_BUNDLES_IN_GROUP, bundleId)) { + return; + } + } + + String msg = "Subject [" + + subject.getName() + + "] requires either Global.CREATE_BUNDLES + BundleGroup.VIEW_BUNDLES_IN_GROUP, or BundleGroup.CREATE_BUNDLES_IN_GROUP, to create or update a bundleVersion for bundle [" + + bundleId + "]."; + throw new PermissionException(msg); + } + + /** + * @param subject + * @param bundleGroupId an existing bundle group + * @param bundleIds existing bundles + * @throws PermissionException + */ + private void checkAssignBundleGroupAuthz(Subject subject, int bundleGroupId, int[] bundleIds) + throws PermissionException { + + Set<Permission> globalPerms = authorizationManager.getExplicitGlobalPermissions(subject); + boolean hasGlobalCreateBundles = globalPerms.contains(Permission.CREATE_BUNDLES); + boolean hasGlobalViewBundles = globalPerms.contains(Permission.VIEW_BUNDLES); + + if (hasGlobalCreateBundles && hasGlobalViewBundles) { + return; + } + + boolean hasBundleGroupCreate = hasGlobalCreateBundles + || authorizationManager + .hasBundleGroupPermission(subject, Permission.CREATE_BUNDLES_IN_GROUP, bundleGroupId); + boolean hasBundleGroupAssign = hasBundleGroupCreate + || authorizationManager + .hasBundleGroupPermission(subject, Permission.ASSIGN_BUNDLES_TO_GROUP, bundleGroupId); + + if (!hasBundleGroupAssign) { + String msg = "Subject [" + + subject.getName() + + "] requires one of Global.CREATE_BUNDLES, 
BundleGroup.CREATE_BUNDLES_IN_GROUP, or BundleGroup.ASSIGN_BUNDLES_TO_GROUP to assign a bundle to undle group [" + + bundleGroupId + "]."; + throw new PermissionException(msg); + } + + for (int bundleId : bundleIds) { + if (bundleId <= 0) { + throw new IllegalArgumentException("Invalid bundleId: [" + bundleId + "]"); + } + + if (!authorizationManager.hasBundlePermission(subject, Permission.VIEW_BUNDLES_IN_GROUP, bundleId)) { + String msg = "Subject [" + subject.getName() + + "] requires either Global.VIEW_BUNDLES or BundleGroup.VIEW_BUNDLES_IN_GROUP to assign bundle [" + + bundleId + "] to bundle group [" + bundleGroupId + "]"; + throw new PermissionException(msg); + } + } + + return; + } + + private void checkBundleDeploymentAuthz(Subject subject, int bundleId, int resourceGroupId) + throws PermissionException { + + boolean hasResourceGroupView = authorizationManager.hasGroupPermission(subject, Permission.VIEW_RESOURCE, + resourceGroupId); + + if (!hasResourceGroupView) { + String msg = "Subject [" + subject.getName() + "] requires VIEW permission on resource group [" + + resourceGroupId + "]."; + throw new PermissionException(msg); + } + + Set<Permission> globalPerms = authorizationManager.getExplicitGlobalPermissions(subject); + boolean hasGlobalDeployBundles = globalPerms.contains(Permission.DEPLOY_BUNDLES); + boolean hasGlobalViewBundles = globalPerms.contains(Permission.VIEW_BUNDLES); + + if (hasGlobalDeployBundles && hasGlobalViewBundles) { + return; + } + + boolean hasResourceGroupDeploy = hasGlobalDeployBundles + || authorizationManager.hasGroupPermission(subject, Permission.DEPLOY_BUNDLES_TO_GROUP, resourceGroupId); + boolean hasBundleView = hasGlobalViewBundles + || authorizationManager.hasBundlePermission(subject, Permission.VIEW_BUNDLES_IN_GROUP, bundleId); + + if (!(hasResourceGroupDeploy && hasBundleView)) { + String msg = "Subject [" + subject.getName() + + "] requires DEPLOY permission (global or on for resource group [" + resourceGroupId + + "] 
and VIEW permission for bundle [" + bundleId + "]"; + throw new PermissionException(msg); + } + + return; + } } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerRemote.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerRemote.java index cffe363..5ff63d0 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerRemote.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerRemote.java @@ -247,8 +247,8 @@ public interface BundleManagerRemote { * </p> * Required Permissions: Either: * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES - * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG - * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG + * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG * * @param subject user that must have proper permissions * @param bundleGroupId identifies the bundle group that the new bundle will be associated with; 0 if no group @@ -289,8 +289,8 @@ public interface BundleManagerRemote { * </p> * Required Permissions: Either: * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES - * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG - * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG + * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG * * @param subject user that must have proper permissions * @param bundleGroupId identifies the bundle group that the new bundle will be associated with; 0 if no group @@ -332,8 +332,8 @@ public 
interface BundleManagerRemote { * </p> * Required Permissions: Either: * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES - * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG - * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG + * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG * * @param subject user that must have proper permissions * @param bundleGroupId identifies the bundle group that the new bundle will be associated with; 0 if no group @@ -379,8 +379,8 @@ public interface BundleManagerRemote { * </p> * Required Permissions: Either: * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES - * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG - * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG + * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG * * @param subject user that must have proper permissions * @param bundleGroupId identifies the bundle group that the new bundle will be associated with; 0 if no group @@ -418,8 +418,8 @@ public interface BundleManagerRemote { * </p> * Required Permissions: Either: * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES - * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG - * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG + * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG * * @see #createBundleVersionViaURL(org.rhq.core.domain.auth.Subject, String) */
commit 160452fff76ba285b5774c866f1a6f07aae239e9 Author: Jirka Kremser jkremser@redhat.com Date: Mon Jul 29 14:45:30 2013 +0200
If there is just one group definition in ConfigurationEditor, a normal form is used instead of the SectionStack component. Also the "Jump to Section" navigation is hidden for just one group to save some space on the screen.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/configuration/ConfigurationEditor.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/configuration/ConfigurationEditor.java index 779772f..418300f 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/configuration/ConfigurationEditor.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/configuration/ConfigurationEditor.java @@ -415,17 +415,29 @@ public class ConfigurationEditor extends EnhancedVLayout { EnhancedVLayout layout = new EnhancedVLayout(); List<PropertyGroupDefinition> groupDefinitions = configurationDefinition.getGroupDefinitions();
- if (groupDefinitions.isEmpty()) { - // No prop groups, so we just need a single form for the non-grouped props. + if (groupDefinitions.isEmpty() || groupDefinitions.size() == 1) { + // No or one prop groups, so we just need a single form for the non-grouped props + // and another one if there is just one group List<PropertyDefinition> propertyDefinitions = new ArrayList<PropertyDefinition>( configurationDefinition.getNonGroupedProperties()); - - DynamicForm form = buildPropertiesForm(propertyDefinitions, configuration); - form.setBorder("1px solid #AAA"); - form.validate(); - layout.addMember(form); + if (!propertyDefinitions.isEmpty()) { + DynamicForm form = buildPropertiesForm(propertyDefinitions, configuration); + form.setBorder("1px solid #AAA"); + form.validate(); + layout.addMember(form); + } + if (groupDefinitions.size() == 1) { + propertyDefinitions.addAll(configurationDefinition.getPropertiesInGroup(groupDefinitions.get(0) + .getName())); + DynamicForm groupForm = buildPropertiesForm(propertyDefinitions, configuration); + groupForm.setIsGroup(true); + groupForm.setGroupTitle(groupDefinitions.get(0).getDisplayName()); + groupForm.setBorder("1px solid #AAA"); + groupForm.validate(); + layout.addMember(groupForm); + } } else { - // One or more prop groups, so create a section stack with one section per group. + // Two or more prop groups, so create a section stack with one section per group. final SectionStack sectionStack = new SectionStack(); sectionStack.setVisibilityMode(VisibilityMode.MULTIPLE); sectionStack.setWidth100(); @@ -441,7 +453,6 @@ public class ConfigurationEditor extends EnhancedVLayout { // com.allen_sauer.gwt.log.client.Log.info("building: " + definition.getDisplayName()); sectionStack.addSection(buildGroupSection(definition)); } - this.toolStrip = buildToolStrip(layout, sectionStack); layout.addMember(toolStrip); layout.addMember(sectionStack);
commit ef3e3e3912fa2f015edf6f2eea8b0217bb7b9999 Author: Jirka Kremser jkremser@redhat.com Date: Mon Jul 29 13:19:16 2013 +0200
Adding the newly (6/28/13) issued certificate for accessing the PTO calendar (mail.corp.redhat.com domain) to the rhq-ircbot keystore. This one should be valid until 6/28/15.
diff --git a/etc/rhq-ircBot/src/main/resources/org/rhq/etc/ircbot/cacerts.jks b/etc/rhq-ircBot/src/main/resources/org/rhq/etc/ircbot/cacerts.jks index 3d73cbc..431de7c 100644 Binary files a/etc/rhq-ircBot/src/main/resources/org/rhq/etc/ircbot/cacerts.jks and b/etc/rhq-ircBot/src/main/resources/org/rhq/etc/ircbot/cacerts.jks differ
commit f9f495ce4c2c0419cd75ca47096c3af9644a3250 Author: Jirka Kremser jkremser@redhat.com Date: Mon Jul 29 12:54:06 2013 +0200
Calling the right setter.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 7d861f1..049cf42 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -310,7 +310,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN if ((scheduleId = scheduleIdsMap.get(totalDiskUsedPercentageMetric)) != null) { StorageNodeLoadComposite.MeasurementAggregateWithUnits totalDiskUsedPercentageAggregateWithUnits = getMeasurementAggregateWithUnits( subject, scheduleId, MeasurementUnits.PERCENTAGE, beginTime, endTime); - result.setDataDiskUsedPercentage(totalDiskUsedPercentageAggregateWithUnits); + result.setTotalDiskUsedPercentage(totalDiskUsedPercentageAggregateWithUnits); } if ((scheduleId = scheduleIdsMap.get(freeDiskToDataRatioMetric)) != null) { MeasurementAggregate freeDiskToDataRatioAggregate = measurementManager.getAggregate(subject, @@ -754,4 +754,4 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
return successResultFound; } -} \ No newline at end of file +}
commit c781acf5ce4680f4722cc6dfdeaadca2dfc110c8 Author: John Sanda jsanda@redhat.com Date: Sun Jul 28 07:36:07 2013 -0400
Override the default ring delay to speed up the test and hopefully fix the Jenkins failure.
diff --git a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java index ef92510..bd171a4 100644 --- a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java +++ b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java @@ -29,6 +29,7 @@ import java.io.File; import java.io.FileInputStream; import java.io.FileReader; import java.net.InetAddress; +import java.util.Properties; import java.util.Set;
import com.google.common.collect.ImmutableSet; @@ -73,6 +74,7 @@ import org.rhq.core.system.ProcessExecution; import org.rhq.core.system.ProcessExecutionResults; import org.rhq.core.system.SystemInfo; import org.rhq.core.system.SystemInfoFactory; +import org.rhq.core.util.PropertiesFileUpdate; import org.rhq.core.util.stream.StreamUtil;
/** @@ -124,6 +126,18 @@ public class StorageNodeComponentITest { deployer.updateFilePerms(); deployer.updateStorageAuthConf(Sets.newHashSet(InetAddress.getByName(address)));
+ File confDir = new File(basedir, "conf"); + File cassandraJvmPropsFile = new File(confDir, "cassandra-jvm.properties"); + PropertiesFileUpdate propertiesUpdater = new PropertiesFileUpdate(cassandraJvmPropsFile.getAbsolutePath()); + Properties properties = propertiesUpdater.loadExistingProperties(); + + String jvmOpts = properties.getProperty("JVM_OPTS"); + jvmOpts = jvmOpts.substring(0, jvmOpts.lastIndexOf("\"")); + jvmOpts = jvmOpts + " -Dcassandra.ring_delay_ms=100\""; + properties.setProperty("JVM_OPTS", jvmOpts); + + propertiesUpdater.update(properties); + File binDir = new File(basedir, "bin"); SystemInfo systemInfo = SystemInfoFactory.createSystemInfo();
@@ -261,9 +275,8 @@ public class StorageNodeComponentITest { log.info("Waiting for node to boostrap..."); // When a node goes through bootstrap, StorageService sleeps for RING_DELAY ms // while it determines the ranges of the token ring it will own. RING_DELAY defaults - // to 30 seconds by default. - // TODO Override the default RING_DELAY to speed up tests - Thread.sleep(33000); + // to 30 seconds by default but we are overriding it to be 100 ms. + Thread.sleep(3000);
assertEquals(result.getResultCode(), OperationServicesResultCode.SUCCESS, "The operation failed: " + result.getErrorStackTrace());
commit 96745558fb5c803bda91f5ac1180b17db8a76364 Author: John Sanda jsanda@redhat.com Date: Sat Jul 27 14:08:18 2013 -0400
updating exception handling and logging in prepareForBootstrap
diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/InternodeAuthConfUpdateException.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/InternodeAuthConfUpdateException.java new file mode 100644 index 0000000..d1e6e56 --- /dev/null +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/InternodeAuthConfUpdateException.java @@ -0,0 +1,22 @@ +package org.rhq.plugins.storage; + +/** + * @author John Sanda + */ +public class InternodeAuthConfUpdateException extends Exception { + + public InternodeAuthConfUpdateException() { + } + + public InternodeAuthConfUpdateException(String message) { + super(message); + } + + public InternodeAuthConfUpdateException(String message, Throwable cause) { + super(message, cause); + } + + public InternodeAuthConfUpdateException(Throwable cause) { + super(cause); + } +} diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java index eb0b9fd..2cb9501 100644 --- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java @@ -29,12 +29,10 @@ import java.io.File; import java.io.FileNotFoundException; import java.io.FileReader; import java.io.FileWriter; -import java.io.IOException; import java.io.StringReader; import java.util.ArrayList; import java.util.HashSet; import java.util.List; -import java.util.Map; import java.util.Set;
import org.apache.commons.logging.Log; @@ -95,6 +93,14 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper return new File(pluginConfig.getSimpleValue("baseDir")); }
+ private File getConfDir() { + return new File(getBasedir(), "conf"); + } + + private File getInternodeAuthConfFile() { + return new File(getConfDir(), "rhq-storage-auth.conf"); + } + @Override public OperationResult invokeOperation(String name, Configuration parameters) throws Exception { if (name.equals("addNodeMaintenance")) { @@ -242,56 +248,24 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper ipAddresses.add(propertySimple.getStringValue()); }
- if (updateAuthFile(result, ipAddresses)) return result; - - EmsBean authBean = getEmsConnection().getBean("org.rhq.cassandra.auth:type=RhqInternodeAuthenticator"); - EmsOperation emsOperation = authBean.getOperation("reloadConfiguration"); - emsOperation.invoke(); - - result.setSimpleResult("Successfully updated the set of known nodes."); - - return result; - } - - private boolean updateAuthFile(OperationResult result, Set<String> ipAddresses) { - log.info("Updating known nodes to " + ipAddresses); + try { + updateInternodeAuthConfFile(ipAddresses);
- File confDir = new File(getBasedir(), "conf"); - File authFile = new File(confDir, "rhq-storage-auth.conf"); - File authBackupFile = new File(confDir, "." + authFile.getName() + ".bak"); + EmsBean authBean = getEmsConnection().getBean("org.rhq.cassandra.auth:type=RhqInternodeAuthenticator"); + EmsOperation emsOperation = authBean.getOperation("reloadConfiguration"); + emsOperation.invoke();
- if (authBackupFile.exists()) { - if (log.isDebugEnabled()) { - log.debug(authBackupFile + " already exists. Deleting it now in preparation of creating new backup " + - "for " + authFile.getName()); - } - if (!authBackupFile.delete()) { - String msg = "Failed to delete backup file " + authBackupFile + ". The operation will abort " + - "since " + authFile + " cannot reliably be backed up before making changes. Please delete " + - authBackupFile + " manually and reschedule the operation once the file has been removed."; - log.error(msg); - result.setErrorMessage(msg); - - return true; - } - } + result.setSimpleResult("Successfully updated the set of known nodes.");
- try { - StreamUtil.copy(new StringReader(StringUtil.collectionToString(ipAddresses, "\n")), - new FileWriter(authFile), true); - } catch (IOException e) { - log.error("An error occurred while updating " + authFile, e); - try { - log.info("Restoring back up file " + authBackupFile); - FileUtil.copyFile(authBackupFile, authFile); - authBackupFile.delete(); - } catch (IOException e1) { - log.error("Failed to revert backup of " + authFile, e1); - } - result.setErrorMessage("There was an unexpected error while updating " + authFile); - return true; + return result; + } catch (InternodeAuthConfUpdateException e) { + File authFile = getInternodeAuthConfFile(); + log.error("Failed to update set of trusted nodes in " + authFile + " due to the following error(s): " + + ThrowableUtil.getAllMessages(e)) ; + result.setErrorMessage("Failed to update set of trusted nodes in " + authFile + " due to the following " + + "error(s): " + ThrowableUtil.getAllMessages(e)); + return result; } - return false; }
private OperationResult prepareForBootstrap(Configuration params) { @@ -326,7 +300,6 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper } purgeDir(new File(configEditor.getSavedCachesDirectory()));
- log.info("Updating cluster settings");
String address = pluginConfig.getSimpleValue("host"); @@ -344,10 +317,9 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper configEditor.setStoragePort(gossipPort);
configEditor.save(); + log.info("Cluster configuration settings have been applied to " + yamlFile);
- if (updateAuthFile(result, new HashSet<String>(addresses))) { - return result; - } + updateInternodeAuthConfFile(new HashSet<String>(addresses));
log.info(this + " is ready to be bootstrap. Restarting storage node..."); OperationResult startResult = startNode(); @@ -376,6 +348,11 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper } } return result; + } catch (InternodeAuthConfUpdateException e) { + File authFile = getInternodeAuthConfFile(); + result.setErrorMessage("Failed to update " + authFile + " due to the following error(s): " + + ThrowableUtil.getAllMessages(e)); + return result; } }
@@ -384,23 +361,18 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper FileUtil.purge(dir, true); }
- private File getCommitLogDir(Map yamlConfig) { - return new File((String) yamlConfig.get("commitlog_directory")); - } + private void updateInternodeAuthConfFile(Set<String> ipAddresses) throws InternodeAuthConfUpdateException { + File authFile = getInternodeAuthConfFile();
- private List<File> getDataDirs(Map yamlConfig) { - List<File> dirs = new ArrayList<File>(); - List<String> dirNames = (List<String>) yamlConfig.get("data_file_directories"); + log.info("Updating " + authFile);
- for (String dirName : dirNames) { - dirs.add(new File(dirName)); + try { + StreamUtil.copy(new StringReader(StringUtil.collectionToString(ipAddresses, "\n")), + new FileWriter(authFile), true); + } catch (Exception e) { + log.error("An error occurred while trying to update " + authFile, e); + throw new InternodeAuthConfUpdateException("An error occurred while trying to update " + authFile, e); } - - return dirs; - } - - private File getSavedCachesDir(Map yamlConfig) { - return new File((String) yamlConfig.get("saved_caches_directory")); }
private OperationResult nodeAdded(Configuration params) { diff --git a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java index d10e428..ef92510 100644 --- a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java +++ b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java @@ -259,6 +259,10 @@ public class StorageNodeComponentITest { params, timeout);
log.info("Waiting for node to boostrap..."); + // When a node goes through bootstrap, StorageService sleeps for RING_DELAY ms + // while it determines the ranges of the token ring it will own. RING_DELAY defaults + // to 30 seconds by default. + // TODO Override the default RING_DELAY to speed up tests Thread.sleep(33000);
assertEquals(result.getResultCode(), OperationServicesResultCode.SUCCESS, "The operation failed: " +
commit 237d38ea10fe57dfb699f01cd999c3aa53391ef4 Author: John Sanda jsanda@redhat.com Date: Sat Jul 27 12:49:30 2013 -0400
adding some initial test coverage for prepareForBootstrap operation
The prepareForBootstrap method has been refactored to use ConfigEditor but there is still a good bit of clean up to do.
diff --git a/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java b/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java index 679a84c..0b4a127 100644 --- a/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java +++ b/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java @@ -76,6 +76,22 @@ public class ConfigEditor { } }
+ public File getBackupFile() { + return backupFile; + } + + public String getCommitLogDirectory() { + return (String) config.get("commitlog_directory"); + } + + public List<String> getDataFileDirectories() { + return (List<String>) config.get("data_file_directories"); + } + + public String getSavedCachesDirectory() { + return (String) config.get("saved_caches_directory"); + } + public void setSeeds(String... seeds) { List seedProviderList = (List) config.get("seed_provider"); Map seedProvider = (Map) seedProviderList.get(0); diff --git a/modules/plugins/cassandra/pom.xml b/modules/plugins/cassandra/pom.xml index da90f09..bafc8d7 100644 --- a/modules/plugins/cassandra/pom.xml +++ b/modules/plugins/cassandra/pom.xml @@ -36,6 +36,11 @@ </dependency> <dependency> <groupId>${project.groupId}</groupId> + <artifactId>rhq-cassandra-util</artifactId> + <version>${project.version}</version> + </dependency> + <dependency> + <groupId>${project.groupId}</groupId> <artifactId>rhq-core-domain</artifactId> <version>${project.version}</version> <scope>provided</scope> @@ -89,6 +94,10 @@ <outputDirectory>${project.build.outputDirectory}/lib</outputDirectory> <artifactItems> <artifactItem> + <groupId>${project.groupId}</groupId> + <artifactId>rhq-cassandra-util</artifactId> + </artifactItem> + <artifactItem> <groupId>com.datastax.cassandra</groupId> <artifactId>cassandra-driver-core</artifactId> <version>${cassandra.driver.version}</version> diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java index 5933093..eb0b9fd 100644 --- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java @@ -26,7 +26,6 @@ package org.rhq.plugins.storage;
import java.io.File; -import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.FileReader; import java.io.FileWriter; @@ -45,9 +44,10 @@ import org.mc4j.ems.connection.EmsConnection; import org.mc4j.ems.connection.bean.EmsBean; import org.mc4j.ems.connection.bean.attribute.EmsAttribute; import org.mc4j.ems.connection.bean.operation.EmsOperation; -import org.yaml.snakeyaml.DumperOptions; -import org.yaml.snakeyaml.Yaml; +import org.yaml.snakeyaml.error.YAMLException;
+import org.rhq.cassandra.util.ConfigEditor; +import org.rhq.cassandra.util.ConfigEditorException; import org.rhq.core.domain.configuration.Configuration; import org.rhq.core.domain.configuration.ConfigurationUpdateStatus; import org.rhq.core.domain.configuration.Property; @@ -277,27 +277,18 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper }
try { - FileUtil.copyFile(authFile, authBackupFile); - } catch (IOException e) { - String msg = "Failed to backup " + authFile + " prior to making updates. The operation will abort due " + - "to unexpected error"; - log.error(msg, e); - result.setErrorMessage(msg + ": " + ThrowableUtil.getRootMessage(e)); - return true; - } - - try { StreamUtil.copy(new StringReader(StringUtil.collectionToString(ipAddresses, "\n")), new FileWriter(authFile), true); } catch (IOException e) { log.error("An error occurred while updating " + authFile, e); try { + log.info("Restoring back up file " + authBackupFile); FileUtil.copyFile(authBackupFile, authFile); + authBackupFile.delete(); } catch (IOException e1) { log.error("Failed to revert backup of " + authFile, e1); } - result.setErrorMessage("There was an unexpected error while updating " + authFile + ". Make sure that " + - "it matches " + authBackupFile + " and then reschedule the operation."); + result.setErrorMessage("There was an unexpected error while updating " + authFile); return true; } return false; @@ -325,74 +316,67 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper String yamlProp = pluginConfig.getSimpleValue("yamlConfiguration"); File yamlFile = new File(yamlProp);
- DumperOptions options = new DumperOptions(); - options.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK); - Yaml yaml = new Yaml(options); - - Map yamlConfig = null; + ConfigEditor configEditor = new ConfigEditor(yamlFile); try { - yamlConfig = (Map) yaml.load(new FileInputStream(yamlFile)); - } catch (FileNotFoundException e) { - log.error("Failed to load " + yamlFile, e); - log.error("Cannot prepare " + this + " for bootstrap. " + yamlFile + " must exist in order to make the " + - "necessary configuration changes."); - result.setErrorMessage("Cannot prepare storage node for bootstrap. It appears that " + yamlFile + - " does not exist. Make sure that it exists so that the necessary configuration changes can be made."); + configEditor.load();
- return result; - } + purgeDir(new File(configEditor.getCommitLogDirectory())); + for (String dir : configEditor.getDataFileDirectories()) { + purgeDir(new File(dir)); + } + purgeDir(new File(configEditor.getSavedCachesDirectory()));
- purgeDir(getCommitLogDir(yamlConfig)); - for (File dataDir : getDataDirs(yamlConfig)) { - purgeDir(dataDir); - } - purgeDir(getSavedCachesDir(yamlConfig));
- log.info("Updating cluster settings"); + log.info("Updating cluster settings");
- String address = pluginConfig.getSimpleValue("host"); - List<String> seeds = getAddresses(params.getList("storageNodeIPAddresses")); - // Make sure this node's address is not in the list; otherwise, it - // won't bootstrap properly. - seeds.remove(address); - try { - updateSeedsList(seeds); - } catch (IOException e) { - log.error("Failed to update seeds property in " + yamlFile, e); - result.setErrorMessage("Failed to prepared node for bootstrap due to unexpected error that occurred " + - "while updating seeds property in " + yamlFile + ":\n" + ThrowableUtil.getAllMessages(e)); - return result; - } + String address = pluginConfig.getSimpleValue("host"); + int cqlPort = Integer.parseInt(params.getSimpleValue("cqlPort")); + int gossipPort = Integer.parseInt(params.getSimpleValue("gossipPort")); + List<String> addresses = getAddresses(params.getList("storageNodeIPAddresses"));
- if (updateAuthFile(result, new HashSet<String>(seeds))) { - return result; - } + // Make sure this node's address is not in the list; otherwise, it + // won't bootstrap properly. + List<String> seeds = new ArrayList<String>(addresses); + seeds.remove(address);
- int cqlPort = Integer.parseInt(params.getSimpleValue("cqlPort")); - int gossipPort = Integer.parseInt(params.getSimpleValue("gossipPort")); + configEditor.setSeeds(seeds.toArray(new String[seeds.size()])); + configEditor.setNativeTransportPort(cqlPort); + configEditor.setStoragePort(gossipPort);
- yamlConfig.put("native_transport_port", cqlPort); - yamlConfig.put("storage_port", gossipPort); + configEditor.save();
- try { - yaml.dump(yamlConfig, new FileWriter(yamlFile)); - } catch (IOException e) { - log.error("Could not update cluster settings in " + yamlFile, e); - result.setErrorMessage("Could not update cluster settings in " + yamlFile + ":\n" + - ThrowableUtil.getAllMessages(e)); - return result; - } + if (updateAuthFile(result, new HashSet<String>(addresses))) { + return result; + }
- log.info(this + " is ready to be bootstrap. Restarting storage node..."); - OperationResult startResult = startNode(); - if (startResult.getErrorMessage() != null) { - log.error("Failed to restart storage node:\n" + startResult.getErrorMessage()); - result.setErrorMessage("Failed to restart storage node:\n" + startResult.getErrorMessage()); - } else { - result.setSimpleResult("The storage node was succesfully updated is now bootstrapping into the cluster."); - } + log.info(this + " is ready to be bootstrap. Restarting storage node..."); + OperationResult startResult = startNode(); + if (startResult.getErrorMessage() != null) { + log.error("Failed to restart storage node:\n" + startResult.getErrorMessage()); + result.setErrorMessage("Failed to restart storage node:\n" + startResult.getErrorMessage()); + } else { + result.setSimpleResult("The storage node was succesfully updated is now bootstrapping into the cluster."); + }
- return result; + return result; + } catch (ConfigEditorException e) { + log.error("There was an error while trying to update " + yamlFile, e); + if (e.getCause() instanceof YAMLException) { + log.info("Attempting to restore " + yamlFile); + try { + configEditor.restore(); + result.setErrorMessage("Failed to update configuration file [" + yamlFile + "]: " + + ThrowableUtil.getAllMessages(e.getCause())); + } catch (ConfigEditorException e1) { + log.error("Failed to restore " + yamlFile + ". A copy of the file prior to any modifications " + + "can be found at " + configEditor.getBackupFile()); + result.setErrorMessage("There was an error updating [" + yamlFile + "] and undoing the changes " + + "Failed. A copy of the file can be found at " + configEditor.getBackupFile() + ". See the " + + "agent logs for more details"); + } + } + return result; + } }
private void purgeDir(File dir) { diff --git a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java index eb4d545..d10e428 100644 --- a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java +++ b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java @@ -26,15 +26,25 @@ import static org.testng.Assert.assertNotNull; import static org.testng.Assert.assertTrue;
import java.io.File; +import java.io.FileInputStream; +import java.io.FileReader; import java.net.InetAddress; import java.util.Set;
+import com.google.common.collect.ImmutableSet; import com.google.common.collect.Sets;
+import org.apache.cassandra.config.Config; +import org.apache.cassandra.config.SeedProviderDef; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.hyperic.sigar.OperatingSystem; import org.testng.annotations.AfterSuite; import org.testng.annotations.BeforeSuite; import org.testng.annotations.Test; +import org.yaml.snakeyaml.Loader; +import org.yaml.snakeyaml.TypeDescription; +import org.yaml.snakeyaml.Yaml;
import org.rhq.cassandra.CassandraClusterManager; import org.rhq.cassandra.ClusterInitService; @@ -63,12 +73,15 @@ import org.rhq.core.system.ProcessExecution; import org.rhq.core.system.ProcessExecutionResults; import org.rhq.core.system.SystemInfo; import org.rhq.core.system.SystemInfoFactory; +import org.rhq.core.util.stream.StreamUtil;
/** * @author John Sanda */ public class StorageNodeComponentITest {
+ private final Log log = LogFactory.getLog(StorageNodeComponentITest.class); + private File basedir;
private Resource storageNode; @@ -232,7 +245,7 @@ public class StorageNodeComponentITest { }
@Test(dependsOnMethods = "restartStorageNode") - public void prepareForBootstrap() { + public void prepareForBootstrap() throws Exception { Configuration params = Configuration.builder().addSimple("cqlPort", 9242).addSimple("gossipPort", 7200) .openList("storageNodeIPAddresses", "storageNodeIPAddresse").addSimples("127.0.0.1", "127.0.0.2") .closeList().build(); @@ -245,10 +258,22 @@ public class StorageNodeComponentITest { OperationServicesResult result = operationsService.invokeOperation(operationContext, "prepareForBootstrap", params, timeout);
+ log.info("Waiting for node to boostrap..."); + Thread.sleep(33000); + assertEquals(result.getResultCode(), OperationServicesResultCode.SUCCESS, "The operation failed: " + result.getErrorStackTrace());
assertNodeIsUp("Expected " + storageNode + " to be up after the prepareForBootstrap operation completes."); + + assertThatInternodeAuthConfFileMatches("127.0.0.1", "127.0.0.2"); + + File confDir = new File(basedir, "conf"); + File cassandraYamlFile = new File(confDir, "cassandra.yaml"); + Config config = loadConfig(cassandraYamlFile); + + assertEquals(config.seed_provider.parameters.get("seeds"), "127.0.0.2", "Failed to update seeds " + + "property in " + cassandraYamlFile); }
private void assertNodeIsUp(String msg) { @@ -292,4 +317,28 @@ public class StorageNodeComponentITest { return type.getPlugin().equals("RHQStorage") && type.getName().equals("RHQ Storage Node"); }
+ private void assertThatInternodeAuthConfFileMatches(String... addresses) throws Exception { + File confDir = new File(basedir, "conf"); + File internodeAuthConfFile = new File(confDir, "rhq-storage-auth.conf"); + String contents = StreamUtil.slurp(new FileReader(internodeAuthConfFile)); + + Set<String> expected = ImmutableSet.copyOf(addresses); + Set<String> actual = ImmutableSet.copyOf(contents.split("\n")); + + assertEquals(actual, expected, "Failed to update internode authentication conf file " + + internodeAuthConfFile + "."); + } + + private Config loadConfig(File configFile) throws Exception { + FileInputStream inputStream = new FileInputStream(configFile); + org.yaml.snakeyaml.constructor.Constructor constructor = + new org.yaml.snakeyaml.constructor.Constructor(Config.class); + TypeDescription seedDesc = new TypeDescription(SeedProviderDef.class); + seedDesc.putMapPropertyType("parameters", String.class, String.class); + constructor.addTypeDescription(seedDesc); + Yaml yaml = new Yaml(new Loader(constructor)); + + return (Config) yaml.load(inputStream); + } + }
commit 30c6d6678edbb44bd7b07db0cd2855b9f1790aba Author: John Sanda jsanda@redhat.com Date: Sat Jul 27 10:14:18 2013 -0400
initial commit for ConfigEditor which handles updating cassandra.yaml
// File: modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java
package org.rhq.cassandra.util;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileWriter;
import java.io.IOException;
import java.util.List;
import java.util.Map;

import org.yaml.snakeyaml.DumperOptions;
import org.yaml.snakeyaml.Yaml;

import org.rhq.core.util.StringUtil;
import org.rhq.core.util.file.FileUtil;

/**
 * Edits a cassandra.yaml configuration file in place. Call {@link #load()} before making any
 * changes; load takes a backup copy of the file so that {@link #restore()} can roll back if
 * something goes wrong. {@link #save()} writes the pending changes and removes the backup.
 * <p>
 * This class is not thread-safe.
 *
 * @author John Sanda
 */
public class ConfigEditor {

    // The cassandra.yaml file being edited.
    private File configFile;

    // Backup taken at load() time; deleted on save()/restore().
    private File backupFile;

    private Yaml yaml;

    // The parsed YAML document; mutated by the setters below.
    private Map config;

    public ConfigEditor(File cassandraYamlFile) {
        configFile = cassandraYamlFile;
    }

    /**
     * Parses the YAML file and creates a backup copy next to it. Must be called before any
     * setter or {@link #save()}.
     *
     * @throws ConfigEditorException if the file cannot be read
     */
    public void load() {
        FileInputStream inputStream = null;
        try {
            DumperOptions options = new DumperOptions();
            options.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK);
            yaml = new Yaml(options);
            inputStream = new FileInputStream(configFile);
            config = (Map) yaml.load(inputStream);
            createBackup();
        } catch (FileNotFoundException e) {
            throw new ConfigEditorException("Failed to load " + configFile, e);
        } finally {
            // The original leaked this stream; close it regardless of outcome.
            if (inputStream != null) {
                try {
                    inputStream.close();
                } catch (IOException ignored) {
                    // Nothing useful to do if close fails; the file was already parsed.
                }
            }
        }
    }

    /**
     * Writes the pending changes back to the config file and deletes the backup.
     *
     * @throws ConfigEditorException if the changes cannot be written
     */
    public void save() {
        try {
            FileWriter writer = new FileWriter(configFile);
            try {
                yaml.dump(config, writer);
            } finally {
                // snakeyaml does not close the writer it is handed; without this close the
                // dumped contents may never be flushed to disk.
                writer.close();
            }
            backupFile.delete();
            yaml = null;
            config = null;
            backupFile = null;
        } catch (Exception e) {
            throw new ConfigEditorException("Failed to save changes to " + configFile, e);
        }
    }

    /**
     * Discards the pending changes by copying the backup taken at {@link #load()} time back
     * over the config file, then deletes the backup.
     *
     * @throws ConfigEditorException if the backup cannot be copied back
     */
    public void restore() {
        try {
            FileUtil.copyFile(backupFile, configFile);
            backupFile.delete();
            yaml = null;
            config = null;
            backupFile = null;
        } catch (IOException e) {
            throw new ConfigEditorException("Failed to restore " + configFile + " from " + backupFile, e);
        }
    }

    // Copies configFile to a hidden sibling, e.g. ".cassandra.yaml.bak".
    private void createBackup() {
        backupFile = new File(configFile.getParent(), "." + configFile.getName() + ".bak");
        try {
            FileUtil.copyFile(configFile, backupFile);
        } catch (IOException e) {
            throw new ConfigEditorException("Failed to create " + backupFile, e);
        }
    }

    /**
     * Sets the seed_provider "seeds" parameter to a comma-delimited list of the given
     * addresses. Only the first seed provider entry is updated.
     *
     * @param seeds the seed node addresses
     */
    public void setSeeds(String... seeds) {
        List seedProviderList = (List) config.get("seed_provider");
        Map seedProvider = (Map) seedProviderList.get(0);
        List paramsList = (List) seedProvider.get("parameters");
        Map params = (Map) paramsList.get(0);
        params.put("seeds", StringUtil.arrayToString(seeds));
    }

    /** Sets the native_transport_port (CQL) property. */
    public void setNativeTransportPort(int port) {
        config.put("native_transport_port", port);
    }

    /** Sets the storage_port (internode/gossip) property. */
    public void setStoragePort(int port) {
        config.put("storage_port", port);
    }

}

// File: modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditorException.java
package org.rhq.cassandra.util;

/**
 * Thrown when a cassandra.yaml file cannot be loaded, saved, or restored by
 * {@link ConfigEditor}.
 *
 * @author John Sanda
 */
public class ConfigEditorException extends RuntimeException {

    public ConfigEditorException() {
    }

    public ConfigEditorException(String message) {
        // BUG FIX: the original constructor dropped the message entirely, so
        // getMessage() returned null for every single-argument throw site.
        super(message);
    }

    public ConfigEditorException(String message, Throwable cause) {
        super(message, cause);
    }

    public ConfigEditorException(Throwable cause) {
        super(cause);
    }
}
java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.InputStream; +import java.lang.reflect.Method; + +import org.apache.cassandra.config.Config; +import org.apache.cassandra.config.SeedProviderDef; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; +import org.yaml.snakeyaml.Loader; +import org.yaml.snakeyaml.TypeDescription; +import org.yaml.snakeyaml.Yaml; + +import org.rhq.core.util.file.FileUtil; +import org.rhq.core.util.stream.StreamUtil; + +/** + * @author John Sanda + */ +public class ConfigEditorTest { + + private File basedir; + + private File configFile; + + @BeforeMethod + public void initTestDir(Method test) throws Exception { + File dir = new File(getClass().getResource(".").toURI()); + basedir = new File(dir, getClass().getSimpleName() + "/" + test.getName()); + FileUtil.purge(basedir, true); + basedir.mkdirs(); + + configFile = new File(basedir, "cassandra.yaml"); + + InputStream inputStream = getClass().getResourceAsStream("/cassandra.yaml"); + FileOutputStream outputStream = new FileOutputStream(configFile); + StreamUtil.copy(inputStream, outputStream); + } + + @Test + public void updateSeeds() throws Exception { + ConfigEditor editor = new ConfigEditor(configFile); + editor.load(); + editor.setSeeds("127.0.0.1", "127.0.0.2", "127.0.0.3"); + editor.save(); + + Config config = loadConfig(); + + assertEquals(config.seed_provider.parameters.get("seeds"), "127.0.0.1,127.0.0.2,127.0.0.3", + "Failed to update seeds property."); + } + + @Test + public void updateNativeTransportPort() throws Exception { + ConfigEditor editor = new ConfigEditor(configFile); + editor.load(); + editor.setNativeTransportPort(9393); + editor.save(); + + Config config = loadConfig(); + + assertEquals(config.native_transport_port, (Integer) 9393, "Failed to update native_transport_port"); + } + + @Test + public void updateStoragePort() throws Exception { + ConfigEditor editor = new ConfigEditor(configFile); + editor.load(); + 
editor.setStoragePort(6767); + editor.save(); + + Config config = loadConfig(); + + assertEquals(config.storage_port, (Integer) 6767, "Failed to update storage_port"); + } + + private Config loadConfig() throws Exception { + FileInputStream inputStream = new FileInputStream(configFile); + org.yaml.snakeyaml.constructor.Constructor constructor = + new org.yaml.snakeyaml.constructor.Constructor(Config.class); + TypeDescription seedDesc = new TypeDescription(SeedProviderDef.class); + seedDesc.putMapPropertyType("parameters", String.class, String.class); + constructor.addTypeDescription(seedDesc); + Yaml yaml = new Yaml(new Loader(constructor)); + + return (Config) yaml.load(inputStream); + } + +} diff --git a/modules/common/cassandra-util/src/test/resources/cassandra.yaml b/modules/common/cassandra-util/src/test/resources/cassandra.yaml new file mode 100644 index 0000000..fd7973b --- /dev/null +++ b/modules/common/cassandra-util/src/test/resources/cassandra.yaml @@ -0,0 +1,690 @@ +# Cassandra storage config YAML + +# NOTE: +# See http://wiki.apache.org/cassandra/StorageConfiguration for +# full explanations of configuration directives +# /NOTE + +# The name of the cluster. This is mainly used to prevent machines in +# one logical cluster from joining another. +cluster_name: rhq + +# This defines the number of tokens randomly assigned to this node on the ring +# The more tokens, relative to other nodes, the larger the proportion of data +# that this node will store. You probably want all nodes to have the same number +# of tokens assuming they have equal hardware capability. +# +# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, +# and will use the initial_token as described below. +# +# Specifying initial_token will override this setting. 
+# +# If you already have a cluster with 1 token per node, and wish to migrate to +# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations +num_tokens: 256 + +# If you haven't specified num_tokens, or have set it to the default of 1 then +# you should always specify InitialToken when setting up a production +# cluster for the first time, and often when adding capacity later. +# The principle is that each node should be given an equal slice of +# the token ring; see http://wiki.apache.org/cassandra/Operations +# for more details. +# +# If blank, Cassandra will request a token bisecting the range of +# the heaviest-loaded existing node. If there is no load information +# available, such as is the case with a new cluster, it will pick +# a random token, which will lead to hot spots. +#initial_token: + +# See http://wiki.apache.org/cassandra/HintedHandoff +hinted_handoff_enabled: true +# this defines the maximum amount of time a dead host will have hints +# generated. After it has been dead this long, new hints for it will not be +# created until it has been seen alive and gone down again. +max_hint_window_in_ms: 10800000 # 3 hours +# throttle in KB's per second, per delivery thread +hinted_handoff_throttle_in_kb: 1024 +# Number of threads with which to deliver hints; +# Consider increasing this number when you have multi-dc deployments, since +# cross-dc handoff tends to be slower +max_hints_delivery_threads: 2 + +# The following setting populates the page cache on memtable flush and compaction +# WARNING: Enable this setting only when the whole node's data fits in memory. +# Defaults to: false +# populate_io_cache_on_flush: false + +# Authentication backend, implementing IAuthenticator; used to identify users +# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, +# PasswordAuthenticator}. +# +# - AllowAllAuthenticator performs no checks - set it to disable authentication. 
+# - PasswordAuthenticator relies on username/password pairs to authenticate +# users. It keeps usernames and hashed passwords in system_auth.credentials table. +# Please increase system_auth keyspace replication factor if you use this authenticator. +authenticator: org.apache.cassandra.auth.PasswordAuthenticator + +# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions +# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, +# CassandraAuthorizer}. +# +# - AllowAllAuthorizer allows any action to any user - set it to disable authorization. +# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please +# increase system_auth keyspace replication factor if you use this authorizer. +authorizer: org.apache.cassandra.auth.CassandraAuthorizer + +# Validity period for permissions cache (fetching permissions can be an +# expensive operation depending on the authorizer, CassandraAuthorizer is +# one example). Defaults to 2000, set to 0 to disable. +# Will be disabled automatically for AllowAllAuthorizer. +permissions_validity_in_ms: 2000 + +# The partitioner is responsible for distributing rows (by key) across +# nodes in the cluster. Any IPartitioner may be used, including your +# own as long as it is on the classpath. Out of the box, Cassandra +# provides org.apache.cassandra.dht.{Murmur3Partitioner, RandomPartitioner +# ByteOrderedPartitioner, OrderPreservingPartitioner (deprecated)}. +# +# - RandomPartitioner distributes rows across the cluster evenly by md5. +# This is the default prior to 1.2 and is retained for compatibility. +# - Murmur3Partitioner is similar to RandomPartioner but uses Murmur3_128 +# Hash Function instead of md5. When in doubt, this is the best option. +# - ByteOrderedPartitioner orders rows lexically by key bytes. BOP allows +# scanning rows in key order, but the ordering can generate hot spots +# for sequential insertion workloads. 
+# - OrderPreservingPartitioner is an obsolete form of BOP, that stores +# - keys in a less-efficient format and only works with keys that are +# UTF8-encoded Strings. +# - CollatingOPP colates according to EN,US rules rather than lexical byte +# ordering. Use this as an example if you need custom collation. +# +# See http://wiki.apache.org/cassandra/Operations for more on +# partitioners and token selection. +partitioner: org.apache.cassandra.dht.Murmur3Partitioner + +# directories where Cassandra should store data on disk. +data_file_directories: + - /Users/jsanda/Development/redhat/rhq/rhq-data/storage-1/data + +# commit log +commitlog_directory: /Users/jsanda/Development/redhat/rhq/rhq-data/storage-1/commit_log + +# policy for data disk failures: +# stop: shut down gossip and Thrift, leaving the node effectively dead, but +# still inspectable via JMX. +# best_effort: stop using the failed disk and respond to requests based on +# remaining available sstables. This means you WILL see obsolete +# data at CL.ONE! +# ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra +disk_failure_policy: stop + +# Maximum size of the key cache in memory. +# +# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the +# minimum, sometimes more. The key cache is fairly tiny for the amount of +# time it saves, so it's worthwhile to use it at large numbers. +# The row cache saves even more time, but must store the whole values of +# its rows, so it is extremely space-intensive. It's best to only use the +# row cache if you have hot rows or static rows. +# +# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. +# +# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache. +key_cache_size_in_mb: + +# Duration in seconds after which Cassandra should +# safe the keys cache. Caches are saved to saved_caches_directory as +# specified in this configuration file. 
+# +# Saved caches greatly improve cold-start speeds, and is relatively cheap in +# terms of I/O for the key cache. Row cache saving is much more expensive and +# has limited use. +# +# Default is 14400 or 4 hours. +key_cache_save_period: 14400 + +# Number of keys from the key cache to save +# Disabled by default, meaning all keys are going to be saved +# key_cache_keys_to_save: 100 + +# Maximum size of the row cache in memory. +# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. +# +# Default value is 0, to disable row caching. +row_cache_size_in_mb: 0 + +# Duration in seconds after which Cassandra should +# safe the row cache. Caches are saved to saved_caches_directory as specified +# in this configuration file. +# +# Saved caches greatly improve cold-start speeds, and is relatively cheap in +# terms of I/O for the key cache. Row cache saving is much more expensive and +# has limited use. +# +# Default is 0 to disable saving the row cache. +row_cache_save_period: 0 + +# Number of keys from the row cache to save +# Disabled by default, meaning all keys are going to be saved +# row_cache_keys_to_save: 100 + +# The provider for the row cache to use. +# +# Supported values are: ConcurrentLinkedHashCacheProvider, SerializingCacheProvider +# +# SerializingCacheProvider serialises the contents of the row and stores +# it in native memory, i.e., off the JVM Heap. Serialized rows take +# significantly less memory than "live" rows in the JVM, so you can cache +# more rows in a given memory footprint. And storing the cache off-heap +# means you can use smaller heap sizes, reducing the impact of GC pauses. +# +# It is also valid to specify the fully-qualified class name to a class +# that implements org.apache.cassandra.cache.IRowCacheProvider. 
+# +# Defaults to SerializingCacheProvider +row_cache_provider: SerializingCacheProvider + +# saved caches +saved_caches_directory: /Users/jsanda/Development/redhat/rhq/rhq-data/storage-1/saved_caches + +# commitlog_sync may be either "periodic" or "batch." +# When in batch mode, Cassandra won't ack writes until the commit log +# has been fsynced to disk. It will wait up to +# commitlog_sync_batch_window_in_ms milliseconds for other writes, before +# performing the sync. +# +# commitlog_sync: batch +# commitlog_sync_batch_window_in_ms: 50 +# +# the other option is "periodic" where writes may be acked immediately +# and the CommitLog is simply synced every commitlog_sync_period_in_ms +# milliseconds. +commitlog_sync: periodic +commitlog_sync_period_in_ms: 10000 + +# The size of the individual commitlog file segments. A commitlog +# segment may be archived, deleted, or recycled once all the data +# in it (potentally from each columnfamily in the system) has been +# flushed to sstables. +# +# The default size is 32, which is almost always fine, but if you are +# archiving commitlog segments (see commitlog_archiving.properties), +# then you probably want a finer granularity of archiving; 8 or 16 MB +# is reasonable. +commitlog_segment_size_in_mb: 32 + +# any class that implements the SeedProvider interface and has a +# constructor that takes a Map<String, String> of parameters will do. +seed_provider: + # Addresses of hosts that are deemed contact points. + # Cassandra nodes use this list of hosts to find each other and learn + # the topology of the ring. You must change this if you are running + # multiple nodes! + - class_name: org.apache.cassandra.locator.SimpleSeedProvider + parameters: + # seeds is actually a comma-delimited list of addresses. 
+ # Ex: "<ip1>,<ip2>,<ip3>" + - seeds: "127.0.0.1" + +# emergency pressure valve: each time heap usage after a full (CMS) +# garbage collection is above this fraction of the max, Cassandra will +# flush the largest memtables. +# +# Set to 1.0 to disable. Setting this lower than +# CMSInitiatingOccupancyFraction is not likely to be useful. +# +# RELYING ON THIS AS YOUR PRIMARY TUNING MECHANISM WILL WORK POORLY: +# it is most effective under light to moderate load, or read-heavy +# workloads; under truly massive write load, it will often be too +# little, too late. +flush_largest_memtables_at: 0.75 + +# emergency pressure valve #2: the first time heap usage after a full +# (CMS) garbage collection is above this fraction of the max, +# Cassandra will reduce cache maximum _capacity_ to the given fraction +# of the current _size_. Should usually be set substantially above +# flush_largest_memtables_at, since that will have less long-term +# impact on the system. +# +# Set to 1.0 to disable. Setting this lower than +# CMSInitiatingOccupancyFraction is not likely to be useful. +reduce_cache_sizes_at: 0.85 +reduce_cache_capacity_to: 0.6 + +# For workloads with more data than can fit in memory, Cassandra's +# bottleneck will be reads that need to fetch data from +# disk. "concurrent_reads" should be set to (16 * number_of_drives) in +# order to allow the operations to enqueue low enough in the stack +# that the OS and drives can reorder them. +# +# On the other hand, since writes are almost never IO bound, the ideal +# number of "concurrent_writes" is dependent on the number of cores in +# your system; (8 * number_of_cores) is a good rule of thumb. +concurrent_reads: 32 +concurrent_writes: 32 + +# Total memory to use for memtables. Cassandra will flush the largest +# memtable when this much memory is used. +# If omitted, Cassandra will set it to 1/3 of the heap. +# memtable_total_space_in_mb: 2048 + +# Total space to use for commitlogs. 
Since commitlog segments are +# mmapped, and hence use up address space, the default size is 32 +# on 32-bit JVMs, and 1024 on 64-bit JVMs. +# +# If space gets above this value (it will round up to the next nearest +# segment multiple), Cassandra will flush every dirty CF in the oldest +# segment and remove it. So a small total commitlog space will tend +# to cause more flush activity on less-active columnfamilies. +# commitlog_total_space_in_mb: 4096 + +# This sets the amount of memtable flush writer threads. These will +# be blocked by disk io, and each one will hold a memtable in memory +# while blocked. If you have a large heap and many data directories, +# you can increase this value for better flush performance. +# By default this will be set to the amount of data directories defined. +#memtable_flush_writers: 1 + +# the number of full memtables to allow pending flush, that is, +# waiting for a writer thread. At a minimum, this should be set to +# the maximum number of secondary indexes created on a single CF. +memtable_flush_queue_size: 4 + +# Whether to, when doing sequential writing, fsync() at intervals in +# order to force the operating system to flush the dirty +# buffers. Enable this to avoid sudden dirty buffer flushing from +# impacting read latencies. Almost always a good idea on SSD:s; not +# necessarily on platters. +trickle_fsync: false +trickle_fsync_interval_in_kb: 10240 + +# TCP port, for commands and data +storage_port: 7100 + +# SSL port, for encrypted communication. Unused unless enabled in +# encryption_options +ssl_storage_port: 7101 + +# Address to bind to and tell other Cassandra nodes to connect to. You +# _must_ change this if you want multiple nodes to be able to +# communicate! +# +# Leaving it blank leaves it up to InetAddress.getLocalHost(). 
This +# will always do the Right Thing *if* the node is properly configured +# (hostname, name resolution, etc), and the Right Thing is to use the +# address associated with the hostname (it might not be). +# +# Setting this to 0.0.0.0 is always wrong. +listen_address: 127.0.0.1 + +# Address to broadcast to other Cassandra nodes +# Leaving this blank will set it to the same value as listen_address +# broadcast_address: 1.2.3.4 + +# Internode authentication backend, implementing IInternodeAuthenticator; +# used to allow/disallow connections from peer nodes. +internode_authenticator: org.rhq.cassandra.auth.RhqInternodeAuthenticator + +# Whether to start the native transport server. +# Currently, only the thrift server is started by default because the native +# transport is considered beta. +# Please note that the address on which the native transport is bound is the +# same as the rpc_address. The port however is different and specified below. +start_native_transport: true +# port for the CQL native transport to listen for clients on +native_transport_port: 9142 +# The minimum and maximum threads for handling requests when the native +# transport is used. The meaning is those is similar to the one of +# rpc_min_threads and rpc_max_threads, though the default differ slightly and +# are the ones below: +# native_transport_min_threads: 16 +native_transport_max_threads: 128 + + +# Whether to start the thrift rpc server. +start_rpc: false +# The address to bind the Thrift RPC service to -- clients connect +# here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if +# you want Thrift to listen on all interfaces. +# +# Leaving this blank has the same effect it does for ListenAddress, +# (i.e. it will be based on the configured hostname of the node). 
+rpc_address: 127.0.0.1 +# port for Thrift to listen for clients on +rpc_port: 9160 + +# enable or disable keepalive on rpc connections +rpc_keepalive: true + +# Cassandra provides three out-of-the-box options for the RPC Server: +# +# sync -> One thread per thrift connection. For a very large number of clients, memory +# will be your limiting factor. On a 64 bit JVM, 128KB is the minimum stack size +# per thread, and that will correspond to your use of virtual memory (but physical memory +# may be limited depending on use of stack space). +# +# hsha -> Stands for "half synchronous, half asynchronous." All thrift clients are handled +# asynchronously using a small number of threads that does not vary with the amount +# of thrift clients (and thus scales well to many clients). The rpc requests are still +# synchronous (one thread per active request). +# +# The default is sync because on Windows hsha is about 30% slower. On Linux, +# sync/hsha performance is about the same, with hsha of course using less memory. +# +# Alternatively, can provide your own RPC server by providing the fully-qualified class name +# of an o.a.c.t.TServerFactory that can create an instance of it. +rpc_server_type: sync + +# Uncomment rpc_min|max_thread to set request pool size limits. +# +# Regardless of your choice of RPC server (see above), the number of maximum requests in the +# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync +# RPC server, it also dictates the number of clients that can be connected at all). +# +# The default is unlimited and thus provide no protection against clients overwhelming the server. You are +# encouraged to set a maximum that makes sense for you in production, but do keep in mind that +# rpc_max_threads represents the maximum number of client requests this server may execute concurrently. 
+# +# rpc_min_threads: 16 +# rpc_max_threads: 2048 + +# uncomment to set socket buffer sizes on rpc connections +# rpc_send_buff_size_in_bytes: +# rpc_recv_buff_size_in_bytes: + +# uncomment to set socket buffer size for internode communication +# Note that when setting this, the buffer size is limited by net.core.wmem_max +# and when not setting it it is defined by net.ipv4.tcp_wmem +# See: +# /proc/sys/net/core/wmem_max +# /proc/sys/net/core/rmem_max +# /proc/sys/net/ipv4/tcp_wmem +# /proc/sys/net/ipv4/tcp_wmem +# and: man tcp +# internode_send_buff_size_in_bytes: +# internode_recv_buff_size_in_bytes: + +# Frame size for thrift (maximum field length). +thrift_framed_transport_size_in_mb: 15 + +# The max length of a thrift message, including all fields and +# internal thrift overhead. +thrift_max_message_length_in_mb: 16 + +# Set to true to have Cassandra create a hard link to each sstable +# flushed or streamed locally in a backups/ subdirectory of the +# Keyspace data. Removing these links is the operator's +# responsibility. +incremental_backups: false + +# Whether or not to take a snapshot before each compaction. Be +# careful using this option, since Cassandra won't clean up the +# snapshots for you. Mostly useful if you're paranoid when there +# is a data format change. +snapshot_before_compaction: false + +# Whether or not a snapshot is taken of the data before keyspace truncation +# or dropping of column families. The STRONGLY advised default of true +# should be used to provide data safety. If you set this flag to false, you will +# lose data on truncation or drop. +auto_snapshot: true + +# Add column indexes to a row after its contents reach this size. +# Increase if your column values are large, or if you have a very large +# number of columns. 
The competing causes are, Cassandra has to +# deserialize this much of the row to read a single column, so you want +# it to be small - at least if you do many partial-row reads - but all +# the index data is read for each access, so you don't want to generate +# that wastefully either. +column_index_size_in_kb: 64 + +# Size limit for rows being compacted in memory. Larger rows will spill +# over to disk and use a slower two-pass compaction process. A message +# will be logged specifying the row key. +in_memory_compaction_limit_in_mb: 64 + +# Number of simultaneous compactions to allow, NOT including +# validation "compactions" for anti-entropy repair. Simultaneous +# compactions can help preserve read performance in a mixed read/write +# workload, by mitigating the tendency of small sstables to accumulate +# during a single long running compactions. The default is usually +# fine and if you experience problems with compaction running too +# slowly or too fast, you should look at +# compaction_throughput_mb_per_sec first. +# +# concurrent_compactors defaults to the number of cores. +# Uncomment to make compaction mono-threaded, the pre-0.8 default. +#concurrent_compactors: 1 + +# Multi-threaded compaction. When enabled, each compaction will use +# up to one thread per core, plus one thread per sstable being merged. +# This is usually only useful for SSD-based hardware: otherwise, +# your concern is usually to get compaction to do LESS i/o (see: +# compaction_throughput_mb_per_sec), not more. +multithreaded_compaction: false + +# Throttles compaction to the given total throughput across the entire +# system. The faster you insert data, the faster you need to compact in +# order to keep the sstable count down, but in general, setting this to +# 16 to 32 times the rate you are inserting data is more than sufficient. +# Setting this to 0 disables throttling. Note that this account for all types +# of compaction, including validation compaction. 
+compaction_throughput_mb_per_sec: 16 + +# Track cached row keys during compaction, and re-cache their new +# positions in the compacted sstable. Disable if you use really large +# key caches. +compaction_preheat_key_cache: true + +# Throttles all outbound streaming file transfers on this node to the +# given total throughput in Mbps. This is necessary because Cassandra does +# mostly sequential IO when streaming data during bootstrap or repair, which +# can lead to saturating the network connection and degrading rpc performance. +# When unset, the default is 200 Mbps or 25 MB/s. +# stream_throughput_outbound_megabits_per_sec: 200 + +# How long the coordinator should wait for read operations to complete +read_request_timeout_in_ms: 20000 +# How long the coordinator should wait for seq or index scans to complete +range_request_timeout_in_ms: 20000 +# How long the coordinator should wait for writes to complete +write_request_timeout_in_ms: 20000 +# How long the coordinator should wait for truncates to complete +# (This can be much longer, because we need to flush all CFs +# to make sure we can clear out anythink in the commitlog that could +# cause truncated data to reappear.) +truncate_request_timeout_in_ms: 60000 +# The default timeout for other, miscellaneous operations +request_timeout_in_ms: 20000 + +# Enable operation timeout information exchange between nodes to accurately +# measure request timeouts, If disabled cassandra will assuming the request +# was forwarded to the replica instantly by the coordinator +# +# Warning: before enabling this property make sure to ntp is installed +# and the times are synchronized between the nodes. +cross_node_timeout: false + +# Enable socket timeout for streaming operation. +# When a timeout occurs during streaming, streaming is retried from the start +# of the current file. This *can* involve re-streaming an important amount of +# data, so you should avoid setting the value too low. 
+# Default value is 0, which never timeout streams. +# streaming_socket_timeout_in_ms: 0 + +# phi value that must be reached for a host to be marked down. +# most users should never need to adjust this. +# phi_convict_threshold: 8 + +# endpoint_snitch -- Set this to a class that implements +# IEndpointSnitch. The snitch has two functions: +# - it teaches Cassandra enough about your network topology to route +# requests efficiently +# - it allows Cassandra to spread replicas around your cluster to avoid +# correlated failures. It does this by grouping machines into +# "datacenters" and "racks." Cassandra will do its best not to have +# more than one replica on the same "rack" (which may not actually +# be a physical location) +# +# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER, +# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS +# ARE PLACED. +# +# Out of the box, Cassandra provides +# - SimpleSnitch: +# Treats Strategy order as proximity. This improves cache locality +# when disabling read repair, which can further improve throughput. +# Only appropriate for single-datacenter deployments. +# - PropertyFileSnitch: +# Proximity is determined by rack and data center, which are +# explicitly configured in cassandra-topology.properties. +# - GossipingPropertyFileSnitch +# The rack and datacenter for the local node are defined in +# cassandra-rackdc.properties and propagated to other nodes via gossip. If +# cassandra-topology.properties exists, it is used as a fallback, allowing +# migration from the PropertyFileSnitch. +# - RackInferringSnitch: +# Proximity is determined by rack and data center, which are +# assumed to correspond to the 3rd and 2nd octet of each node's +# IP address, respectively. Unless this happens to match your +# deployment conventions (as it did Facebook's), this is best used +# as an example of writing a custom Snitch class. +# - Ec2Snitch: +# Appropriate for EC2 deployments in a single Region. 
Loads Region +# and Availability Zone information from the EC2 API. The Region is +# treated as the Datacenter, and the Availability Zone as the rack. +# Only private IPs are used, so this will not work across multiple +# Regions. +# - Ec2MultiRegionSnitch: +# Uses public IPs as broadcast_address to allow cross-region +# connectivity. (Thus, you should set seed addresses to the public +# IP as well.) You will need to open the storage_port or +# ssl_storage_port on the public IP firewall. (For intra-Region +# traffic, Cassandra will switch to the private IP after +# establishing a connection.) +# +# You can use a custom Snitch by setting this to the full class name +# of the snitch, which will be assumed to be on your classpath. +endpoint_snitch: SimpleSnitch + +# controls how often to perform the more expensive part of host score +# calculation +dynamic_snitch_update_interval_in_ms: 100 +# controls how often to reset all host scores, allowing a bad host to +# possibly recover +dynamic_snitch_reset_interval_in_ms: 600000 +# if set greater than zero and read_repair_chance is < 1.0, this will allow +# 'pinning' of replicas to hosts in order to increase cache capacity. +# The badness threshold will control how much worse the pinned host has to be +# before the dynamic snitch will prefer other replicas over it. This is +# expressed as a double which represents a percentage. Thus, a value of +# 0.2 means Cassandra would continue to prefer the static snitch values +# until the pinned host was 20% worse than the fastest. +dynamic_snitch_badness_threshold: 0.1 + +# request_scheduler -- Set this to a class that implements +# RequestScheduler, which will schedule incoming client requests +# according to the specific policy. This is useful for multi-tenancy +# with a single Cassandra cluster. +# NOTE: This is specifically for requests from the client and does +# not affect inter node communication. 
+# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place +# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of +# client requests to a node with a separate queue for each +# request_scheduler_id. The scheduler is further customized by +# request_scheduler_options as described below. +request_scheduler: org.apache.cassandra.scheduler.NoScheduler + +# Scheduler Options vary based on the type of scheduler +# NoScheduler - Has no options +# RoundRobin +# - throttle_limit -- The throttle_limit is the number of in-flight +# requests per client. Requests beyond +# that limit are queued up until +# running requests can complete. +# The value of 80 here is twice the number of +# concurrent_reads + concurrent_writes. +# - default_weight -- default_weight is optional and allows for +# overriding the default which is 1. +# - weights -- Weights are optional and will default to 1 or the +# overridden default_weight. The weight translates into how +# many requests are handled during each turn of the +# RoundRobin, based on the scheduler id. +# +# request_scheduler_options: +# throttle_limit: 80 +# default_weight: 5 +# weights: +# Keyspace1: 1 +# Keyspace2: 5 + +# request_scheduler_id -- An identifier based on which to perform +# the request scheduling. Currently the only valid option is keyspace. +# request_scheduler_id: keyspace + +# index_interval controls the sampling of entries from the primary +# row index in terms of space versus time. The larger the interval, +# the smaller and less effective the sampling will be. In technical +# terms, the interval corresponds to the number of index entries that +# are skipped between taking each sample. All the sampled entries +# must fit in memory. Generally, a value between 128 and 512 here +# coupled with a large key cache size on CFs results in the best trade +# offs. 
This value is not often changed, however if you have many +# very small rows (many to an OS page), then increasing this will +# often lower memory usage without an impact on performance. +index_interval: 128 + +# Enable or disable inter-node encryption +# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that +# users generate their own keys) TLS_RSA_WITH_AES_128_CBC_SHA as the cipher +# suite for authentication, key exchange and encryption of the actual data transfers. +# NOTE: No custom encryption options are enabled at the moment +# The available internode options are : all, none, dc, rack +# +# If set to dc cassandra will encrypt the traffic between the DCs +# If set to rack cassandra will encrypt the traffic between the racks +# +# The passwords used in these options must match the passwords used when generating +# the keystore and truststore. For instructions on generating these files, see: +# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSE... +# +server_encryption_options: + internode_encryption: none + keystore: conf/.keystore + keystore_password: cassandra + truststore: conf/.truststore + truststore_password: cassandra + # More advanced defaults below: + # protocol: TLS + # algorithm: SunX509 + # store_type: JKS + # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA] + # require_client_auth: false + +# enable or disable client/server encryption. +client_encryption_options: + enabled: false + keystore: conf/.keystore + keystore_password: cassandra + # require_client_auth: false + # Set truststore and truststore_password if require_client_auth is true + # truststore: conf/.truststore + # truststore_password: cassandra + # More advanced defaults below: + # protocol: TLS + # algorithm: SunX509 + # store_type: JKS + # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA] + +# internode_compression controls whether traffic between nodes is +# compressed. 
+# can be: all - all traffic is compressed +# dc - traffic between different datacenters is compressed +# none - nothing is compressed. +internode_compression: none + +# Enable or disable tcp_nodelay for inter-dc communication. +# Disabling it will result in larger (but fewer) network packets being sent, +# reducing overhead from the TCP protocol itself, at the cost of increasing +# latency if you block for cross-datacenter responses. +inter_dc_tcp_nodelay: true
commit dac01526994e7de15ef99a72768bd9a81d7b8818 Author: Jay Shaughnessy jshaughn@redhat.com Date: Fri Jul 26 15:44:58 2013 -0400
Add new auth token for bundles
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerBean.java index ac05e20..139a26e 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerBean.java @@ -214,6 +214,12 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
@Override + public void assignBundlesToBundleGroup(Subject subject, int bundleGroupId, int... bundleIds) { + // TODO Auto-generated method stub + + } + + @Override @RequiredPermission(Permission.MANAGE_BUNDLE) public Bundle createBundle(Subject subject, String name, String description, int bundleTypeId) throws Exception { if (null == name || "".equals(name.trim())) { @@ -257,31 +263,6 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
@Override - @RequiredPermission(Permission.MANAGE_BUNDLE_GROUPS) - public BundleGroup createBundleGroup(Subject subject, String name, String description) throws Exception { - if (null == name || "".equals(name.trim())) { - throw new IllegalArgumentException("Invalid bundleGroupName: " + name); - } - - BundleGroupCriteria c = new BundleGroupCriteria(); - c.addFilterName(name); - c.setStrict(true); - if (!bundleManager.findBundleGroupsByCriteria(subject, c).isEmpty()) { - throw new IllegalArgumentException("Invalid bundleGroupName, bundle group already exists with name: " - + name); - } - - // create and add the required Repo. the Repo is a detached object which helps in its eventual - // removal. - BundleGroup bg = new BundleGroup(name); - bg.setDescription(description); - - entityManager.persist(bg); - - return bg; - } - - @Override @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) @RequiredPermission(Permission.MANAGE_BUNDLE) public BundleDeployment createBundleDeploymentInNewTrans(Subject subject, int bundleVersionId, @@ -1336,6 +1317,31 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
@Override + @RequiredPermission(Permission.MANAGE_BUNDLE_GROUPS) + public BundleGroup createBundleGroup(Subject subject, String name, String description) throws Exception { + if (null == name || "".equals(name.trim())) { + throw new IllegalArgumentException("Invalid bundleGroupName: " + name); + } + + BundleGroupCriteria c = new BundleGroupCriteria(); + c.addFilterName(name); + c.setStrict(true); + if (!bundleManager.findBundleGroupsByCriteria(subject, c).isEmpty()) { + throw new IllegalArgumentException("Invalid bundleGroupName, bundle group already exists with name: " + + name); + } + + // create and add the required Repo. the Repo is a detached object which helps in its eventual + // removal. + BundleGroup bg = new BundleGroup(name); + bg.setDescription(description); + + entityManager.persist(bg); + + return bg; + } + + @Override @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) @RequiredPermission(Permission.MANAGE_BUNDLE) public BundleResourceDeployment createBundleResourceDeployment(Subject subject, int bundleDeploymentId, @@ -1642,7 +1648,15 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot
@Override public PageList<Bundle> findBundlesByCriteria(Subject subject, BundleCriteria criteria) { + CriteriaQueryGenerator generator = new CriteriaQueryGenerator(subject, criteria); + + if (!authorizationManager.hasGlobalPermission(subject, Permission.VIEW_BUNDLES)) { + + generator.setAuthorizationResourceFragment(CriteriaQueryGenerator.AuthorizationTokenType.BUNDLE, null, + subject.getId()); + } + CriteriaQueryRunner<Bundle> queryRunner = new CriteriaQueryRunner<Bundle>(criteria, generator, entityManager); return queryRunner.execute(); } @@ -1683,6 +1697,27 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
@Override + @RequiredPermission(Permission.MANAGE_BUNDLE_GROUPS) + public void deleteBundleGroups(Subject subject, int... bundleGroupIds) throws Exception { + + for (int bundleGroupId : bundleGroupIds) { + BundleGroup bundleGroup = this.entityManager.find(BundleGroup.class, bundleGroupIds); + if (null == bundleGroup) { + return; + } + + // unassign any bundles assigned to the bundle group + for (Bundle b : bundleGroup.getBundles()) { + bundleGroup.removeBundle(b); + } + bundleGroup = entityManager.merge(bundleGroup); + + // now remove the bundle group + entityManager.remove(bundleGroup); + } + } + + @Override public PageList<BundleWithLatestVersionComposite> findBundlesWithLatestVersionCompositesByCriteria(Subject subject, BundleCriteria criteria) {
@@ -1780,27 +1815,6 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
@Override - @RequiredPermission(Permission.MANAGE_BUNDLE_GROUPS) - public void deleteBundleGroups(Subject subject, int... bundleGroupIds) throws Exception { - - for (int bundleGroupId : bundleGroupIds) { - BundleGroup bundleGroup = this.entityManager.find(BundleGroup.class, bundleGroupIds); - if (null == bundleGroup) { - return; - } - - // unassign any bundles assigned to the bundle group - for (Bundle b : bundleGroup.getBundles()) { - bundleGroup.removeBundle(b); - } - bundleGroup = entityManager.merge(bundleGroup); - - // now remove the bundle group - entityManager.remove(bundleGroup); - } - } - - @Override @RequiredPermission(Permission.MANAGE_BUNDLE) public void deleteBundleVersion(Subject subject, int bundleVersionId, boolean deleteBundleIfEmpty) throws Exception { BundleVersion bundleVersion = this.entityManager.find(BundleVersion.class, bundleVersionId); @@ -1848,6 +1862,12 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot return; }
+ @Override + public void unassignBundlesFromBundleGroup(Subject subject, int bundleGroupId, int... bundleIds) { + // TODO Auto-generated method stub + + } + private void safeClose(InputStream is) { if (null != is) { try { diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/CriteriaQueryGenerator.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/CriteriaQueryGenerator.java index 574fe73..e65d9f3 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/CriteriaQueryGenerator.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/CriteriaQueryGenerator.java @@ -68,7 +68,8 @@ public final class CriteriaQueryGenerator {
public enum AuthorizationTokenType { RESOURCE, // specifies the resource alias to join on for standard res-group-role-subject authorization checking - GROUP; // specifies the group alias to join on for standard group-role-subject authorization checking + GROUP, // specifies the group alias to join on for standard group-role-subject authorization checking + BUNDLE; // specifies the bundle alias to join on for standard bundle-bundleGroup-role-subject authorization checking }
private Criteria criteria; @@ -118,10 +119,13 @@ public final class CriteriaQueryGenerator { String defaultFragment = null; if (type == AuthorizationTokenType.RESOURCE) { defaultFragment = "resource"; + setAuthorizationResourceFragment(type, defaultFragment, subjectId); } else if (type == AuthorizationTokenType.GROUP) { defaultFragment = "group"; + setAuthorizationResourceFragment(type, defaultFragment, subjectId); + } else if (type == AuthorizationTokenType.BUNDLE) { + setAuthorizationBundleFragment(subjectId); } - setAuthorizationResourceFragment(type, defaultFragment, subjectId); }
private String fixFilterOverride(String expression, String fieldName) { @@ -173,9 +177,9 @@ public final class CriteriaQueryGenerator { + " does not yet support generating queries for '" + type + "' token types"); }
- // If the query results are narrowed by requiredParams generate the fragment now. It's done + // If the query results are narrowed by requiredPerms generate the fragment now. It's done // here for two reasons. First, it seems to make sense to apply this only when an authFragment is - // being used. Second, because ond day the query may be less brute force and may modify or + // being used. Second, because one day the query may be less brute force and may modify or // leverage the joinFragment above. But, after extensive trying a more elegant // query could not be constructed due to Hibernate limitations. So, for now, here it is... List<Permission> requiredPerms = this.criteria.getRequiredPermissions(); @@ -230,6 +234,39 @@ public final class CriteriaQueryGenerator { return customAuthzFragment; }
+ public void setAuthorizationBundleFragment(int subjectId) { + this.authorizationSubjectId = subjectId; + + String fragment = "bundle"; + String customAuthzFragment = "" // + + "( %aliasWithFragment%.id IN ( SELECT %innerAlias%.id " + NL // + + " FROM %alias% innerAlias " + NL // + + " JOIN %innerAlias%.bundleGroups g JOIN g.roles r JOIN r.subjects s " + NL // + + " WHERE s.id = %subjectId% ) )" + NL; // + String aliasReplacement = criteria.getAlias() + (fragment != null ? "." + fragment : ""); + String innerAliasReplacement = "innerAlias" + (fragment != null ? "." + fragment : ""); + customAuthzFragment = customAuthzFragment.replace("%alias%", criteria.getAlias()); + customAuthzFragment = customAuthzFragment.replace("%aliasWithFragment%", aliasReplacement); + customAuthzFragment = customAuthzFragment.replace("%innerAlias%", innerAliasReplacement); + customAuthzFragment = customAuthzFragment.replace("%subjectId%", String.valueOf(subjectId)); + + // If the query results are narrowed by requiredPerms generate the fragment now. It's done + // here for two reasons. First, it seems to make sense to apply this only when an authFragment is + // being used. Second, because one day the query may be less brute force and may modify or + // leverage the joinFragment above. But, after extensive trying a more elegant + // query could not be constructed due to Hibernate limitations. So, for now, here it is... 
+ List<Permission> requiredPerms = this.criteria.getRequiredPermissions(); + if (!(null == requiredPerms || requiredPerms.isEmpty())) { + this.authorizationPermsFragment = "" // + + "( SELECT COUNT(DISTINCT p)" + NL // + + " FROM Subject innerSubject" + NL // + + " JOIN innerSubject.roles r" + NL // + + " JOIN r.permissions p" + NL // + + " WHERE innerSubject.id = " + this.authorizationSubjectId + NL // + + " AND p IN ( :requiredPerms ) ) = :requiredPermsSize" + NL; + } + } + public String getParameterReplacedQuery(boolean countQuery) { String query = getQueryString(countQuery);
commit f7fea2d432ad92b49574387af1964245acd490aa Author: John Mazzitelli mazz@redhat.com Date: Fri Jul 26 17:01:56 2013 -0400
add createInitialBundleVersion APIs
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerRemote.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerRemote.java index 8b4dae6..cffe363 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerRemote.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerRemote.java @@ -223,6 +223,9 @@ public interface BundleManagerRemote { * bundle the bundle will be implicitly created. The bundle type is discovered by the bundle server * plugin that can parse the recipe. * </p> + * If this bundle version is the initial version of a new bundle that needs to be created, the subject must + * have Global.VIEW_BUNDLES because the new bundle will not be associated with any bundle group. + * </p> * Required Permissions: Either: * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG @@ -236,12 +239,35 @@ public interface BundleManagerRemote { BundleVersion createBundleVersionViaRecipe(Subject subject, String recipe) throws Exception;
/** + * Like #createBundleVersionViaRecipe except this method will assume this is a new bundle and is responsible + * for creating the bundle as well as the bundle version. The caller can indicate which bundle group the new bundle + * should be assigned to. + * If bundleGroupId is 0, then the new bundle will not be associated with any bundle group - this is only + * allowed if the caller has the permission Global.VIEW_BUNDLES. + * </p> + * Required Permissions: Either: + * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES + * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * + * @param subject user that must have proper permissions + * @param bundleGroupId identifies the bundle group that the new bundle will be associated with; 0 if no group + * @param recipe the recipe that defines the bundle version to be created + * @return the persisted BundleVersion with alot of the internal relationships filled in to help the caller + * understand all that this method did. + */ + BundleVersion createInitialBundleVersionViaRecipe(Subject subject, int bundleGroupId, String recipe) throws Exception; + + /** * Creates a bundle version based on a Bundle Distribution file. Typically a zip file, the bundle distribution * contains the recipe for a supported bundle type, along with 0, 1 or more bundle files that will be associated * with the bundle version. The recipe specifies the bundle name, version, version name and version description. * If this is the initial version for the named bundle the bundle will be implicitly created. The bundle type * is discovered by inspecting the distribution file. 
* </p> + * If this bundle version is the initial version of a new bundle that needs to be created, the subject must + * have Global.VIEW_BUNDLES because the new bundle will not be associated with any bundle group. + * </p> * Required Permissions: Either: * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG @@ -255,12 +281,36 @@ public interface BundleManagerRemote { BundleVersion createBundleVersionViaFile(Subject subject, File distributionFile) throws Exception;
/** + * Like #createBundleVersionViaFile except this method will assume this is a new bundle and is responsible + * for creating the bundle as well as the bundle version. The caller can indicate which bundle group the new bundle + * should be assigned to. + * If bundleGroupId is 0, then the new bundle will not be associated with any bundle group - this is only + * allowed if the caller has the permission Global.VIEW_BUNDLES. + * </p> + * Required Permissions: Either: + * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES + * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * + * @param subject user that must have proper permissions + * @param bundleGroupId identifies the bundle group that the new bundle will be associated with; 0 if no group + * @param distributionFile a local Bundle Distribution file. It must be read accessible by the RHQ server process. + * @return the persisted BundleVersion with alot of the internal relationships filled in to help the caller + * understand all that this method did. Bundle files specifically are returned. + * @throws Exception + */ + BundleVersion createInitialBundleVersionViaFile(Subject subject, int bundleGroupId, File distributionFile) throws Exception; + + /** * Creates a bundle version based on the actual bytes of a Bundle Distribution file. This is essentially * the same as {@link #createBundleVersionViaFile(Subject, File)} but the caller is providing the actual * bytes of the file as opposed to the file itself. * WARNING: obviously, this requires the entire distribution file to have been loaded fully in memory. * For very large distribution files, this could cause OutOfMemoryErrors. 
* </p> + * If this bundle version is the initial version of a new bundle that needs to be created, the subject must + * have Global.VIEW_BUNDLES because the new bundle will not be associated with any bundle group. + * </p> * Required Permissions: Either: * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG @@ -274,6 +324,27 @@ public interface BundleManagerRemote { BundleVersion createBundleVersionViaByteArray(Subject subject, byte[] fileBytes) throws Exception;
/** + * Like #createBundleVersionViaByteArray except this method will assume this is a new bundle and is responsible + * for creating the bundle as well as the bundle version. The caller can indicate which bundle group the new bundle + * should be assigned to. + * If bundleGroupId is 0, then the new bundle will not be associated with any bundle group - this is only + * allowed if the caller has the permission Global.VIEW_BUNDLES. + * </p> + * Required Permissions: Either: + * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES + * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * + * @param subject user that must have proper permissions + * @param bundleGroupId identifies the bundle group that the new bundle will be associated with; 0 if no group + * @param fileBytes the file bits that make up the entire bundle distribution file + * @return the persisted BundleVersion with a lot of the internal relationships filled in to help the caller + * understand all that this method did. Bundle files specifically are returned. + * @throws Exception + */ + BundleVersion createInitialBundleVersionViaByteArray(Subject subject, int bundleGroupId, byte[] fileBytes) throws Exception; + + /** * Creates a bundle version based on a Bundle Distribution file. Typically a zip file, the bundle distribution * contains the recipe for a supported bundle type, along with 0, 1 or more bundle files that will be associated * with the bundle version. The recipe specifies the bundle name, version, version name and version description. @@ -282,6 +353,9 @@ public interface BundleManagerRemote { * <br/></br> * Note, if the file is local it is more efficient to use {@link #createBundleVersionViaFile(Subject,File)}. 
* </p> + * If this bundle version is the initial version of a new bundle that needs to be created, the subject must + * have Global.VIEW_BUNDLES because the new bundle will not be associated with any bundle group. + * </p> * Required Permissions: Either: * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG @@ -297,9 +371,34 @@ public interface BundleManagerRemote { BundleVersion createBundleVersionViaURL(Subject subject, String distributionFileUrl) throws Exception;
/** + * Like #createBundleVersionViaURL except this method will assume this is a new bundle and is responsible + * for creating the bundle as well as the bundle version. The caller can indicate which bundle group the new bundle + * should be assigned to. + * If bundleGroupId is 0, then the new bundle will not be associated with any bundle group - this is only + * allowed if the caller has the permission Global.VIEW_BUNDLES. + * </p> + * Required Permissions: Either: + * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES + * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * + * @param subject user that must have proper permissions + * @param bundleGroupId identifies the bundle group that the new bundle will be associated with; 0 if no group + * @param distributionFileUrl a URL String to the Bundle Distribution file. It must be live, resolvable and read accessible + * by the RHQ server process. + * @return the persisted BundleVersion with a lot of the internal relationships filled in to help the caller + * understand all that this method did. Bundle files specifically are returned. + * @throws Exception + */ + BundleVersion createInitialBundleVersionViaURL(Subject subject, int bundleGroupId, String distributionFileUrl) throws Exception; + + /** * A version of the {@link #createBundleVersionViaURL(org.rhq.core.domain.auth.Subject, String)} that accepts a * username and password for basic authentication on the HTTP URLs. * </p> + * If this bundle version is the initial version of a new bundle that needs to be created, the subject must + * have Global.VIEW_BUNDLES because the new bundle will not be associated with any bundle group. 
+ * </p> * Required Permissions: Either: * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG @@ -311,6 +410,22 @@ public interface BundleManagerRemote { String password) throws Exception;
/** + * Like #createBundleVersionViaURL except this method will assume this is a new bundle and is responsible + * for creating the bundle as well as the bundle version. The caller can indicate which bundle group the new bundle + * should be assigned to. + * If bundleGroupId is 0, then the new bundle will not be associated with any bundle group - this is only + * allowed if the caller has the permission Global.VIEW_BUNDLES. + * </p> + * Required Permissions: Either: + * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES + * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * + * @see #createBundleVersionViaURL(org.rhq.core.domain.auth.Subject, String) + */ + BundleVersion createInitialBundleVersionViaURL(Subject subject, int bundleGroupId, String distributionFileUrl, String username, String password) throws Exception; + + /** * Remove everything associated with the Bundles with the exception of files laid down by related deployments. * Deployed files are left as is on the deployment platforms but the bundle mechanism will no longer track * the deployment of all bundles that have been deleted.
commit d01c43b697ef67f598016824d6cd47b5de2ac83e Author: John Sanda jsanda@redhat.com Date: Fri Jul 26 15:42:46 2013 -0400
add storage node shutdown operation that uses pid file
There can be problems with getting the actual, current pid from Sigar, so the storage node plugin will first attempt to get the pid from the pid file which should exist on disk. If that fails, then we will attempt to get the pid from Sigar.
This implementation for shutdown does not work for Windows so need to revisit it to add in Windows support. We may even consider delegating to rhqtcl.
diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java index 3e55a93..d648ad8 100644 --- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java +++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java @@ -246,8 +246,6 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent
long pid = process.getPid(); try { - getEmsConnection().close(); - process.kill("KILL");
Configuration pluginConfig = getResourceContext().getPluginConfiguration(); @@ -266,7 +264,7 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent } }
- private void waitForNodeToGoDown() throws InterruptedException { + protected void waitForNodeToGoDown() throws InterruptedException { if (OperatingSystem.getInstance().getName().equals(OperatingSystem.NAME_MACOSX)) { // See this thread on VMWare forum: http://communities.vmware.com/message/2187972#2187972 // Unfortunately there is no work around for this failure on Mac OSX so the method will silently return on diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java index 125f4d2..5933093 100644 --- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java @@ -28,6 +28,7 @@ package org.rhq.plugins.storage; import java.io.File; import java.io.FileInputStream; import java.io.FileNotFoundException; +import java.io.FileReader; import java.io.FileWriter; import java.io.IOException; import java.io.StringReader; @@ -39,6 +40,7 @@ import java.util.Set;
import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.hyperic.sigar.SigarException; import org.mc4j.ems.connection.EmsConnection; import org.mc4j.ems.connection.bean.EmsBean; import org.mc4j.ems.connection.bean.attribute.EmsAttribute; @@ -54,9 +56,11 @@ import org.rhq.core.domain.configuration.PropertyMap; import org.rhq.core.domain.configuration.PropertySimple; import org.rhq.core.pluginapi.configuration.ConfigurationFacet; import org.rhq.core.pluginapi.configuration.ConfigurationUpdateReport; +import org.rhq.core.pluginapi.inventory.ProcessScanResult; import org.rhq.core.pluginapi.inventory.ResourceContext; import org.rhq.core.pluginapi.operation.OperationFacet; import org.rhq.core.pluginapi.operation.OperationResult; +import org.rhq.core.system.ProcessInfo; import org.rhq.core.util.StringUtil; import org.rhq.core.util.exception.ThrowableUtil; import org.rhq.core.util.file.FileUtil; @@ -105,11 +109,80 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper return updateKnownNodes(parameters); } else if (name.equals("prepareForBootstrap")) { return prepareForBootstrap(parameters); + } else if (name.equals("shutdown")) { + return shutdownStorageNode(); } else { return super.invokeOperation(name, parameters); } }
+ private OperationResult shutdownStorageNode() { + OperationResult result = new OperationResult(); + File binDir = new File(getBasedir(), "bin"); + File pidFile = new File(binDir, "cassandra.pid"); + + try { + if (pidFile.exists()) { + long pid = readPidFile(pidFile); + log.info("Shutting down storage node with pid " + pid); + ProcessInfo process = findProcessInfo(pid); + if (process != null) { + try { + process.kill("KILL"); + waitForNodeToGoDown(); + pidFile.delete(); + result.setSimpleResult("Successfully storage node with pid " + pid); + } catch (SigarException e) { + log.error("Failed to delete storage node with pid " + process.getPid(), e); + result.setErrorMessage("Failed to delete storage node with pid " + pid + ": " + + ThrowableUtil.getAllMessages(e)); + } + } else { + log.warn("Could not find process info for pid " + pid); + result = shutdownUsingNativeProcessInfo(); + } + + } else { + log.warn("Did not find pid file " + pidFile + ". It should not be modified, deleted, or moved."); + result = shutdownUsingNativeProcessInfo(); + } + } catch (FileNotFoundException e) { + log.error("Could not read pid file " + pidFile, e); + result.setErrorMessage("Could not read pid file " + pidFile + ": " + ThrowableUtil.getAllMessages(e)); + } catch (InterruptedException e) { + log.warn("The shutdown operation was cancelled or interrupted. 
This interruption occurred while trying " + + "to verify that the storage node process has exited."); + result.setErrorMessage("The operation was cancelled or interrupted while trying to verify that the " + + "storage node process has exited."); + } + return result; + } + + private long readPidFile(File pidFile) throws FileNotFoundException { + return Long.parseLong(StreamUtil.slurp(new FileReader(pidFile))); + } + + private ProcessInfo findProcessInfo(long pid) { + List<ProcessScanResult> scanResults = getResourceContext().getNativeProcessesForType(); + + for (ProcessScanResult scanResult : scanResults) { + if (scanResult.getProcessInfo().getPid() == pid) { + return scanResult.getProcessInfo(); + } + } + return null; + } + + private OperationResult shutdownUsingNativeProcessInfo() throws InterruptedException { + log.warn("Could not obtain process info from pid file"); + log.info("Obtaining process info from the system to perform the shutdown"); + + OperationResult result = shutdownNode(); + waitForNodeToGoDown(); + + return result; + } + private OperationResult updateConfiguration(Configuration params) { boolean restartIsRequired = false;
@@ -237,7 +310,7 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper OperationResult result = new OperationResult();
log.info("Stopping storage node"); - OperationResult stopNodeResult = stopNode(); + OperationResult stopNodeResult = shutdownStorageNode(); if (stopNodeResult.getErrorMessage() != null) { log.error("Failed to stop storage node " + this + " Cannot prepare the node for bootstrap which means " + "that the storage node cannot join the cluster. Make sure the storage node is not running and retry " + diff --git a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java index f0744a4..eb4d545 100644 --- a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java +++ b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java @@ -231,6 +231,26 @@ public class StorageNodeComponentITest { assertNodeIsUp("Expected " + storageNode + " to be up after restarting it."); }
+ @Test(dependsOnMethods = "restartStorageNode") + public void prepareForBootstrap() { + Configuration params = Configuration.builder().addSimple("cqlPort", 9242).addSimple("gossipPort", 7200) + .openList("storageNodeIPAddresses", "storageNodeIPAddresse").addSimples("127.0.0.1", "127.0.0.2") + .closeList().build(); + + OperationManager operationManager = PluginContainer.getInstance().getOperationManager(); + OperationServicesAdapter operationsService = new OperationServicesAdapter(operationManager); + + long timeout = 1000 * 60; + OperationContextImpl operationContext = new OperationContextImpl(storageNode.getId()); + OperationServicesResult result = operationsService.invokeOperation(operationContext, "prepareForBootstrap", + params, timeout); + + assertEquals(result.getResultCode(), OperationServicesResultCode.SUCCESS, "The operation failed: " + + result.getErrorStackTrace()); + + assertNodeIsUp("Expected " + storageNode + " to be up after the prepareForBootstrap operation completes."); + } + private void assertNodeIsUp(String msg) { executeAvailabilityScan();
@@ -251,7 +271,6 @@ public class StorageNodeComponentITest {
private Availability getAvailability() { InventoryManager inventoryManager = PluginContainer.getInstance().getInventoryManager(); -// return inventoryManager.getAvailabilityIfKnown(storageNode); return inventoryManager.getCurrentAvailability(storageNode); }
commit a9c76478054bb558012e2706a6d64435f9588d7e Author: John Mazzitelli mazz@redhat.com Date: Fri Jul 26 15:45:05 2013 -0400
add some javadoc and new apis for bundle groups
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerRemote.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerRemote.java index 0ffdc1a..8b4dae6 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerRemote.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerRemote.java @@ -72,7 +72,12 @@ public interface BundleManagerRemote { /** * Adds a BundleFile to the BundleVersion and implicitly creates the backing PackageVersion. If the PackageVersion * already exists use {@link #addBundleFileViaPackageVersion(Subject, int, String, int)} - * + * </p> + * Required Permissions: Either: + * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES + * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * * @param subject user that must have proper permissions * @param bundleVersionId id of the BundleVersion incorporating this BundleFile * @param name name of the BundleFile (and the resulting Package) @@ -89,7 +94,12 @@ public interface BundleManagerRemote { * A convenience method taking a byte array as opposed to a stream for the file bits. * WARNING: obviously, this requires the entire bundle file to have been loaded fully in memory. * For very large files, this could cause OutOfMemoryErrors. 
- * + * </p> + * Required Permissions: Either: + * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES + * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * * @see {@link #addBundleFile(Subject, int, String, String, Architecture, InputStream)} */ BundleFile addBundleFileViaByteArray(Subject subject, int bundleVersionId, String name, String version, @@ -97,7 +107,12 @@ public interface BundleManagerRemote {
/** * A convenience method taking a URL String whose content will be streamed to the server and used for the file bits. - * + * </p> + * Required Permissions: Either: + * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES + * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * * @see #addBundleFile(Subject, int, String, String, Architecture, InputStream) */ BundleFile addBundleFileViaURL(Subject subject, int bundleVersionId, String name, String version, @@ -106,6 +121,11 @@ public interface BundleManagerRemote { /** * A variant of {@link #addBundleFileViaURL(Subject, int, String, String, Architecture, String)} supporting the * HTTP basic authentication. + * </p> + * Required Permissions: Either: + * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES + * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * * @see #addBundleFileViaURL(Subject, int, String, String, Architecture, String) */ @@ -114,19 +134,40 @@ public interface BundleManagerRemote {
/** * A convenience method taking an existing PackageVersion as opposed to a stream for the file bits. - * + * </p> + * Required Permissions: Either: + * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES + * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * * @see {@link #addBundleFile(Subject, int, String, String, Architecture, InputStream)} */ BundleFile addBundleFileViaPackageVersion(Subject subject, int bundleVersionId, String name, int packageVersionId) throws Exception;
- //void assignBundlesToBundleGroup - + /** + * Assign the specified bundles to the specified bundle group. + * </p> + * Requires VIEW permission for the relevant bundle and either: + * - Global.CREATE_BUNDLE + * - BundleGroup.CREATE_BUNDLES_IN_GROUP or BundleGroup.ASSIGN_BUNDLES_TO_GROUP for the relevant bundle group + * + * @param subject + * @param bundleGroupId + * @param bundleIds + */ + void assignBundlesToBundleGroup(Subject subject, int bundleGroupId, int[] bundleIds); + /** * Create a new bundle deployment. Note that bundle deployment names are generated by this * call. This provides useful, uniform naming for display. An optional, custom description * can be added. This call defines a deployment. The defined deployment can then be * scheduled in a separate call. + * </p> + * Required Permissions: Either: + * - Global.DEPLOY_BUNDLES and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) + * - Resource.DEPLOY_BUNDLES_TO_GROUP and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) + * * @param subject user that must have proper permissions * @param bundleVersionId the BundleVersion being deployed by this deployment * @param bundleDestinationId the BundleDestination for the deployment @@ -141,7 +182,11 @@ public interface BundleManagerRemote {
/** * Creates a bundle destination that describes a target for the bundle deployments. - * + * </p> + * Required Permissions: Either: + * - Global.DEPLOY_BUNDLES and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) + * - Resource.DEPLOY_BUNDLES_TO_GROUP and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) + * * @param subject user must have MANAGE_INVENTORY permission * @param bundleId the Bundle to be deployed to this Destination * @param name a name for this destination. not null or empty @@ -161,7 +206,8 @@ public interface BundleManagerRemote { /** * Create a new bundle group. * <p/> - * Requires Global.MANAGE_BUNDLE_GROUP permission. + * Require Permissions: + * - Global.MANAGE_BUNDLE_GROUPS * * @param subject user that must have proper permissions * @param name the unique bundle group name @@ -176,7 +222,12 @@ public interface BundleManagerRemote { * version, version name and version description. If this is the initial version for the named * bundle the bundle will be implicitly created. The bundle type is discovered by the bundle server * plugin that can parse the recipe. - * + * </p> + * Required Permissions: Either: + * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES + * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * * @param subject user that must have proper permissions * @param recipe the recipe that defines the bundle version to be created * @return the persisted BundleVersion with alot of the internal relationships filled in to help the caller @@ -190,7 +241,12 @@ public interface BundleManagerRemote { * with the bundle version. The recipe specifies the bundle name, version, version name and version description. 
* If this is the initial version for the named bundle the bundle will be implicitly created. The bundle type * is discovered by inspecting the distribution file. - * + * </p> + * Required Permissions: Either: + * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES + * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * * @param subject user that must have proper permissions * @param distributionFile a local Bundle Distribution file. It must be read accessible by the RHQ server process. * @return the persisted BundleVersion with alot of the internal relationships filled in to help the caller @@ -204,6 +260,11 @@ public interface BundleManagerRemote { * bytes of the file as opposed to the file itself. * WARNING: obviously, this requires the entire distribution file to have been loaded fully in memory. * For very large distribution files, this could cause OutOfMemoryErrors. + * </p> + * Required Permissions: Either: + * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES + * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * * @param subject user that must have proper permissions * @param fileBytes the file bits that make up the entire bundle distribution file @@ -220,7 +281,12 @@ public interface BundleManagerRemote { * is discovered by inspecting the distribution file. * <br/></br> * Note, if the file is local it is more efficient to use {@link #createBundleVersionViaFile(Subject,File)}. 
- * + * </p> + * Required Permissions: Either: + * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES + * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * * @param subject user that must have proper permissions * @param distributionFileUrl a URL String to the Bundle Distribution file. It must be live, resolvable and read accessible * by the RHQ server process. @@ -233,6 +299,11 @@ public interface BundleManagerRemote { /** * A version of the {@link #createBundleVersionViaURL(org.rhq.core.domain.auth.Subject, String)} that accepts a * username and password for basic authentication on the HTTP URLs. + * </p> + * Required Permissions: Either: + * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES + * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG * * @see #createBundleVersionViaURL(org.rhq.core.domain.auth.Subject, String) */ @@ -243,7 +314,13 @@ public interface BundleManagerRemote { * Remove everything associated with the Bundles with the exception of files laid down by related deployments. * Deployed files are left as is on the deployment platforms but the bundle mechanism will no longer track * the deployment of all bundles that have been deleted. - * + * The bundles that are deleted will be removed from all bundle groups that it was a member of. 
+ * </p> + * Required Permissions: Either: + * - Global.DELETE_BUNDLES and Global.VIEW_BUNDLES + * - Global.DELETE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * - BundleGroup.DELETE_BUNDLES_FROM_GROUP for bundle group BG and the relevant bundle is assigned to BG + * * @param subject user that must have proper permissions * @param bundleIds IDs of all bundles to be deleted * @throws Exception if any part of the removal fails. @@ -254,7 +331,13 @@ public interface BundleManagerRemote { * Remove everything associated with the Bundle with the exception of files laid down by related deployments. * Deployed files are left as is on the deployment platforms but the bundle mechanism will no longer track * the deployment. - * + * The bundles that are deleted will be removed from all bundle groups that it was a member of. + * </p> + * Required Permissions: Either: + * - Global.DELETE_BUNDLES and Global.VIEW_BUNDLES + * - Global.DELETE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * - BundleGroup.DELETE_BUNDLES_FROM_GROUP for bundle group BG and the relevant bundle is assigned to BG + * * @param subject user that must have proper permissions * @param bundleId the id of the bundle to remove * @throws Exception if any part of the removal fails. @@ -264,19 +347,26 @@ public interface BundleManagerRemote { /** * Delete a bundle group. Any currently assigned bundles will be removed but are not deleted. * <p/> - * Requires Global.MANAGE_BUNDLE_GROUP permission. + * Required Permissions: + * - Global.MANAGE_BUNDLE_GROUPS * * @param subject user that must have proper permissions - * @param id the bundle group id + * @param ids the bundle group id * @throws Exception */ - void deleteBundleGroups(Subject subject, int... id) throws Exception; + void deleteBundleGroups(Subject subject, int[] ids) throws Exception;
/** * Remove everything associated with the BundleVersion with the exception of files laid down by related deployments. * Deployed files are left as is on the deployment platforms but the bundle mechanism will no longer track * the deployment. - * + * The deleted bundle version will no longer exist in any bundle group. + * </p> + * Required Permissions: Either: + * - Global.DELETE_BUNDLES and Global.VIEW_BUNDLES + * - Global.DELETE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * - BundleGroup.DELETE_BUNDLES_FROM_GROUP for bundle group BG and the relevant bundle is assigned to BG + * * @param subject user that must have proper permissions * @param bundleVersionId the id of the bundle version to remove * @param deleteBundleIfEmpty if <code>true</code> and if this method deletes the last bundle version for its @@ -310,7 +400,13 @@ public interface BundleManagerRemote { /** * Determine the files required for a BundleVersion and return all of the filenames or optionally, just those * that lack BundleFiles for the BundleVersion. The recipe may be parsed as part of this call. - * + * This is needed as part of the bundle creation workflow, hence why creation permissions are needed. + * </p> + * Required Permissions: Either: + * - Global.CREATE_BUNDLES and Global.VIEW_BUNDLES + * - Global.CREATE_BUNDLES and BundleGroup.VIEW_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * - BundleGroup.CREATE_BUNDLES_IN_GROUP for bundle group BG and the relevant bundle is assigned to BG + * * @param subject user that must have proper permissions * @param bundleVersionId the BundleVersion being queried * @param withoutBundleFileOnly if true omit any filenames that already have a corresponding BundleFile for @@ -322,23 +418,11 @@ public interface BundleManagerRemote { throws Exception;
/** - * Similar to {@link #getBundleVersionFilenames(Subject, int, boolean)}, this will determine the files required for a BundleVersion and return - * all of the filenames, with the values of the map being true if they already exist or false if they lack BundleFile representation - * in the BundleVersion. - * - * @param subject user that must have proper permissions - * @param bundleVersionId the BundleVersion being queried - * @return map keyed on filenames whose value indicates if a bundle file exists for the file or not - * @throws Exception - */ - /* comment back in when someone writes an adapter to support Map un/marshalling - Map<String, Boolean> getAllBundleVersionFilenames( - Subject subject, - int bundleVersionId) throws Exception; - */ - - /** * Purges the destination's live deployment content from the remote platforms. + * </p> + * Required Permissions: Either: + * - Global.DEPLOY_BUNDLES and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) + * - Resource.DEPLOY_BUNDLES_TO_GROUP and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) * * @param subject user that must have proper permissions * @param bundleDestinationId the ID of the destination that is to be purged of bundle content @@ -351,7 +435,11 @@ public interface BundleManagerRemote { * complete. The returned BundleDeployment can be used to track the history of the individual deployments. * <br/><br/> * TODO: Add the scheduling capability, currently it's Immediate. 
- * <br/> + * </p> + * Required Permissions: Either: + * - Global.DEPLOY_BUNDLES and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) + * - Resource.DEPLOY_BUNDLES_TO_GROUP and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) + * * @param subject user that must have proper permissions * @param bundleDeploymentId the BundleDeployment being used to guide the deployments * @param isCleanDeployment if true perform a wipe of the deploy directory prior to the deployment. If false @@ -371,7 +459,11 @@ public interface BundleManagerRemote { * call. This provides useful, uniform naming for display. An optional, custom description can be added. * <br/><br/> * TODO: Add the scheduling capability, currently it's Immediate. - * <br/> + * </p> + * Required Permissions: Either: + * - Global.DEPLOY_BUNDLES and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) + * - Resource.DEPLOY_BUNDLES_TO_GROUP and a view of the relevant bundle and a view of the relevant resource group (may involve multiple roles) + * * @param subject user that must have proper permissions * @param deploymentDescription an optional longer description describing this deployment. If null defaults * to the description of the previous deployment. @@ -383,4 +475,16 @@ public interface BundleManagerRemote { BundleDeployment scheduleRevertBundleDeployment(Subject subject, int bundleDestinationId, String deploymentDescription, boolean isCleanDeployment) throws Exception;
+ /** + * Unassign the specified bundles from the specified bundle group. + * </p> + * Requires VIEW permission for the relevant bundles and either: + * - Global.DELETE_BUNDLE + * - BundleGroup.DELETE_BUNDLES_FROM_GROUP or BundleGroup.UNASSIGN_BUNDLES_FROM_GROUP for the relevant bundle group + * + * @param subject + * @param bundleGroupId + * @param bundleIds + */ + void unassignBundlesFromBundleGroup(Subject subject, int bundleGroupId, int[] bundleIds); }
commit 4945c1c2722ee77294860be6d99e15d5612f1220 Author: John Mazzitelli mazz@redhat.com Date: Fri Jul 26 11:20:54 2013 -0400
trivial change - first commit from intellij :}
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/authz/Permission.java b/modules/core/domain/src/main/java/org/rhq/core/domain/authz/Permission.java index 88fb948..4ca733c 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/authz/Permission.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/authz/Permission.java @@ -193,7 +193,7 @@ public enum Permission { /** * Implied - Can view bundles in the bundle group */ - VIEW_BUNDLES_IN_GROUP(Target.BUNDLE) // 25 + VIEW_BUNDLES_IN_GROUP(Target.BUNDLE) // 27
;
commit 57a64d60c88eb9b16c0a4048cd732057acbea65e Author: Jay Shaughnessy jshaughn@redhat.com Date: Fri Jul 26 10:16:34 2013 -0400
Start of remote API design/impl for BundleGroups
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerBean.java index dbecb26..ac05e20 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerBean.java @@ -257,6 +257,31 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
@Override + @RequiredPermission(Permission.MANAGE_BUNDLE_GROUPS) + public BundleGroup createBundleGroup(Subject subject, String name, String description) throws Exception { + if (null == name || "".equals(name.trim())) { + throw new IllegalArgumentException("Invalid bundleGroupName: " + name); + } + + BundleGroupCriteria c = new BundleGroupCriteria(); + c.addFilterName(name); + c.setStrict(true); + if (!bundleManager.findBundleGroupsByCriteria(subject, c).isEmpty()) { + throw new IllegalArgumentException("Invalid bundleGroupName, bundle group already exists with name: " + + name); + } + + // create and add the required Repo. the Repo is a detached object which helps in its eventual + // removal. + BundleGroup bg = new BundleGroup(name); + bg.setDescription(description); + + entityManager.persist(bg); + + return bg; + } + + @Override @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) @RequiredPermission(Permission.MANAGE_BUNDLE) public BundleDeployment createBundleDeploymentInNewTrans(Subject subject, int bundleVersionId, @@ -502,8 +527,8 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot RecipeParseResults results;
try { - results = BundleManagerHelper.getPluginContainer().getBundleServerPluginManager().parseRecipe( - bundleType.getName(), recipe); + results = BundleManagerHelper.getPluginContainer().getBundleServerPluginManager() + .parseRecipe(bundleType.getName(), recipe); } catch (Exception e) { // ensure that we throw a runtime exception to force a rollback throw new RuntimeException("Failed to parse recipe", e); @@ -610,8 +635,8 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot try { file = downloadFile(distributionFileUrl, username, password);
- log.debug("Copied [" + file.length() + "] bytes from [" + distributionFileUrl + "] into [" - + file.getPath() + "]"); + log.debug("Copied [" + file.length() + "] bytes from [" + distributionFileUrl + "] into [" + file.getPath() + + "]");
return createBundleVersionViaFile(subject, file); } finally { @@ -631,8 +656,7 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot } }
- private File downloadFileFromHttp(URL url, String username, String password) throws URISyntaxException, - IOException { + private File downloadFileFromHttp(URL url, String username, String password) throws URISyntaxException, IOException {
HttpParams params = new BasicHttpParams(); HttpClientParams.setRedirecting(params, true); @@ -647,9 +671,8 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot
HttpResponse response = httpClient.execute(get); if (response.getStatusLine().getStatusCode() != HttpStatus.SC_OK) { - throw new IllegalArgumentException( - "Failed to download the file from the URL [" + url + "]. The server responded: " + - response.getStatusLine().toString()); + throw new IllegalArgumentException("Failed to download the file from the URL [" + url + + "]. The server responded: " + response.getStatusLine().toString()); }
InputStream contents = response.getEntity().getContent(); @@ -842,8 +865,8 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot // Create the mapping between the Bundle's Repo and the BundleFile's PackageVersion Repo repo = bundle.getRepo(); // add the packageVersion as overlord, this allows users without MANAGE_INVENTORY permission to add bundle files - repoManager.addPackageVersionsToRepo(subjectManager.getOverlord(), repo.getId(), new int[] { packageVersion - .getId() }); + repoManager.addPackageVersionsToRepo(subjectManager.getOverlord(), repo.getId(), + new int[] { packageVersion.getId() });
// Classify the Package with the Bundle name in order to distinguish it from the same package name for // a different bundle. @@ -904,7 +927,6 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot } }
- @Override @RequiredPermission(Permission.MANAGE_BUNDLE) public BundleFile addBundleFileViaPackageVersion(Subject subject, int bundleVersionId, String name, @@ -1126,8 +1148,8 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot BundleDeployment revertDeployment = bundleManager.createBundleDeploymentInNewTrans(subject, prevDeployment .getBundleVersion().getId(), bundleDestinationId, name, desc, config);
- return scheduleBundleDeploymentImpl(subject, revertDeployment.getId(), isCleanDeployment, true, prevDeployment - .getReplacedBundleDeploymentId()); + return scheduleBundleDeploymentImpl(subject, revertDeployment.getId(), isCleanDeployment, true, + prevDeployment.getReplacedBundleDeploymentId()); }
// revertedDeploymentReplacedDeployment is only meaningful if isRevert is true @@ -1201,8 +1223,8 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot
// The BundleResourceDeployment record must exist in the db before the agent request because the agent may try // to add History to it during immediate deployments. So, create and persist it (requires a new trans). - BundleResourceDeployment resourceDeployment = bundleManager.createBundleResourceDeployment(subject, deployment - .getId(), bundleTargetResourceId); + BundleResourceDeployment resourceDeployment = bundleManager.createBundleResourceDeployment(subject, + deployment.getId(), bundleTargetResourceId);
if (null != bundleTarget.getResourceType().getResourceTypeBundleConfiguration()) {
@@ -1225,8 +1247,8 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot bundleManager.setBundleResourceDeploymentStatus(subject, resourceDeployment.getId(), BundleDeploymentStatus.FAILURE); history = new BundleResourceDeploymentHistory(subject.getName(), AUDIT_ACTION_DEPLOYMENT, - deployment.getName(), null, BundleResourceDeploymentHistory.Status.FAILURE, response - .getErrorMessage(), null); + deployment.getName(), null, BundleResourceDeploymentHistory.Status.FAILURE, + response.getErrorMessage(), null); bundleManager.addBundleResourceDeploymentHistory(subject, resourceDeployment.getId(), history); } } catch (Throwable t) { @@ -1494,9 +1516,9 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot */ @Override public PageList<BundleDeployment> findBundleDeploymentsByCriteriaWithDestinationFilter(Subject subject, - BundleDeploymentCriteria criteria) { + BundleDeploymentCriteria criteria) {
- PageList<BundleDeployment> deployments = findBundleDeploymentsByCriteria(subject,criteria); + PageList<BundleDeployment> deployments = findBundleDeploymentsByCriteria(subject, criteria); if (authorizationManager.isInventoryManager(subject)) return deployments;
@@ -1507,8 +1529,8 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot int bundleId = deployment.getBundleVersion().getBundle().getId(); BundleDestinationCriteria destinationCriteria = new BundleDestinationCriteria(); destinationCriteria.addFilterBundleId(bundleId); - List<BundleDestination> destinations = findBundleDestinationsByCriteria(subject,destinationCriteria); - if (destinationsContains(destinations,deployment.getDestination())) + List<BundleDestination> destinations = findBundleDestinationsByCriteria(subject, destinationCriteria); + if (destinationsContains(destinations, deployment.getDestination())) resultingDeployments.add(deployment); } return resultingDeployments; @@ -1520,8 +1542,10 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot CriteriaQueryGenerator generator = new CriteriaQueryGenerator(subject, criteria); // Filter by destinations that are viewable if (!authorizationManager.isInventoryManager(subject)) { - generator.setAuthorizationResourceFragment(CriteriaQueryGenerator.AuthorizationTokenType.GROUP,subject.getId()); - } CriteriaQueryRunner<BundleDestination> queryRunner = new CriteriaQueryRunner<BundleDestination>(criteria, + generator.setAuthorizationResourceFragment(CriteriaQueryGenerator.AuthorizationTokenType.GROUP, + subject.getId()); + } + CriteriaQueryRunner<BundleDestination> queryRunner = new CriteriaQueryRunner<BundleDestination>(criteria, generator, entityManager); return queryRunner.execute(); } @@ -1568,26 +1592,27 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot */ @Override public PageList<BundleVersion> findBundleVersionsByCriteriaWithDestinationFilter(Subject subject, - BundleVersionCriteria criteria) { + BundleVersionCriteria criteria) {
- PageList<BundleVersion> versions = findBundleVersionsByCriteria(subject,criteria); + PageList<BundleVersion> versions = findBundleVersionsByCriteria(subject, criteria); if (authorizationManager.isInventoryManager(subject)) { return versions; } // Not inv manager -> restrict visible deployments by visible destinations
- for (BundleVersion version:versions) { + for (BundleVersion version : versions) {
Bundle bundle = version.getBundle(); BundleDestinationCriteria destinationCriteria = new BundleDestinationCriteria(); destinationCriteria.addFilterBundleId(bundle.getId()); destinationCriteria.clearPaging();//disable paging as the code assumes all the results will be returned.
- List<BundleDestination> destinations = findBundleDestinationsByCriteria(subject,destinationCriteria); - List<BundleDeployment> resultingDeployments = new ArrayList<BundleDeployment>(version.getBundleDeployments().size()); + List<BundleDestination> destinations = findBundleDestinationsByCriteria(subject, destinationCriteria); + List<BundleDeployment> resultingDeployments = new ArrayList<BundleDeployment>(version + .getBundleDeployments().size()); // We now have visible destinations - go over the resultingDeployments and only include the ones with vis. destinations for (BundleDeployment deployment : version.getBundleDeployments()) { - if (destinationsContains(destinations,deployment.getDestination())) + if (destinationsContains(destinations, deployment.getDestination())) resultingDeployments.add(deployment); }
@@ -1639,18 +1664,18 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot @Override public PageList<Bundle> findBundlesByCriteriaWithDestinationFilter(Subject subject, BundleCriteria criteria) { // First get the bundles - PageList<Bundle> bundles = findBundlesByCriteria(subject,criteria); + PageList<Bundle> bundles = findBundlesByCriteria(subject, criteria); if (authorizationManager.isInventoryManager(subject)) { return bundles; } // Not inv manager -> restrict visible destinations - PageList<Bundle> result = new PageList<Bundle>(bundles.size(),bundles.getPageControl()); + PageList<Bundle> result = new PageList<Bundle>(bundles.size(), bundles.getPageControl()); for (Bundle bundle : bundles.getValues()) { // TODO clone the bundle and return the modified clones BundleDestinationCriteria destinationCriteria = new BundleDestinationCriteria(); destinationCriteria.addFilterBundleId(bundle.getId()); destinationCriteria.clearPaging();//disable paging as the code assumes all the results will be returned.
- List<BundleDestination> destinations = findBundleDestinationsByCriteria(subject,destinationCriteria); + List<BundleDestination> destinations = findBundleDestinationsByCriteria(subject, destinationCriteria); bundle.setDestinations(destinations); }
@@ -1755,6 +1780,27 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot }
@Override + @RequiredPermission(Permission.MANAGE_BUNDLE_GROUPS) + public void deleteBundleGroups(Subject subject, int... bundleGroupIds) throws Exception { + + for (int bundleGroupId : bundleGroupIds) { + BundleGroup bundleGroup = this.entityManager.find(BundleGroup.class, bundleGroupId); + if (null == bundleGroup) { + continue; + } + + // unassign any bundles assigned to the bundle group (iterate a copy to avoid ConcurrentModificationException) + for (Bundle b : new ArrayList<Bundle>(bundleGroup.getBundles())) { + bundleGroup.removeBundle(b); + } + bundleGroup = entityManager.merge(bundleGroup); + + // now remove the bundle group + entityManager.remove(bundleGroup); + } + } + + @Override @RequiredPermission(Permission.MANAGE_BUNDLE) public void deleteBundleVersion(Subject subject, int bundleVersionId, boolean deleteBundleIfEmpty) throws Exception { BundleVersion bundleVersion = this.entityManager.find(BundleVersion.class, bundleVersionId); diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerRemote.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerRemote.java index b72d6b5..0ffdc1a 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerRemote.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerRemote.java @@ -120,6 +120,8 @@ public interface BundleManagerRemote { BundleFile addBundleFileViaPackageVersion(Subject subject, int bundleVersionId, String name, int packageVersionId) throws Exception;
+ //void assignBundlesToBundleGroup + /** * Create a new bundle deployment. Note that bundle deployment names are generated by this * call. This provides useful, uniform naming for display. An optional, custom description @@ -157,6 +159,19 @@ public interface BundleManagerRemote { String destBaseDirName, String deployDir, Integer groupId) throws Exception;
/** + * Create a new bundle group. + * <p/> + * Requires Global.MANAGE_BUNDLE_GROUP permission. + * + * @param subject user that must have proper permissions + * @param name the unique bundle group name + * @param description an optional description + * @return the persisted BundleGroup + * @throws Exception + */ + BundleGroup createBundleGroup(Subject subject, String name, String description) throws Exception; + + /** * Creates a bundle version based on single recipe string. The recipe specifies the bundle name, * version, version name and version description. If this is the initial version for the named * bundle the bundle will be implicitly created. The bundle type is discovered by the bundle server @@ -247,6 +262,17 @@ public interface BundleManagerRemote { void deleteBundle(Subject subject, int bundleId) throws Exception;
/** + * Delete a bundle group. Any currently assigned bundles will be removed but are not deleted. + * <p/> + * Requires Global.MANAGE_BUNDLE_GROUPS permission. + * + * @param subject user that must have proper permissions + * @param id the bundle group ids + * @throws Exception + */ + void deleteBundleGroups(Subject subject, int... id) throws Exception; + + /** + * Remove everything associated with the BundleVersion with the exception of files laid down by related deployments. + * Deployed files are left as is on the deployment platforms but the bundle mechanism will no longer track + * the deployment.
commit 2e33f605b32204385e9dc7c4ea9f2f3ac2e71850 Author: Heiko W. Rupp hwr@redhat.com Date: Fri Jul 26 14:14:15 2013 +0200
Put the real version in here, as otherwise we have a circular dependency.
diff --git a/modules/enterprise/gui/coregui/pom.xml b/modules/enterprise/gui/coregui/pom.xml index 353f4d9..a227ed4 100644 --- a/modules/enterprise/gui/coregui/pom.xml +++ b/modules/enterprise/gui/coregui/pom.xml @@ -19,7 +19,7 @@
<properties> <!-- dependency versions --> - <gwt.version>${gwt.version}</gwt.version> + <gwt.version>2.5.0</gwt.version> <smartgwt.version>3.0</smartgwt.version>
<!-- If this is too much memory to allocate to your gwt:debug process then override this property in
commit 9fbd3da4446f026dd4095f23345a5acaa0b956c0 Author: John Sanda jsanda@redhat.com Date: Thu Jul 25 22:08:20 2013 -0400
check cluster status using cql driver instead of using jmx
The server had been polling storage cluster nodes with a jmx call to make sure that at least one node is up for client requests. If no nodes are up, then the server goes into maintenance mode.
That check is no longer done with a quartz job using jmx. It is now performed with event notifications we receive from the cql driver. This means that the server no longer has to care about storage node jmx ports. This has a couple benefits. First, it reduces the complexity involved with changing the jmx port. Secondly, since the server no longer is making jmx calls to storage nodes, the port can be locked down to localhost access.
Any server side code that needs access to the driver's Session should instead use the new StorageSession class. StorageSession provides the same API, and it also encapsulates the event handling/propagation logic that is necessary for monitoring the cluster availability.
diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/measurement/MeasurementDataManagerBeanTest.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/measurement/MeasurementDataManagerBeanTest.java index 4cc67c0..9f49143 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/measurement/MeasurementDataManagerBeanTest.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/measurement/MeasurementDataManagerBeanTest.java @@ -39,7 +39,6 @@ import java.util.List;
import javax.ejb.EJB;
-import com.datastax.driver.core.Session; import com.datastax.driver.core.exceptions.NoHostAvailableException;
import org.joda.time.DateTime; @@ -60,13 +59,14 @@ import org.rhq.core.domain.resource.Agent; import org.rhq.core.domain.resource.Resource; import org.rhq.core.domain.resource.ResourceType; import org.rhq.enterprise.server.auth.SubjectManagerLocal; -import org.rhq.enterprise.server.storage.StorageClientManagerBean; import org.rhq.enterprise.server.drift.DriftServerPluginService; import org.rhq.enterprise.server.resource.ResourceManagerLocal; +import org.rhq.enterprise.server.storage.StorageClientManagerBean; import org.rhq.enterprise.server.test.AbstractEJB3Test; import org.rhq.enterprise.server.test.TransactionCallback; import org.rhq.enterprise.server.util.ResourceTreeHelper; import org.rhq.server.metrics.MetricsDAO; +import org.rhq.server.metrics.StorageSession; import org.rhq.server.metrics.domain.AggregateNumericMetric; import org.rhq.server.metrics.domain.AggregateType; import org.rhq.server.metrics.domain.MetricsTable; @@ -360,7 +360,7 @@ public class MeasurementDataManagerBeanTest extends AbstractEJB3Test {
private void purgeMetricsTables() { try { - Session session = storageClientManager.getSession(); + StorageSession session = storageClientManager.getSession();
session.execute("TRUNCATE " + MetricsTable.RAW); session.execute("TRUNCATE " + MetricsTable.ONE_HOUR); diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/measurement/test/MeasurementBaselineManagerTest.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/measurement/test/MeasurementBaselineManagerTest.java index 49048ee..fdf2b1a 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/measurement/test/MeasurementBaselineManagerTest.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/measurement/test/MeasurementBaselineManagerTest.java @@ -28,7 +28,6 @@ import java.util.Random; import javax.inject.Inject; import javax.persistence.Query;
-import com.datastax.driver.core.Session; import com.datastax.driver.core.exceptions.NoHostAvailableException;
import org.testng.annotations.Test; @@ -45,13 +44,14 @@ import org.rhq.core.domain.resource.Resource; import org.rhq.core.domain.resource.ResourceCategory; import org.rhq.core.domain.resource.ResourceType; import org.rhq.core.domain.util.PageControl; -import org.rhq.enterprise.server.storage.StorageClientManagerBean; import org.rhq.enterprise.server.measurement.MeasurementBaselineManagerLocal; import org.rhq.enterprise.server.measurement.MeasurementOOBManagerLocal; import org.rhq.enterprise.server.resource.ResourceManagerLocal; +import org.rhq.enterprise.server.storage.StorageClientManagerBean; import org.rhq.enterprise.server.test.AbstractEJB3Test; import org.rhq.enterprise.server.util.LookupUtil; import org.rhq.server.metrics.MetricsDAO; +import org.rhq.server.metrics.StorageSession; import org.rhq.server.metrics.domain.AggregateNumericMetric; import org.rhq.server.metrics.domain.AggregateType; import org.rhq.server.metrics.domain.MetricsTable; @@ -645,7 +645,7 @@ public class MeasurementBaselineManagerTest extends AbstractEJB3Test { // Query q = em.createNativeQuery(sql); // q.executeUpdate(); try { - Session session = storageClientManager.getSession(); + StorageSession session = storageClientManager.getSession(); session.execute("DELETE FROM " + MetricsTable.ONE_HOUR.getTableName() + " WHERE schedule_id = " + schedule.getId()); } catch (NoHostAvailableException e) { diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/StartupBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/StartupBean.java index 7ce77d8..fa88263 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/StartupBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/StartupBean.java @@ -450,7 +450,6 @@ public class StartupBean implements StartupLocal { }
storageClientManager.init(); - storageClusterHeartBeatJob.scheduleJob(); }
/** diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java index 2d48092..ae80e50 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java @@ -50,6 +50,7 @@ import org.rhq.server.metrics.DateTimeService; import org.rhq.server.metrics.MetricsConfiguration; import org.rhq.server.metrics.MetricsDAO; import org.rhq.server.metrics.MetricsServer; +import org.rhq.server.metrics.StorageSession;
/** * @author John Sanda @@ -70,7 +71,7 @@ public class StorageClientManagerBean { @EJB private StorageNodeManagerLocal storageNodeManager;
- private Session session; + private StorageSession session; private MetricsConfiguration metricsConfiguration; private MetricsDAO metricsDAO; private MetricsServer metricsServer; @@ -95,7 +96,12 @@ public class StorageClientManagerBean { String password = getRequiredStorageProperty(PASSWORD_PROP);
metricsConfiguration = new MetricsConfiguration(); - session = createSession(username, password, storageNodeManager.getStorageNodes()); + + Session wrappedSession = createSession(username, password, storageNodeManager.getStorageNodes()); + session = new StorageSession(wrappedSession); + + session.addStorageStateListener(new StorageClusterMonitor()); + metricsDAO = new MetricsDAO(session, metricsConfiguration);
Server server = serverManager.getServer(); @@ -121,15 +127,15 @@ public class StorageClientManagerBean { }
public MetricsDAO getMetricsDAO() { - return this.metricsDAO; + return metricsDAO; }
public MetricsServer getMetricsServer() { - return this.metricsServer; + return metricsServer; }
- public Session getSession() { - return this.session; + public StorageSession getSession() { + return session; }
public MetricsConfiguration getMetricsConfiguration() { diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java new file mode 100644 index 0000000..ec28888 --- /dev/null +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java @@ -0,0 +1,69 @@ +package org.rhq.enterprise.server.storage; + +import java.net.InetAddress; +import java.util.concurrent.atomic.AtomicBoolean; + +import com.datastax.driver.core.exceptions.NoHostAvailableException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import org.rhq.core.domain.cloud.Server; +import org.rhq.enterprise.server.auth.SubjectManagerLocal; +import org.rhq.enterprise.server.cloud.TopologyManagerLocal; +import org.rhq.enterprise.server.cloud.instance.ServerManagerLocal; +import org.rhq.enterprise.server.util.LookupUtil; +import org.rhq.server.metrics.StorageStateListener; + +/** + * @author John Sanda + */ +public class StorageClusterMonitor implements StorageStateListener { + + private Log log = LogFactory.getLog(StorageClusterMonitor.class); + + private AtomicBoolean isClusterDown = new AtomicBoolean(false); + + public boolean isClusterDown() { + return isClusterDown.get(); + } + + @Override + public void onStorageNodeUp(InetAddress address) { + log.info("Storage node at " + address.getHostAddress() + " is up"); + + if (isClusterDown.compareAndSet(true, false)) { + log.info("Taking server out of maintenance mode"); + updateServerMode(Server.OperationMode.NORMAL); + } + } + + @Override + public void onStorageNodeDown(InetAddress address) { + log.info("Storage node at " + address.getHostAddress() + " is down"); + } + + @Override + public void onStorageNodeRemoved(InetAddress address) { + log.info("Storage node at " + address.getHostAddress() + " has been 
removed from the cluster"); + } + + @Override + public void onStorageClusterDown(NoHostAvailableException e) { + if (isClusterDown.compareAndSet(false, true)) { + log.error("The server cannot connect to any storage nodes. The server will now go into maintenance mode."); + updateServerMode(Server.OperationMode.MAINTENANCE); + } + } + + private void updateServerMode(Server.OperationMode mode) { + ServerManagerLocal serverManager = LookupUtil.getServerManager(); + TopologyManagerLocal topologyManager = LookupUtil.getTopologyManager(); + SubjectManagerLocal subjectManager = LookupUtil.getSubjectManager(); + + Server server = serverManager.getServer(); + + topologyManager.updateServerMode(subjectManager.getOverlord(), new Integer[] {server.getId()}, + mode); + } +} diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsDAO.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsDAO.java index 3ea47e8..63c11f1 100644 --- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsDAO.java +++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsDAO.java @@ -26,20 +26,13 @@ package org.rhq.server.metrics;
-import static com.datastax.driver.core.querybuilder.QueryBuilder.insertInto; - -import java.util.ArrayList; import java.util.Date; import java.util.List; import java.util.Map; -import java.util.Set;
import com.datastax.driver.core.BoundStatement; import com.datastax.driver.core.PreparedStatement; import com.datastax.driver.core.ResultSet; -import com.datastax.driver.core.ResultSetFuture; -import com.datastax.driver.core.Session; -import com.datastax.driver.core.Statement; import com.datastax.driver.core.exceptions.NoHostAvailableException;
import org.apache.commons.logging.Log; @@ -67,7 +60,7 @@ public class MetricsDAO {
private final Log log = LogFactory.getLog(MetricsDAO.class);
- private Session session; + private StorageSession storageSession;
private MetricsConfiguration configuration;
@@ -86,8 +79,8 @@ public class MetricsDAO { private PreparedStatement findTimeSliceForIndex; private PreparedStatement deleteIndexEntries;
- public MetricsDAO(Session session, MetricsConfiguration configuration) { - this.session = session; + public MetricsDAO(StorageSession session, MetricsConfiguration configuration) { + this.storageSession = session; this.configuration = configuration; initPreparedStatements(); } @@ -104,176 +97,118 @@ public class MetricsDAO { // re-initialized and re-prepared with the new TTLs. None of this would be necessary // if the TTL value could be a bound value.
- insertRawData = session.prepare( + insertRawData = storageSession.prepare( "INSERT INTO " + MetricsTable.RAW + " (schedule_id, time, value) VALUES (?, ?, ?) USING TTL " + configuration.getRawTTL());
- rawMetricsQuery = session.prepare("SELECT schedule_id, time, value FROM " + MetricsTable.RAW + + rawMetricsQuery = storageSession.prepare("SELECT schedule_id, time, value FROM " + MetricsTable.RAW + " WHERE schedule_id = ? AND time >= ? AND time < ? ORDER BY time");
- insertOneHourData = session.prepare("INSERT INTO " + MetricsTable.ONE_HOUR + "(schedule_id, time, " + + insertOneHourData = storageSession.prepare("INSERT INTO " + MetricsTable.ONE_HOUR + "(schedule_id, time, " + "type, value) VALUES (?, ?, ?, ?) USING TTL " + configuration.getOneHourTTL());
- insertSixHourData = session.prepare("INSERT INTO " + MetricsTable.SIX_HOUR + "(schedule_id, time, " + + insertSixHourData = storageSession.prepare("INSERT INTO " + MetricsTable.SIX_HOUR + "(schedule_id, time, " + "type, value) VALUES (?, ?, ?, ?) USING TTL " + configuration.getOneHourTTL());
- insertTwentyFourHourData = session.prepare("INSERT INTO " + MetricsTable.TWENTY_FOUR_HOUR + "(schedule_id, " + + insertTwentyFourHourData = storageSession.prepare("INSERT INTO " + MetricsTable.TWENTY_FOUR_HOUR + "(schedule_id, " + "time, type, value) VALUES (?, ?, ?, ?) USING TTL " + configuration.getOneHourTTL());
- updateMetricsIndex = session.prepare("INSERT INTO " + MetricsTable.INDEX + " (bucket, time, schedule_id) " + + updateMetricsIndex = storageSession.prepare("INSERT INTO " + MetricsTable.INDEX + " (bucket, time, schedule_id) " + "VALUES (?, ?, ?)");
- findLatestRawMetric = session.prepare("SELECT schedule_id, time, value FROM " + MetricsTable.RAW + + findLatestRawMetric = storageSession.prepare("SELECT schedule_id, time, value FROM " + MetricsTable.RAW + " WHERE schedule_id = ? ORDER BY time DESC LIMIT 1");
- findRawMetrics = session.prepare("SELECT schedule_id, time, value FROM " + MetricsTable.RAW + + findRawMetrics = storageSession.prepare("SELECT schedule_id, time, value FROM " + MetricsTable.RAW + " WHERE schedule_id = ? AND time >= ? AND time <= ?");
- findOneHourMetricsByDateRange = session.prepare("SELECT schedule_id, time, type, value FROM " + + findOneHourMetricsByDateRange = storageSession.prepare("SELECT schedule_id, time, type, value FROM " + MetricsTable.ONE_HOUR + " WHERE schedule_id = ? AND time >= ? AND time < ?");
- findSixHourMetricsByDateRange = session.prepare("SELECT schedule_id, time, type, value FROM " + findSixHourMetricsByDateRange = storageSession.prepare("SELECT schedule_id, time, type, value FROM " + MetricsTable.SIX_HOUR + " WHERE schedule_id = ? AND time >= ? AND time < ?");
- findTwentyFourHourMetricsByDateRange = session.prepare("SELECT schedule_id, time, type, value FROM " + + findTwentyFourHourMetricsByDateRange = storageSession.prepare("SELECT schedule_id, time, type, value FROM " + MetricsTable.TWENTY_FOUR_HOUR + " WHERE schedule_id = ? AND time >= ? AND time < ?");
- findIndexEntries = session.prepare("SELECT time, schedule_id FROM " + MetricsTable.INDEX + + findIndexEntries = storageSession.prepare("SELECT time, schedule_id FROM " + MetricsTable.INDEX + " WHERE bucket = ? AND time = ?");
- findTimeSliceForIndex = session.prepare("SELECT time FROM " + MetricsTable.INDEX + + findTimeSliceForIndex = storageSession.prepare("SELECT time FROM " + MetricsTable.INDEX + " WHERE bucket = ? AND time = ?");
- deleteIndexEntries = session.prepare("DELETE FROM " + MetricsTable.INDEX + " WHERE bucket = ? AND time = ?"); + deleteIndexEntries = storageSession.prepare("DELETE FROM " + MetricsTable.INDEX + " WHERE bucket = ? AND time = ?");
long endTime = System.currentTimeMillis(); log.info("Finished initializing prepared statements in " + (endTime - startTime) + " ms"); }
- public ResultSetFuture insertRawData(MeasurementDataNumeric data) { + public StorageResultSetFuture insertRawData(MeasurementDataNumeric data) { BoundStatement statement = insertRawData.bind(data.getScheduleId(), new Date(data.getTimestamp()), data.getValue()); - return session.executeAsync(statement); - } - - public List<MetricResultFuture<MeasurementDataNumeric>> insertRawMetricsAsync(Set<MeasurementDataNumeric> dataSet, - int ttl) { - try { - List<MetricResultFuture<MeasurementDataNumeric>> resultFutures = new ArrayList<MetricResultFuture<MeasurementDataNumeric>>(); - - String cql = "INSERT INTO raw_metrics (schedule_id, time, value) VALUES (?, ?, ?) " + "USING TTL " + ttl; - PreparedStatement statement = session.prepare(cql); - - for (MeasurementDataNumeric data : dataSet) { - BoundStatement boundStatement = statement.bind(data.getScheduleId(), new Date(data.getTimestamp()), - data.getValue()); - - resultFutures.add(new MetricResultFuture<MeasurementDataNumeric>(session.executeAsync(boundStatement), - data)); - } - - return resultFutures; - } catch (NoHostAvailableException e) { - throw new CQLException(e); - } + return storageSession.executeAsync(statement); }
public ResultSet insertOneHourData(int scheduleId, long timestamp, AggregateType type, double value) { BoundStatement statement = insertOneHourData.bind(scheduleId, new Date(timestamp), type.ordinal(), value); - return session.execute(statement); + return storageSession.execute(statement); }
public ResultSet insertSixHourData(int scheduleId, long timestamp, AggregateType type, double value) { BoundStatement statement = insertSixHourData.bind(scheduleId, new Date(timestamp), type.ordinal(), value); - return session.execute(statement); + return storageSession.execute(statement); }
public ResultSet insertTwentyFourHourData(int scheduleId, long timestamp, AggregateType type, double value) { BoundStatement statement = insertTwentyFourHourData.bind(scheduleId, new Date(timestamp), type.ordinal(), value); - return session.execute(statement); - } - - public List<MetricResultFuture<AggregateNumericMetric>> insertAggregatesAsync(MetricsTable table, - List<AggregateNumericMetric> metrics, int ttl) { - List<MetricResultFuture<AggregateNumericMetric>> updates = new ArrayList<MetricResultFuture<AggregateNumericMetric>>(); - - if (metrics.isEmpty()) { - return updates; - } - - try { - Statement statement = null; - - for (AggregateNumericMetric metric : metrics) { - statement = insertInto(table.getTableName()) - .value("schedule_id", metric.getScheduleId()) - .value("time", new Date(metric.getTimestamp())) - .value("type", AggregateType.MIN.ordinal()) - .value("value", metric.getMin()); - updates.add(new MetricResultFuture<AggregateNumericMetric>(session.executeAsync(statement), metric)); - - statement = insertInto(table.getTableName()) - .value("schedule_id", metric.getScheduleId()) - .value("time", new Date(metric.getTimestamp())) - .value("type", AggregateType.MAX.ordinal()) - .value("value", metric.getMax()); - updates.add(new MetricResultFuture<AggregateNumericMetric>(session.executeAsync(statement), metric)); - - statement = insertInto(table.getTableName()) - .value("schedule_id", metric.getScheduleId()) - .value("time", new Date(metric.getTimestamp())) - .value("type", AggregateType.AVG.ordinal()) - .value("value", metric.getAvg()); - updates.add(new MetricResultFuture<AggregateNumericMetric>(session.executeAsync(statement), metric)); - } - - return updates; - } catch (Exception e) { - throw new CQLException(e); - } + return storageSession.execute(statement); }
public Iterable<RawNumericMetric> findRawMetrics(int scheduleId, long startTime, long endTime) { try { BoundStatement boundStatement = rawMetricsQuery.bind(scheduleId, new Date(startTime), new Date(endTime)); - return new SimplePagedResult<RawNumericMetric>(boundStatement, new RawNumericMetricMapper(false), session); + return new SimplePagedResult<RawNumericMetric>(boundStatement, new RawNumericMetricMapper(false), + storageSession); } catch (NoHostAvailableException e) { throw new CQLException(e); } }
- public ResultSetFuture findRawMetricsAsync(int scheduleId, long startTime, long endTime) { + public StorageResultSetFuture findRawMetricsAsync(int scheduleId, long startTime, long endTime) { BoundStatement boundStatement = rawMetricsQuery.bind(scheduleId, new Date(startTime), new Date(endTime)); - return session.executeAsync(boundStatement); + return storageSession.executeAsync(boundStatement); }
public RawNumericMetric findLatestRawMetric(int scheduleId) { RawNumericMetricMapper mapper = new RawNumericMetricMapper(false); BoundStatement boundStatement = findLatestRawMetric.bind(scheduleId); - ResultSet resultSet = session.execute(boundStatement); + ResultSet resultSet = storageSession.execute(boundStatement);
return mapper.mapOne(resultSet); }
public Iterable<RawNumericMetric> findRawMetrics(List<Integer> scheduleIds, long startTime, long endTime) { return new ListPagedResult<RawNumericMetric>(findRawMetrics, scheduleIds, startTime, endTime, - new RawNumericMetricMapper(), session); + new RawNumericMetricMapper(), storageSession); }
public Iterable<AggregateNumericMetric> findOneHourMetrics(int scheduleId, long startTime, long endTime) { BoundStatement statement = findOneHourMetricsByDateRange.bind(scheduleId, new Date(startTime), new Date(endTime)); - return new SimplePagedResult<AggregateNumericMetric>(statement, new AggregateNumericMetricMapper(), session); + return new SimplePagedResult<AggregateNumericMetric>(statement, new AggregateNumericMetricMapper(), + storageSession); }
public Iterable<AggregateNumericMetric> findSixHourMetrics(int scheduleId, long startTime, long endTime) { BoundStatement statement = findSixHourMetricsByDateRange.bind(scheduleId, new Date(startTime), new Date(endTime)); - return new SimplePagedResult<AggregateNumericMetric>(statement, new AggregateNumericMetricMapper(), session); + return new SimplePagedResult<AggregateNumericMetric>(statement, new AggregateNumericMetricMapper(), + storageSession); }
public Iterable<AggregateNumericMetric> findTwentyFourHourMetrics(int scheduleId, long startTime, long endTime) { BoundStatement statement = findTwentyFourHourMetricsByDateRange.bind(scheduleId, new Date(startTime), new Date(endTime)); - return new SimplePagedResult<AggregateNumericMetric>(statement, new AggregateNumericMetricMapper(), session); + return new SimplePagedResult<AggregateNumericMetric>(statement, new AggregateNumericMetricMapper(), + storageSession); }
public Iterable<AggregateSimpleNumericMetric> findAggregatedSimpleOneHourMetric(int scheduleId, long startTime, @@ -281,52 +216,52 @@ public class MetricsDAO { BoundStatement statement = findOneHourMetricsByDateRange.bind(scheduleId, new Date(startTime), new Date(endTime)); return new SimplePagedResult<AggregateSimpleNumericMetric>(statement, new AggregateSimpleNumericMetricMapper(), - session); + storageSession); }
public Iterable<AggregateNumericMetric> findOneHourMetrics(List<Integer> scheduleIds, long startTime, long endTime) { return new ListPagedResult<AggregateNumericMetric>(findOneHourMetricsByDateRange, scheduleIds, startTime, endTime, - new AggregateNumericMetricMapper(), session); + new AggregateNumericMetricMapper(), storageSession); }
public Iterable<AggregateNumericMetric> findSixHourMetrics(List<Integer> scheduleIds, long startTime, long endTime) { return new ListPagedResult<AggregateNumericMetric>(findSixHourMetricsByDateRange, scheduleIds, startTime, endTime, - new AggregateNumericMetricMapper(), session); + new AggregateNumericMetricMapper(), storageSession); }
public Iterable<AggregateNumericMetric> findTwentyFourHourMetrics(List<Integer> scheduleIds, long startTime, long endTime) { return new ListPagedResult<AggregateNumericMetric>(findTwentyFourHourMetricsByDateRange, scheduleIds, startTime, endTime, - new AggregateNumericMetricMapper(), session); + new AggregateNumericMetricMapper(), storageSession); }
public Iterable<MetricsIndexEntry> findMetricsIndexEntries(final MetricsTable table, long timestamp) { BoundStatement statement = findIndexEntries.bind(table.toString(), new Date(timestamp)); - return new SimplePagedResult<MetricsIndexEntry>(statement, new MetricsIndexEntryMapper(table), session); + return new SimplePagedResult<MetricsIndexEntry>(statement, new MetricsIndexEntryMapper(table), storageSession); }
public ResultSet setFindTimeSliceForIndex(MetricsTable table, long timestamp) { BoundStatement statement = findTimeSliceForIndex.bind(table.toString(), new Date(timestamp)); - return session.execute(statement); + return storageSession.execute(statement); }
public void updateMetricsIndex(MetricsTable table, Map<Integer, Long> updates) { for (Integer scheduleId : updates.keySet()) { BoundStatement statement = updateMetricsIndex.bind(table.getTableName(), new Date(updates.get(scheduleId)), scheduleId); - session.execute(statement); + storageSession.execute(statement); } }
- public ResultSetFuture updateMetricsIndex(MetricsTable table, int scheduleId, long timestamp) { + public StorageResultSetFuture updateMetricsIndex(MetricsTable table, int scheduleId, long timestamp) { BoundStatement statement = updateMetricsIndex.bind(table.getTableName(), new Date(timestamp), scheduleId); - return session.executeAsync(statement); + return storageSession.executeAsync(statement); }
public void deleteMetricsIndexEntries(MetricsTable table, long timestamp) { BoundStatement statement = deleteIndexEntries.bind(table.getTableName(), new Date(timestamp)); - session.execute(statement); + storageSession.execute(statement); } } diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsServer.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsServer.java index 7395953..9756006 100644 --- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsServer.java +++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsServer.java @@ -35,7 +35,6 @@ import java.util.concurrent.Semaphore; import java.util.concurrent.atomic.AtomicInteger;
import com.datastax.driver.core.ResultSet; -import com.datastax.driver.core.ResultSetFuture; import com.datastax.driver.core.Row; import com.google.common.util.concurrent.FutureCallback; import com.google.common.util.concurrent.Futures; @@ -299,7 +298,7 @@ public class MetricsServer {
for (final MeasurementDataNumeric data : dataSet) { semaphore.acquire(); - ResultSetFuture resultSetFuture = dao.insertRawData(data); + StorageResultSetFuture resultSetFuture = dao.insertRawData(data); Futures.addCallback(resultSetFuture, new FutureCallback<ResultSet>() { @Override public void onSuccess(ResultSet rows) { @@ -325,7 +324,7 @@ public class MetricsServer {
long timeSlice = dateTimeService.getTimeSlice(new DateTime(rawData.getTimestamp()), configuration.getRawTimeSliceDuration()).getMillis(); - ResultSetFuture resultSetFuture = dao.updateMetricsIndex(MetricsTable.ONE_HOUR, rawData.getScheduleId(), + StorageResultSetFuture resultSetFuture = dao.updateMetricsIndex(MetricsTable.ONE_HOUR, rawData.getScheduleId(), timeSlice); Futures.addCallback(resultSetFuture, new FutureCallback<ResultSet>() { @Override diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/StorageResultSetFuture.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/StorageResultSetFuture.java new file mode 100644 index 0000000..902bf4a --- /dev/null +++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/StorageResultSetFuture.java @@ -0,0 +1,76 @@ +package org.rhq.server.metrics; + +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.ResultSetFuture; +import com.datastax.driver.core.exceptions.NoHostAvailableException; +import com.google.common.util.concurrent.ListenableFuture; + +/** + * @author John Sanda + */ +public class StorageResultSetFuture implements ListenableFuture<ResultSet> { + + private ResultSetFuture wrapperFuture; + + private List<StorageStateListener> listeners; + + public StorageResultSetFuture(ResultSetFuture resultSetFuture, List<StorageStateListener> listeners) { + wrapperFuture = resultSetFuture; + this.listeners = listeners; + } + + @Override + public void addListener(Runnable listener, Executor executor) { + wrapperFuture.addListener(listener, executor); + } + + @Override + public boolean cancel(boolean mayInterruptIfRunning) { + return wrapperFuture.cancel(mayInterruptIfRunning); + } + + @Override + public boolean 
isCancelled() { + return wrapperFuture.isCancelled(); + } + + @Override + public boolean isDone() { + return wrapperFuture.isDone(); + } + + @Override + public ResultSet get() throws InterruptedException, ExecutionException { + try { + return wrapperFuture.get(); + } catch (ExecutionException e) { + return handleException(e); + } + } + + @Override + public ResultSet get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, + TimeoutException { + try { + return wrapperFuture.get(timeout, unit); + } catch (ExecutionException e) { + return handleException(e); + } + } + + private ResultSet handleException(ExecutionException e) throws ExecutionException { + if (e.getCause() instanceof NoHostAvailableException) { + NoHostAvailableException cause = (NoHostAvailableException) e.getCause(); + for (StorageStateListener listener : listeners) { + listener.onStorageClusterDown(cause); + } + } + throw e; + } +} diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/StorageSession.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/StorageSession.java new file mode 100644 index 0000000..3f7af3f --- /dev/null +++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/StorageSession.java @@ -0,0 +1,110 @@ +package org.rhq.server.metrics; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.Host; +import com.datastax.driver.core.PreparedStatement; +import com.datastax.driver.core.Query; +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.ResultSetFuture; +import com.datastax.driver.core.Session; +import com.datastax.driver.core.exceptions.NoHostAvailableException; + +/** + * @author John Sanda + */ +public class StorageSession implements Host.StateListener { + + private Session wrappedSession; + + private 
List<StorageStateListener> listeners = new ArrayList<StorageStateListener>(); + + public StorageSession(Session wrappedSession) { + this.wrappedSession = wrappedSession; + this.wrappedSession.getCluster().register(this); + } + + public void addStorageStateListener(StorageStateListener listener) { + listeners.add(listener); + } + + public ResultSet execute(String query) { + try { + return wrappedSession.execute(query); + } catch (NoHostAvailableException e) { + return handleException(e); + } + } + + public ResultSet execute(Query query) { + try { + return wrappedSession.execute(query); + } catch (NoHostAvailableException e) { + return handleException(e); + } + } + + public StorageResultSetFuture executeAsync(String query) { + ResultSetFuture future = wrappedSession.executeAsync(query); + return new StorageResultSetFuture(future, listeners); + } + + public StorageResultSetFuture executeAsync(Query query) { + ResultSetFuture future = wrappedSession.executeAsync(query); + return new StorageResultSetFuture(future, listeners); + } + + public PreparedStatement prepare(String query) { + return wrappedSession.prepare(query); + } + + public void shutdown() { + wrappedSession.shutdown(); + } + + public boolean shutdown(long timeout, TimeUnit unit) { + return wrappedSession.shutdown(timeout, unit); + } + + public Cluster getCluster() { + return wrappedSession.getCluster(); + } + + @Override + public void onAdd(Host host) { + for (StorageStateListener listener : listeners) { + listener.onStorageNodeUp(host.getAddress()); + } + } + + @Override + public void onUp(Host host) { + for (StorageStateListener listener : listeners) { + listener.onStorageNodeUp(host.getAddress()); + } + } + + @Override + public void onDown(Host host) { + for (StorageStateListener listener : listeners) { + listener.onStorageNodeDown(host.getAddress()); + } + } + + @Override + public void onRemove(Host host) { + for (StorageStateListener listener : listeners) { + 
listener.onStorageNodeRemoved(host.getAddress()); + } + } + + private ResultSet handleException(NoHostAvailableException e) { + for (StorageStateListener listener : listeners) { + listener.onStorageClusterDown(e); + } + throw e; + } +} diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/StorageStateListener.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/StorageStateListener.java new file mode 100644 index 0000000..6a3a216 --- /dev/null +++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/StorageStateListener.java @@ -0,0 +1,20 @@ +package org.rhq.server.metrics; + +import java.net.InetAddress; + +import com.datastax.driver.core.exceptions.NoHostAvailableException; + +/** + * @author John Sanda + */ +public interface StorageStateListener { + + void onStorageNodeUp(InetAddress address); + + void onStorageNodeDown(InetAddress address); + + void onStorageNodeRemoved(InetAddress address); + + void onStorageClusterDown(NoHostAvailableException e); + +} diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/domain/ListPagedResult.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/domain/ListPagedResult.java index 1c1fdd2..f3abd20 100644 --- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/domain/ListPagedResult.java +++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/domain/ListPagedResult.java @@ -33,10 +33,10 @@ import java.util.List; import com.datastax.driver.core.BoundStatement; import com.datastax.driver.core.PreparedStatement; import com.datastax.driver.core.ResultSet; -import com.datastax.driver.core.Session; import com.datastax.driver.core.exceptions.NoHostAvailableException;
import org.rhq.server.metrics.CQLException; +import org.rhq.server.metrics.StorageSession;
/** * This class helps paginate Cassandra results that have a list in the matching clause. Instead of running @@ -52,12 +52,12 @@ public class ListPagedResult<T> implements Iterable<T> { private final long startTime; private final long endTime; private final ResultSetMapper<T> mapper; - private final Session session; + private final StorageSession session;
private final PreparedStatement preparedStatement;
public ListPagedResult(PreparedStatement preparedStatement, List<Integer> scheduleIds, long startTime, long endTime, - ResultSetMapper<T> mapper, Session session) { + ResultSetMapper<T> mapper, StorageSession session) { this.preparedStatement = preparedStatement; this.scheduleIds = new LinkedList<Integer>(scheduleIds); this.startTime = startTime; diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/domain/SimplePagedResult.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/domain/SimplePagedResult.java index d14195c..e31db95 100644 --- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/domain/SimplePagedResult.java +++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/domain/SimplePagedResult.java @@ -34,6 +34,7 @@ import com.datastax.driver.core.SimpleStatement; import com.datastax.driver.core.exceptions.NoHostAvailableException;
import org.rhq.server.metrics.CQLException; +import org.rhq.server.metrics.StorageSession;
/** * This class is just a placeholder for future pagination implementations once Cassandra gets native support for paging results. @@ -51,7 +52,7 @@ public class SimplePagedResult<T> implements Iterable<T> {
private final ResultSetMapper<T> mapper; private final Query query; - private final Session session; + private final StorageSession session; private final int pageSize;
/** @@ -60,7 +61,7 @@ public class SimplePagedResult<T> implements Iterable<T> { * @param session Cassandra session * @param pageSize page size */ - public SimplePagedResult(Query query, ResultSetMapper<T> mapper, Session session, int pageSize) { + public SimplePagedResult(Query query, ResultSetMapper<T> mapper, StorageSession session, int pageSize) { this.query = query; this.mapper = mapper; this.session = session; @@ -73,7 +74,7 @@ public class SimplePagedResult<T> implements Iterable<T> { * @param session Cassandra session * @param pageSize page size */ - public SimplePagedResult(String query, ResultSetMapper<T> mapper, Session session, int pageSize) { + public SimplePagedResult(String query, ResultSetMapper<T> mapper, StorageSession session, int pageSize) { this(new SimpleStatement(query), mapper, session, pageSize); }
@@ -82,7 +83,7 @@ public class SimplePagedResult<T> implements Iterable<T> { * @param mapper result set mapper * @param session Cassandra session */ - public SimplePagedResult(Query query, ResultSetMapper<T> mapper, Session session) { + public SimplePagedResult(Query query, ResultSetMapper<T> mapper, StorageSession session) { this(query, mapper, session, DEFAULT_PAGE_SIZE); }
@@ -92,7 +93,7 @@ public class SimplePagedResult<T> implements Iterable<T> { * @param session Cassandra session * @param pageSize page size */ - public SimplePagedResult(String query, ResultSetMapper<T> mapper, Session session) { + public SimplePagedResult(String query, ResultSetMapper<T> mapper, StorageSession session) { this(new SimpleStatement(query), mapper, session); }
diff --git a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/CassandraIntegrationTest.java b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/CassandraIntegrationTest.java index c4c48c8..b227b2d 100644 --- a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/CassandraIntegrationTest.java +++ b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/CassandraIntegrationTest.java @@ -33,6 +33,7 @@ import java.util.concurrent.CountDownLatch;
import com.datastax.driver.core.BoundStatement; import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.Host; import com.datastax.driver.core.PreparedStatement; import com.datastax.driver.core.ResultSet; import com.datastax.driver.core.Session; @@ -65,8 +66,12 @@ public class CassandraIntegrationTest {
protected static Session session;
+ protected static StorageSession storageSession; + private static DateTimeService dateTimeService;
+ private final Log log = LogFactory.getLog(CassandraIntegrationTest.class); + @BeforeSuite @DeployCluster(numNodes = 2, username = "rhqadmin", password = "rhqadmin", waitForSchemaAgreement = true) public void deployCluster() throws Exception { @@ -76,7 +81,31 @@ public class CassandraIntegrationTest { .addContactPoints("127.0.0.1", "127.0.02") .withCredentials("rhqadmin", "rhqadmin") .build(); + + cluster.register(new Host.StateListener() { + @Override + public void onAdd(Host host) { + log.info("host " + host + " added"); + } + + @Override + public void onUp(Host host) { + log.info("host " + host + " up"); + } + + @Override + public void onDown(Host host) { + log.info("host " + host + " down"); + } + + @Override + public void onRemove(Host host) { + log.info("host " + host + " removed"); + } + }); + session = cluster.connect("rhq"); + storageSession = new StorageSession(session); }
@AfterSuite(alwaysRun = true) @@ -98,7 +127,7 @@ public class CassandraIntegrationTest { BoundStatement boundStatement = statement.bind(scheduleId);
return new SimplePagedResult<AggregateNumericMetric>(boundStatement, new AggregateNumericMetricMapper(), - session); + storageSession); }
protected Iterable<AggregateNumericMetric> findAggregateMetricsWithMetadata(MetricsTable table, int scheduleId, @@ -109,11 +138,11 @@ public class CassandraIntegrationTest { "SELECT schedule_id, time, type, value, ttl(value), writetime(value) " + "FROM " + table + " " + "WHERE schedule_id = ? AND time >= ? AND time < ?"; - PreparedStatement statement = session.prepare(cql); + PreparedStatement statement = storageSession.prepare(cql); BoundStatement boundStatement = statement.bind(scheduleId, new Date(startTime), new Date(endTime));
return new SimplePagedResult<AggregateNumericMetric>(boundStatement, new AggregateNumericMetricMapper(true), - session); + storageSession); } catch (NoHostAvailableException e) { throw new CQLException(e); } diff --git a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/ClusterMonitorTest.java b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/ClusterMonitorTest.java new file mode 100644 index 0000000..871b7c5 --- /dev/null +++ b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/ClusterMonitorTest.java @@ -0,0 +1,92 @@ +package org.rhq.server.metrics; + +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.ConsistencyLevel; +import com.datastax.driver.core.Host; +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.Session; +import com.datastax.driver.core.querybuilder.QueryBuilder; +import com.google.common.util.concurrent.FutureCallback; +import com.google.common.util.concurrent.Futures; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import org.rhq.cassandra.util.ClusterBuilder; +import org.rhq.core.domain.measurement.MeasurementDataNumeric; + +/** + * @author John Sanda + */ +public class ClusterMonitorTest { + + private final Log log = LogFactory.getLog(ClusterMonitorTest.class); + +// @Test + public void monitorCluster() { + Cluster cluster = new ClusterBuilder() + .addContactPoints("127.0.0.1") +// .withCredentials("cassandra", "cassandra") + .withCredentials("rhqadmin", "rhqadmin") + .build(); + + cluster.register(new Host.StateListener() { + @Override + public void onAdd(Host host) { + log.info("host " + host + " ADDED"); + } + + @Override + public void onUp(Host host) { + log.info("host " + host + " UP"); + } + + @Override + public void onDown(Host host) { + log.info("host " + host + " DOWN"); + } + + @Override + public void onRemove(Host host) { + log.info("host " + host + " 
REMOVED"); + } + }); + + Session session = cluster.connect("rhq"); + StorageSession storageSession = new StorageSession(session); + + MetricsDAO dao = new MetricsDAO(storageSession, new MetricsConfiguration()); + + while (true) { + try { + Thread.sleep(10000); + try { +// session.execute("select * from system.schema_keyspaces"); + com.datastax.driver.core.Query query = QueryBuilder.select().from("rhq", "raw_metrics").setConsistencyLevel( + ConsistencyLevel.ALL); +// session.execute(query); +// session.execute("select * from rhq.raw_metrics"); +// log.info("query succeeded"); + StorageResultSetFuture future = dao.insertRawData + (new MeasurementDataNumeric(System.currentTimeMillis(), 123, 1.1)); + + Futures.addCallback(future, new FutureCallback<ResultSet>() { + @Override + public void onSuccess(ResultSet rows) { + log.info("insert succeeded"); + } + + @Override + public void onFailure(Throwable throwable) { + log.error("insert failed", throwable); + } + }); + } catch (Exception e) { + log.error("query failed", e); + } + } catch (InterruptedException e) { + } + } + } + +} diff --git a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsBaselineCalculatorTest.java b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsBaselineCalculatorTest.java index ab4bd03..2b58222 100644 --- a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsBaselineCalculatorTest.java +++ b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsBaselineCalculatorTest.java @@ -35,8 +35,6 @@ import java.util.Arrays; import java.util.List; import java.util.Random;
-import com.datastax.driver.core.Session; - import org.powermock.api.mockito.PowerMockito; import org.powermock.core.classloader.annotations.PrepareForTest; import org.powermock.modules.testng.PowerMockObjectFactory; @@ -71,9 +69,9 @@ public class MetricsBaselineCalculatorTest {
//tell the method story as it happens: mock dependencies and configure //those dependencies to get the method under test to completion. - Session mockSession = mock(Session.class); + StorageSession mockSession = mock(StorageSession.class); MetricsDAO mockMetricsDAO = mock(MetricsDAO.class); - PowerMockito.whenNew(MetricsDAO.class).withParameterTypes(Session.class, MetricsConfiguration.class) + PowerMockito.whenNew(MetricsDAO.class).withParameterTypes(StorageSession.class, MetricsConfiguration.class) .withArguments(eq(mockSession), eq(metricsConfiguration)).thenReturn(mockMetricsDAO);
when(mockMetricsDAO.findAggregatedSimpleOneHourMetric(eq(1), eq(0), @@ -147,9 +145,9 @@ public class MetricsBaselineCalculatorTest {
//tell the method story as it happens: mock dependencies and configure //those dependencies to get the method under test to completion. - Session mockSession = mock(Session.class); + StorageSession mockSession = mock(StorageSession.class); MetricsDAO mockMetricsDAO = mock(MetricsDAO.class); - PowerMockito.whenNew(MetricsDAO.class).withParameterTypes(Session.class, MetricsConfiguration.class) + PowerMockito.whenNew(MetricsDAO.class).withParameterTypes(StorageSession.class, MetricsConfiguration.class) .withArguments(eq(mockSession), eq(metricsConfiguration)).thenReturn(mockMetricsDAO);
when( @@ -214,9 +212,9 @@ public class MetricsBaselineCalculatorTest {
//tell the method story as it happens: mock dependencies and configure //those dependencies to get the method under test to completion. - Session mockSession = mock(Session.class); + StorageSession mockSession = mock(StorageSession.class); MetricsDAO mockMetricsDAO = mock(MetricsDAO.class); - PowerMockito.whenNew(MetricsDAO.class).withParameterTypes(Session.class, MetricsConfiguration.class) + PowerMockito.whenNew(MetricsDAO.class).withParameterTypes(StorageSession.class, MetricsConfiguration.class) .withArguments(eq(mockSession), eq(metricsConfiguration)).thenReturn(mockMetricsDAO);
when( diff --git a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsDAOTest.java b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsDAOTest.java index ee65292..b06dfda 100644 --- a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsDAOTest.java +++ b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsDAOTest.java @@ -38,7 +38,6 @@ import java.util.Map; import java.util.Random; import java.util.Set;
-import com.datastax.driver.core.ResultSetFuture; import com.google.common.collect.Lists; import com.google.common.util.concurrent.Futures;
@@ -78,7 +77,7 @@ public class MetricsDAOTest extends CassandraIntegrationTest {
@BeforeClass public void initDAO() throws Exception { - dao = new MetricsDAO(session, new MetricsConfiguration()); + dao = new MetricsDAO(storageSession, new MetricsConfiguration()); }
@BeforeMethod @@ -101,7 +100,7 @@ public class MetricsDAOTest extends CassandraIntegrationTest {
WaitForWrite waitForResults = new WaitForWrite(1);
- ResultSetFuture resultSetFuture = dao.insertRawData(expected); + StorageResultSetFuture resultSetFuture = dao.insertRawData(expected); Futures.addCallback(resultSetFuture, waitForResults); waitForResults.await("Failed to insert raw data");
@@ -131,7 +130,7 @@ public class MetricsDAOTest extends CassandraIntegrationTest { WaitForWrite waitForWrite = new WaitForWrite(data.size());
for (MeasurementDataNumeric raw : data) { - ResultSetFuture resultSetFuture = dao.insertRawData(raw); + StorageResultSetFuture resultSetFuture = dao.insertRawData(raw); Futures.addCallback(resultSetFuture, waitForWrite); } waitForWrite.await("Failed to insert raw data"); @@ -160,14 +159,14 @@ public class MetricsDAOTest extends CassandraIntegrationTest { WaitForWrite waitForWrite = new WaitForWrite(data.size());
for (MeasurementDataNumeric raw : data) { - ResultSetFuture resultSetFuture = dao.insertRawData(raw); + StorageResultSetFuture resultSetFuture = dao.insertRawData(raw); Futures.addCallback(resultSetFuture, waitForWrite); } waitForWrite.await("Failed to insert raw data");
RawNumericMetricMapper mapper = new RawNumericMetricMapper(); WaitForRead<RawNumericMetric> waitForRead = new WaitForRead<RawNumericMetric>(mapper); - ResultSetFuture resultSetFuture = dao.findRawMetricsAsync(scheduleId, + StorageResultSetFuture resultSetFuture = dao.findRawMetricsAsync(scheduleId, threeMinutesAgo.minusSeconds(5).getMillis(), oneMinuteAgo.plusSeconds(5).getMillis()); Futures.addCallback(resultSetFuture, waitForRead);
@@ -200,7 +199,7 @@ public class MetricsDAOTest extends CassandraIntegrationTest { WaitForWrite waitForWrite = new WaitForWrite(data.size());
for (MeasurementDataNumeric raw : data) { - ResultSetFuture resultSetFuture = dao.insertRawData(raw); + StorageResultSetFuture resultSetFuture = dao.insertRawData(raw); Futures.addCallback(resultSetFuture, waitForWrite); } waitForWrite.await("Failed to insert raw data"); @@ -321,9 +320,9 @@ public class MetricsDAOTest extends CassandraIntegrationTest {
WaitForWrite waitForWrite = new WaitForWrite(2);
- ResultSetFuture resultSetFuture1 = dao.updateMetricsIndex(MetricsTable.TWENTY_FOUR_HOUR, scheduleId1, + StorageResultSetFuture resultSetFuture1 = dao.updateMetricsIndex(MetricsTable.TWENTY_FOUR_HOUR, scheduleId1, hour0().getMillis()); - ResultSetFuture resultSetFuture2 = dao.updateMetricsIndex(MetricsTable.TWENTY_FOUR_HOUR, scheduleId2, + StorageResultSetFuture resultSetFuture2 = dao.updateMetricsIndex(MetricsTable.TWENTY_FOUR_HOUR, scheduleId2, hour0().getMillis());
Futures.addCallback(resultSetFuture1, waitForWrite); diff --git a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsServerTest.java b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsServerTest.java index 1ec4771..b4de4e0 100644 --- a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsServerTest.java +++ b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsServerTest.java @@ -43,7 +43,6 @@ import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicInteger;
-import com.datastax.driver.core.ResultSetFuture; import com.google.common.base.Throwables; import com.google.common.collect.Lists; import com.google.common.util.concurrent.Futures; @@ -113,7 +112,7 @@ public class MetricsServerTest extends CassandraIntegrationTest { dateTimeService.setConfiguration(configuration); metricsServer.setDateTimeService(dateTimeService);
- dao = new MetricsDAO(session, configuration); + dao = new MetricsDAO(storageSession, configuration); metricsServer.setDAO(dao);
purgeDB(); @@ -230,7 +229,7 @@ public class MetricsServerTest extends CassandraIntegrationTest { WaitForWrite waitForRawInserts = new WaitForWrite(rawMetrics.size());
for (MeasurementDataNumeric raw : rawMetrics) { - ResultSetFuture resultSetFuture = dao.insertRawData(raw); + StorageResultSetFuture resultSetFuture = dao.insertRawData(raw); Futures.addCallback(resultSetFuture, waitForRawInserts); } waitForRawInserts.await("Failed to insert raw data"); @@ -1008,7 +1007,7 @@ public class MetricsServerTest extends CassandraIntegrationTest { "SELECT schedule_id, time, value, ttl(value), writetime(value) " + "FROM " + MetricsTable.RAW + " " + "WHERE schedule_id = " + scheduleId + " AND time >= " + startTime + " AND time < " + endTime; - return new SimplePagedResult<RawNumericMetric>(cql, new RawNumericMetricMapper(true), session); + return new SimplePagedResult<RawNumericMetric>(cql, new RawNumericMetricMapper(true), storageSession); }
private static class WaitForRawInserts implements RawDataInsertedCallback { diff --git a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/TimeoutTest.java b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/TimeoutTest.java index e2b23ec..dabd110 100644 --- a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/TimeoutTest.java +++ b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/TimeoutTest.java @@ -35,7 +35,6 @@ import com.google.common.base.Throwables;
import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.testng.annotations.Test;
import org.rhq.core.domain.measurement.MeasurementDataNumeric;
@@ -43,7 +42,7 @@ public class TimeoutTest extends CassandraIntegrationTest {
private final Log log = LogFactory.getLog(TimeoutTest.class);
- @Test +// @Test public void generateTimeout() throws Exception { MetricsConfiguration configuration = new MetricsConfiguration();
@@ -54,7 +53,7 @@ public class TimeoutTest extends CassandraIntegrationTest { dateTimeService.setConfiguration(configuration); metricsServer.setDateTimeService(dateTimeService);
- MetricsDAO dao = new MetricsDAO(session, configuration); + MetricsDAO dao = new MetricsDAO(new StorageSession(session), configuration); metricsServer.setDAO(dao);
long time = hour0().getMillis(); diff --git a/modules/enterprise/server/server-metrics/src/test/resources/log4j.properties b/modules/enterprise/server/server-metrics/src/test/resources/log4j.properties index 67db049..59bd5db 100644 --- a/modules/enterprise/server/server-metrics/src/test/resources/log4j.properties +++ b/modules/enterprise/server/server-metrics/src/test/resources/log4j.properties @@ -39,4 +39,4 @@ log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout log4j.appender.CONSOLE.layout.ConversionPattern=%d{ABSOLUTE} %-5p %c %m%n
log4j.logger.org.rhq=DEBUG -log4j.logger.com.datastax=DEBUG +log4j.logger.com.datastax=WARN diff --git a/modules/helpers/metrics-simulator/src/main/java/org/rhq/metrics/simulator/Simulator.java b/modules/helpers/metrics-simulator/src/main/java/org/rhq/metrics/simulator/Simulator.java index 7ff8f6c..40e00bf 100644 --- a/modules/helpers/metrics-simulator/src/main/java/org/rhq/metrics/simulator/Simulator.java +++ b/modules/helpers/metrics-simulator/src/main/java/org/rhq/metrics/simulator/Simulator.java @@ -60,6 +60,7 @@ import org.rhq.metrics.simulator.stats.Stats; import org.rhq.server.metrics.DateTimeService; import org.rhq.server.metrics.MetricsDAO; import org.rhq.server.metrics.MetricsServer; +import org.rhq.server.metrics.StorageSession;
/** * @author John Sanda @@ -96,7 +97,9 @@ public class Simulator implements ShutdownManager { session = createSession(nodes, compression); }
- MetricsDAO metricsDAO = new MetricsDAO(session, plan.getMetricsServerConfiguration()); + StorageSession storageSession = new StorageSession(session); + + MetricsDAO metricsDAO = new MetricsDAO(storageSession, plan.getMetricsServerConfiguration()); MetricsServer metricsServer = new MetricsServer(); metricsServer.setDAO(metricsDAO); metricsServer.setConfiguration(plan.getMetricsServerConfiguration());
commit 6c62c7c89a1707660e86fa97320420c2ef817339 Author: John Sanda jsanda@redhat.com Date: Thu Jul 25 22:06:31 2013 -0400
upgrade to version 1.0.2 of datastax driver
diff --git a/pom.xml b/pom.xml index f909033..3f2adbd 100644 --- a/pom.xml +++ b/pom.xml @@ -178,7 +178,7 @@ <!-- cassandra dependency versions --> <cassandra.version>1.2.4</cassandra.version> <cassandra.thrift.version>0.7.0</cassandra.thrift.version> - <cassandra.driver.version>1.0.0-rhq-1.2.4</cassandra.driver.version> + <cassandra.driver.version>1.0.2-rhq-1.2.4</cassandra.driver.version> <cassandra.driver.netty.version>3.6.3.Final</cassandra.driver.netty.version> <cassandra.snappy.version>1.0.4.1-rhq-p1</cassandra.snappy.version> <cassandra.snakeyaml.version>1.6</cassandra.snakeyaml.version>
commit b9029136b21154a2572c1794807de2ca9a30e2a2 Author: Stefan Negrea snegrea@redhat.com Date: Thu Jul 25 17:52:00 2013 -0500
Operations are now created in a new transaction. Also, change the way the configuration is updated.
The steps for a full configuration update: 1) Invoke config update operation on the plugin 2) Update the JMX port on the storage node entity 3) Restart the storage node server 4) Update the connection settings for the storage node if JMX port changed
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 7a7eda4..7d861f1 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -35,6 +35,8 @@ import java.util.Queue;
import javax.ejb.EJB; import javax.ejb.Stateless; +import javax.ejb.TransactionAttribute; +import javax.ejb.TransactionAttributeType; import javax.persistence.EntityManager; import javax.persistence.PersistenceContext; import javax.persistence.TypedQuery; @@ -134,6 +136,9 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN @EJB private ConfigurationManagerLocal configurationManager;
+ @EJB + private StorageNodeManagerLocal storageNodeManger; + @Override public void linkResource(Resource resource) { List<StorageNode> storageNodes = this.getStorageNodes(); @@ -658,31 +663,49 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN parameters);
if (result) { + //2. Update the JMX port + //this is a fast operation compared to the restart + storageNode.setJmxPort(storageNodeConfiguration.getJmxPort()); + entityManager.merge(storageNode); + + //3. Restart the storage node + result = runOperationAndWaitForResult(subject, storageNodeResource, RESTART_OPERATION, + new Configuration()); + + //4. Update the plugin configuration to talk with the new server + //Up to this point communication with the storage node should not have been affected by the intermediate + //changes Configuration storageNodePluginConfig = configurationManager.getPluginConfiguration(subject, storageNodeResource.getId());
String existingJMXPort = storageNodePluginConfig.getSimpleValue("jmxPort"); - storageNodePluginConfig.setSimpleValue("jmxPort", storageNodeConfiguration.getJmxPort() + ""); + String newJMXPort = storageNodeConfiguration.getJmxPort() + "";
- String existingConnectionURL = storageNodePluginConfig.getSimpleValue("connectorAddress"); - String newConnectionURL = existingConnectionURL.replace(":" + existingJMXPort + "/", ":" - + storageNodeConfiguration.getJmxPort() + "/"); - storageNodePluginConfig.setSimpleValue("connectorAddress", newConnectionURL); + if (!existingJMXPort.equals(newJMXPort)) { + storageNodePluginConfig.setSimpleValue("jmxPort", newJMXPort);
- configurationManager.updatePluginConfiguration(subject, storageNodeResource.getId(), - storageNodePluginConfig); + String existingConnectionURL = storageNodePluginConfig.getSimpleValue("connectorAddress"); + String newConnectionURL = existingConnectionURL.replace(":" + existingJMXPort + "/", ":" + + storageNodeConfiguration.getJmxPort() + "/"); + storageNodePluginConfig.setSimpleValue("connectorAddress", newConnectionURL);
- storageNode.setJmxPort(storageNodeConfiguration.getJmxPort()); - entityManager.merge(storageNode); - entityManager.flush(); + configurationManager.updatePluginConfiguration(subject, storageNodeResource.getId(), + storageNodePluginConfig); + }
- return runOperationAndWaitForResult(subject, storageNodeResource, RESTART_OPERATION, null); + return result; } }
return false; }
+ @Override + @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) + public void scheduleOperationInNewTransaction(Subject subject, ResourceOperationSchedule schedule) { + operationManager.scheduleResourceOperation(subject, schedule); + } + private boolean runOperationAndWaitForResult(Subject subject, Resource storageNodeResource, String operationToRun, Configuration parameters) {
@@ -696,8 +719,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN newSchedule.setDescription("Run by StorageNodeManagerBean"); newSchedule.setParameters(parameters);
- operationManager.scheduleResourceOperation(subject, newSchedule); - entityManager.flush(); + storageNodeManger.scheduleOperationInNewTransaction(subject, newSchedule);
//waiting for the operation result then return it int iteration = 0; diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java index 69b16c4..15fa85c 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java @@ -28,6 +28,7 @@ import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.cloud.StorageNodeConfigurationComposite; import org.rhq.core.domain.cloud.StorageNodeLoadComposite; import org.rhq.core.domain.criteria.StorageNodeCriteria; +import org.rhq.core.domain.operation.bean.ResourceOperationSchedule; import org.rhq.core.domain.resource.Resource; import org.rhq.core.domain.resource.group.ResourceGroup; import org.rhq.core.domain.util.PageList; @@ -178,4 +179,6 @@ public interface StorageNodeManagerLocal { */ ResourceGroup getStorageNodeGroup();
+ void scheduleOperationInNewTransaction(Subject subject, ResourceOperationSchedule schedule); + } diff --git a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml index a42040d..f5a4f6d 100644 --- a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml +++ b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml @@ -132,10 +132,10 @@
<operation name="updateConfiguration" description="Updates the node configuration. Will require a separate server restart for the settings to take effect."> <parameters> - <c:simple-property name="jmxPort" type="integer" description="JMX port JVM option."/> - <c:simple-property name="heapSize" type="string" description="The heap size to be used for both -Xms and -Xmx JVM options."/> - <c:simple-property name="heapNewSize" type="string" description="The heap new size to be used be used with -Xmn JVM option."/> - <c:simple-property name="threadStackSize" type="integer" description="The thread stack size. This memory is allocated to each thread off heap. The + <c:simple-property name="jmxPort" type="integer" required="false" description="JMX port JVM option."/> + <c:simple-property name="heapSize" type="string" required="false" description="The heap size to be used for both -Xms and -Xmx JVM options."/> + <c:simple-property name="heapNewSize" type="string" required="false" description="The heap new size to be used be used with -Xmn JVM option."/> + <c:simple-property name="threadStackSize" type="integer" required="false" description="The thread stack size. This memory is allocated to each thread off heap. The value should be an integer that will be interpreted in kilobytes."/> <c:simple-property name="restartIfRequired" type="boolean" required="true" default="false" description="If [true] then restart the server at the end of the update if and if only the updates made require a restart. If [false] the server will not be restarted regardless of the updates made."/> </parameters>
commit 97003022a49e9e534fd54488966eb89950760c39 Author: Mike Thompson mithomps@redhat.com Date: Thu Jul 25 14:16:38 2013 -0700
[BZ 988574] - Consolidated Metrics Screen - UXD Redesign
diff --git a/modules/enterprise/gui/coregui/pom.xml b/modules/enterprise/gui/coregui/pom.xml index 969ab8e..353f4d9 100644 --- a/modules/enterprise/gui/coregui/pom.xml +++ b/modules/enterprise/gui/coregui/pom.xml @@ -19,7 +19,7 @@
<properties> <!-- dependency versions --> - <gwt.version>2.5.0</gwt.version> + <gwt.version>${gwt.version}</gwt.version> <smartgwt.version>3.0</smartgwt.version>
<!-- If this is too much memory to allocate to your gwt:debug process then override this property in @@ -136,16 +136,7 @@ </dependency>
- <!-- the GWT graphing library (note, this provides jquery 1.3.2. If we get rid of GFlot we will need - to provide jquery explcitly for jquery.sparkline support. See CoreGUI.gwt.xml for the jquery.sparkline - declaration and coregui/webapp/js for the lib inclusion.) --> - <!-- NOTE: soon to be deprecated by d3.js --> - <dependency> - <groupId>com.googlecode.gflot</groupId> - <artifactId>gflot</artifactId> - <version>2.4.3</version> - <scope>provided</scope> - </dependency> +
<!-- for file uploads --> <dependency> @@ -245,7 +236,7 @@ <plugin> <groupId>org.codehaus.mojo</groupId> <artifactId>gwt-maven-plugin</artifactId> - <version>2.5.0</version> + <version>${gwt.version}</version> <configuration> <noServer>true</noServer> <inplace>false</inplace> diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/IconEnum.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/IconEnum.java index d11467a..b48a712 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/IconEnum.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/IconEnum.java @@ -114,6 +114,12 @@ public enum IconEnum { STORAGE_NODE("global/StorageNode_16.png", "global/StorageNode_24.png"),
///////////////////////////// + // General + ///////////////////////////// + EXPANDED_ICON("[SKIN]/ListGrid/row_expanded.png"), + COLLAPSED_ICON("[SKIN]/ListGrid/row_collapsed.png"), + + ///////////////////////////// // Resource Specific Tabs ///////////////////////////// CALLTIME("global/Recent_16.png", "global/Recent_24.png"); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/table/Table.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/table/Table.java index 4cfe199..4ce05d2 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/table/Table.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/components/table/Table.java @@ -1202,16 +1202,15 @@ public class Table<DS extends RPCDataSource> extends EnhancedHLayout implements }
if (hiddenItem != null) { - Log.debug("Found hidden items"); // Add the hidden item if it exists FormItem[] tmpItems = new FormItem[items.length + 1]; System.arraycopy(items, 0, tmpItems, 0, items.length); tmpItems[items.length] = hiddenItem; items = tmpItems; } - for (FormItem item : items) { - Log.debug(" ******** Form Items sent: " + item.getName() + ": " + item.getValue()); - } +// for (FormItem item : items) { +// Log.debug(" ******** Form Items sent: " + item.getName() + ": " + item.getValue()); +// }
super.setItems(items); } @@ -1223,19 +1222,16 @@ public class Table<DS extends RPCDataSource> extends EnhancedHLayout implements @Override public void onKeyPress(KeyPressEvent event) { if (event.getKeyName().equals("Enter")) { - Log.debug("Table.TableFilter Pressed Enter key");
if (null != searchBarItem) { if (searchBarItem.getSearchBar().isFilterEnabled()) { TextItem searchTextItem = searchBarItem.getSearchBar().getSearchTextItem(); String searchBarValue = searchTextItem.getValueAsString(); String hiddenValue = (String) hiddenItem.getValue(); - Log.debug("Table.TableFilter searchBarValue :" + searchBarValue + ", hiddenValue" + hiddenValue);
// Only send a fetch request if the user actually changed the search expression. if (!equals(searchBarValue, hiddenValue)) { hiddenItem.setValue(searchBarValue); - Log.debug("Table.TableFilter fetchFilteredTableData"); fetchFilteredTableData(); } } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/AbstractMetricGraph.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/AbstractMetricGraph.java index e75b65a..ace6154 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/AbstractMetricGraph.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/AbstractMetricGraph.java @@ -202,6 +202,10 @@ public abstract class AbstractMetricGraph extends VLayout implements HasD3Metric return metricGraphData.getPortalId(); }
+ public boolean isHideLegend(){ + return metricGraphData.isHideLegend(); + } + public void setGraphListView(AbstractD3GraphListView graphListView) { this.graphListView = graphListView; } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/ButtonBarDateTimeRangeEditor.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/ButtonBarDateTimeRangeEditor.java index 2075783..b4f1354 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/ButtonBarDateTimeRangeEditor.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/ButtonBarDateTimeRangeEditor.java @@ -77,7 +77,7 @@ public class ButtonBarDateTimeRangeEditor extends EnhancedVLayout { prefs = measurementUserPreferences.getMetricRangePreferences(); Log.debug("ButtonBarDateTimeRangeEditor initialized with start Date: " + new Date(prefs.begin) + " end Date: " + new Date(prefs.end)); - createButtons(); + //createButtons();
}
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/MetricGraphData.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/MetricGraphData.java index ecd3b46..5ae392f 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/MetricGraphData.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/MetricGraphData.java @@ -73,6 +73,7 @@ public class MetricGraphData implements JsonMetricProducer { private MeasurementOOBComposite lastOOB; private Integer chartHeight; private boolean isPortalGraph; + private boolean hideLegend;
private MetricGraphData(int portalId) { @@ -249,6 +250,14 @@ public class MetricGraphData implements JsonMetricProducer { return isPortalGraph; }
+ public boolean isHideLegend() { + return hideLegend; + } + + public void setHideLegend(boolean hideLegend) { + this.hideLegend = hideLegend; + } + public String getChartTitle() {
if(definition != null){ @@ -380,8 +389,8 @@ public class MetricGraphData implements JsonMetricProducer { * @see StackedBarMetricGraphImpl */ public boolean showBarAvgTrendLine() { + int numberOfAggBars = 0; for (MeasurementDataNumericHighLowComposite measurement : metricData) { - int numberOfAggBars = 0; boolean noValuesInCurrentBarUndefined = (!Double.isNaN(measurement.getValue()) && !Double.isNaN(measurement.getHighValue()) && !Double.isNaN(measurement.getLowValue())); boolean foundAggregateBar = (measurement.getValue() != measurement.getHighValue() || measurement.getHighValue() != measurement.getLowValue()); // if there exists a even one aggregate bar then I can short circuit this and exit diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityLineGraphType.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityLineGraphType.java deleted file mode 100644 index 34ca60b..0000000 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityLineGraphType.java +++ /dev/null @@ -1,311 +0,0 @@ -/* - * RHQ Management Platform - * Copyright (C) 2005-2012 Red Hat, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ -package org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype; - -import java.util.Date; -import java.util.List; - -import org.rhq.core.domain.measurement.Availability; -import org.rhq.core.domain.measurement.MeasurementUnits; -import org.rhq.core.domain.resource.group.composite.ResourceGroupAvailability; -import org.rhq.core.domain.resource.group.composite.ResourceGroupComposite; -import org.rhq.enterprise.gui.coregui.client.CoreGUI; -import org.rhq.enterprise.gui.coregui.client.Messages; -import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.AvailabilityGraphType; -import org.rhq.enterprise.gui.coregui.client.util.Log; -import org.rhq.enterprise.gui.coregui.client.util.MeasurementConverterClient; - -/** - * This is now old and for demonstration purposes only. - * Contains the javascript chart definition for an implementation of the d3 availability chart. This implementation is - * just a line that changes color based on availability type: up=green, down=red, orange=disabled, unknown=grey, - * empty=grey, warn=yellow. This version of the availability graph shows continuous intervals. - * @deprecated - * @see AvailabilityOverUnderGraphType - * - * @author Mike Thompson - */ -public class AvailabilityLineGraphType implements AvailabilityGraphType { - - private static Messages MSG = CoreGUI.getMessages(); - private List<Availability> availabilityList; - private List<ResourceGroupAvailability> groupAvailabilityList; - private Integer entityId; - - /** - * General constructor for stacked bar graph when you have all the data needed to produce the graph. (This is true - * for all cases but the dashboard portlet). 
- */ - public AvailabilityLineGraphType(Integer entityId) { - this.entityId = entityId; - } - - public void setAvailabilityList(List<Availability> availabilityList) { - this.availabilityList = availabilityList; - } - - public void setGroupAvailabilityList(List<ResourceGroupAvailability> groupAvailabilityList) { - this.groupAvailabilityList = groupAvailabilityList; - } - - public String getAvailabilityJson() { - StringBuilder sb = new StringBuilder("["); - if (null != availabilityList) { - // loop through the avail intervals - for (Availability availability : availabilityList) { - sb.append("{ "availType":"" + availability.getAvailabilityType() + "", "); - sb.append(" "availTypeMessage":"" + availability.getAvailabilityType()+ "", "); - sb.append(" "availStart":" + availability.getStartTime() + ", "); - // last record will be null - long endTime = availability.getEndTime() != null ? availability.getEndTime() : (new Date()).getTime(); - sb.append(" "availEnd":" + endTime + ", "); - - long availDuration = endTime - availability.getStartTime(); - String availDurationString = MeasurementConverterClient.format((double) availDuration, - MeasurementUnits.MILLISECONDS, true); - sb.append(" "availDuration": "" + availDurationString + "" },"); - - } - sb.setLength(sb.length() - 1); - - } else if (null != groupAvailabilityList) { - // loop through the group avail down intervals - for (ResourceGroupAvailability groupAvailability : groupAvailabilityList) { - // allows substitution for situations like WARN=MIXED for easier terminology - String availabilityTypeMessage = (groupAvailability.getGroupAvailabilityType().equals(ResourceGroupComposite.GroupAvailabilityType.WARN)) - ? 
MSG.chart_hover_availability_type_warn() : groupAvailability.getGroupAvailabilityType().name(); - - sb.append("{ "availType":"" + groupAvailability.getGroupAvailabilityType() + "", "); - sb.append(" "availTypeMessage":"" + availabilityTypeMessage + "", "); - sb.append(" "availStart":" + groupAvailability.getStartTime() + ", "); - // last record will be null - long endTime = groupAvailability.getEndTime() != null ? groupAvailability.getEndTime() : (new Date()) - .getTime(); - sb.append(" "availEnd":" + endTime + ", "); - - long availDuration = endTime - groupAvailability.getStartTime(); - String availDurationString = MeasurementConverterClient.format((double) availDuration, - MeasurementUnits.MILLISECONDS, true); - sb.append(" "availDuration": "" + availDurationString + "" },"); - - } - sb.setLength(sb.length() - 1); - } - - sb.append("]"); - Log.debug(sb.toString()); - return sb.toString(); - } - - /** - * The magic JSNI to draw the charts with d3. - */ - public native void drawJsniChart() /*-{ - console.log("Draw Availability chart"); - - var global = this, - // tidy up all of our interactions with java (via JSNI) thru AvailChartContext class - // NOTE: rhq.js has the javascript object constructors in it. 
- availChartContext = new $wnd.AvailChartContext(global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilityLineGraphType::getChartId()(), - global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilityLineGraphType::getAvailabilityJson()(), - global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilityLineGraphType::getChartDateLabel()(), - global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilityLineGraphType::getChartTimeLabel()(), - global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilityLineGraphType::getChartHoverStartLabel()(), - global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilityLineGraphType::getChartHoverEndLabel()(), - global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilityLineGraphType::getChartHoverBarLabel()(), - global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilityLineGraphType::getChartHoverAvailabilityLabel()(), - global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilityLineGraphType::getChartHoverTimeFormat()(), - global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilityLineGraphType::getChartHoverDateFormat()() - ); - - - var availabilityGraph = function () { - "use strict"; - // privates - - var margin = {top: 5, right: 5, bottom: 5, left: 40}, - barOffset = 10, - width = 750 - margin.left - margin.right + barOffset, - height = 20 - margin.top - margin.bottom, - pixelsOffHeight = 0, - svg; - - - function drawBars(availChartContext) { - var xAxisMin = $wnd.d3.min(availChartContext.data, function (d) { - return +d.availStart; - }), - xAxisMax = $wnd.d3.max(availChartContext.data, function (d) { - return +d.availEnd; - }), - - timeScale = $wnd.d3.time.scale() - .range([0, width]) - .domain([xAxisMin, 
xAxisMax]), - - yScale = $wnd.d3.scale.linear() - .clamp(true) - .rangeRound([height, 0]) - .domain([0, 4]), - - svg = $wnd.d3.select(availChartContext.chartSelection).append("g") - .attr("width", width + margin.left + margin.right) - .attr("height", height + margin.top + margin.bottom) - .attr("transform", "translate(" + margin.left + "," + margin.top + ")"); - - - // The gray bars at the bottom leading up - svg.selectAll("rect.availBars") - .data(availChartContext.data) - .enter().append("rect") - .attr("class", "availBars") - .attr("x", function (d) { - return timeScale(+d.availStart); - }) - .attr("y", function (d) { - return yScale(0); - }) - .attr("height", function (d) { - return height - yScale(4) - pixelsOffHeight; - }) - .attr("width", function (d) { - return timeScale(+d.availEnd) - timeScale(+d.availStart); - }) - - .attr("opacity", ".9") - .attr("fill", function (d) { - if (d.availType === 'DOWN') { - return "#FF1919"; // red - } - else if (d.availType === 'DISABLED') { - return "#FF9933"; // orange - } - else if (d.availType === 'UNKNOWN') { - return "#CCC"; // gray - } - else if (d.availType === 'UP') { - return "#198C19"; // green - } - else if (d.availType === 'WARN') { - return "#FFFF00"; // yellow - } - else if (d.availType === 'EMPTY') { - return "#CCC"; // gray - } - else { - // should not ever happen, but... 
- console.warn("AvailabilityType not valid."); - return "#000"; //black - } - }); - } - - function createHovers() { - $wnd.jQuery('svg rect.availBars').tipsy({ - gravity: 'n', - html: true, - trigger: 'hover', - title: function () { - var d = this.__data__; - return formatHovers(d); - }, - show: function (e, el) { - el.css({ 'z-index': '990000'}) - } - }); - } - - function formatHovers(d) { - var hoverString, - timeFormatter = $wnd.d3.time.format(availChartContext.chartHoverTimeFormat), - dateFormatter = $wnd.d3.time.format(availChartContext.chartHoverDateFormat), - availStart = new Date(+d.availStart), - availEnd = new Date(+d.availEnd); - - hoverString = - '<div class="chartHoverEnclosingDiv">' + - '<div class="chartHoverAlignRight"><span >' + availChartContext.hoverBarAvailabilityLabel + ': </span><span style="width:50px;">' + d.availTypeMessage + '</span></div>' + - '<div class="chartHoverAlignRight"><span >' + availChartContext.hoverStartLabel + ': </span><span style="width:50px;">' + timeFormatter(availStart) + '</span></div>' + - '<div class="chartHoverAlignRight"><span >' + ' </span><span style="width:50px;">' + dateFormatter(availStart) + '</span></div>' + - '<div class="chartHoverAlignRight"><span >' + availChartContext.hoverEndLabel + ': </span><span style="width:50px;">' + timeFormatter(availEnd) + '</span></div>' + - '<div class="chartHoverAlignRight"><span >' + ' </span><span style="width:50px;">' + dateFormatter(availEnd) + '</span></div>' + - '<div class="chartHoverAlignRight"><span >' + availChartContext.hoverBarLabel + ': </span><span style="width:50px;">' + d.availDuration + '</span></div>' + - '</div>'; - return hoverString; - - } - - return { - // Public API - draw: function (chartContext) { - "use strict"; - console.info("AvailabilityChart"); - //console.time("availabilityChart"); - - drawBars(availChartContext); - createHovers(); - //console.timeEnd("availabilityChart"); - } - }; // end public closure - - - }(); - - if (availChartContext.data 
!== undefined && availChartContext.data.length > 0) { - availabilityGraph.draw(availChartContext); - } - - }-*/; - - public String getChartId() { - return String.valueOf(entityId); - } - - public String getChartTimeLabel() { - return MSG.chart_time_label(); - } - - public String getChartDateLabel() { - return MSG.chart_date_label(); - } - - public String getChartHoverAvailabilityLabel() { - return MSG.chart_hover_availability_label(); - } - - public String getChartHoverStartLabel() { - return MSG.chart_hover_start_label(); - } - - public String getChartHoverEndLabel() { - return MSG.chart_hover_end_label(); - } - - public String getChartHoverBarLabel() { - return MSG.chart_hover_bar_label(); - } - - public String getChartHoverTimeFormat() { - return MSG.chart_hover_time_format(); - } - - public String getChartHoverDateFormat() { - return MSG.chart_hover_date_format(); - } -} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java index 7c5f80a..811a579 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java @@ -311,13 +311,6 @@ public class AvailabilityOverUnderGraphType implements AvailabilityGraphType { }); }
- function timeFormat(formats) { - return function(date) { - var i = formats.length - 1, f = formats[i]; - while (!f[1](date)) f = formats[--i]; - return f[0](date); - } - }
function formatHovers(d) { var timeFormatter = $wnd.d3.time.format(availChartContext.chartHoverTimeFormat), @@ -345,11 +338,13 @@ public class AvailabilityOverUnderGraphType implements AvailabilityGraphType {
}();
- if (availChartContext.data !== undefined && availChartContext.data.length > 0) { + console.log("Avail Data records: "+availChartContext.data.length); + if (typeof availChartContext.data !== 'undefined' && availChartContext.data.length > 0) { availabilityGraph.draw(availChartContext); + console.log("Availability Chart Drawn"); }
- }-*/; + }-*/;
public String getChartId() { return String.valueOf(entityId); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/LineMetricGraph.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/LineMetricGraph.java deleted file mode 100644 index 079b2ab..0000000 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/LineMetricGraph.java +++ /dev/null @@ -1,158 +0,0 @@ -/* - * RHQ Management Platform - * Copyright (C) 2005-2012 Red Hat, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ -package org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype; - -import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.AbstractMetricGraph; -import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.MetricGraphData; - -/** - * Contains the javascript chart definition for a d3 Line graph chart. - * NOTE: this class isn't used just provided as an example as how to create - * other graph types. - * - * @author Mike Thompson - */ -public final class LineMetricGraph extends AbstractMetricGraph { - - /** - * General constructor for stacked bar graph when you have all the data needed to - * produce the graph. 
(This is true for all cases but the dashboard portlet). - */ - public LineMetricGraph(MetricGraphData metricGraphData) { - setMetricGraphData(metricGraphData); - } - - /** - * The magic JSNI to draw the charts with d3. - */ - public native void drawJsniChart() /*-{ - console.log("Draw Metric Line jsni chart"); - var global = this, - chartId = global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.MetricGraphData::getChartId()(), - chartHandle = "#rChart-"+chartId, - chartSelection = chartHandle + " svg", - json = $wnd.jQuery.parseJSON(global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.MetricGraphData::getJsonMetrics()()), - yAxisLabel = global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.MetricGraphData::getChartTitle()(), - yAxisUnits = global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.MetricGraphData::getYAxisUnits()(), - xAxisLabel = global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.MetricGraphData::getXAxisTitle()(); - - console.log("chart id: "+chartSelection ); - console.log(global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.MetricGraphData::getJsonMetrics()()); - - - function draw(data){ - "use strict"; - - var margin = {top: 10, right: 5, bottom: 30, left: 70}, - width = 400 - margin.left - margin.right, - height = 150 - margin.top - margin.bottom; - - var timeScale = $wnd.d3.time.scale() - .range([0, width]) - .domain($wnd.d3.extent(data, function(d) { return d.x; })); - - var yScale = $wnd.d3.scale.linear() - .rangeRound([height, 0]) - .domain([$wnd.d3.min(data.map(function(x) {return x.low;})), $wnd.d3.max(data.map(function(x){return x.high;}))]); - - var xAxis = $wnd.d3.svg.axis() - .scale(timeScale) - .ticks(5) - .orient("bottom"); - - var yAxis = $wnd.d3.svg.axis() - .scale(yScale) - .ticks(5) - .orient("left"); - - var interpolation = "basis"; - - var line = $wnd.d3.svg.line() - .interpolate(interpolation) - .x(function(d) { return timeScale(d.x); 
}) - .y(function(d) { return yScale(+d.y); }); - - var highLine = $wnd.d3.svg.line() - .interpolate(interpolation) - .x(function(d) { return timeScale(d.x); }) - .y(function(d) { return yScale(+d.high); }); - - var lowLine = $wnd.d3.svg.line() - .interpolate(interpolation) - .x(function(d) { return timeScale(d.x); }) - .y(function(d) { return yScale(+d.low); }); - - var svg = $wnd.d3.select(chartSelection).append("g") - .attr("width", width + margin.left + margin.right) - .attr("height", height + margin.top + margin.bottom) - .attr("transform", "translate(" + margin.left + "," + margin.top + ")"); - - svg.append("g") - .attr("class", "x axis") - .attr("transform", "translate(0," + height + ")") - .call(xAxis); - - - svg.append("g") - .attr("class", "y axis") - .call(yAxis) - .append("text") - .attr("transform", "rotate(-90)") - .attr("y", -60) - .attr("dy", ".71em") - .style("text-anchor", "end") - .text(yAxisUnits === "NONE" ? "" : yAxisUnits); - - console.log("finished axes"); - - svg.append("path") - .datum(data) - .attr("class", "line") - .attr("fill", "none") - .attr("stroke", "steelblue") - .attr("stroke-width", "2") - .attr("d", line); - - svg.append("path") - .datum(data) - .attr("class", "highLine") - .attr("fill", "none") - .attr("stroke", "red") - .attr("stroke-width", "1.5") - //.attr("stroke-dasharray", "20,10,5,5,5,10") - .attr("stroke-dasharray", "5,5") - .attr("stroke-opacity", ".3") - .attr("d", highLine); - - svg.append("path") - .datum(data) - .attr("class", "lowLine") - .attr("fill", "none") - .attr("stroke", "blue") - .attr("stroke-width", "1.5") - .attr("stroke-dasharray", "5,5") - .attr("stroke-opacity", ".3") - .attr("d", lowLine); - - console.log("finished paths"); - }(data); - - }-*/; - -} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/StackedBarMetricGraphImpl.java 
b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/StackedBarMetricGraphImpl.java index 9058ea5..67552d3 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/StackedBarMetricGraphImpl.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/StackedBarMetricGraphImpl.java @@ -73,7 +73,8 @@ public class StackedBarMetricGraphImpl extends AbstractMetricGraph { global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.AbstractMetricGraph::getButtonBarDateTimeFormat()(), global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.AbstractMetricGraph::getChartSingleValueLabel()(), global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.AbstractMetricGraph::getXAxisTimeFormatHours()(), - global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.AbstractMetricGraph::getXAxisTimeFormatHoursMinutes()() + global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.AbstractMetricGraph::getXAxisTimeFormatHoursMinutes()(), + global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.AbstractMetricGraph::isHideLegend()() );
@@ -217,8 +218,8 @@ public class StackedBarMetricGraphImpl extends AbstractMetricGraph { .attr("transform", "translate(" + margin.left + "," + (+titleHeight + titleSpace + margin.top) + ")");
legendUnDefined = (typeof min === 'undefined') || (typeof avg === 'undefined') || (typeof peak === 'undefined'); - if (!useSmallCharts() && !legendUnDefined) { - createMinAvgPeakSidePanel(chartContext.minChartTitle, min, chartContext.avgChartTitle, avg, chartContext.peakChartTitle, peak, chartContext.yAxisUnits); + if (!(chartContext.hideLegend && !useSmallCharts())) { + createMinAvgPeakSidePanel(chartContext.minChartTitle, min, chartContext.avgChartTitle, avg, chartContext.peakChartTitle, peak, chartContext.yAxisUnits); } }
@@ -693,7 +694,7 @@ public class StackedBarMetricGraphImpl extends AbstractMetricGraph { }; // end public closure }();
- if(typeof chartContext.data !== 'undefined' && chartContext.data.length > 0){ + if(typeof chartContext.data !== 'undefined' && chartContext.data !== null && chartContext.data.length > 0){ metricStackedBarGraph.draw(chartContext); }
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/D3GroupGraphListView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/D3GroupGraphListView.java index 59737ac..a9789c6 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/D3GroupGraphListView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/D3GroupGraphListView.java @@ -21,7 +21,6 @@ package org.rhq.enterprise.gui.coregui.client.inventory.groups.detail; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; -import java.util.EnumSet; import java.util.List;
import com.google.gwt.core.client.GWT; @@ -35,19 +34,17 @@ import org.rhq.core.domain.measurement.DataType; import org.rhq.core.domain.measurement.DisplayType; import org.rhq.core.domain.measurement.MeasurementDefinition; import org.rhq.core.domain.measurement.composite.MeasurementDataNumericHighLowComposite; -import org.rhq.core.domain.resource.ResourceType; import org.rhq.core.domain.resource.group.ResourceGroup; import org.rhq.core.domain.resource.group.composite.ResourceGroupAvailability; import org.rhq.enterprise.gui.coregui.client.CoreGUI; import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; import org.rhq.enterprise.gui.coregui.client.inventory.AutoRefresh; import org.rhq.enterprise.gui.coregui.client.inventory.common.AbstractD3GraphListView; -import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilityOverUnderGraphType; import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.MetricGraphData; +import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilityOverUnderGraphType; import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.StackedBarMetricGraphImpl; import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.MetricD3Graph; import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.avail.AvailabilityD3GraphView; -import org.rhq.enterprise.gui.coregui.client.inventory.resource.type.ResourceTypeRepository; import org.rhq.enterprise.gui.coregui.client.util.Log; import org.rhq.enterprise.gui.coregui.client.util.async.CountDownLatch;
@@ -55,7 +52,7 @@ import org.rhq.enterprise.gui.coregui.client.util.async.CountDownLatch; * Build the Group version of the View that shows the individual graph views. * @author Mike Thompson */ -public final class D3GroupGraphListView extends AbstractD3GraphListView implements AutoRefresh{ +public final class D3GroupGraphListView extends AbstractD3GraphListView implements AutoRefresh {
private ResourceGroup resourceGroup; private VLayout graphsVLayout; @@ -75,7 +72,8 @@ public final class D3GroupGraphListView extends AbstractD3GraphListView implemen
addMember(buttonBarDateTimeRangeEditor); if (showAvailabilityGraph) { - availabilityGraph = new AvailabilityD3GraphView<AvailabilityOverUnderGraphType>(new AvailabilityOverUnderGraphType(resourceGroup.getId())); + availabilityGraph = new AvailabilityD3GraphView<AvailabilityOverUnderGraphType>( + new AvailabilityOverUnderGraphType(resourceGroup.getId())); addMember(availabilityGraph); } graphsVLayout = new VLayout(); @@ -89,7 +87,6 @@ public final class D3GroupGraphListView extends AbstractD3GraphListView implemen addMember(graphsVLayout); }
- public void redrawGraphs() { this.onDraw(); } @@ -100,68 +97,60 @@ public final class D3GroupGraphListView extends AbstractD3GraphListView implemen private void buildGraphs() {
queryAvailability(EntityContext.forGroup(resourceGroup), buttonBarDateTimeRangeEditor.getStartTime(), - buttonBarDateTimeRangeEditor.getEndTime(), null); + buttonBarDateTimeRangeEditor.getEndTime(), null);
- ResourceTypeRepository.Cache.getInstance().getResourceTypes(resourceGroup.getResourceType().getId(), - EnumSet.of(ResourceTypeRepository.MetadataType.measurements), - new ResourceTypeRepository.TypeLoadedCallback() { - public void onTypesLoaded(final ResourceType type) { + final ArrayList<MeasurementDefinition> measurementDefinitions = new ArrayList<MeasurementDefinition>();
- final ArrayList<MeasurementDefinition> measurementDefinitions = new ArrayList<MeasurementDefinition>(); + for (MeasurementDefinition def : resourceGroup.getResourceType().getMetricDefinitions()) { + if (def.getDataType() == DataType.MEASUREMENT && def.getDisplayType() == DisplayType.SUMMARY) { + measurementDefinitions.add(def); + } + }
- for (MeasurementDefinition def : type.getMetricDefinitions()) { - if (def.getDataType() == DataType.MEASUREMENT && def.getDisplayType() == DisplayType.SUMMARY) { - measurementDefinitions.add(def); - } - } + Collections.sort(measurementDefinitions, new Comparator<MeasurementDefinition>() { + public int compare(MeasurementDefinition o1, MeasurementDefinition o2) { + return new Integer(o1.getDisplayOrder()).compareTo(o2.getDisplayOrder()); + } + });
- Collections.sort(measurementDefinitions, new Comparator<MeasurementDefinition>() { - public int compare(MeasurementDefinition o1, MeasurementDefinition o2) { - return new Integer(o1.getDisplayOrder()).compareTo(o2.getDisplayOrder()); - } - }); + int[] measDefIdArray = new int[measurementDefinitions.size()]; + for (int i = 0; i < measDefIdArray.length; i++) { + measDefIdArray[i] = measurementDefinitions.get(i).getId(); + }
- int[] measDefIdArray = new int[measurementDefinitions.size()]; - for (int i = 0; i < measDefIdArray.length; i++) { - measDefIdArray[i] = measurementDefinitions.get(i).getId(); - } + GWTServiceLookup.getMeasurementDataService().findDataForCompatibleGroup(resourceGroup.getId(), measDefIdArray, + buttonBarDateTimeRangeEditor.getStartTime(), buttonBarDateTimeRangeEditor.getEndTime(), 60, + new AsyncCallback<List<List<MeasurementDataNumericHighLowComposite>>>() { + @Override + public void onFailure(Throwable caught) { + CoreGUI.getErrorHandler().handleError(MSG.view_resource_monitor_graphs_loadFailed(), caught); + loadingLabel.setContents(MSG.view_resource_monitor_graphs_loadFailed()); + }
- GWTServiceLookup.getMeasurementDataService().findDataForCompatibleGroup(resourceGroup.getId(), - measDefIdArray, buttonBarDateTimeRangeEditor.getStartTime(), buttonBarDateTimeRangeEditor.getEndTime(), 60, - new AsyncCallback<List<List<MeasurementDataNumericHighLowComposite>>>() { - @Override - public void onFailure(Throwable caught) { - CoreGUI.getErrorHandler().handleError(MSG.view_resource_monitor_graphs_loadFailed(), - caught); - loadingLabel.setContents(MSG.view_resource_monitor_graphs_loadFailed()); - } - - @Override - public void onSuccess(List<List<MeasurementDataNumericHighLowComposite>> result) { - if (result.isEmpty()) { - loadingLabel.setContents(MSG.view_resource_monitor_graphs_noneAvailable()); - } else { - loadingLabel.hide(); - int i = 0; - for (List<MeasurementDataNumericHighLowComposite> data : result) { - buildIndividualGraph(measurementDefinitions.get(i++), data); - } - // There is a weird timing case when availabilityGraph can be null - if (availabilityGraph != null) { - availabilityGraph.setGroupAvailabilityList(groupAvailabilityList); - new Timer(){ - @Override - public void run() { - availabilityGraph.drawJsniChart(); - } - }.schedule(150); - } + @Override + public void onSuccess(List<List<MeasurementDataNumericHighLowComposite>> result) { + if (result.isEmpty()) { + loadingLabel.setContents(MSG.view_resource_monitor_graphs_noneAvailable()); + } else { + loadingLabel.hide(); + int i = 0; + for (List<MeasurementDataNumericHighLowComposite> data : result) { + buildIndividualGraph(measurementDefinitions.get(i++), data); + } + // There is a weird timing case when availabilityGraph can be null + if (availabilityGraph != null) { + availabilityGraph.setGroupAvailabilityList(groupAvailabilityList); + new Timer() { + @Override + public void run() { + availabilityGraph.drawJsniChart(); } - } - }); - + }.schedule(150); + } + } } }); + }
protected void queryAvailability(final EntityContext groupContext, Long startTime, Long endTime, @@ -197,7 +186,7 @@ public final class D3GroupGraphListView extends AbstractD3GraphListView implemen List<MeasurementDataNumericHighLowComposite> data) {
MetricGraphData metricGraphData = MetricGraphData.createForResourceGroup(resourceGroup.getId(), - resourceGroup.getName(), measurementDefinition, data ); + resourceGroup.getName(), measurementDefinition, data);
StackedBarMetricGraphImpl graph = GWT.create(StackedBarMetricGraphImpl.class); graph.setMetricGraphData(metricGraphData); @@ -207,11 +196,9 @@ public final class D3GroupGraphListView extends AbstractD3GraphListView implemen graphView.setWidth("95%"); graphView.setHeight(MULTI_CHART_HEIGHT);
- if(graphsVLayout != null){ + if (graphsVLayout != null) { graphsVLayout.addMember(graphView); } }
- - } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupMultiLineGraphListView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupMultiLineGraphListView.java new file mode 100644 index 0000000..101df60 --- /dev/null +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupMultiLineGraphListView.java @@ -0,0 +1,79 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2012 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ +package org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table; + + +import org.rhq.enterprise.gui.coregui.client.CoreGUI; +import org.rhq.enterprise.gui.coregui.client.Messages; + +/** + * A MultiLine version of the Composite group single metric multiple resource charts. 
+ * + * @author Mike Thompson + */ +public final class CompositeGroupMultiLineGraphListView extends CompositeGroupD3GraphListView +{ + private static final Messages MSG = CoreGUI.getMessages(); + + public CompositeGroupMultiLineGraphListView(int groupId, int defId, boolean isAutogroup) + { + super(groupId, defId, isAutogroup); + } + + + + @Override + public native void drawJsniChart() /*-{ + console.log("Draw nvd3 charts for composite multiline graph"); + var chartId = global.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getChartId()(), + chartHandle = "#mChart-"+chartId, + chartSelection = chartHandle + " svg", + yAxisUnits = this.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getYAxisUnits()(), + xAxisLabel = this.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getXAxisTitle()(), + xAxisTimeFormat = this.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupMultiLineGraphListView::getXAxisTimeFormatHoursMinutes()(); + json = eval(this.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getJsonMetrics()()); + + $wnd.nv.addGraph(function() { + var chart = $wnd.nv.models.lineChart(); + + chart.xAxis.axisLabel(xAxisLabel) + .tickFormat(function(d) { return $wnd.d3.time.format(xAxisTimeFormat)(new Date(d)) }); + + chart.yAxis + .axisLabel(yAxisUnits) + .tickFormat($wnd.d3.format('.02f')); + + $wnd.d3.select(chartSelection) + .datum(json) + .transition().duration(300) + .call(chart); + + $wnd.nv.utils.windowResize(chart.update); + + return chart; + }); + + }-*/; + + + + public String getXAxisTimeFormatHoursMinutes() { + return MSG.chart_xaxis_time_format_hours_minutes(); + } +} diff --git 
a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/DashboardLinkUtility.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/DashboardLinkUtility.java new file mode 100644 index 0000000..8ca1ac5 --- /dev/null +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/DashboardLinkUtility.java @@ -0,0 +1,258 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ +package org.rhq.enterprise.gui.coregui.client.inventory.resource.detail; + +import java.util.HashMap; +import java.util.Map; +import java.util.TreeSet; + +import com.google.gwt.http.client.Request; +import com.google.gwt.http.client.RequestBuilder; +import com.google.gwt.http.client.RequestCallback; +import com.google.gwt.http.client.RequestException; +import com.google.gwt.http.client.Response; +import com.google.gwt.user.client.History; +import com.google.gwt.user.client.rpc.AsyncCallback; +import com.smartgwt.client.widgets.menu.Menu; +import com.smartgwt.client.widgets.menu.MenuItem; +import com.smartgwt.client.widgets.menu.events.ClickHandler; +import com.smartgwt.client.widgets.menu.events.MenuItemClickEvent; + +import org.rhq.core.domain.auth.Subject; +import org.rhq.core.domain.configuration.PropertySimple; +import org.rhq.core.domain.criteria.DashboardCriteria; +import org.rhq.core.domain.criteria.SubjectCriteria; +import org.rhq.core.domain.dashboard.Dashboard; +import org.rhq.core.domain.dashboard.DashboardPortlet; +import org.rhq.core.domain.measurement.DataType; +import org.rhq.core.domain.measurement.MeasurementDefinition; +import org.rhq.core.domain.measurement.MeasurementSchedule; +import org.rhq.core.domain.resource.Resource; +import org.rhq.core.domain.resource.ResourceType; +import org.rhq.core.domain.util.PageList; +import org.rhq.enterprise.gui.coregui.client.CoreGUI; +import org.rhq.enterprise.gui.coregui.client.Messages; +import org.rhq.enterprise.gui.coregui.client.UserSessionManager; +import org.rhq.enterprise.gui.coregui.client.dashboard.portlets.inventory.resource.graph.ResourceD3GraphPortlet; +import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; +import org.rhq.enterprise.gui.coregui.client.util.Log; +import org.rhq.enterprise.gui.coregui.client.util.message.Message; +import org.rhq.enterprise.gui.coregui.client.util.preferences.MeasurementUserPreferences; +import 
org.rhq.enterprise.gui.coregui.client.util.preferences.UserPreferences; + +/** + * Utility Class to build menus for linking to the Dashboard. + * @author Jay Shaughnessy + * @author Greg Hinkle + * @author Mike Thompson + */ +public class DashboardLinkUtility { + final static Messages MSG = CoreGUI.getMessages(); + + private DashboardLinkUtility() { + } + + public static MenuItem buildMetricsMenu(final ResourceType resourceType, final Resource resource, String label) { + + MenuItem measurements = new MenuItem(label); + final Menu measurementsSubMenu = new Menu(); + + DashboardCriteria criteria = new DashboardCriteria(); + GWTServiceLookup.getDashboardService().findDashboardsByCriteria(criteria, + new AsyncCallback<PageList<Dashboard>>() { + + public void onFailure(Throwable caught) { + CoreGUI.getErrorHandler().handleError(MSG.view_tree_common_contextMenu_loadFailed_dashboard(), + caught); + } + + public void onSuccess(PageList<Dashboard> result) { + //sort the display items alphabetically + TreeSet<String> ordered = new TreeSet<String>(); + Map<String, MeasurementDefinition> definitionMap = new HashMap<String, MeasurementDefinition>(); + for (MeasurementDefinition m : resourceType.getMetricDefinitions()) { + ordered.add(m.getDisplayName()); + definitionMap.put(m.getDisplayName(), m); + } + + for (String displayName : ordered) { + final MeasurementDefinition def = definitionMap.get(displayName); + //only add menu items for Measurement + if (def.getDataType().equals(DataType.MEASUREMENT)) { + MenuItem defItem = new MenuItem(def.getDisplayName()); + measurementsSubMenu.addItem(defItem); + Menu defSubItem = new Menu(); + defItem.setSubmenu(defSubItem); + + for (final Dashboard d : result) { + MenuItem addToDBItem = new MenuItem(MSG + .view_tree_common_contextMenu_addChartToDashboard(d.getName())); + defSubItem.addItem(addToDBItem); + + addToDBItem.addClickHandler(new ClickHandler() { + + public void onClick(MenuItemClickEvent menuItemClickEvent) { + DashboardPortlet p 
= new DashboardPortlet(MSG + .view_tree_common_contextMenu_resourceGraph(), ResourceD3GraphPortlet.KEY, + 250); + p.getConfiguration() + .put( + new PropertySimple(ResourceD3GraphPortlet.CFG_RESOURCE_ID, resource + .getId())); + p.getConfiguration().put( + new PropertySimple(ResourceD3GraphPortlet.CFG_DEFINITION_ID, def.getId())); + + d.addPortlet(p); + + GWTServiceLookup.getDashboardService().storeDashboard(d, + new AsyncCallback<Dashboard>() { + + public void onFailure(Throwable caught) { + CoreGUI.getErrorHandler().handleError( + MSG.view_tree_common_contextMenu_saveChartToDashboardFailure(), + caught); + } + + public void onSuccess(Dashboard result) { + CoreGUI + .getMessageCenter() + .notify( + new Message( + MSG.view_tree_common_contextMenu_saveChartToDashboardSuccessful(result + .getName()), Message.Severity.Info)); + } + }); + + } + + }); + + } + + //add new menu item for adding current graphable element to view if on Monitor/Graphs tab + String currentViewPath = History.getToken(); + if (currentViewPath.contains("Monitoring/Metrics")) { + MenuItem addGraphItem = new MenuItem(MSG.common_title_add_graph_to_view()); + defSubItem.addItem(addGraphItem); + + addGraphItem.addClickHandler(new ClickHandler() { + public void onClick(MenuItemClickEvent menuItemClickEvent) { + //generate javascript to call out to. + //Ex. 
menuLayers.hide();addMetric('${metric.resourceId},${metric.scheduleId}') + if (getScheduleDefinitionId(resource, def.getName()) > -1) { + final String resourceGraphElements = resource.getId() + "," + + getScheduleDefinitionId(resource, def.getName()); + + //Once, the portal-war will be rewritten to GWT and operations performed + //within the iframe + JSF will update the user preferences, the following + //2 lines could be uncommented and the lines below them refactorized + //MeasurementUserPreferences measurementPreferences = new MeasurementUserPreferences(UserSessionManager.getUserPreferences()); + //String selectedView = measurementPreferences.getSelectedView(String.valueOf(resource.getId())); + + final int sid = UserSessionManager.getSessionSubject().getId(); + SubjectCriteria c = new SubjectCriteria(); + c.addFilterId(sid); + + GWTServiceLookup.getSubjectService().findSubjectsByCriteria(c, + new AsyncCallback<PageList<Subject>>() { + public void onSuccess(PageList<Subject> result) { + if (result.size() > 0) { + UserPreferences uPreferences = new UserPreferences(result + .get(0)); + MeasurementUserPreferences mPreferences = new MeasurementUserPreferences( + uPreferences); + String selectedView = mPreferences.getSelectedView(String + .valueOf(resource.getId())); + + addNewMetric(String.valueOf(resource.getId()), + selectedView, resourceGraphElements); + } else { + Log.warn("DashboardLinkUtility: Error obtaining subject with id:" + sid); + } + } + + public void onFailure(Throwable caught) { + Log.warn("DashboardLinkUtility: Error obtaining subject with id:" + sid, caught); + } + }); + } + } + }); + } + } + } + + } + }); + measurements.setSubmenu(measurementsSubMenu); + return measurements; + } + + /** Locate the specific schedule definition using the definition identifier. 
+ */ + private static int getScheduleDefinitionId(Resource resource, String definitionName) { + int id = -1; + if (resource.getSchedules() != null) { + boolean located = false; + MeasurementSchedule[] schedules = new MeasurementSchedule[resource.getSchedules().size()]; + resource.getSchedules().toArray(schedules); + for (int i = 0; (!located && i < resource.getSchedules().size()); i++) { + MeasurementSchedule schedule = schedules[i]; + MeasurementDefinition definition = schedule.getDefinition(); + if ((definition != null) && definition.getName().equals(definitionName)) { + located = true; + id = schedule.getId(); + } + } + } + return id; + } + + private static void addNewMetric(String id, String selectedView, String resourceGraphElements) { + //construct portal.war url to access + String baseUrl = "/resource/common/monitor/visibility/IndicatorCharts.do"; + baseUrl += "?id=" + id; + baseUrl += "&view=" + selectedView; + baseUrl += "&action=addChart&metric=" + resourceGraphElements; + final String url = baseUrl; + //initiate HTTP request + final RequestBuilder b = new RequestBuilder(RequestBuilder.GET, baseUrl); + + try { + b.setCallback(new RequestCallback() { + public void onResponseReceived(final Request request, final Response response) { + Log.trace("Successfully submitted request to add graph to view:" + url); + + //kick off a page reload. 
+ String currentViewPath = History.getToken(); + CoreGUI.goToView(currentViewPath, true); + } + + @Override + public void onError(Request request, Throwable t) { + Log.trace("Error adding Metric:" + url, t); + } + }); + b.send(); + } catch (RequestException e) { + Log.warn("Error adding Metric:" + url, e); + } + + } +} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceDetailView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceDetailView.java index 2d33d66..ae51195 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceDetailView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceDetailView.java @@ -66,9 +66,8 @@ import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.inventory import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.inventory.ResourceAgentView; import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.CalltimeView; import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.D3GraphListView; -import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.avail.ResourceAvailabilityView; import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.schedules.ResourceSchedulesView; -import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.table.MeasurementTableView; +import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.table.MetricsResourceView; import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.traits.TraitsView; import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.operation.history.ResourceOperationHistoryListView; import 
org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.operation.schedule.ResourceOperationScheduleListView; @@ -129,10 +128,8 @@ public class ResourceDetailView extends
private SubTab summaryActivity; private SubTab summaryTimeline; - private SubTab monitorGraphs; private SubTab monitorMetrics; private SubTab monitorTraits; - private SubTab monitorAvail; private SubTab monitorSched; private SubTab monitorCallTime; private SubTab inventoryChildren; @@ -196,16 +193,11 @@ public class ResourceDetailView extends monitoringTab = new TwoLevelTab(new ViewName("Monitoring", MSG.view_tabs_common_monitoring()), IconEnum.SUSPECT_METRICS);
- monitorGraphs = new SubTab(monitoringTab, new ViewName("Graphs", MSG.view_tabs_common_graphs()), null); - - monitorMetrics = new SubTab(monitoringTab, new ViewName("Metrics", "Metrics"), null); + monitorMetrics = new SubTab(monitoringTab, new ViewName("Metrics", MSG.view_tabs_common_metrics()), null); monitorTraits = new SubTab(monitoringTab, new ViewName("Traits", MSG.view_tabs_common_traits()), null); - monitorAvail = new SubTab(monitoringTab, new ViewName("Availability", MSG.view_tabs_common_availability()), - null); monitorSched = new SubTab(monitoringTab, new ViewName("Schedules", MSG.view_tabs_common_schedules()), null); monitorCallTime = new SubTab(monitoringTab, new ViewName("CallTime", MSG.view_tabs_common_calltime()), null); - monitoringTab.registerSubTabs(monitorGraphs, monitorMetrics, monitorTraits, monitorAvail, - monitorSched, monitorCallTime); + monitoringTab.registerSubTabs( monitorMetrics, monitorTraits, monitorSched, monitorCallTime); tabs.add(monitoringTab);
eventsTab = new TwoLevelTab(new ViewName("Events", MSG.view_tabs_common_events()), IconEnum.EVENTS); @@ -395,22 +387,14 @@ public class ResourceDetailView extends
boolean visibleToIE8 = !BrowserUtility.isBrowserPreIE9();
- viewFactory = (!visibleToIE8) ? null : new ViewFactory() { - @Override - public Canvas createView() { - return createD3GraphListView(); - } - }; - updateSubTab(this.monitoringTab, this.monitorGraphs, visible, visibleToIE8, viewFactory); - // visible = same test as above viewFactory = (!visible) ? null : new ViewFactory() { @Override public Canvas createView() { - return new MeasurementTableView(resource.getId()); + return new MetricsResourceView(resource); } }; - updateSubTab(this.monitoringTab, this.monitorMetrics, visible, true, viewFactory); + updateSubTab(this.monitoringTab, this.monitorMetrics, visible, visibleToIE8, viewFactory);
visible = hasMetricsOfType(this.resourceComposite, DataType.TRAIT); viewFactory = (!visible) ? null : new ViewFactory() { @@ -421,13 +405,6 @@ public class ResourceDetailView extends }; updateSubTab(this.monitoringTab, this.monitorTraits, visible, true, viewFactory);
- updateSubTab(this.monitoringTab, this.monitorAvail, true, true, new ViewFactory() { - @Override - public Canvas createView() { - return new ResourceAvailabilityView(resourceComposite); - } - }); - updateSubTab(this.monitoringTab, this.monitorSched, hasMetricsOfType(this.resourceComposite, null), true, new ViewFactory() { @Override diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceTreeView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceTreeView.java index 071f831..a3990bb 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceTreeView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceTreeView.java @@ -32,11 +32,6 @@ import java.util.Set; import java.util.TreeMap; import java.util.TreeSet;
-import com.google.gwt.http.client.Request; -import com.google.gwt.http.client.RequestBuilder; -import com.google.gwt.http.client.RequestCallback; -import com.google.gwt.http.client.RequestException; -import com.google.gwt.http.client.Response; import com.google.gwt.user.client.History; import com.google.gwt.user.client.rpc.AsyncCallback; import com.smartgwt.client.data.DSCallback; @@ -60,17 +55,8 @@ import com.smartgwt.client.widgets.tree.events.DataArrivedHandler; import com.smartgwt.client.widgets.tree.events.NodeContextClickEvent; import com.smartgwt.client.widgets.tree.events.NodeContextClickHandler;
-import org.rhq.core.domain.auth.Subject; -import org.rhq.core.domain.configuration.PropertySimple; -import org.rhq.core.domain.criteria.DashboardCriteria; import org.rhq.core.domain.criteria.ResourceCriteria; import org.rhq.core.domain.criteria.ResourceGroupCriteria; -import org.rhq.core.domain.criteria.SubjectCriteria; -import org.rhq.core.domain.dashboard.Dashboard; -import org.rhq.core.domain.dashboard.DashboardPortlet; -import org.rhq.core.domain.measurement.DataType; -import org.rhq.core.domain.measurement.MeasurementDefinition; -import org.rhq.core.domain.measurement.MeasurementSchedule; import org.rhq.core.domain.operation.OperationDefinition; import org.rhq.core.domain.resource.Resource; import org.rhq.core.domain.resource.ResourceType; @@ -82,11 +68,9 @@ import org.rhq.core.domain.util.PageList; import org.rhq.enterprise.gui.coregui.client.CoreGUI; import org.rhq.enterprise.gui.coregui.client.ImageManager; import org.rhq.enterprise.gui.coregui.client.LinkManager; -import org.rhq.enterprise.gui.coregui.client.UserSessionManager; import org.rhq.enterprise.gui.coregui.client.ViewId; import org.rhq.enterprise.gui.coregui.client.ViewPath; import org.rhq.enterprise.gui.coregui.client.components.tree.EnhancedTreeNode; -import org.rhq.enterprise.gui.coregui.client.dashboard.portlets.inventory.resource.graph.ResourceD3GraphPortlet; import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; import org.rhq.enterprise.gui.coregui.client.gwt.ResourceGWTServiceAsync; import org.rhq.enterprise.gui.coregui.client.gwt.ResourceGroupGWTServiceAsync; @@ -101,8 +85,6 @@ import org.rhq.enterprise.gui.coregui.client.inventory.resource.type.ResourceTyp import org.rhq.enterprise.gui.coregui.client.util.Log; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; import org.rhq.enterprise.gui.coregui.client.util.message.Message; -import org.rhq.enterprise.gui.coregui.client.util.preferences.MeasurementUserPreferences; -import 
org.rhq.enterprise.gui.coregui.client.util.preferences.UserPreferences;
/** * @author Jay Shaughnessy @@ -562,7 +544,7 @@ public class ResourceTreeView extends EnhancedVLayout { resourceContextMenu.addItem(operations);
// Metric graph addition menu - resourceContextMenu.addItem(buildMetricsMenu(resourceType, resource)); + resourceContextMenu.addItem(DashboardLinkUtility.buildMetricsMenu(resourceType, resource, MSG.view_tree_common_contextMenu_measurements()));
// Create Child Menu and Manual Import Menu final Set<ResourceType> creatableChildTypes = getCreatableChildTypes(resourceType); @@ -725,190 +707,6 @@ public class ResourceTreeView extends EnhancedVLayout { tree.reloadChildren(refreshNode); }
- private MenuItem buildMetricsMenu(final ResourceType type, final Resource resource) { - MenuItem measurements = new MenuItem(MSG.view_tree_common_contextMenu_measurements()); - final Menu measurementsSubMenu = new Menu(); - - DashboardCriteria criteria = new DashboardCriteria(); - GWTServiceLookup.getDashboardService().findDashboardsByCriteria(criteria, - new AsyncCallback<PageList<Dashboard>>() { - - public void onFailure(Throwable caught) { - CoreGUI.getErrorHandler().handleError(MSG.view_tree_common_contextMenu_loadFailed_dashboard(), - caught); - } - - public void onSuccess(PageList<Dashboard> result) { - //sort the display items alphabetically - TreeSet<String> ordered = new TreeSet<String>(); - Map<String, MeasurementDefinition> definitionMap = new HashMap<String, MeasurementDefinition>(); - for (MeasurementDefinition m : type.getMetricDefinitions()) { - ordered.add(m.getDisplayName()); - definitionMap.put(m.getDisplayName(), m); - } - - for (String displayName : ordered) { - final MeasurementDefinition def = definitionMap.get(displayName); - //only add menu items for Measurement - if (def.getDataType().equals(DataType.MEASUREMENT)) { - MenuItem defItem = new MenuItem(def.getDisplayName()); - measurementsSubMenu.addItem(defItem); - Menu defSubItem = new Menu(); - defItem.setSubmenu(defSubItem); - - for (final Dashboard d : result) { - MenuItem addToDBItem = new MenuItem(MSG - .view_tree_common_contextMenu_addChartToDashboard(d.getName())); - defSubItem.addItem(addToDBItem); - - addToDBItem.addClickHandler(new ClickHandler() { - - public void onClick(MenuItemClickEvent menuItemClickEvent) { - DashboardPortlet p = new DashboardPortlet(MSG - .view_tree_common_contextMenu_resourceGraph(), ResourceD3GraphPortlet.KEY, - 250); - p.getConfiguration().put( - new PropertySimple(ResourceD3GraphPortlet.CFG_RESOURCE_ID, resource.getId())); - p.getConfiguration().put( - new PropertySimple(ResourceD3GraphPortlet.CFG_DEFINITION_ID, def.getId())); - - d.addPortlet(p); - - 
GWTServiceLookup.getDashboardService().storeDashboard(d, - new AsyncCallback<Dashboard>() { - - public void onFailure(Throwable caught) { - CoreGUI.getErrorHandler().handleError( - MSG.view_tree_common_contextMenu_saveChartToDashboardFailure(), - caught); - } - - public void onSuccess(Dashboard result) { - CoreGUI - .getMessageCenter() - .notify( - new Message( - MSG.view_tree_common_contextMenu_saveChartToDashboardSuccessful(result - .getName()), Message.Severity.Info)); - } - }); - - } - - }); - - - }//end dashboard iteration - - //add new menu item for adding current graphable element to view if on Monitor/Graphs tab - String currentViewPath = History.getToken(); - if (currentViewPath.contains("Monitoring/NewGraphs")) { - MenuItem addGraphItem = new MenuItem(MSG.common_title_add_graph_to_view()); - defSubItem.addItem(addGraphItem); - - addGraphItem.addClickHandler(new ClickHandler() { - public void onClick(MenuItemClickEvent menuItemClickEvent) { - //generate javascript to call out to. - //Ex. 
menuLayers.hide();addMetric('${metric.resourceId},${metric.scheduleId}') - if (getScheduleDefinitionId(resource, def.getName()) > -1) { - final String resourceGraphElements = resource.getId() + "," - + getScheduleDefinitionId(resource, def.getName()); - - //Once, the portal-war will be rewritten to GWT and operations performed - //within the iframe + JSF will update the user preferences, the following - //2 lines could be uncommented and the lines below them refactorized - //MeasurementUserPreferences measurementPreferences = new MeasurementUserPreferences(UserSessionManager.getUserPreferences()); - //String selectedView = measurementPreferences.getSelectedView(String.valueOf(resource.getId())); - - final int sid = UserSessionManager.getSessionSubject().getId(); - SubjectCriteria c = new SubjectCriteria(); - c.addFilterId(sid); - - GWTServiceLookup.getSubjectService().findSubjectsByCriteria(c, - new AsyncCallback<PageList<Subject>>() { - public void onSuccess(PageList<Subject> result) { - if (result.size() > 0) { - UserPreferences uPreferences = new UserPreferences(result - .get(0)); - MeasurementUserPreferences mPreferences = new MeasurementUserPreferences( - uPreferences); - String selectedView = mPreferences.getSelectedView(String - .valueOf(resource.getId())); - - addNewMetric(String.valueOf(resource.getId()), - selectedView, resourceGraphElements); - } else { - Log.trace("Error obtaining subject with id:" + sid); - } - } - - public void onFailure(Throwable caught) { - Log.trace("Error obtaining subject with id:" + sid, caught); - } - }); - } - } - }); - } // end add the "add to view" menu item - }//end trait exclusion - }//end measurement def iteration - - } - }); - measurements.setSubmenu(measurementsSubMenu); - return measurements; - } - - private void addNewMetric(String id, String selectedView, String resourceGraphElements) { - //construct portal.war url to access - String baseUrl = "/resource/common/monitor/visibility/IndicatorCharts.do"; - baseUrl += 
"?id=" + id; - baseUrl += "&view=" + selectedView; - baseUrl += "&action=addChart&metric=" + resourceGraphElements; - final String url = baseUrl; - //initiate HTTP request - final RequestBuilder b = new RequestBuilder(RequestBuilder.GET, baseUrl); - - try { - b.setCallback(new RequestCallback() { - public void onResponseReceived(final Request request, final Response response) { - Log.trace("Successfully submitted request to add graph to view:" + url); - - //kick off a page reload. - String currentViewPath = History.getToken(); - CoreGUI.goToView(currentViewPath, true); - } - - @Override - public void onError(Request request, Throwable t) { - Log.trace("Error adding Metric:" + url, t); - } - }); - b.send(); - } catch (RequestException e) { - Log.trace("Error adding Metric:" + url, e); - } - } - - /** Locate the specific schedule definition using the definition identifier. - */ - private int getScheduleDefinitionId(Resource resource, String definitionName) { - int id = -1; - if (resource.getSchedules() != null) { - boolean located = false; - MeasurementSchedule[] schedules = new MeasurementSchedule[resource.getSchedules().size()]; - resource.getSchedules().toArray(schedules); - for (int i = 0; (!located && i < resource.getSchedules().size()); i++) { - MeasurementSchedule schedule = schedules[i]; - MeasurementDefinition definition = schedule.getDefinition(); - if ((definition != null) && definition.getName().equals(definitionName)) { - located = true; - id = schedule.getId(); - } - } - } - return id; - }
private void setRootResource(Resource rootResource) { this.rootResource = rootResource; diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/D3GraphListView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/D3GraphListView.java index 65893ce..8b9f327 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/D3GraphListView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/D3GraphListView.java @@ -1,6 +1,6 @@ /* * RHQ Management Platform - * Copyright (C) 2005-2012 Red Hat, Inc. + * Copyright (C) 2005-2013 Red Hat, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify @@ -21,7 +21,6 @@ package org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitori import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; -import java.util.EnumSet; import java.util.List; import java.util.Set; import java.util.TreeSet; @@ -30,7 +29,6 @@ import com.google.gwt.core.client.GWT; import com.google.gwt.user.client.Timer; import com.google.gwt.user.client.rpc.AsyncCallback; import com.smartgwt.client.types.Overflow; -import com.smartgwt.client.widgets.form.fields.events.ClickHandler; import com.smartgwt.client.widgets.layout.VLayout;
import org.rhq.core.domain.common.EntityContext; @@ -41,16 +39,14 @@ import org.rhq.core.domain.measurement.MeasurementDefinition; import org.rhq.core.domain.measurement.composite.MeasurementDataNumericHighLowComposite; import org.rhq.core.domain.measurement.composite.MeasurementOOBComposite; import org.rhq.core.domain.resource.Resource; -import org.rhq.core.domain.resource.ResourceType; import org.rhq.core.domain.util.PageList; import org.rhq.enterprise.gui.coregui.client.CoreGUI; import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; import org.rhq.enterprise.gui.coregui.client.inventory.common.AbstractD3GraphListView; -import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilityOverUnderGraphType; import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.MetricGraphData; +import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilityOverUnderGraphType; import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.StackedBarMetricGraphImpl; import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.avail.AvailabilityD3GraphView; -import org.rhq.enterprise.gui.coregui.client.inventory.resource.type.ResourceTypeRepository; import org.rhq.enterprise.gui.coregui.client.util.Log; import org.rhq.enterprise.gui.coregui.client.util.async.Command; import org.rhq.enterprise.gui.coregui.client.util.async.CountDownLatch; @@ -115,17 +111,17 @@ public class D3GraphListView extends AbstractD3GraphListView { setOverflow(Overflow.HIDDEN); }
- @Override protected void onDraw() { super.onDraw(); - Log.debug("D3GraphListView.onDraw() for: " + resource.getName()+ " id: "+ resource.getId()); + Log.debug("D3GraphListView.onDraw() for: " + resource.getName() + " id: " + resource.getId()); destroyMembers();
addMember(buttonBarDateTimeRangeEditor);
if (showAvailabilityGraph) { - availabilityGraph = new AvailabilityD3GraphView<AvailabilityOverUnderGraphType>(new AvailabilityOverUnderGraphType(resource.getId())); + availabilityGraph = new AvailabilityD3GraphView<AvailabilityOverUnderGraphType>( + new AvailabilityOverUnderGraphType(resource.getId())); addMember(availabilityGraph); }
@@ -163,8 +159,8 @@ public class D3GraphListView extends AbstractD3GraphListView {
@Override public void onSuccess(List<Availability> availList) { - Log.debug("\nSuccessfully queried availability in: " - + (System.currentTimeMillis() - timerStart) + " ms."); + Log.debug("\nSuccessfully queried availability in: " + (System.currentTimeMillis() - timerStart) + + " ms."); availabilityList = availList; if (countDownLatch != null) { countDownLatch.countDown(); @@ -180,193 +176,179 @@ public class D3GraphListView extends AbstractD3GraphListView { private void queryAndBuildGraphs() { final long startTimer = System.currentTimeMillis();
- if(null != availabilityGraph){ + if (null != availabilityGraph) { queryAvailability(EntityContext.forResource(resource.getId()), buttonBarDateTimeRangeEditor.getStartTime(), buttonBarDateTimeRangeEditor.getEndTime(), null); }
- ResourceTypeRepository.Cache.getInstance().getResourceTypes(resource.getResourceType().getId(), - EnumSet.of(ResourceTypeRepository.MetadataType.measurements), - new ResourceTypeRepository.TypeLoadedCallback() { - public void onTypesLoaded(final ResourceType type) { + final ArrayList<MeasurementDefinition> measurementDefinitions = new ArrayList<MeasurementDefinition>(); + final ArrayList<MeasurementDefinition> summaryMeasurementDefinitions = new ArrayList<MeasurementDefinition>();
- final ArrayList<MeasurementDefinition> measurementDefinitions = new ArrayList<MeasurementDefinition>(); - final ArrayList<MeasurementDefinition> summaryMeasurementDefinitions = new ArrayList<MeasurementDefinition>(); + for (MeasurementDefinition def : resource.getResourceType().getMetricDefinitions()) { + if (def.getDataType() == DataType.MEASUREMENT && def.getDisplayType() == DisplayType.SUMMARY) { + summaryMeasurementDefinitions.add(def); + } + measurementDefinitions.add(def); + }
- for (MeasurementDefinition def : type.getMetricDefinitions()) { - if (def.getDataType() == DataType.MEASUREMENT && def.getDisplayType() == DisplayType.SUMMARY) { - summaryMeasurementDefinitions.add(def); - } - measurementDefinitions.add(def); - } + Collections.sort(measurementDefinitions, new Comparator<MeasurementDefinition>() { + @Override + public int compare(MeasurementDefinition o1, MeasurementDefinition o2) { + return new Integer(o1.getDisplayOrder()).compareTo(o2.getDisplayOrder()); + } + }); + Collections.sort(summaryMeasurementDefinitions, new Comparator<MeasurementDefinition>() { + @Override + public int compare(MeasurementDefinition o1, MeasurementDefinition o2) { + return new Integer(o1.getDisplayOrder()).compareTo(o2.getDisplayOrder()); + } + }); + + int[] measDefIdArray = new int[measurementDefinitions.size()]; + for (int i = 0; i < measDefIdArray.length; i++) { + measDefIdArray[i] = measurementDefinitions.get(i).getId(); + }
- Collections.sort(measurementDefinitions, new Comparator<MeasurementDefinition>() { - @Override - public int compare(MeasurementDefinition o1, MeasurementDefinition o2) { - return new Integer(o1.getDisplayOrder()).compareTo(o2.getDisplayOrder()); - } - }); - Collections.sort(summaryMeasurementDefinitions, new Comparator<MeasurementDefinition>() { - @Override - public int compare(MeasurementDefinition o1, MeasurementDefinition o2) { - return new Integer(o1.getDisplayOrder()).compareTo(o2.getDisplayOrder()); - } - }); - - int[] measDefIdArray = new int[measurementDefinitions.size()]; - for (int i = 0; i < measDefIdArray.length; i++) { - measDefIdArray[i] = measurementDefinitions.get(i).getId(); + // setting up a deferred Command to execute after all resource queries have completed (successfully or unsuccessfully) + // we know there are exactly 2 resources + final CountDownLatch countDownLatch = CountDownLatch.create(NUM_ASYNC_CALLS, new Command() { + @Override + /** + * Satisfied only after ALL of the metric queries AND availability have completed + */ + public void execute() { + Log.debug("Total Time for async metrics/avail query: " + (System.currentTimeMillis() - startTimer)); + if (null == metricsDataList || metricsDataList.isEmpty()) { + loadingLabel.setContents(MSG.view_resource_monitor_graphs_noneAvailable()); + } else { + loadingLabel.hide(); + if (useSummaryData) { + buildSummaryGraphs(metricsDataList, summaryMeasurementDefinitions, measurementDefinitions); + } else { + determineGraphsToBuild(metricsDataList, measurementDefinitions, definitionIds); } - - // setting up a deferred Command to execute after all resource queries have completed (successfully or unsuccessfully) - // we know there are exactly 2 resources - final CountDownLatch countDownLatch = CountDownLatch.create(NUM_ASYNC_CALLS, new Command() { - @Override - /** - * Satisfied only after ALL of the metric queries AND availability have completed - */ - public void execute() { - Log.debug("Total 
Time for async metrics/avail query: " - + (System.currentTimeMillis() - startTimer)); - if (null == metricsDataList || metricsDataList.isEmpty()) { - loadingLabel.setContents(MSG.view_resource_monitor_graphs_noneAvailable()); - } else { - loadingLabel.hide(); - if (useSummaryData) { - buildSummaryGraphs(metricsDataList, summaryMeasurementDefinitions, - measurementDefinitions); - } else { - determineGraphsToBuild(metricsDataList, measurementDefinitions, definitionIds); - } - // There is a weird timing case when availabilityGraph can be null - if (null != availabilityGraph) { - // we only need the first metricData since we are only taking the - // availability data set in there for the dropdowns already - availabilityGraph.setAvailabilityList(availabilityList); - new Timer(){ - @Override - public void run() { - availabilityGraph.drawJsniChart(); - } - }.schedule(150); - } + // There is a weird timing case when availabilityGraph can be null + if (null != availabilityGraph) { + // we only need the first metricData since we are only taking the + // availability data set in there for the dropdowns already + availabilityGraph.setAvailabilityList(availabilityList); + new Timer() { + @Override + public void run() { + availabilityGraph.drawJsniChart(); } + }.schedule(150); + } + }
- } - }); + } + });
- queryMetricData(measDefIdArray, countDownLatch); - queryOOBMetrics(resource, countDownLatch); - // now the countDown latch will run sometime asynchronously + queryMetricData(measDefIdArray, countDownLatch); + queryOOBMetrics(resource, countDownLatch); + // now the countDown latch will run sometime asynchronously + }
+ private void queryMetricData(final int[] measDefIdArray, final CountDownLatch countDownLatch) { + GWTServiceLookup.getMeasurementDataService().findDataForResource(resource.getId(), measDefIdArray, + buttonBarDateTimeRangeEditor.getStartTime(), buttonBarDateTimeRangeEditor.getEndTime(), 60, + new AsyncCallback<List<List<MeasurementDataNumericHighLowComposite>>>() { + @Override + public void onFailure(Throwable caught) { + CoreGUI.getErrorHandler().handleError(MSG.view_resource_monitor_graphs_loadFailed(), caught); + loadingLabel.setContents(MSG.view_resource_monitor_graphs_loadFailed()); + countDownLatch.countDown(); }
- private void queryMetricData(final int[] measDefIdArray, final CountDownLatch countDownLatch) { - GWTServiceLookup.getMeasurementDataService().findDataForResource(resource.getId(), measDefIdArray, - buttonBarDateTimeRangeEditor.getStartTime(), buttonBarDateTimeRangeEditor.getEndTime(), 60, - new AsyncCallback<List<List<MeasurementDataNumericHighLowComposite>>>() { - @Override - public void onFailure(Throwable caught) { - CoreGUI.getErrorHandler().handleError(MSG.view_resource_monitor_graphs_loadFailed(), - caught); - loadingLabel.setContents(MSG.view_resource_monitor_graphs_loadFailed()); - countDownLatch.countDown(); - } - - @Override - public void onSuccess(List<List<MeasurementDataNumericHighLowComposite>> metrics) { - metricsDataList = metrics; - Log.debug("Regular Metric graph data queried in: " - + (System.currentTimeMillis() - startTimer + " ms.")); - countDownLatch.countDown(); + @Override + public void onSuccess(List<List<MeasurementDataNumericHighLowComposite>> metrics) { + metricsDataList = metrics; + countDownLatch.countDown();
- } - }); } + }); + }
- private void queryOOBMetrics(final Resource resource, final CountDownLatch countDownLatch) { + private void queryOOBMetrics(final Resource resource, final CountDownLatch countDownLatch) {
- final long startTime = System.currentTimeMillis(); + final long startTime = System.currentTimeMillis();
- GWTServiceLookup.getMeasurementDataService().getHighestNOOBsForResource(resource.getId(), 60, + GWTServiceLookup.getMeasurementDataService().getHighestNOOBsForResource(resource.getId(), 60,
- new AsyncCallback<PageList<MeasurementOOBComposite>>() { - @Override - public void onSuccess(PageList<MeasurementOOBComposite> measurementOOBComposites) { + new AsyncCallback<PageList<MeasurementOOBComposite>>() { + @Override + public void onSuccess(PageList<MeasurementOOBComposite> measurementOOBComposites) {
- measurementOOBCompositeList = measurementOOBComposites; - Log.debug("\nSuccessfully queried "+measurementOOBCompositeList.size() +" OOB records in: " + (System.currentTimeMillis() - startTime) - + " ms."); - countDownLatch.countDown(); - } + measurementOOBCompositeList = measurementOOBComposites; + Log.debug("\nSuccessfully queried " + measurementOOBCompositeList.size() + " OOB records in: " + + (System.currentTimeMillis() - startTime) + " ms."); + countDownLatch.countDown(); + }
- @Override - public void onFailure(Throwable caught) { - Log.debug("Error retrieving out of bound metrics for resource [" + resource.getId() + "]:" - + caught.getMessage()); - countDownLatch.countDown(); - } - }); + @Override + public void onFailure(Throwable caught) { + Log.debug("Error retrieving out of bound metrics for resource [" + resource.getId() + "]:" + + caught.getMessage()); + countDownLatch.countDown(); + } + });
- } + }
- /** - * Spin through the measurement definitions (in order) checking to see if they are in the - * summary measurement definition set and if so build a graph. - * @param measurementData - * @param summaryMeasurementDefinitions - * @param measurementDefinitions - */ - private void buildSummaryGraphs(List<List<MeasurementDataNumericHighLowComposite>> measurementData, - List<MeasurementDefinition> summaryMeasurementDefinitions, - List<MeasurementDefinition> measurementDefinitions) { - Set<Integer> summaryIds = new TreeSet<Integer>(); - for (MeasurementDefinition summaryMeasurementDefinition : summaryMeasurementDefinitions) { - summaryIds.add(summaryMeasurementDefinition.getId()); - } + /** + * Spin through the measurement definitions (in order) checking to see if they are in the + * summary measurement definition set and if so build a graph. + * @param measurementData + * @param summaryMeasurementDefinitions + * @param measurementDefinitions + */ + private void buildSummaryGraphs(List<List<MeasurementDataNumericHighLowComposite>> measurementData, + List<MeasurementDefinition> summaryMeasurementDefinitions, List<MeasurementDefinition> measurementDefinitions) { + Set<Integer> summaryIds = new TreeSet<Integer>(); + for (MeasurementDefinition summaryMeasurementDefinition : summaryMeasurementDefinitions) { + summaryIds.add(summaryMeasurementDefinition.getId()); + }
- int i = 0; - for (MeasurementDefinition measurementDefinition : measurementDefinitions) { - if (summaryIds.contains(measurementDefinition.getId())) { - buildSingleGraph(measurementOOBCompositeList, measurementDefinition, - measurementData.get(i), MULTI_CHART_HEIGHT); - } - i++; - } + int i = 0; + for (MeasurementDefinition measurementDefinition : measurementDefinitions) { + if (summaryIds.contains(measurementDefinition.getId())) { + buildSingleGraph(measurementOOBCompositeList, measurementDefinition, measurementData.get(i), + MULTI_CHART_HEIGHT); + } + i++; + }
- } + }
- private void determineGraphsToBuild(List<List<MeasurementDataNumericHighLowComposite>> measurementData, - List<MeasurementDefinition> measurementDefinitions, Set<Integer> definitionIds) { - int i = 0; - for (List<MeasurementDataNumericHighLowComposite> metric : measurementData) { - - for (Integer selectedDefinitionId : definitionIds) { - final MeasurementDefinition measurementDefinition = measurementDefinitions.get(i); - final int measurementId = measurementDefinition.getId(); - - if (null != selectedDefinitionId) { - // single graph case - if (measurementId == selectedDefinitionId) { - buildSingleGraph(measurementOOBCompositeList, measurementDefinition, metric, SINGLE_CHART_HEIGHT); - } - } else { - // multiple graph case - buildSingleGraph(measurementOOBCompositeList, measurementDefinition, metric, MULTI_CHART_HEIGHT); - } - } - i++; + private void determineGraphsToBuild(List<List<MeasurementDataNumericHighLowComposite>> measurementData, + List<MeasurementDefinition> measurementDefinitions, Set<Integer> definitionIds) { + int i = 0; + for (List<MeasurementDataNumericHighLowComposite> metric : measurementData) { + + for (Integer selectedDefinitionId : definitionIds) { + final MeasurementDefinition measurementDefinition = measurementDefinitions.get(i); + final int measurementId = measurementDefinition.getId(); + + if (null != selectedDefinitionId) { + // single graph case + if (measurementId == selectedDefinitionId) { + buildSingleGraph(measurementOOBCompositeList, measurementDefinition, metric, + SINGLE_CHART_HEIGHT); } + } else { + // multiple graph case + buildSingleGraph(measurementOOBCompositeList, measurementDefinition, metric, MULTI_CHART_HEIGHT); } - }); - + } + i++; + } }
private void buildSingleGraph(PageList<MeasurementOOBComposite> measurementOOBCompositeList, MeasurementDefinition measurementDefinition, List<MeasurementDataNumericHighLowComposite> data, int height) {
MetricGraphData metricGraphData = MetricGraphData.createForResource(resource.getId(), resource.getName(), - measurementDefinition, data, measurementOOBCompositeList ); + measurementDefinition, data, measurementOOBCompositeList); StackedBarMetricGraphImpl graph = GWT.create(StackedBarMetricGraphImpl.class); graph.setMetricGraphData(metricGraphData); graph.setGraphListView(this); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/MetricD3Graph.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/MetricD3Graph.java index 3b4ec1f..65807f9 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/MetricD3Graph.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/MetricD3Graph.java @@ -22,6 +22,7 @@ import com.google.gwt.user.client.Timer; import com.smartgwt.client.widgets.HTMLFlow;
import org.rhq.enterprise.gui.coregui.client.inventory.common.AbstractD3GraphListView; +import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.RedrawGraphs; import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.StackedBarMetricGraphImpl; import org.rhq.enterprise.gui.coregui.client.util.Log; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; @@ -30,7 +31,7 @@ import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; * A D3 graph implementation for graphing Resource metrics. * Just the graph only. No avail graph no buttons just he graph. */ -public class MetricD3Graph<T extends AbstractD3GraphListView> extends EnhancedVLayout { +public class MetricD3Graph<T extends AbstractD3GraphListView> extends EnhancedVLayout implements RedrawGraphs{
protected StackedBarMetricGraphImpl graph; private HTMLFlow graphDiv = null; @@ -59,18 +60,18 @@ public class MetricD3Graph<T extends AbstractD3GraphListView> extends EnhancedVL */ private static String getSvgDefs() { return " <defs>" - + " <linearGradient id="headerGrad" x1="0%" y1="0%" x2="0%" y2="100%">" - + " <stop offset="0%" style="stop-color:#E6E6E6;stop-opacity:1"/>" - + " <stop offset="100%" style="stop-color:#F0F0F0;stop-opacity:1"/>" - + " </linearGradient>" - + " <pattern id="noDataStripes" patternUnits="userSpaceOnUse" x="0" y="0"" - + " width="6" height="3">" - + " <path d="M 0 0 6 0" style="stroke:#CCCCCC; fill:none;"/>" - + " </pattern>" - + " <pattern id="unknownStripes" patternUnits="userSpaceOnUse" x="0" y="0"" - + " width="6" height="3">" - + " <path d="M 0 0 6 0" style="stroke:#2E9EC2; fill:none;"/>" - + " </pattern>" + + " <linearGradient id="headerGrad" x1="0%" y1="0%" x2="0%" y2="100%">" + + " <stop offset="0%" style="stop-color:#E6E6E6;stop-opacity:1"/>" + + " <stop offset="100%" style="stop-color:#F0F0F0;stop-opacity:1"/>" + + " </linearGradient>" + + " <pattern id="noDataStripes" patternUnits="userSpaceOnUse" x="0" y="0"" + + " width="6" height="3">" + + " <path d="M 0 0 6 0" style="stroke:#CCCCCC; fill:none;"/>" + + " </pattern>" + + " <pattern id="unknownStripes" patternUnits="userSpaceOnUse" x="0" y="0"" + + " width="6" height="3">" + + " <path d="M 0 0 6 0" style="stroke:#2E9EC2; fill:none;"/>" + + " </pattern>" + "<pattern id="diagonalHatchFill" patternUnits="userSpaceOnUse" x="0" y="0" width="105" height="105">" + "<g style="fill:none; stroke:black; stroke-width:1">" + "<path d="M0 90 l15,15"/>" diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/avail/ResourceAvailabilityView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/avail/ResourceAvailabilityView.java deleted file 
mode 100644 index 6719070..0000000 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/avail/ResourceAvailabilityView.java +++ /dev/null @@ -1,399 +0,0 @@ -/* - * RHQ Management Platform - * Copyright 2012, Red Hat Middleware LLC, and individual contributors - * as indicated by the @author tags. See the copyright.txt file in the - * distribution for a full listing of individual contributors. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- */ - -package org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.avail; - -import java.util.ArrayList; -import java.util.Date; - -import com.google.gwt.user.client.rpc.AsyncCallback; -import com.smartgwt.client.data.DSRequest; -import com.smartgwt.client.data.DSResponse; -import com.smartgwt.client.data.Record; -import com.smartgwt.client.data.SortSpecifier; -import com.smartgwt.client.rpc.RPCResponse; -import com.smartgwt.client.types.Alignment; -import com.smartgwt.client.types.ListGridFieldType; -import com.smartgwt.client.types.SortDirection; -import com.smartgwt.client.widgets.form.DynamicForm; -import com.smartgwt.client.widgets.form.fields.FormItem; -import com.smartgwt.client.widgets.form.fields.StaticTextItem; -import com.smartgwt.client.widgets.grid.ListGridField; -import com.smartgwt.client.widgets.grid.ListGridRecord; -import com.smartgwt.client.widgets.layout.Layout; - -import org.rhq.core.domain.criteria.AvailabilityCriteria; -import org.rhq.core.domain.measurement.Availability; -import org.rhq.core.domain.measurement.MeasurementUnits; -import org.rhq.core.domain.resource.composite.ResourceAvailabilitySummary; -import org.rhq.core.domain.resource.composite.ResourceComposite; -import org.rhq.core.domain.util.PageControl; -import org.rhq.core.domain.util.PageList; -import org.rhq.core.domain.util.PageOrdering; -import org.rhq.enterprise.gui.coregui.client.CoreGUI; -import org.rhq.enterprise.gui.coregui.client.ImageManager; -import org.rhq.enterprise.gui.coregui.client.components.table.Table; -import org.rhq.enterprise.gui.coregui.client.components.table.TimestampCellFormatter; -import org.rhq.enterprise.gui.coregui.client.gwt.AvailabilityGWTServiceAsync; -import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; -import org.rhq.enterprise.gui.coregui.client.util.MeasurementConverterClient; -import org.rhq.enterprise.gui.coregui.client.util.RPCDataSource; -import 
org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; - -/** - * This shows the availability history for a resource. - * - * @author Jay Shaughnessy - * @author John Mazzitelli - */ -public class ResourceAvailabilityView extends EnhancedVLayout { - - private ResourceComposite resourceComposite; - private StaticTextItem currentField; - private StaticTextItem availField; - private StaticTextItem availTimeField; - private StaticTextItem downField; - private StaticTextItem downTimeField; - private StaticTextItem disabledField; - private StaticTextItem disabledTimeField; - private StaticTextItem failureCountField; - private StaticTextItem disabledCountField; - private StaticTextItem mtbfField; - private StaticTextItem mttrField; - private StaticTextItem unknownField; - private StaticTextItem currentTimeField; - - public ResourceAvailabilityView(ResourceComposite resourceComposite) { - super(); - - this.resourceComposite = resourceComposite; - - setWidth100(); - setHeight100(); - } - - @Override - protected void onInit() { - super.onInit(); - - addMember(createSummaryForm()); - addMember(createListView()); - } - - private DynamicForm createSummaryForm() { - DynamicForm form = new DynamicForm(); - form.setWidth100(); - form.setAutoHeight(); - form.setMargin(10); - form.setNumCols(4); - - // row 1 - currentField = new StaticTextItem("current", MSG.view_resource_monitor_availability_currentStatus()); - currentField.setWrapTitle(false); - currentField.setColSpan(4); - - // row 2 - availField = new StaticTextItem("avail", MSG.view_resource_monitor_availability_availability()); - availField.setWrapTitle(false); - prepareTooltip(availField, MSG.view_resource_monitor_availability_availability_tooltip()); - - availTimeField = new StaticTextItem("availTime", MSG.view_resource_monitor_availability_uptime()); - availTimeField.setWrapTitle(false); - prepareTooltip(availTimeField, MSG.view_resource_monitor_availability_uptime_tooltip()); - - // row 3 - downField = new 
StaticTextItem("down", MSG.view_resource_monitor_availability_down()); - downField.setWrapTitle(false); - prepareTooltip(downField, MSG.view_resource_monitor_availability_down_tooltip()); - - downTimeField = new StaticTextItem("downTime", MSG.view_resource_monitor_availability_downtime()); - downTimeField.setWrapTitle(false); - prepareTooltip(downTimeField, MSG.view_resource_monitor_availability_downtime_tooltip()); - - // row 4 - disabledField = new StaticTextItem("disabled", MSG.view_resource_monitor_availability_disabled()); - disabledField.setWrapTitle(false); - prepareTooltip(disabledField, MSG.view_resource_monitor_availability_disabled_tooltip()); - - disabledTimeField = new StaticTextItem("disabledTime", MSG.view_resource_monitor_availability_disabledTime()); - disabledTimeField.setWrapTitle(false); - prepareTooltip(disabledTimeField, MSG.view_resource_monitor_availability_disabledTime_tooltip()); - - // row 5 - failureCountField = new StaticTextItem("failureCount", MSG.view_resource_monitor_availability_numFailures()); - failureCountField.setWrapTitle(false); - prepareTooltip(failureCountField, MSG.view_resource_monitor_availability_numFailures_tooltip()); - - disabledCountField = new StaticTextItem("disabledCount", MSG.view_resource_monitor_availability_numDisabled()); - disabledCountField.setWrapTitle(false); - prepareTooltip(disabledCountField, MSG.view_resource_monitor_availability_numDisabled_tooltip()); - - // row 6 - mtbfField = new StaticTextItem("mtbf", MSG.view_resource_monitor_availability_mtbf()); - mtbfField.setWrapTitle(false); - prepareTooltip(mtbfField, MSG.view_resource_monitor_availability_mtbf_tooltip()); - - mttrField = new StaticTextItem("mttr", MSG.view_resource_monitor_availability_mttr()); - mttrField.setWrapTitle(false); - prepareTooltip(mttrField, MSG.view_resource_monitor_availability_mttr_tooltip()); - - // row 7 - unknownField = new StaticTextItem("unknown"); - unknownField.setWrapTitle(false); - unknownField.setColSpan(4); - 
unknownField.setShowTitle(false); - - // row 8 - currentTimeField = new StaticTextItem("currentTime"); - currentTimeField.setWrapTitle(false); - currentTimeField.setColSpan(4); - currentTimeField.setShowTitle(false); - - form.setItems(currentField, availField, availTimeField, downField, downTimeField, disabledField, - disabledTimeField, failureCountField, disabledCountField, mtbfField, mttrField, unknownField, - currentTimeField); - - reloadSummaryData(); - - return form; - } - - private void reloadSummaryData() { - GWTServiceLookup.getResourceService().getResourceAvailabilitySummary(resourceComposite.getResource().getId(), - new AsyncCallback<ResourceAvailabilitySummary>() { - - @Override - public void onSuccess(ResourceAvailabilitySummary result) { - - currentField.setValue(MSG.view_resource_monitor_availability_currentStatus_value(result - .getCurrent().getName(), TimestampCellFormatter.format(result.getLastChange().getTime()))); - availField.setValue(MeasurementConverterClient.format(result.getUpPercentage(), - MeasurementUnits.PERCENTAGE, true)); - availTimeField.setValue(MeasurementConverterClient.format((double) result.getUpTime(), - MeasurementUnits.MILLISECONDS, true)); - downField.setValue(MeasurementConverterClient.format(result.getDownPercentage(), - MeasurementUnits.PERCENTAGE, true)); - downTimeField.setValue(MeasurementConverterClient.format((double) result.getDownTime(), - MeasurementUnits.MILLISECONDS, true)); - disabledField.setValue(MeasurementConverterClient.format(result.getDisabledPercentage(), - MeasurementUnits.PERCENTAGE, true)); - disabledTimeField.setValue(MeasurementConverterClient.format((double) result.getDisabledTime(), - MeasurementUnits.MILLISECONDS, true)); - failureCountField.setValue(result.getFailures()); - disabledCountField.setValue(result.getDisabled()); - mtbfField.setValue(MeasurementConverterClient.format((double) result.getMTBF(), - MeasurementUnits.MILLISECONDS, true)); - 
mttrField.setValue(MeasurementConverterClient.format((double) result.getMTTR(), - MeasurementUnits.MILLISECONDS, true)); - - if (result.getUnknownTime() > 0L) { - unknownField.setValue(MSG.view_resource_monitor_availability_unknown(MeasurementConverterClient - .format((double) result.getUnknownTime(), MeasurementUnits.MILLISECONDS, true))); - } else { - unknownField.setValue(""); - } - - currentTimeField.setValue(MSG.view_resource_monitor_availability_currentAsOf(TimestampCellFormatter - .format(result.getCurrentTime()))); - } - - @Override - public void onFailure(Throwable caught) { - currentField.setValue(MSG.common_label_error()); - CoreGUI.getErrorHandler() - .handleError(MSG.view_resource_monitor_availability_summaryError(), caught); - } - }); - } - - private void prepareTooltip(FormItem item, String tooltip) { - item.setHoverWidth(400); - item.setPrompt(tooltip); - } - - private Table<ListView.DS> createListView() { - ListView listView = new ListView(resourceComposite.getResource().getId()); - return listView; - } - - private class ListView extends Table<ListView.DS> { - - private DS dataSource; - private int resourceId; - - public ListView(int resourceId) { - super(null, new SortSpecifier[] { new SortSpecifier("startTime", SortDirection.DESCENDING) }); - - this.resourceId = resourceId; - - setDataSource(getDataSource()); - } - - @Override - public DS getDataSource() { - if (null == this.dataSource) { - this.dataSource = new DS(resourceId); - } - return this.dataSource; - } - - @Override - public void refresh() { - super.refresh(); - reloadSummaryData(); - } - - @Override - protected void configureTableContents(Layout contents) { - super.configureTableContents(contents); - setAutoHeight(); - } - - @Override - protected void configureTable() { - ArrayList<ListGridField> dataSourceFields = getDataSource().getListGridFields(); - getListGrid().setFields(dataSourceFields.toArray(new ListGridField[dataSourceFields.size()])); - - super.configureTable(); - } - - 
private class DS extends RPCDataSource<Availability, AvailabilityCriteria> { - - public static final String ATTR_ID = "id"; - public static final String ATTR_AVAILABILITY = "availabilityType"; - public static final String ATTR_START_TIME = "startTime"; - public static final String ATTR_END_TIME = "endTime"; - - public static final String ATTR_DURATION = "duration"; - - private AvailabilityGWTServiceAsync availService = GWTServiceLookup.getAvailabilityService(); - private int resourceId; - - public DS(int resourceId) { - super(); - this.resourceId = resourceId; - addDataSourceFields(); - } - - /** - * The view that contains the list grid which will display this datasource's data will call this - * method to get the field information which is used to control the display of the data. - * - * @return list grid fields used to display the datasource data - */ - public ArrayList<ListGridField> getListGridFields() { - ArrayList<ListGridField> fields = new ArrayList<ListGridField>(6); - - ListGridField startTimeField = new ListGridField(ATTR_START_TIME, MSG.common_title_start()); - startTimeField.setCellFormatter(new TimestampCellFormatter()); - startTimeField.setShowHover(true); - startTimeField.setHoverCustomizer(TimestampCellFormatter.getHoverCustomizer(ATTR_START_TIME)); - startTimeField.setCanSortClientOnly(true); - fields.add(startTimeField); - - ListGridField endTimeField = new ListGridField(ATTR_END_TIME, MSG.common_title_end()); - endTimeField.setCellFormatter(new TimestampCellFormatter()); - endTimeField.setShowHover(true); - endTimeField.setHoverCustomizer(TimestampCellFormatter.getHoverCustomizer(ATTR_END_TIME)); - endTimeField.setCanSortClientOnly(true); - fields.add(endTimeField); - - ListGridField durationField = new ListGridField(ATTR_DURATION, MSG.common_title_duration()); - durationField.setAlign(Alignment.RIGHT); - fields.add(durationField); - - ListGridField availabilityField = new ListGridField(ATTR_AVAILABILITY, MSG.common_title_availability()); - 
availabilityField.setType(ListGridFieldType.IMAGE); - availabilityField.setAlign(Alignment.CENTER); - fields.add(availabilityField); - - return fields; - } - - @Override - protected AvailabilityCriteria getFetchCriteria(DSRequest request) { - AvailabilityCriteria c = new AvailabilityCriteria(); - c.addFilterResourceId(resourceId); - c.addFilterInitialAvailability(false); - - // This code is unlikely to be necessary as the encompassing view should be using an initial - // sort specifier. But just in case, make sure we set the initial sort. Note that we have to - // manipulate the PageControl directly as per the restrictions on getFetchCriteria() (see jdoc). - PageControl pageControl = getPageControl(request); - if (pageControl.getOrderingFields().isEmpty()) { - pageControl.initDefaultOrderingField("startTime", PageOrdering.DESC); - } - - return c; - } - - @Override - protected void executeFetch(final DSRequest request, final DSResponse response, - AvailabilityCriteria criteria) { - - this.availService.findAvailabilityByCriteria(criteria, new AsyncCallback<PageList<Availability>>() { - public void onFailure(Throwable caught) { - // TODO fix message - CoreGUI.getErrorHandler().handleError(MSG.common_label_error(), caught); - response.setStatus(RPCResponse.STATUS_FAILURE); - processResponse(request.getRequestId(), response); - } - - public void onSuccess(final PageList<Availability> result) { - response.setData(buildRecords(result)); - response.setTotalRows(result.size()); - processResponse(request.getRequestId(), response); - } - }); - } - - @Override - public Availability copyValues(Record from) { - return null; - } - - @Override - public ListGridRecord copyValues(Availability from) { - ListGridRecord record = new ListGridRecord(); - - record.setAttribute(ATTR_ID, from.getId()); - record.setAttribute(ATTR_AVAILABILITY, - ImageManager.getAvailabilityIconFromAvailType(from.getAvailabilityType())); - record.setAttribute(ATTR_START_TIME, new Date(from.getStartTime())); - 
if (null != from.getEndTime()) { - record.setAttribute(ATTR_END_TIME, new Date(from.getEndTime())); - long duration = from.getEndTime() - from.getStartTime(); - record.setAttribute(ATTR_DURATION, - MeasurementConverterClient.format((double) duration, MeasurementUnits.MILLISECONDS, true)); - - } else { - record.setAttribute(ATTR_END_TIME, MSG.common_label_none2()); - long duration = System.currentTimeMillis() - from.getStartTime(); - record.setAttribute(ATTR_DURATION, - MeasurementConverterClient.format((double) duration, MeasurementUnits.MILLISECONDS, true)); - - } - - return record; - } - } - } - -} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/AddToDashboardComponent.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/AddToDashboardComponent.java new file mode 100644 index 0000000..ce4f8ef --- /dev/null +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/AddToDashboardComponent.java @@ -0,0 +1,185 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ +package org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.table; + +import java.util.LinkedHashMap; + +import com.google.gwt.user.client.rpc.AsyncCallback; +import com.smartgwt.client.widgets.IButton; +import com.smartgwt.client.widgets.events.ClickEvent; +import com.smartgwt.client.widgets.form.fields.SelectItem; +import com.smartgwt.client.widgets.form.fields.events.ChangeEvent; +import com.smartgwt.client.widgets.form.fields.events.ChangeHandler; +import com.smartgwt.client.widgets.grid.ListGridRecord; +import com.smartgwt.client.widgets.toolbar.ToolStrip; + +import org.rhq.core.domain.configuration.PropertySimple; +import org.rhq.core.domain.criteria.DashboardCriteria; +import org.rhq.core.domain.dashboard.Dashboard; +import org.rhq.core.domain.dashboard.DashboardPortlet; +import org.rhq.core.domain.measurement.MeasurementDefinition; +import org.rhq.core.domain.resource.Resource; +import org.rhq.core.domain.util.PageList; +import org.rhq.enterprise.gui.coregui.client.CoreGUI; +import org.rhq.enterprise.gui.coregui.client.dashboard.portlets.inventory.resource.graph.ResourceD3GraphPortlet; +import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; +import org.rhq.enterprise.gui.coregui.client.util.Log; +import org.rhq.enterprise.gui.coregui.client.util.enhanced.Enhanced; +import org.rhq.enterprise.gui.coregui.client.util.message.Message; + +/** + * @author Mike Thompson + */ +public class AddToDashboardComponent extends ToolStrip implements Enhanced { + final private Resource resource; + private SelectItem dashboardSelectItem; + private Dashboard selectedDashboard; + private IButton addToDashboardButton; + private LinkedHashMap<String, String> dashboardMenuMap; + private LinkedHashMap<Integer, Dashboard> dashboardMap; + private MetricsTableView.MetricsTableListGrid metricsListGrid; + + public AddToDashboardComponent(Resource resource) { + this.resource = resource; + setPadding(5); + setMembersMargin(15); + setWidth(300); + 
dashboardMenuMap = new LinkedHashMap<String, String>(); + dashboardMap = new LinkedHashMap<Integer, Dashboard>(); + createToolstrip(); + } + + @Override + protected void onDraw() { + super.onDraw(); + removeMembers(getMembers()); + createToolstrip(); + } + + private void createToolstrip() { + addSpacer(15); + dashboardSelectItem = new SelectItem(); + addToDashboardButton = new IButton(MSG.view_metric_addToDashboard()); + addToDashboardButton.disable(); + + dashboardSelectItem = new SelectItem(); + dashboardSelectItem.setTitle("Dashboards"); + dashboardSelectItem.setWidth(300); + dashboardSelectItem.setPickListWidth(210); + populateDashboardMenu(); + addFormItem(dashboardSelectItem); + addMember(addToDashboardButton); + + dashboardSelectItem.addChangeHandler(new ChangeHandler() { + @Override + public void onChange(ChangeEvent changeEvent) { + Integer selectedDashboardId = Integer.valueOf((String) changeEvent.getValue()); + selectedDashboard = dashboardMap.get(selectedDashboardId); + } + }); + addToDashboardButton.addClickHandler(new com.smartgwt.client.widgets.events.ClickHandler() { + @Override + public void onClick(ClickEvent clickEvent) { + ListGridRecord[] selectedRecords = metricsListGrid.getSelectedRecords(); + for (ListGridRecord selectedRecord : selectedRecords) { + for (MeasurementDefinition measurementDefinition : resource.getResourceType() + .getMetricDefinitions()) { + if (measurementDefinition.getId() == selectedRecord + .getAttributeAsInt(MetricsViewDataSource.FIELD_METRIC_DEF_ID)) { + Log.debug("Add to Dashboard -- Storing: " + measurementDefinition.getDisplayName() + + " in " + selectedDashboard.getName()); + storeDashboardMetric(selectedDashboard, resource, measurementDefinition); + break; + } + } + } + } + }); + } + + public void disableAddToDashboardButton(){ + addToDashboardButton.disable(); + } + + public void enableAddToDashboardButton(){ + addToDashboardButton.enable(); + } + + + public void populateDashboardMenu() { + 
dashboardMenuMap.clear(); + dashboardMap.clear(); + + DashboardCriteria criteria = new DashboardCriteria(); + GWTServiceLookup.getDashboardService().findDashboardsByCriteria(criteria, + new AsyncCallback<PageList<Dashboard>>() { + + public void onFailure(Throwable caught) { + CoreGUI.getErrorHandler().handleError(MSG.view_tree_common_contextMenu_loadFailed_dashboard(), + caught); + } + + public void onSuccess(PageList<Dashboard> dashboards) { + for (final Dashboard dashboard : dashboards) { + dashboardMenuMap.put(String.valueOf(dashboard.getId()), + MSG.view_tree_common_contextMenu_addChartToDashboard(dashboard.getName())); + dashboardMap.put(dashboard.getId(), dashboard); + } + selectedDashboard = dashboards.get(0); + dashboardSelectItem.setValueMap(dashboardMenuMap); + dashboardSelectItem.setValue(selectedDashboard.getId()); + } + }); + } + + /** + * The metrics list grid is not available on object creation so we must attach later after it has been initialized. + * @param metricsListGrid + */ + public void setMetricsListGrid(MetricsTableView.MetricsTableListGrid metricsListGrid) { + this.metricsListGrid = metricsListGrid; + } + + + private void storeDashboardMetric(Dashboard dashboard, Resource resource, MeasurementDefinition definition) { + DashboardPortlet dashboardPortlet = new DashboardPortlet(MSG.view_tree_common_contextMenu_resourceGraph(), + ResourceD3GraphPortlet.KEY, 250); + dashboardPortlet.getConfiguration().put( + new PropertySimple(ResourceD3GraphPortlet.CFG_RESOURCE_ID, resource.getId())); + dashboardPortlet.getConfiguration().put( + new PropertySimple(ResourceD3GraphPortlet.CFG_DEFINITION_ID, definition.getId())); + + dashboard.addPortlet(dashboardPortlet); + + GWTServiceLookup.getDashboardService().storeDashboard(dashboard, new AsyncCallback<Dashboard>() { + + public void onFailure(Throwable caught) { + CoreGUI.getErrorHandler().handleError(MSG.view_tree_common_contextMenu_saveChartToDashboardFailure(), + caught); + } + + public void 
onSuccess(Dashboard result) { + CoreGUI.getMessageCenter().notify( + new Message(MSG.view_tree_common_contextMenu_saveChartToDashboardSuccessful(result.getName()), + Message.Severity.Info)); + } + }); + } + +} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MeasurementTableView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MeasurementTableView.java deleted file mode 100644 index 7022648..0000000 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MeasurementTableView.java +++ /dev/null @@ -1,237 +0,0 @@ -/* - * RHQ Management Platform - * Copyright (C) 2005-2011 Red Hat, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- */ - -package org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.table; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Set; -import java.util.TreeSet; - -import com.google.gwt.user.client.rpc.AsyncCallback; -import com.smartgwt.client.types.SelectionStyle; -import com.smartgwt.client.widgets.Window; -import com.smartgwt.client.widgets.events.CloseClickEvent; -import com.smartgwt.client.widgets.events.CloseClickHandler; -import com.smartgwt.client.widgets.grid.ListGrid; -import com.smartgwt.client.widgets.grid.ListGridField; -import com.smartgwt.client.widgets.grid.ListGridRecord; - -import org.rhq.core.domain.criteria.ResourceCriteria; -import org.rhq.core.domain.measurement.MeasurementData; -import org.rhq.core.domain.measurement.MeasurementUnits; -import org.rhq.core.domain.resource.composite.ResourceComposite; -import org.rhq.core.domain.util.PageList; -import org.rhq.enterprise.gui.coregui.client.CoreGUI; -import org.rhq.enterprise.gui.coregui.client.components.measurement.UserPreferencesMeasurementRangeEditor; -import org.rhq.enterprise.gui.coregui.client.components.table.Table; -import org.rhq.enterprise.gui.coregui.client.components.table.TableAction; -import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; -import org.rhq.enterprise.gui.coregui.client.inventory.InventoryView; -import org.rhq.enterprise.gui.coregui.client.inventory.common.detail.summary.AbstractActivityView.ChartViewWindow; -import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.D3GraphListView; -import org.rhq.enterprise.gui.coregui.client.util.MeasurementConverterClient; -import org.rhq.enterprise.gui.coregui.client.util.message.Message; - -/** - * Views a resource's measurements in a tabular view. 
- * - * @author John Mazzitelli - */ -public class MeasurementTableView extends Table<MetricsTableDataSource> { - - private final int resourceId; - - public MeasurementTableView(int resourceId) { - super(); - this.resourceId = resourceId; - setDataSource(new MetricsTableDataSource(resourceId)); - } - - protected void configureTable() { - ArrayList<ListGridField> fields = getDataSource().getListGridFields(); - setListGridFields(fields.toArray(new ListGridField[0])); - addExtraWidget(new UserPreferencesMeasurementRangeEditor(), true); - addTableAction(MSG.view_measureTable_getLive(), new TableAction() { - @Override - public boolean isEnabled(ListGridRecord[] selection) { - return selection != null && selection.length > 0; - } - - @Override - public void executeAction(ListGridRecord[] selection, Object actionValue) { - if (selection == null || selection.length == 0) { - return; - } - // keyed on metric name - string[0] is the metric label, [1] is the units - final HashMap<String, String[]> scheduleNamesAndUnits = new HashMap<String, String[]>(); - int[] definitionIds = new int[selection.length]; - int i = 0; - for (ListGridRecord record : selection) { - Integer defId = record.getAttributeAsInt(MetricsTableDataSource.FIELD_METRIC_DEF_ID); - definitionIds[i++] = defId.intValue(); - - String name = record.getAttribute(MetricsTableDataSource.FIELD_METRIC_NAME); - String label = record.getAttribute(MetricsTableDataSource.FIELD_METRIC_LABEL); - String units = record.getAttribute(MetricsTableDataSource.FIELD_METRIC_UNITS); - if (units == null || units.length() < 1) { - units = MeasurementUnits.NONE.name(); - } - - scheduleNamesAndUnits.put(name, new String[] { label, units }); - } - - // actually go out and ask the agents for the data - GWTServiceLookup.getMeasurementDataService(60000).findLiveData(resourceId, definitionIds, - new AsyncCallback<Set<MeasurementData>>() { - @Override - public void onSuccess(Set<MeasurementData> result) { - if (result == null) { - result = new 
HashSet<MeasurementData>(0); - } - ArrayList<ListGridRecord> records = new ArrayList<ListGridRecord>(result.size()); - for (MeasurementData data : result) { - String[] nameAndUnits = scheduleNamesAndUnits.get(data.getName()); - if (nameAndUnits != null) { - double doubleValue; - if (data.getValue() instanceof Number) { - doubleValue = ((Number) data.getValue()).doubleValue(); - } else { - doubleValue = Double.parseDouble(data.getValue().toString()); - } - String value = MeasurementConverterClient.formatToSignificantPrecision( - new double[] { doubleValue }, MeasurementUnits.valueOf(nameAndUnits[1]), true)[0]; - - ListGridRecord record = new ListGridRecord(); - record.setAttribute("name", nameAndUnits[0]); - record.setAttribute("value", value); - records.add(record); - } - } - Collections.sort(records, new Comparator<ListGridRecord>() { - public int compare(ListGridRecord o1, ListGridRecord o2) { - return o1.getAttribute("name").compareTo(o2.getAttribute("name")); - } - }); - showLiveData(records); - } - - @Override - public void onFailure(Throwable caught) { - CoreGUI.getErrorHandler().handleError(MSG.view_measureTable_getLive_failure(), caught); - } - }); - } - }); - - - addTableAction(MSG.view_measureTable_chartMetricValues(), new TableAction() { - @Override - public boolean isEnabled(ListGridRecord[] selection) { - return selection != null && selection.length > 0; - } - - @Override - public void executeAction(ListGridRecord[] selection, Object actionValue) { - if (selection == null || selection.length == 0) { - return; - } - final TreeSet<Integer> definitionIds = new TreeSet<Integer>(); - for (ListGridRecord record : selection) { - Integer defId = record.getAttributeAsInt(MetricsTableDataSource.FIELD_METRIC_DEF_ID); - definitionIds.add(defId); - } - - ResourceCriteria criteria = new ResourceCriteria(); - criteria.addFilterId(resourceId); - criteria.fetchSchedules(true); - GWTServiceLookup.getResourceService().findResourceCompositesByCriteria(criteria, - new 
AsyncCallback<PageList<ResourceComposite>>() { - - @Override - public void onFailure(Throwable caught) { - CoreGUI.getMessageCenter().notify( - new Message(MSG.view_inventory_resource_loadFailed(String.valueOf(resourceId)), - Message.Severity.Warning)); - - CoreGUI.goToView(InventoryView.VIEW_ID.getName()); - } - - @Override - public void onSuccess(PageList<ResourceComposite> result) { - if (result.isEmpty()) { - onFailure(new Exception(MSG.view_inventory_resource_loadFailed(String - .valueOf(resourceId)))); - } else { - final ResourceComposite resourceComposite = result.get(0); - - ChartViewWindow window = new ChartViewWindow(""); - final D3GraphListView graphListView = D3GraphListView.createMultipleGraphs( - resourceComposite.getResource(), definitionIds, true); - - window.addItem(graphListView); - window.show(); - refreshTableInfo(); - - } - } - }); - - } - }); - } - - private void showLiveData(ArrayList<ListGridRecord> records) { - final Window liveDataWindow = new Window(); - liveDataWindow.setTitle(MSG.view_measureTable_live_title()); - liveDataWindow.setShowModalMask(true); - liveDataWindow.setShowMinimizeButton(false); - liveDataWindow.setShowMaximizeButton(true); - liveDataWindow.setShowCloseButton(true); - liveDataWindow.setShowResizer(true); - liveDataWindow.setCanDragResize(true); - liveDataWindow.setDismissOnEscape(true); - liveDataWindow.setIsModal(true); - liveDataWindow.setWidth(700); - liveDataWindow.setHeight(425); - liveDataWindow.setAutoCenter(true); - liveDataWindow.centerInPage(); - liveDataWindow.addCloseClickHandler(new CloseClickHandler() { - @Override - public void onCloseClick(CloseClickEvent event) { - liveDataWindow.destroy(); - refreshTableInfo(); - } - }); - - ListGrid liveDataGrid = new ListGrid(); - liveDataGrid.setShowAllRecords(true); - liveDataGrid.setData(records.toArray(new ListGridRecord[records.size()])); - liveDataGrid.setSelectionType(SelectionStyle.NONE); - ListGridField name = new ListGridField("name", 
MSG.common_title_metric()); - ListGridField value = new ListGridField("value", MSG.common_title_value()); - liveDataGrid.setFields(name, value); - - liveDataWindow.addItem(liveDataGrid); - liveDataWindow.show(); - } -} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsResourceView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsResourceView.java new file mode 100644 index 0000000..bcea825 --- /dev/null +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsResourceView.java @@ -0,0 +1,167 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ +package org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.table; + +import java.util.List; + +import com.google.gwt.user.client.Timer; +import com.google.gwt.user.client.rpc.AsyncCallback; +import com.smartgwt.client.types.Overflow; +import com.smartgwt.client.types.VerticalAlignment; +import com.smartgwt.client.widgets.Img; +import com.smartgwt.client.widgets.events.ClickEvent; +import com.smartgwt.client.widgets.events.ClickHandler; + +import org.rhq.core.domain.common.EntityContext; +import org.rhq.core.domain.measurement.Availability; +import org.rhq.core.domain.resource.Resource; +import org.rhq.enterprise.gui.coregui.client.CoreGUI; +import org.rhq.enterprise.gui.coregui.client.IconEnum; +import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; +import org.rhq.enterprise.gui.coregui.client.inventory.common.AbstractD3GraphListView; +import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilityOverUnderGraphType; +import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.avail.AvailabilityD3GraphView; +import org.rhq.enterprise.gui.coregui.client.util.BrowserUtility; +import org.rhq.enterprise.gui.coregui.client.util.Log; +import org.rhq.enterprise.gui.coregui.client.util.async.CountDownLatch; +import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedHLayout; + +/** + * The consolidated metrics view showing metric graphs and availability data both in graphical and tabular form. 
+ * + * @author Mike Thompson + */ +public class MetricsResourceView extends AbstractD3GraphListView { + + private static final String COLLAPSED_TOOLTIP = MSG.chart_metrics_collapse_tooltip(); + private static final String EXPANDED_TOOLTIP = MSG.chart_metrics_expand_tooltip(); + + private final Resource resource; + private Img expandCollapseArrow; + private final MetricsTableView metricsTableView; + private final ResourceMetricAvailabilityView availabilityDetails; + + public MetricsResourceView(Resource resource) { + super(); + setOverflow(Overflow.AUTO); + setWidth100(); + setHeight100(); + this.resource = resource; + metricsTableView = new MetricsTableView(resource, this); + availabilityDetails = new ResourceMetricAvailabilityView(resource); + } + + + public void redrawGraphs() { + this.onDraw(); + } + + public void refreshGraphs(){ + new Timer() { + @Override + public void run() { + availabilityGraph.drawJsniChart(); + BrowserUtility.graphSparkLines(); + } + }.schedule(150); + } + + @Override + protected void onDraw() { + super.onDraw(); + Log.debug("MetricResourceView.onDraw() for: " + resource.getName() + " id: " + resource.getId()); + destroyMembers(); + + + addMember(buttonBarDateTimeRangeEditor); + + availabilityGraph = new AvailabilityD3GraphView<AvailabilityOverUnderGraphType>( + new AvailabilityOverUnderGraphType(resource.getId())); + + EnhancedHLayout expandCollapseHLayout = new EnhancedHLayout(); + + //add expand/collapse icon + expandCollapseArrow = new Img(IconEnum.COLLAPSED_ICON.getIcon16x16Path(), 16, 16); + expandCollapseArrow.setTooltip(COLLAPSED_TOOLTIP); + expandCollapseArrow.setLayoutAlign(VerticalAlignment.BOTTOM); + expandCollapseArrow.addClickHandler(new ClickHandler() { + private boolean collapsed = true; + + @Override + public void onClick(ClickEvent event) { + collapsed = !collapsed; + if (collapsed) { + expandCollapseArrow.setSrc(IconEnum.COLLAPSED_ICON.getIcon16x16Path()); + expandCollapseArrow.setTooltip(COLLAPSED_TOOLTIP); + 
availabilityDetails.hide(); + } else { + expandCollapseArrow.setSrc(IconEnum.EXPANDED_ICON.getIcon16x16Path()); + expandCollapseArrow.setTooltip(EXPANDED_TOOLTIP); + availabilityDetails.show(); + + } + refreshGraphs(); + } + }); + + + expandCollapseHLayout.addMember(expandCollapseArrow); + expandCollapseHLayout.addMember(availabilityGraph); + addMember(expandCollapseHLayout); + + availabilityDetails.hide(); + addMember(availabilityDetails); + + metricsTableView.setHeight100(); + addMember(metricsTableView); + + + queryAvailability(EntityContext.forResource(resource.getId()), buttonBarDateTimeRangeEditor.getStartTime(), + buttonBarDateTimeRangeEditor.getEndTime(), null); + } + + @Override + protected void queryAvailability(final EntityContext context, Long startTime, Long endTime, CountDownLatch notUsed ) { + + final long timerStart = System.currentTimeMillis(); + + // now return the availability + GWTServiceLookup.getAvailabilityService().getAvailabilitiesForResource(context.getResourceId(), startTime, + endTime, new AsyncCallback<List<Availability>>() { + @Override + public void onFailure(Throwable caught) { + CoreGUI.getErrorHandler().handleError(MSG.view_resource_monitor_availability_loadFailed(), caught); + } + + @Override + public void onSuccess(List<Availability> availList) { + Log.debug("\nSuccessfully queried availability in: " + (System.currentTimeMillis() - timerStart) + + " ms."); + availabilityGraph.setAvailabilityList(availList); + new Timer() { + @Override + public void run() { + availabilityGraph.drawJsniChart(); + } + }.schedule(150); + } + }); + } + +} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableDataSource.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableDataSource.java index 318166d..57e62ab 100644 --- 
a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableDataSource.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableDataSource.java @@ -1,7 +1,13 @@ package org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.table;
import java.util.ArrayList; +import java.util.Arrays; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.List; +import java.util.Set;
+import com.google.gwt.user.client.Timer; import com.google.gwt.user.client.rpc.AsyncCallback; import com.smartgwt.client.data.DSRequest; import com.smartgwt.client.data.DSResponse; @@ -11,18 +17,28 @@ import com.smartgwt.client.widgets.grid.ListGridField; import com.smartgwt.client.widgets.grid.ListGridRecord;
import org.rhq.core.domain.criteria.Criteria; +import org.rhq.core.domain.criteria.ResourceCriteria; import org.rhq.core.domain.measurement.DataType; +import org.rhq.core.domain.measurement.MeasurementDefinition; import org.rhq.core.domain.measurement.MeasurementSchedule; +import org.rhq.core.domain.measurement.composite.MeasurementDataNumericHighLowComposite; import org.rhq.core.domain.measurement.ui.MetricDisplaySummary; import org.rhq.core.domain.measurement.ui.MetricDisplayValue; +import org.rhq.core.domain.resource.Resource; +import org.rhq.core.domain.resource.ResourceType; +import org.rhq.core.domain.resource.composite.ResourceComposite; +import org.rhq.core.domain.util.PageList; import org.rhq.enterprise.gui.coregui.client.CoreGUI; import org.rhq.enterprise.gui.coregui.client.UserSessionManager; import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; +import org.rhq.enterprise.gui.coregui.client.inventory.resource.type.ResourceTypeRepository; import org.rhq.enterprise.gui.coregui.client.util.BrowserUtility; +import org.rhq.enterprise.gui.coregui.client.util.Log; import org.rhq.enterprise.gui.coregui.client.util.MeasurementUtility; import org.rhq.enterprise.gui.coregui.client.util.RPCDataSource; +import org.rhq.enterprise.gui.coregui.client.util.async.Command; +import org.rhq.enterprise.gui.coregui.client.util.async.CountDownLatch; import org.rhq.enterprise.gui.coregui.client.util.preferences.MeasurementUserPreferences; -import org.rhq.enterprise.gui.coregui.client.util.preferences.UserPreferences;
/** * A simple data source to read in metric data summaries for a resource. @@ -31,9 +47,13 @@ import org.rhq.enterprise.gui.coregui.client.util.preferences.UserPreferences; * we just load them all in at once. * * @author John Mazzitelli + * @author Mike Thompson + * @todo: get rid of this once we have tested the new screen out */ +@Deprecated public class MetricsTableDataSource extends RPCDataSource<MetricDisplaySummary, Criteria> {
+ public static final String FIELD_SPARKLINE = "sparkline"; public static final String FIELD_METRIC_LABEL = "label"; public static final String FIELD_ALERT_COUNT = "alertCount"; public static final String FIELD_MIN_VALUE = "min"; @@ -44,11 +64,15 @@ public class MetricsTableDataSource extends RPCDataSource<MetricDisplaySummary, public static final String FIELD_METRIC_SCHED_ID = "schedId"; public static final String FIELD_METRIC_UNITS = "units"; public static final String FIELD_METRIC_NAME = "name"; - + public static final String FIELD_RESOURCE_ID = "resourceId"; private int resourceId; + private List<MetricDisplaySummary> metricDisplaySummaries; + private List<List<MeasurementDataNumericHighLowComposite>> metricsDataList; + private MeasurementUserPreferences measurementUserPrefs;
public MetricsTableDataSource(int resourceId) { this.resourceId = resourceId; + measurementUserPrefs = new MeasurementUserPreferences(UserSessionManager.getUserPreferences()); }
/** @@ -58,7 +82,25 @@ public class MetricsTableDataSource extends RPCDataSource<MetricDisplaySummary, * @return list grid fields used to display the datasource data */ public ArrayList<ListGridField> getListGridFields() { - ArrayList<ListGridField> fields = new ArrayList<ListGridField>(6); + ArrayList<ListGridField> fields = new ArrayList<ListGridField>(7); + + ListGridField sparklineField = new ListGridField(FIELD_SPARKLINE, "chart"); + sparklineField.setCellFormatter(new CellFormatter() { + @Override + public String format(Object value, ListGridRecord record, int rowNum, int colNum) { + if (value == null) { + return ""; + } + String contents = "<span id='sparkline_" + resourceId + "-" + + record.getAttributeAsInt(FIELD_METRIC_DEF_ID) + "' class='dynamicsparkline' width='70' " + + "values='" + record.getAttribute(FIELD_SPARKLINE) + "'></span>"; + return contents; + + } + }); + + sparklineField.setWidth(80); + fields.add(sparklineField);
ListGridField nameField = new ListGridField(FIELD_METRIC_LABEL, MSG.common_title_name()); nameField.setWidth("30%"); @@ -101,6 +143,7 @@ public class MetricsTableDataSource extends RPCDataSource<MetricDisplaySummary, MeasurementUtility.formatSimpleMetrics(from);
ListGridRecord record = new ListGridRecord(); + record.setAttribute(FIELD_SPARKLINE, getCsvMetricsForSparkline()); record.setAttribute(FIELD_METRIC_LABEL, from.getLabel()); record.setAttribute(FIELD_ALERT_COUNT, String.valueOf(from.getAlertCount())); record.setAttribute(FIELD_MIN_VALUE, getMetricStringValue(from.getMinMetric())); @@ -111,9 +154,32 @@ public class MetricsTableDataSource extends RPCDataSource<MetricDisplaySummary, record.setAttribute(FIELD_METRIC_SCHED_ID, from.getScheduleId()); record.setAttribute(FIELD_METRIC_UNITS, from.getUnits()); record.setAttribute(FIELD_METRIC_NAME, from.getMetricName()); + record.setAttribute(FIELD_RESOURCE_ID, resourceId); return record; }
+ private String getCsvMetricsForSparkline() { + StringBuilder sb = new StringBuilder(); + Log.debug("getCsvMetricsForSparkline.metricsDataList: " + metricsDataList.size()); + for (List<MeasurementDataNumericHighLowComposite> measurementData : metricsDataList) { + for (int i = 0; i < measurementData.size(); i++) { + // take the last 20 values + if (i >= measurementData.size() - 20) { + if (!Double.isNaN(measurementData.get(i).getValue())) { + sb.append((int) measurementData.get(i).getValue()); + sb.append(","); + } + } + } + if (sb.toString().endsWith(",")) { + sb.setLength(sb.length() - 1); + } + } + Log.debug("getCsvMetricsForSparkline: " + sb.toString()); + + return sb.toString(); + } + protected String getMetricStringValue(MetricDisplayValue value) { return (value != null) ? value.toString() : ""; } @@ -127,34 +193,54 @@ public class MetricsTableDataSource extends RPCDataSource<MetricDisplaySummary, @Override protected void executeFetch(final DSRequest request, final DSResponse response, final Criteria unused) {
- // see MetricsTableUIBean for the old JSF class to see where this came from - GWTServiceLookup.getMeasurementScheduleService().findSchedulesForResourceAndType(resourceId, DataType.MEASUREMENT, null, true, new AsyncCallback<ArrayList<MeasurementSchedule>>() { @Override - public void onSuccess(ArrayList<MeasurementSchedule> result) { - int[] schedIds = new int[result.size()]; + public void onSuccess(ArrayList<MeasurementSchedule> measurementSchedules) { + int[] scheduleIds = new int[measurementSchedules.size()]; int i = 0; - for (MeasurementSchedule measurementSchedule : result) { - schedIds[i++] = measurementSchedule.getId(); + for (MeasurementSchedule measurementSchedule : measurementSchedules) { + scheduleIds[i++] = measurementSchedule.getId(); }
- UserPreferences prefs = UserSessionManager.getUserPreferences(); - MeasurementUserPreferences mprefs = new MeasurementUserPreferences(prefs); - ArrayList<Long> range = mprefs.getMetricRangePreferences().getBeginEndTimes(); + final CountDownLatch countDownLatch = CountDownLatch.create(2, new Command() { + + @Override + public void execute() { + response.setData(buildRecords(metricDisplaySummaries)); + processResponse(request.getRequestId(), response); + + new Timer() { + + @Override + public void run() { + BrowserUtility.graphSparkLines(); + } + }.schedule(150); + Log.debug("*** Finished CountdownLatch for metrics loaded: " + metricsDataList.size()); + } + }); + + retrieveResourceMetrics(resourceId, countDownLatch); + GWTServiceLookup.getMeasurementChartsService().getMetricDisplaySummariesForResource(resourceId, - schedIds, range.get(0), range.get(1), new AsyncCallback<ArrayList<MetricDisplaySummary>>() { - @Override - public void onSuccess(ArrayList<MetricDisplaySummary> result) { - response.setData(buildRecords(result)); - processResponse(request.getRequestId(), response); - } + scheduleIds, measurementUserPrefs.getMetricRangePreferences().begin, + measurementUserPrefs.getMetricRangePreferences().end, + new AsyncCallback<ArrayList<MetricDisplaySummary>>() { + @Override + public void onSuccess(ArrayList<MetricDisplaySummary> metricDisplaySummaries) { + setMetricDisplaySummaries(metricDisplaySummaries); + countDownLatch.countDown(); + }
- @Override - public void onFailure(Throwable caught) { - CoreGUI.getErrorHandler().handleError("Cannot load metrics", caught); + @Override + public void onFailure(Throwable caught) { + CoreGUI.getErrorHandler().handleError("Cannot load metrics", caught); + countDownLatch.countDown(); + } } - }); + + ); }
@Override @@ -163,4 +249,87 @@ public class MetricsTableDataSource extends RPCDataSource<MetricDisplaySummary, } }); } + + void setMetricDisplaySummaries(List<MetricDisplaySummary> metricDisplaySummaries) { + this.metricDisplaySummaries = metricDisplaySummaries; + } + + public void retrieveResourceMetrics(final Integer resourceId, final CountDownLatch countDownLatch) { + + ResourceCriteria criteria = new ResourceCriteria(); + criteria.addFilterId(resourceId); + + //locate the resource + GWTServiceLookup.getResourceService().findResourceCompositesByCriteria(criteria, + new AsyncCallback<PageList<ResourceComposite>>() { + @Override + public void onFailure(Throwable caught) { + Log.debug("Error retrieving resource resource composite for resource [" + resourceId + "]:" + + caught.getMessage()); + } + + @Override + public void onSuccess(PageList<ResourceComposite> resourceCompositePageList) { + if (!resourceCompositePageList.isEmpty()) { + final ResourceComposite resourceComposite = resourceCompositePageList.get(0); + final Resource resource = resourceComposite.getResource(); + // Load the fully fetched ResourceType. + ResourceType resourceType = resource.getResourceType(); + ResourceTypeRepository.Cache.getInstance().getResourceTypes(resourceType.getId(), + EnumSet.of(ResourceTypeRepository.MetadataType.measurements), + new ResourceTypeRepository.TypeLoadedCallback() { + public void onTypesLoaded(ResourceType type) { + resource.setResourceType(type); + //metric definitions + Set<MeasurementDefinition> definitions = type.getMetricDefinitions(); + + //build id mapping for measurementDefinition instances Ex. Free Memory -> MeasurementDefinition[100071] + final HashMap<String, MeasurementDefinition> measurementDefMap = new HashMap<String, MeasurementDefinition>(); + for (MeasurementDefinition definition : definitions) { + measurementDefMap.put(definition.getDisplayName(), definition); + } + //bundle definition ids for asynch call. 
+ int[] definitionArrayIds = new int[definitions.size()]; + final String[] displayOrder = new String[definitions.size()]; + measurementDefMap.keySet().toArray(displayOrder); + //sort the charting data ex. Free Memory, Free Swap Space,..System Load + Arrays.sort(displayOrder); + + //organize definitionArrayIds for ordered request on server. + int index = 0; + for (String definitionToDisplay : displayOrder) { + definitionArrayIds[index++] = measurementDefMap.get(definitionToDisplay) + .getId(); + } + + GWTServiceLookup.getMeasurementDataService().findDataForResource(resourceId, + definitionArrayIds, measurementUserPrefs.getMetricRangePreferences().begin, + measurementUserPrefs.getMetricRangePreferences().end, 60, + new AsyncCallback<List<List<MeasurementDataNumericHighLowComposite>>>() { + @Override + public void onFailure(Throwable caught) { + Log.warn("Error retrieving recent metrics charting data for resource [" + + resourceId + "]:" + caught.getMessage()); + } + + @Override + public void onSuccess( + List<List<MeasurementDataNumericHighLowComposite>> measurementDataList) { + + if (!measurementDataList.isEmpty()) { + metricsDataList = measurementDataList; + Log.debug("*** Setting metricsDataList.size: " + + metricsDataList.size()); + countDownLatch.countDown(); + } + } + }); + + } + }); + } + } + }); + + } } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java new file mode 100644 index 0000000..09a7bc3 --- /dev/null +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java @@ -0,0 +1,385 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +package org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.table; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import com.google.gwt.core.client.GWT; +import com.google.gwt.user.client.Timer; +import com.google.gwt.user.client.rpc.AsyncCallback; +import com.smartgwt.client.types.ExpansionMode; +import com.smartgwt.client.types.SelectionStyle; +import com.smartgwt.client.widgets.Canvas; +import com.smartgwt.client.widgets.HTMLFlow; +import com.smartgwt.client.widgets.Window; +import com.smartgwt.client.widgets.events.CloseClickEvent; +import com.smartgwt.client.widgets.events.CloseClickHandler; +import com.smartgwt.client.widgets.grid.ListGrid; +import com.smartgwt.client.widgets.grid.ListGridField; +import com.smartgwt.client.widgets.grid.ListGridRecord; +import com.smartgwt.client.widgets.grid.events.DataArrivedEvent; +import com.smartgwt.client.widgets.grid.events.DataArrivedHandler; +import com.smartgwt.client.widgets.grid.events.RecordCollapseEvent; +import com.smartgwt.client.widgets.grid.events.RecordCollapseHandler; +import com.smartgwt.client.widgets.grid.events.RecordExpandEvent; +import 
com.smartgwt.client.widgets.grid.events.RecordExpandHandler; +import com.smartgwt.client.widgets.grid.events.SelectionChangedHandler; +import com.smartgwt.client.widgets.grid.events.SelectionEvent; +import com.smartgwt.client.widgets.grid.events.SortChangedHandler; +import com.smartgwt.client.widgets.grid.events.SortEvent; +import com.smartgwt.client.widgets.layout.VLayout; + +import org.rhq.core.domain.measurement.MeasurementData; +import org.rhq.core.domain.measurement.MeasurementDefinition; +import org.rhq.core.domain.measurement.MeasurementUnits; +import org.rhq.core.domain.measurement.composite.MeasurementDataNumericHighLowComposite; +import org.rhq.core.domain.resource.Resource; +import org.rhq.enterprise.gui.coregui.client.CoreGUI; +import org.rhq.enterprise.gui.coregui.client.UserSessionManager; +import org.rhq.enterprise.gui.coregui.client.components.table.Table; +import org.rhq.enterprise.gui.coregui.client.components.table.TableAction; +import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; +import org.rhq.enterprise.gui.coregui.client.inventory.common.AbstractD3GraphListView; +import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.MetricGraphData; +import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.RedrawGraphs; +import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.StackedBarMetricGraphImpl; +import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.MetricD3Graph; +import org.rhq.enterprise.gui.coregui.client.util.BrowserUtility; +import org.rhq.enterprise.gui.coregui.client.util.Log; +import org.rhq.enterprise.gui.coregui.client.util.MeasurementConverterClient; +import org.rhq.enterprise.gui.coregui.client.util.preferences.MeasurementUserPreferences; + +/** + * Views a resource's metrics in a tabular view with sparkline graph and optional detailed d3 graph. 
+ * + * @author John Mazzitelli + * @author Mike Thompson + */ +public class MetricsTableView extends Table<MetricsViewDataSource> implements RedrawGraphs { + + private final Resource resource; + private final AbstractD3GraphListView abstractD3GraphListView; + + private final MeasurementUserPreferences measurementUserPrefs; + private final AddToDashboardComponent addToDashboardComponent; + private MetricsTableListGrid metricsTableListGrid; + + Set<Integer> expandedRows = new HashSet<Integer>(); + + public MetricsTableView(Resource resource, AbstractD3GraphListView abstractD3GraphListView) { + super(); + this.resource = resource; + this.abstractD3GraphListView = abstractD3GraphListView; + measurementUserPrefs = new MeasurementUserPreferences(UserSessionManager.getUserPreferences()); + setDataSource(new MetricsViewDataSource(resource)); + addToDashboardComponent = new AddToDashboardComponent(resource); + } + + /** + * Creates this Table's list grid (called by onInit()). Subclasses can override this if they require a custom + * subclass of ListGrid. 
+ * + * @return this Table's list grid (must be an instance of ListGrid) + */ + @Override + protected ListGrid createListGrid() { + metricsTableListGrid = new MetricsTableListGrid(this, resource); + addToDashboardComponent.setMetricsListGrid(metricsTableListGrid); + return metricsTableListGrid; + } + + protected void configureTable() { + ArrayList<ListGridField> fields = getDataSource().getListGridFields(); + setListGridFields(fields.toArray(new ListGridField[0])); + + addTableAction(MSG.view_measureTable_getLive(), new ShowLiveDataTableAction(this)); + addExtraWidget(addToDashboardComponent, false); + addToDashboardComponent.disableAddToDashboardButton(); + metricsTableListGrid.addSelectionChangedHandler(new SelectionChangedHandler() { + @Override + public void onSelectionChanged(SelectionEvent selectionEvent) { + if(metricsTableListGrid.getSelectedRecords().length > 0){ + addToDashboardComponent.enableAddToDashboardButton(); + }else { + addToDashboardComponent.disableAddToDashboardButton(); + } + } + }); + } + + private static class ShowLiveDataTableAction implements TableAction { + private MetricsTableView metricsTableView; + + public ShowLiveDataTableAction(MetricsTableView metricsTableView) { + this.metricsTableView = metricsTableView; + } + + @Override + public boolean isEnabled(ListGridRecord[] selection) { + return selection != null && selection.length > 0; + } + + @Override + public void executeAction(ListGridRecord[] selection, Object actionValue) { + if (selection == null || selection.length == 0) { + return; + } + // keyed on metric name - string[0] is the metric label, [1] is the units + final HashMap<String, String[]> scheduleNamesAndUnits = new HashMap<String, String[]>(); + int[] definitionIds = new int[selection.length]; + int i = 0; + for (ListGridRecord record : selection) { + Integer defId = record.getAttributeAsInt(MetricsViewDataSource.FIELD_METRIC_DEF_ID); + definitionIds[i++] = defId; + + String name = 
record.getAttribute(MetricsViewDataSource.FIELD_METRIC_NAME); + String label = record.getAttribute(MetricsViewDataSource.FIELD_METRIC_LABEL); + String units = record.getAttribute(MetricsViewDataSource.FIELD_METRIC_UNITS); + if (units == null || units.length() < 1) { + units = MeasurementUnits.NONE.name(); + } + + scheduleNamesAndUnits.put(name, new String[] { label, units }); + } + + // actually go out and ask the agents for the data + GWTServiceLookup.getMeasurementDataService(60000).findLiveData(metricsTableView.resource.getId(), + definitionIds, new AsyncCallback<Set<MeasurementData>>() { + @Override + public void onSuccess(Set<MeasurementData> result) { + if (result == null) { + result = new HashSet<MeasurementData>(0); + } + ArrayList<ListGridRecord> records = new ArrayList<ListGridRecord>(result.size()); + for (MeasurementData data : result) { + String[] nameAndUnits = scheduleNamesAndUnits.get(data.getName()); + if (nameAndUnits != null) { + double doubleValue; + if (data.getValue() instanceof Number) { + doubleValue = ((Number) data.getValue()).doubleValue(); + } else { + doubleValue = Double.parseDouble(data.getValue().toString()); + } + String value = MeasurementConverterClient.formatToSignificantPrecision( + new double[] { doubleValue }, MeasurementUnits.valueOf(nameAndUnits[1]), true)[0]; + + ListGridRecord record = new ListGridRecord(); + record.setAttribute("name", nameAndUnits[0]); + record.setAttribute("value", value); + records.add(record); + } + } + Collections.sort(records, new Comparator<ListGridRecord>() { + public int compare(ListGridRecord o1, ListGridRecord o2) { + return o1.getAttribute("name").compareTo(o2.getAttribute("name")); + } + }); + showLiveData(records); + } + + @Override + public void onFailure(Throwable caught) { + CoreGUI.getErrorHandler().handleError(MSG.view_measureTable_getLive_failure(), caught); + } + }); + } + + private void showLiveData(ArrayList<ListGridRecord> records) { + final Window liveDataWindow = new Window(); + 
liveDataWindow.setTitle(MSG.view_measureTable_live_title()); + liveDataWindow.setShowModalMask(true); + liveDataWindow.setShowMinimizeButton(false); + liveDataWindow.setShowMaximizeButton(true); + liveDataWindow.setShowCloseButton(true); + liveDataWindow.setShowResizer(true); + liveDataWindow.setCanDragResize(true); + liveDataWindow.setDismissOnEscape(true); + liveDataWindow.setIsModal(true); + liveDataWindow.setWidth(700); + liveDataWindow.setHeight(425); + liveDataWindow.setAutoCenter(true); + liveDataWindow.centerInPage(); + liveDataWindow.addCloseClickHandler(new CloseClickHandler() { + @Override + public void onCloseClick(CloseClickEvent event) { + liveDataWindow.destroy(); + metricsTableView.refreshTableInfo(); + } + }); + + ListGrid liveDataGrid = new ListGrid(); + liveDataGrid.setShowAllRecords(true); + liveDataGrid.setData(records.toArray(new ListGridRecord[records.size()])); + liveDataGrid.setSelectionType(SelectionStyle.NONE); + ListGridField name = new ListGridField("name", MSG.common_title_metric()); + ListGridField value = new ListGridField("value", MSG.common_title_value()); + liveDataGrid.setFields(name, value); + + liveDataWindow.addItem(liveDataGrid); + liveDataWindow.show(); + } + + } + + @Override + /** + * Redraw Graphs in this context means to refresh the table and redraw open graphs. 
+ */ + public void redrawGraphs() { + Log.debug("MetricsView.redrawGraphs."); + + new Timer() { + + @Override + public void run() { + BrowserUtility.graphSparkLines(); + } + }.schedule(150); + + } + + public class MetricsTableListGrid extends ListGrid { + + private static final int TREEVIEW_DETAIL_CHART_HEIGHT = 205; + private static final int NUM_METRIC_POINTS = 60; + private Resource resource; + final MetricsTableView metricsTableView; + + public MetricsTableListGrid(final MetricsTableView metricsTableView, final Resource resource) { + super(); + this.resource = resource; + this.metricsTableView = metricsTableView; + setCanExpandRecords(true); + setCanExpandMultipleRecords(true); + setExpansionMode(ExpansionMode.DETAIL_FIELD); + + addRecordExpandHandler(new RecordExpandHandler() { + @Override + public void onRecordExpand(RecordExpandEvent recordExpandEvent) { + metricsTableView.expandedRows.add(recordExpandEvent.getRecord().getAttributeAsInt( + MetricsViewDataSource.FIELD_METRIC_DEF_ID)); + redrawGraphs(); + } + + }); + addRecordCollapseHandler(new RecordCollapseHandler() { + @Override + public void onRecordCollapse(RecordCollapseEvent recordCollapseEvent) { + metricsTableView.expandedRows.remove(recordCollapseEvent.getRecord().getAttributeAsInt( + MetricsViewDataSource.FIELD_METRIC_DEF_ID)); + redrawGraphs(); + } + }); + addSortChangedHandler(new SortChangedHandler() { + @Override + public void onSortChanged(SortEvent sortEvent) { + redrawGraphs(); + } + }); + addDataArrivedHandler(new DataArrivedHandler() { + @Override + public void onDataArrived(DataArrivedEvent dataArrivedEvent) { + int startRow = dataArrivedEvent.getStartRow(); + int endRow = dataArrivedEvent.getEndRow(); + for (int i = startRow; i < endRow; i++) { + if (null != metricsTableView.expandedRows + && metricsTableView.expandedRows.contains(getRecord(i).getAttributeAsInt( + MetricsViewDataSource.FIELD_METRIC_DEF_ID))) { + expandRecord(getRecord(i)); + } + } + } + }); + + } + + @Override + 
protected Canvas getExpansionComponent(final ListGridRecord record) { + final Integer definitionId = record.getAttributeAsInt(MetricsViewDataSource.FIELD_METRIC_DEF_ID); + final Integer resourceId = record.getAttributeAsInt(MetricsViewDataSource.FIELD_RESOURCE_ID); + VLayout vLayout = new VLayout(); + vLayout.setPadding(5); + + final String chartId = "rChart-" + resourceId + "-" + definitionId; + HTMLFlow htmlFlow = new HTMLFlow(MetricD3Graph.createGraphMarkerTemplate(chartId, + TREEVIEW_DETAIL_CHART_HEIGHT)); + vLayout.addMember(htmlFlow); + + int[] definitionArrayIds = new int[1]; + definitionArrayIds[0] = definitionId; + GWTServiceLookup.getMeasurementDataService().findDataForResource(resourceId, definitionArrayIds, + measurementUserPrefs.getMetricRangePreferences().begin, + measurementUserPrefs.getMetricRangePreferences().end, NUM_METRIC_POINTS, + new AsyncCallback<List<List<MeasurementDataNumericHighLowComposite>>>() { + @Override + public void onFailure(Throwable caught) { + Log.warn("Error retrieving recent metrics charting data for resource [" + resourceId + "]:" + + caught.getMessage()); + } + + @Override + public void onSuccess(List<List<MeasurementDataNumericHighLowComposite>> results) { + if (!results.isEmpty()) { + + //load the data results for the given metric definition + List<MeasurementDataNumericHighLowComposite> measurementList = results.get(0); + + MeasurementDefinition measurementDefinition = null; + for (MeasurementDefinition definition : resource.getResourceType().getMetricDefinitions()) { + if (definition.getId() == definitionId) { + measurementDefinition = definition; + break; + } + } + + MetricGraphData metricGraphData = MetricGraphData.createForResource(resourceId, + resource.getName(), measurementDefinition, measurementList, null); + metricGraphData.setHideLegend(true); + + StackedBarMetricGraphImpl graph = GWT.create(StackedBarMetricGraphImpl.class); + graph.setMetricGraphData(metricGraphData); + final MetricD3Graph graphView = new 
MetricD3Graph(graph, abstractD3GraphListView); + new Timer() { + @Override + public void run() { + graphView.drawJsniChart(); + BrowserUtility.graphSparkLines(); + + } + }.schedule(150); + + } else { + Log.warn("No chart data retrieving for resource [" + resourceId + "-" + definitionId + "]"); + + } + } + }); + + return vLayout; + } + } + +} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsViewDataSource.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsViewDataSource.java new file mode 100644 index 0000000..5c2fe25 --- /dev/null +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsViewDataSource.java @@ -0,0 +1,307 @@ +package org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.table; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Set; + +import com.google.gwt.user.client.Timer; +import com.google.gwt.user.client.rpc.AsyncCallback; +import com.smartgwt.client.data.DSRequest; +import com.smartgwt.client.data.DSResponse; +import com.smartgwt.client.data.Record; +import com.smartgwt.client.widgets.grid.CellFormatter; +import com.smartgwt.client.widgets.grid.ListGridField; +import com.smartgwt.client.widgets.grid.ListGridRecord; + +import org.rhq.core.domain.criteria.Criteria; +import org.rhq.core.domain.measurement.DataType; +import org.rhq.core.domain.measurement.MeasurementDefinition; +import org.rhq.core.domain.measurement.MeasurementSchedule; +import org.rhq.core.domain.measurement.composite.MeasurementDataNumericHighLowComposite; +import org.rhq.core.domain.measurement.ui.MetricDisplaySummary; +import org.rhq.core.domain.measurement.ui.MetricDisplayValue; +import 
org.rhq.core.domain.resource.Resource; +import org.rhq.enterprise.gui.coregui.client.CoreGUI; +import org.rhq.enterprise.gui.coregui.client.UserSessionManager; +import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; +import org.rhq.enterprise.gui.coregui.client.util.BrowserUtility; +import org.rhq.enterprise.gui.coregui.client.util.Log; +import org.rhq.enterprise.gui.coregui.client.util.MeasurementUtility; +import org.rhq.enterprise.gui.coregui.client.util.RPCDataSource; +import org.rhq.enterprise.gui.coregui.client.util.async.Command; +import org.rhq.enterprise.gui.coregui.client.util.async.CountDownLatch; +import org.rhq.enterprise.gui.coregui.client.util.preferences.MeasurementUserPreferences; + +/** + * A simple data source to read in metric data summaries for a resource. + * This doesn't support paging - everything is returned in one query. Since + * the number of metrics per resource is relatively small (never more than tens of them), + * we just load them all in at once. 
+ * + * @author John Mazzitelli + * @author Mike Thompson + */ +public class MetricsViewDataSource extends RPCDataSource<MetricDisplaySummary, Criteria> { + + private static final int NUMBER_OF_METRIC_POINTS = 60; + public static final String FIELD_SPARKLINE = "sparkline"; + public static final String FIELD_METRIC_LABEL = "label"; + public static final String FIELD_ALERT_COUNT = "alertCount"; + public static final String FIELD_MIN_VALUE = "min"; + public static final String FIELD_MAX_VALUE = "max"; + public static final String FIELD_AVG_VALUE = "avg"; + public static final String FIELD_LAST_VALUE = "last"; + public static final String FIELD_METRIC_DEF_ID = "defId"; + public static final String FIELD_METRIC_SCHED_ID = "schedId"; + public static final String FIELD_METRIC_UNITS = "units"; + public static final String FIELD_METRIC_NAME = "name"; + public static final String FIELD_RESOURCE_ID = "resourceId"; + private final Resource resource; + private List<MetricDisplaySummary> metricDisplaySummaries; + private List<List<MeasurementDataNumericHighLowComposite>> metricsDataList; + private int[] definitionArrayIds; + private final MeasurementUserPreferences measurementUserPrefs; + + public MetricsViewDataSource(Resource resource) { + this.resource = resource; + measurementUserPrefs = new MeasurementUserPreferences(UserSessionManager.getUserPreferences()); + } + + /** + * The view that contains the list grid which will display this datasource's data will call this + * method to get the field information which is used to control the display of the data. 
+ * + * @return list grid fields used to display the datasource data + */ + public ArrayList<ListGridField> getListGridFields() { + ArrayList<ListGridField> fields = new ArrayList<ListGridField>(7); + + ListGridField sparklineField = new ListGridField(FIELD_SPARKLINE, MSG.chart_metrics_sparkline_header()); + sparklineField.setCellFormatter(new CellFormatter() { + @Override + public String format(Object value, ListGridRecord record, int rowNum, int colNum) { + if (value == null) { + return ""; + } + String contents = "<span id='sparkline_" + resource.getId() + "-" + + record.getAttributeAsInt(FIELD_METRIC_DEF_ID) + "' class='dynamicsparkline' width='70' " + + "values='" + record.getAttribute(FIELD_SPARKLINE) + "'></span>"; + return contents; + + } + }); + + sparklineField.setWidth(80); + fields.add(sparklineField); + + ListGridField nameField = new ListGridField(FIELD_METRIC_LABEL, MSG.common_title_name()); + nameField.setWidth("30%"); + fields.add(nameField); + + ListGridField minField = new ListGridField(FIELD_MIN_VALUE, MSG.view_resource_monitor_table_min()); + minField.setWidth("15%"); + fields.add(minField); + + ListGridField maxField = new ListGridField(FIELD_MAX_VALUE, MSG.view_resource_monitor_table_max()); + maxField.setWidth("15%"); + fields.add(maxField); + + ListGridField avgField = new ListGridField(FIELD_AVG_VALUE, MSG.view_resource_monitor_table_avg()); + avgField.setWidth("15%"); + fields.add(avgField); + + ListGridField lastField = new ListGridField(FIELD_LAST_VALUE, MSG.view_resource_monitor_table_last()); + lastField.setWidth("15%"); + fields.add(lastField); + + ListGridField alertsField = new ListGridField(FIELD_ALERT_COUNT, MSG.common_title_alerts()); + alertsField.setWidth("10%"); + fields.add(alertsField); + + return fields; + } + + @Override + public MetricDisplaySummary copyValues(Record from) { + // we should never need this method - we only go in one direction + // if we ever need this, just have copyValues store an "object" attribute 
whose value is "from" + // which this method then just reads out. Since we don't need this now, save memory by not + // keeping the MetricDisplayValue around + return null; + } + + @Override + public ListGridRecord copyValues(MetricDisplaySummary from) { + MeasurementUtility.formatSimpleMetrics(from); + + ListGridRecord record = new ListGridRecord(); + record.setAttribute(FIELD_SPARKLINE, getCsvMetricsForSparkline(from.getDefinitionId())); + record.setAttribute(FIELD_METRIC_LABEL, from.getLabel()); + record.setAttribute(FIELD_ALERT_COUNT, String.valueOf(from.getAlertCount())); + record.setAttribute(FIELD_MIN_VALUE, getMetricStringValue(from.getMinMetric())); + record.setAttribute(FIELD_MAX_VALUE, getMetricStringValue(from.getMaxMetric())); + record.setAttribute(FIELD_AVG_VALUE, getMetricStringValue(from.getAvgMetric())); + record.setAttribute(FIELD_LAST_VALUE, getMetricStringValue(from.getLastMetric())); + record.setAttribute(FIELD_METRIC_DEF_ID, from.getDefinitionId()); + record.setAttribute(FIELD_METRIC_SCHED_ID, from.getScheduleId()); + record.setAttribute(FIELD_METRIC_UNITS, from.getUnits()); + record.setAttribute(FIELD_METRIC_NAME, from.getMetricName()); + record.setAttribute(FIELD_RESOURCE_ID, resource.getId()); + return record; + } + + private String getCsvMetricsForSparkline(int definitionId) { + StringBuilder sb = new StringBuilder(); + List<MeasurementDataNumericHighLowComposite> selectedMetricsList = getMeasurementsForMeasurementDefId(definitionId); + + for (int i = 0; i < selectedMetricsList.size(); i++) { + MeasurementDataNumericHighLowComposite measurementData = selectedMetricsList.get(i); + if (!Double.isNaN(measurementData.getValue())) { + sb.append((int) measurementData.getValue()); + sb.append(","); + } + } + + if (sb.toString().endsWith(",")) { + sb.setLength(sb.length() - 1); + } + + return sb.toString(); + } + + List<MeasurementDataNumericHighLowComposite> getMeasurementsForMeasurementDefId(int definitionId) { + int selectedIndex = 0; + + // 
find the ordinal position as specified when querying the metrics + for (int i = 0; i < definitionArrayIds.length; i++) { + if (definitionArrayIds[i] == definitionId) { + selectedIndex = i; + break; + } + } + + return metricsDataList.get(selectedIndex); + } + + protected String getMetricStringValue(MetricDisplayValue value) { + return (value != null) ? value.toString() : ""; + } + + @Override + protected Criteria getFetchCriteria(DSRequest request) { + // NOTE: we don't use criterias for this datasource, just return null + return null; + } + + @Override + protected void executeFetch(final DSRequest request, final DSResponse response, final Criteria unused) { + + GWTServiceLookup.getMeasurementScheduleService().findSchedulesForResourceAndType(resource.getId(), + DataType.MEASUREMENT, null, true, new AsyncCallback<ArrayList<MeasurementSchedule>>() { + @Override + public void onSuccess(ArrayList<MeasurementSchedule> measurementSchedules) { + int[] scheduleIds = new int[measurementSchedules.size()]; + int i = 0; + for (MeasurementSchedule measurementSchedule : measurementSchedules) { + scheduleIds[i++] = measurementSchedule.getId(); + } + + final CountDownLatch countDownLatch = CountDownLatch.create(2, new Command() { + + @Override + public void execute() { + response.setData(buildRecords(metricDisplaySummaries)); + processResponse(request.getRequestId(), response); + + new Timer() { + + @Override + public void run() { + BrowserUtility.graphSparkLines(); + } + }.schedule(150); + } + }); + + queryResourceMetrics(resource, measurementUserPrefs.getMetricRangePreferences().begin, + measurementUserPrefs.getMetricRangePreferences().end, countDownLatch); + queryMetricDisplaySummaries(scheduleIds, measurementUserPrefs.getMetricRangePreferences().begin, + measurementUserPrefs.getMetricRangePreferences().end, countDownLatch); + } + + @Override + public void onFailure(Throwable caught) { + CoreGUI.getErrorHandler().handleError("Cannot load schedules", caught); + } + }); + } + + 
private void queryMetricDisplaySummaries(int[] scheduleIds, Long startTime, Long endTime, + final CountDownLatch countDownLatch) { + GWTServiceLookup.getMeasurementChartsService().getMetricDisplaySummariesForResource(resource.getId(), + scheduleIds, startTime, endTime, new AsyncCallback<ArrayList<MetricDisplaySummary>>() { + @Override + public void onSuccess(ArrayList<MetricDisplaySummary> metricDisplaySummaries) { + setMetricDisplaySummaries(metricDisplaySummaries); + countDownLatch.countDown(); + } + + @Override + public void onFailure(Throwable caught) { + CoreGUI.getErrorHandler().handleError("Cannot load metrics", caught); + countDownLatch.countDown(); + } + } + + ); + } + + void setMetricDisplaySummaries(List<MetricDisplaySummary> metricDisplaySummaries) { + this.metricDisplaySummaries = metricDisplaySummaries; + } + + public void queryResourceMetrics(final Resource resource, Long startTime, Long endTime, + final CountDownLatch countDownLatch) { + Set<MeasurementDefinition> definitions = resource.getResourceType().getMetricDefinitions(); + + //build id mapping for measurementDefinition instances Ex. Free Memory -> MeasurementDefinition[100071] + final HashMap<String, MeasurementDefinition> measurementDefMap = new HashMap<String, MeasurementDefinition>(); + for (MeasurementDefinition definition : definitions) { + measurementDefMap.put(definition.getDisplayName(), definition); + } + //bundle definition ids for asynch call. + definitionArrayIds = new int[definitions.size()]; + final String[] displayOrder = new String[definitions.size()]; + measurementDefMap.keySet().toArray(displayOrder); + //sort the charting data ex. Free Memory, Free Swap Space,..System Load + Arrays.sort(displayOrder); + + //organize definitionArrayIds for ordered request on server. 
+ int index = 0; + for (String definitionToDisplay : displayOrder) { + definitionArrayIds[index++] = measurementDefMap.get(definitionToDisplay).getId(); + } + + GWTServiceLookup.getMeasurementDataService().findDataForResource(resource.getId(), definitionArrayIds, + startTime, endTime, NUMBER_OF_METRIC_POINTS, + new AsyncCallback<List<List<MeasurementDataNumericHighLowComposite>>>() { + @Override + public void onFailure(Throwable caught) { + Log.warn("Error retrieving recent metrics charting data for resource [" + resource.getId() + "]:" + + caught.getMessage()); + } + + @Override + public void onSuccess(List<List<MeasurementDataNumericHighLowComposite>> measurementDataList) { + + if (null != measurementDataList && !measurementDataList.isEmpty()) { + metricsDataList = measurementDataList; + countDownLatch.countDown(); + } + } + }); + + } +} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/ResourceMetricAvailabilityView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/ResourceMetricAvailabilityView.java new file mode 100644 index 0000000..9d6b892 --- /dev/null +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/ResourceMetricAvailabilityView.java @@ -0,0 +1,208 @@ +/* + * RHQ Management Platform + * Copyright 2012, Red Hat Middleware LLC, and individual contributors + * as indicated by the @author tags. See the copyright.txt file in the + * distribution for a full listing of individual contributors. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +package org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.table; + +import com.google.gwt.user.client.rpc.AsyncCallback; +import com.smartgwt.client.widgets.form.DynamicForm; +import com.smartgwt.client.widgets.form.fields.FormItem; +import com.smartgwt.client.widgets.form.fields.StaticTextItem; + +import org.rhq.core.domain.measurement.MeasurementUnits; +import org.rhq.core.domain.resource.Resource; +import org.rhq.core.domain.resource.composite.ResourceAvailabilitySummary; +import org.rhq.enterprise.gui.coregui.client.CoreGUI; +import org.rhq.enterprise.gui.coregui.client.components.table.TimestampCellFormatter; +import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; +import org.rhq.enterprise.gui.coregui.client.util.MeasurementConverterClient; +import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; + +/** + * This shows the availability history for a resource. 
+ * + * @author Jay Shaughnessy + * @author John Mazzitelli + * @author Mike Thompson + */ +public class ResourceMetricAvailabilityView extends EnhancedVLayout { + + private Resource resource; + private StaticTextItem currentField; + private StaticTextItem availField; + private StaticTextItem availTimeField; + private StaticTextItem downField; + private StaticTextItem downTimeField; + private StaticTextItem disabledField; + private StaticTextItem disabledTimeField; + private StaticTextItem failureCountField; + private StaticTextItem disabledCountField; + private StaticTextItem mtbfField; + private StaticTextItem mttrField; + private StaticTextItem unknownField; + private StaticTextItem currentTimeField; + + public ResourceMetricAvailabilityView(Resource resource) { + super(); + + this.resource = resource; + + setWidth100(); + setHeight(165); + } + + @Override + protected void onInit() { + super.onInit(); + + addMember(createSummaryForm()); + } + + private DynamicForm createSummaryForm() { + DynamicForm form = new DynamicForm(); + form.setWidth100(); + form.setAutoHeight(); + form.setMargin(10); + form.setNumCols(4); + + // row 1 + currentField = new StaticTextItem("current", MSG.view_resource_monitor_availability_currentStatus()); + currentField.setWrapTitle(false); + currentField.setColSpan(4); + + // row 2 + availField = new StaticTextItem("avail", MSG.view_resource_monitor_availability_availability()); + availField.setWrapTitle(false); + prepareTooltip(availField, MSG.view_resource_monitor_availability_availability_tooltip()); + + availTimeField = new StaticTextItem("availTime", MSG.view_resource_monitor_availability_uptime()); + availTimeField.setWrapTitle(false); + prepareTooltip(availTimeField, MSG.view_resource_monitor_availability_uptime_tooltip()); + + // row 3 + downField = new StaticTextItem("down", MSG.view_resource_monitor_availability_down()); + downField.setWrapTitle(false); + prepareTooltip(downField, 
MSG.view_resource_monitor_availability_down_tooltip()); + + downTimeField = new StaticTextItem("downTime", MSG.view_resource_monitor_availability_downtime()); + downTimeField.setWrapTitle(false); + prepareTooltip(downTimeField, MSG.view_resource_monitor_availability_downtime_tooltip()); + + // row 4 + disabledField = new StaticTextItem("disabled", MSG.view_resource_monitor_availability_disabled()); + disabledField.setWrapTitle(false); + prepareTooltip(disabledField, MSG.view_resource_monitor_availability_disabled_tooltip()); + + disabledTimeField = new StaticTextItem("disabledTime", MSG.view_resource_monitor_availability_disabledTime()); + disabledTimeField.setWrapTitle(false); + prepareTooltip(disabledTimeField, MSG.view_resource_monitor_availability_disabledTime_tooltip()); + + // row 5 + failureCountField = new StaticTextItem("failureCount", MSG.view_resource_monitor_availability_numFailures()); + failureCountField.setWrapTitle(false); + prepareTooltip(failureCountField, MSG.view_resource_monitor_availability_numFailures_tooltip()); + + disabledCountField = new StaticTextItem("disabledCount", MSG.view_resource_monitor_availability_numDisabled()); + disabledCountField.setWrapTitle(false); + prepareTooltip(disabledCountField, MSG.view_resource_monitor_availability_numDisabled_tooltip()); + + // row 6 + mtbfField = new StaticTextItem("mtbf", MSG.view_resource_monitor_availability_mtbf()); + mtbfField.setWrapTitle(false); + prepareTooltip(mtbfField, MSG.view_resource_monitor_availability_mtbf_tooltip()); + + mttrField = new StaticTextItem("mttr", MSG.view_resource_monitor_availability_mttr()); + mttrField.setWrapTitle(false); + prepareTooltip(mttrField, MSG.view_resource_monitor_availability_mttr_tooltip()); + + // row 7 + unknownField = new StaticTextItem("unknown"); + unknownField.setWrapTitle(false); + unknownField.setColSpan(4); + unknownField.setShowTitle(false); + + // row 8 + currentTimeField = new StaticTextItem("currentTime"); + 
currentTimeField.setWrapTitle(false); + currentTimeField.setColSpan(4); + currentTimeField.setShowTitle(false); + + form.setItems(currentField, availField, availTimeField, downField, downTimeField, disabledField, + disabledTimeField, failureCountField, disabledCountField, mtbfField, mttrField, unknownField, + currentTimeField); + + reloadSummaryData(); + + return form; + } + + private void reloadSummaryData() { + GWTServiceLookup.getResourceService().getResourceAvailabilitySummary(resource.getId(), + new AsyncCallback<ResourceAvailabilitySummary>() { + + @Override + public void onSuccess(ResourceAvailabilitySummary result) { + + currentField.setValue(MSG.view_resource_monitor_availability_currentStatus_value(result + .getCurrent().getName(), TimestampCellFormatter.format(result.getLastChange().getTime()))); + availField.setValue(MeasurementConverterClient.format(result.getUpPercentage(), + MeasurementUnits.PERCENTAGE, true)); + availTimeField.setValue(MeasurementConverterClient.format((double) result.getUpTime(), + MeasurementUnits.MILLISECONDS, true)); + downField.setValue(MeasurementConverterClient.format(result.getDownPercentage(), + MeasurementUnits.PERCENTAGE, true)); + downTimeField.setValue(MeasurementConverterClient.format((double) result.getDownTime(), + MeasurementUnits.MILLISECONDS, true)); + disabledField.setValue(MeasurementConverterClient.format(result.getDisabledPercentage(), + MeasurementUnits.PERCENTAGE, true)); + disabledTimeField.setValue(MeasurementConverterClient.format((double) result.getDisabledTime(), + MeasurementUnits.MILLISECONDS, true)); + failureCountField.setValue(result.getFailures()); + disabledCountField.setValue(result.getDisabled()); + mtbfField.setValue(MeasurementConverterClient.format((double) result.getMTBF(), + MeasurementUnits.MILLISECONDS, true)); + mttrField.setValue(MeasurementConverterClient.format((double) result.getMTTR(), + MeasurementUnits.MILLISECONDS, true)); + + if (result.getUnknownTime() > 0L) { + 
unknownField.setValue(MSG.view_resource_monitor_availability_unknown(MeasurementConverterClient + .format((double) result.getUnknownTime(), MeasurementUnits.MILLISECONDS, true))); + } else { + unknownField.setValue(""); + } + + currentTimeField.setValue(MSG.view_resource_monitor_availability_currentAsOf(TimestampCellFormatter + .format(result.getCurrentTime()))); + } + + @Override + public void onFailure(Throwable caught) { + currentField.setValue(MSG.common_label_error()); + CoreGUI.getErrorHandler() + .handleError(MSG.view_resource_monitor_availability_summaryError(), caught); + } + }); + } + + private void prepareTooltip(FormItem item, String tooltip) { + item.setHoverWidth(400); + item.setPrompt(tooltip); + } + +} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/searchbar/BasicSearchStrategy.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/searchbar/BasicSearchStrategy.java index 1edd076..94fd1e8 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/searchbar/BasicSearchStrategy.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/searchbar/BasicSearchStrategy.java @@ -26,7 +26,6 @@ import com.smartgwt.client.data.fields.DataSourceTextField; import com.smartgwt.client.widgets.form.fields.events.KeyUpEvent; import com.smartgwt.client.widgets.grid.HoverCustomizer; import com.smartgwt.client.widgets.grid.ListGrid; -import com.smartgwt.client.widgets.grid.ListGridField; import com.smartgwt.client.widgets.grid.ListGridRecord; import com.smartgwt.client.widgets.grid.events.RecordClickEvent;
@@ -89,17 +88,14 @@ public class BasicSearchStrategy extends AbstractSearchStrategy { */ @Override public void onRecordClick(RecordClickEvent event) { - Log.debug("BasicSearchStrategy click");
String kind = event.getRecord().getAttribute(ATTR_KIND); String searchExpression;
if (kind.equals("SAVED") || kind.equals("GLOBAL")) { - Log.debug("Saved or Global Search Click"); searchExpression = event.getRecord().getAttribute(ATTR_PATTERN);
} else { - Log.debug("Regular Search Click"); searchExpression = event.getRecord().getAttribute(ATTR_NAME); }
@@ -112,14 +108,12 @@ public class BasicSearchStrategy extends AbstractSearchStrategy {
@Override public void searchFocusHandler() { - Log.debug("focus in BasicSearchStrategy"); String searchExpression = searchBar.getSearchTextItem().getValueAsString(); doSearch(searchExpression); }
@Override public void searchKeyUpHandler(KeyUpEvent keyUpEvent) { - Log.debug("Keyup in BasicSearchStrategy: " + keyUpEvent.getKeyName()); String searchExpression = searchBar.getSearchTextItem().getValueAsString(); doSearch(searchExpression); } @@ -131,20 +125,16 @@ public class BasicSearchStrategy extends AbstractSearchStrategy {
private void doSearch(String searchExpression) { if (isSearchInProgress) { - Log.debug("Adding pending search [" + searchExpression + "]"); pendingSearchExpression = (null == searchExpression) ? "" : searchExpression; return; }
- Log.debug("Search Start"); isSearchInProgress = true;
if (null == searchExpression || searchExpression.isEmpty()) { - Log.debug("Empty Search expression"); getSearchSuggestions(SearchSubsystem.RESOURCE, null, 0);
} else { - Log.debug("doSearch: " + searchExpression); getSearchSuggestions(SearchSubsystem.RESOURCE, searchBar.getSearchTextItem().getValueAsString(), searchBar .getSearchTextItem().getValueAsString().length()); } @@ -154,7 +144,6 @@ public class BasicSearchStrategy extends AbstractSearchStrategy {
final long suggestStart = System.currentTimeMillis();
- Log.debug("Searching for: " + expression);
searchService.getTabAwareSuggestions(searchSubsystem, expression, caretPosition, null, new AsyncCallback<List<SearchSuggestion>>() { @@ -179,7 +168,6 @@ public class BasicSearchStrategy extends AbstractSearchStrategy { ds.setFields(idField, valueField);
searchBarPickListGrid.setDataSource(ds); - ListGridField[] fields = searchBarPickListGrid.getAllFields(); searchBarPickListGrid.getField(ATTR_VALUE).setShowHover(true); searchBarPickListGrid.getField(ATTR_VALUE).setHoverCustomizer(new HoverCustomizer() {
@@ -223,7 +211,7 @@ public class BasicSearchStrategy extends AbstractSearchStrategy { searchBarPickListGrid.setData(new ListGridRecord[] {}); searchBarPickListGrid.fetchData(); } catch (Exception e) { - Log.debug("Caught exception on fetchData: " + e); + Log.info("Caught exception on fetchData: " + e); }
long suggestFetchTime = System.currentTimeMillis() - suggestStart; @@ -250,7 +238,6 @@ public class BasicSearchStrategy extends AbstractSearchStrategy {
@Override public void onFailure(Throwable caught) { - Log.debug("Search End"); isSearchInProgress = false; pendingSearchExpression = null; CoreGUI.getErrorHandler().handleError(MSG.view_searchBar_suggest_failSuggest(), caught); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/util/enhanced/Enhanced.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/util/enhanced/Enhanced.java index 10505e9..a069a5d 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/util/enhanced/Enhanced.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/util/enhanced/Enhanced.java @@ -31,5 +31,5 @@ import org.rhq.enterprise.gui.coregui.client.Messages; */ public interface Enhanced {
- Messages MSG = CoreGUI.getMessages(); + final static Messages MSG = CoreGUI.getMessages(); } diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties index 19d3fa5..abcfa62 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties @@ -20,6 +20,10 @@ chart_hover_period_label = Period chart_hover_start_label = Start chart_hover_time_format = %I:%M:%S %p chart_ie_not_supported = Charting is not available for this browser. +chart_metrics= Metrics +chart_metrics_collapse_tooltip= Click here to see additional tabular availability data. +chart_metrics_expand_tooltip= Click here to collapse additional availability detail. +chart_metrics_sparkline_header= Chart chart_no_data_label = No Data chart_single_value_label = Value chart_slider_button_bar_day = Day @@ -1792,6 +1796,7 @@ view_messageCenter_messageTime = Time view_messageCenter_messageTitle = Message Center view_messageCenter_noRecentMessages = No Recent Messages view_messageCenter_stackTraceFollows = --- STACK TRACE FOLLOWS --- +view_metric_addToDashboard = Add to Dashboard view_metric_traits = Traits view_metric_viewTraitHistory = Value History for Trait [{0}] view_operationHistoryDetails_dateCompleted = Date Completed @@ -2111,7 +2116,7 @@ view_titleBar_common_updateTagsSuccessful = The tags for [{0}] have been updated view_titleBar_group_failInfo = Failed to get general info on group [{0}] with ID [{1}] view_titleBar_group_summary_collapsedTooltip = Click to show more details for this group view_titleBar_group_summary_expandedTooltip = Click to hide details for this group -view_tree_common_contextMenu_addChartToDashboard = Add chart to dashboard [{0}] 
+view_tree_common_contextMenu_addChartToDashboard = Add Graph to Dashboard [{0}] view_tree_common_contextMenu_editPluginConfiguration = Edit [{0}] Plugin Configuration view_tree_common_contextMenu_editResourceConfiguration = Edit [{0}] Resource Configuration view_tree_common_contextMenu_groupGraph = Group Metric Graph diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties index f71f907..3a79eeb 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties @@ -48,6 +48,10 @@ chart_hover_date_format = %d.%m.%y ##chart_hover_start_label = Start chart_hover_time_format = %H:%M:%S ##chart_ie_not_supported = Charting is not available for this browser. +##chart_metrics= Metrics +##chart_metrics_collapse_tooltip= Click here to see additional tabular availability data. +##chart_metrics_expand_tooltip= Click here to collapse additional availability detail. 
+##chart_metrics_sparkline_header= Chart ##chart_no_data_label = No Data ##chart_single_value_label = Value ##chart_slider_button_bar_day = Day @@ -1804,6 +1808,7 @@ view_messageCenter_messageTime = Äas view_messageCenter_messageTitle = Centrum zpráv view_messageCenter_noRecentMessages = Ŝádné nové zprávy view_messageCenter_stackTraceFollows = --- VÃPIS ZÃSOBNÃKU NÃÅœE --- +##view_metric_addToDashboard = Add to Dashboard view_metric_traits = Vlastnosti view_metric_viewTraitHistory = Historie hodnot pro vlastnost [{0}] view_operationHistoryDetails_dateCompleted = Datum ukonÄenà diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties index 524dcc0..e79504c 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties @@ -23,6 +23,10 @@ chart_hover_period_label = Zeitraum chart_hover_start_label = Start chart_hover_time_format = %H:%M:%S chart_ie_not_supported = Charting ist bei diesem Browser nicht unterstÃŒtzt +##chart_metrics= Metrics +##chart_metrics_collapse_tooltip= Click here to see additional tabular availability data. +##chart_metrics_expand_tooltip= Click here to collapse additional availability detail. 
+##chart_metrics_sparkline_header= Chart chart_no_data_label = Keine Daten vorhanden ##chart_single_value_label = Value chart_slider_button_bar_day = Tag @@ -1591,6 +1595,7 @@ view_messageCenter_messageTime = Zeitpukt view_messageCenter_messageTitle = Nachrichtencenter view_messageCenter_noRecentMessages = Keine aktuellen Nachrichten view_messageCenter_stackTraceFollows = --- STACK TRACE FOLGT --- +##view_metric_addToDashboard = Add to Dashboard view_metric_traits = Traits view_metric_viewTraitHistory = Werteverlauf fÃŒr Trait [{0}] ##view_operationCreateWizard_error_scheduleOperationFailure = Failed to schedule operation execution. diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties index cb0c35f..3f1f701 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties @@ -24,6 +24,10 @@ ##chart_hover_date_format = %m/%d/%y ##chart_hover_time_format = %I:%M:%S %p ##chart_ie_not_supported = Charting is not available for this browser. +##chart_metrics= Metrics +##chart_metrics_collapse_tooltip= Click here to see additional tabular availability data. +##chart_metrics_expand_tooltip= Click here to collapse additional availability detail. 
+##chart_metrics_sparkline_header= Chart ##chart_no_data_label = No Data ##chart_single_value_label = Value ##chart_slider_button_bar_minute = Min @@ -1775,6 +1779,7 @@ view_messageCenter_messageTime = æé view_messageCenter_messageTitle = ã¡ãã»ãŒãžã»ã³ã¿ãŒ view_messageCenter_noRecentMessages = æè¿ã®ã¡ãã»ãŒãžã¯ãããŸãã view_messageCenter_stackTraceFollows = --- ã¹ã¿ãã¯ãã¬ãŒã¹ --- +##view_metric_addToDashboard = Add to Dashboard view_metric_traits = ãã¬ã€ã view_metric_viewTraitHistory = ãã¬ã€ã [{0}] ã®ããã®å€ã®å±¥æŽ view_operationHistoryDetails_dateCompleted = å®äºæ¥ diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties index 32dc73d..3d40d83 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties @@ -22,6 +22,10 @@ ##chart_hover_date_format = %m/%d/%y ##chart_hover_time_format = %I:%M:%S %p ##chart_ie_not_supported = Charting is not available for this browser. +##chart_metrics= Metrics +##chart_metrics_collapse_tooltip= Click here to see additional tabular availability data. +##chart_metrics_expand_tooltip= Click here to collapse additional availability detail. 
+##chart_metrics_sparkline_header= Chart ##chart_no_data_label = No Data ##chart_single_value_label = Value ##chart_slider_button_bar_minute = Min @@ -1496,6 +1500,7 @@ view_messageCenter_messageTime = ìê° view_messageCenter_messageTitle = ë©ìì§ ìŒí° view_messageCenter_noRecentMessages = ìµê·Œ ë©ìì§ë ììµëë€ view_messageCenter_stackTraceFollows = --- ì€í ì¶ì --- +##view_metric_addToDashboard = Add to Dashboard view_metric_traits = í¹ì± view_metric_viewTraitHistory = í¹ì± [{0}]ì ëí ê° êž°ë¡ view_operationHistoryDetails_dateCompleted = ìë£ìŒ diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties index f4419e1..de43fa4 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties @@ -21,6 +21,10 @@ ##chart_hover_date_format = %m/%d/%y ##chart_hover_time_format = %I:%M:%S %p ##chart_ie_not_supported = Charting is not available for this browser. +##chart_metrics= Metrics +##chart_metrics_collapse_tooltip= Click here to see additional tabular availability data. +##chart_metrics_expand_tooltip= Click here to collapse additional availability detail. 
+##chart_metrics_sparkline_header= Chart ##chart_no_data_label = No Data ##chart_single_value_label = Value ##chart_slider_button_bar_minute = Min @@ -1808,6 +1812,7 @@ view_messageCenter_messageTime = Tempo view_messageCenter_messageTitle = Centro de Mensagens view_messageCenter_noRecentMessages = N\u00E3o existem Mensagens Recentes view_messageCenter_stackTraceFollows = --- STACK TRACE FOLLOWS --- +##view_metric_addToDashboard = Add to Dashboard view_metric_traits = Traits view_metric_viewTraitHistory = Value History for Trait [{0}] view_operationCreateWizard_button_execute = Execute diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties index d75b76e..669d8d6 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties @@ -21,6 +21,10 @@ ##chart_hover_date_format = %m/%d/%y ##chart_hover_time_format = %I:%M:%S %p ##chart_ie_not_supported = Charting is not available for this browser. +##chart_metrics= Metrics +##chart_metrics_collapse_tooltip= Click here to see additional tabular availability data. +##chart_metrics_expand_tooltip= Click here to collapse additional availability detail. 
+##chart_metrics_sparkline_header= Chart ##chart_no_data_label = No Data ##chart_single_value_label = Value ##chart_slider_button_bar_minute = Min @@ -1728,6 +1732,7 @@ #view_messageCenter_messageTitle = Message Center #view_messageCenter_noRecentMessages = No Recent Messages #view_messageCenter_stackTraceFollows = --- STACK TRACE FOLLOWS --- +##view_metric_addToDashboard = Add to Dashboard #view_metric_traits = Traits #view_metric_viewTraitHistory = Value History for Trait [{0}] #view_operationHistoryDetails_dateCompleted = Date Completed diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties index a9a24df..b15023f 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties @@ -21,6 +21,10 @@ ##chart_hover_date_format = %m/%d/%y ##chart_hover_time_format = %I:%M:%S %p ##chart_ie_not_supported = Charting is not available for this browser. +##chart_metrics= Metrics +##chart_metrics_collapse_tooltip= Click here to see additional tabular availability data. +##chart_metrics_expand_tooltip= Click here to collapse additional availability detail. 
+##chart_metrics_sparkline_header= Chart ##chart_no_data_label = No Data ##chart_single_value_label = Value ##chart_slider_button_bar_minute = Min @@ -1776,6 +1780,7 @@ view_messageCenter_messageTime = \u65f6\u95f4 view_messageCenter_messageTitle = \u6d88\u606f\u4e2d\u5fc3 view_messageCenter_noRecentMessages = \u65e0\u8fd1\u671f\u6d88\u606f view_messageCenter_stackTraceFollows = --- \u540e\u9762\u662f\u5f02\u5e38\u4fe1\u606f --- +##view_metric_addToDashboard = Add to Dashboard view_metric_traits = \u7279\u5f81 view_metric_viewTraitHistory = Value History for Trait [{0}] view_operationHistoryDetails_dateCompleted = \u5b8c\u6210\u65e5\u671f diff --git a/modules/enterprise/gui/coregui/src/main/webapp/js/rhq.js b/modules/enterprise/gui/coregui/src/main/webapp/js/rhq.js index 40c7ca8..ef65e69 100644 --- a/modules/enterprise/gui/coregui/src/main/webapp/js/rhq.js +++ b/modules/enterprise/gui/coregui/src/main/webapp/js/rhq.js @@ -38,9 +38,10 @@ if (!window.console.log) window.console.log = function () { * @param singleValueLabel * @param chartXaxisTimeFormatHours * @param chartXaxisTimeFormatHoursMinutes + * @param showLegend * @constructor */ -var ChartContext = function (chartId, chartHeight, metricsData, xAxisLabel, chartTitle, yAxisUnits, minChartTitle, avgChartTitle, peakChartTitle, dateLabel, timeLabel, downLabel, unknownLabel, noDataLabel, hoverStartLabel, hoverEndLabel, hoverPeriodLabel, hoverBarLabel, chartHoverTimeFormat, chartHoverDateFormat, isPortalGraph, portalId, buttonBarDateTimeFormat, singleValueLabel, chartXaxisTimeFormatHours, chartXaxisTimeFormatHoursMinutes) { +var ChartContext = function (chartId, chartHeight, metricsData, xAxisLabel, chartTitle, yAxisUnits, minChartTitle, avgChartTitle, peakChartTitle, dateLabel, timeLabel, downLabel, unknownLabel, noDataLabel, hoverStartLabel, hoverEndLabel, hoverPeriodLabel, hoverBarLabel, chartHoverTimeFormat, chartHoverDateFormat, isPortalGraph, portalId, buttonBarDateTimeFormat, singleValueLabel, 
chartXaxisTimeFormatHours, chartXaxisTimeFormatHoursMinutes, hideLegend) { "use strict"; if (!(this instanceof ChartContext)) { throw new Error("ChartContext function cannot be called as a function.") @@ -78,6 +79,7 @@ var ChartContext = function (chartId, chartHeight, metricsData, xAxisLabel, char this.buttonBarDateTimeFormat = buttonBarDateTimeFormat; this.chartXaxisTimeFormatHours = chartXaxisTimeFormatHours; this.chartXaxisTimeFormatHoursMinutes = chartXaxisTimeFormatHoursMinutes; + this.hideLegend = hideLegend;
}, /**
commit f126a952186530d6e0e84afca704862e9a0a8f60 Author: Jay Shaughnessy jshaughn@redhat.com Date: Thu Jul 25 15:45:30 2013 -0400
More work on working new bundle group stuff into role edit view. Also, continued cleanup of Messages.properties.
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/authz/Permission.java b/modules/core/domain/src/main/java/org/rhq/core/domain/authz/Permission.java index 5b009b5..88fb948 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/authz/Permission.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/authz/Permission.java @@ -228,15 +228,22 @@ public enum Permission {
public static final Set<Permission> GLOBAL_ALL = new HashSet<Permission>(); public static final Set<Permission> RESOURCE_ALL = new HashSet<Permission>(); + public static final Set<Permission> BUNDLE_ALL = new HashSet<Permission>(); static { for (Permission permission : Permission.values()) { switch (permission.getTarget()) { case GLOBAL: GLOBAL_ALL.add(permission); + if (permission.name().contains("BUNDLE")) { + BUNDLE_ALL.add(permission); + } break; case RESOURCE: RESOURCE_ALL.add(permission); break; + case BUNDLE: + BUNDLE_ALL.add(permission); + break; } } } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/PermissionsEditor.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/PermissionsEditor.java index cde2c6d..5e80d82 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/PermissionsEditor.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/PermissionsEditor.java @@ -101,6 +101,11 @@ public class PermissionsEditor extends EnhancedVStack { this.resourcePermissionsGrid = createResourcePermissionsGrid(); addMember(this.resourcePermissionsGrid);
+ Label bundleGroupPermissionsHeader = new Label("<h4>" + + MSG.view_adminRoles_permissions_bundleGroupPermissions() + "</h4>"); + bundleGroupPermissionsHeader.setHeight(17); + addMember(bundleGroupPermissionsHeader); + this.bundleGroupPermissionsGrid = createBundleGroupPermissionsGrid(); addMember(this.bundleGroupPermissionsGrid);
@@ -135,6 +140,13 @@ public class PermissionsEditor extends EnhancedVStack { record.setAttribute("writeAuthorized", this.selectedPermissions.contains(writePermission)); }
+ ListGridRecord[] bundleGroupPermissionRecords = this.bundleGroupPermissionsGrid.getRecords(); + for (ListGridRecord record : bundleGroupPermissionRecords) { + String permissionName = record.getAttribute("name"); + Permission permission = Permission.valueOf(permissionName); + record.setAttribute("authorized", this.selectedPermissions.contains(permission)); + } + markForRedraw(); }
@@ -216,7 +228,6 @@ public class PermissionsEditor extends EnhancedVStack { MSG.view_adminRoles_permissions_permDesc_deployBundles()); records.add(record);
- record = createPermissionRecord(MSG.view_adminRoles_permissions_perm_viewUsers(), "global/User", Permission.VIEW_USERS, MSG.view_adminRoles_permissions_permDesc_viewUsers()); records.add(record); @@ -481,6 +492,12 @@ public class PermissionsEditor extends EnhancedVStack { messageString = MSG.view_adminRoles_permissions_autoselecting_configureWrite_implied(); redrawRequired = true; } + } else if (permission == Permission.MANAGE_BUNDLE) { + // MANAGE_BUNDLE implies all other bundle-related perms + if (this.selectedPermissions.addAll(Permission.BUNDLE_ALL)) { + messageString = MSG.view_adminRoles_permissions_autoselecting_manageBundle_implied(); + redrawRequired = true; + } } } else { this.selectedPermissions.remove(permission); @@ -499,14 +516,14 @@ public class PermissionsEditor extends EnhancedVStack { } }
- private ListGridRecord createPermissionRecord(String displayName, String icon, Permission globalPermission, + private ListGridRecord createPermissionRecord(String displayName, String icon, Permission permission, String description) { ListGridRecord record = new ListGridRecord(); record.setAttribute("displayName", displayName); record.setAttribute("icon", icon); - record.setAttribute("name", globalPermission.name()); + record.setAttribute("name", permission.name()); record.setAttribute("description", description); - record.setAttribute("authorized", this.selectedPermissions.contains(globalPermission)); + record.setAttribute("authorized", this.selectedPermissions.contains(permission));
return record; } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleEditView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleEditView.java index 63b6e3f..9c2dc96 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleEditView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleEditView.java @@ -268,7 +268,7 @@ public class RoleEditView extends AbstractRecordEditor<RolesDataSource> implemen onItemChanged(); } }); - updateTab(this.resourceGroupsTab, this.resourceGroupSelector); + updateTab(this.bundleGroupsTab, this.bundleGroupSelector);
}
@@ -393,6 +393,9 @@ public class RoleEditView extends AbstractRecordEditor<RolesDataSource> implemen if (this.ldapGroupSelector != null) { this.ldapGroupSelector.reset(); } + if (this.bundleGroupSelector != null) { + this.bundleGroupSelector.reset(); + } }
private static void updateTab(Tab tab, Canvas content) { diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties index 1f9c73c..25f5171 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties @@ -503,8 +503,10 @@ view_adminRoles_ldapGroupsReadOnly = LDAP group data is read only view_adminRoles_noLdap = The LDAP security integration is not configured. To configure LDAP, go to <a {0}>{1}</a>. view_adminRoles_permissions_autoselecting_configureRead_implied = Autodeselected CONFIGURE_WRITE permission, since lack of CONFIGURE_READ implies lack of it... view_adminRoles_permissions_autoselecting_configureWrite_implied = Autoselected CONFIGURE_READ permission, since CONFIGURE_WRITE implies it... +view_adminRoles_permissions_autoselecting_manageBundle_implied = Autoselected unselected permissions, since MANAGE_BUNDLE implies all other bundle permissions... view_adminRoles_permissions_autoselecting_manageInventory_implied = Autoselected unselected Resource permissions, since MANAGE_INVENTORY implies all Resource permissions... view_adminRoles_permissions_autoselecting_manageSecurity_implied = Autoselected unselected permissions, since MANAGE_SECURITY implies all other permissions... +view_adminRoles_permissions_bundleGroupPermissions = Bundle Group Permissions view_adminRoles_permissions_globalPermissions = Global Permissions view_adminRoles_permissions_illegalDeselectionDueToCorrespondingWritePermSelection = {0} read permission cannot be deselected, unless the {0} write permission, which implies the read permission, is deselected first. 
view_adminRoles_permissions_illegalDeselectionDueToManageInventorySelection = {0} permission cannot be deselected, unless Manage Inventory, which implies all Resource permissions, is deselected first. diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties index 4e73829..8982a54 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties @@ -522,12 +522,14 @@ view_adminRoles_ldapGroupsReadOnly = data LDAP skupiny jsou jen pro Ätenà view_adminRoles_noLdap = Integrace LDAP nenà nakonfigurována. K nastavenà ÅÃzenà bezpeÄnosti pÅes LDAP prosÃm navÅ¡tivte <a {0}>{1}</a>. view_adminRoles_permissions_autoselecting_configureRead_implied = Automaticky odoznaÄeno CONFIGURE_WRITE povolenÃ, protoÅŸe absence CONFIGURE_READ to implikuje... view_adminRoles_permissions_autoselecting_configureWrite_implied = Automaticky oznaÄeno CONFIGURE_READ povolenÃ, protoÅŸe CONFIGURE_WRITE jej implikuje... +##view_adminRoles_permissions_autoselecting_manageBundle_implied = Autoselected unselected permissions, since MANAGE_BUNDLE implies all other bundle permissions... view_adminRoles_permissions_autoselecting_manageInventory_implied = Automaticky oznaÄeny neoznaÄené zdroje, protoÅŸe MANAGE_INVENTORY implikuje povolenà na vÅ¡ech zdrojÃch... view_adminRoles_permissions_autoselecting_manageSecurity_implied = Automaticky oznaÄeny neoznaÄená povolenÃ, protoÅŸe MANAGE_SECURITY implikuje povolenà na vÅ¡ech práv... 
+view_adminRoles_permissions_bundleGroupPermissions = Bundle Group Permissions view_adminRoles_permissions_globalPermissions = Globálnà povolenà -view_adminRoles_permissions_illegalDeselectionDueToCorrespondingWritePermSelection = {0} read permission cannot be deselected, unless the {0} write permission, which implies the read permission, is deselected first. -view_adminRoles_permissions_illegalDeselectionDueToManageInventorySelection = {0} permission cannot be deselected, unless Manage Inventory, which implies all Resource permissions, is deselected first. -view_adminRoles_permissions_illegalDeselectionDueToManageSecuritySelection = {0} permission cannot be deselected, unless the Manage Security permission, which implies all other permissions, is deselected first. +##view_adminRoles_permissions_illegalDeselectionDueToCorrespondingWritePermSelection = {0} read permission cannot be deselected, unless the {0} write permission, which implies the read permission, is deselected first. +##view_adminRoles_permissions_illegalDeselectionDueToManageInventorySelection = {0} permission cannot be deselected, unless Manage Inventory, which implies all Resource permissions, is deselected first. +##view_adminRoles_permissions_illegalDeselectionDueToManageSecuritySelection = {0} permission cannot be deselected, unless the Manage Security permission, which implies all other permissions, is deselected first. view_adminRoles_permissions_isAuthorized = Autorizován? view_adminRoles_permissions_isRead = ÄtenÃ? view_adminRoles_permissions_isWrite = Zápis? 
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties index 0db1d05..8b086a7 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties @@ -499,7 +499,16 @@ view_adminRoles_globalPerms = Applikationsweite Rechte view_adminRoles_ldapGroups = LDAP-Gruppen view_adminRoles_ldapGroupsReadOnly = LDAP Gruppendaten können nur gelesen werden view_adminRoles_noLdap = Die LDAP-Integration ist nicht konfiguriert. Um LDAP zu konfigurieren, wechseln sie zu <a {0}>{1}</a>. +##view_adminRoles_permissions_autoselecting_configureRead_implied = Autodeselected CONFIGURE_WRITE permission, since lack of CONFIGURE_READ implies lack of it... +##view_adminRoles_permissions_autoselecting_configureWrite_implied = Autoselected CONFIGURE_READ permission, since CONFIGURE_WRITE implies it... +##view_adminRoles_permissions_autoselecting_manageBundle_implied = Autoselected unselected permissions, since MANAGE_BUNDLE implies all other bundle permissions... +##view_adminRoles_permissions_autoselecting_manageInventory_implied = Autoselected unselected Resource permissions, since MANAGE_INVENTORY implies all Resource permissions... +##view_adminRoles_permissions_autoselecting_manageSecurity_implied = Autoselected unselected permissions, since MANAGE_SECURITY implies all other permissions... 
+view_adminRoles_permissions_bundleGroupPermissions = Bundle Group Permissions view_adminRoles_permissions_globalPermissions = Globale Rechte +##view_adminRoles_permissions_illegalDeselectionDueToCorrespondingWritePermSelection = {0} read permission cannot be deselected, unless the {0} write permission, which implies the read permission, is deselected first. +##view_adminRoles_permissions_illegalDeselectionDueToManageInventorySelection = {0} permission cannot be deselected, unless Manage Inventory, which implies all Resource permissions, is deselected first. +##view_adminRoles_permissions_illegalDeselectionDueToManageSecuritySelection = {0} permission cannot be deselected, unless the Manage Security permission, which implies all other permissions, is deselected first. view_adminRoles_permissions_isAuthorized = Berechtigt? view_adminRoles_permissions_isRead = Lesen? view_adminRoles_permissions_isWrite = Schreiben? diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties index 626243e..a78ab58 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties @@ -500,8 +500,10 @@ view_adminRoles_ldapGroupsReadOnly = LDAPã°ã«ãŒãããŒã¿ã¯èªã¿åºãå° view_adminRoles_noLdap = LDAPã»ãã¥ãªãã£ã®çµ±åã¯æ§æãããŠããŸãããLDAPãæ§æããã«ã¯ã <a {0}>{1}</a>ã«è¡ã£ãŠãã ããã view_adminRoles_permissions_autoselecting_configureRead_implied = CONFIGURE_WRITEæš©éãèªåçã«éžæãããŸããããªããªããCONFIGURE_READãç¡ãããšããããæ瀺ããŠããããã§ãã view_adminRoles_permissions_autoselecting_configureWrite_implied = CONFIGURE_READæš©éãèªåçã«éžæãããŸããããªããªããCONFIGURE_WRITEããããæ瀺ããŠããããã§ãã +##view_adminRoles_permissions_autoselecting_manageBundle_implied = Autoselected 
unselected permissions, since MANAGE_BUNDLE implies all other bundle permissions... view_adminRoles_permissions_autoselecting_manageInventory_implied = æªéžæã®ãªãœãŒã¹æš©éãèªåçã«éžæãããŸããããªããªããMANAGE_INVENTORYã¯ãã¹ãŠã®ãªãœãŒã¹æš©éãæ瀺ããŠããããã§ãã view_adminRoles_permissions_autoselecting_manageSecurity_implied = æªéžæã®æš©éãèªåçã«éžæãããŸããããªããªããMANAGE_SECURITYã¯ä»ã®ãã¹ãŠã®æš©éãæ瀺ããŠããããã§ãã +view_adminRoles_permissions_bundleGroupPermissions = Bundle Group Permissions view_adminRoles_permissions_globalPermissions = ã°ããŒãã«æš©é view_adminRoles_permissions_illegalDeselectionDueToCorrespondingWritePermSelection = {0} èªã¿åãæš©éã¯éžæ解é€ã§ããŸããã§ãããèªã¿åãæš©éãæ瀺ãã {0} æžã蟌ã¿æš©éãæåã«éžæ解é€ãããªããã°ããã¯ã§ããŸããã view_adminRoles_permissions_illegalDeselectionDueToManageInventorySelection = {0} æš©éã¯éžæ解é€ã§ããŸããã§ãããä»ã®ãã¹ãŠã®ãªãœãŒã¹ãæ瀺ãã管çã€ã³ãã³ããªãæåã«éžæ解é€ãããªããã°ããã¯ã§ããŸããã diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties index 0da2a6e..a00f560 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties @@ -447,12 +447,21 @@ view_adminRoles_failLdapGroupsRole = ìí ì ìíŽ ì¬ì©ê°ë¥í LDAP ê·ž view_adminRoles_failRoles = ìí ì ê±°ì ì€íšíìµëë€. view_adminRoles_globalPerms = êžë¡ë² ê¶í view_adminRoles_ldapGroups = LDAP 귞룹 +##view_adminRoles_ldapGroupsReadOnly = LDAP group data is read only view_adminRoles_noLdap = LDAP 볎ì íµí©ìŽ ì€ì ëì§ ìììµëë€. LDAPì 구ì±íë €ë©Ž <a {0}>{1}</a>ë¡ ê°ììì. +##view_adminRoles_permissions_autoselecting_configureRead_implied = Autodeselected CONFIGURE_WRITE permission, since lack of CONFIGURE_READ implies lack of it... 
+##view_adminRoles_permissions_autoselecting_configureWrite_implied = Autoselected CONFIGURE_READ permission, since CONFIGURE_WRITE implies it... +##view_adminRoles_permissions_autoselecting_manageBundle_implied = Autoselected unselected permissions, since MANAGE_BUNDLE implies all other bundle permissions... +##view_adminRoles_permissions_autoselecting_manageInventory_implied = Autoselected unselected Resource permissions, since MANAGE_INVENTORY implies all Resource permissions... +##view_adminRoles_permissions_autoselecting_manageSecurity_implied = Autoselected unselected permissions, since MANAGE_SECURITY implies all other permissions... +view_adminRoles_permissions_bundleGroupPermissions = Bundle Group Permissions view_adminRoles_permissions_globalPermissions = êžë¡ë² ê¶í view_adminRoles_permissions_illegalDeselectionDueToCorrespondingWritePermSelection = {0} ìœêž° ê¶íì ì í íŽì í ì ììµëë€. ìœêž° ê¶íì ììíë {0} ì°êž° ê¶íìŽ ëšŒì ì í íŽì ëìŽìŒ ê·žê²ì ìíí ì ììµëë€. view_adminRoles_permissions_illegalDeselectionDueToManageInventorySelection = {0} ê¶íì ì í íŽì í ì ììµëë€. ë€ë¥ž 몚ë ììì ììíë êŽëŠ¬ ìžë²€í ëŠ¬ê° ëšŒì ì í íŽì ëìŽìŒ ê·žê²ì ìíí ì ììµëë€. view_adminRoles_permissions_illegalDeselectionDueToManageSecuritySelection = {0} ê¶íì ì í íŽì í ì ììµëë€. ë€ë¥ž 몚ë ê¶íì ììíë êŽëŠ¬ 볎ì ê¶íìŽ ëšŒì ì í íŽì ëìŽìŒ ê·žê²ì ìíí ì ììµëë€. view_adminRoles_permissions_isAuthorized = ê¶íìŽ ììµëê¹? +##view_adminRoles_permissions_isRead = Read? +##view_adminRoles_permissions_isWrite = Write? ##view_adminRoles_permissions_permDesc_assignBundlesToGroup = can copy a viewable bundle to the bundle group ##view_adminRoles_permissions_permDesc_createBundles = can create new bundle [version]s. can copy existing bundles between viewable groups ##view_adminRoles_permissions_permDesc_createBundlesInGroup = can create new bundle [version]s for the bundle group. can copy a viewable bundle to the bundle group. 
@@ -470,16 +479,22 @@ view_adminRoles_permissions_permDesc_manageSettings = {0} ìë²ì ìì ë° ##view_adminRoles_permissions_permDesc_viewBundlesInGroup = can view any bundle in the group view_adminRoles_permissions_permDesc_viewUsers = ë€ë¥ž ì¬ì©ì륌 볌 ì ììµëë€. ê·žë¬ë ê·žë€ìê² í ë¹ë ìí ì 볎ìŽì§ ììµëë€. view_adminRoles_permissions_permReadDesc_configure = ìì ì€ì ë° ìì ì€ì ìì êž°ë¡ ë³Žêž° +##view_adminRoles_permissions_permReadDesc_control = (IMPLIED) view available operations and operation execution history view_adminRoles_permissions_permReadDesc_createChildResources = (ììì ) ìì 늬ìì€ ë§ë€êž° êž°ë¡ ë³Žêž° view_adminRoles_permissions_permReadDesc_deleteChildResources = (ììì ) ìì 늬ìì€ ìì êž°ë¡ ë³Žêž° +##view_adminRoles_permissions_permReadDesc_inventory = (IMPLIED) view Resource properties (name, description, version, etc.), connection settings, and connection settings history view_adminRoles_permissions_permReadDesc_manageAlerts = (ììì ) ê²œê³ ì ì ë° ê²œê³ êž°ë¡ ë³Žêž° view_adminRoles_permissions_permReadDesc_manageContent = (ììì ) ì€ì¹ëìŽ ì¬ì©ê°ë¥í íší€ì§; íší€ì§ ì€ì¹ êž°ë¡ ë³Žêž° view_adminRoles_permissions_permReadDesc_manageDrift = (ììì ) ë늬ííž ì ìì ë늬ííž êž°ë¡ ë³Žêž° view_adminRoles_permissions_permReadDesc_manageEvents = (ììì ) ìŽë²€íž 볎Ʞ view_adminRoles_permissions_permReadDesc_manageMeasurements = (ììì ) íµê³ ë°ìŽí° ë° ìì§ ì€ìŒì¥Ž 볎Ʞ +##view_adminRoles_permissions_permWriteDesc_configure = update Resource configuration; delete Resource configuration revision history items +##view_adminRoles_permissions_permWriteDesc_control = execute operations; delete operation execution history items view_adminRoles_permissions_permWriteDesc_createChildResources = (ìì± ê°ë¥í ì íì ìì 늬ìì€ì ëí) ìë¡ìŽ ìì 늬ìì€ ë§ë€êž° view_adminRoles_permissions_permWriteDesc_deleteChildResources = ìì ì ê±° ìžë²€í 늬; (ìì ê°ë¥í ì íì ììì ëí) 늬ìì€ ìì +##view_adminRoles_permissions_permWriteDesc_inventory = update Resource name, version, description, and connection settings; delete connection settings 
history items view_adminRoles_permissions_permWriteDesc_manageAlerts = ê²œê³ ì ì륌 ë§ë€êž°, ìì , ìì ; ê²œê³ êž°ë¡ íìž ë° ìì +##view_adminRoles_permissions_permWriteDesc_manageContent = subscribe to content sources; install and uninstall packages view_adminRoles_permissions_permWriteDesc_manageDrift = ë늬ííž ì ì ë§ë€êž°, ìì , ìì ; ë늬ííž ìžì€íŽì€ êŽëŠ¬ view_adminRoles_permissions_permWriteDesc_manageEvents = ìŽë²€íž ìì view_adminRoles_permissions_permWriteDesc_manageMeasurements = íµê³ ìì§ ì€ìŒì¥Ž ìì @@ -517,6 +532,8 @@ view_adminRoles_resourcePerms = 늬ìì€ ê¶í view_adminRoles_roleAdded = ìí [{0}]ìŽ ì¶ê°ëììµëë€. view_adminRoles_roleDeleteFailed = ìí [{0}]ì ìì ì ì€íšíìµëë€. view_adminRoles_roleDeleted = ìí [{0}]ìŽ ìì ëììµëë€. +##view_adminRoles_roleUpdateFailed = Failed to update role [{0}]. +##view_adminRoles_roleUpdated = Role [{0}] updated. view_adminTemplates_definedBy = ì ì view_adminTemplates_platformServices = íë«íŒ ìë¹ì€ view_adminTemplates_platforms = íë«íŒ diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties index 6d74d41..e6a7864 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties @@ -505,8 +505,10 @@ view_adminRoles_ldapGroupsReadOnly = Informa\u00E7\u00F5es do grupo LDAP com per view_adminRoles_noLdap = A integra\u00E7\u00E3o com o LDAP ainda n\u00E3o foi configurada. Para configurar o LDAP acesse <a {0}>{1}</a>. view_adminRoles_permissions_autoselecting_configureRead_implied = Permiss\u00E3o CONFIGURE_WRITE desmarcada automaticamente devida a aus\u00EAncia da permiss\u00E3o CONFIGURE_READ... 
view_adminRoles_permissions_autoselecting_configureWrite_implied = Permiss\u00E3o CONFIGURE_READ marcada automaticamente devida a marca\u00E7\u00E3o de CONFIGURE_WRITE... +##view_adminRoles_permissions_autoselecting_manageBundle_implied = Autoselected unselected permissions, since MANAGE_BUNDLE implies all other bundle permissions... view_adminRoles_permissions_autoselecting_manageInventory_implied = Autoselected unselected Resource permissions, since MANAGE_INVENTORY implies all Resource permissions... view_adminRoles_permissions_autoselecting_manageSecurity_implied = Autoselected unselected permissions, since MANAGE_SECURITY implies all other permissions... +view_adminRoles_permissions_bundleGroupPermissions = Bundle Group Permissions view_adminRoles_permissions_globalPermissions = Permiss\u00F5es Globais view_adminRoles_permissions_illegalDeselectionDueToCorrespondingWritePermSelection = {0} permiss\u00E3o de leitura n\u00E3 pode ser desmarcada, a menos que {0} permiss\u00E3o de escrita, que implica na permiss\u00E3o de leitura, seja desmarcada primeiro. view_adminRoles_permissions_illegalDeselectionDueToManageInventorySelection = {0} permiss\u00E3o n\u00E3o pode ser desmarcada, a menos que Gerenciar Invent\u00E1rio, que implica todas as permiss\u00F5es de Recurso, seja desmarcada primeiro. @@ -584,7 +586,6 @@ view_adminRoles_resourcePerms = Permiss\u00F5es do Recurso view_adminRoles_roleAdded = Perfil [{0}] adicionado. view_adminRoles_roleDeleteFailed = Falha ao excluir o perfil [{0}]. view_adminRoles_roleDeleted = Perfil [{0}] exclu\u00EDdo. -view_adminRoles_roleExists = O perfil com nome [{0}] j\u00E1 existe. view_adminRoles_roleUpdateFailed = Falha ao atualizar o perfil [{0}]. view_adminRoles_roleUpdated = Perfil [{0}] atualizado. 
##view_adminTemplates_definedBy = Defined By diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties index f31b829..4f9a97e 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties @@ -2574,6 +2574,7 @@ view_adminConfig_systemSettings = СОÑÑеЌМÑе МаÑÑÑПйкО view_adminContent_contentSources = ÐÑÑПÑМОкО кПМÑеМÑа view_adminRoles_assignedGroups = ÐазМаÑеММÑе гÑÑÐ¿Ð¿Ñ ÑеÑÑÑÑПв view_adminRoles_assignedSubjects = ÐазМаÑеММÑе ÑÑбÑекÑÑ +##view_adminRoles_failCreateRoleWithExistingName = Failed to create role with existing name [{0}]. Please use another name. view_adminRoles_failLdap = Ðе ÑЎалПÑÑ ÐŸÐ¿ÑеЎелОÑÑ, МаÑÑÑПеМ лО LDAP - пÑеЎпПлПжОÑелÑМП LDAP ПÑÑÑÑÑÑвÑеÑ. view_adminRoles_failLdapGroups = Ðе ÑЎалПÑÑ Ð¿ÐŸÐ»ÑÑОÑÑ LDAP-гÑÑппÑ. ÐÑеЎпПлПжОÑелÑМП LDAP гÑÑÐ¿Ð¿Ñ ÐŸÑÑÑÑÑÑвÑÑÑ view_adminRoles_failLdapGroupsRole = Ðе ÑЎалПÑÑ Ð·Ð°Ð³ÑÑзОÑÑ LDAP гÑÑппÑ, ЎПÑÑÑпМÑе ÐŽÐ»Ñ ÑПлО. @@ -2581,10 +2582,13 @@ view_adminRoles_failRoles = Ðе ÑЎалПÑÑ Ð¿ÐŸÐ»ÑÑОÑÑ ÑПлО. view_adminRoles_globalPerms = ÐлПбалÑÐœÑе пПлМПЌПÑÐžÑ view_adminRoles_ldapGroups = LDAP-гÑÑÐ¿Ð¿Ñ view_adminRoles_ldapGroupsReadOnly = ЎаММÑе LDAP гÑÑпп ЎПÑÑÑÐ¿ÐœÑ ÑПлÑкП ÐŽÐ»Ñ ÑÑÐµÐœÐžÑ +##view_adminRoles_noLdap = The LDAP security integration is not configured. To configure LDAP, go to <a {0}>{1}</a>. view_adminRoles_permissions_autoselecting_configureRead_implied = ÐвÑПЌаÑОÑеÑкО ПÑклÑÑеМП CONFIGURE_WRITE пПлМПЌПÑОе, пПÑкПлÑÐºÑ ÐŸÑÑÑÑÑÑвÑÐµÑ CONFIGURE_READ... view_adminRoles_permissions_autoselecting_configureWrite_implied = ÐвÑПЌаÑОÑеÑкО вÑбÑаМП CONFIGURE_READ пПлМПЌПÑОе, пПÑкПлÑÐºÑ CONFIGURE_WRITE пПЎÑазÑЌеваеÑ, ÑÑП ... 
+##view_adminRoles_permissions_autoselecting_manageBundle_implied = Autoselected unselected permissions, since MANAGE_BUNDLE implies all other bundle permissions... view_adminRoles_permissions_autoselecting_manageInventory_implied = ÐвÑПЌаÑОÑеÑкО ÑÑÑÐ°ÐœÐŸÐ²Ð»ÐµÐœÑ ÐœÐµ вÑбÑаММÑе пПлМПЌПÑÐžÑ ÑеÑÑÑÑÑПв, пПÑкПлÑÐºÑ MANAGE_INVENTORY пÑÐµÐŽÐ¿ÐŸÐ»Ð°Ð³Ð°ÐµÑ Ð²Ñе пПлМПЌПÑÐžÑ ÑеÑÑÑÑа... view_adminRoles_permissions_autoselecting_manageSecurity_implied = ÐвÑПЌаÑОÑеÑкО ÑÑÑÐ°ÐœÐŸÐ²Ð»ÐµÐœÑ ÐœÐµÐ²ÑбÑаММÑе пПлМПЌПÑОÑ, пПÑкПлÑÐºÑ MANAGE_SECURITY вклÑÑÐ°ÐµÑ Ð²Ñе ÐŽÑÑгОе пПлМПЌПÑОÑ... +view_adminRoles_permissions_bundleGroupPermissions = Bundle Group Permissions view_adminRoles_permissions_globalPermissions = ÐлПбалÑÐœÑе пПлМПЌÑÐžÑ view_adminRoles_permissions_illegalDeselectionDueToCorrespondingWritePermSelection = {0} пПлМПЌПÑÐžÑ ÐœÐ° ÑÑеМОе Ме ЌПгÑÑ Ð±ÑÑÑ ÐŸÑклÑÑеМÑ, пПка пÑеЎваÑОÑелÑМП {0} пПлМПЌПÑÐžÑ Ð·Ð°Ð¿ÐžÑО, кПÑПÑÑе вклÑÑаÑÑ Ð¿ÐŸÐ»ÐœÐŸÐŒÐŸÑÐžÑ ÐœÐ° ÑÑеМОе, Ме бÑÐŽÑÑ ÐŸÑклÑÑеМÑ. view_adminRoles_permissions_illegalDeselectionDueToManageInventorySelection = {0} пПлМПЌПÑÐžÑ ÐœÐµ ЌПгÑÑ Ð±ÑÑÑ ÐŸÑклÑÑеМÑ, пПка пÑеЎваÑОÑелÑМП Manage Inventory, кПÑПÑÐ°Ñ Ð²ÐºÐ»ÑÑÐ°ÐµÑ Ð²Ñе пПлМПЌПÑÐžÑ ÑеÑÑÑÑа, Ме бÑÐŽÐµÑ ÐŸÑклÑÑеМП. 
@@ -2609,17 +2613,23 @@ view_adminRoles_permissions_permDesc_manageSecurity = ЌПжМП ÑПзЎава ##view_adminRoles_permissions_permDesc_viewBundlesInGroup = can view any bundle in the group ##view_adminRoles_permissions_permDesc_viewUsers = can view other users, with the exception of their assigned roles view_adminRoles_permissions_permReadDesc_configure = пÑПÑЌПÑÑ ÐºÐŸÐœÑОгÑÑаÑОО ÑеÑÑÑÑа О ОÑÑПÑÐžÑ Ð²ÐµÑÑОй кПМÑОгÑÑаÑОО ÑеÑÑÑÑа +##view_adminRoles_permissions_permReadDesc_control = (IMPLIED) view available operations and operation execution history view_adminRoles_permissions_permReadDesc_createChildResources = (ÐÐÐÐ ÐÐУÐÐÐÐÐÐЫÐ) пÑПÑЌПÑÑ ÐžÑÑПÑОО ÑÐŸÐ·ÐŽÐ°ÐœÐžÑ ÐŽÐŸÑеÑМегП ÑеÑÑÑÑа view_adminRoles_permissions_permReadDesc_deleteChildResources = (ÐÐÐÐ ÐÐУÐÐÐÐÐÐЫÐ) пÑПÑЌПÑÑ ÐžÑÑПÑОО ÑÐŽÐ°Ð»ÐµÐœÐžÑ ÐŽÐŸÑеÑМегП ÑеÑÑÑÑа view_adminRoles_permissions_permReadDesc_inventory = (ÐÐÐÐ ÐÐУÐÐÐÐÐÐЫÐ) пÑПÑЌПÑÑ ÑвПйÑÑв ÑеÑÑÑÑа (МаОЌеМПваМОе, ПпОÑаМОе, веÑÑÐžÑ Ðž Ñ.ÐŽ.), МаÑÑÑПйкО пПЎклÑÑеМОÑ, О ОÑÑПÑÐžÑ ÐœÐ°ÑÑÑПек пПЎклÑÑÐµÐœÐžÑ view_adminRoles_permissions_permReadDesc_manageAlerts = (ÐÐÐÐ ÐÐУÐÐÐÐÐÐЫÐ) пÑПÑЌПÑÑ ÐŸÐ¿ÑеЎелеМОй пÑеЎÑпÑежЎеМОй О ОÑÑПÑОО пÑеЎÑпÑÐµÐ¶ÐŽÐµÐœÐžÑ +##view_adminRoles_permissions_permReadDesc_manageContent = (IMPLIED) view installed and available packages; view package installation history +##view_adminRoles_permissions_permReadDesc_manageDrift = (IMPLIED) view drift definitions and drift history view_adminRoles_permissions_permReadDesc_manageEvents = (ÐÐÐÐ ÐÐУÐÐÐÐÐÐЫÐ) пÑПÑЌПÑÑ ÑПбÑÑОй +##view_adminRoles_permissions_permReadDesc_manageMeasurements = (IMPLIED) view metric data and collection schedules view_adminRoles_permissions_permWriteDesc_configure = ПбМПвОÑÑ ÐºÐŸÐœÑОгÑÑаÑÐžÑ ÑеÑÑÑÑа; ÑЎалОÑÑ ÑлеЌеМÑÑ ÐžÑÑПÑОО веÑÑОй кПМÑОгÑÑаÑОО ÑеÑÑÑÑа view_adminRoles_permissions_permWriteDesc_control = вÑпПлМÑÑÑ ÐŸÐ¿ÐµÑаÑОО; ÑЎалОÑÑ ÑлеЌеМÑÑ ÐžÑÑПÑОО вÑÐ¿ÐŸÐ»ÐœÐµÐœÐžÑ ÐŸÐ¿ÐµÑаÑОй +##view_adminRoles_permissions_permWriteDesc_createChildResources = create new child Resources 
(for child Resources of types that are creatable) view_adminRoles_permissions_permWriteDesc_deleteChildResources = ÑбÑаÑÑ ÑеÑÑÑÑ ÐžÐ· ОМвеМÑаÑОзаÑОО; ÑЎалОÑÑ ÑеÑÑÑÑÑ (ÐŽÐ»Ñ ÑОпПв ÑеÑÑÑÑПв, кПÑПÑÑе ЌПгÑÑ Ð±ÑÑÑ ÑЎалеМÑ) view_adminRoles_permissions_permWriteDesc_inventory = ПбМПвОÑÑ ÐœÐ°ÐžÐŒÐµÐœÐŸÐ²Ð°ÐœÐžÐµ ÑеÑÑÑÑа, веÑÑОÑ, ПпОÑаМОе О МаÑÑÑПйкО пПЎклÑÑеМОÑ; ÑЎалОÑÑ ÑлеЌеМÑÑ ÐžÑÑПÑОО МаÑÑÑПек пПЎклÑÑÐµÐœÐžÑ view_adminRoles_permissions_permWriteDesc_manageAlerts = ÑПзЎаÑÑ, ПбМПвОÑÑ Ðž ÑЎалОÑÑ ÐŸÐ¿ÑÐµÐŽÐµÐ»ÐµÐœÐžÑ ÑвеЎПЌлеМОй; пПЎÑвеÑжЎаÑÑ Ðž ÑЎалÑÑÑ ÑлеЌеМÑÑ ÐžÑÑПÑОО пÑеЎÑпÑежЎеМОй view_adminRoles_permissions_permWriteDesc_manageContent = пПЎпОÑаÑÑÑÑ ÐœÐ° ОÑÑПÑМОкО кПМÑеМÑа; ÑÑÑаМавлОваÑÑ Ðž ÑЎалÑÑÑ Ð¿Ð°ÐºÐµÑÑ +##view_adminRoles_permissions_permWriteDesc_manageDrift = create, update, and delete drift definitions; and manage drift instances view_adminRoles_permissions_permWriteDesc_manageEvents = ÑЎалÑÑÑ ÑПбÑÑÐžÑ view_adminRoles_permissions_permWriteDesc_manageMeasurements = ПбМПвлеМОе ЌеÑÑОк кПллекÑОО плаМОÑПвÑОкПв ##view_adminRoles_permissions_perm_assignBundlesToGroup = Assign Bundles To Group diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties index 1aeb1a1..547ebd5 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties @@ -492,14 +492,16 @@ view_adminRoles_globalPerms = \u5168\u5c40\u6388\u6743 view_adminRoles_ldapGroups = LDAP\u7ec4 view_adminRoles_ldapGroupsReadOnly = LDAP\u7ec4\u6570\u636e\u4e3a\u53ea\u8bfb view_adminRoles_noLdap = \u6ca1\u6709\u96c6\u6210LDAP\u5b89\u5168, \u5230<a {0}>{1}</a>. 
-view_adminRoles_permissions_autoselecting_configureRead_implied = Autodeselected CONFIGURE_WRITE permission, since lack of CONFIGURE_READ implies lack of it... -view_adminRoles_permissions_autoselecting_configureWrite_implied = Autoselected CONFIGURE_READ permission, since CONFIGURE_WRITE implies it... -view_adminRoles_permissions_autoselecting_manageInventory_implied = Autoselected unselected Resource permissions, since MANAGE_INVENTORY implies all Resource permissions... -view_adminRoles_permissions_autoselecting_manageSecurity_implied = Autoselected unselected permissions, since MANAGE_SECURITY implies all other permissions... +##view_adminRoles_permissions_autoselecting_configureRead_implied = Autodeselected CONFIGURE_WRITE permission, since lack of CONFIGURE_READ implies lack of it... +##view_adminRoles_permissions_autoselecting_configureWrite_implied = Autoselected CONFIGURE_READ permission, since CONFIGURE_WRITE implies it... +##view_adminRoles_permissions_autoselecting_manageBundle_implied = Autoselected unselected permissions, since MANAGE_BUNDLE implies all other bundle permissions... +##view_adminRoles_permissions_autoselecting_manageInventory_implied = Autoselected unselected Resource permissions, since MANAGE_INVENTORY implies all Resource permissions... +##view_adminRoles_permissions_autoselecting_manageSecurity_implied = Autoselected unselected permissions, since MANAGE_SECURITY implies all other permissions... +view_adminRoles_permissions_bundleGroupPermissions = Bundle Group Permissions view_adminRoles_permissions_globalPermissions = \u5168\u5c40\u6388\u6743 -view_adminRoles_permissions_illegalDeselectionDueToCorrespondingWritePermSelection = {0} read permission cannot be deselected, unless the {0} write permission, which implies the read permission, is deselected first. 
-view_adminRoles_permissions_illegalDeselectionDueToManageInventorySelection = {0} permission cannot be deselected, unless Manage Inventory, which implies all Resource permissions, is deselected first. -view_adminRoles_permissions_illegalDeselectionDueToManageSecuritySelection = {0} permission cannot be deselected, unless the Manage Security permission, which implies all other permissions, is deselected first. +##view_adminRoles_permissions_illegalDeselectionDueToCorrespondingWritePermSelection = {0} read permission cannot be deselected, unless the {0} write permission, which implies the read permission, is deselected first. +##view_adminRoles_permissions_illegalDeselectionDueToManageInventorySelection = {0} permission cannot be deselected, unless Manage Inventory, which implies all Resource permissions, is deselected first. +##view_adminRoles_permissions_illegalDeselectionDueToManageSecuritySelection = {0} permission cannot be deselected, unless the Manage Security permission, which implies all other permissions, is deselected first. view_adminRoles_permissions_isAuthorized = \u6388\u6743? view_adminRoles_permissions_isRead = \u8bfb? view_adminRoles_permissions_isWrite = \u5199? 
@@ -523,16 +525,16 @@ view_adminRoles_permissions_permReadDesc_configure = \u67e5\u770b\u8d44\u6e90\u9 view_adminRoles_permissions_permReadDesc_control = (IMPLIED) \u67e5\u770b\u53ef\u7528\u64cd\u4f5c\u548c\u64cd\u4f5c\u6267\u884c\u5386\u53f2 view_adminRoles_permissions_permReadDesc_createChildResources = (IMPLIED) \u67e5\u770b\u5b50\u8d44\u6e90\u521b\u5efa\u5386\u53f2 view_adminRoles_permissions_permReadDesc_deleteChildResources = (IMPLIED) \u67e5\u770b\u5b50\u8d44\u6e90\u5220\u9664\u5386\u53f2 -view_adminRoles_permissions_permReadDesc_inventory = (IMPLIED) view Resource properties (name, description, version, etc.), connection settings, and connection settings history +##view_adminRoles_permissions_permReadDesc_inventory = (IMPLIED) view Resource properties (name, description, version, etc.), connection settings, and connection settings history view_adminRoles_permissions_permReadDesc_manageAlerts = (IMPLIED) \u67e5\u770b\u544a\u8b66\u5b9a\u4e49\u548c\u544a\u8b66\u5386\u53f2 view_adminRoles_permissions_permReadDesc_manageContent = (IMPLIED) \u67e5\u770b\u5b89\u88c5\u7684\u548c\u53ef\u7528\u7684\u5305; \u67e5\u770b\u5305\u5b89\u88c5\u5386\u53f2 ##view_adminRoles_permissions_permReadDesc_manageDrift = (IMPLIED) view drift definitions and drift history view_adminRoles_permissions_permReadDesc_manageEvents = (IMPLIED)\u67e5\u770b\u4e8b\u4ef6 -view_adminRoles_permissions_permReadDesc_manageMeasurements = (IMPLIED) view metric data and collection schedules +##view_adminRoles_permissions_permReadDesc_manageMeasurements = (IMPLIED) view metric data and collection schedules view_adminRoles_permissions_permWriteDesc_configure = \u66f4\u65b0\u8d44\u6e90\u914d\u7f6e; \u5220\u9664\u8d44\u6e90\u914d\u7f6e\u4fee\u8ba2\u7684\u5386\u53f2\u9879 view_adminRoles_permissions_permWriteDesc_control = \u6267\u884c\u64cd\u4f5c; \u5220\u9664\u64cd\u4f5c\u6267\u884c\u5386\u53f2 view_adminRoles_permissions_permWriteDesc_createChildResources = \u521b\u5efa\u65b0\u7684\u5b50\u8d44\u6e90 (for 
child Resources of types that are creatable) -view_adminRoles_permissions_permWriteDesc_deleteChildResources = uninventory resources; delete Resources (for Resources of types that are deletable) +##view_adminRoles_permissions_permWriteDesc_deleteChildResources = uninventory resources; delete Resources (for Resources of types that are deletable) view_adminRoles_permissions_permWriteDesc_inventory = \u66f4\u65b0\u8d44\u6e90\u540d, \u7248\u672c,\u63cf\u8ff0, \u548c\u8fde\u63a5\u8bbe\u7f6e; \u5220\u9664\u8fde\u63a5\u8bbe\u7f6e\u5386\u53f2\u9879 view_adminRoles_permissions_permWriteDesc_manageAlerts = \u521b\u5efa, \u66f4\u65b0, \u548c\u5220\u9664\u544a\u8b66\u5b9a\u4e49; \u786e\u8ba4\u548c\u5220\u9664\u5386\u53f2\u544a\u8b66\u9879 view_adminRoles_permissions_permWriteDesc_manageContent = \u8ba2\u9605\u5185\u5bb9\u6e90; \u5b89\u88c5\u5378\u8f7d\u5305
commit 4ab947d9c450a94601055509cb9c4240b0652210 Author: Thomas Segismont tsegismo@redhat.com Date: Thu Jul 25 18:06:07 2013 +0200
Bug 886126 - Datasource JNDI change takes too long to be propagated to JON server
In EAP5, JNDI name is the key of Datasource and Connection Factory managed components. Consequently, there is no way to change the JNDI name of such components without creating a new managed component, which will in the end be discovered by RHQ.
So in RHQ, jndi-name config property is now marked as read-only (it will still be required when creating a Datasource or a Connection Factory).
diff --git a/modules/plugins/jboss-as-5/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/jboss-as-5/src/main/resources/META-INF/rhq-plugin.xml index 2b14f5b..9a53a80 100644 --- a/modules/plugins/jboss-as-5/src/main/resources/META-INF/rhq-plugin.xml +++ b/modules/plugins/jboss-as-5/src/main/resources/META-INF/rhq-plugin.xml @@ -133,6 +133,7 @@
<!ENTITY datasourceConnectionResourceConfigProps ' <c:simple-property name="jndi-name" + readOnly="true" displayName="JNDI Name" description="The global JNDI Name under which to bind the datasource" type="string"/> @@ -502,7 +503,7 @@ -->
<!ENTITY connectionFactoryConnectionResourceConfigProps ' - <c:simple-property required="true" name="jndi-name" displayName="JNDI Name" + <c:simple-property required="true" name="jndi-name" readOnly="true" displayName="JNDI Name" description="The global JNDI name to bind the connection factory under."/>
<c:simple-property required="true" name="rar-name" displayName="RAR Name">
commit ddef8592f462ec95309368170b1e6c60966b8d6b Author: Heiko W. Rupp hwr@redhat.com Date: Thu Jul 25 17:17:27 2013 +0200
Make the parser check its input and bail out if it is not valid.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/helper/LinkDeserializer.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/helper/LinkDeserializer.java index e9582d4..f320d09 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/helper/LinkDeserializer.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/helper/LinkDeserializer.java @@ -20,7 +20,11 @@ package org.rhq.enterprise.server.rest.helper;
import java.io.IOException; +import java.util.regex.Matcher; +import java.util.regex.Pattern;
+import org.codehaus.jackson.JsonLocation; +import org.codehaus.jackson.JsonParseException; import org.codehaus.jackson.JsonParser; import org.codehaus.jackson.JsonProcessingException; import org.codehaus.jackson.map.DeserializationContext; @@ -44,25 +48,47 @@ import org.rhq.enterprise.server.rest.domain.Link; */ public class LinkDeserializer extends JsonDeserializer<Link>{
+ Pattern textPattern = Pattern.compile("\S+"); // Non whitespace; could possibly be narrowed + @Override public Link deserialize(JsonParser jp, DeserializationContext ctxt) throws IOException, JsonProcessingException {
String tmp = jp.getText(); // { + validate(jp, tmp,"{"); jp.nextToken(); // skip over { to the rel String rel = jp.getText(); + validateText(jp, rel); jp.nextToken(); // skip over { tmp = jp.getText(); + validate(jp, tmp,"{"); jp.nextToken(); // skip over "href" tmp = jp.getText(); -// jp.nextToken(); // skip over : + validate(jp, tmp,"href"); jp.nextToken(); // skip to "http:// ... " String href = jp.getText(); + validateText(jp, href); jp.nextToken(); // skip } tmp = jp.getText(); + validate(jp, tmp, "}"); jp.nextToken(); // skip } + tmp = jp.getText(); + validate(jp, tmp, "}");
Link link = new Link(rel,href);
return link; } + + private void validateText(JsonParser jsonParser, String input) throws JsonProcessingException { + Matcher m = textPattern.matcher(input); + if (!m.matches()) { + throw new JsonParseException("Unexpected token: " + input, jsonParser.getTokenLocation()); + } + } + + private void validate(JsonParser jsonParser, String input, String expected) throws JsonProcessingException { + if (!input.equals(expected)) { + throw new JsonParseException("Unexpected token: " + input, jsonParser.getTokenLocation()); + } + } } diff --git a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/OperationsTest.java b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/OperationsTest.java index 3d39181..7789f9e 100644 --- a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/OperationsTest.java +++ b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/OperationsTest.java @@ -18,7 +18,6 @@ */ package org.rhq.modules.integrationTests.restApi;
-import java.util.ArrayList; import java.util.List; import java.util.Map;
@@ -219,10 +218,61 @@ public class OperationsTest extends AbstractBase { .when() .delete("/operation/" + draftId); } + } + + @Test + public void testCatchBadLinkSerialization() throws Exception { + + // Test that when we get Links back in bad format, we + // correctly bail out. + + Operation draft = getADraftOperation(_platformId, discoveryDefinitionId); + + int draftId = draft.getId(); + draft.getParams().put("detailedDiscovery",true); + + String jsonWithBadLinkSer = // + "{\n" + + " "id": " + draftId + ",\n" + + " "name": "discovery",\n" + + " "readyToSubmit": false,\n" + + " "resourceId": " + _platformId + ",\n" + + " "definitionId": " + discoveryDefinitionId + ",\n" + + " "params": {\n" + + " "detailedDiscovery": true\n" + + " },\n" + + " "links": [\n" + + " {\n" + + " "rel": "edit",\n" + + " "href": "http://localhost:7080/rest/operation/" + draftId + ""\n" + + " }\n" + + " ]\n" + + "}"; + + try { + given() + .contentType(ContentType.JSON) + .pathParam("id", draftId) + .body(jsonWithBadLinkSer) + .log().everything() + .expect() + .statusCode(503) + .log().ifError() + .when() + .put("/operation/{id}"); + } finally { + // delete the draft again + expect() + .statusCode(204) + .when() + .delete("/operation/" + draftId); + }
}
+ + @Test public void testCreateDraftOperationAndScheduleExecution() throws Exception {
commit 32395046687dcfd0cd9e46164a9ce4b6a82d1752 Author: Heiko W. Rupp hwr@redhat.com Date: Thu Jul 25 14:57:07 2013 +0200
Add a LinkSerializer to send links in correct format (one could argue not to send links at all).
diff --git a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/OperationsTest.java b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/OperationsTest.java index 57c1a49..3d39181 100644 --- a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/OperationsTest.java +++ b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/OperationsTest.java @@ -318,8 +318,6 @@ public class OperationsTest extends AbstractBase { assert draft != null; assert draft.getDefinitionId() == definitionId;
- draft.setLinks(new ArrayList<Link>()); // Clean out links TODO - System.out.println("--- Draft created --"); System.out.flush();
diff --git a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/d/Link.java b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/d/Link.java index c3f7b2b..c199729 100644 --- a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/d/Link.java +++ b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/d/Link.java @@ -20,11 +20,13 @@ package org.rhq.modules.integrationTests.restApi.d;
import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonSerialize;
/** * A Link inside objects * @author Heiko W. Rupp */ +@JsonSerialize(using = LinkSerializer.class) @JsonDeserialize(using = LinkDeserializer.class) public class Link {
diff --git a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/d/LinkSerializer.java b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/d/LinkSerializer.java new file mode 100644 index 0000000..759c8dc --- /dev/null +++ b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/d/LinkSerializer.java @@ -0,0 +1,53 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +package org.rhq.modules.integrationTests.restApi.d; + +import java.io.IOException; + +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.JsonSerializer; +import com.fasterxml.jackson.databind.SerializerProvider; + +/** + * Special serializer for Link objects that does not map the classical {rel:abc, href:xyz} scheme, + * but which puts the rel name "at the outside" like { abc : { href : xyz }} to make it easier for + * clients to access the link. + * See also https://bugzilla.redhat.com/show_bug.cgi?id=845244 + * @author Heiko W. 
Rupp + * @see LinkDeserializer + */ +public class LinkSerializer extends JsonSerializer<Link> { + + @Override + public void serialize(Link link, JsonGenerator jsonGenerator, + SerializerProvider serializerProvider) throws IOException, JsonProcessingException { + + jsonGenerator.writeStartObject(); + jsonGenerator.writeFieldName(link.getRel()); + + jsonGenerator.writeStartObject(); + jsonGenerator.writeFieldName("href"); + jsonGenerator.writeString(link.getHref()); + jsonGenerator.writeEndObject(); + + jsonGenerator.writeEndObject(); + } +}
commit 83336d77e19949e332d29e936ce6739aded399da Author: Heiko W. Rupp hwr@redhat.com Date: Thu Jul 25 12:55:54 2013 +0200
BZ 974963 Allow to schedule operations that have no parameters.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/OperationsHandlerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/OperationsHandlerBean.java index 5e8e68e..070675e 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/OperationsHandlerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/OperationsHandlerBean.java @@ -303,12 +303,15 @@ public class OperationsHandlerBean extends AbstractRestBean {
// Validate parameters ConfigurationDefinition parameterDefinition = opDef.getParametersConfigurationDefinition(); - List<String> errorMessages = ConfigurationHelper.checkConfigurationWrtDefinition(parameters, parameterDefinition); - - if (errorMessages.size()>0) { - // Configuration is not ok - operation.setReadyToSubmit(false); - throw new BadArgumentException("Validation of parameters failed", StringUtils.getListAsString(errorMessages,", ")); + if (parameterDefinition!=null) { + // There are parameters defined, so lets validate them. + List<String> errorMessages = ConfigurationHelper.checkConfigurationWrtDefinition(parameters, parameterDefinition); + + if (errorMessages.size()>0) { + // Configuration is not ok + operation.setReadyToSubmit(false); + throw new BadArgumentException("Validation of parameters failed", StringUtils.getListAsString(errorMessages,", ")); + } } }
diff --git a/modules/integration-tests/rest-api/pom.xml b/modules/integration-tests/rest-api/pom.xml index 47bfe18..5d7bde5 100644 --- a/modules/integration-tests/rest-api/pom.xml +++ b/modules/integration-tests/rest-api/pom.xml @@ -18,7 +18,7 @@
<properties> <surefire-plugin.version>2.10</surefire-plugin.version> - <jackson.version>1.9.5</jackson.version> + <jackson.version>2.0.5</jackson.version> <rest-assured.version>1.8.1</rest-assured.version> </properties>
@@ -53,15 +53,14 @@ <scope>test</scope> </dependency> <dependency> - <groupId>org.codehaus.jackson</groupId> - <artifactId>jackson-core-asl</artifactId> + <groupId>com.fasterxml.jackson.core</groupId> + <artifactId>jackson-annotations</artifactId> <version>${jackson.version}</version> <scope>test</scope> </dependency> - <dependency> - <groupId>org.codehaus.jackson</groupId> - <artifactId>jackson-mapper-asl</artifactId> + <groupId>com.fasterxml.jackson.core</groupId> + <artifactId>jackson-core</artifactId> <version>${jackson.version}</version> <scope>test</scope> </dependency> diff --git a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/OperationsTest.java b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/OperationsTest.java index 7800e5c..57c1a49 100644 --- a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/OperationsTest.java +++ b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/OperationsTest.java @@ -18,6 +18,7 @@ */ package org.rhq.modules.integrationTests.restApi;
+import java.util.ArrayList; import java.util.List; import java.util.Map;
@@ -42,7 +43,8 @@ import static org.hamcrest.core.Is.is; */ public class OperationsTest extends AbstractBase {
- private int definitionId; + private int discoveryDefinitionId; + private int viewPLDefinitionId;
@Before public void setUp() throws Exception { @@ -58,15 +60,20 @@ public class OperationsTest extends AbstractBase { .when() .get("/operation/definitions");
- definitionId = -1; + discoveryDefinitionId = -1; List<Map<String,Object>> list = r.as(List.class); for (Map<String,Object> map : list) { - if (map.get("name").equals("discovery")) { - definitionId = (Integer) map.get("id"); + String name = (String) map.get("name"); + Integer id = (Integer) map.get("id"); + if (name.equals("discovery")) { + discoveryDefinitionId = id; + } + if (name.equals("viewProcessList")) { + viewPLDefinitionId = id; } }
- assert definitionId !=-1 : "No discovery operation found"; + assert discoveryDefinitionId !=-1 : "No discovery operation found"; }
@Test @@ -76,7 +83,7 @@ public class OperationsTest extends AbstractBase {
given() .header(acceptJson) - .pathParam("did",definitionId) + .pathParam("did", discoveryDefinitionId) .expect() .statusCode(200) .body("name",is("discovery")) @@ -92,7 +99,7 @@ public class OperationsTest extends AbstractBase {
given() .header(acceptJson) - .pathParam("did",-42) + .pathParam("did", -42) .expect() .statusCode(404) .when() @@ -107,7 +114,7 @@ public class OperationsTest extends AbstractBase {
given() .header(acceptJson) - .queryParam("resourceId",42) + .queryParam("resourceId", 42) .expect() .statusCode(404) .when() @@ -134,7 +141,7 @@ public class OperationsTest extends AbstractBase {
given() .header(acceptJson) - .pathParam("did",-42) + .pathParam("did", -42) .expect() .statusCode(406) .when() @@ -150,7 +157,7 @@ public class OperationsTest extends AbstractBase { given() .header(acceptJson) .queryParam("resourceId", 42) - .pathParam("definitionId", definitionId) + .pathParam("definitionId", discoveryDefinitionId) .expect() .statusCode(404) .when() @@ -164,7 +171,7 @@ public class OperationsTest extends AbstractBase {
given() .header(acceptJson) - .pathParam("definitionId", definitionId) + .pathParam("definitionId", discoveryDefinitionId) .expect() .statusCode(406) .when() @@ -174,20 +181,7 @@ public class OperationsTest extends AbstractBase { @Test public void testCreateDraftOperation() throws Exception {
- Operation draft = - given() - .header(acceptJson) - .pathParam("definitionId",definitionId) - .queryParam("resourceId",_platformId) - .expect() - .statusCode(200) - .log().ifError() - .when() - .post("/operation/definition/{definitionId}") - .as(Operation.class); - - assert draft != null; - assert draft.getDefinitionId() == definitionId; + Operation draft = getADraftOperation(_platformId, discoveryDefinitionId);
int draftId = draft.getId();
@@ -202,29 +196,17 @@ public class OperationsTest extends AbstractBase { @Test public void testCreateAndUpdateDraftOperation() throws Exception {
- Operation draft = - given() - .header(acceptJson) - .pathParam("definitionId",definitionId) - .queryParam("resourceId",_platformId) - .expect() - .statusCode(200) - .log().ifError() - .when() - .post("/operation/definition/{definitionId}") - .as(Operation.class); - - assert draft != null; - assert draft.getDefinitionId() == definitionId; + Operation draft = getADraftOperation(_platformId, discoveryDefinitionId);
int draftId = draft.getId(); - draft.getParams().put("detailed",true); + draft.getParams().put("detailedDiscovery",true);
try { given() .contentType(ContentType.JSON) .pathParam("id", draftId) .body(draft) + .log().everything() .expect() .statusCode(200) .log().ifError() @@ -246,25 +228,51 @@ public class OperationsTest extends AbstractBase {
int platformId = findIdOfARealPlatform();
- Operation draft = + Operation draft = getADraftOperation(platformId, discoveryDefinitionId); + + int draftId = draft.getId(); + + draft.setReadyToSubmit(true); + draft.getParams().put("detailedDiscovery", false); + + // update to schedule + Operation scheduled = given() - .header(acceptJson) - .pathParam("definitionId",definitionId) - .queryParam("resourceId",platformId) + .contentType(ContentType.JSON) + .pathParam("id",draftId) + .body(draft) .expect() .statusCode(200) .log().ifError() .when() - .post("/operation/definition/{definitionId}") + .put("/operation/{id}") .as(Operation.class);
- assert draft != null; - assert draft.getDefinitionId() == definitionId; + System.out.println(scheduled.getId()); + String history = findHistoryItem(scheduled); + + String historyId = history.substring(history.lastIndexOf("/")+1); + try { + waitAndCheckStatus(platformId, historyId); + + } finally { + + // Wait until the operation has finished and then delete + waitForTerminationAndDelete(historyId); + + } + } + + @Test + public void testCreateDraftOperationNoParamsAndScheduleExecution() throws Exception { + + int platformId = findIdOfARealPlatform(); + + Operation draft = getADraftOperation(platformId, viewPLDefinitionId);
int draftId = draft.getId();
draft.setReadyToSubmit(true); - draft.getParams().put("detailedDiscovery", false);
// update to schedule Operation scheduled = @@ -280,6 +288,45 @@ public class OperationsTest extends AbstractBase { .as(Operation.class);
System.out.println(scheduled.getId()); + String history = findHistoryItem(scheduled); + + String historyId = history.substring(history.lastIndexOf("/")+1); + try { + waitAndCheckStatus(platformId, historyId); + + } finally { + + // Wait until the operation has finished and then delete + waitForTerminationAndDelete(historyId); + + } + } + + private Operation getADraftOperation(int platformId, int definitionId) { + Operation draft = + given() + .header(acceptJson) + .pathParam("definitionId", definitionId) + .queryParam("resourceId",platformId) + .expect() + .statusCode(200) + .log().ifError() + .when() + .post("/operation/definition/{definitionId}") + .as(Operation.class); + + assert draft != null; + assert draft.getDefinitionId() == definitionId; + + draft.setLinks(new ArrayList<Link>()); // Clean out links TODO + + System.out.println("--- Draft created --"); + System.out.flush(); + + return draft; + } + + private String findHistoryItem(Operation scheduled) { String history = null; List<Link> links = scheduled.getLinks(); for (Link link : links) { @@ -288,76 +335,74 @@ public class OperationsTest extends AbstractBase { } } assert history != null; + return history; + }
- String historyId = history.substring(history.lastIndexOf("/")+1); - try { - Thread.sleep(15000); // we need to wait a little as the execution may take time + private void waitAndCheckStatus(int platformId, String historyId) throws InterruptedException { + Thread.sleep(15000); // we need to wait a little as the execution may take time
- given() - .pathParam("hid",historyId) - .expect() - .statusCode(200) - .log().ifError() - .when() - .get("/operation/history/{hid}"); + given() + .pathParam("hid", historyId) + .expect() + .statusCode(200) + .log().ifError() + .when() + .get("/operation/history/{hid}"); + + // See if we also find it when we are looking for histories on the resource + Response response = + given() + .queryParam("resourceId", platformId) + .header(acceptJson) + .expect() + .statusCode(200) + .log().ifError() + .when() + .get("/operation/history");
- // See if we also find it when we are looking for histories on the resource + // compare + List<Map<String,Object>> list = response.as(List.class); + boolean found = false; + for (Map<String,Object> map : list) { + if (map.get("jobId").equals(historyId)) { + found = true; + } + } + assert found; + } + + private void waitForTerminationAndDelete(String historyId) throws InterruptedException { + boolean done = false; + int count = 0; + while (!done) { Response response = given() - .queryParam("resourceId",platformId) .header(acceptJson) - .expect() - .statusCode(200) - .log().ifError() + .pathParam("hid", historyId) .when() - .get("/operation/history"); - - // compare - List<Map<String,Object>> list = response.as(List.class); - boolean found = false; - for (Map<String,Object> map : list) { - if (map.get("jobId").equals(historyId)) { - found = true; - } - } - assert found; + .get("/operation/history/{hid}");
- } finally { + JsonPath jsonPath = response.jsonPath(); + String status= jsonPath.getString("status"); + int code = response.statusCode();
- // Wait until the operation has finished and then delete - boolean done = false; - int count = 0; - while (!done) { - Response response = - given() - .header(acceptJson) - .pathParam("hid", historyId) - .when() - .get("/operation/history/{hid}"); - - JsonPath jsonPath = response.jsonPath(); - String status= jsonPath.getString("status"); - int code = response.statusCode(); - - if (code==200 && (status.equals("Success") || status.equals("Failed"))) { - done = true; - } else { - Thread.sleep(2000); - } - count ++; - assert count < 10 :"Waited for 20sec -- something is wrong"; + if (code==200 && (status.equals("Success") || status.equals("Failed"))) { + done = true; + } else { + Thread.sleep(2000); } - - // Delete the history item - given() - .pathParam("hid",historyId) - .expect() - .statusCode(204) - .log().ifError() - .when() - .delete("/operation/history/{hid}"); - + count ++; + assert count < 10 :"Waited for 20sec -- something is wrong"; } + + // Delete the history item + given() + .pathParam("hid", historyId) + .expect() + .statusCode(204) + .log().ifError() + .when() + .delete("/operation/history/{hid}"); }
@Test @@ -365,20 +410,7 @@ public class OperationsTest extends AbstractBase {
int platformId = findIdOfARealPlatform();
- Operation draft = - given() - .header(acceptJson) - .pathParam("definitionId",definitionId) - .queryParam("resourceId",platformId) - .expect() - .statusCode(200) - .log().ifError() - .when() - .post("/operation/definition/{definitionId}") - .as(Operation.class); - - assert draft != null; - assert draft.getDefinitionId() == definitionId; + Operation draft = getADraftOperation(platformId, discoveryDefinitionId);
int draftId = draft.getId();
@@ -423,20 +455,7 @@ public class OperationsTest extends AbstractBase {
int platformId = findIdOfARealPlatform();
- Operation draft = - given() - .header(acceptJson) - .pathParam("definitionId",definitionId) - .queryParam("resourceId",platformId) - .expect() - .statusCode(200) - .log().ifError() - .when() - .post("/operation/definition/{definitionId}") - .as(Operation.class); - - assert draft != null; - assert draft.getDefinitionId() == definitionId; + Operation draft = getADraftOperation(platformId, discoveryDefinitionId);
int draftId = draft.getId();
diff --git a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/d/Link.java b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/d/Link.java index e16213c..c3f7b2b 100644 --- a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/d/Link.java +++ b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/d/Link.java @@ -19,7 +19,7 @@
package org.rhq.modules.integrationTests.restApi.d;
-import org.codehaus.jackson.map.annotate.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
/** * A Link inside objects diff --git a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/d/LinkDeserializer.java b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/d/LinkDeserializer.java index 5d45724..c138761 100644 --- a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/d/LinkDeserializer.java +++ b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/d/LinkDeserializer.java @@ -21,19 +21,19 @@ package org.rhq.modules.integrationTests.restApi.d;
import java.io.IOException;
-import org.codehaus.jackson.JsonNode; -import org.codehaus.jackson.JsonParser; -import org.codehaus.jackson.JsonProcessingException; -import org.codehaus.jackson.ObjectCodec; -import org.codehaus.jackson.map.DeserializationContext; -import org.codehaus.jackson.map.JsonDeserializer; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.core.ObjectCodec; +import com.fasterxml.jackson.databind.DeserializationContext; +import com.fasterxml.jackson.databind.JsonDeserializer; +import com.fasterxml.jackson.databind.JsonNode;
/** * Custom deserializer for link object that are now in the form { relname : { "href": link}} * * @author Heiko W. Rupp */ -public class LinkDeserializer extends JsonDeserializer<Link>{ +public class LinkDeserializer extends JsonDeserializer<Link> {
@Override public Link deserialize(JsonParser jsonParser, @@ -41,8 +41,8 @@ public class LinkDeserializer extends JsonDeserializer<Link>{
ObjectCodec oc = jsonParser.getCodec(); JsonNode node = oc.readTree(jsonParser); - String rel = node.getFieldNames().next(); - String href = node.getElements().next().get("href").getTextValue(); + String rel = node.fieldNames().next(); + String href = node.elements().next().get("href").textValue();
return new Link(rel,href);
commit bac87d4800260b79c0398291d3909aa1b383b5e2 Author: Thomas Segismont tsegismo@redhat.com Date: Thu Jul 25 16:50:06 2013 +0200
Make CassandraNodeComponent shutdown operation wait for server to go down (unless running on OSX)
diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java index 37aa425..3e55a93 100644 --- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java +++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java @@ -68,6 +68,7 @@ import org.rhq.core.pluginapi.util.ProcessExecutionUtility; import org.rhq.core.system.ProcessExecution; import org.rhq.core.system.ProcessExecutionResults; import org.rhq.core.system.ProcessInfo; +import org.rhq.core.system.ProcessInfo.ProcessInfoSnapshot; import org.rhq.core.system.SystemInfo; import org.rhq.core.util.StringUtil; import org.rhq.core.util.exception.ThrowableUtil; @@ -140,7 +141,6 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent if (isStorageServiceReachable()) { return AvailabilityType.UP; } - return AvailabilityType.DOWN; } finally { long totalTimeMillis = NANOSECONDS.toMillis(System.nanoTime() - start); @@ -190,6 +190,7 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent public OperationResult invokeOperation(String name, Configuration parameters) throws Exception { if (name.equals("shutdown")) { OperationResult operationResult = shutdownNode(); + waitForNodeToGoDown(); return operationResult; } else if (name.equals("start")) { return startNode(); @@ -238,7 +239,7 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent protected OperationResult stopNode() { ProcessInfo process = getResourceContext().getNativeProcess();
- if (processInfo == null) { + if (process == null) { LOG.warn("Failed to obtain process info. It appears Cassandra is already shutdown."); return new OperationResult("Failed to obtain process info. It appears Cassandra is already shutdown."); } @@ -265,6 +266,39 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent } }
+ private void waitForNodeToGoDown() throws InterruptedException { + if (OperatingSystem.getInstance().getName().equals(OperatingSystem.NAME_MACOSX)) { + // See this thread on VMWare forum: http://communities.vmware.com/message/2187972#2187972 + // Unfortunately there is no work around for this failure on Mac OSX so the method will silently return on + // this platform. + return; + } + for (ProcessInfoSnapshot processInfoSnapshot = getProcessInfoSnapshot();; processInfoSnapshot = getProcessInfoSnapshot()) { + if (processInfoSnapshot == null || !processInfoSnapshot.isRunning()) { + // Process not found, so it died, that's fine + // OR + // Process info says process is no longer running, that's fine as well + break; + } + if (getResourceContext().getComponentInvocationContext().isInterrupted()) { + // Operation canceled or timed out + throw new InterruptedException(); + } + // Process is still running, wait a second and check again + Thread.sleep(SECONDS.toMillis(2)); + } + } + + private ProcessInfoSnapshot getProcessInfoSnapshot() { + ProcessInfoSnapshot processInfoSnapshot = (processInfo == null) ? null : processInfo.freshSnapshot(); + if (processInfoSnapshot == null || !processInfoSnapshot.isRunning()) { + processInfo = getResourceContext().getNativeProcess(); + // Safe to get prior snapshot here, we've just recreated the process info instance + processInfoSnapshot = (processInfo == null) ? null : processInfo.priorSnaphot(); + } + return processInfoSnapshot; + } + protected OperationResult startNode() { Configuration pluginConfig = getResourceContext().getPluginConfiguration(); String baseDir = pluginConfig.getSimpleValue("baseDir");
commit 76183b30be9694e81ed93239ff505f887c101f32 Author: Stefan Negrea snegrea@redhat.com Date: Thu Jul 25 08:51:47 2013 -0500
Add support for updating the JMX Port. The process sends a restart to the resource last to avoid putting the Server in maintenance mode.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 1441785..7a7eda4 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -103,7 +103,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN private static final String RHQ_STORAGE_ADDRESS_PROPERTY = "host";
private static final int OPERATION_QUERY_TIMEOUT = 20000; - private static final int MAX_ITERATIONS = 6; + private static final int MAX_ITERATIONS = 10; private static final String UPDATE_CONFIGURATION_OPERATION = "updateConfiguration"; private static final String RESTART_OPERATION = "restart";
@@ -639,16 +639,45 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN StorageNode storageNode = findStorageNodeByAddress(storageNodeConfiguration.getStorageNode().getAddress());
if (storageNode != null && storageNode.getResource() != null) { - Resource storageNodeResource = storageNode.getResource(); Configuration parameters = new Configuration(); parameters.setSimpleValue("jmxPort", storageNodeConfiguration.getJmxPort() + ""); - parameters.setSimpleValue("heapSize", storageNodeConfiguration.getHeapSize() + ""); - parameters.setSimpleValue("heapNewSize", storageNodeConfiguration.getHeapNewSize() + ""); - parameters.setSimpleValue("threadStackSize", storageNodeConfiguration.getThreadStackSize() + ""); - parameters.setSimpleValue("restartIfRequired", "true"); + if (storageNodeConfiguration.getHeapSize() != null) { + parameters.setSimpleValue("heapSize", storageNodeConfiguration.getHeapSize() + ""); + } + if (storageNodeConfiguration.getHeapNewSize() != null) { + parameters.setSimpleValue("heapNewSize", storageNodeConfiguration.getHeapNewSize() + ""); + } + if (storageNodeConfiguration.getThreadStackSize() != null) { + parameters.setSimpleValue("threadStackSize", storageNodeConfiguration.getThreadStackSize() + ""); + } + parameters.setSimpleValue("restartIfRequired", "false");
- return runOperationAndWaitForResult(subject, storageNodeResource, UPDATE_CONFIGURATION_OPERATION, + Resource storageNodeResource = storageNode.getResource(); + + boolean result = runOperationAndWaitForResult(subject, storageNodeResource, UPDATE_CONFIGURATION_OPERATION, parameters); + + if (result) { + Configuration storageNodePluginConfig = configurationManager.getPluginConfiguration(subject, + storageNodeResource.getId()); + + String existingJMXPort = storageNodePluginConfig.getSimpleValue("jmxPort"); + storageNodePluginConfig.setSimpleValue("jmxPort", storageNodeConfiguration.getJmxPort() + ""); + + String existingConnectionURL = storageNodePluginConfig.getSimpleValue("connectorAddress"); + String newConnectionURL = existingConnectionURL.replace(":" + existingJMXPort + "/", ":" + + storageNodeConfiguration.getJmxPort() + "/"); + storageNodePluginConfig.setSimpleValue("connectorAddress", newConnectionURL); + + configurationManager.updatePluginConfiguration(subject, storageNodeResource.getId(), + storageNodePluginConfig); + + storageNode.setJmxPort(storageNodeConfiguration.getJmxPort()); + entityManager.merge(storageNode); + entityManager.flush(); + + return runOperationAndWaitForResult(subject, storageNodeResource, RESTART_OPERATION, null); + } }
return false; diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java index 41a0c42..125f4d2 100644 --- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java @@ -117,6 +117,7 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper
//update storage node jvm settings Configuration config = new Configuration(); + config.put(new PropertySimple("jmxPort", params.getSimpleValue("jmxPort"))); config.put(new PropertySimple("minHeapSize", params.getSimpleValue("heapSize"))); config.put(new PropertySimple("maxHeapSize", params.getSimpleValue("heapSize"))); config.put(new PropertySimple("heapNewSize", params.getSimpleValue("heapNewSize"))); diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java index 1667877..dd5b8a2 100644 --- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java @@ -148,11 +148,17 @@ public class StorageNodeConfigDelegate implements ConfigurationFacet { } }
- private void updateCassandraJvmProps(Configuration config) throws IOException { + private void updateCassandraJvmProps(Configuration newConfig) throws IOException { PropertiesFileUpdate propertiesUpdater = new PropertiesFileUpdate(jvmOptsFile.getAbsolutePath()); Properties properties = propertiesUpdater.loadExistingProperties();
- String maxHeapSize = config.getSimpleValue("maxHeapSize"); + String jmxPort = newConfig.getSimpleValue("jmxPort"); + if (!StringUtil.isEmpty(jmxPort)) { + validateIntegerArg("jmx_port", jmxPort); + properties.setProperty("jmx_port", jmxPort); + } + + String maxHeapSize = newConfig.getSimpleValue("maxHeapSize"); if (!StringUtil.isEmpty(maxHeapSize)) { validateHeapArg("maxHeapSize", maxHeapSize); // We want min and max heap to be the same @@ -160,19 +166,19 @@ public class StorageNodeConfigDelegate implements ConfigurationFacet { properties.setProperty("heap_max", "-Xmx" + maxHeapSize); }
- String heapNewSize = config.getSimpleValue("heapNewSize"); + String heapNewSize = newConfig.getSimpleValue("heapNewSize"); if (!StringUtil.isEmpty(heapNewSize)) { validateHeapArg("heapNewSize", heapNewSize); properties.setProperty("heap_new", "-Xmn" + heapNewSize); }
- String threadStackSize = config.getSimpleValue("threadStackSize"); + String threadStackSize = newConfig.getSimpleValue("threadStackSize"); if (!StringUtil.isEmpty(threadStackSize)) { - validateStackArg(threadStackSize); + validateIntegerArg("threadStackSize", threadStackSize); properties.setProperty("thread_stack_size", "-Xss" + threadStackSize + "k"); }
- PropertySimple heapDumpOnOMMError = config.getSimple("heapDumpOnOOMError"); + PropertySimple heapDumpOnOMMError = newConfig.getSimple("heapDumpOnOOMError"); if (heapDumpOnOMMError != null) { if (heapDumpOnOMMError.getBooleanValue()) { properties.setProperty("heap_dump_on_OOMError", "-XX:+HeapDumpOnOutOfMemoryError"); @@ -181,7 +187,7 @@ public class StorageNodeConfigDelegate implements ConfigurationFacet { } }
- String heapDumpDir = useForwardSlash(config.getSimpleValue("heapDumpDir")); + String heapDumpDir = useForwardSlash(newConfig.getSimpleValue("heapDumpDir")); if (!StringUtil.isEmpty(heapDumpDir)) { properties.setProperty("heap_dump_dir", heapDumpDir); } @@ -209,7 +215,7 @@ public class StorageNodeConfigDelegate implements ConfigurationFacet {
String threadStackSize = config.getSimpleValue("threadStackSize"); if (!StringUtil.isEmpty(threadStackSize)) { - validateStackArg(threadStackSize); + validateIntegerArg("threadStackSize", threadStackSize); properties.setProperty("set.thread_stack_size", "-Xss" + threadStackSize + "k"); }
@@ -248,11 +254,11 @@ public class StorageNodeConfigDelegate implements ConfigurationFacet { } }
- private void validateStackArg(String value) { + private void validateIntegerArg(String name, String value) { try { Integer.parseInt(value); } catch (NumberFormatException e) { - throw new IllegalArgumentException(value + " is not a legal value for the property [threadStackSize]"); + throw new IllegalArgumentException(value + " is not a legal value for the property [" + name + "]"); } }
diff --git a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml index aca3ba2..a42040d 100644 --- a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml +++ b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml @@ -32,6 +32,7 @@ <c:simple-property name="baseDir" displayName="Base Directory" description="The base directory from which the Cassandra Daemon was launched." required="false"/> <c:simple-property name="yamlConfiguration" displayName="YAML Configuration File" description="YAML Configuration File"/> <c:simple-property name="nativeTransportPort" description="The port on which Cassandra listens for CQL client connections." default="9042" type="integer"/> + <c:simple-property name="jmxPort" description="The JMX port for Cassandra" default="7299" type="integer" readOnly="true"/> <c:simple-property name="host" description="The host on which cassandra listens to CQL client connections" default="localhost"/> <c:simple-property name="clusterName" description="Cluster name" default="localhost"/> <c:simple-property name="authenticator" required="true" default="org.apache.cassandra.auth.AllowAllAuthenticator" description="Cassandra client authenticator"> @@ -131,11 +132,11 @@
<operation name="updateConfiguration" description="Updates the node configuration. Will require a separate server restart for the settings to take effect."> <parameters> + <c:simple-property name="jmxPort" type="integer" description="JMX port JVM option."/> <c:simple-property name="heapSize" type="string" description="The heap size to be used for both -Xms and -Xmx JVM options."/> <c:simple-property name="heapNewSize" type="string" description="The heap new size to be used be used with -Xmn JVM option."/> <c:simple-property name="threadStackSize" type="integer" description="The thread stack size. This memory is allocated to each thread off heap. The value should be an integer that will be interpreted in kilobytes."/> - <c:simple-property name="jmxPort" type="integer" description="JMX port JVM option."/> <c:simple-property name="restartIfRequired" type="boolean" required="true" default="false" description="If [true] then restart the server at the end of the update if and if only the updates made require a restart. If [false] the server will not be restarted regardless of the updates made."/> </parameters> <results>
commit c4c99241f2c2406b4b916865b0932d1df5e2af4f Author: Stefan Negrea snegrea@redhat.com Date: Tue Jul 23 16:43:29 2013 -0500
Move the restart operation to the plugin rather than server bean. This simplifies the interface between the server bean and storage node plugin.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 9416c67..1441785 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -645,21 +645,10 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN parameters.setSimpleValue("heapSize", storageNodeConfiguration.getHeapSize() + ""); parameters.setSimpleValue("heapNewSize", storageNodeConfiguration.getHeapNewSize() + ""); parameters.setSimpleValue("threadStackSize", storageNodeConfiguration.getThreadStackSize() + ""); + parameters.setSimpleValue("restartIfRequired", "true");
- boolean updateConfigurationResult = runOperationAndWaitForResult(subject, storageNodeResource, - UPDATE_CONFIGURATION_OPERATION, parameters); - - if (updateConfigurationResult) { - boolean restartResult = runOperationAndWaitForResult(subject, storageNodeResource, RESTART_OPERATION, - null); - - if (restartResult) { - storageNode.setJmxPort(storageNodeConfiguration.getJmxPort()); - entityManager.persist(storageNode); - - return true; - } - } + return runOperationAndWaitForResult(subject, storageNodeResource, UPDATE_CONFIGURATION_OPERATION, + parameters); }
return false; diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java index d9b35b9..41a0c42 100644 --- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java @@ -111,6 +111,8 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper }
private OperationResult updateConfiguration(Configuration params) { + boolean restartIsRequired = false; + OperationResult result = new OperationResult("Configuration updated.");
//update storage node jvm settings @@ -125,6 +127,31 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper
if (!configurationUpdate.getStatus().equals(ConfigurationUpdateStatus.SUCCESS)) { result.setErrorMessage(configurationUpdate.getErrorMessage()); + } else { + if (params.getSimpleValue("heapSize") != null + || params.getSimpleValue("heapNewSize") != null + || params.getSimpleValue("threadStackSize") != null) { + restartIsRequired = true; + } + } + + //restart the server if: + //- requested by the user + //- the updates done require restart + boolean restartIfRequiredConfig = false; + if (params.getSimpleValue("restartIfRequired") != null) { + restartIfRequiredConfig = Boolean.parseBoolean(params.getSimpleValue("restartIfRequired")); + } + + if (restartIfRequiredConfig && restartIsRequired) { + try { + OperationResult restartResult = this.invokeOperation("restart", null); + if (restartResult.getErrorMessage() != null) { + result.setErrorMessage(restartResult.getErrorMessage()); + } + } catch (Exception e) { + result.setErrorMessage(e.getMessage()); + } }
return result; diff --git a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml index cd84de6..aca3ba2 100644 --- a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml +++ b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml @@ -136,6 +136,7 @@ <c:simple-property name="threadStackSize" type="integer" description="The thread stack size. This memory is allocated to each thread off heap. The value should be an integer that will be interpreted in kilobytes."/> <c:simple-property name="jmxPort" type="integer" description="JMX port JVM option."/> + <c:simple-property name="restartIfRequired" type="boolean" required="true" default="false" description="If [true] then restart the server at the end of the update if and if only the updates made require a restart. If [false] the server will not be restarted regardless of the updates made."/> </parameters> <results> <c:simple-property name="operationResult" description="Outcome of updating the configuration."/>
commit f0ec56f27ca9b3871290d717232dd0fcec1b488c Author: Thomas Segismont tsegismo@redhat.com Date: Thu Jul 25 12:16:39 2013 +0200
CassandraNodeComponent#startNode now starts C* with a short CLASSPATH to allow later discovery on Linux
diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java index 70ba4b2..37aa425 100644 --- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java +++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java @@ -47,6 +47,7 @@ import com.datastax.driver.core.Session;
import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.hyperic.sigar.OperatingSystem; import org.hyperic.sigar.SigarException; import org.mc4j.ems.connection.EmsConnection; import org.mc4j.ems.connection.bean.EmsBean; @@ -67,7 +68,6 @@ import org.rhq.core.pluginapi.util.ProcessExecutionUtility; import org.rhq.core.system.ProcessExecution; import org.rhq.core.system.ProcessExecutionResults; import org.rhq.core.system.ProcessInfo; -import org.rhq.core.system.ProcessInfo.ProcessInfoSnapshot; import org.rhq.core.system.SystemInfo; import org.rhq.core.util.StringUtil; import org.rhq.core.util.exception.ThrowableUtil; @@ -186,16 +186,6 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent } }
- private ProcessInfoSnapshot getProcessInfoSnapshot() { - ProcessInfoSnapshot processInfoSnapshot = (processInfo == null) ? null : processInfo.freshSnapshot(); - if (processInfoSnapshot == null || !processInfoSnapshot.isRunning()) { - processInfo = getResourceContext().getNativeProcess(); - // Safe to get prior snapshot here, we've just recreated the process info instance - processInfoSnapshot = (processInfo == null) ? null : processInfo.priorSnaphot(); - } - return processInfoSnapshot; - } - @Override public OperationResult invokeOperation(String name, Configuration parameters) throws Exception { if (name.equals("shutdown")) { @@ -212,23 +202,6 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent return null; }
- private void waitForNodeToGoDown() throws InterruptedException { - for (ProcessInfoSnapshot processInfoSnapshot = getProcessInfoSnapshot();; processInfoSnapshot = getProcessInfoSnapshot()) { - if (processInfoSnapshot == null || !processInfoSnapshot.isRunning()) { - // Process not found, so it died, that's fine - // OR - // Process info says process is no longer running, that's fine as well - break; - } - if (getResourceContext().getComponentInvocationContext().isInterrupted()) { - // Operation canceled or timed out - throw new InterruptedException(); - } - // Process is still running, wait a second and check again - Thread.sleep(SECONDS.toMillis(1)); - } - } - @SuppressWarnings("rawtypes") protected OperationResult shutdownNode() { ResourceContext<?> context = getResourceContext(); @@ -292,21 +265,19 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent } }
- protected OperationResult startNode() { - ResourceContext<?> context = getResourceContext(); - Configuration pluginConfig = context.getPluginConfiguration(); + Configuration pluginConfig = getResourceContext().getPluginConfiguration(); String baseDir = pluginConfig.getSimpleValue("baseDir"); File binDir = new File(baseDir, "bin"); - File startScript = new File(binDir, getStartScript()); - File pidFile = new File(binDir, "cassandra.pid"); - - ProcessExecution scriptExe = ProcessExecutionUtility.createProcessExecution(startScript); - scriptExe.addArguments(asList("-p", pidFile.getAbsolutePath())); - SystemInfo systemInfo = context.getSystemInformation(); + if (!startScriptExists(binDir)) { + OperationResult failure = new OperationResult("Failed to start Cassandra daemon"); + failure.setErrorMessage("Start script does not exists"); + return failure; + } + ProcessExecution scriptExe = getProcessExecution(binDir); + SystemInfo systemInfo = getResourceContext().getSystemInformation(); ProcessExecutionResults results = systemInfo.executeProcess(scriptExe); - - if (results.getError() == null) { + if (results.getError() == null) { return new OperationResult("Successfully started Cassandra daemon"); } else { OperationResult failure = new OperationResult("Failed to start Cassandra daemon"); @@ -315,6 +286,30 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent } }
+ private boolean startScriptExists(File binDir) { + File file = new File(binDir, getStartScript()); + return file.exists() && !file.isDirectory(); + } + + private ProcessExecution getProcessExecution(File binDir) { + ProcessExecution scriptExe; + if (OperatingSystem.getInstance().getName().equals(OperatingSystem.NAME_WIN32)) { + File startScript = new File(binDir, getStartScript()); + scriptExe = ProcessExecutionUtility.createProcessExecution(startScript); + } else { + // On Linux, when Cassandra is started with an absolute path, the command line is too long and is truncated + // in /proc/pid/cmdline (beacuse of a long CLASSPATH made of absolute paths) + // This prevents the process from being later discovered because the process query argument criteria + // expects org.apache.cassandra.service.CassandraDaemon to be found + File startScript = new File("./" + getStartScript()); + scriptExe = ProcessExecutionUtility.createProcessExecution(startScript); + scriptExe.setCheckExecutableExists(false); + } + scriptExe.setWorkingDirectory(binDir.getAbsolutePath()); + scriptExe.addArguments(asList("-p", "cassandra.pid")); + return scriptExe; + } + protected OperationResult restartNode() { OperationResult result = shutdownNode();
diff --git a/modules/plugins/rhq-storage/pom.xml b/modules/plugins/rhq-storage/pom.xml index b1d50b8..ab97902 100644 --- a/modules/plugins/rhq-storage/pom.xml +++ b/modules/plugins/rhq-storage/pom.xml @@ -121,21 +121,27 @@ <plugin> <artifactId>maven-failsafe-plugin</artifactId> <version>2.13</version> + <configuration> + <includes> + <include>**/*ITest.java</include> + </includes> + <argLine>-Djava.library.path=${pc.lib.dir}</argLine> + <systemPropertyVariables> + <pc.plugins.dir>${pc.plugins.dir}</pc.plugins.dir> + </systemPropertyVariables> + <properties> + <property> + <name>listener</name> + <value>org.rhq.test.testng.StdoutReporter</value> + </property> + </properties> + </configuration> <executions> <execution> <id>integration-test</id> <goals> <goal>integration-test</goal> </goals> - <configuration> - <includes> - <include>**/*ITest.java</include> - </includes> - <argLine>-Djava.library.path=${pc.lib.dir}</argLine> - <systemPropertyVariables> - <pc.plugins.dir>${pc.plugins.dir}</pc.plugins.dir> - </systemPropertyVariables> - </configuration> </execution> <execution> <id>verify</id> @@ -155,6 +161,12 @@ <excludes> <exclude>**/*ITest.java</exclude> </excludes> + <properties> + <property> + <name>listener</name> + <value>org.rhq.test.testng.StdoutReporter</value> + </property> + </properties> </configuration> </plugin> </plugins> diff --git a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java index 5bc8b31..f0744a4 100644 --- a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java +++ b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java @@ -31,6 +31,7 @@ import java.util.Set;
import com.google.common.collect.Sets;
+import org.hyperic.sigar.OperatingSystem; import org.testng.annotations.AfterSuite; import org.testng.annotations.BeforeSuite; import org.testng.annotations.Test; @@ -113,14 +114,8 @@ public class StorageNodeComponentITest { File binDir = new File(basedir, "bin"); SystemInfo systemInfo = SystemInfoFactory.createSystemInfo();
- File startScript = new File("./cassandra"); - ProcessExecution startScriptExe = ProcessExecutionUtility.createProcessExecution(startScript); - startScriptExe.setWorkingDirectory(binDir.getAbsolutePath()); - startScriptExe.setCheckExecutableExists(false); - - startScriptExe.addArguments(asList("-p", "cassandra.pid")); - startScriptExe.setCaptureOutput(true); - ProcessExecutionResults results = systemInfo.executeProcess(startScriptExe); + ProcessExecution processExecution = getProcessExecution(binDir); + ProcessExecutionResults results = systemInfo.executeProcess(processExecution);
assertEquals(results.getExitCode(), (Integer) 0, "Cassandra failed to start: " + results.getCapturedOutput());
@@ -135,6 +130,22 @@ public class StorageNodeComponentITest { schemaManager.updateTopology(true); }
+ private ProcessExecution getProcessExecution(File binDir) { + ProcessExecution startScriptExe; + if (OperatingSystem.getInstance().getName().equals(OperatingSystem.NAME_WIN32)) { + File startScript = new File(binDir, "cassandra.bat"); + startScriptExe = ProcessExecutionUtility.createProcessExecution(startScript); + } else { + File startScript = new File("./cassandra"); + startScriptExe = ProcessExecutionUtility.createProcessExecution(startScript); + startScriptExe.setCheckExecutableExists(false); + } + startScriptExe.setWorkingDirectory(binDir.getAbsolutePath()); + startScriptExe.addArguments(asList("-p", "cassandra.pid")); + startScriptExe.setCaptureOutput(true); + return startScriptExe; + } + private void initPluginContainer() { PluginContainerConfiguration pcConfig = new PluginContainerConfiguration(); File pluginsDir = new File(System.getProperty("pc.plugins.dir"));
commit 17793afa6bd966d69b3198a936a2f0b0503e288b Author: Thomas Segismont tsegismo@redhat.com Date: Thu Jul 25 11:04:44 2013 +0200
Update intentional changes file (methods added in StorageNodeManagerRemote)
diff --git a/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml index 26e9e95..44886b4 100644 --- a/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml +++ b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml @@ -32,6 +32,20 @@ <difference> <className>org/rhq/enterprise/server/cloud/StorageNodeManagerRemote</className> <differenceType>7012</differenceType> <!-- method added to an interface --> + <method>org.rhq.core.domain.cloud.StorageNodeConfigurationComposite retrieveConfiguration(org.rhq.core.domain.auth.Subject, org.rhq.core.domain.cloud.StorageNode)</method> + <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> + </difference> + + <difference> + <className>org/rhq/enterprise/server/cloud/StorageNodeManagerRemote</className> + <differenceType>7012</differenceType> <!-- method added to an interface --> + <method>boolean updateConfiguration(org.rhq.core.domain.auth.Subject, org.rhq.core.domain.cloud.StorageNodeConfigurationComposite)</method> + <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> + </difference> + + <difference> + <className>org/rhq/enterprise/server/cloud/StorageNodeManagerRemote</className> + <differenceType>7012</differenceType> <!-- method added to an interface --> <method>org.rhq.core.domain.util.PageList findNotAcknowledgedStorageNodeAlerts(org.rhq.core.domain.auth.Subject, org.rhq.core.domain.cloud.StorageNode)</method> <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> </difference>
commit 600119036639c9e6bfb56a77f99c71d77d757646 Author: Thomas Segismont tsegismo@redhat.com Date: Thu Jul 25 09:44:45 2013 +0200
Update intentional changes file (methods added in StorageNodeManagerRemote)
diff --git a/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml index 58c4eda..26e9e95 100644 --- a/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml +++ b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml @@ -29,4 +29,18 @@ <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> </difference>
+ <difference> + <className>org/rhq/enterprise/server/cloud/StorageNodeManagerRemote</className> + <differenceType>7012</differenceType> <!-- method added to an interface --> + <method>org.rhq.core.domain.util.PageList findNotAcknowledgedStorageNodeAlerts(org.rhq.core.domain.auth.Subject, org.rhq.core.domain.cloud.StorageNode)</method> + <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> + </difference> + + <difference> + <className>org/rhq/enterprise/server/cloud/StorageNodeManagerRemote</className> + <differenceType>7012</differenceType> <!-- method added to an interface --> + <method>org.rhq.core.domain.util.PageList findAllStorageNodeAlerts(org.rhq.core.domain.auth.Subject, org.rhq.core.domain.cloud.StorageNode)</method> + <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> + </difference> + </differences>
commit 067cb8cc2d2204e187f104ad7cb5b8de2220cc25 Author: John Sanda jsanda@redhat.com Date: Wed Jul 24 23:07:53 2013 -0400
do availability check via jmx
CassandraNodeComponent previously was doing availability checks at the process level. Results were sometimes inconsistent in large part due to different behaviors on different platforms. Doing a JMX check is a more representative check since we perform most of the management via JMX. It also eliminates any platform-specific issues.
The check is done via a direct JMX connection instead of EMS to avoid the caching issues with EMS that could yield inaccurate results.
diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java index c41e8e7..70ba4b2 100644 --- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java +++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java @@ -35,6 +35,12 @@ import java.util.Date; import java.util.List; import java.util.Map;
+import javax.management.MBeanServerConnection; +import javax.management.ObjectName; +import javax.management.remote.JMXConnector; +import javax.management.remote.JMXConnectorFactory; +import javax.management.remote.JMXServiceURL; + import com.datastax.driver.core.Cluster; import com.datastax.driver.core.Cluster.Builder; import com.datastax.driver.core.Session; @@ -131,10 +137,11 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent public AvailabilityType getAvailability() { long start = System.nanoTime(); try { - // Get a fresh snapshot of the process - ProcessInfoSnapshot processInfoSnapshot = getProcessInfoSnapshot(); - return (processInfoSnapshot != null && processInfoSnapshot.isRunning()) ? AvailabilityType.UP - : AvailabilityType.DOWN; + if (isStorageServiceReachable()) { + return AvailabilityType.UP; + } + + return AvailabilityType.DOWN; } finally { long totalTimeMillis = NANOSECONDS.toMillis(System.nanoTime() - start); if (LOG.isDebugEnabled()) { @@ -146,6 +153,39 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent } }
+ private boolean isStorageServiceReachable() { + JMXConnector connector = null; + try { + Configuration pluginConfig = getResourceContext().getPluginConfiguration(); + String url = pluginConfig.getSimpleValue("connectorAddress"); + JMXServiceURL serviceURL = new JMXServiceURL(url); + connector = JMXConnectorFactory.connect(serviceURL, null); + + MBeanServerConnection serverConnection = connector.getMBeanServerConnection(); + ObjectName storageService = new ObjectName("org.apache.cassandra.db:type=StorageService"); + + // query an attribute to make sure it is in fact available + serverConnection.getAttribute(storageService, "NativeTransportRunning"); + + return true; + } catch (Exception e) { + if (LOG.isDebugEnabled()) { + LOG.debug("Failed to make JMX connection to StorageService", e); + } + return false; + } finally { + if (connector != null) { + try { + connector.close(); + } catch (IOException e) { + if (LOG.isDebugEnabled()) { + LOG.debug("An error occurred closing the JMX connector", e); + } + } + } + } + } + private ProcessInfoSnapshot getProcessInfoSnapshot() { ProcessInfoSnapshot processInfoSnapshot = (processInfo == null) ? null : processInfo.freshSnapshot(); if (processInfoSnapshot == null || !processInfoSnapshot.isRunning()) { @@ -158,10 +198,8 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent
@Override public OperationResult invokeOperation(String name, Configuration parameters) throws Exception { - if (name.equals("shutdown")) { OperationResult operationResult = shutdownNode(); - waitForNodeToGoDown(); return operationResult; } else if (name.equals("start")) { return startNode(); @@ -234,6 +272,8 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent
long pid = process.getPid(); try { + getEmsConnection().close(); + process.kill("KILL");
Configuration pluginConfig = getResourceContext().getPluginConfiguration();
commit 8dfcad9d2bdb3138651e19e65e784c999106a5d6 Author: Jay Shaughnessy jshaughn@redhat.com Date: Wed Jul 24 17:28:38 2013 -0400
Work in progress - getting new bundle perms integrated into role views
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/authz/Permission.java b/modules/core/domain/src/main/java/org/rhq/core/domain/authz/Permission.java index 40e3150..5b009b5 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/authz/Permission.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/authz/Permission.java @@ -145,15 +145,15 @@ public enum Permission {
/** * Can create Bundle [Versions]s + * Can assign to viewable bundle groups * Can create global Bundle [Versions] if holding Global.VIEW_BUNDLES - * Can assign to viewable bundle groups */ CREATE_BUNDLES(Target.GLOBAL), // 19
/** - * Can delete bundle [Versions]s from viewable groups + * Can delete viewable bundle [Versions]s + * Can unassign from viewable bundle groups * Can delete global bundles if holding Global.VIEW_BUNDLES - * Can unassign from viewable bundle groups */ DELETE_BUNDLES(Target.GLOBAL), // 20
@@ -188,7 +188,7 @@ public enum Permission { /** * Can delete bundle [version]s from the bundle group */ - DELETE_BUNDLES_IN_GROUP(Target.BUNDLE), // 26 + DELETE_BUNDLES_FROM_GROUP(Target.BUNDLE), // 26
/** * Implied - Can view bundles in the bundle group diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/PermissionsEditor.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/PermissionsEditor.java index d949617..cde2c6d 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/PermissionsEditor.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/PermissionsEditor.java @@ -48,8 +48,8 @@ import org.rhq.core.domain.authz.Role; import org.rhq.core.domain.common.ProductInfo; import org.rhq.enterprise.gui.coregui.client.CoreGUI; import org.rhq.enterprise.gui.coregui.client.Messages; -import org.rhq.enterprise.gui.coregui.client.util.message.Message; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVStack; +import org.rhq.enterprise.gui.coregui.client.util.message.Message;
/** * An editor for editing the set of RHQ {@link Permission permission}s associated with an RHQ {@link Role role}. @@ -62,6 +62,7 @@ public class PermissionsEditor extends EnhancedVStack {
private ListGrid globalPermissionsGrid; private ListGrid resourcePermissionsGrid; + private ListGrid bundleGroupPermissionsGrid; private Set<Permission> selectedPermissions; private RoleEditView roleEditView; private boolean isReadOnly; @@ -99,6 +100,10 @@ public class PermissionsEditor extends EnhancedVStack {
this.resourcePermissionsGrid = createResourcePermissionsGrid(); addMember(this.resourcePermissionsGrid); + + this.bundleGroupPermissionsGrid = createBundleGroupPermissionsGrid(); + addMember(this.bundleGroupPermissionsGrid); + }
public void reset() { @@ -162,31 +167,57 @@ public class PermissionsEditor extends EnhancedVStack { grid.setFields(iconField, displayNameField, authorizedField, descriptionField);
List<ListGridRecord> records = new ArrayList<ListGridRecord>(); - ListGridRecord record = createGlobalPermissionRecord(MSG.view_adminRoles_permissions_perm_manageSecurity(), + ListGridRecord record = createPermissionRecord(MSG.view_adminRoles_permissions_perm_manageSecurity(), "global/Locked", Permission.MANAGE_SECURITY, MSG.view_adminRoles_permissions_permDesc_manageSecurity()); records.add(record);
- record = createGlobalPermissionRecord(MSG.view_adminRoles_permissions_perm_manageInventory(), + record = createPermissionRecord(MSG.view_adminRoles_permissions_perm_manageInventory(), "subsystems/inventory/Inventory", Permission.MANAGE_INVENTORY, MSG.view_adminRoles_permissions_permDesc_manageInventory()); records.add(record);
- record = createGlobalPermissionRecord(MSG.view_adminRoles_permissions_perm_manageSettings(), + record = createPermissionRecord(MSG.view_adminRoles_permissions_perm_manageSettings(), "subsystems/configure/Configure", Permission.MANAGE_SETTINGS, MSG.view_adminRoles_permissions_permDesc_manageSettings(productInfo.getShortName())); records.add(record);
- record = createGlobalPermissionRecord(MSG.view_adminRoles_permissions_perm_manageBundles(), + record = createPermissionRecord(MSG.view_adminRoles_permissions_perm_manageRepositories(), + "subsystems/content/Content", Permission.MANAGE_REPOSITORIES, + MSG.view_adminRoles_permissions_permDesc_manageRepositories()); + records.add(record); + + record = createPermissionRecord(MSG.view_adminRoles_permissions_perm_manageBundles(), "subsystems/bundle/Bundle", Permission.MANAGE_BUNDLE, MSG.view_adminRoles_permissions_permDesc_manageBundles()); records.add(record);
- record = createGlobalPermissionRecord(MSG.view_adminRoles_permissions_perm_manageRepositories(), - "subsystems/content/Content", Permission.MANAGE_REPOSITORIES, - MSG.view_adminRoles_permissions_permDesc_manageRepositories()); + record = createPermissionRecord(MSG.view_adminRoles_permissions_perm_manageBundleGroups(), + "subsystems/content/Content", Permission.MANAGE_BUNDLE_GROUPS, + MSG.view_adminRoles_permissions_permDesc_manageBundleGroups()); + records.add(record); + + record = createPermissionRecord(MSG.view_adminRoles_permissions_perm_createBundles(), + "subsystems/content/Content", Permission.CREATE_BUNDLES, + MSG.view_adminRoles_permissions_permDesc_createBundles()); + records.add(record); + + record = createPermissionRecord(MSG.view_adminRoles_permissions_perm_deleteBundles(), + "subsystems/content/Content", Permission.DELETE_BUNDLES, + MSG.view_adminRoles_permissions_permDesc_deleteBundles()); + records.add(record); + + record = createPermissionRecord(MSG.view_adminRoles_permissions_perm_viewBundles(), + "subsystems/content/Content", Permission.VIEW_BUNDLES, + MSG.view_adminRoles_permissions_permDesc_viewBundles()); records.add(record);
- record = createGlobalPermissionRecord(MSG.view_adminRoles_permissions_perm_viewUsers(), "global/User", + record = createPermissionRecord(MSG.view_adminRoles_permissions_perm_deployBundles(), + "subsystems/content/Content", Permission.DEPLOY_BUNDLES, + MSG.view_adminRoles_permissions_permDesc_deployBundles()); + records.add(record); + + + record = createPermissionRecord(MSG.view_adminRoles_permissions_perm_viewUsers(), "global/User", Permission.VIEW_USERS, MSG.view_adminRoles_permissions_permDesc_viewUsers()); records.add(record);
@@ -280,6 +311,54 @@ public class PermissionsEditor extends EnhancedVStack { return grid; }
+ private ListGrid createBundleGroupPermissionsGrid() { + ListGrid grid = createPermissionsGrid(); + // TODO: Add table title. + + ListGridField iconField = createIconField(); + + ListGridField displayNameField = new ListGridField("displayName", MSG.common_title_name(), 130); + + ListGridField descriptionField = new ListGridField("description", MSG.common_title_description()); + descriptionField.setWrap(true); + + final ListGridField authorizedField = createAuthorizedField("authorized", + MSG.view_adminRoles_permissions_isAuthorized(), "name", grid, false); + + grid.setFields(iconField, displayNameField, authorizedField, descriptionField); + + List<ListGridRecord> records = new ArrayList<ListGridRecord>(); + + ListGridRecord record = createPermissionRecord(MSG.view_adminRoles_permissions_perm_assignBundlesToGroup(), + "subsystems/bundle/Bundle", Permission.ASSIGN_BUNDLES_TO_GROUP, + MSG.view_adminRoles_permissions_permDesc_assignBundlesToGroup()); + records.add(record); + + record = createPermissionRecord(MSG.view_adminRoles_permissions_perm_unassignBundlesFromGroup(), + "subsystems/bundle/Bundle", Permission.UNASSIGN_BUNDLES_FROM_GROUP, + MSG.view_adminRoles_permissions_permDesc_unassignBundlesFromGroup()); + records.add(record); + + record = createPermissionRecord(MSG.view_adminRoles_permissions_perm_createBundlesInGroup(), + "subsystems/bundle/Bundle", Permission.CREATE_BUNDLES_IN_GROUP, + MSG.view_adminRoles_permissions_permDesc_createBundlesInGroup()); + records.add(record); + + record = createPermissionRecord(MSG.view_adminRoles_permissions_perm_deleteBundlesFromGroup(), + "subsystems/bundle/Bundle", Permission.DELETE_BUNDLES_FROM_GROUP, + MSG.view_adminRoles_permissions_permDesc_deleteBundlesFromGroup()); + records.add(record); + + record = createPermissionRecord(MSG.view_adminRoles_permissions_perm_viewBundlesInGroup(), + "subsystems/bundle/Bundle", Permission.VIEW_BUNDLES_IN_GROUP, + MSG.view_adminRoles_permissions_permDesc_viewBundlesInGroup()); + 
records.add(record); + + grid.setData(records.toArray(new ListGridRecord[records.size()])); + + return grid; + } + private ListGridField createIconField() { ListGridField iconField = new ListGridField("icon", " ", 28); iconField.setShowDefaultContextMenu(false); @@ -420,7 +499,7 @@ public class PermissionsEditor extends EnhancedVStack { } }
- private ListGridRecord createGlobalPermissionRecord(String displayName, String icon, Permission globalPermission, + private ListGridRecord createPermissionRecord(String displayName, String icon, Permission globalPermission, String description) { ListGridRecord record = new ListGridRecord(); record.setAttribute("displayName", displayName); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleBundleGroupSelector.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleBundleGroupSelector.java new file mode 100644 index 0000000..e3876ca --- /dev/null +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleBundleGroupSelector.java @@ -0,0 +1,40 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation, and/or the GNU Lesser + * General Public License, version 2.1, also as published by the Free + * Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License and the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * and the GNU Lesser General Public License along with this program; + * if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ +package org.rhq.enterprise.gui.coregui.client.admin.roles; +import com.smartgwt.client.widgets.grid.ListGridRecord; + +import org.rhq.enterprise.gui.coregui.client.bundle.group.BundleGroupSelector; + + +/** + * @author Jay Shaughnessy + */ +public class RoleBundleGroupSelector extends BundleGroupSelector { + + public RoleBundleGroupSelector(ListGridRecord[] bundleGroupRecords, boolean isReadOnly) { + super(isReadOnly); + + setAssigned(bundleGroupRecords); + } + +} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleEditView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleEditView.java index 941c7ee..63b6e3f 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleEditView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RoleEditView.java @@ -46,6 +46,7 @@ import org.rhq.enterprise.gui.coregui.client.PermissionsLoader; import org.rhq.enterprise.gui.coregui.client.UserSessionManager; import org.rhq.enterprise.gui.coregui.client.ViewPath; import org.rhq.enterprise.gui.coregui.client.admin.users.UsersDataSource; +import org.rhq.enterprise.gui.coregui.client.bundle.group.BundleGroupSelector; import org.rhq.enterprise.gui.coregui.client.components.form.AbstractRecordEditor; import org.rhq.enterprise.gui.coregui.client.components.form.EnhancedDynamicForm; import org.rhq.enterprise.gui.coregui.client.components.selector.AssignedItemsChangedEvent; @@ -75,6 +76,9 @@ public class RoleEditView extends AbstractRecordEditor<RolesDataSource> implemen private Tab ldapGroupsTab; private RoleLdapGroupSelector ldapGroupSelector;
+ private Tab bundleGroupsTab; + private BundleGroupSelector bundleGroupSelector; + private boolean hasManageSecurityPermission; private boolean isLdapConfigured; private boolean isSystemRole; @@ -161,6 +165,9 @@ public class RoleEditView extends AbstractRecordEditor<RolesDataSource> implemen if (!this.isSystemRole) { this.resourceGroupsTab = buildResourceGroupsTab(tabSet); tabSet.addTab(resourceGroupsTab); + + this.bundleGroupsTab = buildBundleGroupsTab(tabSet); + tabSet.addTab(bundleGroupsTab); }
this.subjectsTab = buildSubjectsTab(tabSet); @@ -189,6 +196,13 @@ public class RoleEditView extends AbstractRecordEditor<RolesDataSource> implemen return tab; }
+ private Tab buildBundleGroupsTab(TabSet tabSet) { + Tab tab = new Tab(MSG.common_title_bundleGroups(), ImageManager.getBundleIcon()); + // NOTE: We will set the tab content to the bundle group selector later once the Role has been fetched. + + return tab; + } + private Tab buildSubjectsTab(TabSet tabSet) { Tab tab = new Tab(MSG.common_title_users(), "global/User_16.png"); // NOTE: We will set the tab content to the subject selector later once the Role has been fetched. @@ -244,6 +258,18 @@ public class RoleEditView extends AbstractRecordEditor<RolesDataSource> implemen } }); updateTab(this.resourceGroupsTab, this.resourceGroupSelector); + + Record[] bundleGroupRecords = record.getAttributeAsRecordArray(RolesDataSource.Field.BUNDLE_GROUPS); + ListGridRecord[] bundleGroupListGridRecords = toListGridRecordArray(bundleGroupRecords); + this.bundleGroupSelector = new RoleBundleGroupSelector(bundleGroupListGridRecords, + !this.hasManageSecurityPermission); + this.bundleGroupSelector.addAssignedItemsChangedHandler(new AssignedItemsChangedHandler() { + public void onSelectionChanged(AssignedItemsChangedEvent event) { + onItemChanged(); + } + }); + updateTab(this.bundleGroupsTab, this.bundleGroupSelector); + }
ListGridRecord[] subjectListGridRecords = toListGridRecordArray(subjectRecords); @@ -341,6 +367,11 @@ public class RoleEditView extends AbstractRecordEditor<RolesDataSource> implemen getForm().setValue(RolesDataSource.Field.LDAP_GROUPS, ldapGroupRecords); }
+ if (this.bundleGroupSelector != null) { + ListGridRecord[] bundleGroupRecords = this.bundleGroupSelector.getSelectedRecords(); + getForm().setValue(RolesDataSource.Field.BUNDLE_GROUPS, bundleGroupRecords); + } + // Submit the form values to the datasource. super.save(requestProperties); } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RolesDataSource.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RolesDataSource.java index b454fad..047dacc 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RolesDataSource.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/roles/RolesDataSource.java @@ -37,11 +37,13 @@ import com.smartgwt.client.widgets.grid.ListGridRecord; import org.rhq.core.domain.auth.Subject; import org.rhq.core.domain.authz.Permission; import org.rhq.core.domain.authz.Role; +import org.rhq.core.domain.bundle.BundleGroup; import org.rhq.core.domain.criteria.RoleCriteria; import org.rhq.core.domain.resource.group.LdapGroup; import org.rhq.core.domain.resource.group.ResourceGroup; import org.rhq.core.domain.util.PageList; import org.rhq.enterprise.gui.coregui.client.admin.users.UsersDataSource; +import org.rhq.enterprise.gui.coregui.client.bundle.group.BundleGroupsDataSource; import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; import org.rhq.enterprise.gui.coregui.client.gwt.RoleGWTServiceAsync; import org.rhq.enterprise.gui.coregui.client.inventory.groups.ResourceGroupsDataSource; @@ -61,6 +63,7 @@ public class RolesDataSource extends RPCDataSource<Role, RoleCriteria> { public static final String NAME = "name"; public static final String DESCRIPTION = "description"; public static final String RESOURCE_GROUPS = "resourceGroups"; + public static final String BUNDLE_GROUPS = "bundleGroups"; public static final String PERMISSIONS = 
"permissions"; public static final String SUBJECTS = "subjects"; public static final String LDAP_GROUPS = "ldapGroups"; @@ -125,6 +128,10 @@ public class RolesDataSource extends RPCDataSource<Role, RoleCriteria> { MSG.datasource_roles_field_ldapGroups()); fields.add(ldapGroupsField);
+ DataSourceField bundleGroupsField = new DataSourceField(Field.BUNDLE_GROUPS, FieldType.ANY, + MSG.common_title_bundleGroups()); + fields.add(bundleGroupsField); + return fields; }
@@ -223,6 +230,10 @@ public class RolesDataSource extends RPCDataSource<Role, RoleCriteria> { Set<LdapGroup> ldapGroups = new RoleLdapGroupSelector.LdapGroupsDataSource().buildDataObjects(ldapGroupRecords); to.setLdapGroups(ldapGroups);
+ Record[] bundleGroupRecords = from.getAttributeAsRecordArray(Field.BUNDLE_GROUPS); + Set<BundleGroup> bundleGroups = BundleGroupsDataSource.getInstance().buildDataObjects(bundleGroupRecords); + to.setBundleGroups(bundleGroups); + return to; }
@@ -256,6 +267,12 @@ public class RolesDataSource extends RPCDataSource<Role, RoleCriteria> { ListGridRecord[] ldapGroupRecords = new RoleLdapGroupSelector.LdapGroupsDataSource() .buildRecords(ldapGroups); targetRecord.setAttribute(Field.LDAP_GROUPS, ldapGroupRecords); + + Set<BundleGroup> bundleGroups = sourceRole.getBundleGroups(); + ListGridRecord[] bundleGroupRecords = BundleGroupsDataSource.getInstance() + .buildRecords(bundleGroups, false); + targetRecord.setAttribute(Field.BUNDLE_GROUPS, bundleGroupRecords); + }
return targetRecord; diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/bundle/group/BundleGroupSelector.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/bundle/group/BundleGroupSelector.java new file mode 100644 index 0000000..3a5c047 --- /dev/null +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/bundle/group/BundleGroupSelector.java @@ -0,0 +1,83 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ +package org.rhq.enterprise.gui.coregui.client.bundle.group; + +import com.smartgwt.client.data.Criteria; +import com.smartgwt.client.data.DSRequest; +import com.smartgwt.client.widgets.form.DynamicForm; +import com.smartgwt.client.widgets.form.fields.TextItem; + +import org.rhq.core.domain.bundle.BundleGroup; +import org.rhq.core.domain.criteria.BundleGroupCriteria; +import org.rhq.enterprise.gui.coregui.client.components.selector.AbstractSelector; +import org.rhq.enterprise.gui.coregui.client.util.RPCDataSource; + +/** + * @author Jay Shaughnessy + */ +public class BundleGroupSelector extends AbstractSelector<BundleGroup, BundleGroupCriteria> { + + + public BundleGroupSelector() { + this(false); + } + + public BundleGroupSelector(boolean isReadOnly) { + super(isReadOnly); + } + + protected DynamicForm getAvailableFilterForm() { + DynamicForm availableFilterForm = new DynamicForm(); + availableFilterForm.setNumCols(4); + final TextItem search = new TextItem("search", MSG.common_title_search()); + + availableFilterForm.setItems(search); + + return availableFilterForm; + } + + protected RPCDataSource<BundleGroup, BundleGroupCriteria> getDataSource() { + return new SelectedBundleGroupsDataSource(); + } + + protected Criteria getLatestCriteria(DynamicForm availableFilterForm) { + String search = (String) availableFilterForm.getValue("search"); + + Criteria latestCriteria = new Criteria(); + latestCriteria.addCriteria("search", search); + + return latestCriteria; + } + + @Override + protected String getItemTitle() { + return MSG.common_title_bundleGroups(); + } + + public class SelectedBundleGroupsDataSource extends BundleGroupsDataSource { + @Override + protected BundleGroupCriteria getFetchCriteria(final DSRequest request) { + BundleGroupCriteria result = super.getFetchCriteria(request); + if (null != result) { + result.setStrict(false); + } + return result; + } + } +} diff --git 
a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/bundle/group/BundleGroupsDataSource.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/bundle/group/BundleGroupsDataSource.java new file mode 100644 index 0000000..4f76f06 --- /dev/null +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/bundle/group/BundleGroupsDataSource.java @@ -0,0 +1,126 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ +package org.rhq.enterprise.gui.coregui.client.bundle.group; + +import java.util.List; + +import com.google.gwt.user.client.rpc.AsyncCallback; +import com.smartgwt.client.data.DSRequest; +import com.smartgwt.client.data.DSResponse; +import com.smartgwt.client.data.DataSourceField; +import com.smartgwt.client.data.Record; +import com.smartgwt.client.data.fields.DataSourceIntegerField; +import com.smartgwt.client.data.fields.DataSourceTextField; +import com.smartgwt.client.widgets.grid.ListGridRecord; + +import org.rhq.core.domain.bundle.BundleGroup; +import org.rhq.core.domain.criteria.BundleGroupCriteria; +import org.rhq.core.domain.util.PageList; +import org.rhq.enterprise.gui.coregui.client.CoreGUI; +import org.rhq.enterprise.gui.coregui.client.gwt.BundleGWTServiceAsync; +import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; +import org.rhq.enterprise.gui.coregui.client.util.RPCDataSource; + +/** + * @author Jay Shaughnessy + */ +public class BundleGroupsDataSource extends RPCDataSource<BundleGroup, BundleGroupCriteria> { + + private BundleGWTServiceAsync bundleService = GWTServiceLookup.getBundleService(); + + private static BundleGroupsDataSource INSTANCE; + + public static BundleGroupsDataSource getInstance() { + if (INSTANCE == null) { + INSTANCE = new BundleGroupsDataSource(); + } + return INSTANCE; + } + + public BundleGroupsDataSource() { + super(); + List<DataSourceField> fields = addDataSourceFields(); + addFields(fields); + } + + @Override + protected List<DataSourceField> addDataSourceFields() { + List<DataSourceField> fields = super.addDataSourceFields(); + + DataSourceIntegerField idField = new DataSourceIntegerField("id", MSG.common_title_id()); + idField.setPrimaryKey(true); + fields.add(idField); + + DataSourceTextField nameField = new DataSourceTextField("name", MSG.common_title_name()); + fields.add(nameField); + + DataSourceTextField descriptionField = new DataSourceTextField("description", MSG.common_title_description()); + 
fields.add(descriptionField); + + return fields; + } + + @Override + protected void executeFetch(final DSRequest request, final DSResponse response, final BundleGroupCriteria criteria) { + bundleService.findBundleGroupsByCriteria(criteria, new AsyncCallback<PageList<BundleGroup>>() { + public void onFailure(Throwable caught) { + CoreGUI.getErrorHandler().handleError(MSG.dataSource_bundle_loadFailed(), caught); + response.setStatus(DSResponse.STATUS_FAILURE); + processResponse(request.getRequestId(), response); + } + + public void onSuccess(PageList<BundleGroup> result) { + response.setData(buildRecords(result)); + response.setTotalRows(result.getTotalSize()); + processResponse(request.getRequestId(), response); + } + }); + } + + @Override + protected BundleGroupCriteria getFetchCriteria(final DSRequest request) { + BundleGroupCriteria criteria = new BundleGroupCriteria(); + // may support tags in future, but not in rev1 + //criteria.addFilterTagNamespace(getFilter(request, "tagNamespace", String.class)); + //criteria.addFilterTagSemantic(getFilter(request, "tagSemantic", String.class)); + //criteria.addFilterTagName(getFilter(request, "tagName", String.class)); + //criteria.addFilterTagSemantic(getFilter(request, "tagSemantic", String.class)); + criteria.addFilterName(getFilter(request, "search", String.class)); + + return criteria; + } + + @Override + public BundleGroup copyValues(Record from) { + return (BundleGroup) from.getAttributeAsObject("object"); + } + + @Override + public ListGridRecord copyValues(BundleGroup from) { + ListGridRecord record = new ListGridRecord(); + + record.setAttribute("id", from.getId()); + record.setAttribute("name", from.getName()); + record.setAttribute("description", from.getDescription()); + + record.setAttribute("object", from); + + return record; + } +} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/BundleGWTService.java 
b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/BundleGWTService.java index 54c3085..cd108b7 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/BundleGWTService.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/BundleGWTService.java @@ -27,6 +27,7 @@ import org.rhq.core.domain.bundle.Bundle; import org.rhq.core.domain.bundle.BundleDeployment; import org.rhq.core.domain.bundle.BundleDestination; import org.rhq.core.domain.bundle.BundleFile; +import org.rhq.core.domain.bundle.BundleGroup; import org.rhq.core.domain.bundle.BundleResourceDeployment; import org.rhq.core.domain.bundle.BundleType; import org.rhq.core.domain.bundle.BundleVersion; @@ -37,6 +38,7 @@ import org.rhq.core.domain.criteria.BundleCriteria; import org.rhq.core.domain.criteria.BundleDeploymentCriteria; import org.rhq.core.domain.criteria.BundleDestinationCriteria; import org.rhq.core.domain.criteria.BundleFileCriteria; +import org.rhq.core.domain.criteria.BundleGroupCriteria; import org.rhq.core.domain.criteria.BundleResourceDeploymentCriteria; import org.rhq.core.domain.criteria.BundleVersionCriteria; import org.rhq.core.domain.util.PageList; @@ -70,6 +72,8 @@ public interface BundleGWTService extends RemoteService {
PageList<Bundle> findBundlesByCriteria(BundleCriteria criteria) throws RuntimeException;
+ PageList<BundleGroup> findBundleGroupsByCriteria(BundleGroupCriteria criteria) throws RuntimeException; + PageList<Bundle> findBundlesByCriteriaWithDestinationFilter(BundleCriteria criteria) throws RuntimeException;
PageList<BundleDeployment> findBundleDeploymentsByCriteria(BundleDeploymentCriteria criteria) diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/BundleGWTServiceImpl.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/BundleGWTServiceImpl.java index e51383d..88458b8 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/BundleGWTServiceImpl.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/BundleGWTServiceImpl.java @@ -25,6 +25,7 @@ import org.rhq.core.domain.bundle.Bundle; import org.rhq.core.domain.bundle.BundleDeployment; import org.rhq.core.domain.bundle.BundleDestination; import org.rhq.core.domain.bundle.BundleFile; +import org.rhq.core.domain.bundle.BundleGroup; import org.rhq.core.domain.bundle.BundleResourceDeployment; import org.rhq.core.domain.bundle.BundleType; import org.rhq.core.domain.bundle.BundleVersion; @@ -35,6 +36,7 @@ import org.rhq.core.domain.criteria.BundleCriteria; import org.rhq.core.domain.criteria.BundleDeploymentCriteria; import org.rhq.core.domain.criteria.BundleDestinationCriteria; import org.rhq.core.domain.criteria.BundleFileCriteria; +import org.rhq.core.domain.criteria.BundleGroupCriteria; import org.rhq.core.domain.criteria.BundleResourceDeploymentCriteria; import org.rhq.core.domain.criteria.BundleVersionCriteria; import org.rhq.core.domain.util.PageList; @@ -234,6 +236,16 @@ public class BundleGWTServiceImpl extends AbstractGWTServiceImpl implements Bund }
@Override + public PageList<BundleGroup> findBundleGroupsByCriteria(BundleGroupCriteria criteria) throws RuntimeException { + try { + PageList<BundleGroup> results = bundleManager.findBundleGroupsByCriteria(getSessionSubject(), criteria); + return SerialUtility.prepare(results, "findBundleGroupsByCriteria"); + } catch (Throwable t) { + throw getExceptionToThrowToClient(t); + } + } + + @Override public PageList<Bundle> findBundlesByCriteriaWithDestinationFilter(BundleCriteria criteria) throws RuntimeException { try { PageList<Bundle> results = bundleManager.findBundlesByCriteriaWithDestinationFilter(getSessionSubject(), criteria); diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties index 19d3fa5..1f9c73c 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties @@ -43,17 +43,6 @@ common_button_ack_all = Acknowledge All common_button_add = Add common_button_advanced = Advanced... common_button_apply = Apply -common_buttonbar_custom_window_title= Date Range -common_buttonbar_custom_window_subtitle= Custom -common_buttonbar_datetime_format= MM/dd/yyyy h:mm a -common_buttonbar_datetime_format_moment_js= MM/DD/YYYY h:mm a -common_buttonbar_end_date= End Date -common_buttonbar_end_time= End Time -common_buttonbar_start_date= Start Date -common_buttonbar_start_time= Start Time -common_buttonbar_custom = Custom... -common_buttonbar_custom_cancel = Cancel -common_buttonbar_custom_save = Save common_button_cancel = Cancel common_button_close = Close common_button_compare = Compare @@ -86,6 +75,17 @@ common_button_set = Set common_button_showDetails = Show Details... 
common_button_unignore = Unignore common_button_uninventory = Uninventory +common_buttonbar_custom = Custom... +common_buttonbar_custom_cancel = Cancel +common_buttonbar_custom_save = Save +common_buttonbar_custom_window_subtitle = Custom +common_buttonbar_custom_window_title = Date Range +common_buttonbar_datetime_format = MM/dd/yyyy h:mm a +common_buttonbar_datetime_format_moment_js = MM/DD/YYYY h:mm a +common_buttonbar_end_date = End Date +common_buttonbar_end_time = End Time +common_buttonbar_start_date = Start Date +common_buttonbar_start_time = Start Time common_calendar_april_short = apr common_calendar_august_short = aug common_calendar_december_short = dec @@ -158,6 +158,7 @@ common_title_available_resources = Available Resources common_title_average_metrics = Average Metrics per Minute common_title_background = Background common_title_bundle = Bundle +common_title_bundleGroups = Bundle Groups common_title_bundles = Bundles common_title_category = Category common_title_change_refresh_time = Refresh Interval @@ -511,11 +512,21 @@ view_adminRoles_permissions_illegalDeselectionDueToManageSecuritySelection = {0} view_adminRoles_permissions_isAuthorized = Authorized? view_adminRoles_permissions_isRead = Read? view_adminRoles_permissions_isWrite = Write? +view_adminRoles_permissions_permDesc_assignBundlesToGroup = can copy a viewable bundle to the bundle group +view_adminRoles_permissions_permDesc_createBundles = can create new bundle [version]s. can copy existing bundles between viewable groups +view_adminRoles_permissions_permDesc_createBundlesInGroup = can create new bundle [version]s for the bundle group. can copy a viewable bundle to the bundle group. 
+view_adminRoles_permissions_permDesc_deleteBundles = can delete or unassign viewable bundle [version]s +view_adminRoles_permissions_permDesc_deleteBundlesFromGroup = can delete bundle [version]s from the bundle group (implicitly deleting them from other assigned groups) +view_adminRoles_permissions_permDesc_deployBundles = can deploy any viewable bundle version to any viewable, deployable, compatible, resource group +view_adminRoles_permissions_permDesc_manageBundleGroups = can create, update, or delete bundle groups view_adminRoles_permissions_permDesc_manageBundles = can create, update, or delete provisioning bundles (viewing is implied for everyone) view_adminRoles_permissions_permDesc_manageInventory = has all Resource permissions, as described below, for all Resources; can create, update, and delete groups; and can import auto-discovered or manually discovered Resources view_adminRoles_permissions_permDesc_manageRepositories = can create, update, or delete repositories of any user (everyone can create their own repositories), can associate content sources to repositories. 
view_adminRoles_permissions_permDesc_manageSecurity = can create, update, or delete users and roles - implies all other permissions view_adminRoles_permissions_permDesc_manageSettings = can modify the {0} Server configuration and perform any Server-related functionality +view_adminRoles_permissions_permDesc_unassignBundlesFromGroup = can unassign (not delete) a bundle from the bundle group +view_adminRoles_permissions_permDesc_viewBundles = can view any bundle including global bundles (those not assigned to any bundle group) +view_adminRoles_permissions_permDesc_viewBundlesInGroup = can view any bundle in the group view_adminRoles_permissions_permDesc_viewUsers = can view other users, with the exception of their assigned roles view_adminRoles_permissions_permReadDesc_configure = view Resource configuration and Resource configuration revision history view_adminRoles_permissions_permReadDesc_control = (IMPLIED) view available operations and operation execution history @@ -537,12 +548,19 @@ view_adminRoles_permissions_permWriteDesc_manageContent = subscribe to content s view_adminRoles_permissions_permWriteDesc_manageDrift = create, update, and delete drift definitions; and manage drift instances view_adminRoles_permissions_permWriteDesc_manageEvents = delete events view_adminRoles_permissions_permWriteDesc_manageMeasurements = update metric collection schedules +view_adminRoles_permissions_perm_assignBundlesToGroup = Assign Bundles To Group view_adminRoles_permissions_perm_configure = Configure view_adminRoles_permissions_perm_control = Control +view_adminRoles_permissions_perm_createBundles = Create Bundles +view_adminRoles_permissions_perm_createBundlesInGroup = Create Bundles In Group view_adminRoles_permissions_perm_createChildResources = Create Child Resources +view_adminRoles_permissions_perm_deleteBundles = Delete Bundles +view_adminRoles_permissions_perm_deleteBundlesFromGroup = Delete Bundles From Group view_adminRoles_permissions_perm_deleteChildResources = 
Delete Child Resources +view_adminRoles_permissions_perm_deployBundles = Deploy Bundles view_adminRoles_permissions_perm_inventory = Inventory view_adminRoles_permissions_perm_manageAlerts = Manage Alerts +view_adminRoles_permissions_perm_manageBundleGroups = Manage Bundle Groups view_adminRoles_permissions_perm_manageBundles = Manage Bundles view_adminRoles_permissions_perm_manageContent = Manage Content view_adminRoles_permissions_perm_manageDrift = Manage Drift @@ -552,6 +570,9 @@ view_adminRoles_permissions_perm_manageMeasurements = Manage Measurements view_adminRoles_permissions_perm_manageRepositories = Manage Repositories view_adminRoles_permissions_perm_manageSecurity = Manage Security view_adminRoles_permissions_perm_manageSettings = Manage Settings +view_adminRoles_permissions_perm_unassignBundlesFromGroup = Unassign Bundles From Group +view_adminRoles_permissions_perm_viewBundles = View Bundles +view_adminRoles_permissions_perm_viewBundlesInGroup = View Bundles In Group view_adminRoles_permissions_perm_viewUsers = View Users view_adminRoles_permissions_read = Read: view_adminRoles_permissions_readAccessImplied = Read access for the {0} permission is implied and cannot be disabled. diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties index f71f907..4e73829 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties @@ -66,17 +66,6 @@ common_button_ack_all = Potvrdit vÅ¡echny common_button_add = PÅidat common_button_advanced = RozÅ¡ÃÅené... 
common_button_apply = PouÅŸÃt -##common_buttonbar_custom_window_title= Date Range -##common_buttonbar_custom_window_subtitle= Custom -common_buttonbar_datetime_format=dd.MM.yy hh:mm -common_buttonbar_datetime_format_moment_js= DD.MM.YY HH:mm -##common_buttonbar_end_date= End Date -##common_buttonbar_end_time= End Time -##common_buttonbar_start_date= Start Date -##common_buttonbar_start_time= Start Time -##common_buttonbar_custom = Custom... -##common_buttonbar_custom_cancel = Cancel -##common_buttonbar_custom_save = Save common_button_cancel = Storno common_button_close = ZavÅÃt common_button_compare = Porovnat @@ -109,6 +98,17 @@ common_button_set = Nastavit common_button_showDetails = Zobrazi detaily... ##common_button_unignore = Unignore common_button_uninventory = Odstranit z inventáÅe +##common_buttonbar_custom = Custom... +##common_buttonbar_custom_cancel = Cancel +##common_buttonbar_custom_save = Save +##common_buttonbar_custom_window_subtitle = Custom +##common_buttonbar_custom_window_title = Date Range +common_buttonbar_datetime_format = dd.MM.yy hh:mm +common_buttonbar_datetime_format_moment_js = DD.MM.YY HH:mm +##common_buttonbar_end_date = End Date +##common_buttonbar_end_time = End Time +##common_buttonbar_start_date = Start Date +##common_buttonbar_start_time = Start Time common_calendar_april_short = dub common_calendar_august_short = srp common_calendar_december_short = pro @@ -180,6 +180,7 @@ common_title_available_resources = Dostupné zdroje common_title_average_metrics = PrůmÄrné metriky za minutu common_title_background = Pozadà common_title_bundle = BalÃk +##common_title_bundleGroups = Bundle Groups common_title_bundles = BalÃky common_title_category = Kategorie common_title_change_refresh_time = Obnovovacà interval @@ -530,11 +531,21 @@ view_adminRoles_permissions_illegalDeselectionDueToManageSecuritySelection = {0} view_adminRoles_permissions_isAuthorized = Autorizován? view_adminRoles_permissions_isRead = ÄtenÃ? 
view_adminRoles_permissions_isWrite = Zápis? +##view_adminRoles_permissions_permDesc_assignBundlesToGroup = can copy a viewable bundle to the bundle group +##view_adminRoles_permissions_permDesc_createBundles = can create new bundle [version]s. can copy existing bundles between viewable groups +##view_adminRoles_permissions_permDesc_createBundlesInGroup = can create new bundle [version]s for the bundle group. can copy a viewable bundle to the bundle group. +##view_adminRoles_permissions_permDesc_deleteBundles = can delete or unassign viewable bundle [version]s +##view_adminRoles_permissions_permDesc_deleteBundlesFromGroup = can delete bundle [version]s from the bundle group (implicitly deleting then from other assigned groups) +##view_adminRoles_permissions_permDesc_deployBundles = can deploy any viewable bundle version to any viewable, deployable, compatible, resource group +##view_adminRoles_permissions_permDesc_manageBundleGroups = can create, update, or delete bundle groups view_adminRoles_permissions_permDesc_manageBundles = můşe vytváÅet, mÄnit, mazat balÃky (zobrazovat můşe kdokoli) view_adminRoles_permissions_permDesc_manageInventory = má vÅ¡echna práva zdroje: můşe vytváÅet, mÄnit, mazat skupiny, můşe importovat automaticky nebo manuálnÄ nalezené zdroje view_adminRoles_permissions_permDesc_manageRepositories = můşe vytváÅet, mÄnit, mazat repozitáÅe jakÃœchkoli uÅŸivatelů (kaÅŸdÃœ můşe vytváÅet své repozitáÅe), můşe asociovat zdroje obsahů s repozitáÅi view_adminRoles_permissions_permDesc_manageSecurity = můşe vytváÅet, mÄnit, mazat uÅŸivatele a role - implikuje vÅ¡echna ostatnà povolenà view_adminRoles_permissions_permDesc_manageSettings = můşe modifikovat {0} serverovou konfiguraci a provozovat jakékoli operace souvisejÃcà se serverem +##view_adminRoles_permissions_permDesc_unassignBundlesFromGroup = can unassign (not delete) a bundle from the bundle group +##view_adminRoles_permissions_permDesc_viewBundles = can view any bundle including global bundles 
(those not assigned to any bundle group) +##view_adminRoles_permissions_permDesc_viewBundlesInGroup = can view any bundle in the group view_adminRoles_permissions_permDesc_viewUsers = můşe zobrazovat ostatnà uÅŸivatele s vÃœjimkou jejich pÅiÅazenÃœch rolà view_adminRoles_permissions_permReadDesc_configure = zobrazà konfiguraci zdroje a historii konfigurace zdroje view_adminRoles_permissions_permReadDesc_control = (VÃCHOZÃ) zobrazà dostupné operace a historii spouÅ¡tÄnà operacà @@ -556,12 +567,19 @@ view_adminRoles_permissions_permWriteDesc_manageContent = pÅihlásit se k odbÄ view_adminRoles_permissions_permWriteDesc_manageDrift = vytvoÅit, zmÄnit a smazat definici driftu view_adminRoles_permissions_permWriteDesc_manageEvents = mazat události view_adminRoles_permissions_permWriteDesc_manageMeasurements = mÄnit rozvrhy sbÄru metrik +##view_adminRoles_permissions_perm_assignBundlesToGroup = Assign Bundles To Group view_adminRoles_permissions_perm_configure = Konfigurovat view_adminRoles_permissions_perm_control = Ovládánà +##view_adminRoles_permissions_perm_createBundles = Create Bundles +##view_adminRoles_permissions_perm_createBundlesInGroup = Create Bundles In Group view_adminRoles_permissions_perm_createChildResources = VytváÅet potomky zdrojů +##view_adminRoles_permissions_perm_deleteBundles = Delete Bundles +##view_adminRoles_permissions_perm_deleteBundlesFromGroup = Delete Bundles From Group view_adminRoles_permissions_perm_deleteChildResources = VytváÅet potomky zdrojů +##view_adminRoles_permissions_perm_deployBundles = Deploy Bundles view_adminRoles_permissions_perm_inventory = Inventarizovat view_adminRoles_permissions_perm_manageAlerts = Spravovat vÃœstrahy +##view_adminRoles_permissions_perm_manageBundleGroups = Manage Bundle Groups view_adminRoles_permissions_perm_manageBundles = Spravovat balÃky view_adminRoles_permissions_perm_manageContent = Spravovat obsah view_adminRoles_permissions_perm_manageDrift = Spravovat drift @@ -571,6 +589,9 @@ 
view_adminRoles_permissions_perm_manageMeasurements = Spravovat mÄÅenà view_adminRoles_permissions_perm_manageRepositories = Spravovat repozitáÅe view_adminRoles_permissions_perm_manageSecurity = Spravovat bezpeÄnost view_adminRoles_permissions_perm_manageSettings = Spravovat nastavenà +view_adminRoles_permissions_perm_unassignBundlesFromGroup = Unassign Bundles From Group +##view_adminRoles_permissions_perm_viewBundles = View Bundles +##view_adminRoles_permissions_perm_viewBundlesInGroup = View Bundles In Group view_adminRoles_permissions_perm_viewUsers = Zobrazit uÅŸivatelé view_adminRoles_permissions_read = ÄÃst: view_adminRoles_permissions_readAccessImplied = PÅÃstup pro Ätenà pro {0} je vÃœchozà a nemůşe bÃœt zakázán. diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties index 524dcc0..0db1d05 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties @@ -46,17 +46,6 @@ common_button_ack_all = Alle bestÀtigen common_button_add = HinzufÃŒgen common_button_advanced = Erweitert... common_button_apply = Anwenden -common_buttonbar_custom_window_title=Zeitbereich -common_buttonbar_datetime_format=dd.MM.yy hh:mm -common_buttonbar_datetime_format_moment_js= DD.MM.YY HH:mm -common_buttonbar_end_date=Enddatum -common_buttonbar_end_time=Endzeitpunkt -common_buttonbar_start_date=Startdatum -common_buttonbar_start_time=Startzeitpunkt -common_buttonbar_custom_cancel=Abbrechen -common_buttonbar_custom_save=Speichern -common_buttonbar_custom_window_subtitle= Spezifisch -common_buttonbar_custom = Spezifisch... 
common_button_cancel = Abbrechen common_button_close = SchlieÃen common_button_compare = Vergleichen @@ -89,6 +78,17 @@ common_button_set = Setzen common_button_showDetails = Details anzeigen... common_button_unignore = Ignorieren aufheben common_button_uninventory = Aus dem Inventar löschen +common_buttonbar_custom = Spezifisch... +common_buttonbar_custom_cancel = Abbrechen +common_buttonbar_custom_save = Speichern +common_buttonbar_custom_window_subtitle = Spezifisch +common_buttonbar_custom_window_title = Zeitbereich +common_buttonbar_datetime_format = dd.MM.yy hh:mm +common_buttonbar_datetime_format_moment_js = DD.MM.YY HH:mm +common_buttonbar_end_date = Enddatum +common_buttonbar_end_time = Endzeitpunkt +common_buttonbar_start_date = Startdatum +common_buttonbar_start_time = Startzeitpunkt common_calendar_april_short = Apr common_calendar_august_short = Aug common_calendar_december_short = Dez @@ -160,6 +160,7 @@ common_title_available_resources = VerfÃŒgbare Ressourcen common_title_average_metrics = Durchsch. Anzahl Metriken pro Minute common_title_background = Hintergrund common_title_bundle = Bundle +##common_title_bundleGroups = Bundle Groups common_title_bundles = Bundles common_title_category = Kategorie common_title_change_refresh_time = Aktualisierungsintervall @@ -502,10 +503,22 @@ view_adminRoles_permissions_globalPermissions = Globale Rechte view_adminRoles_permissions_isAuthorized = Berechtigt? view_adminRoles_permissions_isRead = Lesen? view_adminRoles_permissions_isWrite = Schreiben? +##view_adminRoles_permissions_permDesc_assignBundlesToGroup = can copy a viewable bundle to the bundle group +##view_adminRoles_permissions_permDesc_createBundles = can create new bundle [version]s. can copy existing bundles between viewable groups +##view_adminRoles_permissions_permDesc_createBundlesInGroup = can create new bundle [version]s for the bundle group. can copy a viewable bundle to the bundle group. 
+##view_adminRoles_permissions_permDesc_deleteBundles = can delete or unassign viewable bundle [version]s +##view_adminRoles_permissions_permDesc_deleteBundlesFromGroup = can delete bundle [version]s from the bundle group (implicitly deleting then from other assigned groups) +##view_adminRoles_permissions_permDesc_deployBundles = can deploy any viewable bundle version to any viewable, deployable, compatible, resource group +##view_adminRoles_permissions_permDesc_manageBundleGroups = can create, update, or delete bundle groups view_adminRoles_permissions_permDesc_manageBundles = Kann Bundles anlegen, aktualisieren und löschen (Ansehen ist fÃŒr alle implizit erlaubt). view_adminRoles_permissions_permDesc_manageInventory = Hat alle Rechte auf alle Ressourcen, wie unten beschrieben. Kann Gruppen anlegen, aktualisieren und löschen. Kann Ressourcen in das Inventar aufnehmen. +##view_adminRoles_permissions_permDesc_manageRepositories = can create, update, or delete repositories of any user (everyone can create their own repositories), can associate content sources to repositories. view_adminRoles_permissions_permDesc_manageSecurity = Kann Benutzer und Rollen anlegen, aktualisieren oder löschen (Anschauen ist fÃŒr alle implizit erlaubt) view_adminRoles_permissions_permDesc_manageSettings = Kann die Konfiguration des {0}-Servers Àndern und jegliche Server-bezogene FunktionalitÀt ausfÃŒhren. +##view_adminRoles_permissions_permDesc_unassignBundlesFromGroup = can unassign (not delete) a bundle from the bundle group +##view_adminRoles_permissions_permDesc_viewBundles = can view any bundle including global bundles (those not assigned to any bundle group) +##view_adminRoles_permissions_permDesc_viewBundlesInGroup = can view any bundle in the group +##view_adminRoles_permissions_permDesc_viewUsers = can view other users, with the exception of their assigned roles view_adminRoles_permissions_permReadDesc_configure = Ansehen der Ressourcen-Konfiguration und des Verlaufs derselben. 
view_adminRoles_permissions_permReadDesc_control = (IMPLIZIT) Ansehen der verfÃŒgbaren Operationen und des Verlaufs der ausgefÃŒhren Operationen view_adminRoles_permissions_permReadDesc_createChildResources = (IMPLIZIT) Ansehen des Verlaufs des Anlegens von Kind-Ressourcen @@ -526,20 +539,32 @@ view_adminRoles_permissions_permWriteDesc_manageContent = Abonnieren von Content view_adminRoles_permissions_permWriteDesc_manageDrift = Anlegen, Bearbeiten und Löschen von Drift-Definitionen und Management von Drift-Instanzen view_adminRoles_permissions_permWriteDesc_manageEvents = Ereignisse löschen view_adminRoles_permissions_permWriteDesc_manageMeasurements = Zeitplan fÃŒr das Monitoring bearbeiten +##view_adminRoles_permissions_perm_assignBundlesToGroup = Assign Bundles To Group view_adminRoles_permissions_perm_configure = Konfiguration view_adminRoles_permissions_perm_control = Operationen +##view_adminRoles_permissions_perm_createBundles = Create Bundles +##view_adminRoles_permissions_perm_createBundlesInGroup = Create Bundles In Group view_adminRoles_permissions_perm_createChildResources = Kind-Ressourcen erzeugen +##view_adminRoles_permissions_perm_deleteBundles = Delete Bundles +##view_adminRoles_permissions_perm_deleteBundlesFromGroup = Delete Bundles From Group view_adminRoles_permissions_perm_deleteChildResources = Löschen von Kind-Ressourcen +##view_adminRoles_permissions_perm_deployBundles = Deploy Bundles view_adminRoles_permissions_perm_inventory = Inventar view_adminRoles_permissions_perm_manageAlerts = Alarme verwalten +##view_adminRoles_permissions_perm_manageBundleGroups = Manage Bundle Groups view_adminRoles_permissions_perm_manageBundles = Bundles verwalten view_adminRoles_permissions_perm_manageContent = Content verwalten view_adminRoles_permissions_perm_manageDrift = Drift verwalten view_adminRoles_permissions_perm_manageEvents = Ereignisse verwalten view_adminRoles_permissions_perm_manageInventory = Inventar verwalten 
view_adminRoles_permissions_perm_manageMeasurements = Monitoring verwalten +##view_adminRoles_permissions_perm_manageRepositories = Manage Repositories view_adminRoles_permissions_perm_manageSecurity = Sicherheitseinstellungen verwalten view_adminRoles_permissions_perm_manageSettings = Einstellungen verwalten +view_adminRoles_permissions_perm_unassignBundlesFromGroup = Unassign Bundles From Group +##view_adminRoles_permissions_perm_viewBundles = View Bundles +##view_adminRoles_permissions_perm_viewBundlesInGroup = View Bundles In Group +##view_adminRoles_permissions_perm_viewUsers = View Users view_adminRoles_permissions_read = Lesen view_adminRoles_permissions_readAccessImplied = Lesezugriff fÃŒr das Recht {0} ist implizit und kann nicht abgeschaltet werden. view_adminRoles_permissions_resourcePermissions = Rechte fÃŒr Ressourcen @@ -746,8 +771,7 @@ view_admin_systemSettings_AlertPurge_name = Bereinigen der Alarme, die Àlter si view_admin_systemSettings_AvailabilityPurge_name = Lösche VerfÃŒgbarkeitsdaten, die Àlter sind als ##view_admin_systemSettings_DriftFilePurge_desc = How old unused and orphaned drift files must be before being purged from backend storage. This is specified in days. view_admin_systemSettings_DriftFilePurge_name = Bereinige ungenutzte Drift-Dateine, die Àlter sind als -view_admin_systemSettings_LDAPBaseDN_name = \ -view_admin_systemSettings_LDAPBindPW_name = Passwort +view_admin_systemSettings_LDAPBaseDN_name = view_admin_systemSettings_LDAPBindPW_name = Passwort ##view_admin_systemSettings_RHQSessionTimeout_desc = If this amount of time passes without any user interaction in the browser, the session is considered as expired and user is aked to log in again. This value is specified in hours. 
##view_admin_systemSettings_RHQSessionTimeout_name = GUI Session Timeout view_admin_systemSettings_TraitPurge_name = Bereinigen der Trait-Daten, die Àlter sind als diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties index cb0c35f..626243e 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties @@ -46,16 +46,6 @@ common_button_ack_all = ãã¹ãŠãç¢ºèª common_button_add = è¿œå common_button_advanced = é«åºŠãªèšå®... common_button_apply = é©çš -##common_buttonbar_custom_window_title= Date Range -##common_buttonbar_custom_window_subtitle= Custom -##common_buttonbar_datetime_format= MM/dd/yyyy h:mm a -##common_buttonbar_end_date= End Date -##common_buttonbar_end_time= End Time -##common_buttonbar_start_date= Start Date -##common_buttonbar_start_time= Start Time -##common_buttonbar_custom = Custom... -##common_buttonbar_custom_cancel = Cancel -##common_buttonbar_custom_save = Save common_button_cancel = ãã£ã³ã»ã« common_button_close = éãã common_button_compare = æ¯èŒ @@ -88,6 +78,16 @@ common_button_set = èšå® common_button_showDetails = 詳现衚瀺... ##common_button_unignore = Unignore common_button_uninventory = ã¢ã³ã€ã³ãã³ã㪠+##common_buttonbar_custom = Custom... 
+##common_buttonbar_custom_cancel = Cancel +##common_buttonbar_custom_save = Save +##common_buttonbar_custom_window_subtitle = Custom +##common_buttonbar_custom_window_title = Date Range +##common_buttonbar_datetime_format = MM/dd/yyyy h:mm a +##common_buttonbar_end_date = End Date +##common_buttonbar_end_time = End Time +##common_buttonbar_start_date = Start Date +##common_buttonbar_start_time = Start Time common_calendar_april_short = 4æ common_calendar_august_short = 8æ common_calendar_december_short = 12æ @@ -160,6 +160,7 @@ common_title_available_resources = å©çšå¯èœãªãœãŒã¹ common_title_average_metrics = æ¯åã®å¹³åã¡ããªãã¯ã¹ common_title_background = èæ¯è² common_title_bundle = ãã³ãã« +##common_title_bundleGroups = Bundle Groups common_title_bundles = ãã³ãã« common_title_category = ã«ããŽãª common_title_change_refresh_time = ãªãã¬ãã·ã¥éé @@ -508,11 +509,21 @@ view_adminRoles_permissions_illegalDeselectionDueToManageSecuritySelection = {0} view_adminRoles_permissions_isAuthorized = æš©éãããã? view_adminRoles_permissions_isRead = èªã¿åºãã§ãã? view_adminRoles_permissions_isWrite = æžã蟌ã¿ã§ãã? +##view_adminRoles_permissions_permDesc_assignBundlesToGroup = can copy a viewable bundle to the bundle group +##view_adminRoles_permissions_permDesc_createBundles = can create new bundle [version]s. can copy existing bundles between viewable groups +##view_adminRoles_permissions_permDesc_createBundlesInGroup = can create new bundle [version]s for the bundle group. can copy a viewable bundle to the bundle group. 
+##view_adminRoles_permissions_permDesc_deleteBundles = can delete or unassign viewable bundle [version]s +##view_adminRoles_permissions_permDesc_deleteBundlesFromGroup = can delete bundle [version]s from the bundle group (implicitly deleting then from other assigned groups) +##view_adminRoles_permissions_permDesc_deployBundles = can deploy any viewable bundle version to any viewable, deployable, compatible, resource group +##view_adminRoles_permissions_permDesc_manageBundleGroups = can create, update, or delete bundle groups view_adminRoles_permissions_permDesc_manageBundles = ããããžã§ãã³ã°ãã³ãã«ã®äœæãæŽæ°ãåé€ãå¯èœã§ã(誰ã§ãæé»çã«é²èŠ§å¯èœã§ã) view_adminRoles_permissions_permDesc_manageInventory = ãã¹ãŠã®ãªãœãŒã¹ã«ã€ããŠãã¹ãŠã®ãªãœãŒã¹æš©éãæã¡ãŸããã€ãŸããã°ã«ãŒãã®äœæãæŽæ°ãåé€ããããŠèªåæ€åºãããããŸãã¯æåã§æ€åºããããªãœãŒã¹ã®ã€ã³ããŒããå¯èœã§ã view_adminRoles_permissions_permDesc_manageRepositories = ãŠãŒã¶ãŒã®ãªããžããªã®äœæãæŽæ°ãåé€ãå¯èœ(誰ã§ããªããžããªãäœæå¯èœ)ã§ãã³ã³ãã³ããœãŒã¹ãšãªããžããªãé¢é£ã¥ããã§ããŸãã view_adminRoles_permissions_permDesc_manageSecurity = ãŠãŒã¶ãŒãšããŒã«ã®äœæãæŽæ°ãåé€ããã®ä»ãã¹ãŠã®æš©éãå¯èœã§ã view_adminRoles_permissions_permDesc_manageSettings = {0}ãµãŒããŒã®ä¿®æ£ãšä»»æã®ãµãŒããŒé¢é£ã®æ©èœã®å®è¡ãã§ããŸã +##view_adminRoles_permissions_permDesc_unassignBundlesFromGroup = can unassign (not delete) a bundle from the bundle group +##view_adminRoles_permissions_permDesc_viewBundles = can view any bundle including global bundles (those not assigned to any bundle group) +##view_adminRoles_permissions_permDesc_viewBundlesInGroup = can view any bundle in the group view_adminRoles_permissions_permDesc_viewUsers = ä»ã®ãŠãŒã¶ãŒãé²èŠ§ã§ããŸãããããããããã«å²ãåœãŠãããããŒã«ã¯èŠããŸããã view_adminRoles_permissions_permReadDesc_configure = ãªãœãŒã¹æ§æãšãªãœãŒã¹æ§æãªããžã§ã³å±¥æŽã®é²èŠ§ view_adminRoles_permissions_permReadDesc_control = å©çšå¯èœãªãã¬ãŒã·ã§ã³; ãªãã¬ãŒã·ã§ã³å®è¡å±¥æŽã®(æé»çãª)é²èŠ§ @@ -534,12 +545,19 @@ view_adminRoles_permissions_permWriteDesc_manageContent = ã³ã³ãã³ããœãŒ 
view_adminRoles_permissions_permWriteDesc_manageDrift = ããªããå®çŸ©ã®äœæãæŽæ°ãåé€; ããªããã€ã³ã¹ã¿ã³ã¹ã®ç®¡ç view_adminRoles_permissions_permWriteDesc_manageEvents = ã€ãã³ãã®åé€ view_adminRoles_permissions_permWriteDesc_manageMeasurements = ã¡ããªãã¯åéã¹ã±ãžã¥ãŒã«ã®æŽæ° +##view_adminRoles_permissions_perm_assignBundlesToGroup = Assign Bundles To Group view_adminRoles_permissions_perm_configure = æ§æ view_adminRoles_permissions_perm_control = å¶åŸ¡ +##view_adminRoles_permissions_perm_createBundles = Create Bundles +##view_adminRoles_permissions_perm_createBundlesInGroup = Create Bundles In Group view_adminRoles_permissions_perm_createChildResources = åãªãœãŒã¹ã®äœæ +##view_adminRoles_permissions_perm_deleteBundles = Delete Bundles +##view_adminRoles_permissions_perm_deleteBundlesFromGroup = Delete Bundles From Group view_adminRoles_permissions_perm_deleteChildResources = åãªãœãŒã¹ã®åé€ +##view_adminRoles_permissions_perm_deployBundles = Deploy Bundles view_adminRoles_permissions_perm_inventory = ã€ã³ãã³ã㪠view_adminRoles_permissions_perm_manageAlerts = ã¢ã©ãŒãã®ç®¡ç +##view_adminRoles_permissions_perm_manageBundleGroups = Manage Bundle Groups view_adminRoles_permissions_perm_manageBundles = ãã³ãã«ã®ç®¡ç view_adminRoles_permissions_perm_manageContent = ã³ã³ãã³ã管ç view_adminRoles_permissions_perm_manageDrift = ããªãã管ç @@ -549,6 +567,9 @@ view_adminRoles_permissions_perm_manageMeasurements = 枬å®ã®ç®¡ç view_adminRoles_permissions_perm_manageRepositories = ãªããžããªã®ç®¡ç view_adminRoles_permissions_perm_manageSecurity = ã»ãã¥ãªãã£ã®ç®¡ç view_adminRoles_permissions_perm_manageSettings = èšå®ã®ç®¡ç +view_adminRoles_permissions_perm_unassignBundlesFromGroup = Unassign Bundles From Group +##view_adminRoles_permissions_perm_viewBundles = View Bundles +##view_adminRoles_permissions_perm_viewBundlesInGroup = View Bundles In Group view_adminRoles_permissions_perm_viewUsers = ãŠãŒã¶ãŒé²èŠ§ view_adminRoles_permissions_read = Read: view_adminRoles_permissions_readAccessImplied = [{0}] 
æš©éã®ããã®èªã¿åãã¢ã¯ã»ã¹ãæ瀺ãããŠããŠç¡å¹ã«ã§ããŸãã diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties index 32dc73d..0da2a6e 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties @@ -44,16 +44,6 @@ common_button_ack_all = ì 첎 íìž common_button_add = ì¶ê° common_button_advanced = ê³ êž ì€ì ... common_button_apply = ì ì© -##common_buttonbar_custom_window_title= Date Range -##common_buttonbar_custom_window_subtitle= Custom -##common_buttonbar_datetime_format= MM/dd/yyyy h:mm a -##common_buttonbar_end_date= End Date -##common_buttonbar_end_time= End Time -##common_buttonbar_start_date= Start Date -##common_buttonbar_start_time= Start Time -##common_buttonbar_custom = Custom... -##common_buttonbar_custom_cancel = Cancel -##common_buttonbar_custom_save = Save common_button_cancel = ì·šì common_button_close = ë«êž° common_button_compare = ë¹êµ @@ -85,6 +75,16 @@ common_button_set = ì€ì common_button_showDetails = ììží볎Ʞ... ##common_button_unignore = Unignore common_button_uninventory = ìžë²€í 늬 ì ê±° +##common_buttonbar_custom = Custom... 
+##common_buttonbar_custom_cancel = Cancel +##common_buttonbar_custom_save = Save +##common_buttonbar_custom_window_subtitle = Custom +##common_buttonbar_custom_window_title = Date Range +##common_buttonbar_datetime_format = MM/dd/yyyy h:mm a +##common_buttonbar_end_date = End Date +##common_buttonbar_end_time = End Time +##common_buttonbar_start_date = Start Date +##common_buttonbar_start_time = Start Time common_calendar_april_short = 4ì common_calendar_august_short = 8ì common_calendar_december_short = 12ì @@ -153,6 +153,7 @@ common_title_available_resources = ì¬ì© ê°ë¥í ìì common_title_average_metrics = ë¶ë¹ íê· íµê³ common_title_background = 배겜ì common_title_bundle = ë²ë€ +##common_title_bundleGroups = Bundle Groups common_title_bundles = ë²ë€ common_title_change_refresh_time = ìë¡ê³ 칚 죌Ʞ common_title_columns = ì»¬ëŒ @@ -452,8 +453,21 @@ view_adminRoles_permissions_illegalDeselectionDueToCorrespondingWritePermSelecti view_adminRoles_permissions_illegalDeselectionDueToManageInventorySelection = {0} ê¶íì ì í íŽì í ì ììµëë€. ë€ë¥ž 몚ë ììì ììíë êŽëŠ¬ ìžë²€í ëŠ¬ê° ëšŒì ì í íŽì ëìŽìŒ ê·žê²ì ìíí ì ììµëë€. view_adminRoles_permissions_illegalDeselectionDueToManageSecuritySelection = {0} ê¶íì ì í íŽì í ì ììµëë€. ë€ë¥ž 몚ë ê¶íì ììíë êŽëŠ¬ 볎ì ê¶íìŽ ëšŒì ì í íŽì ëìŽìŒ ê·žê²ì ìíí ì ììµëë€. view_adminRoles_permissions_isAuthorized = ê¶íìŽ ììµëê¹? +##view_adminRoles_permissions_permDesc_assignBundlesToGroup = can copy a viewable bundle to the bundle group +##view_adminRoles_permissions_permDesc_createBundles = can create new bundle [version]s. can copy existing bundles between viewable groups +##view_adminRoles_permissions_permDesc_createBundlesInGroup = can create new bundle [version]s for the bundle group. can copy a viewable bundle to the bundle group. 
+##view_adminRoles_permissions_permDesc_deleteBundles = can delete or unassign viewable bundle [version]s +##view_adminRoles_permissions_permDesc_deleteBundlesFromGroup = can delete bundle [version]s from the bundle group (implicitly deleting then from other assigned groups) +##view_adminRoles_permissions_permDesc_deployBundles = can deploy any viewable bundle version to any viewable, deployable, compatible, resource group +##view_adminRoles_permissions_permDesc_manageBundleGroups = can create, update, or delete bundle groups +##view_adminRoles_permissions_permDesc_manageBundles = can create, update, or delete provisioning bundles (viewing is implied for everyone) view_adminRoles_permissions_permDesc_manageInventory = 몚ë ììì 몚ë ìì ê¶íì ê°ìµëë€. ìŠ, 귞룹ì ìì±, ìì , ìëê°ì§ ëë ìëìŒë¡ ê²ì¶ë ììì ê°ì žì¬ì ììµëë€. +##view_adminRoles_permissions_permDesc_manageRepositories = can create, update, or delete repositories of any user (everyone can create their own repositories), can associate content sources to repositories. +##view_adminRoles_permissions_permDesc_manageSecurity = can create, update, or delete users and roles - implies all other permissions view_adminRoles_permissions_permDesc_manageSettings = {0} ìë²ì ìì ë° ëªšë ìë² êŽë š êž°ë¥ì ìíí ì ììµëë€. +##view_adminRoles_permissions_permDesc_unassignBundlesFromGroup = can unassign (not delete) a bundle from the bundle group +##view_adminRoles_permissions_permDesc_viewBundles = can view any bundle including global bundles (those not assigned to any bundle group) +##view_adminRoles_permissions_permDesc_viewBundlesInGroup = can view any bundle in the group view_adminRoles_permissions_permDesc_viewUsers = ë€ë¥ž ì¬ì©ì륌 볌 ì ììµëë€. ê·žë¬ë ê·žë€ìê² í ë¹ë ìí ì 볎ìŽì§ ììµëë€. 
view_adminRoles_permissions_permReadDesc_configure = ìì ì€ì ë° ìì ì€ì ìì êž°ë¡ ë³Žêž° view_adminRoles_permissions_permReadDesc_createChildResources = (ììì ) ìì 늬ìì€ ë§ë€êž° êž°ë¡ ë³Žêž° @@ -469,13 +483,21 @@ view_adminRoles_permissions_permWriteDesc_manageAlerts = ê²œê³ ì ì륌 ë§ë€ view_adminRoles_permissions_permWriteDesc_manageDrift = ë늬ííž ì ì ë§ë€êž°, ìì , ìì ; ë늬ííž ìžì€íŽì€ êŽëŠ¬ view_adminRoles_permissions_permWriteDesc_manageEvents = ìŽë²€íž ìì view_adminRoles_permissions_permWriteDesc_manageMeasurements = íµê³ ìì§ ì€ìŒì¥Ž ìì +##view_adminRoles_permissions_perm_assignBundlesToGroup = Assign Bundles To Group view_adminRoles_permissions_perm_configure = êµ¬ì± view_adminRoles_permissions_perm_control = ì ìŽ +##view_adminRoles_permissions_perm_createBundles = Create Bundles +##view_adminRoles_permissions_perm_createBundlesInGroup = Create Bundles In Group view_adminRoles_permissions_perm_createChildResources = ìì 늬ìì€ ë§ë€êž° +##view_adminRoles_permissions_perm_deleteBundles = Delete Bundles +##view_adminRoles_permissions_perm_deleteBundlesFromGroup = Delete Bundles From Group view_adminRoles_permissions_perm_deleteChildResources = ìì 늬ìì€ ìì +##view_adminRoles_permissions_perm_deployBundles = Deploy Bundles view_adminRoles_permissions_perm_inventory = ìžë²€í 늬 view_adminRoles_permissions_perm_manageAlerts = ê²œê³ êŽëŠ¬ +##view_adminRoles_permissions_perm_manageBundleGroups = Manage Bundle Groups view_adminRoles_permissions_perm_manageBundles = ë²ë€ êŽëŠ¬ +##view_adminRoles_permissions_perm_manageContent = Manage Content view_adminRoles_permissions_perm_manageDrift = ë늬ííž êŽëŠ¬ view_adminRoles_permissions_perm_manageEvents = ìŽë²€íž êŽëŠ¬ view_adminRoles_permissions_perm_manageInventory = ìžë²€í 늬 êŽëŠ¬ @@ -483,6 +505,9 @@ view_adminRoles_permissions_perm_manageMeasurements = ìž¡ì êŽëŠ¬ view_adminRoles_permissions_perm_manageRepositories = ì ì¥ì êŽëŠ¬ view_adminRoles_permissions_perm_manageSecurity = 볎ì êŽëŠ¬ view_adminRoles_permissions_perm_manageSettings = ì€ì 
êŽëŠ¬ +view_adminRoles_permissions_perm_unassignBundlesFromGroup = Unassign Bundles From Group +##view_adminRoles_permissions_perm_viewBundles = View Bundles +##view_adminRoles_permissions_perm_viewBundlesInGroup = View Bundles In Group view_adminRoles_permissions_perm_viewUsers = ì¬ì©ì 볎Ʞ view_adminRoles_permissions_read = ìœêž°: view_adminRoles_permissions_readAccessImplied = {0} ê¶íì ëí ìœêž° ììžì€ë¥Œ ììíŽ íŽì í ì ììµëë€. diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties index f4419e1..6d74d41 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties @@ -43,16 +43,6 @@ common_button_ack_all = Confirmar Todos common_button_add = Adicionar common_button_advanced = Avan\u00E7ado... common_button_apply = Aplicar -##common_buttonbar_custom_window_title= Date Range -##common_buttonbar_custom_window_subtitle= Custom -##common_buttonbar_datetime_format= MM/dd/yyyy h:mm a -##common_buttonbar_end_date= End Date -##common_buttonbar_end_time= End Time -##common_buttonbar_start_date= Start Date -##common_buttonbar_start_time= Start Time -##common_buttonbar_custom = Custom... -##common_buttonbar_custom_cancel = Cancel -##common_buttonbar_custom_save = Save common_button_cancel = Cancelar common_button_close = Fechar common_button_compare = Comparar @@ -85,6 +75,16 @@ common_button_set = Definir common_button_showDetails = Mostrar Detalhes... ##common_button_unignore = Unignore common_button_uninventory = Remover do Invent\u00E1rio +##common_buttonbar_custom = Custom... 
+##common_buttonbar_custom_cancel = Cancel +##common_buttonbar_custom_save = Save +##common_buttonbar_custom_window_subtitle = Custom +##common_buttonbar_custom_window_title = Date Range +##common_buttonbar_datetime_format = MM/dd/yyyy h:mm a +##common_buttonbar_end_date = End Date +##common_buttonbar_end_time = End Time +##common_buttonbar_start_date = Start Date +##common_buttonbar_start_time = Start Time common_calendar_april_short = abr common_calendar_august_short = ago common_calendar_december_short = dez @@ -157,6 +157,7 @@ common_title_available_resources = Recursos Dispon\u00EDveis common_title_average_metrics = M\u00E9tricas da m\u00E9dia por Minuto common_title_background = Fundo (background) common_title_bundle = Bundle +##common_title_bundleGroups = Bundle Groups common_title_bundles = Bundles common_title_category = Categoria common_title_change_refresh_time = Atualizar Intervalo @@ -513,11 +514,22 @@ view_adminRoles_permissions_illegalDeselectionDueToManageSecuritySelection = {0} view_adminRoles_permissions_isAuthorized = Autorizado? view_adminRoles_permissions_isRead = Leitura? view_adminRoles_permissions_isWrite = Escrita? +##view_adminRoles_permissions_permDesc_assignBundlesToGroup = can copy a viewable bundle to the bundle group +##view_adminRoles_permissions_permDesc_createBundles = can create new bundle [version]s. can copy existing bundles between viewable groups +##view_adminRoles_permissions_permDesc_createBundlesInGroup = can create new bundle [version]s for the bundle group. can copy a viewable bundle to the bundle group. 
+##view_adminRoles_permissions_permDesc_deleteBundles = can delete or unassign viewable bundle [version]s +##view_adminRoles_permissions_permDesc_deleteBundlesFromGroup = can delete bundle [version]s from the bundle group (implicitly deleting then from other assigned groups) +##view_adminRoles_permissions_permDesc_deployBundles = can deploy any viewable bundle version to any viewable, deployable, compatible, resource group +##view_adminRoles_permissions_permDesc_manageBundleGroups = can create, update, or delete bundle groups view_adminRoles_permissions_permDesc_manageBundles = pode criar, atualizar, ou excluir o provisionamento de bundles (a visualiza\u00E7\u00E3 \u00E9 implicita para todos) view_adminRoles_permissions_permDesc_manageInventory = possui todas as permiss\u00F5es de Recurso, como descrito abaixo, pode criar, atualizar, excluir grupos e importar Recursos descobertos automaticamente ou manualmente. view_adminRoles_permissions_permDesc_manageRepositories = pode criar, atualizar, ou excluir reposit\u00F3rios de qualquer usu\u00E1rio (todos podem criar seus pr\u00F3prios reposit\u00F3rios), pode associar fontes de conte\u00FAdos a reposit\u00F3rios. 
view_adminRoles_permissions_permDesc_manageSecurity = pode criar, atualizar, ou excluir usu\u00E1rios e perfis (visualiza\u00E7\u00E3o \u00E9 padr\u00E3o para todos) ##view_adminRoles_permissions_permDesc_manageSettings = pode modificar a configura\u00E7\u00E3o do RHQ Server e utilizar qualquer funcionalidade relacionada ao Servidor +##view_adminRoles_permissions_permDesc_unassignBundlesFromGroup = can unassign (not delete) a bundle from the bundle group +##view_adminRoles_permissions_permDesc_viewBundles = can view any bundle including global bundles (those not assigned to any bundle group) +##view_adminRoles_permissions_permDesc_viewBundlesInGroup = can view any bundle in the group +##view_adminRoles_permissions_permDesc_viewUsers = can view other users, with the exception of their assigned roles view_adminRoles_permissions_permReadDesc_configure = view Resource configuration and Resource configuration revision history view_adminRoles_permissions_permReadDesc_control = (IMPL\u00CDCITO) visualizar opera\u00E7\u00F5es dispon\u00EDveis e o hist\u00F3rico da execu\u00E7\u00E3o de opera\u00E7\u00F5es view_adminRoles_permissions_permReadDesc_createChildResources = (IMPL\u00CDCITO) visualizar o hist\u00F3rico de cria\u00E7\u00E3o do recurso filho @@ -538,12 +550,19 @@ view_adminRoles_permissions_permWriteDesc_manageContent = inscrever-se no reposi ##view_adminRoles_permissions_permWriteDesc_manageDrift = create, update, and delete drift definitions; and manage drift instances view_adminRoles_permissions_permWriteDesc_manageEvents = excluir eventos view_adminRoles_permissions_permWriteDesc_manageMeasurements = atualizar agendamento de coleta de m\u00E9tricas +##view_adminRoles_permissions_perm_assignBundlesToGroup = Assign Bundles To Group view_adminRoles_permissions_perm_configure = Configurar view_adminRoles_permissions_perm_control = Controlar +##view_adminRoles_permissions_perm_createBundles = Create Bundles +##view_adminRoles_permissions_perm_createBundlesInGroup = 
Create Bundles In Group view_adminRoles_permissions_perm_createChildResources = Criar Recurso filho +##view_adminRoles_permissions_perm_deleteBundles = Delete Bundles +##view_adminRoles_permissions_perm_deleteBundlesFromGroup = Delete Bundles From Group view_adminRoles_permissions_perm_deleteChildResources = Excluir Recursos filhos +##view_adminRoles_permissions_perm_deployBundles = Deploy Bundles view_adminRoles_permissions_perm_inventory = Invent\u00E1rio view_adminRoles_permissions_perm_manageAlerts = Gerenciar Alertas +##view_adminRoles_permissions_perm_manageBundleGroups = Manage Bundle Groups view_adminRoles_permissions_perm_manageBundles = Gerenciar Bundles view_adminRoles_permissions_perm_manageContent = Gerenciar Conte\u00FAdo ##view_adminRoles_permissions_perm_manageDrift = Manage Drift @@ -553,6 +572,10 @@ view_adminRoles_permissions_perm_manageMeasurements = Gerenciar M\u00E9tricas view_adminRoles_permissions_perm_manageRepositories = Gerenciar Reposit\u00F3rios view_adminRoles_permissions_perm_manageSecurity = Gerenciar Seguran\u00E7a view_adminRoles_permissions_perm_manageSettings = Gerenciar Configura\u00E7\u00F5es +view_adminRoles_permissions_perm_unassignBundlesFromGroup = Unassign Bundles From Group +##view_adminRoles_permissions_perm_viewBundles = View Bundles +##view_adminRoles_permissions_perm_viewBundlesInGroup = View Bundles In Group +##view_adminRoles_permissions_perm_viewUsers = View Users view_adminRoles_permissions_read = Leitura: view_adminRoles_permissions_readAccessImplied = Acesso de leitura para a permiss\u00E3o {0} \u00E9 impl\u00EDcita e n\u00E3o pode ser desabilitada. 
view_adminRoles_permissions_resourcePermissions = Permiss\u00F5es de Recurso diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties index d75b76e..f31b829 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties @@ -2271,6 +2271,7 @@ common_title_available_resources = ÐПÑÑÑпМÑе ÑеÑÑÑÑÑ common_title_average_metrics = СÑеЎМее кПлОÑеÑÑвП ЌеÑÑОк в ЌОМÑÑÑ common_title_background = ЀПМ common_title_bundle = ÐÐ°ÐºÐµÑ +##common_title_bundleGroups = Bundle Groups common_title_bundles = ÐакеÑÑ common_title_category = ÐаÑегПÑÐžÑ common_title_change_refresh_time = ÐÐœÑеÑвал ÐŸÐ±ÐœÐŸÐ²Ð»ÐµÐœÐžÑ @@ -2591,9 +2592,22 @@ view_adminRoles_permissions_illegalDeselectionDueToManageSecuritySelection = {0} view_adminRoles_permissions_isAuthorized = ÐвÑПÑОзПваМÑ? view_adminRoles_permissions_isRead = ЧОÑаÑÑ? view_adminRoles_permissions_isWrite = ÐапОÑÑ? +##view_adminRoles_permissions_permDesc_assignBundlesToGroup = can copy a viewable bundle to the bundle group +##view_adminRoles_permissions_permDesc_createBundles = can create new bundle [version]s. can copy existing bundles between viewable groups +##view_adminRoles_permissions_permDesc_createBundlesInGroup = can create new bundle [version]s for the bundle group. can copy a viewable bundle to the bundle group. 
+##view_adminRoles_permissions_permDesc_deleteBundles = can delete or unassign viewable bundle [version]s +##view_adminRoles_permissions_permDesc_deleteBundlesFromGroup = can delete bundle [version]s from the bundle group (implicitly deleting then from other assigned groups) +##view_adminRoles_permissions_permDesc_deployBundles = can deploy any viewable bundle version to any viewable, deployable, compatible, resource group +##view_adminRoles_permissions_permDesc_manageBundleGroups = can create, update, or delete bundle groups view_adminRoles_permissions_permDesc_manageBundles = ÐПжМП ÑПзЎаваÑÑ, ПбМПвлÑÑÑ Ðž ÑЎалÑÑÑ ÑÐ·Ð»Ñ Ð¿ÑеЎПÑÑÐ°Ð²Ð»ÐµÐœÐžÑ (пÑПÑЌПÑÑ Ð¿ÑеЎПÑÑавлÑеÑÑÑ Ð²ÑеЌ) +##view_adminRoles_permissions_permDesc_manageInventory = has all Resource permissions, as described below, for all Resources; can create, update, and delete groups; and can import auto-discovered or manually discovered Resources view_adminRoles_permissions_permDesc_manageRepositories = ЌПжМП ÑПзЎаваÑÑ, ПбМПвлÑÑÑ ÐžÐ»Ðž ÑЎалÑÑÑ ÑепПзОÑПÑОО лÑбПгП пПлÑзПваÑÐµÐ»Ñ (кажЎÑй ÐŒÐŸÐ¶ÐµÑ ÑПзЎаваÑÑ ÑвПО ÑПбÑÑвеММÑе ÑепПзОÑПÑОО), ЌПжМП аÑÑПÑООÑПваÑÑ ÐžÑÑПÑМОкО кПМÑеМÑа Ñ ÑезпПзОÑПÑОÑЌО. 
view_adminRoles_permissions_permDesc_manageSecurity = ЌПжМП ÑПзЎаваÑÑ, ПбМПвлÑÑÑ ÐžÐ»Ðž ÑЎалÑÑÑ Ð¿ÐŸÐ»ÑзПваÑелей О ÑПлО - вклÑÑÐ°ÐµÑ Ð²Ñе ÐŽÑÑгОе пПлМПЌПÑÐžÑ +##view_adminRoles_permissions_permDesc_manageSettings = can modify the {0} Server configuration and perform any Server-related functionality +##view_adminRoles_permissions_permDesc_unassignBundlesFromGroup = can unassign (not delete) a bundle from the bundle group +##view_adminRoles_permissions_permDesc_viewBundles = can view any bundle including global bundles (those not assigned to any bundle group) +##view_adminRoles_permissions_permDesc_viewBundlesInGroup = can view any bundle in the group +##view_adminRoles_permissions_permDesc_viewUsers = can view other users, with the exception of their assigned roles view_adminRoles_permissions_permReadDesc_configure = пÑПÑЌПÑÑ ÐºÐŸÐœÑОгÑÑаÑОО ÑеÑÑÑÑа О ОÑÑПÑÐžÑ Ð²ÐµÑÑОй кПМÑОгÑÑаÑОО ÑеÑÑÑÑа view_adminRoles_permissions_permReadDesc_createChildResources = (ÐÐÐÐ ÐÐУÐÐÐÐÐÐЫÐ) пÑПÑЌПÑÑ ÐžÑÑПÑОО ÑÐŸÐ·ÐŽÐ°ÐœÐžÑ ÐŽÐŸÑеÑМегП ÑеÑÑÑÑа view_adminRoles_permissions_permReadDesc_deleteChildResources = (ÐÐÐÐ ÐÐУÐÐÐÐÐÐЫÐ) пÑПÑЌПÑÑ ÐžÑÑПÑОО ÑÐŽÐ°Ð»ÐµÐœÐžÑ ÐŽÐŸÑеÑМегП ÑеÑÑÑÑа @@ -2608,18 +2622,32 @@ view_adminRoles_permissions_permWriteDesc_manageAlerts = ÑПзЎаÑÑ, ПбМ view_adminRoles_permissions_permWriteDesc_manageContent = пПЎпОÑаÑÑÑÑ ÐœÐ° ОÑÑПÑМОкО кПМÑеМÑа; ÑÑÑаМавлОваÑÑ Ðž ÑЎалÑÑÑ Ð¿Ð°ÐºÐµÑÑ view_adminRoles_permissions_permWriteDesc_manageEvents = ÑЎалÑÑÑ ÑПбÑÑÐžÑ view_adminRoles_permissions_permWriteDesc_manageMeasurements = ПбМПвлеМОе ЌеÑÑОк кПллекÑОО плаМОÑПвÑОкПв +##view_adminRoles_permissions_perm_assignBundlesToGroup = Assign Bundles To Group view_adminRoles_permissions_perm_configure = ÐПМÑОгÑÑаÑÐžÑ view_adminRoles_permissions_perm_control = УпÑавлеМОе +##view_adminRoles_permissions_perm_createBundles = Create Bundles +##view_adminRoles_permissions_perm_createBundlesInGroup = Create Bundles In Group +##view_adminRoles_permissions_perm_createChildResources = Create Child 
Resources +##view_adminRoles_permissions_perm_deleteBundles = Delete Bundles +##view_adminRoles_permissions_perm_deleteBundlesFromGroup = Delete Bundles From Group +##view_adminRoles_permissions_perm_deleteChildResources = Delete Child Resources +##view_adminRoles_permissions_perm_deployBundles = Deploy Bundles view_adminRoles_permissions_perm_inventory = ÐМвеМÑаÑОзаÑÐžÑ view_adminRoles_permissions_perm_manageAlerts = УпÑавлеМОе пÑеЎÑпÑежЎеМОÑЌО +##view_adminRoles_permissions_perm_manageBundleGroups = Manage Bundle Groups view_adminRoles_permissions_perm_manageBundles = УпÑавлеМОе ÑзлаЌО view_adminRoles_permissions_perm_manageContent = УпÑавлеМОе кПМÑеМÑПЌ +##view_adminRoles_permissions_perm_manageDrift = Manage Drift view_adminRoles_permissions_perm_manageEvents = УпÑавлеМОе ÑПбÑÑОÑЌО view_adminRoles_permissions_perm_manageInventory = УпÑавлеМОе ОМвеМÑаÑОзаÑОей view_adminRoles_permissions_perm_manageMeasurements = УпÑавлеМОе ОзЌеÑеМОÑЌО view_adminRoles_permissions_perm_manageRepositories = УпÑавлеМОе ÑепПзОÑПÑОÑЌО view_adminRoles_permissions_perm_manageSecurity = УпÑавлеМОе безПпаÑМПÑÑÑÑ view_adminRoles_permissions_perm_manageSettings = УпÑавлеМОе МаÑÑÑПйкаЌО +view_adminRoles_permissions_perm_unassignBundlesFromGroup = Unassign Bundles From Group +##view_adminRoles_permissions_perm_viewBundles = View Bundles +##view_adminRoles_permissions_perm_viewBundlesInGroup = View Bundles In Group +##view_adminRoles_permissions_perm_viewUsers = View Users view_adminRoles_permissions_read = ЧÑеМОе: view_adminRoles_permissions_readAccessImplied = ÐПÑÑÑп ÑÑÐµÐœÐžÑ ÐŽÐ»Ñ {0} пПлМПЌПÑÐžÑ Ð¿ÐŸÐŽÑазÑЌеваеÑÑÑ Ðž Ме ÐŒÐŸÐ¶ÐµÑ Ð±ÑÑÑ Ð·Ð°ÐºÑÑÑ. 
view_adminRoles_permissions_resourcePermissions = ÐПлМПЌÑÐžÑ ÑеÑÑÑÑа diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties index a9a24df..1aeb1a1 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties @@ -43,16 +43,6 @@ common_button_ack_all = \u786e\u8ba4\u6240\u6709 common_button_add = \u6dfb\u52a0 common_button_advanced = \u9ad8\u7ea7... common_button_apply = \u63a5\u53d7 -##common_buttonbar_custom_window_title= Date Range -##common_buttonbar_custom_window_subtitle= Custom -##common_buttonbar_datetime_format= MM/dd/yyyy h:mm a -##common_buttonbar_end_date= End Date -##common_buttonbar_end_time= End Time -##common_buttonbar_start_date= Start Date -##common_buttonbar_start_time= Start Time -##common_buttonbar_custom = Custom... -##common_buttonbar_custom_cancel = Cancel -##common_buttonbar_custom_save = Save common_button_cancel = \u53d6\u6d88 common_button_close = \u5173\u95ed common_button_compare = \u5bf9\u6bd4 @@ -85,6 +75,16 @@ common_button_set = Set common_button_showDetails = \u67e5\u770b\u8be6\u60c5... ##common_button_unignore = Unignore common_button_uninventory = Uninventory +##common_buttonbar_custom = Custom... 
+##common_buttonbar_custom_cancel = Cancel +##common_buttonbar_custom_save = Save +##common_buttonbar_custom_window_subtitle = Custom +##common_buttonbar_custom_window_title = Date Range +##common_buttonbar_datetime_format = MM/dd/yyyy h:mm a +##common_buttonbar_end_date = End Date +##common_buttonbar_end_time = End Time +##common_buttonbar_start_date = Start Date +##common_buttonbar_start_time = Start Time common_calendar_april_short = \u56db\u6708 common_calendar_august_short = \u516b\u6708 common_calendar_december_short = \u5341\u4e8c\u6708 @@ -157,6 +157,7 @@ common_title_available_resources = \u53ef\u7528\u7684\u8d44\u6e90 common_title_average_metrics = \u5e73\u5747\u6bcf\u5206\u949f\u6307\u6807 common_title_background = \u80cc\u666f\u8272 common_title_bundle = Bundle +##common_title_bundleGroups = Bundle Groups common_title_bundles = Bundles common_title_category = \u7c7b\u522b common_title_change_refresh_time = \u5237\u65b0\u95f4\u9694 @@ -502,11 +503,22 @@ view_adminRoles_permissions_illegalDeselectionDueToManageSecuritySelection = {0} view_adminRoles_permissions_isAuthorized = \u6388\u6743? view_adminRoles_permissions_isRead = \u8bfb? view_adminRoles_permissions_isWrite = \u5199? +##view_adminRoles_permissions_permDesc_assignBundlesToGroup = can copy a viewable bundle to the bundle group +##view_adminRoles_permissions_permDesc_createBundles = can create new bundle [version]s. can copy existing bundles between viewable groups +##view_adminRoles_permissions_permDesc_createBundlesInGroup = can create new bundle [version]s for the bundle group. can copy a viewable bundle to the bundle group. 
+##view_adminRoles_permissions_permDesc_deleteBundles = can delete or unassign viewable bundle [version]s +##view_adminRoles_permissions_permDesc_deleteBundlesFromGroup = can delete bundle [version]s from the bundle group (implicitly deleting then from other assigned groups) +##view_adminRoles_permissions_permDesc_deployBundles = can deploy any viewable bundle version to any viewable, deployable, compatible, resource group +##view_adminRoles_permissions_permDesc_manageBundleGroups = can create, update, or delete bundle groups view_adminRoles_permissions_permDesc_manageBundles = \u80fd\u521b\u5efa,\u66f4\u65b0\u6216\u8005\u5220\u9664\u63d0\u4f9b\u7684bundles(\u4efb\u4f55\u4eba\u90fd\u80fd\u67e5\u770b) view_adminRoles_permissions_permDesc_manageInventory = \u62e5\u6709\u6240\u6709\u8d44\u6e90\u6743\u9650, \u5982\u4e0b\u6240\u8ff0, \u5bf9\u6240\u6709\u8d44\u6e90; \u5177\u6709\u521b\u5efa, \u66f4\u65b0, \u5220\u9664\u7ec4; \u80fd\u5bfc\u5165\u81ea\u52a8\u53d1\u73b0\u6216\u624b\u52a8\u53d1\u73b0\u7684\u8d44\u6e90 view_adminRoles_permissions_permDesc_manageRepositories = can create, update, or delete repositories of any user (everyone can create their own repositories), can associate content sources to repositories. 
view_adminRoles_permissions_permDesc_manageSecurity = \u80fd\u521b\u5efa,\u66f4\u65b0,\u6216\u5220\u9664\u7528\u6237\u548c\u89d2\u8272 (\u4efb\u4f55\u4eba\u90fd\u6709\u67e5\u770b\u6743\u9650) ##view_adminRoles_permissions_permDesc_manageSettings = \u80fd\u4fee\u6539RHQ\u670d\u52a1\u5668\u914d\u7f6e\u800c\u4e14\u80fd\u64cd\u4f5c\u4efb\u4f55\u76f8\u5173\u7684\u670d\u52a1\u5668\u529f\u80fd +##view_adminRoles_permissions_permDesc_unassignBundlesFromGroup = can unassign (not delete) a bundle from the bundle group +##view_adminRoles_permissions_permDesc_viewBundles = can view any bundle including global bundles (those not assigned to any bundle group) +##view_adminRoles_permissions_permDesc_viewBundlesInGroup = can view any bundle in the group +##view_adminRoles_permissions_permDesc_viewUsers = can view other users, with the exception of their assigned roles view_adminRoles_permissions_permReadDesc_configure = \u67e5\u770b\u8d44\u6e90\u914d\u7f6e\u548c\u8d44\u6e90\u914d\u7f6e\u4fee\u8ba2\u5386\u53f2 view_adminRoles_permissions_permReadDesc_control = (IMPLIED) \u67e5\u770b\u53ef\u7528\u64cd\u4f5c\u548c\u64cd\u4f5c\u6267\u884c\u5386\u53f2 view_adminRoles_permissions_permReadDesc_createChildResources = (IMPLIED) \u67e5\u770b\u5b50\u8d44\u6e90\u521b\u5efa\u5386\u53f2 @@ -527,12 +539,19 @@ view_adminRoles_permissions_permWriteDesc_manageContent = \u8ba2\u9605\u5185\u5b ##view_adminRoles_permissions_permWriteDesc_manageDrift = create, update, and delete drift definitions; and manage drift instances view_adminRoles_permissions_permWriteDesc_manageEvents = \u5220\u9664\u4e8b\u4ef6 view_adminRoles_permissions_permWriteDesc_manageMeasurements = \u66f4\u65b0\u6307\u6807\u96c6\u8ba1\u5212\u4efb\u52a1 +##view_adminRoles_permissions_perm_assignBundlesToGroup = Assign Bundles To Group view_adminRoles_permissions_perm_configure = \u914d\u7f6e view_adminRoles_permissions_perm_control = \u63a7\u5236 +##view_adminRoles_permissions_perm_createBundles = Create Bundles 
+##view_adminRoles_permissions_perm_createBundlesInGroup = Create Bundles In Group view_adminRoles_permissions_perm_createChildResources = \u521b\u5efa\u5b50\u8d44\u6e90 +##view_adminRoles_permissions_perm_deleteBundles = Delete Bundles +##view_adminRoles_permissions_perm_deleteBundlesFromGroup = Delete Bundles From Group view_adminRoles_permissions_perm_deleteChildResources = \u5220\u9664\u5b50\u8d44\u6e90 +##view_adminRoles_permissions_perm_deployBundles = Deploy Bundles view_adminRoles_permissions_perm_inventory = \u6e05\u5355 view_adminRoles_permissions_perm_manageAlerts = \u7ba1\u7406\u544a\u8b66 +##view_adminRoles_permissions_perm_manageBundleGroups = Manage Bundle Groups view_adminRoles_permissions_perm_manageBundles = \u7ba1\u7406Bundles view_adminRoles_permissions_perm_manageContent = \u7ba1\u7406\u5185\u5bb9 ##view_adminRoles_permissions_perm_manageDrift = Manage Drift @@ -542,6 +561,10 @@ view_adminRoles_permissions_perm_manageMeasurements = \u7ba1\u7406\u6307\u6807 view_adminRoles_permissions_perm_manageRepositories = \u7ba1\u7406\u5e93 view_adminRoles_permissions_perm_manageSecurity = \u7ba1\u7406\u6743\u9650 view_adminRoles_permissions_perm_manageSettings = \u7ba1\u7406\u914d\u7f6e +view_adminRoles_permissions_perm_unassignBundlesFromGroup = Unassign Bundles From Group +##view_adminRoles_permissions_perm_viewBundles = View Bundles +##view_adminRoles_permissions_perm_viewBundlesInGroup = View Bundles In Group +##view_adminRoles_permissions_perm_viewUsers = View Users view_adminRoles_permissions_read = \u8bfb: view_adminRoles_permissions_readAccessImplied = \u9ed8\u8ba4\u6709\u8bbf\u95ee{0}\u7684\u6743\u9650\u800c\u4e14\u65e0\u6cd5\u7981\u7528. 
view_adminRoles_permissions_resourcePermissions = \u8d44\u6e90\u6388\u6743 diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerBean.java index f61782a..dbecb26 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerBean.java @@ -43,7 +43,6 @@ import javax.persistence.EntityNotFoundException; import javax.persistence.PersistenceContext; import javax.persistence.Query;
-import org.apache.commons.httpclient.params.DefaultHttpParams; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.http.HttpResponse; @@ -55,7 +54,6 @@ import org.apache.http.client.params.HttpClientParams; import org.apache.http.impl.auth.BasicScheme; import org.apache.http.impl.client.DefaultHttpClient; import org.apache.http.params.BasicHttpParams; -import org.apache.http.params.DefaultedHttpParams; import org.apache.http.params.HttpParams; import org.apache.maven.artifact.versioning.ComparableVersion;
@@ -72,6 +70,7 @@ import org.rhq.core.domain.bundle.BundleDeployment; import org.rhq.core.domain.bundle.BundleDeploymentStatus; import org.rhq.core.domain.bundle.BundleDestination; import org.rhq.core.domain.bundle.BundleFile; +import org.rhq.core.domain.bundle.BundleGroup; import org.rhq.core.domain.bundle.BundleResourceDeployment; import org.rhq.core.domain.bundle.BundleResourceDeploymentHistory; import org.rhq.core.domain.bundle.BundleType; @@ -90,6 +89,7 @@ import org.rhq.core.domain.criteria.BundleCriteria; import org.rhq.core.domain.criteria.BundleDeploymentCriteria; import org.rhq.core.domain.criteria.BundleDestinationCriteria; import org.rhq.core.domain.criteria.BundleFileCriteria; +import org.rhq.core.domain.criteria.BundleGroupCriteria; import org.rhq.core.domain.criteria.BundleResourceDeploymentCriteria; import org.rhq.core.domain.criteria.BundleVersionCriteria; import org.rhq.core.domain.criteria.ResourceCriteria; @@ -1622,6 +1622,14 @@ public class BundleManagerBean implements BundleManagerLocal, BundleManagerRemot return queryRunner.execute(); }
+ @Override + public PageList<BundleGroup> findBundleGroupsByCriteria(Subject subject, BundleGroupCriteria criteria) { + CriteriaQueryGenerator generator = new CriteriaQueryGenerator(subject, criteria); + CriteriaQueryRunner<BundleGroup> queryRunner = new CriteriaQueryRunner<BundleGroup>(criteria, generator, + entityManager); + return queryRunner.execute(); + } + /** * Fetch bundles by criteria and then filter destination on the result objects to limit what the user can see * @param subject Caller diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerRemote.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerRemote.java index 2387833..b72d6b5 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerRemote.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/bundle/BundleManagerRemote.java @@ -30,6 +30,7 @@ import org.rhq.core.domain.bundle.Bundle; import org.rhq.core.domain.bundle.BundleDeployment; import org.rhq.core.domain.bundle.BundleDestination; import org.rhq.core.domain.bundle.BundleFile; +import org.rhq.core.domain.bundle.BundleGroup; import org.rhq.core.domain.bundle.BundleResourceDeployment; import org.rhq.core.domain.bundle.BundleType; import org.rhq.core.domain.bundle.BundleVersion; @@ -41,6 +42,7 @@ import org.rhq.core.domain.criteria.BundleCriteria; import org.rhq.core.domain.criteria.BundleDeploymentCriteria; import org.rhq.core.domain.criteria.BundleDestinationCriteria; import org.rhq.core.domain.criteria.BundleFileCriteria; +import org.rhq.core.domain.criteria.BundleGroupCriteria; import org.rhq.core.domain.criteria.BundleResourceDeploymentCriteria; import org.rhq.core.domain.criteria.BundleVersionCriteria; import org.rhq.core.domain.util.PageList; @@ -259,6 +261,8 @@ public interface BundleManagerRemote {
PageList<Bundle> findBundlesByCriteria(Subject subject, BundleCriteria criteria);
+ PageList<BundleGroup> findBundleGroupsByCriteria(Subject subject, BundleGroupCriteria criteria); + PageList<BundleDeployment> findBundleDeploymentsByCriteria(Subject subject, BundleDeploymentCriteria criteria);
PageList<BundleDestination> findBundleDestinationsByCriteria(Subject subject, BundleDestinationCriteria criteria);
commit 20f29263bca99d540b4027b63b690f805663ed13 Author: Thomas Segismont tsegismo@redhat.com Date: Wed Jul 24 18:55:25 2013 +0200
Fix StorageNodeComponentITest.shutdownStorageNode
Fixed storage node module pom typo Made test Cassandra server start with relative paths in classpath (otherwise the command line is too long and gets truncated in /proc/pid/cmdline this preventing the process query to find the server) Made CassandraNodeComponent shutdown operation to wait for server to go down
diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java index 8d74ccc..c41e8e7 100644 --- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java +++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java @@ -132,12 +132,7 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent long start = System.nanoTime(); try { // Get a fresh snapshot of the process - ProcessInfoSnapshot processInfoSnapshot = (processInfo == null) ? null : processInfo.freshSnapshot(); - if (processInfoSnapshot == null || !processInfoSnapshot.isRunning()) { - processInfo = getResourceContext().getNativeProcess(); - // Safe to get prior snapshot here, we've just recreated the process info instance - processInfoSnapshot = (processInfo == null) ? null : processInfo.priorSnaphot(); - } + ProcessInfoSnapshot processInfoSnapshot = getProcessInfoSnapshot(); return (processInfoSnapshot != null && processInfoSnapshot.isRunning()) ? AvailabilityType.UP : AvailabilityType.DOWN; } finally { @@ -151,11 +146,23 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent } }
+ private ProcessInfoSnapshot getProcessInfoSnapshot() { + ProcessInfoSnapshot processInfoSnapshot = (processInfo == null) ? null : processInfo.freshSnapshot(); + if (processInfoSnapshot == null || !processInfoSnapshot.isRunning()) { + processInfo = getResourceContext().getNativeProcess(); + // Safe to get prior snapshot here, we've just recreated the process info instance + processInfoSnapshot = (processInfo == null) ? null : processInfo.priorSnaphot(); + } + return processInfoSnapshot; + } + @Override public OperationResult invokeOperation(String name, Configuration parameters) throws Exception {
if (name.equals("shutdown")) { - return shutdownNode(); + OperationResult operationResult = shutdownNode(); + waitForNodeToGoDown(); + return operationResult; } else if (name.equals("start")) { return startNode(); } else if (name.equals("restart")) { @@ -167,6 +174,23 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent return null; }
+ private void waitForNodeToGoDown() throws InterruptedException { + for (ProcessInfoSnapshot processInfoSnapshot = getProcessInfoSnapshot();; processInfoSnapshot = getProcessInfoSnapshot()) { + if (processInfoSnapshot == null || !processInfoSnapshot.isRunning()) { + // Process not found, so it died, that's fine + // OR + // Process info says process is no longer running, that's fine as well + break; + } + if (getResourceContext().getComponentInvocationContext().isInterrupted()) { + // Operation canceled or timed out + throw new InterruptedException(); + } + // Process is still running, wait a second and check again + Thread.sleep(SECONDS.toMillis(1)); + } + } + @SuppressWarnings("rawtypes") protected OperationResult shutdownNode() { ResourceContext<?> context = getResourceContext(); diff --git a/modules/plugins/rhq-storage/pom.xml b/modules/plugins/rhq-storage/pom.xml index df79e40..b1d50b8 100644 --- a/modules/plugins/rhq-storage/pom.xml +++ b/modules/plugins/rhq-storage/pom.xml @@ -55,6 +55,20 @@ </dependency>
<dependency> + <groupId>${rhq.groupId}</groupId> + <artifactId>test-utils</artifactId> + <version>${project.version}</version> + <scope>test</scope> + <exclusions> + <exclusion> + <groupId>org.slf4j</groupId> + <artifactId>slf4j-nop</artifactId> + </exclusion> + </exclusions> + </dependency> + + + <dependency> <groupId>${project.groupId}</groupId> <artifactId>rhq-cassandra-schema</artifactId> <version>${project.version}</version> @@ -70,7 +84,7 @@ <phase>pre-integration-test</phase> <configuration> <target> - <property name="sigar.dir" value="${project.build.directory/sigar}"/> + <property name="sigar.dir" value="${project.build.directory}/sigar"/>
<mkdir dir="${pc.basedir}"/> <mkdir dir="${pc.lib.dir}"/> diff --git a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java index b668073..5bc8b31 100644 --- a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java +++ b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java @@ -1,3 +1,22 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + package org.rhq.plugins.storage;
import static java.util.Arrays.asList; @@ -94,8 +113,10 @@ public class StorageNodeComponentITest { File binDir = new File(basedir, "bin"); SystemInfo systemInfo = SystemInfoFactory.createSystemInfo();
- File startScript = new File(binDir, "cassandra"); + File startScript = new File("./cassandra"); ProcessExecution startScriptExe = ProcessExecutionUtility.createProcessExecution(startScript); + startScriptExe.setWorkingDirectory(binDir.getAbsolutePath()); + startScriptExe.setCheckExecutableExists(false);
startScriptExe.addArguments(asList("-p", "cassandra.pid")); startScriptExe.setCaptureOutput(true); @@ -176,8 +197,7 @@ public class StorageNodeComponentITest {
assertFalse(pidFile.exists(), pidFile + " should be deleted when the storage node is shutdown.");
- // TODO why is this failing? - //assertNodeIsDown("Expected " + storageNode + " to be DOWN after shutting it down"); + assertNodeIsDown("Expected " + storageNode + " to be DOWN after shutting it down"); }
@Test(dependsOnMethods = "shutdownStorageNode") diff --git a/modules/plugins/rhq-storage/src/test/resources/log4j.properties b/modules/plugins/rhq-storage/src/test/resources/log4j.properties deleted file mode 100644 index 67db049..0000000 --- a/modules/plugins/rhq-storage/src/test/resources/log4j.properties +++ /dev/null @@ -1,42 +0,0 @@ -# -# /* -# * RHQ Management Platform -# * Copyright (C) 2005-2012 Red Hat, Inc. -# * All rights reserved. -# * -# * This program is free software; you can redistribute it and/or modify -# * it under the terms of the GNU General Public License, version 2, as -# * published by the Free Software Foundation, and/or the GNU Lesser -# * General Public License, version 2.1, also as published by the Free -# * Software Foundation. -# * -# * This program is distributed in the hope that it will be useful, -# * but WITHOUT ANY WARRANTY; without even the implied warranty of -# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# * GNU General Public License and the GNU Lesser General Public License -# * for more details. -# * -# * You should have received a copy of the GNU General Public License -# * and the GNU Lesser General Public License along with this program; -# * if not, write to the Free Software Foundation, Inc., -# * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
-# */ -# - -log4j.rootCategory=WARN, FILE, CONSOLE - -log4j.appender.FILE=org.apache.log4j.DailyRollingFileAppender -log4j.appender.FILE.DatePattern='.'yyyy-MM-dd -log4j.appender.FILE.File=./target/test.log -log4j.appender.FILE.layout=org.apache.log4j.PatternLayout -log4j.appender.FILE.layout.ConversionPattern=%d{ABSOLUTE} %-5p %c %m%n -#log4j.appender.A1.layout.ConversionPattern=%d [%t] %-5p %c - %m%n -log4j.appender.FILE.Append=false - -log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender -log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout -#log4j.appender.CONSOLE.layout.ConversionPattern=%5p %d{HH:mm:ss,SSS} %m%n -log4j.appender.CONSOLE.layout.ConversionPattern=%d{ABSOLUTE} %-5p %c %m%n - -log4j.logger.org.rhq=DEBUG -log4j.logger.com.datastax=DEBUG diff --git a/modules/plugins/rhq-storage/src/test/resources/log4j.xml b/modules/plugins/rhq-storage/src/test/resources/log4j.xml new file mode 100644 index 0000000..ec3cd98 --- /dev/null +++ b/modules/plugins/rhq-storage/src/test/resources/log4j.xml @@ -0,0 +1,40 @@ +<?xml version="1.0" encoding="UTF-8"?> + +<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd"> + +<!-- | For more configuration information and examples, see the Jakarta Log4j | website: http://jakarta.apache.org/log4j --> + +<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/%22%3E + + <appender name="CONSOLE" class="org.apache.log4j.ConsoleAppender"> + <param name="Target" value="System.out" /> + <param name="Threshold" value="WARN" /> + <layout class="org.apache.log4j.PatternLayout"> + <param name="ConversionPattern" value="%-5p %d{dd-MM HH:mm:ss,SSS} (%F:%M:%L) - %m%n" /> + </layout> + </appender> + + <appender name="FILE" class="org.apache.log4j.RollingFileAppender"> + <param name="File" value="target/test.log" /> + <param name="Append" value="false" /> + <param name="Threshold" value="DEBUG" /> + <layout class="org.apache.log4j.PatternLayout"> + <param name="ConversionPattern" value="%-5p %d{dd-MM HH:mm:ss,SSS} (%F:%M:%L) - 
%m%n" /> + </layout> + </appender> + + <logger name="org.rhq"> + <level value="DEBUG" /> + </logger> + + <logger name="com.datastax"> + <level value="DEBUG" /> + </logger> + + <root> + <level value="WARN" /> + <appender-ref ref="CONSOLE" /> + <appender-ref ref="FILE" /> + </root> + +</log4j:configuration>
commit 68fcc27445e375748d2bd27429d7729ae3c076db Author: John Sanda jsanda@redhat.com Date: Wed Jul 24 07:47:24 2013 -0400
[BZ 987899] remove and create pid file during shutdown and start operations
diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java index cf24fcd..8d74ccc 100644 --- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java +++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java @@ -18,6 +18,7 @@ */ package org.rhq.plugins.cassandra;
+import static java.util.Arrays.asList; import static java.util.concurrent.TimeUnit.NANOSECONDS; import static java.util.concurrent.TimeUnit.SECONDS; import static org.rhq.core.system.OperatingSystemType.WINDOWS; @@ -210,6 +211,14 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent long pid = process.getPid(); try { process.kill("KILL"); + + Configuration pluginConfig = getResourceContext().getPluginConfiguration(); + File basedir = new File(pluginConfig.getSimpleValue("baseDir")); + File binDir = new File(basedir, "bin"); + File pidFile = new File(binDir, "cassandra.pid"); + + pidFile.delete(); + return new OperationResult("Successfully shut down Cassandra daemon with pid " + pid); } catch (SigarException e) { LOG.warn("Failed to shut down Cassandra node with pid " + pid, e); @@ -226,8 +235,10 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent String baseDir = pluginConfig.getSimpleValue("baseDir"); File binDir = new File(baseDir, "bin"); File startScript = new File(binDir, getStartScript()); + File pidFile = new File(binDir, "cassandra.pid");
ProcessExecution scriptExe = ProcessExecutionUtility.createProcessExecution(startScript); + scriptExe.addArguments(asList("-p", pidFile.getAbsolutePath())); SystemInfo systemInfo = context.getSystemInformation(); ProcessExecutionResults results = systemInfo.executeProcess(scriptExe);
diff --git a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java index cd9f148..b668073 100644 --- a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java +++ b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java @@ -2,7 +2,9 @@ package org.rhq.plugins.storage;
import static java.util.Arrays.asList; import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertNotNull; +import static org.testng.Assert.assertTrue;
import java.io.File; import java.net.InetAddress; @@ -168,8 +170,34 @@ public class StorageNodeComponentITest { new Configuration(), timeout);
assertEquals(result.getResultCode(), OperationServicesResultCode.SUCCESS, "The shutdown operation failed"); + + File binDir = new File(basedir, "bin"); + File pidFile = new File(binDir, "cassandra.pid"); + + assertFalse(pidFile.exists(), pidFile + " should be deleted when the storage node is shutdown."); + // TODO why is this failing? - assertNodeIsDown("Expected " + storageNode + " to be DOWN after shutting it down"); + //assertNodeIsDown("Expected " + storageNode + " to be DOWN after shutting it down"); + } + + @Test(dependsOnMethods = "shutdownStorageNode") + public void restartStorageNode() { + OperationManager operationManager = PluginContainer.getInstance().getOperationManager(); + OperationServicesAdapter operationsService = new OperationServicesAdapter(operationManager); + + long timeout = 1000 * 60; + OperationContextImpl operationContext = new OperationContextImpl(storageNode.getId()); + OperationServicesResult result = operationsService.invokeOperation(operationContext, "start", + new Configuration(), timeout); + + assertEquals(result.getResultCode(), OperationServicesResultCode.SUCCESS, "The start operation failed."); + + File binDir = new File(basedir, "bin"); + File pidFile = new File(binDir, "cassandra.pid"); + + assertTrue(pidFile.exists(), pidFile + " should be created when starting the storage node."); + + assertNodeIsUp("Expected " + storageNode + " to be up after restarting it."); }
private void assertNodeIsUp(String msg) { @@ -192,7 +220,8 @@ public class StorageNodeComponentITest {
private Availability getAvailability() { InventoryManager inventoryManager = PluginContainer.getInstance().getInventoryManager(); - return inventoryManager.getAvailabilityIfKnown(storageNode); +// return inventoryManager.getAvailabilityIfKnown(storageNode); + return inventoryManager.getCurrentAvailability(storageNode); }
private void executeAvailabilityScan() {
commit 96286a4e22da2135a85ad6b09d069b9e690a05c9 Author: John Sanda jsanda@redhat.com Date: Wed Jul 24 07:08:09 2013 -0400
uncommented code that was done while debugging tests
diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java index f76da22..cf24fcd 100644 --- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java +++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java @@ -111,7 +111,7 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent clusterBuilder = clusterBuilder.withCredentials(username, password); }
-// this.cassandraSession = clusterBuilder.build().connect(clusterName); + this.cassandraSession = clusterBuilder.build().connect(clusterName); } catch (Exception e) { LOG.error("Connect to Cassandra " + host + ":" + nativePort, e); throw e;
commit 1ceae7f8fc049bd036d03e4c84fadb5a8a057563 Author: Heiko W. Rupp hwr@redhat.com Date: Wed Jul 24 11:54:53 2013 +0200
BZ 976786 Add a bit more wait time and an additional check if SUCCESS really means it. Return IN_PROGRESS otherwise.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/ResourceHandlerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/ResourceHandlerBean.java index 4bfbb7c..7a6fb33 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/ResourceHandlerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/ResourceHandlerBean.java @@ -721,6 +721,9 @@ public class ResourceHandlerBean extends AbstractRestBean {
@POST @Path("/") + @ApiErrors({ + @ApiError(code = 302, reason = "Creation is still happening. Check back with a GET on the Location.") + }) @ApiOperation(value = "Create a new resource as a child of an existing resource. ", notes= "If a handle is given, a content based resource is created; the content identified by the handle is not removed from the content store." + "If no handle is given, a resource is created from the data of the passed 'resource' object.") @@ -824,6 +827,11 @@ public class ResourceHandlerBean extends AbstractRestBean {
CreateResourceStatus status = history.getStatus();
+ try { + Thread.sleep(2000L); // give the agent time to do the work + } catch (InterruptedException e) { + ; // nothing + }
MediaType mediaType = headers.getAcceptableMediaTypes().get(0);
@@ -832,11 +840,16 @@ public class ResourceHandlerBean extends AbstractRestBean { if ( status == CreateResourceStatus.SUCCESS) {
ResourceWithType rwt = findCreatedResource(history.getParentResource().getId(),history.getCreatedResourceName(),uriInfo); - - builder = Response.ok(); - builder.entity(rwt); + if (rwt!=null) { + builder = Response.ok(); + builder.entity(rwt); + } else { + // History says we had success but due to internal timing + // the resource is not yet visible, so switch to in_progress + status = CreateResourceStatus.IN_PROGRESS; + } } - else if (status==CreateResourceStatus.IN_PROGRESS) { + if (status==CreateResourceStatus.IN_PROGRESS) {
try { Thread.sleep(2000L); // give the agent time to do the work @@ -865,6 +878,7 @@ public class ResourceHandlerBean extends AbstractRestBean { @GET @Path("/creationStatus/{id}") @ApiOperation("Get the status of a resource creation for content based resources.") + @ApiError(code = 302, reason = "Creation is still going on. Check back later with the same URL.") public Response getHistoryItem(@PathParam("id") int historyId, @Context HttpHeaders headers, @Context UriInfo uriInfo) {
CreateResourceHistory history; @@ -888,13 +902,17 @@ public class ResourceHandlerBean extends AbstractRestBean { if (status== CreateResourceStatus.SUCCESS) {
ResourceWithType rwt = findCreatedResource(history.getParentResource().getId(),history.getCreatedResourceName(),uriInfo); - - builder = Response.ok(); - setCachingHeader(builder, 600); - builder.entity(rwt); - + if (rwt!=null) { + builder = Response.ok(); + setCachingHeader(builder, 600); + builder.entity(rwt); + } else { + // History says we had success but due to internal timing + // the resource is not yet visible, so switch to in_progress + status = CreateResourceStatus.IN_PROGRESS; + } } - else if (status==CreateResourceStatus.IN_PROGRESS) { + if (status==CreateResourceStatus.IN_PROGRESS) {
UriBuilder uriBuilder = uriInfo.getRequestUriBuilder(); @@ -913,6 +931,14 @@ public class ResourceHandlerBean extends AbstractRestBean {
}
+ /** + * Find the created resource by its name and parent. Will only return it + * if the resource is already committed. + * @param parentId Id of the parent + * @param name Name of the resource to find + * @param uriInfo UriInfo object to fill links in the returned resource + * @return A ResourceWithType if found, null otherwise. + */ private ResourceWithType findCreatedResource(int parentId, String name, UriInfo uriInfo) { ResourceCriteria criteria = new ResourceCriteria(); criteria.setStrict(true); @@ -920,6 +946,9 @@ public class ResourceHandlerBean extends AbstractRestBean { criteria.addFilterName(name); criteria.addFilterInventoryStatus(InventoryStatus.COMMITTED); List<Resource> resources = resMgr.findResourcesByCriteria(caller,criteria); + if (resources.size()==0) { + return null; + } Resource res = resources.get(0); return fillRWT(res,uriInfo); } diff --git a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/ContentTest.java b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/ContentTest.java index 8303513..b12eea3 100644 --- a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/ContentTest.java +++ b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/ContentTest.java @@ -222,8 +222,10 @@ public class ContentTest extends AbstractBase { int status = response.getStatusCode(); String location = response.getHeader("Location");
- System.out.println("\nLocation " + location + "\n\n"); - assert location!=null; + if (status!=200) { + System.out.println("\nLocation " + location + "\n\n"); + assert location!=null; + }
// We need to check what we got. A 302 means the deploy is still // in progress, so we need to wait a little longer @@ -244,6 +246,7 @@ public class ContentTest extends AbstractBase {
createdResourceId = response.jsonPath().getInt("resourceId");
+ System.out.flush(); System.out.println("\n Deploy is done, resource Id = " + createdResourceId + " \n"); System.out.flush();
@@ -254,6 +257,7 @@ public class ContentTest extends AbstractBase { // Remove the uploaded content removeContent(handle, false);
+ System.out.flush(); System.out.println("\n Content removed \n"); System.out.flush();
commit b537244bad778a80f6fdf92880abc245eed465ec Author: John Sanda jsanda@redhat.com Date: Tue Jul 23 22:23:06 2013 -0400
initial commit for StorageNodeComponentITest
This is a first stab at some integration tests for the storage plugin.
diff --git a/modules/plugins/rhq-storage/pom.xml b/modules/plugins/rhq-storage/pom.xml index a88f56e..df79e40 100644 --- a/modules/plugins/rhq-storage/pom.xml +++ b/modules/plugins/rhq-storage/pom.xml @@ -10,11 +10,16 @@
<groupId>org.rhq</groupId> <artifactId>rhq-rhqstorage-plugin</artifactId> - <packaging>jar</packaging>
<name>RHQ Storage Plugin</name> <description>A plugin for managing RHQ Storage Nodes</description>
+ <properties> + <pc.basedir>${project.build.directory}/plugin-container</pc.basedir> + <pc.plugins.dir>${pc.basedir}/plugins</pc.plugins.dir> + <pc.lib.dir>${pc.basedir}/lib</pc.lib.dir> + </properties> + <dependencies> <dependency> <groupId>${rhq.groupId}</groupId> @@ -27,7 +32,6 @@ <groupId>${rhq.groupId}</groupId> <artifactId>rhq-cassandra-plugin</artifactId> <version>${project.version}</version> - <!--<scope>provided</scope>--> </dependency>
<dependency> @@ -35,8 +39,113 @@ <artifactId>org-mc4j-ems</artifactId> <scope>provided</scope> </dependency> + + <dependency> + <groupId>${project.groupId}</groupId> + <artifactId>rhq-cassandra-ccm-core</artifactId> + <version>${project.version}</version> + <scope>test</scope> + </dependency> + + <dependency> + <groupId>${project.groupId}</groupId> + <artifactId>rhq-platform-plugin</artifactId> + <version>${project.version}</version> + <scope>test</scope> + </dependency> + + <dependency> + <groupId>${project.groupId}</groupId> + <artifactId>rhq-cassandra-schema</artifactId> + <version>${project.version}</version> + </dependency> </dependencies>
+ <build> + <plugins> + <plugin> + <artifactId>maven-antrun-plugin</artifactId> + <executions> + <execution> + <phase>pre-integration-test</phase> + <configuration> + <target> + <property name="sigar.dir" value="${project.build.directory/sigar}"/> + + <mkdir dir="${pc.basedir}"/> + <mkdir dir="${pc.lib.dir}"/> + <mkdir dir="${pc.plugins.dir}"/> + + <copy file="${org.rhq:rhq-platform-plugin:jar}" todir="${pc.plugins.dir}"/> + <copy file="${org.rhq:rhq-jmx-plugin:jar}" todir="${pc.plugins.dir}"/> + <copy file="${org.rhq:rhq-cassandra-plugin:jar}" todir="${pc.plugins.dir}"/> + <copy file="${project.build.directory}/${project.build.finalName}.jar" todir="${pc.plugins.dir}"/> + + <unzip src="${org.hyperic:sigar-dist:zip}" dest="${sigar.dir}"> + <patternset> + <include name="**/lib/sigar.jar" /> + <include name="**/lib/bcel*.jar" /> + <include name="**/lib/*.so" /> + <include name="**/lib/*.sl" /> + <include name="**/lib/*.dll" /> + <include name="**/lib/*.dylib" /> + </patternset> + </unzip> + <move todir="${pc.lib.dir}" flatten="true"> + <fileset dir="${sigar.dir}" includes="**/lib/*"/> + </move> + <delete dir="${sigar.dir}"/> + </target> + </configuration> + <goals> + <goal>run</goal> + </goals> + </execution> + </executions> + </plugin> + + <plugin> + <artifactId>maven-failsafe-plugin</artifactId> + <version>2.13</version> + <executions> + <execution> + <id>integration-test</id> + <goals> + <goal>integration-test</goal> + </goals> + <configuration> + <includes> + <include>**/*ITest.java</include> + </includes> + <argLine>-Djava.library.path=${pc.lib.dir}</argLine> + <systemPropertyVariables> + <pc.plugins.dir>${pc.plugins.dir}</pc.plugins.dir> + </systemPropertyVariables> + </configuration> + </execution> + <execution> + <id>verify</id> + <goals> + <goal>verify</goal> + </goals> + <configuration> + <testFailureIgnore>false</testFailureIgnore> + </configuration> + </execution> + </executions> + </plugin> + + <plugin> + <artifactId>maven-surefire-plugin</artifactId> + 
<configuration> + <excludes> + <exclude>**/*ITest.java</exclude> + </excludes> + </configuration> + </plugin> + </plugins> + </build> + <profiles> <profile> <id>dev</id> diff --git a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java new file mode 100644 index 0000000..cd9f148 --- /dev/null +++ b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java @@ -0,0 +1,216 @@ +package org.rhq.plugins.storage; + +import static java.util.Arrays.asList; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertNotNull; + +import java.io.File; +import java.net.InetAddress; +import java.util.Set; + +import com.google.common.collect.Sets; + +import org.testng.annotations.AfterSuite; +import org.testng.annotations.BeforeSuite; +import org.testng.annotations.Test; + +import org.rhq.cassandra.CassandraClusterManager; +import org.rhq.cassandra.ClusterInitService; +import org.rhq.cassandra.Deployer; +import org.rhq.cassandra.DeploymentOptions; +import org.rhq.cassandra.DeploymentOptionsFactory; +import org.rhq.cassandra.schema.SchemaManager; +import org.rhq.core.clientapi.server.discovery.InventoryReport; +import org.rhq.core.domain.cloud.StorageNode; +import org.rhq.core.domain.configuration.Configuration; +import org.rhq.core.domain.measurement.Availability; +import org.rhq.core.domain.measurement.AvailabilityType; +import org.rhq.core.domain.resource.Resource; +import org.rhq.core.domain.resource.ResourceType; +import org.rhq.core.pc.PluginContainer; +import org.rhq.core.pc.PluginContainerConfiguration; +import org.rhq.core.pc.inventory.InventoryManager; +import org.rhq.core.pc.operation.OperationContextImpl; +import org.rhq.core.pc.operation.OperationManager; +import org.rhq.core.pc.operation.OperationServicesAdapter; +import 
org.rhq.core.pc.plugin.FileSystemPluginFinder; +import org.rhq.core.pluginapi.operation.OperationServicesResult; +import org.rhq.core.pluginapi.operation.OperationServicesResultCode; +import org.rhq.core.pluginapi.util.ProcessExecutionUtility; +import org.rhq.core.system.ProcessExecution; +import org.rhq.core.system.ProcessExecutionResults; +import org.rhq.core.system.SystemInfo; +import org.rhq.core.system.SystemInfoFactory; + +/** + * @author John Sanda + */ +public class StorageNodeComponentITest { + + private File basedir; + + private Resource storageNode; + + @BeforeSuite + public void deployStorageNodeAndPluginContainer() throws Exception { + basedir = new File("target", "rhq-storage"); + + deployStorageNode(); + + initPluginContainer(); + } + + private void deployStorageNode() throws Exception { + DeploymentOptionsFactory factory = new DeploymentOptionsFactory(); + DeploymentOptions deploymentOptions = factory.newDeploymentOptions(); + String address = "127.0.0.1"; + + deploymentOptions.setSeeds(address); + deploymentOptions.setListenAddress(address); + deploymentOptions.setRpcAddress(address); + deploymentOptions.setBasedir(basedir.getAbsolutePath()); + deploymentOptions.setCommitLogDir(new File(basedir, "commit_log").getAbsolutePath()); + deploymentOptions.setDataDir(new File(basedir, "data").getAbsolutePath()); + deploymentOptions.setSavedCachesDir(new File(basedir, "saved_caches").getAbsolutePath()); + deploymentOptions.setCommitLogDir(new File(basedir, "logs").getAbsolutePath()); + deploymentOptions.setLoggingLevel("DEBUG"); + deploymentOptions.setNativeTransportPort(9142); + deploymentOptions.setJmxPort(7399); + deploymentOptions.setHeapSize("256M"); + deploymentOptions.setHeapNewSize("64M"); + + deploymentOptions.load(); + + Deployer deployer = new Deployer(); + deployer.setDeploymentOptions(deploymentOptions); + + deployer.unzipDistro(); + deployer.applyConfigChanges(); + deployer.updateFilePerms(); + 
deployer.updateStorageAuthConf(Sets.newHashSet(InetAddress.getByName(address))); + + File binDir = new File(basedir, "bin"); + SystemInfo systemInfo = SystemInfoFactory.createSystemInfo(); + + File startScript = new File(binDir, "cassandra"); + ProcessExecution startScriptExe = ProcessExecutionUtility.createProcessExecution(startScript); + + startScriptExe.addArguments(asList("-p", "cassandra.pid")); + startScriptExe.setCaptureOutput(true); + ProcessExecutionResults results = systemInfo.executeProcess(startScriptExe); + + assertEquals(results.getExitCode(), (Integer) 0, "Cassandra failed to start: " + results.getCapturedOutput()); + + StorageNode storageNode = new StorageNode(); + storageNode.parseNodeInformation("127.0.0.1|7399|9142"); + + ClusterInitService clusterInitService = new ClusterInitService(); + clusterInitService.waitForClusterToStart(asList(storageNode)); + + SchemaManager schemaManager = new SchemaManager("rhqadmin", "rhqadmin", "127.0.0.1|7399|9142"); + schemaManager.install(); + schemaManager.updateTopology(true); + } + + private void initPluginContainer() { + PluginContainerConfiguration pcConfig = new PluginContainerConfiguration(); + File pluginsDir = new File(System.getProperty("pc.plugins.dir")); + pcConfig.setPluginDirectory(pluginsDir); + pcConfig.setPluginFinder(new FileSystemPluginFinder(pluginsDir)); + + pcConfig.setInsideAgent(false); + PluginContainer.getInstance().setConfiguration(pcConfig); + PluginContainer.getInstance().initialize(); + } + + @AfterSuite + public void ShutdownPluginContainerAndStorageNode() throws Exception { + PluginContainer.getInstance().shutdown(); + shutdownStorageNodeIfNecessary(); + } + + private void shutdownStorageNodeIfNecessary() throws Exception { + File binDir = new File(basedir, "bin"); + File pidFile = new File(binDir, "cassandra.pid"); + + if (pidFile.exists()) { + CassandraClusterManager ccm = new CassandraClusterManager(); + ccm.killNode(basedir); + } + } + + @Test + public void 
discoverStorageNode() { + InventoryManager inventoryManager = PluginContainer.getInstance().getInventoryManager(); + InventoryReport inventoryReport = inventoryManager.executeServerScanImmediately(); + + if (inventoryReport.getAddedRoots().isEmpty()) { + // could be empty if the storage node is already in inventory from + // a prior discovery scan. + Resource platform = inventoryManager.getPlatform(); + storageNode = findCassandraNode(platform.getChildResources()); + } else { + storageNode = findCassandraNode(inventoryReport.getAddedRoots()); + } + + assertNotNull(storageNode, "Failed to discover Storage Node instance"); + assertNodeIsUp("Expected " + storageNode + " to be UP after discovery"); + } + + @Test(dependsOnMethods = "discoverStorageNode") + public void shutdownStorageNode() throws Exception { + OperationManager operationManager = PluginContainer.getInstance().getOperationManager(); + OperationServicesAdapter operationsService = new OperationServicesAdapter(operationManager); + + long timeout = 1000 * 60; + OperationContextImpl operationContext = new OperationContextImpl(storageNode.getId()); + OperationServicesResult result = operationsService.invokeOperation(operationContext, "shutdown", + new Configuration(), timeout); + + assertEquals(result.getResultCode(), OperationServicesResultCode.SUCCESS, "The shutdown operation failed"); + // TODO why is this failing? 
+ assertNodeIsDown("Expected " + storageNode + " to be DOWN after shutting it down"); + } + + private void assertNodeIsUp(String msg) { + executeAvailabilityScan(); + + Availability availability = getAvailability(); + + assertNotNull(availability, "Unable to determine availability for " + storageNode); + assertEquals(availability.getAvailabilityType(), AvailabilityType.UP, msg); + } + + private void assertNodeIsDown(String msg) { + executeAvailabilityScan(); + + Availability availability = getAvailability(); + + assertNotNull(availability, "Unable to determine availability for " + storageNode); + assertEquals(availability.getAvailabilityType(), AvailabilityType.DOWN, msg); + } + + private Availability getAvailability() { + InventoryManager inventoryManager = PluginContainer.getInstance().getInventoryManager(); + return inventoryManager.getAvailabilityIfKnown(storageNode); + } + + private void executeAvailabilityScan() { + InventoryManager inventoryManager = PluginContainer.getInstance().getInventoryManager(); + inventoryManager.executeAvailabilityScanImmediately(false, true); + } + + private Resource findCassandraNode(Set<Resource> resources) { + for (Resource resource : resources) { + if (isCassandraNode(resource.getResourceType())) { + return resource; + } + } + return null; + } + + private boolean isCassandraNode(ResourceType type) { + return type.getPlugin().equals("RHQStorage") && type.getName().equals("RHQ Storage Node"); + } + +} diff --git a/modules/plugins/rhq-storage/src/test/resources/log4j.properties b/modules/plugins/rhq-storage/src/test/resources/log4j.properties new file mode 100644 index 0000000..67db049 --- /dev/null +++ b/modules/plugins/rhq-storage/src/test/resources/log4j.properties @@ -0,0 +1,42 @@ +# +# /* +# * RHQ Management Platform +# * Copyright (C) 2005-2012 Red Hat, Inc. +# * All rights reserved. 
+# * +# * This program is free software; you can redistribute it and/or modify +# * it under the terms of the GNU General Public License, version 2, as +# * published by the Free Software Foundation, and/or the GNU Lesser +# * General Public License, version 2.1, also as published by the Free +# * Software Foundation. +# * +# * This program is distributed in the hope that it will be useful, +# * but WITHOUT ANY WARRANTY; without even the implied warranty of +# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# * GNU General Public License and the GNU Lesser General Public License +# * for more details. +# * +# * You should have received a copy of the GNU General Public License +# * and the GNU Lesser General Public License along with this program; +# * if not, write to the Free Software Foundation, Inc., +# * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# */ +# + +log4j.rootCategory=WARN, FILE, CONSOLE + +log4j.appender.FILE=org.apache.log4j.DailyRollingFileAppender +log4j.appender.FILE.DatePattern='.'yyyy-MM-dd +log4j.appender.FILE.File=./target/test.log +log4j.appender.FILE.layout=org.apache.log4j.PatternLayout +log4j.appender.FILE.layout.ConversionPattern=%d{ABSOLUTE} %-5p %c %m%n +#log4j.appender.A1.layout.ConversionPattern=%d [%t] %-5p %c - %m%n +log4j.appender.FILE.Append=false + +log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender +log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout +#log4j.appender.CONSOLE.layout.ConversionPattern=%5p %d{HH:mm:ss,SSS} %m%n +log4j.appender.CONSOLE.layout.ConversionPattern=%d{ABSOLUTE} %-5p %c %m%n + +log4j.logger.org.rhq=DEBUG +log4j.logger.com.datastax=DEBUG
commit 83e5b228871c9a8352e98a12e0db76f8f4ea982e Author: John Sanda jsanda@redhat.com Date: Tue Jul 23 22:16:41 2013 -0400
first stab at prepareForBootstrap operation (which is currently broken)
This is clearly broken from some manual testing I did. Given that the implementation is a bit sloppy at the moment, this is a good time to get some automated tests in place. The operation will perform the following steps in the ordered specified:
1) shut down the storage node 2) update cassandra.yaml 3) update rhq-storage-auth.conf 4) restart the node
diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java index 0037bfe..f76da22 100644 --- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java +++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java @@ -111,7 +111,7 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent clusterBuilder = clusterBuilder.withCredentials(username, password); }
- this.cassandraSession = clusterBuilder.build().connect(clusterName); +// this.cassandraSession = clusterBuilder.build().connect(clusterName); } catch (Exception e) { LOG.error("Connect to Cassandra " + host + ":" + nativePort, e); throw e; @@ -196,7 +196,17 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent operation = storageService.getOperation("drain", emptyParams); operation.invoke((Object[]) emptyParams);
- ProcessInfo process = context.getNativeProcess(); + return stopNode(); + } + + protected OperationResult stopNode() { + ProcessInfo process = getResourceContext().getNativeProcess(); + + if (processInfo == null) { + LOG.warn("Failed to obtain process info. It appears Cassandra is already shutdown."); + return new OperationResult("Failed to obtain process info. It appears Cassandra is already shutdown."); + } + long pid = process.getPid(); try { process.kill("KILL"); @@ -209,6 +219,7 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent } }
+ protected OperationResult startNode() { ResourceContext<?> context = getResourceContext(); Configuration pluginConfig = context.getPluginConfiguration(); diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java index 3b0aa5b..d9b35b9 100644 --- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java @@ -26,11 +26,15 @@ package org.rhq.plugins.storage;
import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; import java.io.FileWriter; import java.io.IOException; import java.io.StringReader; +import java.util.ArrayList; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set;
import org.apache.commons.logging.Log; @@ -39,6 +43,8 @@ import org.mc4j.ems.connection.EmsConnection; import org.mc4j.ems.connection.bean.EmsBean; import org.mc4j.ems.connection.bean.attribute.EmsAttribute; import org.mc4j.ems.connection.bean.operation.EmsOperation; +import org.yaml.snakeyaml.DumperOptions; +import org.yaml.snakeyaml.Yaml;
import org.rhq.core.domain.configuration.Configuration; import org.rhq.core.domain.configuration.ConfigurationUpdateStatus; @@ -48,6 +54,7 @@ import org.rhq.core.domain.configuration.PropertyMap; import org.rhq.core.domain.configuration.PropertySimple; import org.rhq.core.pluginapi.configuration.ConfigurationFacet; import org.rhq.core.pluginapi.configuration.ConfigurationUpdateReport; +import org.rhq.core.pluginapi.inventory.ResourceContext; import org.rhq.core.pluginapi.operation.OperationFacet; import org.rhq.core.pluginapi.operation.OperationResult; import org.rhq.core.util.StringUtil; @@ -96,6 +103,8 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper return updateConfiguration(parameters); } else if (name.equals("updateKnownNodes")) { return updateKnownNodes(parameters); + } else if (name.equals("prepareForBootstrap")) { + return prepareForBootstrap(parameters); } else { return super.invokeOperation(name, parameters); } @@ -132,6 +141,18 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper ipAddresses.add(propertySimple.getStringValue()); }
+ if (updateAuthFile(result, ipAddresses)) return result; + + EmsBean authBean = getEmsConnection().getBean("org.rhq.cassandra.auth:type=RhqInternodeAuthenticator"); + EmsOperation emsOperation = authBean.getOperation("reloadConfiguration"); + emsOperation.invoke(); + + result.setSimpleResult("Successfully updated the set of known nodes."); + + return result; + } + + private boolean updateAuthFile(OperationResult result, Set<String> ipAddresses) { log.info("Updating known nodes to " + ipAddresses);
File confDir = new File(getBasedir(), "conf"); @@ -150,7 +171,7 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper log.error(msg); result.setErrorMessage(msg);
- return result; + return true; } }
@@ -161,7 +182,7 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper "to unexpected error"; log.error(msg, e); result.setErrorMessage(msg + ": " + ThrowableUtil.getRootMessage(e)); - return result; + return true; }
try { @@ -176,18 +197,127 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper } result.setErrorMessage("There was an unexpected error while updating " + authFile + ". Make sure that " + "it matches " + authBackupFile + " and then reschedule the operation."); + return true; + } + return false; + } + + private OperationResult prepareForBootstrap(Configuration params) { + log.info("Preparing " + this + " for bootstrap..."); + + ResourceContext context = getResourceContext(); + OperationResult result = new OperationResult(); + + log.info("Stopping storage node"); + OperationResult stopNodeResult = stopNode(); + if (stopNodeResult.getErrorMessage() != null) { + log.error("Failed to stop storage node " + this + " Cannot prepare the node for bootstrap which means " + + "that the storage node cannot join the cluster. Make sure the storage node is not running and retry " + + "the operation"); + result.setErrorMessage("Failed to stop storage node. Cannot prepare the node for bootstrap which means " + + "that it cannot join the cluster. Make sure that the node is not running and retry the operation. " + + "Stopping the storage node failed with this error: " + stopNodeResult.getErrorMessage()); return result; }
- EmsBean authBean = getEmsConnection().getBean("org.rhq.cassandra.auth:type=RhqInternodeAuthenticator"); - EmsOperation emsOperation = authBean.getOperation("reloadConfiguration"); - emsOperation.invoke(); + Configuration pluginConfig = context.getPluginConfiguration(); + String yamlProp = pluginConfig.getSimpleValue("yamlConfiguration"); + File yamlFile = new File(yamlProp);
- result.setSimpleResult("Successfully updated the set of known nodes."); + DumperOptions options = new DumperOptions(); + options.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK); + Yaml yaml = new Yaml(options); + + Map yamlConfig = null; + try { + yamlConfig = (Map) yaml.load(new FileInputStream(yamlFile)); + } catch (FileNotFoundException e) { + log.error("Failed to load " + yamlFile, e); + log.error("Cannot prepare " + this + " for bootstrap. " + yamlFile + " must exist in order to make the " + + "necessary configuration changes."); + result.setErrorMessage("Cannot prepare storage node for bootstrap. It appears that " + yamlFile + + " does not exist. Make sure that it exists so that the necessary configuration changes can be made."); + + return result; + } + + purgeDir(getCommitLogDir(yamlConfig)); + for (File dataDir : getDataDirs(yamlConfig)) { + purgeDir(dataDir); + } + purgeDir(getSavedCachesDir(yamlConfig)); + + log.info("Updating cluster settings"); + + String address = pluginConfig.getSimpleValue("host"); + List<String> seeds = getAddresses(params.getList("storageNodeIPAddresses")); + // Make sure this node's address is not in the list; otherwise, it + // won't bootstrap properly. 
+ seeds.remove(address); + try { + updateSeedsList(seeds); + } catch (IOException e) { + log.error("Failed to update seeds property in " + yamlFile, e); + result.setErrorMessage("Failed to prepared node for bootstrap due to unexpected error that occurred " + + "while updating seeds property in " + yamlFile + ":\n" + ThrowableUtil.getAllMessages(e)); + return result; + } + + if (updateAuthFile(result, new HashSet<String>(seeds))) { + return result; + } + + int cqlPort = Integer.parseInt(params.getSimpleValue("cqlPort")); + int gossipPort = Integer.parseInt(params.getSimpleValue("gossipPort")); + + yamlConfig.put("native_transport_port", cqlPort); + yamlConfig.put("storage_port", gossipPort); + + try { + yaml.dump(yamlConfig, new FileWriter(yamlFile)); + } catch (IOException e) { + log.error("Could not update cluster settings in " + yamlFile, e); + result.setErrorMessage("Could not update cluster settings in " + yamlFile + ":\n" + + ThrowableUtil.getAllMessages(e)); + return result; + } + + log.info(this + " is ready to be bootstrap. Restarting storage node..."); + OperationResult startResult = startNode(); + if (startResult.getErrorMessage() != null) { + log.error("Failed to restart storage node:\n" + startResult.getErrorMessage()); + result.setErrorMessage("Failed to restart storage node:\n" + startResult.getErrorMessage()); + } else { + result.setSimpleResult("The storage node was succesfully updated is now bootstrapping into the cluster."); + }
return result; }
+ private void purgeDir(File dir) { + log.info("Purging " + dir); + FileUtil.purge(dir, true); + } + + private File getCommitLogDir(Map yamlConfig) { + return new File((String) yamlConfig.get("commitlog_directory")); + } + + private List<File> getDataDirs(Map yamlConfig) { + List<File> dirs = new ArrayList<File>(); + List<String> dirNames = (List<String>) yamlConfig.get("data_file_directories"); + + for (String dirName : dirNames) { + dirs.add(new File(dirName)); + } + + return dirs; + } + + private File getSavedCachesDir(Map yamlConfig) { + return new File((String) yamlConfig.get("saved_caches_directory")); + } + private OperationResult nodeAdded(Configuration params) { boolean runRepair = params.getSimple("runRepair").getBooleanValue(); boolean updateSeedsList = params.getSimple("updateSeedsList").getBooleanValue(); @@ -405,4 +535,10 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper boolean succeeded; String details; } + + @Override + public String toString() { + return StorageNodeComponent.class.getSimpleName() + "[resourceKey: " + getResourceContext().getResourceKey() + + "]"; + } } diff --git a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml index 1e39d6c..cd84de6 100644 --- a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml +++ b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml @@ -101,6 +101,16 @@ </parameters> </operation>
+ <operation name="prepareForBootstrap"> + <parameters> + <c:simple-property name="cqlPort" type="integer" displayName="CQL Port"/> + <c:simple-property name="gossipPort" type="integer"/> + <c:list-property name="storageNodeIPAddresses" displayName="Storage Node IP Addresses"> + <c:simple-property name="storageNodeIPAddress"/> + </c:list-property> + </parameters> + </operation> + <operation name="prepareForUpgrade" description="Prepares the storage node for upgrade (this operation consists of following steps: 1) turning off the RPC server, 2) turning off the gossiper, 3) taking the snapshot (backuping the data), 4) invoking the drain operation"> <parameters> <c:simple-property name="snapshotName" required="false" type="string" displayName="Snapshot Name"
commit da7b8bf04aab224fcce8c613fb1dee74c62186d1 Author: Jay Shaughnessy jshaughn@redhat.com Date: Tue Jul 23 17:37:15 2013 -0400
Start setting up some infrastructure for Bundle Groups and associated permissions.
diff --git a/modules/core/dbutils/src/main/scripts/dbsetup/content-schema.xml b/modules/core/dbutils/src/main/scripts/dbsetup/content-schema.xml index 6055d57..35d17b6 100644 --- a/modules/core/dbutils/src/main/scripts/dbsetup/content-schema.xml +++ b/modules/core/dbutils/src/main/scripts/dbsetup/content-schema.xml @@ -740,5 +740,41 @@ <column name="NEW_DRIFT_FILE" type="VARCHAR2" size="64" required="false" references="RHQ_DRIFT_FILE"/> </table>
+ <table name="RHQ_BUNDLE_GROUP"> + <column name="ID" default="sequence-only" initial="10001" + primarykey="true" required="true" type="INTEGER"/> + <column name="NAME" required="true" size="4000" type="VARCHAR2"/> + <column name="DESCRIPTION" size="100" type="VARCHAR2"/> + <column name="CTIME" type="LONG"/> + <column name="MTIME" type="LONG"/> + </table> + + <!-- Many To Many mapping for bundles to bundle groups --> + <table name="RHQ_BUNDLE_GROUP_BUNDLE_MAP"> + <column name="BUNDLE_ID" required="true" type="INTEGER" references="RHQ_BUNDLE"/> + <column name="BUNDLE_GROUP_ID" required="true" type="INTEGER" references="RHQ_BUNDLE_GROUP"/> + + <!-- not using full words to fit index name length --> + <constraint name="RHQ_BUNDLE_GROUP_BUN_MAP_KEY"> + <primaryKey> + <field ref="BUNDLE_ID"/> + <field ref="BUNDLE_GROUP_ID"/> + </primaryKey> + </constraint> + </table> + + <!-- this would be better in authz-schema but too early in the chain, so putting it here --> + <table name="RHQ_ROLE_BUNDLE_GROUP_MAP"> + <column name="ROLE_ID" required="true" type="INTEGER" references="RHQ_ROLE"/> + <column name="BUNDLE_GROUP_ID" required="true" type="INTEGER" references="RHQ_BUNDLE_GROUP"/> + + <constraint name="RHQ_ROLE_BUN_GROUP_MAP_KEY"> + <primaryKey> + <field ref="ROLE_ID"/> + <field ref="BUNDLE_GROUP_ID"/> + </primaryKey> + </constraint> + </table> + </dbsetup>
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/authz/Permission.java b/modules/core/domain/src/main/java/org/rhq/core/domain/authz/Permission.java index c7abea6..40e3150 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/authz/Permission.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/authz/Permission.java @@ -136,7 +136,64 @@ public enum Permission { * Can view other RHQ users, except for their assigned roles */ // NOTE: This is a GLOBAL permission but defined here to maintain the ordinal indexes - VIEW_USERS(Target.GLOBAL) // 17 + VIEW_USERS(Target.GLOBAL), // 17 + + /** + * Can CRUD BundleGroups + */ + MANAGE_BUNDLE_GROUPS(Target.GLOBAL), // 18 + + /** + * Can create Bundle [Versions]s + * Can create global Bundle [Versions] if holding Global.VIEW_BUNDLES + * Can assign to viewable bundle groups + */ + CREATE_BUNDLES(Target.GLOBAL), // 19 + + /** + * Can delete bundle [Versions]s from viewable groups + * Can delete global bundles if holding Global.VIEW_BUNDLES + * Can unassign from viewable bundle groups + */ + DELETE_BUNDLES(Target.GLOBAL), // 20 + + /** + * Can view any bundle, including global bundles + */ + VIEW_BUNDLES(Target.GLOBAL), // 21 + + /** + * Can deploy any viewable bundle version to any viewable (deployable, compatible) resource group + */ + DEPLOY_BUNDLES(Target.GLOBAL), // 22 + + /** + * Can assign viewable bundles to the bundle group + * - this can be a copy from another viewable bundle group + * - this can be a global bundle if holding Global.VIEW_BUNDLES + */ + ASSIGN_BUNDLES_TO_GROUP(Target.BUNDLE), // 23 + + /** + * Can unassign bundles from the bundle group + * - the bundle is not deleted and becomes a global bundle if assigned to no other bundle group + */ + UNASSIGN_BUNDLES_FROM_GROUP(Target.BUNDLE), // 24 + + /** + * Can create bundle [version]s for this bundle group + */ + CREATE_BUNDLES_IN_GROUP(Target.BUNDLE), // 25 + + /** + * Can delete bundle [version]s from the bundle group + 
*/ + DELETE_BUNDLES_IN_GROUP(Target.BUNDLE), // 26 + + /** + * Implied - Can view bundles in the bundle group + */ + VIEW_BUNDLES_IN_GROUP(Target.BUNDLE) // 25
;
@@ -144,11 +201,14 @@ public enum Permission { * the target to which the permission applies */ public enum Target { - /** global permissions do not apply to specific resources in groups */ + /** global permissions do not apply to specific resources or bundles */ GLOBAL,
- /** resource permissions apply only to the resources in the role's groups */ - RESOURCE + /** resource permissions apply only to the resources in the role's resource groups */ + RESOURCE, + + /** bundle permissions apply only to the bundles in the role's bundle groups */ + BUNDLE }
private Target target; diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/authz/Role.java b/modules/core/domain/src/main/java/org/rhq/core/domain/authz/Role.java index c99cf703..af5af3b 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/authz/Role.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/authz/Role.java @@ -49,6 +49,7 @@ import org.hibernate.annotations.Cascade; import org.jetbrains.annotations.NotNull;
import org.rhq.core.domain.auth.Subject; +import org.rhq.core.domain.bundle.BundleGroup; import org.rhq.core.domain.resource.group.LdapGroup; import org.rhq.core.domain.resource.group.ResourceGroup;
@@ -61,7 +62,7 @@ import org.rhq.core.domain.resource.group.ResourceGroup; * @author Greg Hinkle */ @Entity -@NamedQueries( { // +@NamedQueries({ // @NamedQuery(name = Role.QUERY_FIND_BY_IDS, query = "SELECT r FROM Role AS r WHERE r.id IN ( :ids )"), // @NamedQuery(name = Role.QUERY_FIND_ALL, query = "SELECT r FROM Role AS r"), // @NamedQuery(name = Role.QUERY_FIND_AVAILABLE_ROLES_WITH_EXCLUDES, query = "" // @@ -128,11 +129,15 @@ public class Role implements Serializable { @Cascade({ org.hibernate.annotations.CascadeType.ALL }) private Set<Permission> permissions = new HashSet<Permission>();
+ @ManyToMany(mappedBy = "roles") + private Set<BundleGroup> bundleGroups = new HashSet<BundleGroup>(); + public Role() { fsystem = Boolean.FALSE; }
- public Role(@NotNull String name) { + public Role(@NotNull + String name) { this(); this.name = name; } @@ -150,7 +155,8 @@ public class Role implements Serializable { return this.name; }
- public void setName(@NotNull String name) { + public void setName(@NotNull + String name) { this.name = name; }
@@ -206,7 +212,7 @@ public class Role implements Serializable { this.ldapGroups.add(ldapGroup); }
- public boolean removeLdapGroup(LdapGroup ldapGroup) { + public boolean removeLdapGroup(LdapGroup ldapGroup) { return this.ldapGroups.remove(ldapGroup); }
@@ -219,7 +225,7 @@ public class Role implements Serializable { this.subjects = new HashSet<Subject>(); } else { this.subjects = subjects; - for (Subject subject :subjects) { + for (Subject subject : subjects) { subject.addRole(this); this.subjects.add(subject); } @@ -270,6 +276,40 @@ public class Role implements Serializable { this.ldapSubjects.remove(subject); }
+ public Set<BundleGroup> getBundleGroups() { + return bundleGroups; + } + + public void setBundleGroups(Set<BundleGroup> bundleGroups) { + if (bundleGroups == null) { + this.bundleGroups = new HashSet<BundleGroup>(); + } else { + this.bundleGroups = bundleGroups; + for (BundleGroup bundleGroup : this.bundleGroups) { + bundleGroup.addRole(this); + this.bundleGroups.add(bundleGroup); + } + } + } + + public void addBundleGroup(BundleGroup bundleGroup) { + if (this.bundleGroups == null) { + this.bundleGroups = new HashSet<BundleGroup>(); + } + + bundleGroup.addRole(this); + this.bundleGroups.add(bundleGroup); + } + + public void removeBundleGroup(BundleGroup bundleGroup) { + if (this.bundleGroups == null) { + this.bundleGroups = new HashSet<BundleGroup>(); + } + + bundleGroup.removeRole(this); + this.bundleGroups.remove(bundleGroup); + } + public Set<ResourceGroup> getResourceGroups() { return resourceGroups; } diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/bundle/BundleGroup.java b/modules/core/domain/src/main/java/org/rhq/core/domain/bundle/BundleGroup.java new file mode 100644 index 0000000..16037f7 --- /dev/null +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/bundle/BundleGroup.java @@ -0,0 +1,207 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation, and/or the GNU Lesser + * General Public License, version 2.1, also as published by the Free + * Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License and the GNU Lesser General Public License + * for more details. 
+ * + * You should have received a copy of the GNU General Public License + * and the GNU Lesser General Public License along with this program; + * if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ +package org.rhq.core.domain.bundle; + +import java.io.Serializable; +import java.util.HashSet; +import java.util.Set; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.JoinColumn; +import javax.persistence.JoinTable; +import javax.persistence.ManyToMany; +import javax.persistence.PrePersist; +import javax.persistence.PreUpdate; +import javax.persistence.SequenceGenerator; +import javax.persistence.Table; +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; + +import org.rhq.core.domain.authz.Role; + +/** + * Defines a grouping of bundles, typically for role-based authz reasons. 
+ * + * @author Jay Shaughnessy + */ +@Entity +@SequenceGenerator(allocationSize = org.rhq.core.domain.util.Constants.ALLOCATION_SIZE, name = "RHQ_BUNDLE_GROUP_ID_SEQ", sequenceName = "RHQ_BUNDLE_GROUP_ID_SEQ") +@Table(name = "RHQ_BUNDLE_GROUP") +@XmlAccessorType(XmlAccessType.FIELD) +public class BundleGroup implements Serializable { + private static final long serialVersionUID = 1L; + + @Column(name = "ID", nullable = false) + @GeneratedValue(strategy = GenerationType.AUTO, generator = "RHQ_BUNDLE_GROUP_ID_SEQ") + @Id + private int id; + + @Column(name = "NAME", nullable = false) + private String name; + + @Column(name = "DESCRIPTION", nullable = true) + private String description; + + @Column(name = "CTIME") + private Long ctime; + + @Column(name = "MTIME") + private Long mtime; + + @JoinTable(name = "RHQ_BUNDLE_GROUP_BUNDLE_MAP", joinColumns = { @JoinColumn(name = "BUNDLE_GROUP_ID") }, inverseJoinColumns = { @JoinColumn(name = "BUNDLE_ID") }) + @ManyToMany + private Set<Bundle> bundles = new HashSet<Bundle>(); + + @JoinTable(name = "RHQ_ROLE_BUNDLE_GROUP_MAP", joinColumns = { @JoinColumn(name = "BUNDLE_GROUP_ID") }, inverseJoinColumns = { @JoinColumn(name = "ROLE_ID") }) + @ManyToMany + private Set<Role> roles = new HashSet<Role>(); + + public BundleGroup() { + // for JPA use + } + + public BundleGroup(String name) { + setName(name); + } + + public int getId() { + return id; + } + + public void setId(int id) { + this.id = id; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } + + public Set<Bundle> getBundles() { + if (null == bundles) { + bundles = new HashSet<Bundle>(); + } + return bundles; + } + + public void addBundle(Bundle bundle) { + getBundles().add(bundle); + } + + public void removeBundle(Bundle bundle) { + getBundles().remove(bundle); 
+ } + + public Set<Role> getRoles() { + if (null == roles) { + roles = new HashSet<Role>(); + } + return roles; + } + + public void addRole(Role role) { + getRoles().add(role); + } + + public void removeRole(Role role) { + getRoles().remove(role); + } + + public Long getCtime() { + return ctime; + } + + public void setCtime(Long ctime) { + this.ctime = ctime; + } + + public Long getMtime() { + return mtime; + } + + public void setMtime(Long mtime) { + this.mtime = mtime; + } + + @PrePersist + void onPersist() { + this.mtime = System.currentTimeMillis(); + this.ctime = System.currentTimeMillis(); + } + + @PreUpdate + void onUpdate() { + this.mtime = System.currentTimeMillis(); + } + + @Override + public String toString() { + return "BundleGroup[id=" + id + ",name=" + name + "]"; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = (prime * result) + ((name == null) ? 0 : name.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (!(obj instanceof BundleGroup)) { + return false; + } + + final BundleGroup other = (BundleGroup) obj; + + if (name == null) { + if (other.name != null) { + return false; + } + } else if (!name.equals(other.name)) { + return false; + } + + return true; + } +} \ No newline at end of file diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/BundleCriteria.java b/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/BundleCriteria.java index abef183..5e6f8c1 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/BundleCriteria.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/BundleCriteria.java @@ -42,10 +42,12 @@ public class BundleCriteria extends TaggedCriteria { private String filterBundleTypeName; // needs override private String filterDescription; private List<Integer> filterDestinationIds; // needs overrides + private List<Integer> 
filterBundleGroupIds; // needs overrides private String filterName; private Integer filterPackageTypeId; // needs override private String filterPackageTypeName; // needs override
+ private boolean fetchBundleGroups; private boolean fetchBundleVersions; private boolean fetchDestinations; private boolean fetchPackageType; @@ -61,6 +63,10 @@ public class BundleCriteria extends TaggedCriteria { + " WHERE bv.id IN ( ? ) )"); filterOverrides.put("bundleTypeId", "bundleType.id = ?"); filterOverrides.put("bundleTypeName", "bundleType.name like ?"); + filterOverrides.put("bundleGroupIds", "" // + + "id IN ( SELECT bg.bundle.id " // + + " FROM BundleGroup bg " // + + " WHERE bg.id IN ( ? ) )"); filterOverrides.put("destinationIds", "" // + "id IN ( SELECT bd.bundle.id " // + " FROM BundleDestination bd " // @@ -97,6 +103,17 @@ public class BundleCriteria extends TaggedCriteria { this.filterDescription = filterDescription; }
+ /** Convenience routine calls addFilterBundleGroupIds */ + public void addFilterBundleGroupId(Integer filterBundleGroupId) { + List<Integer> ids = new ArrayList<Integer>(1); + ids.add(filterBundleGroupId); + this.addFilterBundleGroupIds(ids); + } + + public void addFilterBundleGroupIds(List<Integer> filterBundleGroupIds) { + this.filterBundleGroupIds = filterBundleGroupIds; + } + /** Convenience routine calls addFilterDestinationIds */ public void addFilterDestinationId(Integer filterDestinationId) { List<Integer> ids = new ArrayList<Integer>(1); @@ -120,6 +137,10 @@ public class BundleCriteria extends TaggedCriteria { this.filterPackageTypeName = filterPackageTypeName; }
+ public void fetchBundleGroups(boolean fetchBundleGroups) { + this.fetchBundleGroups = fetchBundleGroups; + } + public void fetchBundleVersions(boolean fetchBundleVersions) { this.fetchBundleVersions = fetchBundleVersions; } diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/BundleGroupCriteria.java b/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/BundleGroupCriteria.java new file mode 100644 index 0000000..88886d7 --- /dev/null +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/BundleGroupCriteria.java @@ -0,0 +1,105 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ +package org.rhq.core.domain.criteria; + +import java.util.ArrayList; +import java.util.List; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; + +import org.rhq.core.domain.bundle.BundleGroup; +import org.rhq.core.domain.util.PageOrdering; + +/** + * @author Jay Shaughnessy + */ +@XmlRootElement +@XmlAccessorType(XmlAccessType.FIELD) +@SuppressWarnings("unused") +public class BundleGroupCriteria extends Criteria { + private static final long serialVersionUID = 1L; + + private String filterName; + private String filterDescription; + private Integer filterBundleId; + private List<Integer> filterBundleIds; // requires overrides + private Integer filterRoleId; + private List<Integer> filterRoleIds; // requires overrides + + private boolean fetchBundles; + private boolean fetchRoles; + + private PageOrdering sortName; + private PageOrdering sortDescription; + + public BundleGroupCriteria() { + filterOverrides.put("bundleIds", "" // + + "id IN ( SELECT b.id " // + + " FROM Bundle b " // + + " WHERE b.id IN ( ? ) )"); + filterOverrides.put("roleIds", "" // + + "id IN ( SELECT r.id " // + + " FROM Role r " // + + " WHERE r.id IN ( ? 
) )"); + } + + @Override + public Class<BundleGroup> getPersistentClass() { + return BundleGroup.class; + } + + public void addFilterName(String filterName) { + this.filterName = filterName; + } + + public void addFilterDescription(String filterDescription) { + this.filterDescription = filterDescription; + } + + /** Convenience routine calls addFilterBundleVersionIds */ + public void addFilterBundleId(Integer filterBundleId) { + List<Integer> ids = new ArrayList<Integer>(1); + ids.add(filterBundleId); + this.addFilterBundleIds(ids); + } + + public void addFilterBundleIds(List<Integer> filterBundleIds) { + this.filterBundleIds = filterBundleIds; + } + + public void fetchBundles(boolean fetchBundles) { + this.fetchBundles = fetchBundles; + } + + public void fetchRoles(boolean fetchRoles) { + this.fetchRoles = fetchRoles; + } + + public void addSortName(PageOrdering sortName) { + addSortField("name"); + this.sortName = sortName; + } + + public void addSortDescription(PageOrdering sortDescription) { + addSortField("description"); + this.sortDescription = sortDescription; + } +} diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/RoleCriteria.java b/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/RoleCriteria.java index efb2b71..a07f012 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/RoleCriteria.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/RoleCriteria.java @@ -47,6 +47,7 @@ public class RoleCriteria extends Criteria { private boolean fetchRoleNotifications; private boolean fetchSubjects; private boolean fetchLdapGroups; + private boolean fetchBundleGroups;
private PageOrdering sortName;
@@ -112,6 +113,10 @@ public class RoleCriteria extends Criteria { this.fetchResourceGroups = fetchResourceGroups; }
+ public void fetchBundleGroups(boolean fetchBundleGroups) { + this.fetchBundleGroups = fetchBundleGroups; + } + public void fetchPermissions(boolean fetchPermissions) { this.fetchPermissions = fetchPermissions; } @@ -127,7 +132,7 @@ public class RoleCriteria extends Criteria {
/** subclasses should override as necessary */ public boolean isSecurityManagerRequired() { - return (this.fetchSubjects || this.fetchResourceGroups || this.fetchLdapGroups); + return (this.fetchSubjects || this.fetchResourceGroups || this.fetchLdapGroups || this.fetchBundleGroups); }
}
commit 4fa9f082b2e011b3bde9defe1021248148c4ad40 Author: Simeon Pinder spinder@fulliautomatix.conchfritter.com Date: Tue Jul 23 15:02:09 2013 -0400
[BZ 984649] fix module metadata.
diff --git a/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml b/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml index 145e3af..82ff294 100644 --- a/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml +++ b/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml @@ -290,7 +290,7 @@ <delete file="${jboss.home}/modules/system/layers/base/org/jgroups/main/jgroups-${jgroups.initial.version}.jar.index" /> <!-- Update the module metadata to the patched version --> <replace file="${jboss.home}/modules/system/layers/base/org/jgroups/main/module.xml" - token="jgroups-${jgroups.initial.version}.jar" value="${jgroups.patch.version}"/> + token="jgroups-${jgroups.initial.version}.jar" value="jgroups-${jgroups.patch.version}.jar"/> <!-- Copy in patched version --> <copy file="${settings.localRepository}/org/jgroups/jgroups/${jgroups.patch.version}/jgroups-${jgroups.patch.version}.jar" toDir="${jboss.home}/modules/system/layers/base/org/jgroups/main" verbose="true"/>
commit 8203c669b3b3ba5ed5c3ef27f051220da93ea868 Author: Simeon Pinder spinder@fulliautomatix.conchfritter.com Date: Tue Jul 23 13:37:32 2013 -0400
Upgrading richfaces to latest patched version.
diff --git a/pom.xml b/pom.xml index 3662bc7..f909033 100644 --- a/pom.xml +++ b/pom.xml @@ -135,7 +135,7 @@ <postgresql.version>9.2-1002.jdbc4</postgresql.version> <h2.version>1.2.139</h2.version> <jtds.version>1.2.2</jtds.version> - <richfaces.version>3.3.3.Final</richfaces.version> + <richfaces.version>3.3.4.Final</richfaces.version> <jline.version>0.9.94</jline.version> <sigar.version>1.6.5.132-5</sigar.version> <sigar.zip.version>1.6.5</sigar.zip.version>
commit caeb7a5c832334b74f76a265c8028a5697152dda Author: Simeon Pinder spinder@fulliautomatix.conchfritter.com Date: Tue Jul 23 12:28:52 2013 -0400
[BZ 984649] update jgroups usage to latest patched version.
diff --git a/modules/enterprise/server/appserver/pom.xml b/modules/enterprise/server/appserver/pom.xml index 0a61138..f1a4c7b 100644 --- a/modules/enterprise/server/appserver/pom.xml +++ b/modules/enterprise/server/appserver/pom.xml @@ -19,6 +19,8 @@
<properties> <rhq.dev.data.dir>${rhq.rootDir}/rhq-data</rhq.dev.data.dir> + <jgroups.initial.version>3.2.7.Final</jgroups.initial.version> + <jgroups.patch.version>3.2.10.Final</jgroups.patch.version> </properties>
<dependencies> @@ -72,6 +74,13 @@ <groupId>org.codehaus.groovy</groupId> <artifactId>groovy-all</artifactId> </dependency> + + <!-- Pull down the patched version of JGroups. See CVE 2013-4112 and BZ 984365 --> + <dependency> + <groupId>org.jgroups</groupId> + <artifactId>jgroups</artifactId> + <version>${jgroups.patch.version}</version> + </dependency> </dependencies>
<build> @@ -157,6 +166,8 @@ <property name="rhq.server.http.port" value="${rhq.server.http.port}" /> <property name="rhq.server.https.port" value="${rhq.server.https.port}" /> <property name="rhq.sync.endpoint-address" value="${rhq.sync.endpoint-address}" /> + <property name="jgroups.initial.version" value="${jgroups.initial.version}" /> + <property name="jgroups.patch.version" value="${jgroups.patch.version}" /> </ant> </target> </configuration> diff --git a/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml b/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml index a81b6cd..145e3af 100644 --- a/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml +++ b/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml @@ -283,6 +283,17 @@ </resources> </module> ]]></echo> + + <echo>Updating JGroups module component for EAP to ${jgroups.patch.version}</echo> + <!-- Remove the unpatched version --> + <delete file="${jboss.home}/modules/system/layers/base/org/jgroups/main/jgroups-${jgroups.initial.version}.jar" /> + <delete file="${jboss.home}/modules/system/layers/base/org/jgroups/main/jgroups-${jgroups.initial.version}.jar.index" /> + <!-- Update the module metadata to the patched version --> + <replace file="${jboss.home}/modules/system/layers/base/org/jgroups/main/module.xml" + token="jgroups-${jgroups.initial.version}.jar" value="${jgroups.patch.version}"/> + <!-- Copy in patched version --> + <copy file="${settings.localRepository}/org/jgroups/jgroups/${jgroups.patch.version}/jgroups-${jgroups.patch.version}.jar" + toDir="${jboss.home}/modules/system/layers/base/org/jgroups/main" verbose="true"/>
<echo>Generate SSL key for RHQ server - 128-bit key that expires in 20 years</echo> <property name="jboss.conf.dir" location="${jboss.home}/standalone/configuration" />
commit cc64adde1d8835f8c000afe2de0746fda5bbd5c1 Author: Stefan Negrea snegrea@redhat.com Date: Tue Jul 23 08:43:19 2013 -0500
One more place where the previous rebase removed code for the storage node configuration.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 31e3bf7..9416c67 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -643,6 +643,8 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN Configuration parameters = new Configuration(); parameters.setSimpleValue("jmxPort", storageNodeConfiguration.getJmxPort() + ""); parameters.setSimpleValue("heapSize", storageNodeConfiguration.getHeapSize() + ""); + parameters.setSimpleValue("heapNewSize", storageNodeConfiguration.getHeapNewSize() + ""); + parameters.setSimpleValue("threadStackSize", storageNodeConfiguration.getThreadStackSize() + "");
boolean updateConfigurationResult = runOperationAndWaitForResult(subject, storageNodeResource, UPDATE_CONFIGURATION_OPERATION, parameters);
commit 373a931987b402479df3d02269cc00f4ac88a358 Author: Stefan Negrea snegrea@redhat.com Date: Tue Jul 23 08:42:49 2013 -0500
Enable the new set of calculated metrics for disk space utilization in the UI and CLI.
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeLoadComposite.java b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeLoadComposite.java index 80bfdd6..2c0b8f8 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeLoadComposite.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeLoadComposite.java @@ -41,9 +41,13 @@ public class StorageNodeLoadComposite implements Serializable { private MeasurementAggregateWithUnits heapUsed; private MeasurementAggregateWithUnits heapPercentageUsed; private MeasurementAggregateWithUnits load; - private MeasurementAggregateWithUnits partitionDiskUsedPercentage; private MeasurementAggregateWithUnits dataDiskUsed; private MeasurementAggregate tokens; + + private MeasurementAggregateWithUnits dataDiskUsedPercentage; + private MeasurementAggregateWithUnits totalDiskUsedPercentage; + private MeasurementAggregate freeDiskToDataSizeRatio; + private MeasurementAggregateWithUnits actuallyOwns;
public StorageNodeLoadComposite() { @@ -113,35 +117,59 @@ public class StorageNodeLoadComposite implements Serializable { public void setHeapPercentageUsed(MeasurementAggregateWithUnits heapPercentageUsed) { this.heapPercentageUsed = heapPercentageUsed; } - + /** * @deprecated use {@link #getPartitionDiskUsedPercentage() getPartitionDiskUsedPercentage()} instead - * + * * @return partitionDiskUsedPercentage */ public MeasurementAggregateWithUnits getDiskSpacePercentageUsed() { - return getPartitionDiskUsedPercentage(); + return getDataDiskUsedPercentage(); } - + /** * @deprecated use {@link #setPartitionDiskUsedPercentage() setPartitionDiskUsedPercentage()} instead - * + * * @param partitionDiskUsedPercentage */ - public void setDiskSpacePercentageUsed(MeasurementAggregateWithUnits partitionDiskUsedPercentage) { - setPartitionDiskUsedPercentage(partitionDiskUsedPercentage); + public void setDiskSpacePercentageUsed(MeasurementAggregateWithUnits diskUsedPercentage) { + setDataDiskUsedPercentage(diskUsedPercentage); + } + + /** + * @return A computed metric for the percentage of disk space used by data file on the corresponding partitions. + * If multiple data locations are configured then the aggregate is calculated. + */ + public MeasurementAggregateWithUnits getDataDiskUsedPercentage() { + return dataDiskUsedPercentage; + } + + public void setDataDiskUsedPercentage(MeasurementAggregateWithUnits dataDiskUsedPercentage) { + this.dataDiskUsedPercentage = dataDiskUsedPercentage; + } + + /** + * @return A computed metric for the percentage of total (system + Storage Node data file) disk space used the partitions where data files are stored. + * If multiple data locations are configured then the aggregate is calculated. 
+ */ + public MeasurementAggregateWithUnits getTotalDiskUsedPercentage() { + return totalDiskUsedPercentage; + } + + public void setTotalDiskUsedPercentage(MeasurementAggregateWithUnits totalDiskUsedPercentage) { + this.totalDiskUsedPercentage = totalDiskUsedPercentage; }
/** - * @return A computed metric for the percentage of disk space used on the partition that contains the SSTables. - * If multiple data locations are configured then the partition with the highest utilization will be reported. + * @return A computed metric for the percentage of total (system + Storage Node data file) disk space used the partitions where data files are stored. + * If multiple data locations are configured then the aggregate is calculated. */ - public MeasurementAggregateWithUnits getPartitionDiskUsedPercentage() { - return partitionDiskUsedPercentage; + public MeasurementAggregate getFreeDiskToDataSizeRatio() { + return freeDiskToDataSizeRatio; }
- public void setPartitionDiskUsedPercentage(MeasurementAggregateWithUnits partitionDiskUsedPercentage) { - this.partitionDiskUsedPercentage = partitionDiskUsedPercentage; + public void setFreeDiskToDataSizeRatio(MeasurementAggregate freeDiskToDataSizeRatio) { + this.freeDiskToDataSizeRatio = freeDiskToDataSizeRatio; }
/** @@ -202,7 +230,7 @@ public class StorageNodeLoadComposite implements Serializable { builder.append("heapUsed=").append(heapUsed).append(", "); builder.append("heapPercentageUsed=").append(heapPercentageUsed).append(", "); builder.append("load=").append(load).append(", "); - builder.append("partitionDiskUsedPercentage=").append(partitionDiskUsedPercentage).append(", "); + builder.append("dataUsedPercentage=").append(dataDiskUsedPercentage).append(", "); builder.append("dataDiskUsed=").append(dataDiskUsed).append(", "); builder.append("tokens=").append(tokens).append(", "); builder.append("actuallyOwns=").append(actuallyOwns); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java index 7d413fd..07064b7 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java @@ -47,6 +47,7 @@ import org.rhq.core.domain.cloud.StorageNode.OperationMode; import org.rhq.core.domain.cloud.StorageNodeLoadComposite; import org.rhq.core.domain.cloud.StorageNodeLoadComposite.MeasurementAggregateWithUnits; import org.rhq.core.domain.criteria.StorageNodeCriteria; +import org.rhq.core.domain.measurement.MeasurementAggregate; import org.rhq.core.domain.util.PageControl; import org.rhq.core.domain.util.PageList; import org.rhq.core.domain.util.PageOrdering; @@ -200,7 +201,8 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNode, StorageNod
public static class StorageNodeLoadCompositeDatasource extends RPCDataSource<StorageNodeLoadComposite, StorageNodeCriteria> { public static final String HEAP_PERCENTAGE_KEY = "heapPercentage"; - public static final String DISK_SPACE_PERCENTAGE_KEY = "diskSpacePercentage"; + public static final String DATA_DISK_SPACE_PERCENTAGE_KEY = "dataDiskSpacePercentage"; + public static final String TOTAL_DISK_SPACE_PERCENTAGE_KEY = "totalDiskSpacePercentage"; private int id;
public static StorageNodeLoadCompositeDatasource getInstance(int id) { @@ -296,10 +298,15 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNode, StorageNod "This value is calculated by dividing Heap Used by Heap Maximum.", HEAP_PERCENTAGE_KEY), Arrays.<Object> asList(loadComposite.getLoad(), "Load", "Data stored on the node", "load"), Arrays.<Object> asList( - loadComposite.getPartitionDiskUsedPercentage(), - "Disk Space Percent Used", - "Percentage of total disk space used for the partition that contains the data files.If multiple data locations are specified then this will report the average utilization accross all the partitions.", - DISK_SPACE_PERCENTAGE_KEY), + loadComposite.getDataDiskUsedPercentage(), + "Data Disk Space Percent Used", + "Percentage of disk space used by data files on the partitions that contain the data files. If multiple data locations are specified then the aggregate accross all the partitions that contain data files is reported.", + DATA_DISK_SPACE_PERCENTAGE_KEY), + Arrays.<Object> asList( + loadComposite.getTotalDiskUsedPercentage(), + "Total Disk Space Percent Used", + "Percentage of total disk space used (system and Storage Node) on the partitions that contain the data files. If multiple data locations are specified then the aggregate accross all the partitions that contain data files is reported.", + TOTAL_DISK_SPACE_PERCENTAGE_KEY), Arrays.<Object> asList( loadComposite.getDataDiskUsed(), "Total Disk Space Used", @@ -325,6 +332,21 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNode, StorageNod recordsList.add(tokens); }
+ + if (loadComposite.getFreeDiskToDataSizeRatio() != null){ + MeasurementAggregate aggregate = loadComposite.getFreeDiskToDataSizeRatio(); + + ListGridRecord record = new ListGridRecord(); + record.setAttribute("id", "freeDiskToDataSizeRatio"); + record.setAttribute("name", "Free Disk To Data Size Ratio"); + record.setAttribute("hover", "Ratio of (Free Disk)/(Data File Size). A value below 1 is not recommended since a compaction or repair process could double the amount of disk space used by data files. If multiple data locations are specified then the aggregate accross all the partitions that contain data files is reported."); + record.setAttribute("min", aggregate.getMin()); + record.setAttribute("avg", aggregate.getAvg()); + record.setAttribute("max", aggregate.getMax()); + + recordsList.add(record); + } + ListGridRecord[] records = recordsList.toArray(new ListGridRecord[recordsList.size()]); return records; } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeLoadComponent.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeLoadComponent.java index e8dde9d..e044e4e 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeLoadComponent.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeLoadComponent.java @@ -51,8 +51,9 @@ public class StorageNodeLoadComponent extends EnhancedVLayout { @Override protected String getCellCSSText(ListGridRecord record, int rowNum, int colNum) { if ("avg".equals(getFieldName(colNum)) - && (StorageNodeLoadCompositeDatasource.HEAP_PERCENTAGE_KEY.equals(record.getAttribute("id")) || StorageNodeLoadCompositeDatasource.DISK_SPACE_PERCENTAGE_KEY - .equals(record.getAttribute("id")))) { + && (StorageNodeLoadCompositeDatasource.HEAP_PERCENTAGE_KEY.equals(record.getAttribute("id")) 
|| + StorageNodeLoadCompositeDatasource.DATA_DISK_SPACE_PERCENTAGE_KEY.equals(record.getAttribute("id")) || + StorageNodeLoadCompositeDatasource.TOTAL_DISK_SPACE_PERCENTAGE_KEY.equals(record.getAttribute("id")))) { if (record.getAttributeAsFloat("avgFloat") > .85) { return "font-weight:bold; color:#d64949;"; } else if (record.getAttributeAsFloat("avgFloat") > .7) { diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index fab803b..31e3bf7 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -253,13 +253,16 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN Map<String, Integer> scheduleIdsMap = new HashMap<String, Integer>();
// get the schedule ids for Storage Service resource - final String tokensMetric = "Tokens", ownershipMetric = "Ownership", diskUsedPercentageMetric = "Calculated.PartitionDiskUsedPercentage"; + final String tokensMetric = "Tokens", ownershipMetric = "Ownership"; + final String dataDiskUsedPercentageMetric = "Calculated.DataDiskUsedPercentage"; + final String totalDiskUsedPercentageMetric = "Calculated.TotalDiskUsedPercentage"; + final String freeDiskToDataRatioMetric = "Calculated.FreeDiskToDataSizeRatio"; final String loadMetric = "Load", keyCacheSize = "KeyCacheSize", rowCacheSize = "RowCacheSize", totalCommitLogSize = "TotalCommitlogSize"; TypedQuery<Object[]> query = entityManager.<Object[]> createNamedQuery( StorageNode.QUERY_FIND_SCHEDULE_IDS_BY_PARENT_RESOURCE_ID_AND_MEASUREMENT_DEFINITION_NAMES, Object[].class); query.setParameter("parrentId", resourceId).setParameter("metricNames", - Arrays.asList(tokensMetric, ownershipMetric, diskUsedPercentageMetric, loadMetric, keyCacheSize, - rowCacheSize, totalCommitLogSize)); + Arrays.asList(tokensMetric, ownershipMetric, loadMetric, keyCacheSize, rowCacheSize, totalCommitLogSize, + dataDiskUsedPercentageMetric, totalDiskUsedPercentageMetric, freeDiskToDataRatioMetric)); for (Object[] pair : query.getResultList()) { scheduleIdsMap.put((String) pair[0], (Integer) pair[1]); } @@ -292,10 +295,22 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN subject, scheduleId, MeasurementUnits.PERCENTAGE, beginTime, endTime); result.setActuallyOwns(ownershipAggregateWithUnits); } - if ((scheduleId = scheduleIdsMap.get(diskUsedPercentageMetric)) != null) { - StorageNodeLoadComposite.MeasurementAggregateWithUnits diskUsedPercentageAggregateWithUnits = getMeasurementAggregateWithUnits( + + //calculated disk space related metrics + if ((scheduleId = scheduleIdsMap.get(dataDiskUsedPercentageMetric)) != null) { + StorageNodeLoadComposite.MeasurementAggregateWithUnits 
dataDiskUsedPercentageAggregateWithUnits = getMeasurementAggregateWithUnits( + subject, scheduleId, MeasurementUnits.PERCENTAGE, beginTime, endTime); + result.setDataDiskUsedPercentage(dataDiskUsedPercentageAggregateWithUnits); + } + if ((scheduleId = scheduleIdsMap.get(totalDiskUsedPercentageMetric)) != null) { + StorageNodeLoadComposite.MeasurementAggregateWithUnits totalDiskUsedPercentageAggregateWithUnits = getMeasurementAggregateWithUnits( subject, scheduleId, MeasurementUnits.PERCENTAGE, beginTime, endTime); - result.setPartitionDiskUsedPercentage(diskUsedPercentageAggregateWithUnits); + result.setDataDiskUsedPercentage(totalDiskUsedPercentageAggregateWithUnits); + } + if ((scheduleId = scheduleIdsMap.get(freeDiskToDataRatioMetric)) != null) { + MeasurementAggregate freeDiskToDataRatioAggregate = measurementManager.getAggregate(subject, + scheduleId, beginTime, endTime); + result.setFreeDiskToDataSizeRatio(freeDiskToDataRatioAggregate); }
if ((scheduleId = scheduleIdsMap.get(loadMetric)) != null) { diff --git a/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml index 5bbebed..e95f995 100644 --- a/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml +++ b/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml @@ -188,7 +188,7 @@
<metric property="Calculated.DataDiskUsedPercentage" displayName="Data File Disk Used Percentage" dataType="measurement" units="percentage" displayType="summary" description="Percentage of disk space used by Cassandra data files. The aggregate accross all the partitions will be reported if multiple data locations are specified. This is a calculated metric derived from system and Cassandra runtime information."/> <metric property="Calculated.TotalDiskUsedPercentage" displayName="Total Disk Used Percentage" dataType="measurement" units="percentage" displayType="summary" description="Percentage of total disk space used. The metric acounts overall disk usage (including system files), not just disk space used by Cassandra. The aggregate accross all the partitions will be reported if multiple data locations are specified. This is a calculated metric derived from system and Cassandra runtime information."/> - <metric property="Calculated.FreeDiskToDataSizeRatio" displayName="Free Disk to Data Size Ratio" dataType="measurement" displayType="summary" description="Ratio of (Amount of Free Disk)/(Data File Size). A value below 1 is not recommended since a compaction or repair process could double the amount of disk space used by data files. The aggregate accross all the partitions will be reported if multiple data locations are specified. This is a calculated metric derived from system and Cassandra runtime information."/> + <metric property="Calculated.FreeDiskToDataSizeRatio" displayName="Free Disk to Data Size Ratio" dataType="measurement" displayType="summary" description="Ratio of (Free Disk)/(Data File Size). A value below 1 is not recommended since a compaction or repair process could double the amount of disk space used by data files. The aggregate accross all the partitions will be reported if multiple data locations are specified. This is a calculated metric derived from system and Cassandra runtime information."/>
<metric property="CurrentGenerationNumber" dataType="trait" displayType="summary" description="Current generation number"/> <metric property="ExceptionCount" measurementType="trendsup" dataType="measurement" displayType="summary" description="Exception Count"/>
commit 6997631e56204db41c9f4902eef1c6210706be3f Author: Stefan Negrea snegrea@redhat.com Date: Mon Jul 22 15:14:22 2013 -0500
Add back code used to update the storage node configuration that was lost due to a rebase. This code updates two additional storage node properties that were added.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index b32ab5b..fab803b 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -611,6 +611,8 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN storageNodeResource.getId());
configuration.setHeapSize(storageNodeConfiguration.getSimpleValue("maxHeapSize")); + configuration.setHeapNewSize(storageNodeConfiguration.getSimpleValue("heapNewSize")); + configuration.setThreadStackSize(storageNodeConfiguration.getSimpleValue("threadStackSize")); configuration.setJmxPort(storageNode.getJmxPort()); }
diff --git a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml index 8156d02..1e39d6c 100644 --- a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml +++ b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml @@ -122,6 +122,9 @@ <operation name="updateConfiguration" description="Updates the node configuration. Will require a separate server restart for the settings to take effect."> <parameters> <c:simple-property name="heapSize" type="string" description="The heap size to be used for both -Xms and -Xmx JVM options."/> + <c:simple-property name="heapNewSize" type="string" description="The heap new size to be used be used with -Xmn JVM option."/> + <c:simple-property name="threadStackSize" type="integer" description="The thread stack size. This memory is allocated to each thread off heap. The + value should be an integer that will be interpreted in kilobytes."/> <c:simple-property name="jmxPort" type="integer" description="JMX port JVM option."/> </parameters> <results>
commit e85abcf495d6939d876b09bd1de1e71a29af17ec Author: Heiko W. Rupp hwr@redhat.com Date: Tue Jul 23 10:42:17 2013 +0200
BZ 796480 (and others) add support for subCategory in embedded types (aka runs-inside)
diff --git a/modules/core/client-api/src/main/java/org/rhq/core/clientapi/agent/metadata/PluginMetadataParser.java b/modules/core/client-api/src/main/java/org/rhq/core/clientapi/agent/metadata/PluginMetadataParser.java index 88dd865..d5ff2ef 100644 --- a/modules/core/client-api/src/main/java/org/rhq/core/clientapi/agent/metadata/PluginMetadataParser.java +++ b/modules/core/client-api/src/main/java/org/rhq/core/clientapi/agent/metadata/PluginMetadataParser.java @@ -417,16 +417,56 @@ public class PluginMetadataParser { return serviceResourceType; }
- private static void setSubCategory(ResourceDescriptor resourceDescriptor, ResourceType resourceType) + /** + * Try to find the subCategory of the p/s/s descriptor in one of the parents + * <subcategories><subcategory>Foo</subcategory></subcategories> elements and + * set it on the resourceType if found. + * + * It is not enough to look at the direct parents, but we need to also look at the + * <runs-inside> types if our type is "embedded" in a different type. + * @param resourceDescriptor Descriptor to get the subCategory attribute from + * @param resourceType The type to attach the ResourceSubCategory to. + * @throws InvalidPluginDescriptorException If the descriptor.subCategory can not be found in any parent. + */ + private void setSubCategory(ResourceDescriptor resourceDescriptor, ResourceType resourceType) throws InvalidPluginDescriptorException { String subCatName = resourceDescriptor.getSubCategory(); if (subCatName != null) { ResourceSubCategory subCat = SubCategoriesMetadataParser.findSubCategoryOnResourceTypeAncestor( resourceType, subCatName); - if (subCat == null) + + // We need to look at resourceDescriptor -> runsInside to see if one of those defines the + // subcategories that we are looking for. + if (subCat == null && resourceDescriptor.getRunsInside() != null) { + RunsInsideType rit = resourceDescriptor.getRunsInside(); + List<ParentResourceType> parentResourceTypeList = rit.getParentResourceType(); + for (ParentResourceType parentResourceType : parentResourceTypeList) { + ResourceType parentType = getResourceTypeFromPlugin(parentResourceType.getName(),parentResourceType.getPlugin()); + // check on the parent + if (parentType.getChildSubCategories()!=null ) { + for (ResourceSubCategory parentSubcat : parentType.getChildSubCategories()) { + if (parentSubcat.getName().equals(subCatName)) { + subCat = parentSubcat; + break; + } + } + } + + // Not found on runs-inside type look at the ancestor of those runs-inside types? 
+ if (subCat==null) { + subCat = SubCategoriesMetadataParser.findSubCategoryOnResourceTypeAncestor(parentType,subCatName); + } + if (subCat!=null) { + break; + } + } + } + + if (subCat == null) { throw new InvalidPluginDescriptorException("Resource type [" + resourceType.getName() + "] specified a subcategory (" + subCatName + ") that is not defined as a child subcategory of one of its ancestor resource types."); + } resourceType.setSubCategory(subCat); } } diff --git a/modules/core/client-api/src/test/java/org/rhq/core/clientapi/agent/metadata/test/ExtensionModelTest.java b/modules/core/client-api/src/test/java/org/rhq/core/clientapi/agent/metadata/test/ExtensionModelTest.java index 20e1aa9..2a9595f 100644 --- a/modules/core/client-api/src/test/java/org/rhq/core/clientapi/agent/metadata/test/ExtensionModelTest.java +++ b/modules/core/client-api/src/test/java/org/rhq/core/clientapi/agent/metadata/test/ExtensionModelTest.java @@ -226,6 +226,7 @@ public class ExtensionModelTest { assert jbossServer.getCategory().equals(ResourceCategory.SERVER); assert jbossServer.getDescription().equals("JBoss Application Server Description"); assert jbossServer.getParentResourceTypes().size() == 0; + assert jbossServer.getChildSubCategories().size() == 2;
assert jbossServer.getChildResourceTypes().size() == 1; ResourceType embeddedTomcatServer = jbossServer.getChildResourceTypes().iterator().next(); @@ -263,6 +264,7 @@ public class ExtensionModelTest { assert hibernateService.getDescription().equals("Hibernate Service Description"); assert hibernateService.getChildResourceTypes().size() == 0; assert hibernateService.getParentResourceTypes().size() == 3; + assert hibernateService.getSubCategory().getName().equals("Framework");
ResourceType tomcatServer = metadataManager.getType("TomcatServer", "Tomcat"); ResourceType jbossServer = metadataManager.getType("JBossASServer", "JBossAS"); diff --git a/modules/core/client-api/src/test/java/org/rhq/core/clientapi/agent/metadata/test/NestedSubCategoriesMetadataParserTest.java b/modules/core/client-api/src/test/java/org/rhq/core/clientapi/agent/metadata/test/NestedSubCategoriesMetadataParserTest.java new file mode 100644 index 0000000..1cba523 --- /dev/null +++ b/modules/core/client-api/src/test/java/org/rhq/core/clientapi/agent/metadata/test/NestedSubCategoriesMetadataParserTest.java @@ -0,0 +1,159 @@ + /* + * RHQ Management Platform + * Copyright (C) 2005-2008 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation, and/or the GNU Lesser + * General Public License, version 2.1, also as published by the Free + * Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License and the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * and the GNU Lesser General Public License along with this program; + * if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ +package org.rhq.core.clientapi.agent.metadata.test; + + import java.net.URL; + import java.util.List; + import java.util.Set; + + import javax.xml.bind.JAXBContext; + import javax.xml.bind.Unmarshaller; + import javax.xml.bind.util.ValidationEventCollector; + + import org.apache.commons.logging.Log; + import org.apache.commons.logging.LogFactory; + import org.testng.annotations.BeforeSuite; + import org.testng.annotations.Test; + + import org.rhq.core.clientapi.agent.metadata.PluginMetadataManager; + import org.rhq.core.clientapi.agent.metadata.SubCategoriesMetadataParser; + import org.rhq.core.clientapi.descriptor.AgentPluginDescriptorUtil; + import org.rhq.core.clientapi.descriptor.DescriptorPackages; + import org.rhq.core.clientapi.descriptor.plugin.PluginDescriptor; + import org.rhq.core.clientapi.descriptor.plugin.ResourceDescriptor; + import org.rhq.core.clientapi.descriptor.plugin.ServerDescriptor; + import org.rhq.core.clientapi.descriptor.plugin.ServiceDescriptor; + import org.rhq.core.clientapi.descriptor.plugin.SubCategoryDescriptor; + import org.rhq.core.domain.resource.ResourceCategory; + import org.rhq.core.domain.resource.ResourceSubCategory; + import org.rhq.core.domain.resource.ResourceType; + + /** + * @author Charles Crouch + * @author Heiko W. 
Rupp + */ +public class NestedSubCategoriesMetadataParserTest { + private static final String DESCRIPTOR_FILENAME = "test-subcategories-nested.xml"; + private final Log LOG = LogFactory.getLog(NestedSubCategoriesMetadataParserTest.class); + + private PluginDescriptor pluginDescriptor; + + @BeforeSuite + public void loadPluginDescriptor() throws Exception { + try { + URL descriptorUrl = this.getClass().getClassLoader().getResource(DESCRIPTOR_FILENAME); + LOG.info("Loading plugin descriptor at: " + descriptorUrl); + + JAXBContext jaxbContext = JAXBContext.newInstance(DescriptorPackages.PC_PLUGIN); + + Unmarshaller unmarshaller = jaxbContext.createUnmarshaller(); + ValidationEventCollector vec = new ValidationEventCollector(); + unmarshaller.setEventHandler(vec); + pluginDescriptor = (PluginDescriptor) unmarshaller.unmarshal(descriptorUrl.openStream()); + } catch (Throwable t) { + // Catch RuntimeExceptions and Errors and dump their stack trace, because Surefire will completely swallow them + // and throw a cryptic NPE (see http://jira.codehaus.org/browse/SUREFIRE-157)! 
+ t.printStackTrace(); + throw new RuntimeException(t); + } + } + + @Test + public void parseSingleSubCategory() { + List<ServerDescriptor> servers = pluginDescriptor.getServers(); + ServerDescriptor server0 = servers.get(0); + ResourceDescriptor.Subcategories subCategoriesDescriptor = server0.getSubcategories(); + assert subCategoriesDescriptor != null : "No subcategories element: " + server0.getName(); + + List<SubCategoryDescriptor> subCategoryDescriptors = subCategoriesDescriptor.getSubcategory(); + + assert subCategoryDescriptors != null : "No subcategory elements: " + server0.getName(); + assert !subCategoryDescriptors.isEmpty() : "No subcategory elements: " + server0.getName(); + + ResourceSubCategory subCat; + + ResourceType resType = new ResourceType("testResType", "myplugin", ResourceCategory.SERVER, null); + subCat = SubCategoriesMetadataParser.getSubCategory(subCategoryDescriptors.get(0), resType); + + assert subCat != null : "Null subcategory received from parser"; + assert subCat.getName().equals("applications") : "Name not read correctly"; + assert subCat.getDisplayName().equals("Apps") : "Display name not read correctly"; + assert subCat.getDescription().equals("The apps.") : "Description not read correctly"; + // getSubCategory is no longer responsible for setting resourcetype information, that is done in PluginMetadataParser + //assert subCat.getResourceType().equals(resType) : "ResourceType not set correctly"; + + } + + @Test + public void parseNestedSubCategories() { + List<ServerDescriptor> servers = pluginDescriptor.getServers(); + ServerDescriptor server2 = servers.get(1); + assert server2.getName().equals("testServer2"); + ResourceDescriptor.Subcategories subCategoriesDescriptor = server2.getSubcategories(); + assert subCategoriesDescriptor == null : "Unexpected subcategories element: " + server2.getName(); + assert server2.getSubCategory().equals("applications"); + + List<ServiceDescriptor> services = pluginDescriptor.getServices(); + 
ServiceDescriptor service1 = services.get(0); + assert service1.getName().equals("testService"); + assert service1.getSubCategory().equals("applications"); + } + + @Test + public void testParseViaMetaDataManager() throws Exception { + + PluginDescriptor pluginDescriptor; + + URL descriptorUrl = this.getClass().getClassLoader().getResource(DESCRIPTOR_FILENAME); + System.out.println("Loading plugin descriptor at: " + descriptorUrl); + + pluginDescriptor = (PluginDescriptor) AgentPluginDescriptorUtil.parsePluginDescriptor(descriptorUrl + .openStream()); + + PluginMetadataManager metadataManager = new PluginMetadataManager(); + Set<ResourceType> typeSet = metadataManager.loadPlugin(pluginDescriptor); + assert typeSet != null : "Got no types!!"; + assert typeSet.size()==5 : "Expected 5 types, but got " + typeSet.size(); + + ResourceType testService = findType(typeSet,"testService"); + assert testService.getSubCategory().getName().equals("applications"); + + ResourceType testService2 = findType(typeSet,"testService2"); + assert testService2.getSubCategory().getName().equals("applications"); + + ResourceType testService3 = findType(typeSet,"testService3"); + assert testService3.getSubCategory().getName().equals("fooBar"); + + + } + + private ResourceType findType(Set<ResourceType> types, String name) { + for (ResourceType type : types ) { + if (type.getName().equals(name)) { + return type; + } + } + assert false : "Type with name " + name + " not found"; + return null; + } + } \ No newline at end of file diff --git a/modules/core/client-api/src/test/resources/test-hibernate.xml b/modules/core/client-api/src/test/resources/test-hibernate.xml index 37a2e03..9051ca2 100644 --- a/modules/core/client-api/src/test/resources/test-hibernate.xml +++ b/modules/core/client-api/src/test/resources/test-hibernate.xml @@ -3,7 +3,7 @@ package="org.rhq.plugins.test2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="urn:xmlns:rhq-plugin"> - + <depends plugin="JMX" /> <depends 
plugin="Tomcat" /> <depends plugin="JBossAS" /> @@ -11,11 +11,13 @@ <service name="HibernateService" discovery="HibernateDiscoveryComponent" class="HibernateServiceComponent" - description="Hibernate Service Description"> + description="Hibernate Service Description" + subCategory="Framework" + > <runs-inside> <parent-resource-type name="TomcatServer" plugin="Tomcat"/> <parent-resource-type name="JBossASServer" plugin="JBossAS"/> <parent-resource-type name="EmbeddedTomcatServer" plugin="JBossAS"/> </runs-inside> - </service> + </service> </plugin> \ No newline at end of file diff --git a/modules/core/client-api/src/test/resources/test-jbossas.xml b/modules/core/client-api/src/test/resources/test-jbossas.xml index 4d2602b..8269343 100644 --- a/modules/core/client-api/src/test/resources/test-jbossas.xml +++ b/modules/core/client-api/src/test/resources/test-jbossas.xml @@ -12,7 +12,11 @@ discovery="JBossASDiscoveryComponent" class="JBossASServerComponent" description="JBoss Application Server Description"> - + <subcategories> + <subcategory name="Applications" /> + <subcategory name="Framework" /> + </subcategories> + <operation name="stop" displayName="Stop JBossAS Server" description="Kills the server" timeout="30"> <parameters> <c:simple-property name="force" @@ -29,7 +33,7 @@ required="true" description="If true, the server is definitely down; otherwise, the shutdown was issued but it is unclear if it really died"/> </results> - </operation> + </operation> <server name="EmbeddedTomcatServer" description="Embedded Tomcat Web Server Description" sourcePlugin="Tomcat" @@ -37,6 +41,6 @@ discovery="JBossASTomcatDiscoveryComponent" class="JBossASTomcatServerComponent"> </server> - + </server> </plugin> \ No newline at end of file diff --git a/modules/core/client-api/src/test/resources/test-subcategories-nested.xml b/modules/core/client-api/src/test/resources/test-subcategories-nested.xml new file mode 100644 index 0000000..eb43af7 --- /dev/null +++ 
b/modules/core/client-api/src/test/resources/test-subcategories-nested.xml @@ -0,0 +1,60 @@ +<!-- + ~ RHQ Management Platform + ~ Copyright (C) 2005-2013 Red Hat, Inc. + ~ All rights reserved. + ~ + ~ This program is free software; you can redistribute it and/or modify + ~ it under the terms of the GNU General Public License as published by + ~ the Free Software Foundation version 2 of the License. + ~ + ~ This program is distributed in the hope that it will be useful, + ~ but WITHOUT ANY WARRANTY; without even the implied warranty of + ~ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + ~ GNU General Public License for more details. + ~ + ~ You should have received a copy of the GNU General Public License + ~ along with this program; if not, write to the Free Software Foundation, Inc., + ~ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + --> + +<plugin name="TestPlugin" displayName="Mock JBoss AS" package="org.rhq.plugins.mock.jboss" + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xmlns="urn:xmlns:rhq-plugin"> + + <server name="testServer1"> + <subcategories> + <subcategory name="applications" displayName="Apps" description="The apps."/> + <subcategory name="fooBar"/> + </subcategories> + </server> + + <!-- subCategory="applications" means that resources of this type go in the 'applications folder' of testServer1--> + <server name="testServer2" subCategory="applications"> + <runs-inside> + <parent-resource-type name="testServer1" plugin="TestPlugin"/> + </runs-inside> + <metric property="testMetric"/> + </server> + + <service name="testService" subCategory="applications"> + <runs-inside> + <parent-resource-type name="testServer1" plugin="TestPlugin"/> + </runs-inside> + + </service> + + <service name="testService2" subCategory="applications"> + <runs-inside> + <parent-resource-type name="testServer1" plugin="TestPlugin"/> + </runs-inside> + + </service> + + <service name="testService3" subCategory="fooBar"> + <runs-inside> + 
<parent-resource-type name="testServer1" plugin="TestPlugin"/> + </runs-inside> + + </service> + +</plugin>
commit 60329fcbda5a1961e0f9285c70eb56ea12fe2eb0 Author: Heiko W. Rupp hwr@redhat.com Date: Mon Jul 22 15:00:35 2013 +0200
Add Michael Burman as contributor
diff --git a/pom.xml b/pom.xml index c5cf5a7..3662bc7 100644 --- a/pom.xml +++ b/pom.xml @@ -2353,6 +2353,10 @@ <timezone>+1</timezone> </contributor> <contributor> + <name>Michael Burman</name> + <timezone>+2</timezone> + </contributor> + <contributor> <name>Torben JÀger</name> <timezone>+1</timezone> </contributor>
commit d7e9f5b9871824d1f02ae762b44cff85ff6c3d44 Author: Thomas Segismont tsegismo@redhat.com Date: Tue Jul 23 10:41:12 2013 +0200
Bug 969621 - EAP 6 managed plug-in is unable to discover EAP servers when more then one is running on a single host
Update expected resource keys in itests
diff --git a/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/domain/DomainServerComponentTest.java b/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/domain/DomainServerComponentTest.java index 91ece92..269474b 100644 --- a/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/domain/DomainServerComponentTest.java +++ b/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/domain/DomainServerComponentTest.java @@ -43,8 +43,11 @@ public class DomainServerComponentTest extends AbstractServerComponentTest {
public static final ResourceType RESOURCE_TYPE = new ResourceType("JBossAS7 Host Controller", PLUGIN_NAME, ResourceCategory.SERVER, null); - // The key is the server's base dir. - public static final String RESOURCE_KEY = new File(JBOSS_HOME, "domain").getPath(); + // The key is the server host config file + // hostConfig: /tmp/jboss-as-6.0.0/domain/configuration/host.xml + public static final String RESOURCE_KEY = "hostConfig: " + + new File(JBOSS_HOME, "domain" + File.separator + "configuration" + File.separator + "host.xml") + .getAbsolutePath();
@Override protected ResourceType getServerResourceType() { diff --git a/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/domain/SecurityModuleOptionsTest.java b/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/domain/SecurityModuleOptionsTest.java index d128144..182ef36 100644 --- a/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/domain/SecurityModuleOptionsTest.java +++ b/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/domain/SecurityModuleOptionsTest.java @@ -1,6 +1,6 @@ /* * RHQ Management Platform - * Copyright (C) 2005-2011 Red Hat, Inc. + * Copyright (C) 2005-2013 Red Hat, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify @@ -13,8 +13,8 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ package org.rhq.modules.plugins.jbossas7.itest.domain;
@@ -488,10 +488,8 @@ public class SecurityModuleOptionsTest extends AbstractJBossAS7PluginTest { InventoryManager im = pluginContainer.getInventoryManager(); Resource platform = im.getPlatform(); //host controller - ResourceType hostControllerType = new ResourceType("JBossAS7 Host Controller", PLUGIN_NAME, - ResourceCategory.SERVER, null); - Resource hostController = getResourceByTypeAndKey(platform, hostControllerType, - "/tmp/jboss-as-6.0.0/domain"); + Resource hostController = getResourceByTypeAndKey(platform, DomainServerComponentTest.RESOURCE_TYPE, + DomainServerComponentTest.RESOURCE_KEY); //profile=full-ha ResourceType profileType = new ResourceType("Profile", PLUGIN_NAME, ResourceCategory.SERVICE, null); String key = PROFILE; diff --git a/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/standalone/StandaloneServerComponentTest.java b/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/standalone/StandaloneServerComponentTest.java index 8446345..32f92c7 100644 --- a/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/standalone/StandaloneServerComponentTest.java +++ b/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/standalone/StandaloneServerComponentTest.java @@ -51,8 +51,11 @@ public class StandaloneServerComponentTest extends AbstractServerComponentTest {
public static final ResourceType RESOURCE_TYPE = new ResourceType("JBossAS7 Standalone Server", PLUGIN_NAME, ResourceCategory.SERVER, null); - // The key is the server's base dir. - public static final String RESOURCE_KEY = new File(JBOSS_HOME, "standalone").getPath(); + // The key is the server host config file + // hostConfig: /tmp/jboss-as-6.0.0/standalone/configuration/standalone-full-ha.xml + public static final String RESOURCE_KEY = "hostConfig: " + + new File(JBOSS_HOME, "standalone" + File.separator + "configuration" + File.separator + + "standalone-full-ha.xml").getAbsolutePath();
private static final String RELOAD_OPERATION_NAME = "reload"; private static final String RESTART_OPERATION_NAME = "restart";
commit 567aee7f81c6aa0f7680d4f394cccb1974705320 Author: Larry O'Leary loleary@redhat.com Date: Mon Jul 22 16:10:09 2013 -0500
BZ 981015: Fix test failures introduced by commit 01cd91b - findLdapUserDetails was appending baseDN twice during fallback code - FakeLdapContext contained some lazy escaping on the mock group entries
diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/ldap/FakeLdapContext.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/ldap/FakeLdapContext.java index dad31ce..2ae6265 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/ldap/FakeLdapContext.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/ldap/FakeLdapContext.java @@ -99,7 +99,6 @@ public class FakeLdapContext implements LdapContext { try { return new FakeNamingEnumeration<SearchResult>(ldapTestData.getSearchResults(attributes)); } catch (Exception e) { - // TODO Auto-generated catch block e.printStackTrace(); return null; } @@ -516,12 +515,12 @@ public class FakeLdapContext implements LdapContext { attr = new BasicAttribute("member"); attr.add("cn=Robert Smith,ou=users,dc=test,dc=rhq,dc=redhat,dc=com"); attr.add("cn=Cannon\, Brett,ou=users,dc=test,dc=rhq,dc=redhat,dc=com"); - attr.add("cn=Charles H\\Samlin,ou=users,dc=test,dc=rhq,dc=redhat,dc=com"); + attr.add("cn=Charles H\Samlin,ou=users,dc=test,dc=rhq,dc=redhat,dc=com"); attr.add("cn=Craig \#1 Sellers,ou=users,dc=test,dc=rhq,dc=redhat,dc=com"); attr.add("cn=Beverly \+1 Balanger,ou=users,dc=test,dc=rhq,dc=redhat,dc=com"); attr.add("cn=Bethany \<Stuart\> Wallace,ou=users,dc=test,dc=rhq,dc=redhat,dc=com"); attr.add("cn=Zachory S\; Balanger,ou=users,dc=test,dc=rhq,dc=redhat,dc=com"); - attr.add("cn=Allen \"The Hammer\" Callen,ou=users,dc=test,dc=rhq,dc=redhat,dc=com"); + attr.add("cn=Allen "The Hammer" Callen,ou=users,dc=test,dc=rhq,dc=redhat,dc=com"); attr.add("cn=Sam Not \= Smitherson,ou=users,dc=test,dc=rhq,dc=redhat,dc=com"); attr.add("cn=\ Billy The Kiddough\ ,ou=users,dc=test,dc=rhq,dc=redhat,dc=com"); attr.add("cn=System/Integration API,ou=users,dc=test,dc=rhq,dc=redhat,dc=com"); @@ -557,12 +556,12 @@ public class FakeLdapContext implements LdapContext { attr = new BasicAttribute("member"); 
attr.add("cn=John Smith,ou=users,dc=test,dc=rhq,dc=redhat,dc=com"); attr.add("cn=Dr. Greg Hause\, MD,ou=users,dc=test,dc=rhq,dc=redhat,dc=com"); - attr.add("cn=Cindy\\Cynthia Groober,ou=users,dc=test,dc=rhq,dc=redhat,dc=com"); + attr.add("cn=Cindy\Cynthia Groober,ou=users,dc=test,dc=rhq,dc=redhat,dc=com"); attr.add("cn=Biff \# Rogers,ou=users,dc=test,dc=rhq,dc=redhat,dc=com"); attr.add("cn=Steven \+2 Reed,ou=users,dc=test,dc=rhq,dc=redhat,dc=com"); attr.add("cn=Lisa \<The Great\> Toller,ou=users,dc=test,dc=rhq,dc=redhat,dc=com"); attr.add("cn=Homer J Simpsonite\; III,ou=users,dc=test,dc=rhq,dc=redhat,dc=com"); - attr.add("cn=Jessica \"Crouching Tiger\" Mathers,ou=users,dc=test,dc=rhq,dc=redhat,dc=com"); + attr.add("cn=Jessica "Crouching Tiger" Mathers,ou=users,dc=test,dc=rhq,dc=redhat,dc=com"); attr.add("cn=Hope \= Rein,ou=users,dc=test,dc=rhq,dc=redhat,dc=com"); attr.add("cn=\ Sue Ferguson\ ,ou=users,dc=test,dc=rhq,dc=redhat,dc=com"); attr.add("cn=Phil/Susan Carlson,ou=users,dc=test,dc=rhq,dc=redhat,dc=com"); @@ -598,12 +597,12 @@ public class FakeLdapContext implements LdapContext { attr = new BasicAttribute("member"); attr.add("cn=Sheri Smith,ou=users,dc=test,dc=rhq,dc=redhat,dc=com"); attr.add("cn=Walsh\, Brad,ou=users,dc=test,dc=rhq,dc=redhat,dc=com"); - attr.add("cn=Jim\\James Kirk,ou=users,dc=test,dc=rhq,dc=redhat,dc=com"); + attr.add("cn=Jim\James Kirk,ou=users,dc=test,dc=rhq,dc=redhat,dc=com"); attr.add("cn=Sandra \# Phillips,ou=users,dc=test,dc=rhq,dc=redhat,dc=com"); attr.add("cn=William Tell Overture \+1,ou=users,dc=test,dc=rhq,dc=redhat,dc=com"); attr.add("cn=Craig \<Bison\> Allen,ou=users,dc=test,dc=rhq,dc=redhat,dc=com"); attr.add("cn=Walter T Fredrick\; The Second,ou=users,dc=test,dc=rhq,dc=redhat,dc=com"); - attr.add("cn=Stanley \"Short\" Mein,ou=users,dc=test,dc=rhq,dc=redhat,dc=com"); + attr.add("cn=Stanley "Short" Mein,ou=users,dc=test,dc=rhq,dc=redhat,dc=com"); attr.add("cn=Noah \= Sadler,ou=users,dc=test,dc=rhq,dc=redhat,dc=com"); 
attr.add("cn=\ Stuart Smiley\ ,ou=users,dc=test,dc=rhq,dc=redhat,dc=com"); attr.add("cn=System/Integration API 2,ou=users,dc=test,dc=rhq,dc=redhat,dc=com"); @@ -984,7 +983,7 @@ public class FakeLdapContext implements LdapContext { attr.add("User with backslash (\) in 'cn' in the RHQ Admin Group"); attrs.put(attr);
- sr = new SearchResult("cn=Charles H\\Samlin,ou=users", null, null, attrs, true); + sr = new SearchResult("cn=Charles H\Samlin,ou=users", null, null, attrs, true); this.add(sr);
// dn: cn=Cindy\Cynthia Groober,ou=users,dc=test,dc=rhq,dc=redhat,dc=com @@ -1033,7 +1032,7 @@ public class FakeLdapContext implements LdapContext { attr.add("User with backslash (\) in 'cn' in the JBoss Admin Group"); attrs.put(attr);
- sr = new SearchResult("cn=Cindy\\Cynthia Groober,ou=users", null, null, attrs, true); + sr = new SearchResult("cn=Cindy\Cynthia Groober,ou=users", null, null, attrs, true); this.add(sr);
// dn: cn=Jim\James Kirk,ou=users,dc=test,dc=rhq,dc=redhat,dc=com @@ -1082,7 +1081,7 @@ public class FakeLdapContext implements LdapContext { attr.add("User with backslash (\) in 'cn' in the JBoss Monitor Group"); attrs.put(attr);
- sr = new SearchResult("cn=Jim\\James Kirk,ou=users", null, null, attrs, true); + sr = new SearchResult("cn=Jim\James Kirk,ou=users", null, null, attrs, true); this.add(sr);
// dn: cn=Craig #1 Sellers,ou=users,dc=test,dc=rhq,dc=redhat,dc=com @@ -1675,7 +1674,7 @@ public class FakeLdapContext implements LdapContext { null, attrs, true); this.add(sr);
- // dn: cn=Allen "The Hammer" Callen,ou=users,dc=test,dc=rhq,dc=redhat,dc=com + // dn: cn=Allen "The Hammer" Callen,ou=users,dc=test,dc=rhq,dc=redhat,dc=com attrs = new BasicAttributes();
attr = new BasicAttribute("baseName"); @@ -1721,11 +1720,11 @@ public class FakeLdapContext implements LdapContext { attr.add("User with quote (") in 'cn' in the RHQ Admin Group"); attrs.put(attr);
- sr = new SearchResult("cn=Allen \"The Hammer\" Callen,ou=users", "javax.naming.directory.DirContext", + sr = new SearchResult("cn=Allen "The Hammer" Callen,ou=users", "javax.naming.directory.DirContext", null, attrs, true); this.add(sr);
- // dn: cn=Jessica "Crouching Tiger" Mathers,ou=users,dc=test,dc=rhq,dc=redhat,dc=com + // dn: cn=Jessica "Crouching Tiger" Mathers,ou=users,dc=test,dc=rhq,dc=redhat,dc=com attrs = new BasicAttributes();
attr = new BasicAttribute("baseName"); @@ -1771,11 +1770,11 @@ public class FakeLdapContext implements LdapContext { attr.add("User with quote (") in 'cn' in the JBoss Admin Group"); attrs.put(attr);
- sr = new SearchResult("cn=Jessica \"Crouching Tiger\" Mathers,ou=users", + sr = new SearchResult("cn=Jessica "Crouching Tiger" Mathers,ou=users", "javax.naming.directory.DirContext", null, attrs, true); this.add(sr);
- // dn: cn=Stanley "Short" Mein,ou=users,dc=test,dc=rhq,dc=redhat,dc=com + // dn: cn=Stanley "Short" Mein,ou=users,dc=test,dc=rhq,dc=redhat,dc=com attrs = new BasicAttributes();
attr = new BasicAttribute("baseName"); @@ -1821,7 +1820,7 @@ public class FakeLdapContext implements LdapContext { attr.add("User with quote (") in 'cn' in the JBoss Monitor Group"); attrs.put(attr);
- sr = new SearchResult("cn=Stanley \"Short\" Mein,ou=users", null, null, attrs, true); + sr = new SearchResult("cn=Stanley "Short" Mein,ou=users", null, null, attrs, true); this.add(sr);
// dn: cn=Sam Not = Smitherson,ou=users,dc=test,dc=rhq,dc=redhat,dc=com @@ -2160,7 +2159,7 @@ public class FakeLdapContext implements LdapContext { attr.add("User with slash (/) in 'cn' in the RHQ Admin Group"); attrs.put(attr);
- sr = new SearchResult("cn=System/Integration API,ou=users", null, null, attrs, true); + sr = new SearchResult("cn=System\/Integration API,ou=users", null, null, attrs, true); this.add(sr);
// dn: cn=Phil/Susan Carlson,ou=users,dc=test,dc=rhq,dc=redhat,dc=com @@ -2209,7 +2208,7 @@ public class FakeLdapContext implements LdapContext { attr.add("User with slash (/) in 'cn' in the JBoss Admin Group"); attrs.put(attr);
- sr = new SearchResult("cn=Phil/Susan Carlson,ou=users", null, null, attrs, true); + sr = new SearchResult("cn=Phil\/Susan Carlson,ou=users", null, null, attrs, true); this.add(sr);
// dn: cn=System/Integration API 2,ou=users,dc=test,dc=rhq,dc=redhat,dc=com @@ -2254,7 +2253,7 @@ public class FakeLdapContext implements LdapContext { attr.add("User with slash (/) in 'cn' in the JBoss Monitor Group"); attrs.put(attr);
- sr = new SearchResult("cn=System/Integration API 2,ou=users", null, null, attrs, true); + sr = new SearchResult("cn=System\/Integration API 2,ou=users", null, null, attrs, true); this.add(sr);
// dn: cn=Lee -Fast- Croutche,ou=users,dc=test,dc=rhq,dc=redhat,dc=com diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java index a28c709..7473321 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java @@ -350,7 +350,6 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { if (si.isRelative()) { userDN += "," + baseDNs[x]; } - userDN = userDN + "," + baseDNs[x]; } userDetails.put("dn", userDN);
commit 8c693ee685d538a28a3c42ce813b10b49997f871 Author: Jay Shaughnessy jshaughn@redhat.com Date: Mon Jul 22 16:36:23 2013 -0400
get rid of the upgrade wording, it's confusing when performing an initial install.
diff --git a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/AbstractInstall.java b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/AbstractInstall.java index 61c8a9c..fb9bceb 100644 --- a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/AbstractInstall.java +++ b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/AbstractInstall.java @@ -363,14 +363,14 @@ public abstract class AbstractInstall extends ControlCommand {
protected void startRHQServerForInstallation() throws IOException { try { - log.info("The RHQ Server must be started to complete its upgrade. Starting the RHQ server in preparation of running the server installer..."); + log.info("The RHQ Server must be started to complete its installation. Starting the RHQ server in preparation of running the server installer...");
// when you unzip the distro, you are getting a fresh, unadulterated, out-of-box EAP installation, which by default listens // to port 9999 for its native management subsystem. Make sure some other independent EAP server (or anything for that matter) // isn't already listening to that port. if (isPortInUse("127.0.0.1", 9999)) { throw new IOException( - "Something is already listening to port 9999 - shut it down before upgrading the server."); + "Something is already listening to port 9999 - shut it down before installing the server."); }
Executor executor = new DefaultExecutor(); @@ -400,7 +400,7 @@ public abstract class AbstractInstall extends ControlCommand { }
// Wait for the server to complete it's startup - log.info("Waiting for the RHQ Server to start in preparation of running the server installer for upgrade..."); + log.info("Waiting for the RHQ Server to start in preparation of running the server installer..."); commandLine = getCommandLine("rhq-installer", "--test");
Executor installerExecutor = new DefaultExecutor();
commit 2409ed2dcd705c58e5024182e95445431b25acf5 Author: John Sanda jsanda@redhat.com Date: Mon Jul 22 15:16:49 2013 -0400
document the 4.8 storage patch script
diff --git a/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.sh b/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.sh index 33984d1..ae78240 100755 --- a/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.sh +++ b/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.sh @@ -1,7 +1,43 @@ #!/bin/bash +# +# BACKGROUND: +# This patch script needs to be run against RHQ 4.8.0 installations prior to +# upgrading to a newer version. You do not need to run this script if upgrading +# from a version earlier than 4.8.0. +# +# PREREQUISITES: +# 1) Shut down the RHQ storage node and server. +# +# 2) Edit <rhq-install-dir>/rhq-storage/conf/cassandra.yaml and set the +# following property, +# +# start_rpc: true +# +# 3) Note the value of rpc_port in cassandra.yaml. By default it is 9160 which +# is fine. +# +# RUNNING THE PATCH: +# 1) cd <patch-dir> +# +# 2) ./rhq48-storage-patch.sh <rhq-480-server-dir> <storage-ip-address> <thrift-port> <jmx-port> +# +# 3) Carefully review the script output for any errors. +# +# 4) Edit cassandra.yaml again and reset start_rpc: false +# +# ADDITIONAL NOTES: +# The <jmx-port> defaults to 7299. If you are uncertain of what value to use, +# you can find it in the UI. Log into RHQ and go to Administration --> Storage Nodes. +# +# If you are uncertain of the value to use for the storage node IP address, you +# can find the correct value in the storage nodes admin UI as well. +# +# EXAMPLE: +# ./rhq48-storage-patch.sh /opt/rhq-4.8.0 127.0.0.1 9160 7299 +# Usage: ./rhq48-storage-patch.sh <rhq-480-server-dir> <storage-ip-address> <thrift-port> <jmx-port>
function usage() { - echo "Usage: $0 <rhq-server-dir> <storage-ip-address> <cql-port> <jmx-port>" + echo "Usage: $0 <rhq-480-server-dir> <storage-ip-address> <thrift-port> <jmx-port>" }
if [ $# -ne 4 ]; then @@ -16,11 +52,11 @@ fi
RHQ_SERVER_DIR=$1 CQL_HOSTNAME=$2 -CQL_PORT=$3 +THRIFT_PORT=$3 JMX_PORT=$4
export CQLSH_HOST=$2 -export CQL_PORT=$3 +export CQLSH_PORT=$3
PATCH="apache-cassandra-1.2.4-patch-1.jar"
commit c91c8f23416db836308b2bf3871fdda87559297e Author: Stefan Negrea snegrea@redhat.com Date: Mon Jul 22 14:12:08 2013 -0500
Update the storage node manager API for alerts to support UI functionality.
diff --git a/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml index a9ce322..58c4eda 100644 --- a/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml +++ b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml @@ -11,8 +11,22 @@ <difference> <className>org/rhq/enterprise/server/cloud/StorageNodeManagerRemote</className> <differenceType>7012</differenceType> <!-- method added to an interface --> + <method>org.rhq.core.domain.util.PageList findNotAcknowledgedStorageNodeAlerts(org.rhq.core.domain.auth.Subject, org.rhq.core.domain.cloud.StorageNode)</method> + <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> + </difference> + + <difference> + <className>org/rhq/enterprise/server/cloud/StorageNodeManagerRemote</className> + <differenceType>7012</differenceType> <!-- method added to an interface --> <method>org.rhq.core.domain.util.PageList findAllStorageNodeAlerts(org.rhq.core.domain.auth.Subject)</method> <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> </difference>
+ <difference> + <className>org/rhq/enterprise/server/cloud/StorageNodeManagerRemote</className> + <differenceType>7012</differenceType> <!-- method added to an interface --> + <method>org.rhq.core.domain.util.PageList findAllStorageNodeAlerts(org.rhq.core.domain.auth.Subject, org.rhq.core.domain.cloud.StorageNode)</method> + <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> + </difference> + </differences> diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index c2a7b46..b32ab5b 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -512,12 +512,22 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
@Override public PageList<Alert> findNotAcknowledgedStorageNodeAlerts(Subject subject) { - return findStorageNodeAlerts(subject, false); + return findStorageNodeAlerts(subject, false, null); + } + + @Override + public PageList<Alert> findNotAcknowledgedStorageNodeAlerts(Subject subject, StorageNode storageNode) { + return findStorageNodeAlerts(subject, false, storageNode); }
@Override public PageList<Alert> findAllStorageNodeAlerts(Subject subject) { - return findStorageNodeAlerts(subject, true); + return findStorageNodeAlerts(subject, true, null); + } + + @Override + public PageList<Alert> findAllStorageNodeAlerts(Subject subject, StorageNode storageNode) { + return findStorageNodeAlerts(subject, true, storageNode); }
/** @@ -527,8 +537,8 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN * @param allAlerts if [true] then return all alerts; if [false] then return only alerts that are not acknowledged * @return alerts */ - private PageList<Alert> findStorageNodeAlerts(Subject subject, boolean allAlerts) { - Integer[] resouceIdsWithAlertDefinitions = findResourcesWithAlertDefinitions(); + private PageList<Alert> findStorageNodeAlerts(Subject subject, boolean allAlerts, StorageNode storageNode) { + Integer[] resouceIdsWithAlertDefinitions = findResourcesWithAlertDefinitions(storageNode); PageList<Alert> alerts = new PageList<Alert>();
if( resouceIdsWithAlertDefinitions != null && resouceIdsWithAlertDefinitions.length != 0 ){ @@ -555,31 +565,35 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN return alerts; }
- /** - * Return resource Ids for all resources and sub-resources of Storage Nodes that - * have alert definitions. This will be used by the resource criteria to find - * all alerts triggered for storage nodes. - * - * @return - */ - private Integer[] findResourcesWithAlertDefinitions() { - List<Integer> resourceIdsWithAlertDefinitions = new ArrayList<Integer>(); - List<StorageNode> test2 = getStorageNodes(); + @Override + public Integer[] findResourcesWithAlertDefinitions() { + return this.findResourcesWithAlertDefinitions(null); + } + + @Override + public Integer[] findResourcesWithAlertDefinitions(StorageNode storageNode) { + List<StorageNode> initialStorageNodes; + if (storageNode == null) { + initialStorageNodes = getStorageNodes(); + } else { + initialStorageNodes = Arrays.asList(storageNode); + }
Queue<Resource> unvisitedResources = new LinkedList<Resource>(); - for (StorageNode node : test2) { - if (node.getResource() != null) { - unvisitedResources.add(node.getResource()); + for (StorageNode initialStorageNode : initialStorageNodes) { + if (initialStorageNode.getResource() != null) { + unvisitedResources.add(initialStorageNode.getResource()); } }
- while(!unvisitedResources.isEmpty()){ + List<Integer> resourceIdsWithAlertDefinitions = new ArrayList<Integer>(); + while (!unvisitedResources.isEmpty()) { Resource resource = unvisitedResources.poll(); if (resource.getAlertDefinitions() != null) { resourceIdsWithAlertDefinitions.add(resource.getId()); }
- for(Resource child: resource.getChildResources()){ + for (Resource child : resource.getChildResources()) { unvisitedResources.add(child); } } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java index 6fca820..69b16c4 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java @@ -81,6 +81,15 @@ public interface StorageNodeManagerLocal { PageList<Alert> findNotAcknowledgedStorageNodeAlerts(Subject subject);
/** + * Fetches the list of Storage Node related alerts that have not yet been acknowledged for the + * specified storage node. + * + * @param subject subject + * @return storage nodes alerts not acknowledged + */ + PageList<Alert> findNotAcknowledgedStorageNodeAlerts(Subject subject, StorageNode storageNode); + + /** * Fetches all the Storage Node related alerts. * * @param subject subject @@ -89,6 +98,39 @@ public interface StorageNodeManagerLocal { PageList<Alert> findAllStorageNodeAlerts(Subject subject);
/** + * Fetches all the Storage Node related alerts for the specified storage node. + * + * @param subject subject + * @return all storage nodes alerts + */ + PageList<Alert> findAllStorageNodeAlerts(Subject subject, StorageNode storageNode); + + + /** + * Find ids for all resources and sub-resources of Storage Nodes that + * have alert definitions. This can be used by the resource criteria queries to find + * all alerts triggered for storage nodes resources. + * + * @return resource ids + */ + Integer[] findResourcesWithAlertDefinitions(); + + /** + * Find ids for all resources and sub-resources, of the specified storage node, that + * have alert definitions. This can be used by the resource criteria queries to find + * all alerts triggered for storage nodes resources. + * + * If storage node is null it find ids for all resources and sub-resources of Storage Nodes that + * have alert definitions. Please see {@link #findResourcesWithAlertDefinitions()} for more details. + * + * @param storageNode storage node + * + * @return resource ids + */ + Integer[] findResourcesWithAlertDefinitions(StorageNode storageNode); + + + /** * <p>Prepares the node for subsequent upgrade.</p> * <p> CAUTION: this method will set the RHQ server to maintenance mode, RHQ storage flushes all the data to disk * and backup of all the keyspaces is created</p> diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java index 7be1b07..75ac02b 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java @@ -73,10 +73,27 @@ public interface StorageNodeManagerRemote { PageList<Alert> findNotAcknowledgedStorageNodeAlerts(Subject subject);
/** + * Fetches the list of Storage Node related alerts that have not yet been acknowledged for the + * specified storage node. + * + * @param subject subject + * @return storage nodes alerts not acknowledged + */ + PageList<Alert> findNotAcknowledgedStorageNodeAlerts(Subject subject, StorageNode storageNode); + + /** * Fetches all the Storage Node related alerts. * * @param subject subject * @return all storage nodes alerts */ PageList<Alert> findAllStorageNodeAlerts(Subject subject); + + /** + * Fetches all the Storage Node related alerts for the specified storage node. + * + * @param subject subject + * @return all storage nodes alerts + */ + PageList<Alert> findAllStorageNodeAlerts(Subject subject, StorageNode storageNode); }
commit c6c9e50398ffb5fc6d297ceffc369975a56b3ef9 Author: Jay Shaughnessy jshaughn@redhat.com Date: Mon Jul 22 14:09:52 2013 -0400
Add windows support for storage install options and resource config update. Introduce rhq-storage-wrapper.env to hold the configurable values (mimicking somewhat cassandra-jvm.properties) and apply the values as token replacements in rhq-storage-wrapper.conf.
Note that cassandra-jvm.properties is still kept up to date on windows and can be used generically, as needed.
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java index 6547043..60667cc 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java +++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java @@ -91,7 +91,14 @@ public class Deployer { applyConfigChanges(confDir, "cassandra.yaml", tokens); applyConfigChanges(confDir, "log4j-server.properties", tokens); applyChangesToCassandraJvmProps(confDir, deploymentOptions); -// applyConfigChanges(confDir, "cassandra-env.sh", tokens); + + // For windows, update the service wrapper env. It may not be necessary to have updated cassandra-jvm.properties + // as well as this file, but for now we'll update both, leaving the former as a dependably set file. + if (File.separatorChar == '\\') { + applyChangesToWindowsServiceWrapper(deployDir); + } + + // applyConfigChanges(confDir, "cassandra-env.sh", tokens); }
private void applyConfigChanges(File confDir, String fileName, Map<String, String> tokens) @@ -109,8 +116,8 @@ public class Deployer { rhqFile.delete(); } catch (IOException e) { log.error("An unexpected error occurred while apply configuration changes to " + filteredFile, e); - throw new DeploymentException("An unexpected error occurred while apply configuration changes to " + - filteredFile, e); + throw new DeploymentException("An unexpected error occurred while apply configuration changes to " + + filteredFile, e); } }
@@ -132,8 +139,8 @@ public class Deployer {
String javaVersion = System.getProperty("java.version"); // The check here is taken right from cassandra-env.sh - if ((!isOpenJDK() || javaVersion.compareTo("1.6.0") > 0) || - (javaVersion.equals("1.6.0") && getJavaPatchVersion() > 23)) { + if ((!isOpenJDK() || javaVersion.compareTo("1.6.0") > 0) + || (javaVersion.equals("1.6.0") && getJavaPatchVersion() > 23)) { properties.put("java_agent", "-javaagent:$CASSANDRA_HOME/lib/jamm-0.2.5.jar"); }
@@ -165,6 +172,29 @@ public class Deployer { return Integer.parseInt(javaVersion.substring(startIndex + 1, javaVersion.length())); }
+ public void applyChangesToWindowsServiceWrapper(File deployDir) throws DeploymentException { + File wrapperDir = new File(deployDir, "../bin/wrapper"); + File wrapperEnvFile = new File(wrapperDir, "rhq-storage-wrapper.env"); + + try { + log.info("Applying configuration changes to " + wrapperEnvFile); + + PropertiesFileUpdate propertiesUpdater = new PropertiesFileUpdate(wrapperEnvFile.getAbsolutePath()); + Properties wrapperEnvProps = propertiesUpdater.loadExistingProperties(); + + wrapperEnvProps.setProperty("set.heap_min", "-Xms" + deploymentOptions.getHeapSize()); + wrapperEnvProps.setProperty("set.heap_max", "-Xmx" + deploymentOptions.getHeapSize()); + wrapperEnvProps.setProperty("set.heap_new", "-Xmn" + deploymentOptions.getHeapNewSize()); + wrapperEnvProps.setProperty("set.thread_stack_size", "-Xss" + deploymentOptions.getStackSize()); + wrapperEnvProps.setProperty("set.jmx_port", deploymentOptions.getJmxPort().toString()); + + propertiesUpdater.update(wrapperEnvProps); + } catch (IOException e) { + log.error("An error occurred while updating " + wrapperEnvFile, e); + throw new DeploymentException("An error occurred while updating " + wrapperEnvFile, e); + } + } + public void updateFilePerms() { File deployDir = new File(deploymentOptions.getBasedir()); File binDir = new File(deployDir, "bin"); @@ -187,8 +217,8 @@ public class Deployer {
try { authFile.delete(); - StreamUtil.copy(new StringReader(StringUtil.collectionToString(addresses, "\n")), - new FileWriter(authFile), true); + StreamUtil.copy(new StringReader(StringUtil.collectionToString(addresses, "\n")), new FileWriter(authFile), + true); } catch (IOException e) { throw new RuntimeException("Failed to update " + authFile); } diff --git a/modules/enterprise/server/appserver/src/main/bin-resources/bin/wrapper/rhq-storage-wrapper.conf b/modules/enterprise/server/appserver/src/main/bin-resources/bin/wrapper/rhq-storage-wrapper.conf index 14f2ff1..de83364 100644 --- a/modules/enterprise/server/appserver/src/main/bin-resources/bin/wrapper/rhq-storage-wrapper.conf +++ b/modules/enterprise/server/appserver/src/main/bin-resources/bin/wrapper/rhq-storage-wrapper.conf @@ -66,23 +66,25 @@ wrapper.app.parameter.3="-Dcassandra-foreground=yes" # Additional JVM parameters (quotes ARE needed) wrapper.java.additional.1="-ea" wrapper.java.additional.2="-javaagent:"%RHQ_STORAGE_HOME%\lib\jamm-0.2.5.jar"" -wrapper.java.additional.3="-Xms1G" -wrapper.java.additional.4="-Xmx1G" -wrapper.java.additional.5="-XX:+HeapDumpOnOutOfMemoryError" -wrapper.java.additional.6="-XX:+UseParNewGC" -wrapper.java.additional.7="-XX:+UseConcMarkSweepGC" -wrapper.java.additional.8="-XX:+CMSParallelRemarkEnabled" -wrapper.java.additional.9="-XX:SurvivorRatio=8" -wrapper.java.additional.10="-XX:MaxTenuringThreshold=1" -wrapper.java.additional.11="-XX:CMSInitiatingOccupancyFraction=75" -wrapper.java.additional.12="-XX:+UseCMSInitiatingOccupancyOnly" -wrapper.java.additional.13="-Dcom.sun.management.jmxremote.port=7299" -wrapper.java.additional.14="-Dcom.sun.management.jmxremote.ssl=false" -wrapper.java.additional.15="-Dcom.sun.management.jmxremote.authenticate=false" -wrapper.java.additional.16="-Dlog4j.configuration=log4j-server.properties" -wrapper.java.additional.17="-Dlog4j.defaultInitOverride=true" - -# We want to make sure the Storage Node starts in the casandra bin directory 
+wrapper.java.additional.3="%heap_min%" +wrapper.java.additional.4="%heap_max%" +wrapper.java.additional.5="%heap_new%" +wrapper.java.additional.6="%heap_dump_on_OOMError%" +wrapper.java.additional.7="%heap_dump_dir%" +wrapper.java.additional.8="-XX:+UseConcMarkSweepGC" +wrapper.java.additional.9="-XX:+CMSParallelRemarkEnabled" +wrapper.java.additional.10="-XX:SurvivorRatio=8" +wrapper.java.additional.11="-XX:MaxTenuringThreshold=1" +wrapper.java.additional.12="-XX:CMSInitiatingOccupancyFraction=75" +wrapper.java.additional.13="-XX:+UseCMSInitiatingOccupancyOnly" +wrapper.java.additional.14="-XX:+UseParNewGC" +wrapper.java.additional.15="-Dcom.sun.management.jmxremote.port=%jmx_port%" +wrapper.java.additional.16="-Dcom.sun.management.jmxremote.ssl=false" +wrapper.java.additional.17="-Dcom.sun.management.jmxremote.authenticate=false" +wrapper.java.additional.18="-Dlog4j.configuration=log4j-server.properties" +wrapper.java.additional.19="-Dlog4j.defaultInitOverride=true" + +# We want to make sure the Storage Node starts in the cassandra bin directory wrapper.working.dir=%RHQ_STORAGE_HOME%/bin
#***************************************************************************** diff --git a/modules/enterprise/server/appserver/src/main/bin-resources/bin/wrapper/rhq-storage-wrapper.env b/modules/enterprise/server/appserver/src/main/bin-resources/bin/wrapper/rhq-storage-wrapper.env new file mode 100644 index 0000000..1441e0a --- /dev/null +++ b/modules/enterprise/server/appserver/src/main/bin-resources/bin/wrapper/rhq-storage-wrapper.env @@ -0,0 +1,24 @@ +#***************************************************************************** +# RHQ Storage Node Java Service Wrapper Environment Settings File +# +# This file specifies a set of environment variables that will be +# applied to the Storage Node JVM. +# +# THIS FILE SHOULD NOT BE EDITED! +# +# This file represents the values managed as RHQ Storage Node resource +# configuration values. Or, set by the installer. +# +#***************************************************************************** + +set.jmx_port=7299 + +set.heap_min=-Xms512M +set.heap_max=-Xms512M +set.heap_new=-Xmn128M + +set.thread_stack_size=-Xss180k + +set.heap_dump_on_OOMError=-XX:+HeapDumpOnOutOfMemoryError + +set.heap_dump_dir= diff --git a/modules/enterprise/server/appserver/src/main/dev-resources/bin/wrapper/rhq-storage-wrapper.inc b/modules/enterprise/server/appserver/src/main/dev-resources/bin/wrapper/rhq-storage-wrapper.inc index ee0d448..6941358 100644 --- a/modules/enterprise/server/appserver/src/main/dev-resources/bin/wrapper/rhq-storage-wrapper.inc +++ b/modules/enterprise/server/appserver/src/main/dev-resources/bin/wrapper/rhq-storage-wrapper.inc @@ -4,11 +4,11 @@ #
# override and lower the initial memory profile -wrapper.java.additional.18=-Xms128M -wrapper.java.additional.19=-Xmx256M +wrapper.java.additional.20=-Xms128M +wrapper.java.additional.21=-Xmx256M
# enable remote debugging -#wrapper.java.additional.20=-agentlib:jdwp=transport=dt_socket,address=8788,server=y,suspend=n +#wrapper.java.additional.22=-agentlib:jdwp=transport=dt_socket,address=8788,server=y,suspend=n
# disable JVM startup timeout wrapper.startup.timeout=0 diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java index 8d1771d..1667877 100644 --- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java @@ -19,10 +19,17 @@ import org.rhq.core.util.StringUtil; public class StorageNodeConfigDelegate implements ConfigurationFacet {
private File jvmOptsFile; + private File wrapperEnvFile;
public StorageNodeConfigDelegate(File basedir) { File confDir = new File(basedir, "conf"); jvmOptsFile = new File(confDir, "cassandra-jvm.properties"); + + // for windows, config props also get propagated to the wrapper env + if (isWindows()) { + File wrapperDir = new File(basedir, "../bin/wrapper"); + wrapperEnvFile = new File(wrapperDir, "rhq-storage-wrapper.env"); + } }
@Override @@ -56,6 +63,16 @@ public class StorageNodeConfigDelegate implements ConfigurationFacet { return config; }
+ /** + * Ensure that the path uses only forward slash. + * @param path + * @return forward-slashed path, or null if path is null + */ + private static String useForwardSlash(String path) { + + return (null != path) ? path.replace('\\', '/') : null; + } + private String getHeapMinProp(Properties properties) { String value = properties.getProperty("heap_min");
@@ -115,47 +132,14 @@ public class StorageNodeConfigDelegate implements ConfigurationFacet { @Override public void updateResourceConfiguration(ConfigurationUpdateReport configurationUpdateReport) { try { - PropertiesFileUpdate propertiesUpdater = new PropertiesFileUpdate(jvmOptsFile.getAbsolutePath()); - Properties properties = propertiesUpdater.loadExistingProperties(); - Configuration config = configurationUpdateReport.getConfiguration();
- String maxHeapSize = config.getSimpleValue("maxHeapSize"); - if (!StringUtil.isEmpty(maxHeapSize)) { - validateHeapArg("maxHeapSize", maxHeapSize); - // We want min and max heap to be the same - properties.setProperty("heap_min", "-Xms" + maxHeapSize); - properties.setProperty("heap_max", "-Xmx" + maxHeapSize); - } - - String heapNewSize = config.getSimpleValue("heapNewSize"); - if (!StringUtil.isEmpty(heapNewSize)) { - validateHeapArg("heapNewSize", heapNewSize); - properties.setProperty("heap_new", "-Xmn" + heapNewSize); - } + updateCassandraJvmProps(config);
- String threadStackSize = config.getSimpleValue("threadStackSize"); - if (!StringUtil.isEmpty(threadStackSize)) { - validateStackArg(threadStackSize); - properties.setProperty("thread_stack_size", "-Xss" + threadStackSize + "k"); + if (isWindows()) { + updateWrapperEnv(config); }
- PropertySimple heapDumpOnOMMError = config.getSimple("heapDumpOnOOMError"); - if (heapDumpOnOMMError != null) { - if (heapDumpOnOMMError.getBooleanValue()) { - properties.setProperty("heap_dump_on_OOMError", "-XX:+HeapDumpOnOutOfMemoryError"); - } else { - properties.setProperty("heap_dump_on_OOMError", ""); - } - } - - String heapDumpDir = config.getSimpleValue("heapDumpDir"); - if (!StringUtil.isEmpty(heapDumpDir)) { - properties.setProperty("heap_dump_dir", heapDumpDir); - } - - propertiesUpdater.update(properties); - configurationUpdateReport.setStatus(ConfigurationUpdateStatus.SUCCESS); } catch (IllegalArgumentException e) { configurationUpdateReport.setErrorMessage("No configuration update was applied: " + e.getMessage()); @@ -164,6 +148,88 @@ public class StorageNodeConfigDelegate implements ConfigurationFacet { } }
+ private void updateCassandraJvmProps(Configuration config) throws IOException { + PropertiesFileUpdate propertiesUpdater = new PropertiesFileUpdate(jvmOptsFile.getAbsolutePath()); + Properties properties = propertiesUpdater.loadExistingProperties(); + + String maxHeapSize = config.getSimpleValue("maxHeapSize"); + if (!StringUtil.isEmpty(maxHeapSize)) { + validateHeapArg("maxHeapSize", maxHeapSize); + // We want min and max heap to be the same + properties.setProperty("heap_min", "-Xms" + maxHeapSize); + properties.setProperty("heap_max", "-Xmx" + maxHeapSize); + } + + String heapNewSize = config.getSimpleValue("heapNewSize"); + if (!StringUtil.isEmpty(heapNewSize)) { + validateHeapArg("heapNewSize", heapNewSize); + properties.setProperty("heap_new", "-Xmn" + heapNewSize); + } + + String threadStackSize = config.getSimpleValue("threadStackSize"); + if (!StringUtil.isEmpty(threadStackSize)) { + validateStackArg(threadStackSize); + properties.setProperty("thread_stack_size", "-Xss" + threadStackSize + "k"); + } + + PropertySimple heapDumpOnOMMError = config.getSimple("heapDumpOnOOMError"); + if (heapDumpOnOMMError != null) { + if (heapDumpOnOMMError.getBooleanValue()) { + properties.setProperty("heap_dump_on_OOMError", "-XX:+HeapDumpOnOutOfMemoryError"); + } else { + properties.setProperty("heap_dump_on_OOMError", ""); + } + } + + String heapDumpDir = useForwardSlash(config.getSimpleValue("heapDumpDir")); + if (!StringUtil.isEmpty(heapDumpDir)) { + properties.setProperty("heap_dump_dir", heapDumpDir); + } + + propertiesUpdater.update(properties); + } + + private void updateWrapperEnv(Configuration config) throws IOException { + PropertiesFileUpdate propertiesUpdater = new PropertiesFileUpdate(wrapperEnvFile.getAbsolutePath()); + Properties properties = propertiesUpdater.loadExistingProperties(); + + String maxHeapSize = config.getSimpleValue("maxHeapSize"); + if (!StringUtil.isEmpty(maxHeapSize)) { + validateHeapArg("maxHeapSize", maxHeapSize); + // We want min and 
max heap to be the same + properties.setProperty("set.heap_min", "-Xms" + maxHeapSize); + properties.setProperty("set.heap_max", "-Xmx" + maxHeapSize); + } + + String heapNewSize = config.getSimpleValue("heapNewSize"); + if (!StringUtil.isEmpty(heapNewSize)) { + validateHeapArg("heapNewSize", heapNewSize); + properties.setProperty("set.heap_new", "-Xmn" + heapNewSize); + } + + String threadStackSize = config.getSimpleValue("threadStackSize"); + if (!StringUtil.isEmpty(threadStackSize)) { + validateStackArg(threadStackSize); + properties.setProperty("set.thread_stack_size", "-Xss" + threadStackSize + "k"); + } + + PropertySimple heapDumpOnOMMError = config.getSimple("heapDumpOnOOMError"); + if (heapDumpOnOMMError != null) { + if (heapDumpOnOMMError.getBooleanValue()) { + properties.setProperty("set.heap_dump_on_OOMError", "-XX:+HeapDumpOnOutOfMemoryError"); + } else { + properties.setProperty("set.heap_dump_on_OOMError", ""); + } + } + + String heapDumpDir = useForwardSlash(config.getSimpleValue("heapDumpDir")); + if (!StringUtil.isEmpty(heapDumpDir)) { + properties.setProperty("set.heap_dump_dir", heapDumpDir); + } + + propertiesUpdater.update(properties); + } + private void validateHeapArg(String name, String value) { if (value.length() < 2) { throw new IllegalArgumentException(value + " is not a legal value for the property [" + name + "]"); @@ -189,4 +255,8 @@ public class StorageNodeConfigDelegate implements ConfigurationFacet { throw new IllegalArgumentException(value + " is not a legal value for the property [threadStackSize]"); } } + + private boolean isWindows() { + return File.separatorChar == '\'; + } }
commit 969ea38e7254d61903c699380bd066d6cad3e85e Author: Jay Shaughnessy jshaughn@redhat.com Date: Mon Jul 22 14:06:39 2013 -0400
When recreating win services make sure they get started appropriately.
diff --git a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/AbstractInstall.java b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/AbstractInstall.java index 8c885b0..61c8a9c 100644 --- a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/AbstractInstall.java +++ b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/AbstractInstall.java @@ -36,12 +36,16 @@ import java.util.Properties; import org.apache.commons.cli.CommandLine; import org.apache.commons.exec.DefaultExecuteResultHandler; import org.apache.commons.exec.DefaultExecutor; +import org.apache.commons.exec.ExecuteException; import org.apache.commons.exec.Executor; import org.apache.commons.exec.PumpStreamHandler; + import org.jboss.as.controller.client.ModelControllerClient; + import org.rhq.common.jbossas.client.controller.DeploymentJBossASClient; import org.rhq.common.jbossas.client.controller.MCCHelper; import org.rhq.server.control.ControlCommand; +import org.rhq.server.control.RHQControlException;
/** * Common code for commands that perform installs. Basically shared code for Install and Upgrade commands. @@ -55,7 +59,7 @@ public abstract class AbstractInstall extends ControlCommand {
protected final String STORAGE_CONFIG_PROP = "rhqctl.install.storage-config";
- protected void installWindowsService(File workingDir, String batFile) throws Exception { + protected void installWindowsService(File workingDir, String batFile, boolean start) throws Exception { Executor executor = new DefaultExecutor(); executor.setWorkingDirectory(workingDir); executor.setStreamHandler(new PumpStreamHandler()); @@ -69,6 +73,11 @@ public abstract class AbstractInstall extends ControlCommand {
commandLine = getCommandLine(batFile, "install"); executor.execute(commandLine); + + if (start) { + commandLine = getCommandLine(batFile, "start"); + executor.execute(commandLine); + } }
protected void validateCustomStorageDataDirectories(CommandLine commandLine, List<String> errors) { @@ -100,6 +109,54 @@ public abstract class AbstractInstall extends ControlCommand { } }
+ protected void waitForProcessToStop(String pid) throws Exception { + + if (isWindows() || pid == null) { + // For the moment we have no better way to just wait some time + Thread.sleep(10 * 1000L); + } else { + int tries = 5; + while (tries > 0) { + log.debug("."); + if (!isUnixPidRunning(pid)) { + break; + } + Thread.sleep(2 * 1000L); + tries--; + } + if (tries == 0) { + throw new RHQControlException("Process [" + pid + + "] did not finish yet. Terminate it manually and retry."); + } + } + + } + + protected boolean isUnixPidRunning(String pid) { + + Executor executor = new DefaultExecutor(); + executor.setWorkingDirectory(getBinDir()); + executor.setStreamHandler(new PumpStreamHandler()); + org.apache.commons.exec.CommandLine commandLine; + + commandLine = new org.apache.commons.exec.CommandLine("/bin/kill").addArgument("-0").addArgument(pid); + + try { + int code = executor.execute(commandLine); + if (code != 0) { + return false; + } + } catch (ExecuteException ee) { + if (ee.getExitValue() == 1) { + // return code 1 means process does not exist + return false; + } + } catch (IOException e) { + log.error("Checking for running process failed: " + e.getMessage()); + } + return true; + } + protected void waitForRHQServerToInitialize() throws Exception { try { final long messageInterval = 30000L; @@ -287,7 +344,6 @@ public abstract class AbstractInstall extends ControlCommand {
log.debug("Stopping RHQ server...");
- Executor executor = new DefaultExecutor(); executor.setWorkingDirectory(serverBinDir); executor.setStreamHandler(new PumpStreamHandler()); diff --git a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Install.java b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Install.java index bb6aa40..0808db2 100644 --- a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Install.java +++ b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Install.java @@ -142,7 +142,7 @@ public class Install extends AbstractInstall { if (!isStorageInstalled()) { installStorageNode(getStorageBasedir(), commandLine); } else if (isWindows()) { - installWindowsService(getBinDir(), "rhq-storage"); + installWindowsService(getBinDir(), "rhq-storage", true); }
if (!isServerInstalled()) { @@ -150,7 +150,7 @@ public class Install extends AbstractInstall { runRHQServerInstaller(); waitForRHQServerToInitialize(); } else if (isWindows()) { - installWindowsService(getBinDir(), "rhq-server"); + installWindowsService(getBinDir(), "rhq-server", true); }
if (!isAgentInstalled()) { @@ -158,13 +158,15 @@ public class Install extends AbstractInstall { File agentBasedir = getAgentBasedir(); installAgent(agentBasedir); configureAgent(agentBasedir, commandLine); - if (Boolean.parseBoolean(commandLine.getOptionValue(AGENT_AUTOSTART_OPTION, "true"))) { + boolean start = Boolean.parseBoolean(commandLine.getOptionValue(AGENT_AUTOSTART_OPTION, "true")); + if (start) { startAgent(agentBasedir, true); } else { log.info("The agent was installed but was told not to start automatically."); } } else if (isWindows()) { - installWindowsService(new File(getAgentBasedir(), "bin"), "rhq-agent-wrapper"); + boolean start = Boolean.parseBoolean(commandLine.getOptionValue(AGENT_AUTOSTART_OPTION, "true")); + installWindowsService(new File(getAgentBasedir(), "bin"), "rhq-agent-wrapper", start); }
} else { @@ -173,7 +175,7 @@ public class Install extends AbstractInstall { log.info("The RHQ storage node is already installed in " + new File(getBaseDir(), "storage"));
if (isWindows()) { - installWindowsService(getBinDir(), "rhq-storage"); + installWindowsService(getBinDir(), "rhq-storage", true); } else { log.info("Skipping storage node installation."); } @@ -200,7 +202,7 @@ public class Install extends AbstractInstall { log.warn("The RHQ server is already installed.");
if (isWindows()) { - installWindowsService(getBinDir(), "rhq-server"); + installWindowsService(getBinDir(), "rhq-server", true); } else { log.info("Skipping server installation."); } @@ -217,8 +219,10 @@ public class Install extends AbstractInstall { if (isAgentInstalled() && !commandLine.hasOption(STORAGE_OPTION)) { log.info("The RHQ agent is already installed in [" + getAgentBasedir() + "]");
+ boolean start = Boolean + .parseBoolean(commandLine.getOptionValue(AGENT_AUTOSTART_OPTION, "true")); if (isWindows()) { - installWindowsService(new File(getAgentBasedir(), "bin"), "rhq-agent-wrapper"); + installWindowsService(new File(getAgentBasedir(), "bin"), "rhq-agent-wrapper", start); } else { log.info("Skipping agent installation."); }
commit e1461de493712c30e454fa51f946a72cadf2c257 Author: Stefan Negrea snegrea@redhat.com Date: Mon Jul 22 07:47:00 2013 -0500
Fix merge rebase issue.
diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java index 4744e96..3b0aa5b 100644 --- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java @@ -41,8 +41,8 @@ import org.mc4j.ems.connection.bean.attribute.EmsAttribute; import org.mc4j.ems.connection.bean.operation.EmsOperation;
import org.rhq.core.domain.configuration.Configuration; -import org.rhq.core.domain.configuration.Property; import org.rhq.core.domain.configuration.ConfigurationUpdateStatus; +import org.rhq.core.domain.configuration.Property; import org.rhq.core.domain.configuration.PropertyList; import org.rhq.core.domain.configuration.PropertyMap; import org.rhq.core.domain.configuration.PropertySimple; @@ -116,7 +116,8 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper
if (!configurationUpdate.getStatus().equals(ConfigurationUpdateStatus.SUCCESS)) { result.setErrorMessage(configurationUpdate.getErrorMessage()); - + } + return result; }
commit 642421966cbea990df8cf0593e1fb82a4c1a98a8 Author: Stefan Negrea snegrea@redhat.com Date: Mon Jul 22 07:40:53 2013 -0500
Attempt to improve the operation trigger for updates on the storage node configuration.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index c77e229..c2a7b46 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -64,6 +64,7 @@ import org.rhq.core.domain.measurement.MeasurementUnits; import org.rhq.core.domain.operation.OperationRequestStatus; import org.rhq.core.domain.operation.ResourceOperationHistory; import org.rhq.core.domain.operation.bean.GroupOperationSchedule; +import org.rhq.core.domain.operation.bean.ResourceOperationSchedule; import org.rhq.core.domain.resource.Resource; import org.rhq.core.domain.resource.ResourceType; import org.rhq.core.domain.resource.group.ResourceGroup; @@ -101,8 +102,8 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN private static final String RHQ_STORAGE_JMX_PORT_PROPERTY = "jmxPort"; private static final String RHQ_STORAGE_ADDRESS_PROPERTY = "host";
- private static final int OPERATION_QUERY_TIMEOUT = 1000; - private static final int MAX_ITERATIONS = 5; + private static final int OPERATION_QUERY_TIMEOUT = 20000; + private static final int MAX_ITERATIONS = 6; private static final String UPDATE_CONFIGURATION_OPERATION = "updateConfiguration"; private static final String RESTART_OPERATION = "restart";
@@ -636,8 +637,16 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
//scheduling the operation long operationStartTime = System.currentTimeMillis(); - operationManager.scheduleResourceOperation(subject, storageNodeResource.getId(), operationToRun, 0, 0, 0, 0, - parameters, "Run by StorageNodeManagerBean"); + + ResourceOperationSchedule newSchedule = new ResourceOperationSchedule(); + newSchedule.setJobTrigger(JobTrigger.createNowTrigger()); + newSchedule.setResource(storageNodeResource); + newSchedule.setOperationName(operationToRun); + newSchedule.setDescription("Run by StorageNodeManagerBean"); + newSchedule.setParameters(parameters); + + operationManager.scheduleResourceOperation(subject, newSchedule); + entityManager.flush();
//waiting for the operation result then return it int iteration = 0;
commit a7e279884e1a7ee6c0782c1d5e79a27dcff26b76 Author: Stefan Negrea snegrea@redhat.com Date: Mon Jul 22 07:39:55 2013 -0500
Implement the storage node operation for updating jvm options.
diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java index 006dd26..4744e96 100644 --- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java @@ -42,6 +42,7 @@ import org.mc4j.ems.connection.bean.operation.EmsOperation;
import org.rhq.core.domain.configuration.Configuration; import org.rhq.core.domain.configuration.Property; +import org.rhq.core.domain.configuration.ConfigurationUpdateStatus; import org.rhq.core.domain.configuration.PropertyList; import org.rhq.core.domain.configuration.PropertyMap; import org.rhq.core.domain.configuration.PropertySimple; @@ -102,7 +103,20 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper
private OperationResult updateConfiguration(Configuration params) { OperationResult result = new OperationResult("Configuration updated."); - //TODO: implement updates to various sub-resources here + + //update storage node jvm settings + Configuration config = new Configuration(); + config.put(new PropertySimple("minHeapSize", params.getSimpleValue("heapSize"))); + config.put(new PropertySimple("maxHeapSize", params.getSimpleValue("heapSize"))); + config.put(new PropertySimple("heapNewSize", params.getSimpleValue("heapNewSize"))); + config.put(new PropertySimple("threadStackSize", params.getSimpleValue("threadStackSize"))); + + ConfigurationUpdateReport configurationUpdate = new ConfigurationUpdateReport(config); + this.updateResourceConfiguration(configurationUpdate); + + if (!configurationUpdate.getStatus().equals(ConfigurationUpdateStatus.SUCCESS)) { + result.setErrorMessage(configurationUpdate.getErrorMessage()); + return result; }
commit 26a7f7f476e03367d52e9e229f2e583c789e7bec Author: Stefan Negrea snegrea@redhat.com Date: Mon Jul 22 07:38:15 2013 -0500
Add two more properties to the list of configurable properties for the storaga node. The new properties are heap new size and thread stack size.
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeConfigurationComposite.java b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeConfigurationComposite.java index e2c64f9..32d8ab3 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeConfigurationComposite.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeConfigurationComposite.java @@ -33,6 +33,8 @@ public class StorageNodeConfigurationComposite implements Serializable { private StorageNode storageNode; private int jmxPort; private String heapSize; + private String threadStackSize; + private String heapNewSize;
public StorageNodeConfigurationComposite() { // GWT needs this @@ -85,14 +87,44 @@ public class StorageNodeConfigurationComposite implements Serializable { this.heapSize = heapSize; }
+ /** + * @return the threadStackSize + */ + public String getThreadStackSize() { + return threadStackSize; + } + + /** + * @param threadStackSize the threadStackSize to set + */ + public void setThreadStackSize(String threadStackSize) { + this.threadStackSize = threadStackSize; + } + + /** + * @return the heapNewSize + */ + public String getHeapNewSize() { + return heapNewSize; + } + + /** + * @param heapNewSize the heapNewSize to set + */ + public void setHeapNewSize(String heapNewSize) { + this.heapNewSize = heapNewSize; + } + /* (non-Javadoc) * @see java.lang.Object#toString() */ public String toString() { StringBuilder builder = new StringBuilder(); builder.append("storageNode.addresss=").append(storageNode.getAddress()).append(", "); + builder.append("jmxPort=").append(jmxPort).append(","); builder.append("heapSize=").append(heapSize).append(", "); - builder.append("jmxPort=").append(jmxPort).append(""); + builder.append("heapNewSize=").append(heapSize).append(", "); + builder.append("threadStackSize=").append(threadStackSize).append(""); return builder.toString(); } }
commit a0285ac83b5b673dab0582f9ff52baaa746fd8cd Author: Stefan Negrea snegrea@redhat.com Date: Mon Jul 22 07:37:10 2013 -0500
Update the set of calculated metrics for Cassandra to incorporate latest feedback. There are three metrics now: total system used space percentage, data file disk space used percentage, and free to data disk space ratio.
Also, update the alert definition for disk space to monitor all three metrics.
diff --git a/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java b/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java index 8995ec0..37d10a8 100644 --- a/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java +++ b/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java @@ -61,7 +61,9 @@ public class AlertDefinitionServerPluginComponent implements ServerPluginCompone
private final Log log = LogFactory.getLog(AlertDefinitionServerPluginComponent.class);
- private static final String PARTITION_DISK_USED_PERCENTAGE_METRIC_NAME = "Calculated.PartitionDiskUsedPercentage"; + private static final String DATA_DISK_USED_PERCENTAGE_METRIC_NAME = "Calculated.DataDiskUsedPercentage"; + private static final String TOTAL_DISK_USED_PERCENTAGE_METRIC_NAME = "Calculated.TotalDiskUsedPercentage"; + private static final String FREE_DISK_TO_DATA_SIZE_RATIO_METRIC_NAME = "Calculated.FreeDiskToDataSizeRatio";
static private final List<InjectedTemplate> injectedTemplates; static private final InjectedTemplate storageNodeHighHeapTemplate; @@ -297,23 +299,42 @@ public class AlertDefinitionServerPluginComponent implements ServerPluginCompone newTemplate.setRecoveryId(0); newTemplate.setEnabled(true);
- AlertCondition ac = new AlertCondition(); - ac.setCategory(AlertConditionCategory.THRESHOLD); - ac.setComparator(">"); - ac.setThreshold(0.5D); + + AlertCondition dataDiskUsedAlertCondition = new AlertCondition(); + dataDiskUsedAlertCondition.setCategory(AlertConditionCategory.THRESHOLD); + dataDiskUsedAlertCondition.setComparator(">"); + dataDiskUsedAlertCondition.setThreshold(0.5D); + + AlertCondition totalDiskUsedAlertCondition = new AlertCondition(); + totalDiskUsedAlertCondition.setCategory(AlertConditionCategory.THRESHOLD); + totalDiskUsedAlertCondition.setComparator(">"); + totalDiskUsedAlertCondition.setThreshold(0.75D); + + AlertCondition freeSpaveDataRatioAlertCondition = new AlertCondition(); + freeSpaveDataRatioAlertCondition.setCategory(AlertConditionCategory.THRESHOLD); + freeSpaveDataRatioAlertCondition.setComparator("<"); + freeSpaveDataRatioAlertCondition.setThreshold(1.5D);
List<Integer> measurementDefinitionIds = new ArrayList<Integer>(1); for (MeasurementDefinition d : resourceType.getMetricDefinitions()) { - if (PARTITION_DISK_USED_PERCENTAGE_METRIC_NAME.equals(d.getName())) { + if (DATA_DISK_USED_PERCENTAGE_METRIC_NAME.equals(d.getName())) { measurementDefinitionIds.add(d.getId()); - ac.setMeasurementDefinition(d); - ac.setName(d.getDisplayName()); + dataDiskUsedAlertCondition.setMeasurementDefinition(d); + dataDiskUsedAlertCondition.setName(d.getDisplayName()); + } else if (TOTAL_DISK_USED_PERCENTAGE_METRIC_NAME.equals(d.getName())) { + measurementDefinitionIds.add(d.getId()); + totalDiskUsedAlertCondition.setMeasurementDefinition(d); + totalDiskUsedAlertCondition.setName(d.getDisplayName()); + } else if (FREE_DISK_TO_DATA_SIZE_RATIO_METRIC_NAME.equals(d.getName())) { + measurementDefinitionIds.add(d.getId()); + freeSpaveDataRatioAlertCondition.setMeasurementDefinition(d); + freeSpaveDataRatioAlertCondition.setName(d.getDisplayName()); } } - assert null != ac.getMeasurementDefinition() : "Did not find expected measurement definition " - + PARTITION_DISK_USED_PERCENTAGE_METRIC_NAME + " for " - + resourceType; - newTemplate.addCondition(ac); + + newTemplate.addCondition(dataDiskUsedAlertCondition); + newTemplate.addCondition(totalDiskUsedAlertCondition); + newTemplate.addCondition(freeSpaveDataRatioAlertCondition);
AlertDampening dampener = new AlertDampening(AlertDampening.Category.PARTIAL_COUNT); dampener.setPeriod(15); diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/StorageServiceComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/StorageServiceComponent.java index e5a2283..76ce2b2 100644 --- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/StorageServiceComponent.java +++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/StorageServiceComponent.java @@ -58,7 +58,9 @@ import org.rhq.plugins.jmx.JMXComponent; public class StorageServiceComponent extends ComplexConfigurationResourceComponent {
private static final String OWNERSHIP_METRIC_NAME = "Ownership"; - private static final String PARTITION_DISK_USED_PERCENTAGE_METRIC_NAME = "Calculated.PartitionDiskUsedPercentage"; + private static final String DATA_DISK_USED_PERCENTAGE_METRIC_NAME = "Calculated.DataDiskUsedPercentage"; + private static final String TOTAL_DISK_USED_PERCENTAGE_METRIC_NAME = "Calculated.TotalDiskUsedPercentage"; + private static final String FREE_DISK_TO_DATA_SIZE_RATIO_METRIC_NAME = "Calculated.FreeDiskToDataSizeRatio"; private static final String DATA_FILE_LOCATIONS_NAME = "AllDataFileLocations"; private static final String LOAD_NAME = "Load";
@@ -156,6 +158,22 @@ public class StorageServiceComponent extends ComplexConfigurationResourceCompone @Override protected void getValues(MeasurementReport report, Set<MeasurementScheduleRequest> requests, EmsBean bean) { super.getValues(report, requests, bean); + + EmsAttribute loadAttribute = bean.getAttribute(LOAD_NAME); + Object loadValue = loadAttribute.refresh(); + + EmsAttribute dataFileLocationAttribute = bean.getAttribute(DATA_FILE_LOCATIONS_NAME); + Object dataFileLocationValue = dataFileLocationAttribute.refresh(); + + double load = 0; + if (loadValue != null && dataFileLocationValue != null && dataFileLocationValue instanceof String[]) { + //Please visit for details: https://issues.apache.org/jira/browse/CASSANDRA-2749 + //The average usage of all partitions with the data will be reported. + //Cassandra selects the partition with most free space for SStable flush and compaction. + load = Double.parseDouble(loadValue.toString()); + load = load / 1024d; //transform in MB + } + for (MeasurementScheduleRequest request : requests) { if (OWNERSHIP_METRIC_NAME.equals(request.getName()) && host != null) { EmsAttribute attribute = bean.getAttribute(OWNERSHIP_METRIC_NAME); @@ -179,49 +197,53 @@ public class StorageServiceComponent extends ComplexConfigurationResourceCompone report.addData(new MeasurementDataNumeric(request, value.doubleValue())); } break; - } else if (PARTITION_DISK_USED_PERCENTAGE_METRIC_NAME.equals(request.getName())) { - - - EmsAttribute loadAttribute = bean.getAttribute(LOAD_NAME); - Object loadValue = loadAttribute.refresh(); - - EmsAttribute dataFileLocationAttribute = bean.getAttribute(DATA_FILE_LOCATIONS_NAME); - Object dataFileLocationValue = dataFileLocationAttribute.refresh(); - - if (loadValue != null && dataFileLocationValue != null && dataFileLocationValue instanceof String[]) { - //Please visit for details: https://issues.apache.org/jira/browse/CASSANDRA-2749 - //The average usage of all partitions with the data will be reported. 
- //Cassandra selects the partition with most free space for SStable flush and compaction. - double load = Double.parseDouble(loadValue.toString()); - - report.addData(new MeasurementDataNumeric(request, getPartitionDiskUsedPercentage(load, - (String[]) dataFileLocationValue))); - } + } else if (DATA_DISK_USED_PERCENTAGE_METRIC_NAME.equals(request.getName()) + || TOTAL_DISK_USED_PERCENTAGE_METRIC_NAME.equals(request.getName()) + || FREE_DISK_TO_DATA_SIZE_RATIO_METRIC_NAME.equals(request.getName())) { + double metricValue = getDiskUsageMetric(request, load, (String[]) dataFileLocationValue); + report.addData(new MeasurementDataNumeric(request, metricValue)); } } }
- private double getPartitionDiskUsedPercentage(double dataSize, String[] paths) { + private double getDiskUsageMetric(MeasurementScheduleRequest request, double dataSize, String[] paths) { List<String> visitedMountPoints = new ArrayList<String>(); long totalDiskSpace = 0; + long totalFreeDiskSpace = 0; + long totalUsedDiskSpace = 0;
for (String path : paths) { try { FileSystemInfo fileSystemInfo = this.getResourceContext().getSystemInformation().getFileSystem(path); if (!visitedMountPoints.contains(fileSystemInfo.getMountPoint())) { visitedMountPoints.add(fileSystemInfo.getMountPoint()); + + //contrary to Sigar documentation this values are reported in MB and not bytes totalDiskSpace += fileSystemInfo.getFileSystemUsage().getTotal(); + totalFreeDiskSpace += fileSystemInfo.getFileSystemUsage().getFree(); + totalUsedDiskSpace += fileSystemInfo.getFileSystemUsage().getUsed(); } } catch (Exception e) { log.error("Unable to determine file system usage information for data file location " + path, e); } }
+ double metricValue = 0; + + if (totalDiskSpace != 0) { - double rawPercentage = dataSize / ((double) totalDiskSpace); - return Math.round(rawPercentage * 100.0) / 100.0; + double rawPercentage = 0; + if (DATA_DISK_USED_PERCENTAGE_METRIC_NAME.equals(request.getName())) { + rawPercentage = dataSize / ((double) totalDiskSpace); + } else if (TOTAL_DISK_USED_PERCENTAGE_METRIC_NAME.equals(request.getName())) { + rawPercentage = ((double) totalUsedDiskSpace) / ((double) totalDiskSpace); + } else if (FREE_DISK_TO_DATA_SIZE_RATIO_METRIC_NAME.equals(request.getName())) { + rawPercentage = ((double) totalFreeDiskSpace) / (double) dataSize; + } + + metricValue = Math.round(rawPercentage * 100d) / 100d; }
- return 0; + return metricValue; } } diff --git a/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml index a1b3412..5bbebed 100644 --- a/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml +++ b/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml @@ -186,9 +186,11 @@ </parameters> </operation>
+ <metric property="Calculated.DataDiskUsedPercentage" displayName="Data File Disk Used Percentage" dataType="measurement" units="percentage" displayType="summary" description="Percentage of disk space used by Cassandra data files. The aggregate accross all the partitions will be reported if multiple data locations are specified. This is a calculated metric derived from system and Cassandra runtime information."/> + <metric property="Calculated.TotalDiskUsedPercentage" displayName="Total Disk Used Percentage" dataType="measurement" units="percentage" displayType="summary" description="Percentage of total disk space used. The metric acounts overall disk usage (including system files), not just disk space used by Cassandra. The aggregate accross all the partitions will be reported if multiple data locations are specified. This is a calculated metric derived from system and Cassandra runtime information."/> + <metric property="Calculated.FreeDiskToDataSizeRatio" displayName="Free Disk to Data Size Ratio" dataType="measurement" displayType="summary" description="Ratio of (Amount of Free Disk)/(Data File Size). A value below 1 is not recommended since a compaction or repair process could double the amount of disk space used by data files. The aggregate accross all the partitions will be reported if multiple data locations are specified. This is a calculated metric derived from system and Cassandra runtime information."/> + <metric property="CurrentGenerationNumber" dataType="trait" displayType="summary" description="Current generation number"/> - <metric property="Calculated.PartitionDiskUsedPercentage" displayName="Data File Disk Used Percentage" dataType="measurement" units="percentage" displayType="summary" description="Percentage of disk space used by the data files. 
If multiple data locations are specified then this will report - the average utilization accross all the partitions that contain data files."/> <metric property="ExceptionCount" measurementType="trendsup" dataType="measurement" displayType="summary" description="Exception Count"/> <metric property="Initialized" dataType="trait" displayType="summary" description="Initialized"/> <metric property="Joined" dataType="trait" displayType="summary" description="Joined"/>
commit 82d0075ed860dcc2f2aed57191e4fa64806caa97 Author: Jirka Kremser jkremser@redhat.com Date: Mon Jul 22 14:39:39 2013 +0200
Entities < and > in the plugin descriptor were interpreted as HTML in the ConfigurationEditor and the content between them wasn't there.
diff --git a/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml index e6bc1da..a1b3412 100644 --- a/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml +++ b/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml @@ -159,7 +159,7 @@ <operation name="takeSnapshot" description="Takes a snapshot of all keyspaces. A snapshot first flushes all in-memory writes to disk and then creates a hard link of each SSTable file for each keyspace. Note that a column family can have multiple - SSTables on disk. By default snapshots are stored in the <cassandra_data_dir>/<keyspace_name>/<column_family_name>/snapshots + SSTables on disk. By default snapshots are stored in the [cassandra_data_dir]/[keyspace_name]/[column_family_name]/snapshots directory. On Linux/UNIX systems cassandra_data_dir defaults to /var/lib/cassandra/data"> <parameters> <c:simple-property name="snapshotName" required="false" type="string" displayName="Snapshot Name"
commit 1a520b6c33dd2bde09d58ea7e51d04c83622f481 Author: John Sanda jsanda@redhat.com Date: Mon Jul 22 07:17:20 2013 -0400
comment out exporting test ear
Might be nice to enable exporting test ear with a system property where the location is also configurable.
diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java index 2fd8624..8010c2b 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java @@ -413,7 +413,8 @@ public abstract class AbstractEJB3Test extends Arquillian { //System.out.println("** The Deployment EAR: " + testEar.toString(true) + "\n");
// Save the test EAR to a zip file for inspection (set file explicitly) - exportZip(testEar, new File("/Users/jsanda/tmp/test-ear.ear")); + //String tmpDir = System.getProperty("java.io.tmpdir"); + //exportZip(testEar, new File(tmpDir, "test.ear"));
return testEar; }
commit df8669222aa3146b71b3252166af6bdd387806cd Author: Heiko W. Rupp hwr@redhat.com Date: Mon Jul 22 10:38:44 2013 +0200
Add more units.
diff --git a/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/Units.java b/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/Units.java index 0ae4011..150b9e0 100644 --- a/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/Units.java +++ b/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/Units.java @@ -1,6 +1,6 @@ /* * JBoss, Home of Professional Open Source. - * Copyright 2009, Red Hat, Inc. and/or its affiliates, and + * Copyright 2009-2013, Red Hat, Inc. and/or its affiliates, and * individual contributors as indicated by the @author tags. See the * copyright.txt file in the distribution for a full listing of * individual contributors. @@ -26,10 +26,18 @@ package org.rhq.helpers.pluginAnnotations.agent; * Metric Units. * * @author Galder Zamarreño + * @author Heiko W. Rupp + * See also org.rhq.core.domain.measurement.MeasurementUnits * @since 4.0 */ +@SuppressWarnings("unused") public enum Units { - NONE, MILLISECONDS, SECONDS, PERCENTAGE; + NONE, PERCENTAGE, + BYTES, KILOBYTES, MEGABYTES, GIGABYTES, TERABYTES, PETABYTES, + BITS, KILOBITS, MEGABITS, GIGABITS, TERABITS, PETABITS, + EPOCH_MILLISECONDS, EPOCH_SECONDS, + JIFFYS, NANOSECONDS, MICROSECONDS, MILLISECONDS, SECONDS, MINUTES, HOURS, DAYS, + CELSIUS, KELVIN, FAHRENHEIT;
@Override public String toString() {
commit 14edae37060b25ebcfed621deef0281c72b1cea2 Author: John Sanda jsanda@redhat.com Date: Sun Jul 21 21:22:43 2013 -0400
adding resource operation to update internode auth conf file
The operation updates the file on disk and then invokes the JMX operation to have the authenticator reload the configuration so that the changes can be picked up without having to restart the node.
diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java index 380da65..006dd26 100644 --- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java @@ -26,7 +26,12 @@ package org.rhq.plugins.storage;
import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.io.StringReader; +import java.util.HashSet; import java.util.List; +import java.util.Set;
import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -36,6 +41,7 @@ import org.mc4j.ems.connection.bean.attribute.EmsAttribute; import org.mc4j.ems.connection.bean.operation.EmsOperation;
import org.rhq.core.domain.configuration.Configuration; +import org.rhq.core.domain.configuration.Property; import org.rhq.core.domain.configuration.PropertyList; import org.rhq.core.domain.configuration.PropertyMap; import org.rhq.core.domain.configuration.PropertySimple; @@ -43,7 +49,10 @@ import org.rhq.core.pluginapi.configuration.ConfigurationFacet; import org.rhq.core.pluginapi.configuration.ConfigurationUpdateReport; import org.rhq.core.pluginapi.operation.OperationFacet; import org.rhq.core.pluginapi.operation.OperationResult; +import org.rhq.core.util.StringUtil; import org.rhq.core.util.exception.ThrowableUtil; +import org.rhq.core.util.file.FileUtil; +import org.rhq.core.util.stream.StreamUtil; import org.rhq.plugins.cassandra.CassandraNodeComponent; import org.rhq.plugins.cassandra.util.KeyspaceService;
@@ -84,6 +93,8 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper return readRepair(); } else if (name.equals("updateConfiguration")) { return updateConfiguration(parameters); + } else if (name.equals("updateKnownNodes")) { + return updateKnownNodes(parameters); } else { return super.invokeOperation(name, parameters); } @@ -95,6 +106,73 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper return result; }
+ private OperationResult updateKnownNodes(Configuration params) { + OperationResult result = new OperationResult(); + + PropertyList propertyList = params.getList("ipAddresses"); + Set<String> ipAddresses = new HashSet<String>(); + + for (Property property : propertyList.getList()) { + PropertySimple propertySimple = (PropertySimple) property; + ipAddresses.add(propertySimple.getStringValue()); + } + + log.info("Updating known nodes to " + ipAddresses); + + File confDir = new File(getBasedir(), "conf"); + File authFile = new File(confDir, "rhq-storage-auth.conf"); + File authBackupFile = new File(confDir, "." + authFile.getName() + ".bak"); + + if (authBackupFile.exists()) { + if (log.isDebugEnabled()) { + log.debug(authBackupFile + " already exists. Deleting it now in preparation of creating new backup " + + "for " + authFile.getName()); + } + if (!authBackupFile.delete()) { + String msg = "Failed to delete backup file " + authBackupFile + ". The operation will abort " + + "since " + authFile + " cannot reliably be backed up before making changes. Please delete " + + authBackupFile + " manually and reschedule the operation once the file has been removed."; + log.error(msg); + result.setErrorMessage(msg); + + return result; + } + } + + try { + FileUtil.copyFile(authFile, authBackupFile); + } catch (IOException e) { + String msg = "Failed to backup " + authFile + " prior to making updates. 
The operation will abort due " + + "to unexpected error"; + log.error(msg, e); + result.setErrorMessage(msg + ": " + ThrowableUtil.getRootMessage(e)); + return result; + } + + try { + StreamUtil.copy(new StringReader(StringUtil.collectionToString(ipAddresses, "\n")), + new FileWriter(authFile), true); + } catch (IOException e) { + log.error("An error occurred while updating " + authFile, e); + try { + FileUtil.copyFile(authBackupFile, authFile); + } catch (IOException e1) { + log.error("Failed to revert backup of " + authFile, e1); + } + result.setErrorMessage("There was an unexpected error while updating " + authFile + ". Make sure that " + + "it matches " + authBackupFile + " and then reschedule the operation."); + return result; + } + + EmsBean authBean = getEmsConnection().getBean("org.rhq.cassandra.auth:type=RhqInternodeAuthenticator"); + EmsOperation emsOperation = authBean.getOperation("reloadConfiguration"); + emsOperation.invoke(); + + result.setSimpleResult("Successfully updated the set of known nodes."); + + return result; + } + private OperationResult nodeAdded(Configuration params) { boolean runRepair = params.getSimple("runRepair").getBooleanValue(); boolean updateSeedsList = params.getSimple("updateSeedsList").getBooleanValue(); diff --git a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml index 6ed31b7..8156d02 100644 --- a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml +++ b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml @@ -93,6 +93,14 @@ </results> </operation>
+ <operation name="updateKnownNodes"> + <parameters> + <c:list-property name="ipAddresses"> + <c:simple-property name="ipAddress"/> + </c:list-property> + </parameters> + </operation> + <operation name="prepareForUpgrade" description="Prepares the storage node for upgrade (this operation consists of following steps: 1) turning off the RPC server, 2) turning off the gossiper, 3) taking the snapshot (backuping the data), 4) invoking the drain operation"> <parameters> <c:simple-property name="snapshotName" required="false" type="string" displayName="Snapshot Name"
commit 38777a49e6b5c5f92c54c3f41511f5f691d2a548 Author: John Sanda jsanda@redhat.com Date: Sun Jul 21 21:21:45 2013 -0400
update cassandra deployer itest
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra-jvm.properties b/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra-jvm.properties index a90d23c..774a831 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra-jvm.properties +++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra-jvm.properties @@ -17,9 +17,9 @@ heap_dump_dir=""
thread_stack_size=-Xss180k
-java_agent="" # Enable jamm when running on Java 6 patch version 23 or higher. #java_agent="-javaagent:$CASSANDRA_HOME/lib/jamm-0.2.5.jar" +java_agent=-javaagent:$CASSANDRA_HOME/lib/jamm-0.2.5.jar
# GC tuning options # diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra.yaml b/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra.yaml index 15d08f2..b9e490a 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra.yaml +++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra.yaml @@ -323,7 +323,7 @@ listen_address: localhost
# Internode authentication backend, implementing IInternodeAuthenticator; # used to allow/disallow connections from peer nodes. -# internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator +internode_authenticator: org.rhq.cassandra.auth.RhqInternodeAuthenticator
# Whether to start the native transport server. # Currently, only the thrift server is started by default because the native
commit 1750170844f6a68b11256ba001fe6cffb53439f6 Author: John Sanda jsanda@redhat.com Date: Sun Jul 21 21:14:48 2013 -0400
temporarily disable the quartz job that is kicked off when a new node is added
I need to disable the job while working on the internode authentication stuff. If the maintenance operations run on the nodes, the cluster can actually get into a bad state because the nodes' authentication conf files have not been updated which means the new node is not actually part of the cluster.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 3f1ec69..c77e229 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -170,7 +170,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
entityManager.persist(storageNode);
- scheduleQuartzJob(storageNodes.size()); +// scheduleQuartzJob(storageNodes.size()); } } }
commit fe6da9b5511a3b4c97c2421079db04020c33c27f Author: John Sanda jsanda@redhat.com Date: Sun Jul 21 21:11:43 2013 -0400
pre-configure internode auth conf file
From testing I have done thus far it appears that a storage node should have its own IP address included in the internode authentication config file. The storage installer updates the auth file to include the node's address as well as any seeds.
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java index edf1430..c8bb2ef 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java +++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java @@ -34,6 +34,7 @@ import java.io.FileWriter; import java.io.IOException; import java.io.StringReader; import java.io.StringWriter; +import java.net.InetAddress; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -109,6 +110,13 @@ public class CassandraClusterManager {
List<StorageNode> nodes = new ArrayList<StorageNode>(deploymentOptions.getNumNodes()); String seeds = collectionToString(calculateLocalIPAddresses(deploymentOptions.getNumNodes())); + Set<InetAddress> ipAddresses = null; + + try { + ipAddresses = getClusterIPAddresses(); + } catch (IOException e) { + throw new RuntimeException("Failed to get cluster IP addresses", e); + }
for (int i = 0; i < deploymentOptions.getNumNodes(); ++i) { File basedir = new File(deploymentOptions.getClusterDir(), "node" + i); @@ -141,7 +149,7 @@ public class CassandraClusterManager { storageNode.setCqlPort(nodeOptions.getNativeTransportPort()); nodes.add(storageNode);
- updateStorageAuthConf(basedir); + deployer.updateStorageAuthConf(ipAddresses);
installedNodeDirs.add(basedir); } catch (Exception e) { @@ -193,6 +201,15 @@ public class CassandraClusterManager { return i <= seedsArray.length ? seedsArray[i - 1] : ("127.0.0." + i); }
+ private Set<InetAddress> getClusterIPAddresses() throws IOException { + Set<InetAddress> ipAddresses = new HashSet<InetAddress>(); + for (String address : calculateLocalIPAddresses(deploymentOptions.getNumNodes())) { + ipAddresses.add(InetAddress.getByName(address)); + } + + return ipAddresses; + } + private List<StorageNode> calculateNodes() { List<StorageNode> nodes = new ArrayList<StorageNode>(deploymentOptions.getNumNodes()); for (int i = 0; i < deploymentOptions.getNumNodes(); ++i) { diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java index b01ebe9..6547043 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java +++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java @@ -30,13 +30,18 @@ import java.io.FileReader; import java.io.FileWriter; import java.io.IOException; import java.io.InputStream; +import java.io.StringReader; +import java.net.InetAddress; +import java.util.HashSet; import java.util.Map; import java.util.Properties; +import java.util.Set;
import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory;
import org.rhq.core.util.PropertiesFileUpdate; +import org.rhq.core.util.StringUtil; import org.rhq.core.util.TokenReplacingReader; import org.rhq.core.util.ZipUtil; import org.rhq.core.util.stream.StreamUtil; @@ -171,4 +176,22 @@ public class Deployer { } }
+ public void updateStorageAuthConf(Set<InetAddress> ipAddresses) { + File confDir = new File(deploymentOptions.getBasedir(), "conf"); + File authFile = new File(confDir, "rhq-storage-auth.conf"); + + Set<String> addresses = new HashSet<String>(ipAddresses.size()); + for (InetAddress ipAddress : ipAddresses) { + addresses.add(ipAddress.getHostAddress()); + } + + try { + authFile.delete(); + StreamUtil.copy(new StringReader(StringUtil.collectionToString(addresses, "\n")), + new FileWriter(authFile), true); + } catch (IOException e) { + throw new RuntimeException("Failed to update " + authFile); + } + } + } diff --git a/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java b/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java index 5c5ac4e..113d66b 100644 --- a/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java +++ b/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java @@ -34,10 +34,12 @@ import java.net.InetAddress; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Properties; +import java.util.Set;
import javax.management.MBeanServerConnection; import javax.management.ObjectName; @@ -386,6 +388,7 @@ public class StorageInstaller { deployer.unzipDistro(); deployer.applyConfigChanges(); deployer.updateFilePerms(); + deployer.updateStorageAuthConf(getAddresses(hostname, seeds));
log.info("Finished installing RHQ Storage Node.");
@@ -499,6 +502,19 @@ public class StorageInstaller { return dir; }
+ private Set<InetAddress> getAddresses(String hostname, String seeds) throws IOException { + Set<InetAddress> addresses = new HashSet<InetAddress>(); + addresses.add(InetAddress.getByName(hostname)); + + if (!StringUtil.isEmpty(seeds)) { + for (String seed : seeds.split(",")) { + addresses.add(InetAddress.getByName(seed)); + } + } + + return addresses; + } + private PropertiesFileUpdate getServerProperties() { String sysprop = System.getProperty("rhq.server.properties-file"); if (sysprop == null) {
commit b63c38cb0c19cd062d14c5e025164021f1e946cc Author: John Sanda jsanda@redhat.com Date: Sat Jul 20 09:53:10 2013 -0400
turn on internode authentication
This enables internode authentication in cassandra.yaml, and the cassandra-auth module is packaged with our Cassandra distro. The authentication config file is automatically updated for integration tests. There is still work to be done for multi-node dev-container (and production) deployments.
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/pom.xml b/modules/common/cassandra-ccm/cassandra-ccm-core/pom.xml index 2bde394..c12c567 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-core/pom.xml +++ b/modules/common/cassandra-ccm/cassandra-ccm-core/pom.xml @@ -113,6 +113,11 @@ <artifactId>snappy-java</artifactId> <version>${cassandra.snappy.version}</version> </dependency> + <dependency> + <groupId>org.rhq</groupId> + <artifactId>rhq-cassandra-auth</artifactId> + <version>${project.version}</version> + </dependency> </dependencies> <executions> <execution> @@ -166,12 +171,15 @@ <delete file="${cassandra.dir}/lib/snappy-java-1.0.4.1.jar"/> <copy file="${settings.localRepository}/org/xerial/snappy/snappy-java/${cassandra.snappy.version}/snappy-java-${cassandra.snappy.version}.jar" todir="${cassandra.dir}/lib"/> + <copy file="${settings.localRepository}/org/rhq/rhq-cassandra-auth/${project.version}/rhq-cassandra-auth-${project.version}.jar" + todir="${cassandra.dir}/lib"/> <move file="${project.build.outputDirectory}/cassandra/conf" todir="${cassandra.dir}"/> <delete file="${cassandra.dir}/bin/cassandra"/> <move file="${project.build.outputDirectory}/cassandra/bin/cassandra" todir="${cassandra.dir}/bin"/> <delete dir="${project.build.outputDirectory}/cassandra"/> <delete dir="${cassandra.dir}/javadoc"/> <delete file="${cassandra.dir}/conf/cassandra-env.sh"/> + <touch file="${cassandra.dir}/conf/rhq-storage-auth.conf"/>
<zip basedir="${cassandra.dir}" destfile="${cassandra.distro.zip}"/> <delete dir="${cassandra.dir}"/> diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/cassandra-jvm.properties b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/cassandra-jvm.properties index 612c65e..1faee9d 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/cassandra-jvm.properties +++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/cassandra-jvm.properties @@ -17,9 +17,9 @@ heap_dump_dir=""
thread_stack_size="-Xss180k"
-java_agent="" # Enable jamm when running on Java 6 patch version 23 or higher. #java_agent="-javaagent:$CASSANDRA_HOME/lib/jamm-0.2.5.jar" +java_agent=
# GC tuning options # diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/rhq.cassandra.yaml b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/rhq.cassandra.yaml index 298db9d..da09e92 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/rhq.cassandra.yaml +++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/rhq.cassandra.yaml @@ -323,7 +323,7 @@ listen_address: ${rhq.cassandra.listen.address}
# Internode authentication backend, implementing IInternodeAuthenticator; # used to allow/disallow connections from peer nodes. -# internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator +internode_authenticator: org.rhq.cassandra.auth.RhqInternodeAuthenticator
# Whether to start the native transport server. # Currently, only the thrift server is started by default because the native diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java index 338ef3a..edf1430 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java +++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java @@ -30,7 +30,9 @@ import static org.rhq.core.util.StringUtil.collectionToString; import java.io.ByteArrayInputStream; import java.io.File; import java.io.FileReader; +import java.io.FileWriter; import java.io.IOException; +import java.io.StringReader; import java.io.StringWriter; import java.util.ArrayList; import java.util.Arrays; @@ -50,6 +52,7 @@ import org.rhq.core.system.ProcessExecution; import org.rhq.core.system.ProcessExecutionResults; import org.rhq.core.system.SystemInfo; import org.rhq.core.system.SystemInfoFactory; +import org.rhq.core.util.StringUtil; import org.rhq.core.util.file.FileUtil; import org.rhq.core.util.stream.StreamUtil;
@@ -138,6 +141,8 @@ public class CassandraClusterManager { storageNode.setCqlPort(nodeOptions.getNativeTransportPort()); nodes.add(storageNode);
+ updateStorageAuthConf(basedir); + installedNodeDirs.add(basedir); } catch (Exception e) { log.error("Failed to install node at " + basedir); @@ -152,6 +157,21 @@ public class CassandraClusterManager { return nodes; }
+ private void updateStorageAuthConf(File basedir) { + File confDir = new File(basedir, "conf"); + File authFile = new File(confDir, "rhq-storage-auth.conf"); + authFile.delete(); + + Set<String> addresses = calculateLocalIPAddresses(deploymentOptions.getNumNodes()); + + try { + StreamUtil.copy(new StringReader(StringUtil.collectionToString(addresses, "\n")), + new FileWriter(authFile), true); + } catch (IOException e) { + throw new RuntimeException("Failed to update " + authFile); + } + } + private Set<String> calculateLocalIPAddresses(int numNodes) { Set<String> addresses = new HashSet<String>();
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java index b222c82..b01ebe9 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java +++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java @@ -160,11 +160,6 @@ public class Deployer { return Integer.parseInt(javaVersion.substring(startIndex + 1, javaVersion.length())); }
- private boolean isLaterThanJava1_6() { - String javaVersion = System.getProperty("java.version"); - return javaVersion.compareTo("1.6.0") > 0; - } - public void updateFilePerms() { File deployDir = new File(deploymentOptions.getBasedir()); File binDir = new File(deployDir, "bin");
commit 890a378fc11b04f3e7af64babfbfd1a7e306b7cd Author: John Sanda jsanda@redhat.com Date: Sat Jul 20 07:56:14 2013 -0400
initial commit for cassandra-auth module
This is the first cut at our IInternodeAuthenticator. It loads IP addresses from a config file. There is a JMX operation to reload the config file that can be used after the config file is updated.
diff --git a/modules/common/cassandra-auth/pom.xml b/modules/common/cassandra-auth/pom.xml new file mode 100644 index 0000000..c53c752 --- /dev/null +++ b/modules/common/cassandra-auth/pom.xml @@ -0,0 +1,27 @@ +<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd%22%3E + + <modelVersion>4.0.0</modelVersion> + + <parent> + <groupId>org.rhq</groupId> + <artifactId>rhq-common-parent</artifactId> + <version>4.9.0-SNAPSHOT</version> + </parent> + + <artifactId>rhq-cassandra-auth</artifactId> + <name>RHQ Cassandra Authentication</name> + + <properties> + <moduleName>org.rhq.${project.artifactId}</moduleName> + </properties> + + <dependencies> + <dependency> + <groupId>org.apache.cassandra</groupId> + <artifactId>cassandra-all</artifactId> + <version>${cassandra.version}</version> + <scope>provided</scope> + </dependency> + </dependencies> +</project> diff --git a/modules/common/cassandra-auth/src/main/java/org/rhq/cassandra/auth/RhqInternodeAuthenticator.java b/modules/common/cassandra-auth/src/main/java/org/rhq/cassandra/auth/RhqInternodeAuthenticator.java new file mode 100644 index 0000000..56980f1 --- /dev/null +++ b/modules/common/cassandra-auth/src/main/java/org/rhq/cassandra/auth/RhqInternodeAuthenticator.java @@ -0,0 +1,78 @@ +package org.rhq.cassandra.auth; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileReader; +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.net.InetAddress; +import java.net.URISyntaxException; +import java.util.HashSet; +import java.util.Set; + +import javax.management.MBeanServer; +import javax.management.ObjectName; + +import org.apache.cassandra.auth.IInternodeAuthenticator; +import org.apache.cassandra.exceptions.ConfigurationException; + +/** + * @author John Sanda + */ +public class RhqInternodeAuthenticator implements 
IInternodeAuthenticator, RhqInternodeAuthenticatorMBean { + + private final String MBEAN_NAME = "org.rhq.cassandra.auth:type=" + RhqInternodeAuthenticator.class.getSimpleName(); + + private final String CONF_FILE = "rhq-storage-auth.conf"; + + private File authConfFile; + + private Set<InetAddress> addresses = new HashSet<InetAddress>(); + + public RhqInternodeAuthenticator() { + try { + authConfFile = new File(getClass().getResource("/" + CONF_FILE).toURI()); + if (!authConfFile.exists()) { + throw new RuntimeException(authConfFile + " does not exist"); + } + + reloadConfiguration(); + } catch (URISyntaxException e) { + throw new RuntimeException("Failed to load " + CONF_FILE, e); + } + + try { + MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); + ObjectName nameObj = new ObjectName(MBEAN_NAME); + mbs.registerMBean(this, nameObj); + } catch (Exception e) { + throw new RuntimeException("Failed to register MBean " + MBEAN_NAME, e); + } + } + + @Override + public boolean authenticate(InetAddress address, int port) { + return addresses.contains(address); + } + + @Override + public void reloadConfiguration() { + try { + addresses.clear(); + + BufferedReader reader = new BufferedReader(new FileReader(authConfFile)); + String line = reader.readLine(); + + while (line != null) { + addresses.add(InetAddress.getByName(line)); + line = reader.readLine(); + } + } catch (IOException e) { + throw new RuntimeException("Failed to load addresses from " + authConfFile, e); + } + } + + @Override + public void validateConfiguration() throws ConfigurationException { + } +} diff --git a/modules/common/cassandra-auth/src/main/java/org/rhq/cassandra/auth/RhqInternodeAuthenticatorMBean.java b/modules/common/cassandra-auth/src/main/java/org/rhq/cassandra/auth/RhqInternodeAuthenticatorMBean.java new file mode 100644 index 0000000..5e20389 --- /dev/null +++ b/modules/common/cassandra-auth/src/main/java/org/rhq/cassandra/auth/RhqInternodeAuthenticatorMBean.java @@ -0,0 +1,10 @@ 
+package org.rhq.cassandra.auth; + +/** + * @author John Sanda + */ +public interface RhqInternodeAuthenticatorMBean { + + public void reloadConfiguration(); + +} diff --git a/modules/common/pom.xml b/modules/common/pom.xml index bcbf862..7d12500 100644 --- a/modules/common/pom.xml +++ b/modules/common/pom.xml @@ -30,6 +30,7 @@ <module>ant-bundle</module> <module>drift</module> <module>jboss-as-dmr-client</module> + <module>cassandra-auth</module> <module>cassandra-util</module> <module>cassandra-jmx</module> <module>cassandra-schema</module>
commit 8cf740355dfab9832b904984aaba4e52bf759951 Author: John Sanda jsanda@redhat.com Date: Fri Jul 19 22:18:23 2013 -0400
add logic to enable jamm java agent, which mirrors logic in cassandra-env.sh
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java index 1e31e14..b222c82 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java +++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java @@ -125,6 +125,13 @@ public class Deployer { properties.setProperty("thread_stack_size", "-Xss" + deploymentOptions.getStackSize()); properties.setProperty("jmx_port", deploymentOptions.getJmxPort().toString());
+ String javaVersion = System.getProperty("java.version"); + // The check here is taken right from cassandra-env.sh + if ((!isOpenJDK() || javaVersion.compareTo("1.6.0") > 0) || + (javaVersion.equals("1.6.0") && getJavaPatchVersion() > 23)) { + properties.put("java_agent", "-javaagent:$CASSANDRA_HOME/lib/jamm-0.2.5.jar"); + } + propertiesUpdater.update(properties); } catch (IOException e) { log.error("An error occurred while updating " + jvmPropsFile, e); @@ -132,6 +139,32 @@ public class Deployer { } }
+ private boolean isOpenJDK() { + String javaVMName = System.getProperty("java.vm.name"); + return javaVMName.startsWith("OpenJDK"); + } + + private boolean isJava1_6() { + String javaVersion = System.getProperty("java.version"); + return javaVersion.startsWith("1.6.0"); + } + + private int getJavaPatchVersion() { + String javaVersion = System.getProperty("java.version"); + int startIndex = javaVersion.indexOf('_'); + + if (startIndex == -1) { + return 0; + } + + return Integer.parseInt(javaVersion.substring(startIndex + 1, javaVersion.length())); + } + + private boolean isLaterThanJava1_6() { + String javaVersion = System.getProperty("java.version"); + return javaVersion.compareTo("1.6.0") > 0; + } + public void updateFilePerms() { File deployDir = new File(deploymentOptions.getBasedir()); File binDir = new File(deployDir, "bin");
commit 0517e453381aec1d30b6aaa62bec63a322dfa350 Author: Larry O'Leary loleary@redhat.com Date: Fri Jul 19 19:05:25 2013 -0500
Updated TestLdapSettings to include changes/fixes introduced from the following BZs:
Bug 707047 - LDAP Group Member search doesn't escape special characters
Bug 981015 - LDAP auth fails if user's DN contains a backslash
Also includes a fix where the user auth test would fail to switch back to the directory bind account, resulting in ACL failures on the following group lookup steps.
diff --git a/etc/dev-utils/TestLdapSettings.java b/etc/dev-utils/TestLdapSettings.java index 4df79dc..2e29b3d 100644 --- a/etc/dev-utils/TestLdapSettings.java +++ b/etc/dev-utils/TestLdapSettings.java @@ -10,12 +10,14 @@ import java.awt.event.WindowAdapter; import java.awt.event.WindowEvent; import java.io.PrintWriter; import java.io.StringWriter; +import java.io.UnsupportedEncodingException; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Properties; import java.util.Set;
+import javax.naming.CompositeName; import javax.naming.Context; import javax.naming.NamingEnumeration; import javax.naming.directory.Attribute; @@ -257,7 +259,17 @@ public class TestLdapSettings extends JFrame { SearchResult si = (SearchResult) answer.next();
// Construct the UserDN - userDN = si.getName() + "," + baseDNs[x]; + userDN = null; + + try { + userDN = si.getNameInNamespace(); + } catch (UnsupportedOperationException use) { + userDN = new CompositeName(si.getName()).get(0); + if (si.isRelative()) { + userDN += "," + baseDNs[x]; + } + } + msg = "STEP-2:PASS: The test user '" + testUserName + "' was succesfully located, and the following userDN will be used in authorization check:\n"; @@ -288,6 +300,21 @@ public class TestLdapSettings extends JFrame { log(msg); proceed=false; } + try { + ctx.addToEnvironment(Context.SECURITY_PRINCIPAL, bindUserName); + ctx.addToEnvironment(Context.SECURITY_CREDENTIALS, bindPassword); + ctx.addToEnvironment(Context.SECURITY_AUTHENTICATION, "simple"); + ctx.reconnect(null); + } catch (Exception ex) { + msg = "STEP-2:WARN: There was an error when switching back to the bind user '" + + bindUserName + "'\n"; + msg += ex.getMessage(); + if(enableVerboseDebugging.isSelected()){ + msg = appendStacktraceToMsg(msg, ex); + } + log(msg); + } + } // with authentication completed, now check authorization. 
// validate filter components to list all available groups @@ -433,7 +460,7 @@ public class TestLdapSettings extends JFrame { Set<Map<String, String>> ret = new HashSet<Map<String, String>>(); String filter = String.format("(&(%s)(%s=%s))", groupSearchFilter, groupMemberFilter, - userDN); + LDAPStringUtil.encodeForFilter(userDN)); msg = "STEP-4:TESTING: about to do ldap search with filter \n'" + filter + "'\n to locate groups that test user IS authorized to access."; @@ -638,3 +665,74 @@ public class TestLdapSettings extends JFrame { return constraints; } } + +class LDAPStringUtil { + + /** + * <p>Encode a string so that it can be used in an LDAP search filter.</p> + * + * <p>The following table shows the characters that are encoded and their + * encoded version.</p> + * + * <table> + * <tr><th align="center">Character</th><th>Encoded As</th></tr> + * <tr><td align="center">*</td><td>\2a</td></tr> + * <tr><td align="center">(</td><td>\28</td></tr> + * <tr><td align="center">)</td><td>\29</td></tr> + * <tr><td align="center"></td><td>\5c</td></tr> + * <tr><td align="center"><code>null</code></td><td>\00</td></tr> + * </table> + * + * <p>In addition to encoding the above characters, any non-ASCII character + * (any character with a hex value greater then <code>0x7f</code>) is also + * encoded and rewritten as a UTF-8 character or sequence of characters in + * hex notation.</p> + * + * @param filterString a string that is to be encoded + * @return the encoded version of <code>filterString</code> suitable for use + * in a LDAP search filter + * @see <a href="http://tools.ietf.org/html/rfc4515">RFC 4515</a> + */ + public static String encodeForFilter(final String filterString) { + if (filterString != null && filterString.length() > 0) { + StringBuilder encString = new StringBuilder(filterString.length()); + for (int i = 0; i < filterString.length(); i++) { + char ch = filterString.charAt(i); + switch (ch) { + case '*': // encode a wildcard * character + 
encString.append("\2a"); + break; + case '(': // encode a open parenthesis ( character + encString.append("\28"); + break; + case ')': // encode a close parenthesis ) character + encString.append("\29"); + break; + case '\': // encode a backslash \ character + encString.append("\5c"); + break; + case '\u0000': // encode a null character + encString.append("\00"); + break; + default: + if (ch <= 0x7f) { // an ASCII character + encString.append(ch); + } else if (ch >= 0x80) { // encode to UTF-8 + try { + byte[] utf8bytes = String.valueOf(ch).getBytes("UTF8"); + for (byte b : utf8bytes) { + encString.append(String.format("\%02x", b)); + } + } catch (UnsupportedEncodingException e) { + // ignore + } + } + } + } + return encString.toString(); + } + return filterString; + } + +} +
commit 5d3b55ed04d5da4eab12704c8391872cd93421ad Author: John Sanda jsanda@redhat.com Date: Fri Jul 19 15:35:34 2013 -0400
forgot to include new test resources in previous commit
diff --git a/modules/common/cassandra-installer/src/test/resources/rhq48/storage/conf/cassandra-env.sh b/modules/common/cassandra-installer/src/test/resources/rhq48/storage/conf/cassandra-env.sh new file mode 100644 index 0000000..cd415b3 --- /dev/null +++ b/modules/common/cassandra-installer/src/test/resources/rhq48/storage/conf/cassandra-env.sh @@ -0,0 +1,247 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +calculate_heap_sizes() +{ + case "`uname`" in + Linux) + system_memory_in_mb=`free -m | awk '/Mem:/ {print $2}'` + system_cpu_cores=`egrep -c 'processor([[:space:]]+):.*' /proc/cpuinfo` + ;; + FreeBSD) + system_memory_in_bytes=`sysctl hw.physmem | awk '{print $2}'` + system_memory_in_mb=`expr $system_memory_in_bytes / 1024 / 1024` + system_cpu_cores=`sysctl hw.ncpu | awk '{print $2}'` + ;; + SunOS) + system_memory_in_mb=`prtconf | awk '/Memory size:/ {print $3}'` + system_cpu_cores=`psrinfo | wc -l` + ;; + Darwin) + system_memory_in_bytes=`sysctl hw.memsize | awk '{print $2}'` + system_memory_in_mb=`expr $system_memory_in_bytes / 1024 / 1024` + system_cpu_cores=`sysctl hw.ncpu | awk '{print $2}'` + ;; + *) + # assume reasonable defaults for e.g. 
a modern desktop or + # cheap server + system_memory_in_mb="2048" + system_cpu_cores="2" + ;; + esac + + # some systems like the raspberry pi don't report cores, use at least 1 + if [ "$system_cpu_cores" -lt "1" ] + then + system_cpu_cores="1" + fi + + # set max heap size based on the following + # max(min(1/2 ram, 1024MB), min(1/4 ram, 8GB)) + # calculate 1/2 ram and cap to 1024MB + # calculate 1/4 ram and cap to 8192MB + # pick the max + half_system_memory_in_mb=`expr $system_memory_in_mb / 2` + quarter_system_memory_in_mb=`expr $half_system_memory_in_mb / 2` + if [ "$half_system_memory_in_mb" -gt "1024" ] + then + half_system_memory_in_mb="1024" + fi + if [ "$quarter_system_memory_in_mb" -gt "8192" ] + then + quarter_system_memory_in_mb="8192" + fi + if [ "$half_system_memory_in_mb" -gt "$quarter_system_memory_in_mb" ] + then + max_heap_size_in_mb="$half_system_memory_in_mb" + else + max_heap_size_in_mb="$quarter_system_memory_in_mb" + fi + MAX_HEAP_SIZE="${max_heap_size_in_mb}M" + + # Young gen: min(max_sensible_per_modern_cpu_core * num_cores, 1/4 * heap size) + max_sensible_yg_per_core_in_mb="100" + max_sensible_yg_in_mb=`expr $max_sensible_yg_per_core_in_mb "*" $system_cpu_cores` + + desired_yg_in_mb=`expr $max_heap_size_in_mb / 4` + + if [ "$desired_yg_in_mb" -gt "$max_sensible_yg_in_mb" ] + then + HEAP_NEWSIZE="${max_sensible_yg_in_mb}M" + else + HEAP_NEWSIZE="${desired_yg_in_mb}M" + fi +} + +# Determine the sort of JVM we'll be running on. 
+ +java_ver_output=`"${JAVA:-java}" -version 2>&1` + +jvmver=`echo "$java_ver_output" | awk -F'"' 'NR==1 {print $2}'` +JVM_VERSION=${jvmver%_*} +JVM_PATCH_VERSION=${jvmver#*_} + +jvm=`echo "$java_ver_output" | awk 'NR==2 {print $1}'` +case "$jvm" in + OpenJDK) + JVM_VENDOR=OpenJDK + # this will be "64-Bit" or "32-Bit" + JVM_ARCH=`echo "$java_ver_output" | awk 'NR==3 {print $2}'` + ;; + "Java(TM)") + JVM_VENDOR=Oracle + # this will be "64-Bit" or "32-Bit" + JVM_ARCH=`echo "$java_ver_output" | awk 'NR==3 {print $3}'` + ;; + *) + # Help fill in other JVM values + JVM_VENDOR=other + JVM_ARCH=unknown + ;; +esac + + +# Override these to set the amount of memory to allocate to the JVM at +# start-up. For production use you may wish to adjust this for your +# environment. MAX_HEAP_SIZE is the total amount of memory dedicated +# to the Java heap; HEAP_NEWSIZE refers to the size of the young +# generation. Both MAX_HEAP_SIZE and HEAP_NEWSIZE should be either set +# or not (if you set one, set the other). +# +# The main trade-off for the young generation is that the larger it +# is, the longer GC pause times will be. The shorter it is, the more +# expensive GC will be (usually). +# +# The example HEAP_NEWSIZE assumes a modern 8-core+ machine for decent pause +# times. If in doubt, and if you do not particularly want to tweak, go with +# 100 MB per physical CPU core. + +#MAX_HEAP_SIZE="4G" +#HEAP_NEWSIZE="800M" + +if [ "x$MAX_HEAP_SIZE" = "x" ] && [ "x$HEAP_NEWSIZE" = "x" ]; then + calculate_heap_sizes +else + if [ "x$MAX_HEAP_SIZE" = "x" ] || [ "x$HEAP_NEWSIZE" = "x" ]; then + echo "please set or unset MAX_HEAP_SIZE and HEAP_NEWSIZE in pairs (see cassandra-env.sh)" + exit 1 + fi +fi + +# Specifies the default port over which Cassandra will be available for +# JMX connections. +JMX_PORT="7399" + + +# Here we create the arguments that will get passed to the jvm when +# starting cassandra. + +# enable assertions. 
disabling this in production will give a modest +# performance benefit (around 5%). +JVM_OPTS="$JVM_OPTS -ea" + +# add the jamm javaagent +if [ "$JVM_VENDOR" != "OpenJDK" -o "$JVM_VERSION" > "1.6.0" ] \ + || [ "$JVM_VERSION" = "1.6.0" -a "$JVM_PATCH_VERSION" -ge 23 ] +then + JVM_OPTS="$JVM_OPTS -javaagent:$CASSANDRA_HOME/lib/jamm-0.2.5.jar" +fi + +# enable thread priorities, primarily so we can give periodic tasks +# a lower priority to avoid interfering with client workload +JVM_OPTS="$JVM_OPTS -XX:+UseThreadPriorities" +# allows lowering thread priority without being root. see +# http://tech.stolsvik.com/2010/01/linux-java-thread-priorities-workaround.htm... +JVM_OPTS="$JVM_OPTS -XX:ThreadPriorityPolicy=42" + +# min and max heap sizes should be set to the same value to avoid +# stop-the-world GC pauses during resize, and so that we can lock the +# heap in memory on startup to prevent any of it from being swapped +# out. +JVM_OPTS="$JVM_OPTS -Xms512M" +JVM_OPTS="$JVM_OPTS -Xmx512M" +JVM_OPTS="$JVM_OPTS -Xmn128M" +JVM_OPTS="$JVM_OPTS -XX:+HeapDumpOnOutOfMemoryError" + +# set jvm HeapDumpPath with CASSANDRA_HEAPDUMP_DIR +if [ "x$CASSANDRA_HEAPDUMP_DIR" != "x" ]; then + JVM_OPTS="$JVM_OPTS -XX:HeapDumpPath=$CASSANDRA_HEAPDUMP_DIR/cassandra-`date +%s`-pid$$.hprof" +fi + + +startswith() { [ "${1#$2}" != "$1" ]; } + +if [ "`uname`" = "Linux" ] ; then + # reduce the per-thread stack size to minimize the impact of Thrift + # thread-per-client. (Best practice is for client connections to + # be pooled anyway.) Only do so on Linux where it is known to be + # supported. 
+ # u34 and greater need 180k + JVM_OPTS="$JVM_OPTS -Xss180k" +fi +echo "xss = $JVM_OPTS" + +# GC tuning options +JVM_OPTS="$JVM_OPTS -XX:+UseParNewGC" +JVM_OPTS="$JVM_OPTS -XX:+UseConcMarkSweepGC" +JVM_OPTS="$JVM_OPTS -XX:+CMSParallelRemarkEnabled" +JVM_OPTS="$JVM_OPTS -XX:SurvivorRatio=8" +JVM_OPTS="$JVM_OPTS -XX:MaxTenuringThreshold=1" +JVM_OPTS="$JVM_OPTS -XX:CMSInitiatingOccupancyFraction=75" +JVM_OPTS="$JVM_OPTS -XX:+UseCMSInitiatingOccupancyOnly" +JVM_OPTS="$JVM_OPTS -XX:+UseTLAB" +# note: bash evals '1.7.x' as > '1.7' so this is really a >= 1.7 jvm check +if [ "$JVM_VERSION" > "1.7" ] ; then + JVM_OPTS="$JVM_OPTS -XX:+UseCondCardMark" +fi + +# GC logging options -- uncomment to enable +# JVM_OPTS="$JVM_OPTS -XX:+PrintGCDetails" +# JVM_OPTS="$JVM_OPTS -XX:+PrintGCDateStamps" +# JVM_OPTS="$JVM_OPTS -XX:+PrintHeapAtGC" +# JVM_OPTS="$JVM_OPTS -XX:+PrintTenuringDistribution" +# JVM_OPTS="$JVM_OPTS -XX:+PrintGCApplicationStoppedTime" +# JVM_OPTS="$JVM_OPTS -XX:+PrintPromotionFailure" +# JVM_OPTS="$JVM_OPTS -XX:PrintFLSStatistics=1" +# JVM_OPTS="$JVM_OPTS -Xloggc:/var/log/cassandra/gc-`date +%s`.log" +# If you are using JDK 6u34 7u2 or later you can enable GC log rotation +# don't stick the date in the log name if rotation is on. +# JVM_OPTS="$JVM_OPTS -Xloggc:/var/log/cassandra/gc.log" +# JVM_OPTS="$JVM_OPTS -XX:+UseGCLogFileRotation" +# JVM_OPTS="$JVM_OPTS -XX:NumberOfGCLogFiles=10" +# JVM_OPTS="$JVM_OPTS -XX:GCLogFileSize=10M" + +# uncomment to have Cassandra JVM listen for remote debuggers/profilers on port 1414 +# JVM_OPTS="$JVM_OPTS -Xdebug -Xnoagent -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=1414" + +# Prefer binding to IPv4 network intefaces (when net.ipv6.bindv6only=1). See +# http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6342561 (short version: +# comment out this entry to enable IPv6 support). 
+JVM_OPTS="$JVM_OPTS -Djava.net.preferIPv4Stack=true" + +# jmx: metrics and administration interface +# +# add this if you're having trouble connecting: +# JVM_OPTS="$JVM_OPTS -Djava.rmi.server.hostname=<public name>" +# +# see +# https://blogs.oracle.com/jmxetc/entry/troubleshooting_connection_problems_in... +# for more on configuring JMX through firewalls, etc. (Short version: +# get it working with no firewall first.) +JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT" +JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl=false" +JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.authenticate=false" +JVM_OPTS="$JVM_OPTS $JVM_EXTRA_OPTS" diff --git a/modules/common/cassandra-installer/src/test/resources/rhq48/storage/conf/cassandra.yaml b/modules/common/cassandra-installer/src/test/resources/rhq48/storage/conf/cassandra.yaml new file mode 100644 index 0000000..8c9c68c --- /dev/null +++ b/modules/common/cassandra-installer/src/test/resources/rhq48/storage/conf/cassandra.yaml @@ -0,0 +1,690 @@ +# Cassandra storage config YAML + +# NOTE: +# See http://wiki.apache.org/cassandra/StorageConfiguration for +# full explanations of configuration directives +# /NOTE + +# The name of the cluster. This is mainly used to prevent machines in +# one logical cluster from joining another. +cluster_name: rhq + +# This defines the number of tokens randomly assigned to this node on the ring +# The more tokens, relative to other nodes, the larger the proportion of data +# that this node will store. You probably want all nodes to have the same number +# of tokens assuming they have equal hardware capability. +# +# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, +# and will use the initial_token as described below. +# +# Specifying initial_token will override this setting. 
+# +# If you already have a cluster with 1 token per node, and wish to migrate to +# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations +num_tokens: 256 + +# If you haven't specified num_tokens, or have set it to the default of 1 then +# you should always specify InitialToken when setting up a production +# cluster for the first time, and often when adding capacity later. +# The principle is that each node should be given an equal slice of +# the token ring; see http://wiki.apache.org/cassandra/Operations +# for more details. +# +# If blank, Cassandra will request a token bisecting the range of +# the heaviest-loaded existing node. If there is no load information +# available, such as is the case with a new cluster, it will pick +# a random token, which will lead to hot spots. +#initial_token: + +# See http://wiki.apache.org/cassandra/HintedHandoff +hinted_handoff_enabled: true +# this defines the maximum amount of time a dead host will have hints +# generated. After it has been dead this long, new hints for it will not be +# created until it has been seen alive and gone down again. +max_hint_window_in_ms: 10800000 # 3 hours +# throttle in KB's per second, per delivery thread +hinted_handoff_throttle_in_kb: 1024 +# Number of threads with which to deliver hints; +# Consider increasing this number when you have multi-dc deployments, since +# cross-dc handoff tends to be slower +max_hints_delivery_threads: 2 + +# The following setting populates the page cache on memtable flush and compaction +# WARNING: Enable this setting only when the whole node's data fits in memory. +# Defaults to: false +# populate_io_cache_on_flush: false + +# Authentication backend, implementing IAuthenticator; used to identify users +# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, +# PasswordAuthenticator}. +# +# - AllowAllAuthenticator performs no checks - set it to disable authentication. 
+# - PasswordAuthenticator relies on username/password pairs to authenticate +# users. It keeps usernames and hashed passwords in system_auth.credentials table. +# Please increase system_auth keyspace replication factor if you use this authenticator. +authenticator: org.apache.cassandra.auth.PasswordAuthenticator + +# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions +# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, +# CassandraAuthorizer}. +# +# - AllowAllAuthorizer allows any action to any user - set it to disable authorization. +# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please +# increase system_auth keyspace replication factor if you use this authorizer. +authorizer: org.apache.cassandra.auth.CassandraAuthorizer + +# Validity period for permissions cache (fetching permissions can be an +# expensive operation depending on the authorizer, CassandraAuthorizer is +# one example). Defaults to 2000, set to 0 to disable. +# Will be disabled automatically for AllowAllAuthorizer. +permissions_validity_in_ms: 2000 + +# The partitioner is responsible for distributing rows (by key) across +# nodes in the cluster. Any IPartitioner may be used, including your +# own as long as it is on the classpath. Out of the box, Cassandra +# provides org.apache.cassandra.dht.{Murmur3Partitioner, RandomPartitioner +# ByteOrderedPartitioner, OrderPreservingPartitioner (deprecated)}. +# +# - RandomPartitioner distributes rows across the cluster evenly by md5. +# This is the default prior to 1.2 and is retained for compatibility. +# - Murmur3Partitioner is similar to RandomPartioner but uses Murmur3_128 +# Hash Function instead of md5. When in doubt, this is the best option. +# - ByteOrderedPartitioner orders rows lexically by key bytes. BOP allows +# scanning rows in key order, but the ordering can generate hot spots +# for sequential insertion workloads. 
+# - OrderPreservingPartitioner is an obsolete form of BOP, that stores +# - keys in a less-efficient format and only works with keys that are +# UTF8-encoded Strings. +# - CollatingOPP colates according to EN,US rules rather than lexical byte +# ordering. Use this as an example if you need custom collation. +# +# See http://wiki.apache.org/cassandra/Operations for more on +# partitioners and token selection. +partitioner: org.apache.cassandra.dht.Murmur3Partitioner + +# directories where Cassandra should store data on disk. +data_file_directories: + - target/rhq48/rhq-storage/data + +# commit log +commitlog_directory: target/rhq48/rhq-storage/commit_log + +# policy for data disk failures: +# stop: shut down gossip and Thrift, leaving the node effectively dead, but +# still inspectable via JMX. +# best_effort: stop using the failed disk and respond to requests based on +# remaining available sstables. This means you WILL see obsolete +# data at CL.ONE! +# ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra +disk_failure_policy: stop + +# Maximum size of the key cache in memory. +# +# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the +# minimum, sometimes more. The key cache is fairly tiny for the amount of +# time it saves, so it's worthwhile to use it at large numbers. +# The row cache saves even more time, but must store the whole values of +# its rows, so it is extremely space-intensive. It's best to only use the +# row cache if you have hot rows or static rows. +# +# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. +# +# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache. +key_cache_size_in_mb: + +# Duration in seconds after which Cassandra should +# safe the keys cache. Caches are saved to saved_caches_directory as +# specified in this configuration file. 
+# +# Saved caches greatly improve cold-start speeds, and is relatively cheap in +# terms of I/O for the key cache. Row cache saving is much more expensive and +# has limited use. +# +# Default is 14400 or 4 hours. +key_cache_save_period: 14400 + +# Number of keys from the key cache to save +# Disabled by default, meaning all keys are going to be saved +# key_cache_keys_to_save: 100 + +# Maximum size of the row cache in memory. +# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. +# +# Default value is 0, to disable row caching. +row_cache_size_in_mb: 0 + +# Duration in seconds after which Cassandra should +# safe the row cache. Caches are saved to saved_caches_directory as specified +# in this configuration file. +# +# Saved caches greatly improve cold-start speeds, and is relatively cheap in +# terms of I/O for the key cache. Row cache saving is much more expensive and +# has limited use. +# +# Default is 0 to disable saving the row cache. +row_cache_save_period: 0 + +# Number of keys from the row cache to save +# Disabled by default, meaning all keys are going to be saved +# row_cache_keys_to_save: 100 + +# The provider for the row cache to use. +# +# Supported values are: ConcurrentLinkedHashCacheProvider, SerializingCacheProvider +# +# SerializingCacheProvider serialises the contents of the row and stores +# it in native memory, i.e., off the JVM Heap. Serialized rows take +# significantly less memory than "live" rows in the JVM, so you can cache +# more rows in a given memory footprint. And storing the cache off-heap +# means you can use smaller heap sizes, reducing the impact of GC pauses. +# +# It is also valid to specify the fully-qualified class name to a class +# that implements org.apache.cassandra.cache.IRowCacheProvider. 
+# +# Defaults to SerializingCacheProvider +row_cache_provider: SerializingCacheProvider + +# saved caches +saved_caches_directory: target/rhq48/rhq-storage/saved_caches + +# commitlog_sync may be either "periodic" or "batch." +# When in batch mode, Cassandra won't ack writes until the commit log +# has been fsynced to disk. It will wait up to +# commitlog_sync_batch_window_in_ms milliseconds for other writes, before +# performing the sync. +# +# commitlog_sync: batch +# commitlog_sync_batch_window_in_ms: 50 +# +# the other option is "periodic" where writes may be acked immediately +# and the CommitLog is simply synced every commitlog_sync_period_in_ms +# milliseconds. +commitlog_sync: periodic +commitlog_sync_period_in_ms: 10000 + +# The size of the individual commitlog file segments. A commitlog +# segment may be archived, deleted, or recycled once all the data +# in it (potentally from each columnfamily in the system) has been +# flushed to sstables. +# +# The default size is 32, which is almost always fine, but if you are +# archiving commitlog segments (see commitlog_archiving.properties), +# then you probably want a finer granularity of archiving; 8 or 16 MB +# is reasonable. +commitlog_segment_size_in_mb: 32 + +# any class that implements the SeedProvider interface and has a +# constructor that takes a Map<String, String> of parameters will do. +seed_provider: + # Addresses of hosts that are deemed contact points. + # Cassandra nodes use this list of hosts to find each other and learn + # the topology of the ring. You must change this if you are running + # multiple nodes! + - class_name: org.apache.cassandra.locator.SimpleSeedProvider + parameters: + # seeds is actually a comma-delimited list of addresses. + # Ex: "<ip1>,<ip2>,<ip3>" + - seeds: "127.0.0.1" + +# emergency pressure valve: each time heap usage after a full (CMS) +# garbage collection is above this fraction of the max, Cassandra will +# flush the largest memtables. +# +# Set to 1.0 to disable. 
Setting this lower than +# CMSInitiatingOccupancyFraction is not likely to be useful. +# +# RELYING ON THIS AS YOUR PRIMARY TUNING MECHANISM WILL WORK POORLY: +# it is most effective under light to moderate load, or read-heavy +# workloads; under truly massive write load, it will often be too +# little, too late. +flush_largest_memtables_at: 0.75 + +# emergency pressure valve #2: the first time heap usage after a full +# (CMS) garbage collection is above this fraction of the max, +# Cassandra will reduce cache maximum _capacity_ to the given fraction +# of the current _size_. Should usually be set substantially above +# flush_largest_memtables_at, since that will have less long-term +# impact on the system. +# +# Set to 1.0 to disable. Setting this lower than +# CMSInitiatingOccupancyFraction is not likely to be useful. +reduce_cache_sizes_at: 0.85 +reduce_cache_capacity_to: 0.6 + +# For workloads with more data than can fit in memory, Cassandra's +# bottleneck will be reads that need to fetch data from +# disk. "concurrent_reads" should be set to (16 * number_of_drives) in +# order to allow the operations to enqueue low enough in the stack +# that the OS and drives can reorder them. +# +# On the other hand, since writes are almost never IO bound, the ideal +# number of "concurrent_writes" is dependent on the number of cores in +# your system; (8 * number_of_cores) is a good rule of thumb. +concurrent_reads: 32 +concurrent_writes: 32 + +# Total memory to use for memtables. Cassandra will flush the largest +# memtable when this much memory is used. +# If omitted, Cassandra will set it to 1/3 of the heap. +# memtable_total_space_in_mb: 2048 + +# Total space to use for commitlogs. Since commitlog segments are +# mmapped, and hence use up address space, the default size is 32 +# on 32-bit JVMs, and 1024 on 64-bit JVMs. 
+# +# If space gets above this value (it will round up to the next nearest +# segment multiple), Cassandra will flush every dirty CF in the oldest +# segment and remove it. So a small total commitlog space will tend +# to cause more flush activity on less-active columnfamilies. +# commitlog_total_space_in_mb: 4096 + +# This sets the amount of memtable flush writer threads. These will +# be blocked by disk io, and each one will hold a memtable in memory +# while blocked. If you have a large heap and many data directories, +# you can increase this value for better flush performance. +# By default this will be set to the amount of data directories defined. +#memtable_flush_writers: 1 + +# the number of full memtables to allow pending flush, that is, +# waiting for a writer thread. At a minimum, this should be set to +# the maximum number of secondary indexes created on a single CF. +memtable_flush_queue_size: 4 + +# Whether to, when doing sequential writing, fsync() at intervals in +# order to force the operating system to flush the dirty +# buffers. Enable this to avoid sudden dirty buffer flushing from +# impacting read latencies. Almost always a good idea on SSD:s; not +# necessarily on platters. +trickle_fsync: false +trickle_fsync_interval_in_kb: 10240 + +# TCP port, for commands and data +storage_port: 7100 + +# SSL port, for encrypted communication. Unused unless enabled in +# encryption_options +ssl_storage_port: 7101 + +# Address to bind to and tell other Cassandra nodes to connect to. You +# _must_ change this if you want multiple nodes to be able to +# communicate! +# +# Leaving it blank leaves it up to InetAddress.getLocalHost(). This +# will always do the Right Thing *if* the node is properly configured +# (hostname, name resolution, etc), and the Right Thing is to use the +# address associated with the hostname (it might not be). +# +# Setting this to 0.0.0.0 is always wrong. 
+listen_address: 127.0.0.1 + +# Address to broadcast to other Cassandra nodes +# Leaving this blank will set it to the same value as listen_address +# broadcast_address: 1.2.3.4 + +# Internode authentication backend, implementing IInternodeAuthenticator; +# used to allow/disallow connections from peer nodes. +# internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator + +# Whether to start the native transport server. +# Currently, only the thrift server is started by default because the native +# transport is considered beta. +# Please note that the address on which the native transport is bound is the +# same as the rpc_address. The port however is different and specified below. +start_native_transport: true +# port for the CQL native transport to listen for clients on +native_transport_port: 9142 +# The minimum and maximum threads for handling requests when the native +# transport is used. The meaning is those is similar to the one of +# rpc_min_threads and rpc_max_threads, though the default differ slightly and +# are the ones below: +# native_transport_min_threads: 16 +native_transport_max_threads: 128 + + +# Whether to start the thrift rpc server. +start_rpc: false +# The address to bind the Thrift RPC service to -- clients connect +# here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if +# you want Thrift to listen on all interfaces. +# +# Leaving this blank has the same effect it does for ListenAddress, +# (i.e. it will be based on the configured hostname of the node). +rpc_address: 127.0.0.1 +# port for Thrift to listen for clients on +rpc_port: 9160 + +# enable or disable keepalive on rpc connections +rpc_keepalive: true + +# Cassandra provides three out-of-the-box options for the RPC Server: +# +# sync -> One thread per thrift connection. For a very large number of clients, memory +# will be your limiting factor. 
On a 64 bit JVM, 128KB is the minimum stack size +# per thread, and that will correspond to your use of virtual memory (but physical memory +# may be limited depending on use of stack space). +# +# hsha -> Stands for "half synchronous, half asynchronous." All thrift clients are handled +# asynchronously using a small number of threads that does not vary with the amount +# of thrift clients (and thus scales well to many clients). The rpc requests are still +# synchronous (one thread per active request). +# +# The default is sync because on Windows hsha is about 30% slower. On Linux, +# sync/hsha performance is about the same, with hsha of course using less memory. +# +# Alternatively, can provide your own RPC server by providing the fully-qualified class name +# of an o.a.c.t.TServerFactory that can create an instance of it. +rpc_server_type: sync + +# Uncomment rpc_min|max_thread to set request pool size limits. +# +# Regardless of your choice of RPC server (see above), the number of maximum requests in the +# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync +# RPC server, it also dictates the number of clients that can be connected at all). +# +# The default is unlimited and thus provide no protection against clients overwhelming the server. You are +# encouraged to set a maximum that makes sense for you in production, but do keep in mind that +# rpc_max_threads represents the maximum number of client requests this server may execute concurrently. 
+# +# rpc_min_threads: 16 +# rpc_max_threads: 2048 + +# uncomment to set socket buffer sizes on rpc connections +# rpc_send_buff_size_in_bytes: +# rpc_recv_buff_size_in_bytes: + +# uncomment to set socket buffer size for internode communication +# Note that when setting this, the buffer size is limited by net.core.wmem_max +# and when not setting it it is defined by net.ipv4.tcp_wmem +# See: +# /proc/sys/net/core/wmem_max +# /proc/sys/net/core/rmem_max +# /proc/sys/net/ipv4/tcp_wmem +# /proc/sys/net/ipv4/tcp_wmem +# and: man tcp +# internode_send_buff_size_in_bytes: +# internode_recv_buff_size_in_bytes: + +# Frame size for thrift (maximum field length). +thrift_framed_transport_size_in_mb: 15 + +# The max length of a thrift message, including all fields and +# internal thrift overhead. +thrift_max_message_length_in_mb: 16 + +# Set to true to have Cassandra create a hard link to each sstable +# flushed or streamed locally in a backups/ subdirectory of the +# Keyspace data. Removing these links is the operator's +# responsibility. +incremental_backups: false + +# Whether or not to take a snapshot before each compaction. Be +# careful using this option, since Cassandra won't clean up the +# snapshots for you. Mostly useful if you're paranoid when there +# is a data format change. +snapshot_before_compaction: false + +# Whether or not a snapshot is taken of the data before keyspace truncation +# or dropping of column families. The STRONGLY advised default of true +# should be used to provide data safety. If you set this flag to false, you will +# lose data on truncation or drop. +auto_snapshot: true + +# Add column indexes to a row after its contents reach this size. +# Increase if your column values are large, or if you have a very large +# number of columns. 
The competing causes are, Cassandra has to +# deserialize this much of the row to read a single column, so you want +# it to be small - at least if you do many partial-row reads - but all +# the index data is read for each access, so you don't want to generate +# that wastefully either. +column_index_size_in_kb: 64 + +# Size limit for rows being compacted in memory. Larger rows will spill +# over to disk and use a slower two-pass compaction process. A message +# will be logged specifying the row key. +in_memory_compaction_limit_in_mb: 64 + +# Number of simultaneous compactions to allow, NOT including +# validation "compactions" for anti-entropy repair. Simultaneous +# compactions can help preserve read performance in a mixed read/write +# workload, by mitigating the tendency of small sstables to accumulate +# during a single long running compactions. The default is usually +# fine and if you experience problems with compaction running too +# slowly or too fast, you should look at +# compaction_throughput_mb_per_sec first. +# +# concurrent_compactors defaults to the number of cores. +# Uncomment to make compaction mono-threaded, the pre-0.8 default. +#concurrent_compactors: 1 + +# Multi-threaded compaction. When enabled, each compaction will use +# up to one thread per core, plus one thread per sstable being merged. +# This is usually only useful for SSD-based hardware: otherwise, +# your concern is usually to get compaction to do LESS i/o (see: +# compaction_throughput_mb_per_sec), not more. +multithreaded_compaction: false + +# Throttles compaction to the given total throughput across the entire +# system. The faster you insert data, the faster you need to compact in +# order to keep the sstable count down, but in general, setting this to +# 16 to 32 times the rate you are inserting data is more than sufficient. +# Setting this to 0 disables throttling. Note that this account for all types +# of compaction, including validation compaction. 
+compaction_throughput_mb_per_sec: 16 + +# Track cached row keys during compaction, and re-cache their new +# positions in the compacted sstable. Disable if you use really large +# key caches. +compaction_preheat_key_cache: true + +# Throttles all outbound streaming file transfers on this node to the +# given total throughput in Mbps. This is necessary because Cassandra does +# mostly sequential IO when streaming data during bootstrap or repair, which +# can lead to saturating the network connection and degrading rpc performance. +# When unset, the default is 200 Mbps or 25 MB/s. +# stream_throughput_outbound_megabits_per_sec: 200 + +# How long the coordinator should wait for read operations to complete +read_request_timeout_in_ms: 20000 +# How long the coordinator should wait for seq or index scans to complete +range_request_timeout_in_ms: 20000 +# How long the coordinator should wait for writes to complete +write_request_timeout_in_ms: 20000 +# How long the coordinator should wait for truncates to complete +# (This can be much longer, because we need to flush all CFs +# to make sure we can clear out anythink in the commitlog that could +# cause truncated data to reappear.) +truncate_request_timeout_in_ms: 60000 +# The default timeout for other, miscellaneous operations +request_timeout_in_ms: 20000 + +# Enable operation timeout information exchange between nodes to accurately +# measure request timeouts, If disabled cassandra will assuming the request +# was forwarded to the replica instantly by the coordinator +# +# Warning: before enabling this property make sure to ntp is installed +# and the times are synchronized between the nodes. +cross_node_timeout: false + +# Enable socket timeout for streaming operation. +# When a timeout occurs during streaming, streaming is retried from the start +# of the current file. This *can* involve re-streaming an important amount of +# data, so you should avoid setting the value too low. 
+# Default value is 0, which never timeout streams. +# streaming_socket_timeout_in_ms: 0 + +# phi value that must be reached for a host to be marked down. +# most users should never need to adjust this. +# phi_convict_threshold: 8 + +# endpoint_snitch -- Set this to a class that implements +# IEndpointSnitch. The snitch has two functions: +# - it teaches Cassandra enough about your network topology to route +# requests efficiently +# - it allows Cassandra to spread replicas around your cluster to avoid +# correlated failures. It does this by grouping machines into +# "datacenters" and "racks." Cassandra will do its best not to have +# more than one replica on the same "rack" (which may not actually +# be a physical location) +# +# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER, +# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS +# ARE PLACED. +# +# Out of the box, Cassandra provides +# - SimpleSnitch: +# Treats Strategy order as proximity. This improves cache locality +# when disabling read repair, which can further improve throughput. +# Only appropriate for single-datacenter deployments. +# - PropertyFileSnitch: +# Proximity is determined by rack and data center, which are +# explicitly configured in cassandra-topology.properties. +# - GossipingPropertyFileSnitch +# The rack and datacenter for the local node are defined in +# cassandra-rackdc.properties and propagated to other nodes via gossip. If +# cassandra-topology.properties exists, it is used as a fallback, allowing +# migration from the PropertyFileSnitch. +# - RackInferringSnitch: +# Proximity is determined by rack and data center, which are +# assumed to correspond to the 3rd and 2nd octet of each node's +# IP address, respectively. Unless this happens to match your +# deployment conventions (as it did Facebook's), this is best used +# as an example of writing a custom Snitch class. +# - Ec2Snitch: +# Appropriate for EC2 deployments in a single Region. 
Loads Region +# and Availability Zone information from the EC2 API. The Region is +# treated as the Datacenter, and the Availability Zone as the rack. +# Only private IPs are used, so this will not work across multiple +# Regions. +# - Ec2MultiRegionSnitch: +# Uses public IPs as broadcast_address to allow cross-region +# connectivity. (Thus, you should set seed addresses to the public +# IP as well.) You will need to open the storage_port or +# ssl_storage_port on the public IP firewall. (For intra-Region +# traffic, Cassandra will switch to the private IP after +# establishing a connection.) +# +# You can use a custom Snitch by setting this to the full class name +# of the snitch, which will be assumed to be on your classpath. +endpoint_snitch: SimpleSnitch + +# controls how often to perform the more expensive part of host score +# calculation +dynamic_snitch_update_interval_in_ms: 100 +# controls how often to reset all host scores, allowing a bad host to +# possibly recover +dynamic_snitch_reset_interval_in_ms: 600000 +# if set greater than zero and read_repair_chance is < 1.0, this will allow +# 'pinning' of replicas to hosts in order to increase cache capacity. +# The badness threshold will control how much worse the pinned host has to be +# before the dynamic snitch will prefer other replicas over it. This is +# expressed as a double which represents a percentage. Thus, a value of +# 0.2 means Cassandra would continue to prefer the static snitch values +# until the pinned host was 20% worse than the fastest. +dynamic_snitch_badness_threshold: 0.1 + +# request_scheduler -- Set this to a class that implements +# RequestScheduler, which will schedule incoming client requests +# according to the specific policy. This is useful for multi-tenancy +# with a single Cassandra cluster. +# NOTE: This is specifically for requests from the client and does +# not affect inter node communication. 
+# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place +# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of +# client requests to a node with a separate queue for each +# request_scheduler_id. The scheduler is further customized by +# request_scheduler_options as described below. +request_scheduler: org.apache.cassandra.scheduler.NoScheduler + +# Scheduler Options vary based on the type of scheduler +# NoScheduler - Has no options +# RoundRobin +# - throttle_limit -- The throttle_limit is the number of in-flight +# requests per client. Requests beyond +# that limit are queued up until +# running requests can complete. +# The value of 80 here is twice the number of +# concurrent_reads + concurrent_writes. +# - default_weight -- default_weight is optional and allows for +# overriding the default which is 1. +# - weights -- Weights are optional and will default to 1 or the +# overridden default_weight. The weight translates into how +# many requests are handled during each turn of the +# RoundRobin, based on the scheduler id. +# +# request_scheduler_options: +# throttle_limit: 80 +# default_weight: 5 +# weights: +# Keyspace1: 1 +# Keyspace2: 5 + +# request_scheduler_id -- An identifer based on which to perform +# the request scheduling. Currently the only valid option is keyspace. +# request_scheduler_id: keyspace + +# index_interval controls the sampling of entries from the primrary +# row index in terms of space versus time. The larger the interval, +# the smaller and less effective the sampling will be. In technicial +# terms, the interval coresponds to the number of index entries that +# are skipped between taking each sample. All the sampled entries +# must fit in memory. Generally, a value between 128 and 512 here +# coupled with a large key cache size on CFs results in the best trade +# offs. 
This value is not often changed, however if you have many +# very small rows (many to an OS page), then increasing this will +# often lower memory usage without a impact on performance. +index_interval: 128 + +# Enable or disable inter-node encryption +# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that +# users generate their own keys) TLS_RSA_WITH_AES_128_CBC_SHA as the cipher +# suite for authentication, key exchange and encryption of the actual data transfers. +# NOTE: No custom encryption options are enabled at the moment +# The available internode options are : all, none, dc, rack +# +# If set to dc cassandra will encrypt the traffic between the DCs +# If set to rack cassandra will encrypt the traffic between the racks +# +# The passwords used in these options must match the passwords used when generating +# the keystore and truststore. For instructions on generating these files, see: +# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSE... +# +server_encryption_options: + internode_encryption: none + keystore: conf/.keystore + keystore_password: cassandra + truststore: conf/.truststore + truststore_password: cassandra + # More advanced defaults below: + # protocol: TLS + # algorithm: SunX509 + # store_type: JKS + # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA] + # require_client_auth: false + +# enable or disable client/server encryption. +client_encryption_options: + enabled: false + keystore: conf/.keystore + keystore_password: cassandra + # require_client_auth: false + # Set trustore and truststore_password if require_client_auth is true + # truststore: conf/.truststore + # truststore_password: cassandra + # More advanced defaults below: + # protocol: TLS + # algorithm: SunX509 + # store_type: JKS + # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA] + +# internode_compression controls whether traffic between nodes is +# compressed. 
+# can be: all - all traffic is compressed +# dc - traffic between different datacenters is compressed +# none - nothing is compressed. +internode_compression: all + +# Enable or disable tcp_nodelay for inter-dc communication. +# Disabling it will result in larger (but fewer) network packets being sent, +# reducing overhead from the TCP protocol itself, at the cost of increasing +# latency if you block for cross-datacenter responses. +inter_dc_tcp_nodelay: true diff --git a/modules/common/cassandra-installer/src/test/resources/rhq48/storage/conf/log4j-server.properties b/modules/common/cassandra-installer/src/test/resources/rhq48/storage/conf/log4j-server.properties new file mode 100644 index 0000000..c2e6dab --- /dev/null +++ b/modules/common/cassandra-installer/src/test/resources/rhq48/storage/conf/log4j-server.properties @@ -0,0 +1,45 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# for production, you should probably set pattern to %c instead of %l. +# (%l is slower.) 
+ +# output messages into a rolling log file as well as stdout +log4j.rootLogger=INFO,stdout,R + +# stdout +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=%5p %d{HH:mm:ss,SSS} %m%n + +# rolling log file +log4j.appender.R=org.apache.log4j.RollingFileAppender +log4j.appender.R.maxFileSize=20MB +log4j.appender.R.maxBackupIndex=50 +log4j.appender.R.layout=org.apache.log4j.PatternLayout +log4j.appender.R.layout.ConversionPattern=%5p [%t] %d{ISO8601} %F (line %L) %m%n +# Edit the next line to point to your logs directory +log4j.appender.R.File=target/rhq48/rhq-storage/logs/rhq-storage.log +log4j.appender.R.Threshold=INFO + +# Application logging options +#log4j.logger.org.apache.cassandra=DEBUG +#log4j.logger.org.apache.cassandra.db=DEBUG +#log4j.logger.org.apache.cassandra.service.StorageProxy=DEBUG + +# Adding this to avoid thrift logging disconnect errors. +log4j.logger.org.apache.thrift.server.TNonblockingServer=ERROR +
commit 656cc6066250f46ff51e16230c0f7c8263f55435 Author: John Sanda jsanda@redhat.com Date: Fri Jul 19 13:43:46 2013 -0400
[BZ 983226] fixing upgrade regression introduced by use of cassandra-jvm.properties
This commit removes cassandra-env.sh from our Cassandra distro since we are no longer using that script. The storage installer upgrade has been cleaned up some so that it will get the jmx port from cassandra-env.sh for 4.8 installs and then update cassandra-jvm.properties.
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/pom.xml b/modules/common/cassandra-ccm/cassandra-ccm-core/pom.xml index 42f6c8b..2bde394 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-core/pom.xml +++ b/modules/common/cassandra-ccm/cassandra-ccm-core/pom.xml @@ -171,6 +171,7 @@ <move file="${project.build.outputDirectory}/cassandra/bin/cassandra" todir="${cassandra.dir}/bin"/> <delete dir="${project.build.outputDirectory}/cassandra"/> <delete dir="${cassandra.dir}/javadoc"/> + <delete file="${cassandra.dir}/conf/cassandra-env.sh"/>
<zip basedir="${cassandra.dir}" destfile="${cassandra.distro.zip}"/> <delete dir="${cassandra.dir}"/> diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/bin/cassandra b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/bin/cassandra index 742d9c0..ddbc099 100755 --- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/bin/cassandra +++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/bin/cassandra @@ -106,8 +106,7 @@ if [ -z "$CASSANDRA_CONF" -o -z "$CLASSPATH" ]; then exit 1 fi
-if [ -f "$CASSANDRA_CONF/cassandra-env.sh" ]; then - #. "$CASSANDRA_CONF/cassandra-env.sh" +if [ -f "$CASSANDRA_CONF/cassandra-jvm.properties" ]; then . "$CASSANDRA_CONF/cassandra-jvm.properties" fi
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/rhq.cassandra-env.sh b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/rhq.cassandra-env.sh deleted file mode 100644 index 99b3128..0000000 --- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/rhq.cassandra-env.sh +++ /dev/null @@ -1,247 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -calculate_heap_sizes() -{ - case "`uname`" in - Linux) - system_memory_in_mb=`free -m | awk '/Mem:/ {print $2}'` - system_cpu_cores=`egrep -c 'processor([[:space:]]+):.*' /proc/cpuinfo` - ;; - FreeBSD) - system_memory_in_bytes=`sysctl hw.physmem | awk '{print $2}'` - system_memory_in_mb=`expr $system_memory_in_bytes / 1024 / 1024` - system_cpu_cores=`sysctl hw.ncpu | awk '{print $2}'` - ;; - SunOS) - system_memory_in_mb=`prtconf | awk '/Memory size:/ {print $3}'` - system_cpu_cores=`psrinfo | wc -l` - ;; - Darwin) - system_memory_in_bytes=`sysctl hw.memsize | awk '{print $2}'` - system_memory_in_mb=`expr $system_memory_in_bytes / 1024 / 1024` - system_cpu_cores=`sysctl hw.ncpu | awk '{print $2}'` - ;; - *) - # assume reasonable defaults for e.g. 
a modern desktop or - # cheap server - system_memory_in_mb="2048" - system_cpu_cores="2" - ;; - esac - - # some systems like the raspberry pi don't report cores, use at least 1 - if [ "$system_cpu_cores" -lt "1" ] - then - system_cpu_cores="1" - fi - - # set max heap size based on the following - # max(min(1/2 ram, 1024MB), min(1/4 ram, 8GB)) - # calculate 1/2 ram and cap to 1024MB - # calculate 1/4 ram and cap to 8192MB - # pick the max - half_system_memory_in_mb=`expr $system_memory_in_mb / 2` - quarter_system_memory_in_mb=`expr $half_system_memory_in_mb / 2` - if [ "$half_system_memory_in_mb" -gt "1024" ] - then - half_system_memory_in_mb="1024" - fi - if [ "$quarter_system_memory_in_mb" -gt "8192" ] - then - quarter_system_memory_in_mb="8192" - fi - if [ "$half_system_memory_in_mb" -gt "$quarter_system_memory_in_mb" ] - then - max_heap_size_in_mb="$half_system_memory_in_mb" - else - max_heap_size_in_mb="$quarter_system_memory_in_mb" - fi - MAX_HEAP_SIZE="${max_heap_size_in_mb}M" - - # Young gen: min(max_sensible_per_modern_cpu_core * num_cores, 1/4 * heap size) - max_sensible_yg_per_core_in_mb="100" - max_sensible_yg_in_mb=`expr $max_sensible_yg_per_core_in_mb "*" $system_cpu_cores` - - desired_yg_in_mb=`expr $max_heap_size_in_mb / 4` - - if [ "$desired_yg_in_mb" -gt "$max_sensible_yg_in_mb" ] - then - HEAP_NEWSIZE="${max_sensible_yg_in_mb}M" - else - HEAP_NEWSIZE="${desired_yg_in_mb}M" - fi -} - -# Determine the sort of JVM we'll be running on. 
- -java_ver_output=`"${JAVA:-java}" -version 2>&1` - -jvmver=`echo "$java_ver_output" | awk -F'"' 'NR==1 {print $2}'` -JVM_VERSION=${jvmver%_*} -JVM_PATCH_VERSION=${jvmver#*_} - -jvm=`echo "$java_ver_output" | awk 'NR==2 {print $1}'` -case "$jvm" in - OpenJDK) - JVM_VENDOR=OpenJDK - # this will be "64-Bit" or "32-Bit" - JVM_ARCH=`echo "$java_ver_output" | awk 'NR==3 {print $2}'` - ;; - "Java(TM)") - JVM_VENDOR=Oracle - # this will be "64-Bit" or "32-Bit" - JVM_ARCH=`echo "$java_ver_output" | awk 'NR==3 {print $3}'` - ;; - *) - # Help fill in other JVM values - JVM_VENDOR=other - JVM_ARCH=unknown - ;; -esac - - -# Override these to set the amount of memory to allocate to the JVM at -# start-up. For production use you may wish to adjust this for your -# environment. MAX_HEAP_SIZE is the total amount of memory dedicated -# to the Java heap; HEAP_NEWSIZE refers to the size of the young -# generation. Both MAX_HEAP_SIZE and HEAP_NEWSIZE should be either set -# or not (if you set one, set the other). -# -# The main trade-off for the young generation is that the larger it -# is, the longer GC pause times will be. The shorter it is, the more -# expensive GC will be (usually). -# -# The example HEAP_NEWSIZE assumes a modern 8-core+ machine for decent pause -# times. If in doubt, and if you do not particularly want to tweak, go with -# 100 MB per physical CPU core. - -#MAX_HEAP_SIZE="4G" -#HEAP_NEWSIZE="800M" - -if [ "x$MAX_HEAP_SIZE" = "x" ] && [ "x$HEAP_NEWSIZE" = "x" ]; then - calculate_heap_sizes -else - if [ "x$MAX_HEAP_SIZE" = "x" ] || [ "x$HEAP_NEWSIZE" = "x" ]; then - echo "please set or unset MAX_HEAP_SIZE and HEAP_NEWSIZE in pairs (see cassandra-env.sh)" - exit 1 - fi -fi - -# Specifies the default port over which Cassandra will be available for -# JMX connections. -JMX_PORT="${rhq.cassandra.jmx.port}" - - -# Here we create the arguments that will get passed to the jvm when -# starting cassandra. - -# enable assertions. 
disabling this in production will give a modest -# performance benefit (around 5%). -JVM_OPTS="$JVM_OPTS -ea" - -# add the jamm javaagent -if [ "$JVM_VENDOR" != "OpenJDK" -o "$JVM_VERSION" \> "1.6.0" ] \ - || [ "$JVM_VERSION" = "1.6.0" -a "$JVM_PATCH_VERSION" -ge 23 ] -then - JVM_OPTS="$JVM_OPTS -javaagent:$CASSANDRA_HOME/lib/jamm-0.2.5.jar" -fi - -# enable thread priorities, primarily so we can give periodic tasks -# a lower priority to avoid interfering with client workload -JVM_OPTS="$JVM_OPTS -XX:+UseThreadPriorities" -# allows lowering thread priority without being root. see -# http://tech.stolsvik.com/2010/01/linux-java-thread-priorities-workaround.htm... -JVM_OPTS="$JVM_OPTS -XX:ThreadPriorityPolicy=42" - -# min and max heap sizes should be set to the same value to avoid -# stop-the-world GC pauses during resize, and so that we can lock the -# heap in memory on startup to prevent any of it from being swapped -# out. -JVM_OPTS="$JVM_OPTS -Xms${rhq.cassandra.max.heap.size}" -JVM_OPTS="$JVM_OPTS -Xmx${rhq.cassandra.max.heap.size}" -JVM_OPTS="$JVM_OPTS -Xmn${rhq.cassandra.heap.new.size}" -JVM_OPTS="$JVM_OPTS -XX:+HeapDumpOnOutOfMemoryError" - -# set jvm HeapDumpPath with CASSANDRA_HEAPDUMP_DIR -if [ "x$CASSANDRA_HEAPDUMP_DIR" != "x" ]; then - JVM_OPTS="$JVM_OPTS -XX:HeapDumpPath=$CASSANDRA_HEAPDUMP_DIR/cassandra-`date +%s`-pid$$.hprof" -fi - - -startswith() { [ "${1#$2}" != "$1" ]; } - -if [ "`uname`" = "Linux" ] ; then - # reduce the per-thread stack size to minimize the impact of Thrift - # thread-per-client. (Best practice is for client connections to - # be pooled anyway.) Only do so on Linux where it is known to be - # supported. 
- # u34 and greater need 180k - JVM_OPTS="$JVM_OPTS -Xss${rhq.cassandra.stack.size}" -fi -echo "xss = $JVM_OPTS" - -# GC tuning options -JVM_OPTS="$JVM_OPTS -XX:+UseParNewGC" -JVM_OPTS="$JVM_OPTS -XX:+UseConcMarkSweepGC" -JVM_OPTS="$JVM_OPTS -XX:+CMSParallelRemarkEnabled" -JVM_OPTS="$JVM_OPTS -XX:SurvivorRatio=8" -JVM_OPTS="$JVM_OPTS -XX:MaxTenuringThreshold=1" -JVM_OPTS="$JVM_OPTS -XX:CMSInitiatingOccupancyFraction=75" -JVM_OPTS="$JVM_OPTS -XX:+UseCMSInitiatingOccupancyOnly" -JVM_OPTS="$JVM_OPTS -XX:+UseTLAB" -# note: bash evals '1.7.x' as > '1.7' so this is really a >= 1.7 jvm check -if [ "$JVM_VERSION" \> "1.7" ] ; then - JVM_OPTS="$JVM_OPTS -XX:+UseCondCardMark" -fi - -# GC logging options -- uncomment to enable -# JVM_OPTS="$JVM_OPTS -XX:+PrintGCDetails" -# JVM_OPTS="$JVM_OPTS -XX:+PrintGCDateStamps" -# JVM_OPTS="$JVM_OPTS -XX:+PrintHeapAtGC" -# JVM_OPTS="$JVM_OPTS -XX:+PrintTenuringDistribution" -# JVM_OPTS="$JVM_OPTS -XX:+PrintGCApplicationStoppedTime" -# JVM_OPTS="$JVM_OPTS -XX:+PrintPromotionFailure" -# JVM_OPTS="$JVM_OPTS -XX:PrintFLSStatistics=1" -# JVM_OPTS="$JVM_OPTS -Xloggc:/var/log/cassandra/gc-`date +%s`.log" -# If you are using JDK 6u34 7u2 or later you can enable GC log rotation -# don't stick the date in the log name if rotation is on. -# JVM_OPTS="$JVM_OPTS -Xloggc:/var/log/cassandra/gc.log" -# JVM_OPTS="$JVM_OPTS -XX:+UseGCLogFileRotation" -# JVM_OPTS="$JVM_OPTS -XX:NumberOfGCLogFiles=10" -# JVM_OPTS="$JVM_OPTS -XX:GCLogFileSize=10M" - -# uncomment to have Cassandra JVM listen for remote debuggers/profilers on port 1414 -# JVM_OPTS="$JVM_OPTS -Xdebug -Xnoagent -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=1414" - -# Prefer binding to IPv4 network intefaces (when net.ipv6.bindv6only=1). See -# http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6342561 (short version: -# comment out this entry to enable IPv6 support). 
-JVM_OPTS="$JVM_OPTS -Djava.net.preferIPv4Stack=true" - -# jmx: metrics and administration interface -# -# add this if you're having trouble connecting: -# JVM_OPTS="$JVM_OPTS -Djava.rmi.server.hostname=<public name>" -# -# see -# https://blogs.oracle.com/jmxetc/entry/troubleshooting_connection_problems_in... -# for more on configuring JMX through firewalls, etc. (Short version: -# get it working with no firewall first.) -JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT" -JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl=false" -JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.authenticate=false" -JVM_OPTS="$JVM_OPTS $JVM_EXTRA_OPTS" diff --git a/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java b/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java index 63282e4..5c5ac4e 100644 --- a/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java +++ b/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java @@ -25,8 +25,10 @@
package org.rhq.storage.installer;
+import java.io.BufferedReader; import java.io.File; import java.io.FileInputStream; +import java.io.FileReader; import java.io.IOException; import java.net.InetAddress; import java.util.ArrayList; @@ -250,15 +252,32 @@ public class StorageInstaller { File oldConfDir = new File(existingStorageDir, "conf"); File newConfDir = new File(storageBasedir, "conf");
+ File cassandraEnvFile = new File(oldConfDir, "cassandra-env.sh"); + String cassandraYaml = "cassandra.yaml"; String cassandraJvmProps = "cassandra-jvm.properties"; File cassandraJvmPropsFile = new File(newConfDir, cassandraJvmProps); String log4j = "log4j-server.properties";
replaceFile(new File(oldConfDir, cassandraYaml), new File(newConfDir, cassandraYaml)); - replaceFile(new File(oldConfDir, cassandraJvmProps), cassandraJvmPropsFile); replaceFile(new File(oldConfDir, log4j), new File(newConfDir, log4j));
+ if (cassandraEnvFile.exists()) { + // Then this is an RHQ 4.8 install + jmxPort = parseJmxPortFromCassandrEnv(cassandraEnvFile); + Properties jvmProps = new Properties(); + jvmProps.load(new FileInputStream(cassandraJvmPropsFile)); + PropertiesFileUpdate propertiesUpdater = new PropertiesFileUpdate( + cassandraJvmPropsFile.getAbsolutePath()); + jvmProps.setProperty("jmx_port", Integer.toString(jmxPort)); + + propertiesUpdater.update(jvmProps); + + } else { + jmxPort = parseJmxPort(cassandraJvmPropsFile); + replaceFile(new File(oldConfDir, cassandraJvmProps), cassandraJvmPropsFile); + } + log.info("Finished installing RHQ Storage Node.");
log.info("Updating rhq-server.properties..."); @@ -268,8 +287,6 @@ public class StorageInstaller { Map<String, Object> config = (Map<String, Object>) yaml.load(new FileInputStream(yamlFile));
hostname = (String) config.get("listen_address"); - - jmxPort = parseJmxPort(cassandraJvmPropsFile); } else { if (cmdLine.hasOption("dir")) { File basedir = new File(cmdLine.getOptionValue("dir")); @@ -661,6 +678,60 @@ public class StorageInstaller { } }
+ private int parseJmxPortFromCassandrEnv(File cassandraEnvFile) { + Integer port = null; + if (isWindows()) { + // TODO + return defaultJmxPort; + } else { + BufferedReader reader = null; + try { + reader = new BufferedReader(new FileReader(cassandraEnvFile)); + String line = reader.readLine(); + + while (line != null) { + if (line.startsWith("JMX_PORT")) { + int startIndex = "JMX_PORT="".length(); + int endIndex = line.lastIndexOf("""); + + if (startIndex == -1 || endIndex == -1) { + log.error("Failed to parse the JMX port. Make sure that you have the JMX port defined on its " + + "own line as follows, JMX_PORT="<jmx-port>""); + throw new RuntimeException("Cannot determine JMX port"); + } + try { + port = Integer.parseInt(line.substring(startIndex, endIndex)); + } catch (NumberFormatException e) { + log.error("The JMX port must be an integer. [" + port + "] is an invalid value"); + throw new RuntimeException("The JMX port has an invalid value"); + } + return port; + } + line = reader.readLine(); + } + log.error("Failed to parse the JMX port. Make sure that you have the JMX port defined on its " + + "own line as follows, JMX_PORT="<jmx-port>""); + throw new RuntimeException("Cannot determine JMX port"); + } catch (IOException e) { + log.error("Failed to parse JMX port. 
There was an unexpected IO error", e); + throw new RuntimeException("Failed to parse JMX port due to IO error: " + e.getMessage()); + } finally { + try { + if (reader != null) { + reader.close(); + } + } catch (IOException e) { + if (log.isDebugEnabled()) { + log.debug("An error occurred closing the " + BufferedReader.class.getName() + " used to " + + "parse the JMX port", e); + } else { + log.warn("There was error closing the reader used to parse the JMX port: " + e.getMessage()); + } + } + } + } + } + private int parseJmxPort(File cassandraJvmOptsFile) { Integer port = null; if (isWindows()) { diff --git a/modules/common/cassandra-installer/src/test/java/org/rhq/storage/installer/StorageInstallerTest.java b/modules/common/cassandra-installer/src/test/java/org/rhq/storage/installer/StorageInstallerTest.java index 3fd13c7..a7921c0 100644 --- a/modules/common/cassandra-installer/src/test/java/org/rhq/storage/installer/StorageInstallerTest.java +++ b/modules/common/cassandra-installer/src/test/java/org/rhq/storage/installer/StorageInstallerTest.java @@ -2,11 +2,13 @@ package org.rhq.storage.installer;
import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail;
import java.io.File; import java.io.FileInputStream; +import java.io.FileOutputStream; import java.io.IOException; import java.lang.reflect.Method; import java.util.Properties; @@ -20,41 +22,53 @@ import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test;
import org.rhq.cassandra.CassandraClusterManager; +import org.rhq.core.util.MessageDigestGenerator; import org.rhq.core.util.file.FileUtil; +import org.rhq.core.util.stream.StreamUtil;
/** * @author John Sanda */ public class StorageInstallerTest {
+ private MessageDigestGenerator digestGenerator; + private File basedir;
+ private File serverDir; + private File storageDir;
private StorageInstaller installer;
@BeforeMethod public void initDirs(Method test) throws Exception { + digestGenerator = new MessageDigestGenerator(MessageDigestGenerator.SHA_256); + File dir = new File(getClass().getResource(".").toURI()); basedir = new File(dir, getClass().getSimpleName() + "/" + test.getName()); FileUtil.purge(basedir, true); basedir.mkdirs();
- System.setProperty("rhq.server.basedir", basedir.getAbsolutePath()); + serverDir = new File(basedir, "rhq-server");
- File serverPropsFile = new File(basedir, "rhq-server.properties"); + System.setProperty("rhq.server.basedir", serverDir.getAbsolutePath()); + + File serverPropsFile = new File(serverDir, "rhq-server.properties"); FileUtils.touch(serverPropsFile); System.setProperty("rhq.server.properties-file", serverPropsFile.getAbsolutePath());
- storageDir = new File(basedir, "rhq-storage"); + storageDir = new File(serverDir, "rhq-storage");
installer = new StorageInstaller(); }
@AfterMethod public void shutdownStorageNode() throws Exception { - CassandraClusterManager ccm = new CassandraClusterManager(); - ccm.killNode(storageDir); + if (FileUtils.getFile(storageDir, "bin", "cassandra.pid").exists()) { + CassandraClusterManager ccm = new CassandraClusterManager(); + ccm.killNode(storageDir); + } }
@Test @@ -97,6 +111,65 @@ public class StorageInstallerTest { assertTrue(savedCachesDir.exists(), "Expected to find saved_caches directory at " + savedCachesDir); }
+ @Test + public void upgradeFromRHQ48Install() throws Exception { + File rhq48ServerDir = new File(basedir, "rhq48-server"); + File rhq48StorageDir = new File(rhq48ServerDir, "rhq-storage"); + File rhq48StorageConfDir = new File(rhq48StorageDir, "conf"); + + File oldCassandraYamlFile = new File(rhq48StorageConfDir, "cassandra.yaml"); + File oldCassandraEnvFile = new File(rhq48StorageConfDir, "cassandra-env.sh"); + File oldLog4JFile = new File(rhq48StorageConfDir, "log4j-server.properties"); + + rhq48StorageConfDir.mkdirs(); + StreamUtil.copy(getClass().getResourceAsStream("/rhq48/storage/conf/cassandra.yaml"), + new FileOutputStream(oldCassandraYamlFile), true); + StreamUtil.copy(getClass().getResourceAsStream("/rhq48/storage/conf/cassandra-env.sh"), + new FileOutputStream(oldCassandraEnvFile)); + StreamUtil.copy(getClass().getResourceAsStream("/rhq48/storage/conf/log4j-server.properties"), + new FileOutputStream(oldLog4JFile)); + + CommandLineParser parser = new PosixParser(); + + String[] args = { + "--upgrade", rhq48ServerDir.getAbsolutePath(), + "--dir", storageDir.getAbsolutePath() + }; + + CommandLine cmdLine = parser.parse(installer.getOptions(), args); + int status = installer.run(cmdLine); + + assertEquals(status, 0, "Expected to get back a status code of 0 for a successful upgrade"); + assertNodeIsRunning(); + + File binDir = new File(storageDir, "bin"); + assertTrue(binDir.exists(), "Expected to find bin directory at " + binDir); + + File libDir = new File(storageDir, "lib"); + assertTrue(libDir.exists(), "Expected to find lib directory at " + libDir); + + File confDir = new File(storageDir, "conf"); + assertTrue(confDir.exists(), "Expected to find conf directory at " + confDir); + + File newCassandraYamlFile = new File(confDir, "cassandra.yaml"); + assertEquals(sha256(oldCassandraYamlFile), sha256(newCassandraYamlFile), newCassandraYamlFile + + " does not match the original version"); + + File newLog4JFile = new File(confDir, 
"log4j-server.properties"); + assertEquals(sha256(oldLog4JFile), sha256(newLog4JFile), newLog4JFile + " does not match the original version"); + + assertFalse(new File(confDir, "cassandra-env.sh").exists(), "cassandra-env.sh should not be used after RHQ 4.8.0"); + + File cassandraJvmPropsFile = new File(confDir, "cassandra-jvm.properties"); + Properties properties = new Properties(); + properties.load(new FileInputStream(cassandraJvmPropsFile)); + + // If this check fails, make sure that the expected value matches the value in + // src/test/resources/rhq48/storage/conf/cassandra-env.sh + assertEquals(properties.getProperty("jmx_port"), "7399", "Failed to update the JMX port in " + + cassandraJvmPropsFile); + } + private void assertNodeIsRunning() { try { installer.verifyNodeIsUp("127.0.0.1", 7299, 3, 1000); @@ -106,7 +179,7 @@ public class StorageInstallerTest { }
private void assertRhqServerPropsUpdated() { - File serverPropsFile = new File(basedir, "rhq-server.properties"); + File serverPropsFile = new File(serverDir, "rhq-server.properties"); Properties properties = new Properties();
try { @@ -120,4 +193,12 @@ public class StorageInstallerTest { assertEquals(seeds, "127.0.0.1|7299|9142"); }
+ private String sha256(File file) { + try { + return digestGenerator.calcDigestString(file); + } catch (IOException e) { + throw new RuntimeException("Failed to calculate SHA-256 hash for " + file.getPath(), e); + } + } + }
commit 8ad6e86b9ba704eb468646fd7fffcf5dd082e2c4 Author: Heiko W. Rupp hwr@redhat.com Date: Fri Jul 19 18:13:31 2013 +0200
Add annotation and processing for configuration properties. Also shuffle some classes around.
diff --git a/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/ConfigProperty.java b/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/ConfigProperty.java new file mode 100644 index 0000000..4b5a3ab --- /dev/null +++ b/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/ConfigProperty.java @@ -0,0 +1,49 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +package org.rhq.helpers.pluginAnnotations.agent; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * A configuration property for resource or plugin config. + * Currently only property simple are supported. + * @author Heiko W. 
Rupp + */ +@Retention(RetentionPolicy.RUNTIME) +@Target( { ElementType.FIELD}) +public @interface ConfigProperty { + + public Scope scope() default Scope.PLUGIN; + String property() default ""; + String displayName() default ""; + String description() default ""; + boolean readOnly() default false; + String defaultValue() default ""; + RhqType rhqType() default RhqType.VOID; + + + public enum Scope { + PLUGIN, + RESOURCE; + } +} diff --git a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/AnnotationProcessor.java b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/AnnotationProcessor.java index 3c90b7b..16ec437 100644 --- a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/AnnotationProcessor.java +++ b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/AnnotationProcessor.java @@ -19,15 +19,23 @@
package org.rhq.helpers.pluginGen;
+import java.lang.annotation.Annotation; +import java.lang.reflect.Field; +import java.lang.reflect.Method; import java.util.List;
+import org.rhq.helpers.pluginAnnotations.agent.ConfigProperty; +import org.rhq.helpers.pluginAnnotations.agent.Metric; +import org.rhq.helpers.pluginAnnotations.agent.Operation; +import org.rhq.helpers.pluginAnnotations.agent.Parameter; +import org.rhq.helpers.pluginAnnotations.agent.RhqType; + /** * Processor that scans a directory for annotated classes and generates metrics etc. from them. * @author Heiko W. Rupp */ public class AnnotationProcessor {
- private List<Class> classList; private final DirectoryClassLoader classLoader;
public AnnotationProcessor(String baseDirectory) { @@ -36,9 +44,119 @@ public class AnnotationProcessor { }
public void populate(Props props) { - classList = classLoader.findClasses(); + List<Class> classList = classLoader.findClasses(); + + populateMetrics(props, classList); + populateOperations(props, classList); + populateConfigurations(props, classList); + } + + public void populateMetrics(Props props, List<Class> classes) { + for (Class<?> clazz : classes) { + for (Field field : clazz.getDeclaredFields()) { + Metric metricAnnot = field.getAnnotation(Metric.class); + addMetric(props, metricAnnot, field.getName()); + } + + for (Method method : clazz.getDeclaredMethods()) { + Metric metricAnnot = method.getAnnotation(Metric.class); + addMetric(props, metricAnnot, method.getName()); + } + } + } + + public void populateOperations(Props props, List<Class> classes) { + for (Class<?> clazz : classes) { + for (Method method : clazz.getDeclaredMethods()) { + Operation operationAnnot = method.getAnnotation(Operation.class); + if (operationAnnot != null) { + String property = operationAnnot.name(); + if (property.isEmpty()) { + property = method.getName(); + } + Props.OperationProps op = new Props.OperationProps(property); + op.setDisplayName(operationAnnot.displayName()); + op.setDescription(operationAnnot.description()); + RhqType type = RhqType.findType(method.getReturnType()); + if (type != RhqType.VOID) { + Props.SimpleProperty simpleProperty = new Props.SimpleProperty(type.getRhqName()); + op.setResult(simpleProperty); + }
- props.populateMetrics(classList); - props.populateOperations(classList); + Class[] types = method.getParameterTypes(); + int i=0; + for (Annotation[] annotations : method.getParameterAnnotations() ) { + for (Annotation annotation : annotations) { + if (annotation instanceof Parameter) { + Parameter parameter = (Parameter) annotation; + Props.SimpleProperty simpleProperty = new Props.SimpleProperty(parameter.name()); + simpleProperty.setDescription(parameter.description()); + Class typeClass = types[i]; + RhqType rhqType = RhqType.findType(typeClass); + if (parameter.type()!=RhqType.VOID){ + rhqType = parameter.type(); + } + simpleProperty.setType(rhqType.getRhqName()); + op.getParams().add(simpleProperty); + } + } + i++; + } + props.getOperations().add(op); + } + + } + } + } + + public void populateConfigurations(Props props, List<Class> classes) { + for (Class<?> clazz : classes) { + for (Field field : clazz.getDeclaredFields()) { + ConfigProperty configProperty = field.getAnnotation(ConfigProperty.class); + if (configProperty!=null) { + String name = configProperty.property(); + if(name.isEmpty()) { + name = field.getName(); + } + Props.SimpleProperty property = new Props.SimpleProperty(name); + property.setDescription(configProperty.description()); + property.setDisplayName(configProperty.displayName()); + Class type = field.getType(); + RhqType rhqType = RhqType.findType(type); + if (configProperty.rhqType()!=RhqType.VOID) { + rhqType = configProperty.rhqType(); + } + property.setType(rhqType.getRhqName()); + + switch (configProperty.scope()){ + case PLUGIN: + props.getPluginConfig().add(property); + break; + case RESOURCE: + props.getResourceConfig().add(property); + break; + default: + throw new IllegalStateException("Unknown scope: " +configProperty.scope().name()); + } + } + } + } } + + private void addMetric(Props props, Metric metricAnnot, String name) { + if (metricAnnot != null) { + String property = metricAnnot.property(); + if (property.isEmpty()) { 
+ property = name; + } + Props.MetricProps metric = new Props.MetricProps(property); + metric.setDisplayName(metricAnnot.displayName()); + metric.setDisplayType(metricAnnot.displayType()); + metric.setDataType(metricAnnot.dataType()); + metric.setDescription(metricAnnot.description()); + metric.setUnits(metricAnnot.units()); + props.getMetrics().add(metric); + } + } + } diff --git a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Props.java b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Props.java index 95be574..c88fbc0 100644 --- a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Props.java +++ b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Props.java @@ -18,20 +18,12 @@ */ package org.rhq.helpers.pluginGen;
-import java.lang.annotation.Annotation; -import java.lang.reflect.Field; -import java.lang.reflect.Method; import java.util.HashSet; import java.util.LinkedHashSet; -import java.util.List; import java.util.Set;
import org.rhq.helpers.pluginAnnotations.agent.DataType; import org.rhq.helpers.pluginAnnotations.agent.DisplayType; -import org.rhq.helpers.pluginAnnotations.agent.Metric; -import org.rhq.helpers.pluginAnnotations.agent.Operation; -import org.rhq.helpers.pluginAnnotations.agent.Parameter; -import org.rhq.helpers.pluginAnnotations.agent.RhqType; import org.rhq.helpers.pluginAnnotations.agent.Units;
/** @@ -94,7 +86,8 @@ public class Props { /** Embedded children */ private Set<Props> children = new HashSet<Props>();
- private Set<SimpleProperty> simpleProps = new LinkedHashSet<SimpleProperty>(); + private Set<SimpleProperty> pluginConfig = new LinkedHashSet<SimpleProperty>(); + private Set<SimpleProperty> resourceConfig = new LinkedHashSet<SimpleProperty>();
private Set<Template> templates = new HashSet<Template>();
@@ -291,12 +284,12 @@ public class Props { this.rhqVersion = rhqVersion; }
- public Set<SimpleProperty> getSimpleProps() { - return simpleProps; + public Set<SimpleProperty> getPluginConfig() { + return pluginConfig; }
- public void setSimpleProps(Set<SimpleProperty> simpleProps) { - this.simpleProps = simpleProps; + public void setPluginConfig(Set<SimpleProperty> pluginConfig) { + this.pluginConfig = pluginConfig; }
public Set<Template> getTemplates() { @@ -307,7 +300,15 @@ public class Props { this.templates = templates; }
- public Set<MetricProps> getMetrics() { + public Set<SimpleProperty> getResourceConfig() { + return resourceConfig; + } + + public void setResourceConfig(Set<SimpleProperty> resourceConfig) { + this.resourceConfig = resourceConfig; + } + + public Set<MetricProps> getMetrics() { return metrics; }
@@ -363,79 +364,6 @@ public class Props { this.scanForAnnotations = scanForAnnotations; }
- public void populateMetrics(List<Class> classes) { - for (Class<?> clazz : classes) { - for (Field field : clazz.getDeclaredFields()) { - Metric metricAnnot = field.getAnnotation(Metric.class); - addMetric(metricAnnot, field.getName()); - } - - for (Method method : clazz.getDeclaredMethods()) { - Metric metricAnnot = method.getAnnotation(Metric.class); - addMetric(metricAnnot, method.getName()); - } - } - } - - public void populateOperations(List<Class> classes) { - for (Class<?> clazz : classes) { - for (Method method : clazz.getDeclaredMethods()) { - Operation operationAnnot = method.getAnnotation(Operation.class); - if (operationAnnot != null) { - String property = operationAnnot.name(); - if (property.isEmpty()) { - property = method.getName(); - } - OperationProps op = new OperationProps(property); - op.setDisplayName(operationAnnot.displayName()); - op.setDescription(operationAnnot.description()); - RhqType type = RhqType.findType(method.getReturnType()); - if (type != RhqType.VOID) { - SimpleProperty simpleProperty = new SimpleProperty(type.getRhqName()); - op.setResult(simpleProperty); - } - - Class[] types = method.getParameterTypes(); - int i=0; - for (Annotation[] annotations : method.getParameterAnnotations() ) { - for (Annotation annotation : annotations) { - if (annotation instanceof Parameter) { - Parameter parameter = (Parameter) annotation; - SimpleProperty simpleProperty = new SimpleProperty(parameter.name()); - simpleProperty.setDescription(parameter.description()); - Class typeClass = types[i]; - RhqType rhqType = RhqType.findType(typeClass); - if (parameter.type()!=RhqType.VOID){ - rhqType = parameter.type(); - } - simpleProperty.setType(rhqType.getRhqName()); - op.getParams().add(simpleProperty); - } - } - i++; - } - operations.add(op); - } - - } - } - } - - private void addMetric(Metric metricAnnot, String name) { - if (metricAnnot != null) { - String property = metricAnnot.property(); - if (property.isEmpty()) { - property = name; - } - 
MetricProps metric = new MetricProps(property); - metric.setDisplayName(metricAnnot.displayName()); - metric.setDisplayType(metricAnnot.displayType()); - metric.setDataType(metricAnnot.dataType()); - metric.setDescription(metricAnnot.description()); - metric.setUnits(metricAnnot.units()); - metrics.add(metric); - } - }
@Override public String toString() { @@ -466,7 +394,7 @@ public class Props { sb.append(", dependsOnJmxPlugin=").append(dependsOnJmxPlugin); sb.append(", rhqVersion='").append(rhqVersion).append('''); sb.append(", children=").append(children); - sb.append(", simpleProps=").append(simpleProps); + sb.append(", simpleProps=").append(pluginConfig); sb.append(", templates=").append(templates); sb.append(", runsInsides=").append(runsInsides); sb.append('}'); diff --git a/modules/helpers/pluginGen/src/main/resources/descriptorMain.ftl b/modules/helpers/pluginGen/src/main/resources/descriptorMain.ftl index 5579806..ce35ebe 100644 --- a/modules/helpers/pluginGen/src/main/resources/descriptorMain.ftl +++ b/modules/helpers/pluginGen/src/main/resources/descriptorMain.ftl @@ -45,9 +45,9 @@ name="${props.name}" </runs-inside> </#if>
- <#if props.simpleProps?has_content> + <#if props.pluginConfig?has_content> <plugin-configuration> - <#list props.simpleProps as simpleProps> + <#list props.pluginConfig as simpleProps> <c:simple-property name="${simpleProps.name}" description="${simpleProps.description}" <#if simpleProps.type??>type="${simpleProps.type}"</#if> <#if simpleProps.readOnly>readOnly="true"</#if>/> </#list> <!-- The template section is only for manual resource additions, and default parameters and the ones presented to the user. --> @@ -106,4 +106,12 @@ name="${props.name}" <!-- TODO supply your configuration parameters --> <c:simple-property name="dummy"/> </resource-configuration> - </#if> \ No newline at end of file + </#if> + +<#if props.resourceConfig?has_content> + <resource-configuration> + <#list props.resourceConfig as simpleProps> + <c:simple-property name="${simpleProps.name}" description="${simpleProps.description}" <#if simpleProps.type??>type="${simpleProps.type}"</#if> <#if simpleProps.readOnly>readOnly="true"</#if>/> + </#list> + </resource-configuration> +</#if> diff --git a/modules/helpers/pluginGen/src/test/java/org/rhq/helpers/pluginGen/test/FooBean.java b/modules/helpers/pluginGen/src/test/java/org/rhq/helpers/pluginGen/test/FooBean.java index fbc571b..5027dbb 100644 --- a/modules/helpers/pluginGen/src/test/java/org/rhq/helpers/pluginGen/test/FooBean.java +++ b/modules/helpers/pluginGen/src/test/java/org/rhq/helpers/pluginGen/test/FooBean.java @@ -19,6 +19,7 @@
package org.rhq.helpers.pluginGen.test;
+import org.rhq.helpers.pluginAnnotations.agent.ConfigProperty; import org.rhq.helpers.pluginAnnotations.agent.DataType; import org.rhq.helpers.pluginAnnotations.agent.DisplayType; import org.rhq.helpers.pluginAnnotations.agent.MeasurementType; @@ -26,6 +27,7 @@ import org.rhq.helpers.pluginAnnotations.agent.Metric; import org.rhq.helpers.pluginAnnotations.agent.Operation; import org.rhq.helpers.pluginAnnotations.agent.Parameter; import org.rhq.helpers.pluginAnnotations.agent.RhqType; +import org.rhq.helpers.pluginAnnotations.agent.Units;
/** * Just a sample @@ -34,7 +36,8 @@ import org.rhq.helpers.pluginAnnotations.agent.RhqType;
public class FooBean {
- @Metric(description = "How often was this bean invoked", displayType = DisplayType.SUMMARY, measurementType = MeasurementType.DYNAMIC) + @Metric(description = "How often was this bean invoked", displayType = DisplayType.SUMMARY, measurementType = MeasurementType.DYNAMIC, + units = Units.SECONDS) int invocationCount;
@Metric(description = "Just a foo", dataType = DataType.TRAIT) @@ -51,5 +54,11 @@ public class FooBean { invocationCount -= by; }
+ @ConfigProperty(scope = ConfigProperty.Scope.PLUGIN, displayName="The Password", + readOnly = false, property="thePassword",description = "A password", rhqType = RhqType.PASSWORD) + String password; + + @ConfigProperty(scope = ConfigProperty.Scope.RESOURCE) + int defaultSteps;
}
commit 039a42a2e696bb1bb0b170bec87ed95fe13921e2 Author: Jirka Kremser jkremser@redhat.com Date: Fri Jul 19 16:40:48 2013 +0200
API Checks - Changing StorageNode.QUERY_FIND_BY_ADDRESS. The constants for native queries shouldn't be part of the public API.
diff --git a/modules/core/domain/intentional-api-changes-since-4.8.0.xml b/modules/core/domain/intentional-api-changes-since-4.8.0.xml index f21a45f..a8a77b4 100644 --- a/modules/core/domain/intentional-api-changes-since-4.8.0.xml +++ b/modules/core/domain/intentional-api-changes-since-4.8.0.xml @@ -1,3 +1,11 @@ <?xml version="1.0"?> <differences> + <difference> + <className>org/rhq/core/domain/cloud/StorageNode</className> + <differenceType>6003</differenceType><!-- Value of compile-time constant has changed --> + <field>QUERY_FIND_BY_ADDRESS</field> + <justification> + Changing StorageNode.QUERY_FIND_BY_ADDRESS. The constants for native queries shouldn't be part of the public API. + </justification> + </difference> </differences>
commit 78eb557ae8f799b628769d76ccece61b6cb452a4 Author: Jirka Kremser jkremser@redhat.com Date: Fri Jul 19 16:17:53 2013 +0200
[BZ 959587] - Alert definition should display units when entering a value; e.g. 'seconds' or 'megabytes' - Adding "BaseUnits" field to the popup form. The tooltip displays all allowed units from the same MeasurementUnits.Family.
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/measurement/MeasurementUnits.java b/modules/core/domain/src/main/java/org/rhq/core/domain/measurement/MeasurementUnits.java index 31811ab..d66f23b 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/measurement/MeasurementUnits.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/measurement/MeasurementUnits.java @@ -22,6 +22,9 @@ */ package org.rhq.core.domain.measurement;
+import java.util.ArrayList; +import java.util.List; + import org.rhq.core.domain.measurement.util.MeasurementConversionException;
/** @@ -117,6 +120,16 @@ public enum MeasurementUnits {
return null; } + + public List<MeasurementUnits> getFamilyUnits() { + List<MeasurementUnits> returnList = new ArrayList<MeasurementUnits>(); + for (MeasurementUnits units : MeasurementUnits.values()) { + if (units.family == family) { + returnList.add(units); + } + } + return returnList; + }
public boolean isComparableTo(MeasurementUnits other) { return family == other.family; diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/definitions/ConditionEditor.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/definitions/ConditionEditor.java index 870140d..f4fbcad 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/definitions/ConditionEditor.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/definitions/ConditionEditor.java @@ -44,6 +44,8 @@ import com.smartgwt.client.widgets.form.fields.SelectItem; import com.smartgwt.client.widgets.form.fields.SpacerItem; import com.smartgwt.client.widgets.form.fields.StaticTextItem; import com.smartgwt.client.widgets.form.fields.TextItem; +import com.smartgwt.client.widgets.form.fields.events.ChangedEvent; +import com.smartgwt.client.widgets.form.fields.events.ChangedHandler; import com.smartgwt.client.widgets.layout.HLayout; import com.smartgwt.client.widgets.toolbar.ToolStrip;
@@ -613,6 +615,7 @@ public class ConditionEditor extends EnhancedVLayout { } absoluteValue.setShowIfCondition(ifFunc); formItems.add(absoluteValue); + formItems.add(buildBaseUnitsItem(metricDropDownMenu, ifFunc, editMode)); } else { String noMetricsStr = MSG.view_alert_definition_condition_editor_metric_nometrics(); StaticTextItem noMetrics = buildHelpTextItem(THRESHOLD_NO_METRICS_ITEMNAME, noMetricsStr, ifFunc); @@ -663,6 +666,7 @@ public class ConditionEditor extends EnhancedVLayout {
formItems.add(absoluteLowValue); formItems.add(absoluteHighValue); + formItems.add(buildBaseUnitsItem(metricDropDownMenu, ifFunc, editMode)); } else { String noMetricsStr = MSG.view_alert_definition_condition_editor_metric_nometrics(); StaticTextItem noMetrics = buildHelpTextItem(RANGE_NO_METRICS_ITEMNAME, noMetricsStr, ifFunc); @@ -1220,6 +1224,40 @@ public class ConditionEditor extends EnhancedVLayout { return comparatorSelection; }
+ private StaticTextItem buildBaseUnitsItem(final SelectItem metricDropDownMenu, FormItemIfFunction ifFunc, + boolean editMode) { + String baseUnits = MSG.view_alert_definition_condition_editor_common_baseUnits(); + final StaticTextItem baseUnitsItem = new StaticTextItem("baseUnits", baseUnits); + baseUnitsItem.setHoverWidth(200); + baseUnitsItem.setShowIfCondition(ifFunc); + + metricDropDownMenu.addChangedHandler(new ChangedHandler() { + public void onChanged(ChangedEvent event) { + MeasurementDefinition measDef = getMeasurementDefinition(form.getValueAsString(metricDropDownMenu + .getName())); + baseUnitsItem.setValue(measDef.getUnits() == MeasurementUnits.NONE ? MSG + .view_alert_definition_condition_editor_common_baseUnits_none() + : measDef.getUnits() == MeasurementUnits.MILLISECONDS ? MeasurementUnits.SECONDS : measDef + .getUnits()); + List<MeasurementUnits> availableUnits = measDef.getUnits().getFamilyUnits(); + baseUnitsItem.setTooltip(MSG.view_alert_definition_condition_editor_common_baseUnits_availableUnits() + + (availableUnits.isEmpty() || availableUnits.get(0) == MeasurementUnits.NONE ? MSG + .view_alert_definition_condition_editor_common_baseUnits_none() : availableUnits)); + } + }); + // initialize the field with proper value + MeasurementUnits units = editMode ? existingCondition.getMeasurementDefinition().getUnits() + : ConditionEditor.this.resourceType.getMetricDefinitions().iterator().next().getUnits(); + baseUnitsItem.setValue(units == MeasurementUnits.NONE ? MSG + .view_alert_definition_condition_editor_common_baseUnits_none() + : units == MeasurementUnits.MILLISECONDS ? MeasurementUnits.SECONDS : units); + List<MeasurementUnits> availableUnits = units.getFamilyUnits(); + baseUnitsItem.setTooltip(MSG.view_alert_definition_condition_editor_common_baseUnits_availableUnits() + + (availableUnits.isEmpty() || availableUnits.get(0) == MeasurementUnits.NONE ? 
MSG + .view_alert_definition_condition_editor_common_baseUnits_none() : availableUnits)); + return baseUnitsItem; + } + private StaticTextItem buildHelpTextItem(String itemName, String helpText, FormItemIfFunction ifFunc) { StaticTextItem help = new StaticTextItem(itemName); help.setShowTitle(false); diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties index ed6b130..19d3fa5 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties @@ -919,6 +919,9 @@ view_alert_definition_condition_editor_availabilityDuration_tooltip_duration = T view_alert_definition_condition_editor_availability_tooltip = Specify the availability state change that will trigger the condition. 
view_alert_definition_condition_editor_availability_value = Availability view_alert_definition_condition_editor_common_avg = Average +view_alert_definition_condition_editor_common_baseUnits = Base Units +view_alert_definition_condition_editor_common_baseUnits_availableUnits = Available units: +view_alert_definition_condition_editor_common_baseUnits_none = None view_alert_definition_condition_editor_common_max = Maximum view_alert_definition_condition_editor_common_min = Minimum view_alert_definition_condition_editor_common_regex = Regular Expression diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties index 1950003..f71f907 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties @@ -933,6 +933,9 @@ view_alert_definition_condition_editor_availabilityDuration_tooltip_duration = P view_alert_definition_condition_editor_availability_tooltip = Specifikujte stav dostupnosti, kterÃœ poté splnà podmÃnku. 
view_alert_definition_condition_editor_availability_value = Dostupnost view_alert_definition_condition_editor_common_avg = PrůmÄr +view_alert_definition_condition_editor_common_baseUnits = Základnà jednotky +view_alert_definition_condition_editor_common_baseUnits_availableUnits = Dostupné jednotky: +view_alert_definition_condition_editor_common_baseUnits_none = Źádné view_alert_definition_condition_editor_common_max = Maximum view_alert_definition_condition_editor_common_min = Minimum view_alert_definition_condition_editor_common_regex = Regulárnà vÃœraz diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties index bd8e0b4..524dcc0 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties @@ -844,6 +844,9 @@ view_alert_definition_condition_editor_avilability_option_up = Wird verfÃŒgbar view_alert_definition_condition_editor_avilability_tooltip = Geben Sie die Ãnderung der VerfÃŒgbarkeit an, die die Bedingung auslösen soll. 
view_alert_definition_condition_editor_avilability_value = VerfÃŒgbarkeit view_alert_definition_condition_editor_common_avg = Durchschnitt +##view_alert_definition_condition_editor_common_baseUnits = Base Units +##view_alert_definition_condition_editor_common_baseUnits_availableUnits = Available units: +##view_alert_definition_condition_editor_common_baseUnits_none = None view_alert_definition_condition_editor_common_max = Maximum view_alert_definition_condition_editor_common_min = Minimum view_alert_definition_condition_editor_common_regex = RegulÀrer Ausdruck diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties index 1c0005b..cb0c35f 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties @@ -908,6 +908,9 @@ view_alert_definition_condition_editor_availabilityDuration_tooltip_duration = view_alert_definition_condition_editor_availability_tooltip = æ¡ä»¶ã®ããªã¬ãŒãšãªãã¢ãã€ã©ããªãã£ç¶æ ã®å€åãæå®ããŸã view_alert_definition_condition_editor_availability_value = ã¢ãã€ã©ããªã㣠view_alert_definition_condition_editor_common_avg = å¹³å +##view_alert_definition_condition_editor_common_baseUnits = Base Units +##view_alert_definition_condition_editor_common_baseUnits_availableUnits = Available units: +##view_alert_definition_condition_editor_common_baseUnits_none = None view_alert_definition_condition_editor_common_max = æ倧 view_alert_definition_condition_editor_common_min = æå° view_alert_definition_condition_editor_common_regex = æ£èŠè¡šçŸ diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties 
b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties index 5118a2d..32dc73d 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties @@ -797,6 +797,9 @@ view_alert_definition_condition_editor_availabilityDuration_tooltip_duration = view_alert_definition_condition_editor_availability_tooltip = 조걎 ížëŠ¬ê±°íë ê°ì©ì± ìíì ë³í륌 ì§ì í©ëë€. view_alert_definition_condition_editor_availability_value = ê°ì©ì± view_alert_definition_condition_editor_common_avg = íê· +##view_alert_definition_condition_editor_common_baseUnits = Base Units +##view_alert_definition_condition_editor_common_baseUnits_availableUnits = Available units: +##view_alert_definition_condition_editor_common_baseUnits_none = None view_alert_definition_condition_editor_common_max = ìµë view_alert_definition_condition_editor_common_min = ìµì view_alert_definition_condition_editor_common_regex = ì ê· ííì diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties index bfd95e8..f4419e1 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties @@ -916,6 +916,9 @@ view_alert_common_tab_recovery = Recupera\u00E7\u00E3o view_alert_definition_condition_editor_availability_tooltip = Especifica a mudan\u00E7a de estado na disponibilidade do recurso que ir\u00E1 disparar a condi\u00E7\u00E3o. 
view_alert_definition_condition_editor_availability_value = Disponibilidade view_alert_definition_condition_editor_common_avg = M\u00E9dio +##view_alert_definition_condition_editor_common_baseUnits = Base Units +##view_alert_definition_condition_editor_common_baseUnits_availableUnits = Available units: +##view_alert_definition_condition_editor_common_baseUnits_none = None view_alert_definition_condition_editor_common_max = M\u00E1ximo view_alert_definition_condition_editor_common_min = M\u00EDnimo view_alert_definition_condition_editor_common_regex = Express\u00E3o Regular diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties index e03e1a9..d75b76e 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties @@ -881,6 +881,9 @@ #view_alert_definition_condition_editor_availability_tooltip = Specify the availability state change that will trigger the condition. #view_alert_definition_condition_editor_availability_value = Availability #view_alert_definition_condition_editor_common_avg = Average +##view_alert_definition_condition_editor_common_baseUnits = Base Units +##view_alert_definition_condition_editor_common_baseUnits_availableUnits = Available units: +##view_alert_definition_condition_editor_common_baseUnits_none = None #view_alert_definition_condition_editor_common_max = Maximum #view_alert_definition_condition_editor_common_min = Minimum #view_alert_definition_condition_editor_delete_confirm = Delete the selected alert condition(s)? 
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties index 82ddc2a..a9a24df 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties @@ -902,6 +902,9 @@ view_alert_common_tab_recovery = \u8fd8\u539f view_alert_definition_condition_editor_availability_tooltip = Specify the availability state change that will trigger the condition. view_alert_definition_condition_editor_availability_value = \u53ef\u7528\u6027 view_alert_definition_condition_editor_common_avg = \u5e73\u5747 +##view_alert_definition_condition_editor_common_baseUnits = Base Units +##view_alert_definition_condition_editor_common_baseUnits_availableUnits = Available units: +##view_alert_definition_condition_editor_common_baseUnits_none = None view_alert_definition_condition_editor_common_max = \u6700\u5927 view_alert_definition_condition_editor_common_min = \u6700\u5c0f view_alert_definition_condition_editor_common_regex = \u6b63\u5219\u8868\u8fbe\u5f0f
commit 1f6cf05d57599ded2d196241284b4b5167886c80 Author: Jirka Kremser jkremser@redhat.com Date: Fri Jul 19 12:43:35 2013 +0200
[BZÂ 980091] - link in the alert to the parent on which the alert was created - Adding the link to AlertDetailsView if the alert is defined by parent definition. Parent definition could mean group alert definition, autogroup alert definition or template definition
diff --git a/modules/enterprise/gui/coregui/.externalToolBuilders/org.eclipse.wst.jsdt.core.javascriptValidator.launch b/modules/enterprise/gui/coregui/.externalToolBuilders/org.eclipse.wst.jsdt.core.javascriptValidator.launch new file mode 100644 index 0000000..627021f --- /dev/null +++ b/modules/enterprise/gui/coregui/.externalToolBuilders/org.eclipse.wst.jsdt.core.javascriptValidator.launch @@ -0,0 +1,7 @@ +<?xml version="1.0" encoding="UTF-8" standalone="no"?> +<launchConfiguration type="org.eclipse.ant.AntBuilderLaunchConfigurationType"> +<booleanAttribute key="org.eclipse.ui.externaltools.ATTR_BUILDER_ENABLED" value="false"/> +<stringAttribute key="org.eclipse.ui.externaltools.ATTR_DISABLED_BUILDER" value="org.eclipse.wst.jsdt.core.javascriptValidator"/> +<mapAttribute key="org.eclipse.ui.externaltools.ATTR_TOOL_ARGUMENTS"/> +<booleanAttribute key="org.eclipse.ui.externaltools.ATTR_TRIGGERS_CONFIGURED" value="true"/> +</launchConfiguration> diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/AlertDataSource.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/AlertDataSource.java index 17dd6d6..d66baea 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/AlertDataSource.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/AlertDataSource.java @@ -57,6 +57,7 @@ import org.rhq.core.domain.util.PageList; import org.rhq.enterprise.gui.coregui.client.CoreGUI; import org.rhq.enterprise.gui.coregui.client.ImageManager; import org.rhq.enterprise.gui.coregui.client.LinkManager; +import org.rhq.enterprise.gui.coregui.client.admin.templates.AlertDefinitionTemplateTypeView; import org.rhq.enterprise.gui.coregui.client.components.form.DateFilterItem; import org.rhq.enterprise.gui.coregui.client.components.table.TimestampCellFormatter; import 
org.rhq.enterprise.gui.coregui.client.gwt.AlertGWTServiceAsync; @@ -74,6 +75,8 @@ import org.rhq.enterprise.gui.coregui.client.util.RPCDataSource; * @author John Mazzitelli */ public class AlertDataSource extends RPCDataSource<Alert, AlertCriteria> { + + private static final String FIELD_PARENT = "parent"; // may be template or group alert def parent
public static final String PRIORITY_ICON_HIGH = ImageManager.getAlertIcon(AlertPriority.HIGH); public static final String PRIORITY_ICON_MEDIUM = ImageManager.getAlertIcon(AlertPriority.MEDIUM); @@ -359,6 +362,7 @@ public class AlertDataSource extends RPCDataSource<Alert, AlertCriteria> { } criteria.addFilterEntityContext(entityContext); criteria.fetchConditionLogs(true); +// criteria.fetchGroupAlertDefinition(true);
return criteria; } @@ -410,6 +414,22 @@ public class AlertDataSource extends RPCDataSource<Alert, AlertCriteria> { record.setAttribute(AncestryUtil.RESOURCE_ANCESTRY, resource.getAncestry()); record.setAttribute(AncestryUtil.RESOURCE_TYPE_ID, resource.getResourceType().getId());
+ AlertDefinition groupAlertDefinition = alertDefinition.getGroupAlertDefinition(); + Integer parentId = alertDefinition.getParentId(); + if (groupAlertDefinition != null && groupAlertDefinition.getGroup() != null) { + boolean isAutogroup = groupAlertDefinition.getGroup().getAutoGroupParentResource() != null; + record.setAttribute(FIELD_PARENT, (isAutogroup ? "#Resource/AutoGroup/" : "#ResourceGroup/") + + groupAlertDefinition.getGroup().getId() + "/Alerts/Definitions/" + groupAlertDefinition.getId()); + record.setLinkText(MSG.view_alert_definition_for_group()); + } else if (parentId != null && parentId.intValue() != 0) { + record.setAttribute( + FIELD_PARENT, + LinkManager.getAdminTemplatesEditLink(AlertDefinitionTemplateTypeView.VIEW_ID.getName(), resource + .getResourceType().getId()) + + "/" + parentId); + record.setLinkText(MSG.view_alert_definition_for_type()); + } + Set<AlertConditionLog> conditionLogs = from.getConditionLogs(); String conditionText; String conditionValue; diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/AlertDetailsView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/AlertDetailsView.java index 68a0953..ed98d94 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/AlertDetailsView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/AlertDetailsView.java @@ -40,8 +40,12 @@ import com.smartgwt.client.widgets.tab.Tab; import com.smartgwt.client.widgets.tab.TabSet;
import org.rhq.core.domain.alert.Alert; +import org.rhq.core.domain.alert.AlertDefinition; import org.rhq.core.domain.alert.notification.ResultState; import org.rhq.core.domain.criteria.AlertCriteria; +import org.rhq.core.domain.criteria.AlertDefinitionCriteria; +import org.rhq.core.domain.criteria.ResourceGroupCriteria; +import org.rhq.core.domain.resource.group.ResourceGroup; import org.rhq.core.domain.util.PageList; import org.rhq.enterprise.gui.coregui.client.BookmarkableView; import org.rhq.enterprise.gui.coregui.client.CoreGUI; @@ -83,7 +87,14 @@ public class AlertDetailsView extends EnhancedVLayout implements BookmarkableVie @Override public void onSuccess(PageList<Alert> result) { Alert alert = result.get(0); - show(alert); + Integer parentId = alert.getAlertDefinition().getParentId(); + AlertDefinition groupAlertDefinition = alert.getAlertDefinition().getGroupAlertDefinition(); + if (groupAlertDefinition != null || (parentId != null && parentId.intValue() != 0)) { + fetchDefinitionWithGroupAndTemplate(alert); + } else { + show(alert); + } + }
@Override @@ -92,6 +103,24 @@ public class AlertDetailsView extends EnhancedVLayout implements BookmarkableVie } }); } + + private void fetchDefinitionWithGroupAndTemplate(final Alert alert) { + AlertDefinitionCriteria criteria = new AlertDefinitionCriteria(); + criteria.addFilterAlertId(alert.getId()); + criteria.fetchGroupAlertDefinition(true); + criteria.fetchResourceType(true); + GWTServiceLookup.getAlertDefinitionService().findAlertDefinitionsByCriteria(criteria, new AsyncCallback<PageList<AlertDefinition>>() { + public void onSuccess(PageList<AlertDefinition> result) { + alert.getAlertDefinition().setGroupAlertDefinition(result.get(0).getGroupAlertDefinition()); + alert.getAlertDefinition().setResourceType(result.get(0).getResourceType()); + show(alert); + } + + public void onFailure(Throwable caught) { + CoreGUI.getErrorHandler().handleError(MSG.view_alert_details_loadFailed(), caught); + } + }); + }
private void show(Alert alert) { destroyMembers(); @@ -102,7 +131,7 @@ public class AlertDetailsView extends EnhancedVLayout implements BookmarkableVie addMember(getDetailsTabSet(record)); }
- private TabSet getDetailsTabSet(Record record) { + private TabSet getDetailsTabSet(ListGridRecord record) { TabSet tabset = new NamedTabSet();
Tab generalTab = new NamedTab(new ViewName("general", MSG.view_alert_common_tab_general())); @@ -121,7 +150,7 @@ public class AlertDetailsView extends EnhancedVLayout implements BookmarkableVie return tabset; }
- private DynamicForm getDetailsTableForAlert(Record record) { + private DynamicForm getDetailsTableForAlert(ListGridRecord record) { DynamicForm form = new DynamicForm(); form.setNumCols(4); form.setHeight("15%"); @@ -189,6 +218,13 @@ public class AlertDetailsView extends EnhancedVLayout implements BookmarkableVie MSG.view_alert_details_field_resource_ancestry()); resourceAncestryItem.setValue(record.getAttribute("resourceAncestry")); items.add(resourceAncestryItem); + + String parentUrl = record.getAttribute("parent"); + if (parentUrl != null) { + StaticTextItem parentItem = new StaticTextItem("parent", "Parent Definition"); + parentItem.setValue(LinkManager.getHref(parentUrl, record.getLinkText())); + items.add(parentItem); + }
form.setItems(items.toArray(new FormItem[items.size()]));
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties index 84e9c6e..ed6b130 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties @@ -1080,6 +1080,7 @@ view_alert_definitions_update_failure = Alert definition update failed view_alert_definitions_update_success = Alert definition successfully updated view_alert_details_field_ack_at = Acknowledged at view_alert_details_field_ack_by = Acknowledged by +view_alert_details_field_parent_definition = Parent definition view_alert_details_field_recovery_info = Recovery Info view_alert_details_field_resource_ancestry = Resource Ancestry view_alert_details_field_watched_resource = Watched Resource diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties index d6a102b..1950003 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties @@ -1094,6 +1094,7 @@ view_alert_definitions_update_failure = NepodaÅilo se zmÄnit definici vÃœstrah view_alert_definitions_update_success = ÃspÄÅ¡nÄ zmÄnÄna definice vÃœstrahy view_alert_details_field_ack_at = Potvrzeno v view_alert_details_field_ack_by = Potvrzeno kÃœm +view_alert_details_field_parent_definition = Definice rodiÄe view_alert_details_field_recovery_info = Informace o obnovÄ view_alert_details_field_resource_ancestry = Původ zdroje view_alert_details_field_watched_resource = 
SledovanÃœ zdroj diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties index 9237f76..1c0005b 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties @@ -1069,6 +1069,7 @@ view_alert_definitions_update_failure = ã¢ã©ãŒãå®çŸ©ã®æŽæ°ã«å€±æã view_alert_definitions_update_success = ã¢ã©ãŒãå®çŸ©ã®æŽæ°ã«æåããŸãã view_alert_details_field_ack_at = 次ã®å Žæã§ç¢ºèªæžã¿ã§ã view_alert_details_field_ack_by = 次ã®äººã«ãã£ãŠç¢ºèªæžã¿ã§ã +##view_alert_details_field_parent_definition = Parent definition view_alert_details_field_recovery_info = ãªã«ããªæ å ± ##view_alert_details_field_resource_ancestry = Resource Ancestry ##view_alert_details_field_watched_resource = Watched Resource diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties index bd34f9b..5118a2d 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties @@ -935,6 +935,7 @@ view_alert_definitions_table_title_group = 귞룹 ê²œê³ ì ì view_alert_definitions_table_title_resource = 늬ìì€ ê²œê³ ì ì view_alert_details_field_ack_at = ë€ì ìì¹ìì íìžíìµëë€ view_alert_details_field_ack_by = ë€ì ì¬ëì ìíŽ íìžëììµëë€ +##view_alert_details_field_parent_definition = Parent definition view_alert_details_field_recovery_info = 복구 ì 볎 view_alert_details_field_resource_ancestry = 늬ìì€ ì¡°ì view_alert_details_field_watched_resource 
= êŽì¬ 늬ìì€ diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties index 1330eb7..bfd95e8 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties @@ -1078,6 +1078,7 @@ view_alert_definitions_update_success = Defini\u00E7\u00E3o do alerta atualizada view_alert_details_breadcrumb = Detalhes view_alert_details_field_ack_at = Verificado em view_alert_details_field_ack_by = Verificado por +##view_alert_details_field_parent_definition = Parent definition view_alert_details_field_recovery_info = Informa\u00E7\u00E3o de Recupera\u00E7\u00E3o ##view_alert_details_field_resource_ancestry = Resource Ancestry ##view_alert_details_field_watched_resource = Watched Resource diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties index 72a287b..e03e1a9 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties @@ -1042,6 +1042,7 @@ #view_alert_definitions_update_success = Alert definition successfully updated #view_alert_details_field_ack_at = Acknowledged at #view_alert_details_field_ack_by = Acknowledged by +##view_alert_details_field_parent_definition = Parent definition #view_alert_details_field_recovery_info = Recovery Info #view_alert_details_loadFailed = Failed to fetch alert details #view_alerts_ack_confirm = Acknowledge the selected alert(s)? 
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties index 10f9b0a..82ddc2a 100644 --- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties @@ -1063,6 +1063,7 @@ view_alert_definitions_update_failure = \u544a\u8b66\u5b9a\u4e49\u66f4\u65b0\u59 view_alert_definitions_update_success = \u544a\u8b66\u5b9a\u4e49\u66f4\u65b0\u6210\u529f view_alert_details_field_ack_at = \u786e\u8ba4\u4e8e view_alert_details_field_ack_by = \u786e\u8ba4\u8005 +##view_alert_details_field_parent_definition = Parent definition view_alert_details_field_recovery_info = \u8fd8\u539f\u4fe1\u606f ##view_alert_details_field_resource_ancestry = Resource Ancestry ##view_alert_details_field_watched_resource = Watched Resource
commit 67c442b7baa5db4cdd3715fd89db9ea01b9e2fa8 Author: Jirka Kremser jkremser@redhat.com Date: Fri Jul 19 12:41:13 2013 +0200
Removing deprecated annotation, adding orphanRemoval=true attribute instead.
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/alert/Alert.java b/modules/core/domain/src/main/java/org/rhq/core/domain/alert/Alert.java index 4742c17..44a8eac 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/alert/Alert.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/alert/Alert.java @@ -253,9 +253,8 @@ public class Alert implements Serializable { @ManyToOne private AlertDefinition alertDefinition;
- @OneToMany(mappedBy = "alert", cascade = CascadeType.ALL) + @OneToMany(mappedBy = "alert", cascade = CascadeType.ALL, orphanRemoval=true) @OrderBy - @org.hibernate.annotations.Cascade(org.hibernate.annotations.CascadeType.DELETE_ORPHAN) // primary key private Set<AlertConditionLog> conditionLogs = new LinkedHashSet<AlertConditionLog>();
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/alert/AlertDampening.java b/modules/core/domain/src/main/java/org/rhq/core/domain/alert/AlertDampening.java index 0f2d0b1..be98df5 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/alert/AlertDampening.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/alert/AlertDampening.java @@ -86,8 +86,7 @@ public class AlertDampening implements java.io.Serializable {
// This is required for cascade behavior. We want to be able to cascade delete the AlertDampeningEvents when an // AlertDefinition is removed from the db, due to deleting a Resource from inventory. - @OneToMany(mappedBy = "alertDefinition", cascade = { CascadeType.REFRESH, CascadeType.REMOVE }) - @org.hibernate.annotations.Cascade(org.hibernate.annotations.CascadeType.DELETE_ORPHAN) + @OneToMany(mappedBy = "alertDefinition", cascade = { CascadeType.REFRESH, CascadeType.REMOVE }, orphanRemoval = true) private Set<AlertDampeningEvent> alertDampeningEvents = new HashSet<AlertDampeningEvent>();
protected AlertDampening() { diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/alert/AlertDefinition.java b/modules/core/domain/src/main/java/org/rhq/core/domain/alert/AlertDefinition.java index 02362ce..3445d9e 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/alert/AlertDefinition.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/alert/AlertDefinition.java @@ -293,7 +293,6 @@ public class AlertDefinition implements Serializable {
// do not cascade remove - group removal will be detaching children alert defs from the group def, // and then letting the children be deleted slowly by existing alert def removal mechanisms - @SuppressWarnings("unused") @OneToMany(mappedBy = "groupAlertDefinition", fetch = FetchType.LAZY, cascade = { CascadeType.PERSIST }) @OrderBy private Set<AlertDefinition> groupAlertDefinitionChildren = new LinkedHashSet<AlertDefinition>(); @@ -361,11 +360,10 @@ public class AlertDefinition implements Serializable { // referencing AlertConditionLog records. private Set<AlertCondition> conditions = new LinkedHashSet<AlertCondition>(1); // Most alerts will only have one condition.
- @OneToMany(mappedBy = "alertDefinition", cascade = CascadeType.ALL) + @OneToMany(mappedBy = "alertDefinition", cascade = CascadeType.ALL, orphanRemoval = true) // Although similar to AlertCondition, we do use DELETE_ORPHAN here. The reason is because AlertNotificationLog // does not refer back to the AlertNotification record and therefore the notification logs are not affected // by the loss of the AlertNotification that spawned the notification. - @org.hibernate.annotations.Cascade(org.hibernate.annotations.CascadeType.DELETE_ORPHAN) private List<AlertNotification> alertNotifications = new ArrayList<AlertNotification>();
/**
commit 92a286d611085d96ba1710c442035445dda5318a Author: Jirka Kremser jkremser@redhat.com Date: Thu Jul 18 18:37:48 2013 +0200
[BZ 980091] - link in the alert to the parent on which the alert was created - Adding a new filter on AlertDefinitionCriteria (filter by alert id).
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/AlertDefinitionCriteria.java b/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/AlertDefinitionCriteria.java index 854efaf..9d4e51d 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/AlertDefinitionCriteria.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/AlertDefinitionCriteria.java @@ -48,6 +48,7 @@ public class AlertDefinitionCriteria extends Criteria { private String filterName; private String filterDescription; private AlertPriority filterPriority; + private Integer filterAlertId; // requires overrides private NonBindingOverrideFilter filterAlertTemplateOnly; // requires overrides - finds only alert templates private Integer filterAlertTemplateParentId; // requires overrides private Integer filterAlertTemplateResourceTypeId; // requires overrides @@ -75,6 +76,10 @@ public class AlertDefinitionCriteria extends Criteria { private PageOrdering sortResourceName; // requires sort override
public AlertDefinitionCriteria() { + filterOverrides.put("alertId", "" + + "id IN ( SELECT alert.alertDefinition.id " // + + " FROM Alert alert " // + + " WHERE alert.id = ? )"); filterOverrides.put("alertTemplateOnly", "resourceType IS NOT NULL"); filterOverrides.put("alertTemplateParentId", "parentId = ?"); filterOverrides.put("alertTemplateResourceTypeId", "resourceType.id = ?"); @@ -113,6 +118,10 @@ public class AlertDefinitionCriteria extends Criteria { public void addFilterAlertTemplateParentId(Integer filterAlertTemplateParentId) { this.filterAlertTemplateParentId = filterAlertTemplateParentId; } + + public void addFilterAlertId(Integer filterAlertId) { + this.filterAlertId = filterAlertId; + }
public void addFilterAlertTemplateResourceTypeId(Integer filterAlertTemplateResourceTypeId) { this.filterAlertTemplateResourceTypeId = filterAlertTemplateResourceTypeId;
commit b3aa6d8a54378fad077d2052e9f9620768723a4d Author: Michael Burman yak@iki.fi Date: Fri Jul 19 12:27:38 2013 +0200
BZ980076 Check if storage is really running after a crash
diff --git a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/ControlCommand.java b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/ControlCommand.java index 8988d98..5528619 100644 --- a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/ControlCommand.java +++ b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/ControlCommand.java @@ -1,7 +1,7 @@ /* * * * RHQ Management Platform - * * Copyright (C) 2005-2012 Red Hat, Inc. + * * Copyright (C) 2005-2013 Red Hat, Inc. * * All rights reserved. * * * * This program is free software; you can redistribute it and/or modify @@ -41,6 +41,10 @@ import org.apache.commons.cli.ParseException; import org.apache.commons.cli.PosixParser; import org.apache.commons.configuration.ConfigurationException; import org.apache.commons.configuration.PropertiesConfiguration; +import org.apache.commons.exec.DefaultExecutor; +import org.apache.commons.exec.ExecuteException; +import org.apache.commons.exec.Executor; +import org.apache.commons.exec.PumpStreamHandler; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory;
@@ -171,10 +175,15 @@ public abstract class ControlCommand { return getStorageBasedir().exists(); }
- protected String getStoragePid() throws IOException { + protected File getStoragePidFile() { File storageBasedir = getStorageBasedir(); File storageBinDir = new File(storageBasedir, "bin"); File pidFile = new File(storageBinDir, "cassandra.pid"); + return pidFile; + } + + protected String getStoragePid() throws IOException { + File pidFile = getStoragePidFile();
if (pidFile.exists()) { return StreamUtil.slurp(new FileReader(pidFile)); @@ -282,4 +291,77 @@ public abstract class ControlCommand {
return inUse; } + + protected void waitForProcessToStop(String pid) throws Exception { + + if (isWindows() || pid==null) { + // For the moment we have no better way to just wait some time + Thread.sleep(10*1000L); + } else { + int tries = 5; + while (tries > 0) { + log.debug("."); + if (!isUnixPidRunning(pid)) { + break; + } + Thread.sleep(2*1000L); + tries--; + } + if (tries==0) { + throw new RHQControlException("Process [" + pid + "] did not finish yet. Terminate it manually and retry."); + } + } + + } + + protected void killPid(String pid) throws IOException { + Executor executor = new DefaultExecutor(); + executor.setWorkingDirectory(getBinDir()); + executor.setStreamHandler(new PumpStreamHandler()); + org.apache.commons.exec.CommandLine commandLine; + + commandLine = new org.apache.commons.exec.CommandLine("kill").addArgument(pid); + executor.execute(commandLine); + } + + protected boolean isUnixPidRunning(String pid) { + + Executor executor = new DefaultExecutor(); + executor.setWorkingDirectory(getBinDir()); + executor.setStreamHandler(new PumpStreamHandler()); + org.apache.commons.exec.CommandLine commandLine = new org.apache.commons.exec.CommandLine("/bin/kill") + .addArgument("-0") + .addArgument(pid); + + try { + int code = executor.execute(commandLine); + if (code!=0) { + return false; + } + } catch (ExecuteException ee ) { + if (ee.getExitValue()==1) { + // return code 1 means process does not exist + return false; + } + } catch (IOException e) { + log.error("Checking for running process failed: " + e.getMessage()); + } + return true; + } + + protected boolean isStorageRunning() throws IOException { + String pid = getStoragePid(); + if(pid == null) { + return false; + } else if(pid != null && !isUnixPidRunning(pid)) { + // There is a phantom pidfile + File pidFile = getStoragePidFile(); + if(!pidFile.delete()) { + throw new RHQControlException("Could not delete storage pidfile " + pidFile.getAbsolutePath()); + } + return false; + } else { + return 
true; + } + } } diff --git a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/AbstractInstall.java b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/AbstractInstall.java index fe7cdd4..8c885b0 100644 --- a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/AbstractInstall.java +++ b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/AbstractInstall.java @@ -1,7 +1,7 @@ /* * * * RHQ Management Platform - * * Copyright (C) 2005-2012 Red Hat, Inc. + * * Copyright (C) 2005-2013 Red Hat, Inc. * * All rights reserved. * * * * This program is free software; you can redistribute it and/or modify @@ -36,16 +36,12 @@ import java.util.Properties; import org.apache.commons.cli.CommandLine; import org.apache.commons.exec.DefaultExecuteResultHandler; import org.apache.commons.exec.DefaultExecutor; -import org.apache.commons.exec.ExecuteException; import org.apache.commons.exec.Executor; import org.apache.commons.exec.PumpStreamHandler; - import org.jboss.as.controller.client.ModelControllerClient; - import org.rhq.common.jbossas.client.controller.DeploymentJBossASClient; import org.rhq.common.jbossas.client.controller.MCCHelper; import org.rhq.server.control.ControlCommand; -import org.rhq.server.control.RHQControlException;
/** * Common code for commands that perform installs. Basically shared code for Install and Upgrade commands. @@ -104,55 +100,6 @@ public abstract class AbstractInstall extends ControlCommand { } }
- protected void waitForProcessToStop(String pid) throws Exception { - - if (isWindows() || pid==null) { - // For the moment we have no better way to just wait some time - Thread.sleep(10*1000L); - } else { - int tries = 5; - while (tries > 0) { - log.debug("."); - if (!isUnixPidRunning(pid)) { - break; - } - Thread.sleep(2*1000L); - tries--; - } - if (tries==0) { - throw new RHQControlException("Process [" + pid + "] did not finish yet. Terminate it manually and retry."); - } - } - - } - - protected boolean isUnixPidRunning(String pid) { - - Executor executor = new DefaultExecutor(); - executor.setWorkingDirectory(getBinDir()); - executor.setStreamHandler(new PumpStreamHandler()); - org.apache.commons.exec.CommandLine commandLine; - - commandLine = new org.apache.commons.exec.CommandLine("/bin/kill") - .addArgument("-0") - .addArgument(pid); - - try { - int code = executor.execute(commandLine); - if (code!=0) { - return false; - } - } catch (ExecuteException ee ) { - if (ee.getExitValue()==1) { - // return code 1 means process does not exist - return false; - } - } catch (IOException e) { - log.error("Checking for running process failed: " + e.getMessage()); - } - return true; - } - protected void waitForRHQServerToInitialize() throws Exception { try { final long messageInterval = 30000L; diff --git a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Start.java b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Start.java index 7e83c9e..f15b13e 100644 --- a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Start.java +++ b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Start.java @@ -1,7 +1,7 @@ /* * * * RHQ Management Platform - * * Copyright (C) 2005-2012 Red Hat, Inc. + * * Copyright (C) 2005-2013 Red Hat, Inc. * * All rights reserved. 
* * * * This program is free software; you can redistribute it and/or modify @@ -26,7 +26,6 @@ package org.rhq.server.control.command;
import java.io.File; -import java.io.FileReader;
import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.Options; @@ -137,14 +136,13 @@ public class Start extends ControlCommand { log.debug("Failed to start storage service", e); } } else { - File storageBinDir = new File(getStorageBasedir(), "bin"); - File pidFile = new File(storageBinDir, "cassandra.pid"); + File pidFile = getStoragePidFile();
// For now we are duplicating logic in the status command. This code will be // replaced when we implement a rhq-storage.sh script. - if (pidFile.exists()) { - String pid = StreamUtil.slurp(new FileReader(pidFile)); + if (isStorageRunning()) { + String pid = getStoragePid(); System.out.println("RHQ storage node (pid " + pid + ") is running"); } else { commandLine = getCommandLine(false, "cassandra", "-p", pidFile.getAbsolutePath()); diff --git a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Stop.java b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Stop.java index 53d0374..908aa8e 100644 --- a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Stop.java +++ b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Stop.java @@ -1,7 +1,7 @@ /* * * * RHQ Management Platform - * * Copyright (C) 2005-2012 Red Hat, Inc. + * * Copyright (C) 2005-2013 Red Hat, Inc. * * All rights reserved. * * * * This program is free software; you can redistribute it and/or modify @@ -126,13 +126,13 @@ public class Stop extends AbstractInstall { log.debug("Failed to stop storage service", e); } } else { - String pid = getStoragePid(); - if (pid != null) { + if(isStorageRunning()) { + String pid = getStoragePid(); + System.out.println("Stopping RHQ storage node..."); System.out.println("RHQ storage node (pid=" + pid + ") is stopping...");
- commandLine = new org.apache.commons.exec.CommandLine("kill").addArgument(pid); - executor.execute(commandLine); + killPid(pid);
waitForProcessToStop(pid);
commit bf8587569567d261791f272a672f5722d7dca8c0 Author: Thomas Segismont tsegismo@redhat.com Date: Thu Jul 18 23:37:57 2013 +0200
[as7] Fix domain api version discovery
Look for the first node of type element instead of simply calling getFirstChild
diff --git a/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/helper/HostConfiguration.java b/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/helper/HostConfiguration.java index 329246e..3c71135 100644 --- a/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/helper/HostConfiguration.java +++ b/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/helper/HostConfiguration.java @@ -1,6 +1,6 @@ /* * RHQ Management Platform - * Copyright (C) 2012 Red Hat, Inc. + * Copyright (C) 2005-2013 Red Hat, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify @@ -13,11 +13,14 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ package org.rhq.modules.plugins.jbossas7.helper;
+import static org.rhq.core.util.StringUtil.EMPTY_STRING; +import static org.w3c.dom.Node.ELEMENT_NODE; + import java.io.File; import java.io.FileInputStream; import java.io.InputStream; @@ -33,6 +36,7 @@ import javax.xml.xpath.XPathFactory; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.w3c.dom.Document; +import org.w3c.dom.Node;
import org.rhq.core.pluginapi.util.CommandLineOption; import org.rhq.modules.plugins.jbossas7.AS7CommandLine; @@ -234,11 +238,15 @@ public class HostConfiguration { }
public String getDomainApiVersion() { - - String version = document.getFirstChild().getAttributes().getNamedItem("xmlns").getTextContent(); - - version = version.substring(version.lastIndexOf(':')+1); - return version; + // Look for the first child node of type element (<host> in domain mode or <server> in standalone mode) + // We can't just call getFirstChild because first child could be a node of type comment + for (Node childNode = document.getFirstChild(); childNode != null; childNode = childNode.getNextSibling()) { + if (childNode.getNodeType() == ELEMENT_NODE) { + String xmlns = childNode.getAttributes().getNamedItem("xmlns").getTextContent(); + return xmlns.substring(xmlns.lastIndexOf(':') + 1); + } + } + return EMPTY_STRING; }
/**
commit 5283e5356d2ca4bda6e3aeea9d323c186030baa9 Author: Thomas Segismont tsegismo@redhat.com Date: Thu Jul 18 09:41:39 2013 +0200
Bug 969621 - EAP 6 managed plug-in is unable to discover EAP servers when more then one is running on a single host
diff --git a/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/BaseProcessDiscovery.java b/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/BaseProcessDiscovery.java index cc13406..cd9f276 100644 --- a/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/BaseProcessDiscovery.java +++ b/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/BaseProcessDiscovery.java @@ -1,6 +1,6 @@ /* * RHQ Management Platform - * Copyright (C) 2005-2012 Red Hat, Inc. + * Copyright (C) 2005-2013 Red Hat, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify @@ -13,8 +13,8 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ package org.rhq.modules.plugins.jbossas7;
@@ -42,6 +42,7 @@ import org.rhq.core.domain.configuration.Configuration; import org.rhq.core.domain.configuration.PropertyList; import org.rhq.core.domain.configuration.PropertyMap; import org.rhq.core.domain.configuration.PropertySimple; +import org.rhq.core.domain.resource.ResourceUpgradeReport; import org.rhq.core.pluginapi.event.log.LogFileEventResourceComponentHelper; import org.rhq.core.pluginapi.inventory.DiscoveredResourceDetails; import org.rhq.core.pluginapi.inventory.InvalidPluginConfigurationException; @@ -49,6 +50,8 @@ import org.rhq.core.pluginapi.inventory.ManualAddFacet; import org.rhq.core.pluginapi.inventory.ProcessScanResult; import org.rhq.core.pluginapi.inventory.ResourceDiscoveryComponent; import org.rhq.core.pluginapi.inventory.ResourceDiscoveryContext; +import org.rhq.core.pluginapi.upgrade.ResourceUpgradeContext; +import org.rhq.core.pluginapi.upgrade.ResourceUpgradeFacet; import org.rhq.core.pluginapi.util.CommandLineOption; import org.rhq.core.pluginapi.util.FileUtils; import org.rhq.core.pluginapi.util.JavaCommandLine; @@ -66,12 +69,15 @@ import org.rhq.modules.plugins.jbossas7.json.Result; * Abstract base discovery component for the two server types - "JBossAS7 Host Controller" and * "JBossAS7 Standalone Server". */ -public abstract class BaseProcessDiscovery implements ResourceDiscoveryComponent, ManualAddFacet { +public abstract class BaseProcessDiscovery implements ResourceDiscoveryComponent, ManualAddFacet, ResourceUpgradeFacet {
private static final String JBOSS_AS_PREFIX = "jboss-as-"; private static final String JBOSS_EAP_PREFIX = "jboss-eap-"; private static final String WILDFLY_PREFIX = "wildfly-";
+ private static final String LOCAL_RESOURCE_KEY_PREFIX = "hostConfig: "; + private static final String REMOTE_RESOURCE_KEY_PREFIX = "hostPort: "; + private static final String HOME_DIR_SYSPROP = "jboss.home.dir";
private static final String RHQADMIN = "rhqadmin"; @@ -202,7 +208,7 @@ public abstract class BaseProcessDiscovery implements ResourceDiscoveryComponent setStartScriptPluginConfigProps(process, commandLine, pluginConfig, agentProcess); setUserAndPasswordPluginConfigProps(serverPluginConfig, hostConfig, baseDir);
- String key = baseDir.getPath(); + String key = createKeyForLocalResource(serverPluginConfig); HostPort hostPort = hostConfig.getDomainControllerHostPort(commandLine); String name = buildDefaultResourceName(hostPort, managementHostPort, productType); String description = buildDefaultResourceDescription(hostPort, productType); @@ -455,7 +461,7 @@ public abstract class BaseProcessDiscovery implements ResourceDiscoveryComponent HostPort managementHostPort = new HostPort(false); managementHostPort.host = hostname; managementHostPort.port = port; - String key = hostname + ":" + port; + String key = createKeyForRemoteResource(hostname + ":" + port); String name = buildDefaultResourceName(hostPort, managementHostPort, productType); String version = productInfo.getProductVersion(); String description = buildDefaultResourceDescription(hostPort, productType); @@ -469,6 +475,43 @@ public abstract class BaseProcessDiscovery implements ResourceDiscoveryComponent return detail; }
+ @Override + public ResourceUpgradeReport upgrade(ResourceUpgradeContext inventoriedResource) { + String currentResourceKey = inventoriedResource.getResourceKey(); + Configuration pluginConfiguration = inventoriedResource.getPluginConfiguration(); + ServerPluginConfiguration serverPluginConfiguration = new ServerPluginConfiguration(pluginConfiguration); + + if (currentResourceKey.startsWith(LOCAL_RESOURCE_KEY_PREFIX) + || currentResourceKey.startsWith(REMOTE_RESOURCE_KEY_PREFIX)) { + // Resource key already in right format + return null; + } + + ResourceUpgradeReport report = new ResourceUpgradeReport(); + + if (new File(currentResourceKey).isDirectory()) { + // Old key format for a local resource (key is base dir) + report.setNewResourceKey(createKeyForLocalResource(serverPluginConfiguration)); + } else if (currentResourceKey.contains(":")) { + // Old key format for a remote (manually added) resource (key is base dir) + report.setNewResourceKey(createKeyForRemoteResource(currentResourceKey)); + } else { + log.warn("Unknown format, cannot upgrade resource key [" + currentResourceKey + "]"); + return null; + } + + return report; + } + + private String createKeyForRemoteResource(String hostPort) { + return REMOTE_RESOURCE_KEY_PREFIX + hostPort; + } + + private String createKeyForLocalResource(ServerPluginConfiguration serverPluginConfiguration) { + return LOCAL_RESOURCE_KEY_PREFIX + + serverPluginConfiguration.getHostConfigFile().getAbsolutePath(); + } + private <T>T getServerAttribute(ASConnection connection, String attributeName) { Operation op = new ReadAttribute(null, attributeName); Result res = connection.execute(op);
commit e6fa8c734c2bfee9ad46a2b7683d4c86b4897e5c Author: John Sanda jsanda@redhat.com Date: Thu Jul 18 07:23:34 2013 -0400
take 2 at dependency clean up
JNA libraries were getting pulled into the ear as transitive dependencies. JNA is used by the cassandra-ccm-core module to shutdown Cassandra. This functionality though is only used in test code.
diff --git a/modules/enterprise/server/jar/pom.xml b/modules/enterprise/server/jar/pom.xml index c648685..f418afc 100644 --- a/modules/enterprise/server/jar/pom.xml +++ b/modules/enterprise/server/jar/pom.xml @@ -77,6 +77,14 @@ <groupId>org.rhq</groupId> <artifactId>rhq-core-plugin-api</artifactId> </exclusion> + <exclusion> + <groupId>net.java.dev.jna</groupId> + <artifactId>jna</artifactId> + </exclusion> + <exclusion> + <groupId>net.java.dev.jna</groupId> + <artifactId>platform</artifactId> + </exclusion> </exclusions> </dependency>
diff --git a/modules/enterprise/server/server-metrics/pom.xml b/modules/enterprise/server/server-metrics/pom.xml index 2245978..97496f7 100644 --- a/modules/enterprise/server/server-metrics/pom.xml +++ b/modules/enterprise/server/server-metrics/pom.xml @@ -47,18 +47,6 @@ <dependencies> <dependency> <groupId>${project.groupId}</groupId> - <artifactId>rhq-cassandra-ccm-core</artifactId> - <version>${project.version}</version> - <exclusions> - <exclusion> - <groupId>org.apache.cassandra</groupId> - <artifactId>cassandra-clientutil</artifactId> - </exclusion> - </exclusions> - </dependency> - - <dependency> - <groupId>${project.groupId}</groupId> <artifactId>rhq-cassandra-ccm-testng</artifactId> <version>${project.version}</version> <scope>test</scope>
commit 375f6f66fcc9214cc3d13605c17859ac11e3fd45 Author: Stefan Negrea snegrea@redhat.com Date: Thu Jul 18 05:04:00 2013 -0500
Temporary fix for the server jar name for dev profile (it was still using the old name).
diff --git a/modules/enterprise/server/jar/pom.xml b/modules/enterprise/server/jar/pom.xml index 7c236c7..c648685 100644 --- a/modules/enterprise/server/jar/pom.xml +++ b/modules/enterprise/server/jar/pom.xml @@ -776,7 +776,7 @@ <properties> <rhq.rootDir>../../../..</rhq.rootDir> <rhq.containerDir>${rhq.rootDir}/${rhq.devContainerServerPath}</rhq.containerDir> - <rhq.deploymentName>${project.build.finalName}-ejb3.jar</rhq.deploymentName> + <rhq.deploymentName>rhq-server.jar</rhq.deploymentName> <rhq.deploymentDir>${rhq.containerDir}/${rhq.earDeployDir}/${rhq.deploymentName}</rhq.deploymentDir> </properties>
commit b59d1684eb12c4e1c5a78309b4ec40eeb461b7d5 Author: John Sanda jsanda@redhat.com Date: Wed Jul 17 20:17:14 2013 -0400
fix test ear deployment error that resulted from renaming server jar
The ear deployment was failing because classes were not getting added to the rhq-server.jar in the test ear.
diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java index 4dfbd46..2fd8624 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java @@ -341,7 +341,7 @@ public abstract class AbstractEJB3Test extends Arquillian { // merge rhq.ear into testEar but include only the EJB jars and the supporting libraries. Note that we // don't include the services sar because tests are responsible for prepare/unprepare of all required services, // we don't want the production services performing any unexpected work. - testEar = testEar.merge(rhqEar, Filters.include("/lib.*|/rhq.*ejb3\.jar.*")); + testEar = testEar.merge(rhqEar, Filters.include("/lib.*|/rhq.*ejb3\.jar.*|/rhq-server.jar.*")); // remove startup beans and shutdown listeners, we don't want this to be a full server deployment. The tests // start/stop what they need, typically with test services or mocks. testEar.delete(ArchivePaths @@ -413,7 +413,7 @@ public abstract class AbstractEJB3Test extends Arquillian { //System.out.println("** The Deployment EAR: " + testEar.toString(true) + "\n");
// Save the test EAR to a zip file for inspection (set file explicitly) - //exportZip(testEar, new File("c:/temp/test-ear.ear")); + exportZip(testEar, new File("/Users/jsanda/tmp/test-ear.ear"));
return testEar; }
commit c855df20b787a6db0a7b9aed7b1fb5597eb0e2d6 Author: Jay Shaughnessy jshaughn@redhat.com Date: Wed Jul 17 17:09:55 2013 -0400
Work on the windows rhq48 cassandra upgrade patch
diff --git a/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.bat b/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.bat index e0312dc..c190503 100644 --- a/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.bat +++ b/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.bat @@ -1,14 +1,54 @@ @echo off + +rem =========================================================================== +rem RHQ Storage Node (Cassandra) Windows 4.8.0 upgrade patch script +rem +rem WHO NEEDS TO RUN THE PATCH? +rem +rem Run this patch script if you are: +rem - Running RHQ 4.8.0 on Windows +rem - Planning to upgrade and maintain your Storage node data +rem +rem PREQUISITES: +rem +rem This patch requires the installation of Python. Download Python 2.7.5 from: +rem +rem http://www.python.org/download/releases/2.7.5/ +rem +rem Install as directed. Note that Python will need to be on your PATH to run this patch. Also, +rem this patch will not work with Python3. +rem +rem Edit <rhq-install-dir>\rhq-storage\conf\cassandra.yaml to ensure the following is true: +rem +rem start_rpc: true +rem +rem Note the setting of rpc_port. By default it is 9160, which is fine. +rem +rem RUNNING THE PATCH: +rem +rem > cd <patch-dir> +rem > rhq48-storage-patch.bat <rhq-480-server-dir> <storage-node-ip-address> <thrift-rpc-port> <jmx-port> +rem +rem For example: +rem > rhq48-storage-patch.bat c:\rhq-server-4.8.0 127.0.0.1 9160 7299 +rem +rem Review the output carefully. There should be no errors (be careful, the script may still have completed). +rem If errors are encountered fix the issue and rerun the patch. +rem +rem When done, you can again edit cassandra.yaml and reset start_rpc: false +rem +rem =========================================================================== + setlocal
if /i "%4" == "" ( - echo Usage: rhq48-storage-patch.bat ^<rhq-server-dir^> ^<storage-ip-address^> ^<cql-port^> ^<jmx-port^> + echo Usage: rhq48-storage-patch.bat ^<rhq-server-dir^> ^<storage-ip-address^> ^<thrift-rpc-port^> ^<jmx-port^> exit /B 1 )
set RHQ_SERVER_DIR=%1 -set CQL_HOSTNAME=%2 -set CQL_PORT=%3 +set CQLSH_HOST=%2 +set CQLSH_PORT=%3 set JMX_PORT=%4 set USERNAME="rhqadmin" set PASSWORD="rhqadmin" @@ -29,7 +69,7 @@ echo Waiting for RHQ Storage Node to start up.. rem Sleep is not implemented in all Windows prompts, this one won't work in Vista choice /n /c y /d y /t 3
-set RHQ_STORAGE_BIN=%RHQ_SERVER_DIR%\rhq-storage\bin\ +set RHQ_STORAGE_BIN=%RHQ_SERVER_DIR%\rhq-storage\bin set CQLSH_PATH=%RHQ_STORAGE_BIN%\cqlsh set NODETOOL_PATH=%RHQ_STORAGE_BIN%\nodetool.bat
commit 11b8ae9cff9ca5085bfe317a01653cc8b81d8bb7 Author: Stefan Negrea snegrea@redhat.com Date: Wed Jul 17 14:14:02 2013 -0500
Initial implementation for the Storage Node configuration composite. The final resource operation still needs to be implemented but the rest of the functionality is implemented.
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java index 14043db..575edc74 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java @@ -90,7 +90,7 @@ public class StorageNode implements Serializable { public static final long serialVersionUID = 1L;
public static final String QUERY_FIND_ALL = "StorageNode.findAll"; - public static final String QUERY_FIND_BY_ADDRESS = "StorageNode.findByName"; + public static final String QUERY_FIND_BY_ADDRESS = "StorageNode.findByAddress"; public static final String QUERY_FIND_ALL_NOT_INSTALLED = "StorageNode.findAllCloudMembers"; public static final String QUERY_DELETE_BY_ID = "StorageNode.deleteById"; public static final String QUERY_FIND_ALL_NORMAL = "StorageNode.findAllNormalCloudMembers"; diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeConfigurationComposite.java b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeConfigurationComposite.java new file mode 100644 index 0000000..e2c64f9 --- /dev/null +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeConfigurationComposite.java @@ -0,0 +1,98 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation, and/or the GNU Lesser + * General Public License, version 2.1, also as published by the Free + * Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License and the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * and the GNU Lesser General Public License along with this program; + * if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ +package org.rhq.core.domain.cloud; + +import java.io.Serializable; + +/** + * @author Stefan Negrea + */ +public class StorageNodeConfigurationComposite implements Serializable { + private static final long serialVersionUID = 1L; + + private StorageNode storageNode; + private int jmxPort; + private String heapSize; + + public StorageNodeConfigurationComposite() { + // GWT needs this + } + + public StorageNodeConfigurationComposite(StorageNode storageNode) { + this.storageNode = storageNode; + } + + /** + * @return associated storage node + */ + public StorageNode getStorageNode() { + return storageNode; + } + + /** + * @param storageNode storage node + */ + protected void setStorageNode(StorageNode storageNode) { + this.storageNode = storageNode; + } + + + /** + * @return the JMX port + */ + public int getJmxPort() { + return jmxPort; + } + + /** + * @param jmxPort JMX port to set + */ + public void setJmxPort(int jmxPort) { + this.jmxPort = jmxPort; + } + + /** + * @return the heap size + */ + public String getHeapSize() { + return heapSize; + } + + /** + * @param heapSize heap size to set + */ + public void setHeapSize(String heapSize) { + this.heapSize = heapSize; + } + + /* (non-Javadoc) + * @see java.lang.Object#toString() + */ + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("storageNode.addresss=").append(storageNode.getAddress()).append(", "); + builder.append("heapSize=").append(heapSize).append(", "); + builder.append("jmxPort=").append(jmxPort).append(""); + return builder.toString(); + } +} diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index fa51fe1..3f1ec69 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ 
b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -51,14 +51,18 @@ import org.rhq.core.domain.authz.Permission; import org.rhq.core.domain.cloud.Server; import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.cloud.StorageNode.OperationMode; +import org.rhq.core.domain.cloud.StorageNodeConfigurationComposite; import org.rhq.core.domain.cloud.StorageNodeLoadComposite; import org.rhq.core.domain.common.JobTrigger; import org.rhq.core.domain.configuration.Configuration; import org.rhq.core.domain.criteria.AlertCriteria; import org.rhq.core.domain.criteria.ResourceGroupCriteria; +import org.rhq.core.domain.criteria.ResourceOperationHistoryCriteria; import org.rhq.core.domain.criteria.StorageNodeCriteria; import org.rhq.core.domain.measurement.MeasurementAggregate; import org.rhq.core.domain.measurement.MeasurementUnits; +import org.rhq.core.domain.operation.OperationRequestStatus; +import org.rhq.core.domain.operation.ResourceOperationHistory; import org.rhq.core.domain.operation.bean.GroupOperationSchedule; import org.rhq.core.domain.resource.Resource; import org.rhq.core.domain.resource.ResourceType; @@ -72,6 +76,7 @@ import org.rhq.enterprise.server.auth.SubjectManagerLocal; import org.rhq.enterprise.server.authz.RequiredPermission; import org.rhq.enterprise.server.authz.RequiredPermissions; import org.rhq.enterprise.server.cloud.instance.ServerManagerLocal; +import org.rhq.enterprise.server.configuration.ConfigurationManagerLocal; import org.rhq.enterprise.server.measurement.MeasurementDataManagerLocal; import org.rhq.enterprise.server.operation.OperationManagerLocal; import org.rhq.enterprise.server.resource.ResourceTypeManagerLocal; @@ -96,6 +101,11 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN private static final String RHQ_STORAGE_JMX_PORT_PROPERTY = "jmxPort"; private static final String RHQ_STORAGE_ADDRESS_PROPERTY = "host";
+ private static final int OPERATION_QUERY_TIMEOUT = 1000; + private static final int MAX_ITERATIONS = 5; + private static final String UPDATE_CONFIGURATION_OPERATION = "updateConfiguration"; + private static final String RESTART_OPERATION = "restart"; + @PersistenceContext(unitName = RHQConstants.PERSISTENCE_UNIT_NAME) private EntityManager entityManager;
@@ -120,6 +130,9 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN @EJB private AlertManagerLocal alertManager;
+ @EJB + private ConfigurationManagerLocal configurationManager; + @Override public void linkResource(Resource resource) { List<StorageNode> storageNodes = this.getStorageNodes(); @@ -438,6 +451,19 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN entityManager.flush(); }
+ private StorageNode findStorageNodeByAddress(String address) { + TypedQuery<StorageNode> query = entityManager.<StorageNode> createNamedQuery(StorageNode.QUERY_FIND_BY_ADDRESS, + StorageNode.class); + query.setParameter("address", address); + List<StorageNode> result = query.getResultList(); + + if (result != null && result.size() > 0) { + return result.get(0); + } + + return null; + } + private StorageNodeLoadComposite.MeasurementAggregateWithUnits getMeasurementAggregateWithUnits(Subject subject, int schedId, MeasurementUnits units, long beginTime, long endTime) { MeasurementAggregate measurementAggregate = measurementManager.getAggregate(subject, schedId, beginTime, @@ -559,4 +585,91 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
return resourceIdsWithAlertDefinitions.toArray(new Integer[resourceIdsWithAlertDefinitions.size()]); } + + @Override + public StorageNodeConfigurationComposite retrieveConfiguration(Subject subject, StorageNode storageNode) { + StorageNodeConfigurationComposite configuration = new StorageNodeConfigurationComposite(storageNode); + + if (storageNode != null && storageNode.getResource() != null) { + Resource storageNodeResource = storageNode.getResource(); + Configuration storageNodeConfiguration = configurationManager.getResourceConfiguration(subject, + storageNodeResource.getId()); + + configuration.setHeapSize(storageNodeConfiguration.getSimpleValue("maxHeapSize")); + configuration.setJmxPort(storageNode.getJmxPort()); + } + + return configuration; + } + + @Override + public boolean updateConfiguration(Subject subject, StorageNodeConfigurationComposite storageNodeConfiguration) { + StorageNode storageNode = findStorageNodeByAddress(storageNodeConfiguration.getStorageNode().getAddress()); + + if (storageNode != null && storageNode.getResource() != null) { + Resource storageNodeResource = storageNode.getResource(); + Configuration parameters = new Configuration(); + parameters.setSimpleValue("jmxPort", storageNodeConfiguration.getJmxPort() + ""); + parameters.setSimpleValue("heapSize", storageNodeConfiguration.getHeapSize() + ""); + + boolean updateConfigurationResult = runOperationAndWaitForResult(subject, storageNodeResource, + UPDATE_CONFIGURATION_OPERATION, parameters); + + if (updateConfigurationResult) { + boolean restartResult = runOperationAndWaitForResult(subject, storageNodeResource, RESTART_OPERATION, + null); + + if (restartResult) { + storageNode.setJmxPort(storageNodeConfiguration.getJmxPort()); + entityManager.persist(storageNode); + + return true; + } + } + } + + return false; + } + + private boolean runOperationAndWaitForResult(Subject subject, Resource storageNodeResource, String operationToRun, + Configuration parameters) { + + //scheduling the 
operation + long operationStartTime = System.currentTimeMillis(); + operationManager.scheduleResourceOperation(subject, storageNodeResource.getId(), operationToRun, 0, 0, 0, 0, + parameters, "Run by StorageNodeManagerBean"); + + //waiting for the operation result then return it + int iteration = 0; + boolean successResultFound = false; + while (iteration < MAX_ITERATIONS && !successResultFound) { + ResourceOperationHistoryCriteria criteria = new ResourceOperationHistoryCriteria(); + criteria.addFilterResourceIds(storageNodeResource.getId()); + criteria.addFilterStartTime(operationStartTime); + criteria.addFilterOperationName(operationToRun); + criteria.addFilterStatus(OperationRequestStatus.SUCCESS); + criteria.setPageControl(PageControl.getUnlimitedInstance()); + + PageList<ResourceOperationHistory> results = operationManager.findResourceOperationHistoriesByCriteria( + subject, criteria); + + if (results != null && results.size() > 0) { + successResultFound = true; + } + + if (successResultFound) { + break; + } else { + try { + Thread.sleep(OPERATION_QUERY_TIMEOUT); + } catch (Exception e) { + log.error(e); + } + } + + iteration++; + } + + return successResultFound; + } } \ No newline at end of file diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java index a9b2514..6fca820 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java @@ -25,6 +25,7 @@ import javax.ejb.Local; import org.rhq.core.domain.alert.Alert; import org.rhq.core.domain.auth.Subject; import org.rhq.core.domain.cloud.StorageNode; +import org.rhq.core.domain.cloud.StorageNodeConfigurationComposite; import org.rhq.core.domain.cloud.StorageNodeLoadComposite; 
import org.rhq.core.domain.criteria.StorageNodeCriteria; import org.rhq.core.domain.resource.Resource; @@ -56,6 +57,10 @@ public interface StorageNodeManagerLocal { */ StorageNodeLoadComposite getLoad(Subject subject, StorageNode node, long beginTime, long endTime);
+ StorageNodeConfigurationComposite retrieveConfiguration(Subject subject, StorageNode storageNode); + + boolean updateConfiguration(Subject subject, StorageNodeConfigurationComposite storageNodeConfiguration); + /** * Fetches the list of StorageNode entities based on provided criteria. * diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java index 72432db..7be1b07 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java @@ -23,6 +23,7 @@ import javax.ejb.Remote; import org.rhq.core.domain.alert.Alert; import org.rhq.core.domain.auth.Subject; import org.rhq.core.domain.cloud.StorageNode; +import org.rhq.core.domain.cloud.StorageNodeConfigurationComposite; import org.rhq.core.domain.cloud.StorageNodeLoadComposite; import org.rhq.core.domain.criteria.StorageNodeCriteria; import org.rhq.core.domain.util.PageList; @@ -48,6 +49,10 @@ public interface StorageNodeManagerRemote { */ StorageNodeLoadComposite getLoad(Subject subject, StorageNode node, long beginTime, long endTime);
+ StorageNodeConfigurationComposite retrieveConfiguration(Subject subject, StorageNode storageNode); + + boolean updateConfiguration(Subject subject, StorageNodeConfigurationComposite storageNodeConfiguration); + /** * <p>Fetches the list of {@link StorageNode} entities based on provided criteria.</p> * diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/scheduler/jobs/StorageNodeMaintenanceJob.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/scheduler/jobs/StorageNodeMaintenanceJob.java index 6b1940d..54ca4c2 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/scheduler/jobs/StorageNodeMaintenanceJob.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/scheduler/jobs/StorageNodeMaintenanceJob.java @@ -71,7 +71,6 @@ public class StorageNodeMaintenanceJob extends AbstractStatefulJob { private final static String RUN_REPAIR_PROPERTY = "runRepair"; private final static String UPDATE_SEEDS_LIST = "updateSeedsList"; private final static String SEEDS_LIST = "seedsList"; - private final static String SUCCEED_PROPERTY = "succeed"; private static final String USERNAME_PROP = "rhq.cassandra.username"; private static final String PASSWORD_PROP = "rhq.cassandra.password";
@@ -169,8 +168,6 @@ public class StorageNodeMaintenanceJob extends AbstractStatefulJob { List<Resource> childResources = LookupUtil.getResourceManager().findResourcesByCriteria( LookupUtil.getSubjectManager().getOverlord(), c);
- - for (Resource childResource : childResources) { if (STORAGE_SERVICE.equals(childResource.getName())) { try { diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java index 6194146..380da65 100644 --- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java @@ -82,11 +82,19 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper return prepareForUpgrade(parameters); } else if (name.equals("readRepair")) { return readRepair(); + } else if (name.equals("updateConfiguration")) { + return updateConfiguration(parameters); } else { return super.invokeOperation(name, parameters); } }
+ private OperationResult updateConfiguration(Configuration params) { + OperationResult result = new OperationResult("Configuration updated."); + //TODO: implement updates to various sub-resources here + return result; + } + private OperationResult nodeAdded(Configuration params) { boolean runRepair = params.getSimple("runRepair").getBooleanValue(); boolean updateSeedsList = params.getSimple("updateSeedsList").getBooleanValue(); @@ -222,12 +230,12 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper } return result; } - + private OperationResult prepareForUpgrade(Configuration parameters) throws Exception { EmsConnection emsConnection = getEmsConnection(); EmsBean storageService = emsConnection.getBean("org.apache.cassandra.db:type=StorageService"); Class<?>[] emptyParams = new Class<?>[0]; - + if (log.isDebugEnabled()) { log.debug("Disabling native transport..."); } @@ -249,7 +257,7 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper snapshotName = System.currentTimeMillis() + ""; } operation.invoke(snapshotName, new String[] {}); - + // max 2 sec waitForTaskToComplete(500, 10, 150);
@@ -261,7 +269,7 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper
return new OperationResult(); } - + private void waitForTaskToComplete(int initialWaiting, int maxTries, int sleepMillis) { // initial waiting try { diff --git a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml index 5a60d19..6ed31b7 100644 --- a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml +++ b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml @@ -73,7 +73,7 @@ </c:list-property> </results> </operation> - + <operation name="addNodeMaintenance"> <parameters> <c:simple-property name="runRepair" type="boolean" default="true"/> @@ -111,6 +111,16 @@ </parameters> </operation>
+ <operation name="updateConfiguration" description="Updates the node configuration. Will require a separate server restart for the settings to take effect."> + <parameters> + <c:simple-property name="heapSize" type="string" description="The heap size to be used for both -Xms and -Xmx JVM options."/> + <c:simple-property name="jmxPort" type="integer" description="JMX port JVM option."/> + </parameters> + <results> + <c:simple-property name="operationResult" description="Outcome of updating the configuration."/> + </results> + </operation> + <resource-configuration> <c:group name="MemorySettings"> <c:simple-property name="minHeapSize" @@ -195,3 +205,4 @@ <service name="Keyspace" sourcePlugin="Cassandra" sourceType="Keyspace"/> </server> </plugin> +
commit b268d00034cb7af8d929006e8ac4688eeae58549 Author: John Sanda jsanda@redhat.com Date: Wed Jul 17 13:10:58 2013 -0400
Revert "cleaning up dependencies"
This reverts commit e798901b83e37cd267a2d11489664569918ef90d.
diff --git a/modules/common/cassandra-schema/pom.xml b/modules/common/cassandra-schema/pom.xml index 55dedb8..8e3b44a 100644 --- a/modules/common/cassandra-schema/pom.xml +++ b/modules/common/cassandra-schema/pom.xml @@ -15,6 +15,12 @@ <dependencies> <dependency> <groupId>${project.groupId}</groupId> + <artifactId>rhq-cassandra-ccm-core</artifactId> + <version>${project.version}</version> + </dependency> + + <dependency> + <groupId>${project.groupId}</groupId> <artifactId>rhq-cassandra-util</artifactId> <version>${project.version}</version> </dependency> diff --git a/modules/enterprise/server/server-metrics/pom.xml b/modules/enterprise/server/server-metrics/pom.xml index 97496f7..2245978 100644 --- a/modules/enterprise/server/server-metrics/pom.xml +++ b/modules/enterprise/server/server-metrics/pom.xml @@ -47,6 +47,18 @@ <dependencies> <dependency> <groupId>${project.groupId}</groupId> + <artifactId>rhq-cassandra-ccm-core</artifactId> + <version>${project.version}</version> + <exclusions> + <exclusion> + <groupId>org.apache.cassandra</groupId> + <artifactId>cassandra-clientutil</artifactId> + </exclusion> + </exclusions> + </dependency> + + <dependency> + <groupId>${project.groupId}</groupId> <artifactId>rhq-cassandra-ccm-testng</artifactId> <version>${project.version}</version> <scope>test</scope>
commit a05f561f5fcac94b3308937b07d3b1e1e3f84d58 Author: Thomas Segismont tsegismo@redhat.com Date: Wed Jul 17 16:59:03 2013 +0200
Print simpler log message when component invocation is interrupted and log level is higher than debug
diff --git a/modules/core/plugin-container/src/main/java/org/rhq/core/pc/inventory/ResourceContainer.java b/modules/core/plugin-container/src/main/java/org/rhq/core/pc/inventory/ResourceContainer.java index a9fe1c2..3669947 100644 --- a/modules/core/plugin-container/src/main/java/org/rhq/core/pc/inventory/ResourceContainer.java +++ b/modules/core/plugin-container/src/main/java/org/rhq/core/pc/inventory/ResourceContainer.java @@ -671,8 +671,19 @@ public class ResourceContainer implements Serializable {
public void markContextInterrupted() { localContext.markInterrupted(); - LOG.warn("Invocation has been marked interrupted for method [" + method + "] on resource [" - + resourceContainer.getResource() + "]"); + LOG.warn(getContextInterruptedWarningMessage(LOG.isDebugEnabled())); + } + + private String getContextInterruptedWarningMessage(boolean detailed) { + StringBuilder sb = new StringBuilder(); + sb.append("Invocation has been marked interrupted for method ["); + if (detailed) { + sb.append(method.toGenericString()); + } else { + sb.append(method.getDeclaringClass().getSimpleName()).append(".").append(method.getName()); + } + sb.append("] on resource [").append(resourceContainer.getResource()).append("]"); + return sb.toString(); } } }
commit 01cd91b130f563ba62cd96a46f2cb3a2ac567a48 Author: Larry O'Leary loleary@redhat.com Date: Wed Jul 17 16:32:05 2013 +0200
BZ 981015 - Ldap auth failed if DN contained a backslash
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/jaas/LdapLoginModule.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/jaas/LdapLoginModule.java index 6b7add6..0db28f7 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/jaas/LdapLoginModule.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/jaas/LdapLoginModule.java @@ -23,6 +23,7 @@ import java.util.Iterator; import java.util.Map.Entry; import java.util.Properties;
+import javax.naming.CompositeName; import javax.naming.Context; import javax.naming.NamingEnumeration; import javax.naming.directory.SearchControls; @@ -194,7 +195,18 @@ public class LdapLoginModule extends UsernamePasswordLoginModule { SearchResult si = (SearchResult) answer.next();
// Construct the UserDN - String userDN = si.getName() + "," + baseDNs[x]; + String userDN = null; + + try { + userDN = si.getNameInNamespace(); + } catch (UnsupportedOperationException use) { + userDN = new CompositeName(si.getName()).get(0); + if (si.isRelative()) { + userDN += "," + baseDNs[x]; + } + } + + log.debug("Using LDAP userDN=" + userDN);
ctx.addToEnvironment(Context.SECURITY_PRINCIPAL, userDN); ctx.addToEnvironment(Context.SECURITY_CREDENTIALS, inputPassword); @@ -209,7 +221,7 @@ public class LdapLoginModule extends UsernamePasswordLoginModule { // If we try all the BaseDN's and have not found a match, return false return false; } catch (Exception e) { - log.info("Failed to validate password: " + e.getMessage()); + log.info("Failed to validate password for [" + userName + "]: " + e.getMessage()); return false; } } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java index 532a944..a28c709 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java @@ -1,6 +1,6 @@ /* * RHQ Management Platform - * Copyright (C) 2005-2011 Red Hat, Inc. + * Copyright (C) 2005-2013 Red Hat, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify @@ -13,8 +13,8 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
package org.rhq.enterprise.server.resource.group; @@ -33,6 +33,7 @@ import java.util.Set;
import javax.ejb.EJB; import javax.ejb.Stateless; +import javax.naming.CompositeName; import javax.naming.Context; import javax.naming.NamingEnumeration; import javax.naming.NamingException; @@ -345,12 +346,9 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { try { userDN = si.getNameInNamespace(); } catch (UnsupportedOperationException use) { - userDN = si.getName(); - if (userDN.startsWith(""")) { - userDN = userDN.substring(1, userDN.length()); - } - if (userDN.endsWith(""")) { - userDN = userDN.substring(0, userDN.length() - 1); + userDN = new CompositeName(si.getName()).get(0); + if (si.isRelative()) { + userDN += "," + baseDNs[x]; } userDN = userDN + "," + baseDNs[x]; } @@ -507,7 +505,7 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal {
/** Translate SystemSettings to familiar Properties instance since we're * passing not one but multiple values. - * + * * @param systemSettings * @return */ @@ -532,7 +530,7 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal { /** Executes the LDAP group query using the filters, context and search controls, etc. parameters passed in. * The matching groups located during processing this pages of results are added as new entries to the * groupDetailsMap passed in. - * + * * @param filter * @param groupDetailsMap * @param ctx
commit 165c9c9f8da7251c0000444f04161171a7d5794f Author: John Sanda jsanda@redhat.com Date: Wed Jul 17 07:37:12 2013 -0400
need to make cassandra-ccm-core a direct dependency now
diff --git a/modules/helpers/metrics-simulator/pom.xml b/modules/helpers/metrics-simulator/pom.xml index a55afa5..0a06610 100644 --- a/modules/helpers/metrics-simulator/pom.xml +++ b/modules/helpers/metrics-simulator/pom.xml @@ -13,6 +13,12 @@ <dependencies> <dependency> <groupId>org.rhq</groupId> + <artifactId>rhq-cassandra-ccm-core</artifactId> + <version>${project.version}</version> + </dependency> + + <dependency> + <groupId>org.rhq</groupId> <artifactId>rhq-cassandra-schema</artifactId> <version>${project.version}</version> </dependency>
commit fb59ea4fb09414875afcaa9543788e3cfc1b14f1 Author: John Sanda jsanda@redhat.com Date: Wed Jul 17 07:35:22 2013 -0400
initial commit for StorageInstallerTest
This commit provides some initial test coverage for the storage installer. More tests will be added to cover various error handling situations and upgrade scenarios.
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java index 7496d08..338ef3a 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java +++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java @@ -303,7 +303,7 @@ public class CassandraClusterManager { return nodeIds; }
- private void killNode(File nodeDir) throws Exception { + public void killNode(File nodeDir) throws Exception { long pid = getPid(nodeDir); CLibrary.kill((int) pid, 9); } diff --git a/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java b/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java index 2fec2dc..63282e4 100644 --- a/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java +++ b/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java @@ -485,7 +485,7 @@ public class StorageInstaller { private PropertiesFileUpdate getServerProperties() { String sysprop = System.getProperty("rhq.server.properties-file"); if (sysprop == null) { - throw new RuntimeException("The required system property [rhq.server.properties] is not defined."); + throw new RuntimeException("The required system property [rhq.server.properties-file] is not defined."); }
File file = new File(sysprop); @@ -605,7 +605,7 @@ public class StorageInstaller { return new File(binDir, "cassandra.pid").exists(); }
- private boolean verifyNodeIsUp(String address, int jmxPort, int retries, long timeout) throws Exception { + boolean verifyNodeIsUp(String address, int jmxPort, int retries, long timeout) throws Exception { String url = "service:jmx:rmi:///jndi/rmi://" + address + ":" + jmxPort + "/jmxrmi"; JMXServiceURL serviceURL = new JMXServiceURL(url); JMXConnector connector = null; diff --git a/modules/common/cassandra-installer/src/test/java/org/rhq/storage/installer/StorageInstallerTest.java b/modules/common/cassandra-installer/src/test/java/org/rhq/storage/installer/StorageInstallerTest.java new file mode 100644 index 0000000..3fd13c7 --- /dev/null +++ b/modules/common/cassandra-installer/src/test/java/org/rhq/storage/installer/StorageInstallerTest.java @@ -0,0 +1,123 @@ +package org.rhq.storage.installer; + + +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertTrue; +import static org.testng.Assert.fail; + +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.lang.reflect.Method; +import java.util.Properties; + +import org.apache.commons.cli.CommandLine; +import org.apache.commons.cli.CommandLineParser; +import org.apache.commons.cli.PosixParser; +import org.apache.commons.io.FileUtils; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +import org.rhq.cassandra.CassandraClusterManager; +import org.rhq.core.util.file.FileUtil; + +/** + * @author John Sanda + */ +public class StorageInstallerTest { + + private File basedir; + + private File storageDir; + + private StorageInstaller installer; + + @BeforeMethod + public void initDirs(Method test) throws Exception { + File dir = new File(getClass().getResource(".").toURI()); + basedir = new File(dir, getClass().getSimpleName() + "/" + test.getName()); + FileUtil.purge(basedir, true); + basedir.mkdirs(); + + System.setProperty("rhq.server.basedir", 
basedir.getAbsolutePath()); + + File serverPropsFile = new File(basedir, "rhq-server.properties"); + FileUtils.touch(serverPropsFile); + System.setProperty("rhq.server.properties-file", serverPropsFile.getAbsolutePath()); + + storageDir = new File(basedir, "rhq-storage"); + + installer = new StorageInstaller(); + } + + @AfterMethod + public void shutdownStorageNode() throws Exception { + CassandraClusterManager ccm = new CassandraClusterManager(); + ccm.killNode(storageDir); + } + + @Test + public void performValidInstall() throws Exception { + CommandLineParser parser = new PosixParser(); + + String[] args = { + "--dir", storageDir.getAbsolutePath(), + "--commitlog", new File(storageDir, "commit_log").getAbsolutePath(), + "--data", new File(storageDir, "data").getAbsolutePath(), + "--saved-caches", new File(storageDir, "saved_caches").getAbsolutePath(), + "--heap-size", "256M", + "--heap-new-size", "64M", + "--hostname", "127.0.0.1" + }; + + CommandLine cmdLine = parser.parse(installer.getOptions(), args); + int status = installer.run(cmdLine); + + assertEquals(status, 0, "Expected to get back a status code of 0 for a successful install"); + assertNodeIsRunning(); + assertRhqServerPropsUpdated(); + + File binDir = new File(storageDir, "bin"); + assertTrue(binDir.exists(), "Expected to find bin directory at " + binDir); + + File confDir = new File(storageDir, "conf"); + assertTrue(confDir.exists(), "Expected to find conf directory at " + confDir); + + File libDir = new File(storageDir, "lib"); + assertTrue(libDir.exists(), "Expected to find lib directory at " + libDir); + + File commitLogDir = new File(storageDir, "commit_log"); + assertTrue(commitLogDir.exists(), "Expected to find commit_log directory at " + commitLogDir); + + File dataDir = new File(storageDir, "data"); + assertTrue(dataDir.exists(), "Expected to find data directory at " + dataDir); + + File savedCachesDir = new File(storageDir, "saved_caches"); + assertTrue(savedCachesDir.exists(), "Expected to 
find saved_caches directory at " + savedCachesDir); + } + + private void assertNodeIsRunning() { + try { + installer.verifyNodeIsUp("127.0.0.1", 7299, 3, 1000); + } catch (Exception e) { + fail("Failed to verify that node is up", e); + } + } + + private void assertRhqServerPropsUpdated() { + File serverPropsFile = new File(basedir, "rhq-server.properties"); + Properties properties = new Properties(); + + try { + properties.load(new FileInputStream(serverPropsFile)); + } catch (IOException e) { + fail("Failed to verify that " + serverPropsFile + " was updated", e); + } + + String seeds = properties.getProperty("rhq.cassandra.seeds"); + + assertEquals(seeds, "127.0.0.1|7299|9142"); + } + +} diff --git a/modules/common/cassandra-installer/src/test/resources/log4j.properties b/modules/common/cassandra-installer/src/test/resources/log4j.properties new file mode 100644 index 0000000..d56cc88 --- /dev/null +++ b/modules/common/cassandra-installer/src/test/resources/log4j.properties @@ -0,0 +1,41 @@ +# +# /* +# * RHQ Management Platform +# * Copyright (C) 2005-2012 Red Hat, Inc. +# * All rights reserved. +# * +# * This program is free software; you can redistribute it and/or modify +# * it under the terms of the GNU General Public License, version 2, as +# * published by the Free Software Foundation, and/or the GNU Lesser +# * General Public License, version 2.1, also as published by the Free +# * Software Foundation. +# * +# * This program is distributed in the hope that it will be useful, +# * but WITHOUT ANY WARRANTY; without even the implied warranty of +# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# * GNU General Public License and the GNU Lesser General Public License +# * for more details. 
+# * +# * You should have received a copy of the GNU General Public License +# * and the GNU Lesser General Public License along with this program; +# * if not, write to the Free Software Foundation, Inc., +# * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# */ +# + +log4j.rootCategory=WARN, FILE, CONSOLE + +log4j.appender.FILE=org.apache.log4j.DailyRollingFileAppender +log4j.appender.FILE.DatePattern='.'yyyy-MM-dd +log4j.appender.FILE.File=./target/test.log +log4j.appender.FILE.layout=org.apache.log4j.PatternLayout +log4j.appender.FILE.layout.ConversionPattern=%d{ABSOLUTE} %-5p %c %m%n +#log4j.appender.A1.layout.ConversionPattern=%d [%t] %-5p %c - %m%n +log4j.appender.FILE.Append=false + +log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender +log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout +#log4j.appender.CONSOLE.layout.ConversionPattern=%5p %d{HH:mm:ss,SSS} %m%n +log4j.appender.CONSOLE.layout.ConversionPattern=%d{ABSOLUTE} %-5p %c %m%n + +log4j.logger.org.rhq=DEBUG
commit e798901b83e37cd267a2d11489664569918ef90d Author: John Sanda jsanda@redhat.com Date: Tue Jul 16 16:16:01 2013 -0400
cleaning up dependencies
diff --git a/modules/common/cassandra-schema/pom.xml b/modules/common/cassandra-schema/pom.xml index 8e3b44a..55dedb8 100644 --- a/modules/common/cassandra-schema/pom.xml +++ b/modules/common/cassandra-schema/pom.xml @@ -15,12 +15,6 @@ <dependencies> <dependency> <groupId>${project.groupId}</groupId> - <artifactId>rhq-cassandra-ccm-core</artifactId> - <version>${project.version}</version> - </dependency> - - <dependency> - <groupId>${project.groupId}</groupId> <artifactId>rhq-cassandra-util</artifactId> <version>${project.version}</version> </dependency> diff --git a/modules/enterprise/server/server-metrics/pom.xml b/modules/enterprise/server/server-metrics/pom.xml index 2245978..97496f7 100644 --- a/modules/enterprise/server/server-metrics/pom.xml +++ b/modules/enterprise/server/server-metrics/pom.xml @@ -47,18 +47,6 @@ <dependencies> <dependency> <groupId>${project.groupId}</groupId> - <artifactId>rhq-cassandra-ccm-core</artifactId> - <version>${project.version}</version> - <exclusions> - <exclusion> - <groupId>org.apache.cassandra</groupId> - <artifactId>cassandra-clientutil</artifactId> - </exclusion> - </exclusions> - </dependency> - - <dependency> - <groupId>${project.groupId}</groupId> <artifactId>rhq-cassandra-ccm-testng</artifactId> <version>${project.version}</version> <scope>test</scope>
commit ae96f1c0ad6e797e889923dc47a1f3c55c8eb394 Author: Heiko W. Rupp hwr@redhat.com Date: Wed Jul 17 11:06:21 2013 +0200
Allow to specify a directory with classes to scan for plugin annotations and to auto-create metrics+operations from them.
diff --git a/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/Parameter.java b/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/Parameter.java index 4b82a13..29dcc1e 100644 --- a/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/Parameter.java +++ b/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/Parameter.java @@ -29,13 +29,14 @@ import java.lang.annotation.Target;
/** * Parameter. - * + * * @author Galder Zamarreño * @since 4.0 */ @Target(ElementType.PARAMETER) @Retention(RetentionPolicy.RUNTIME) public @interface Parameter { - String name() default ""; + String name() ; String description() default ""; + RhqType type() default RhqType.VOID; } diff --git a/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/RhqType.java b/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/RhqType.java new file mode 100644 index 0000000..1487a51 --- /dev/null +++ b/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/RhqType.java @@ -0,0 +1,77 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +package org.rhq.helpers.pluginAnnotations.agent; + +import java.io.File; + + +/** + * Base data types from RHQ for properties + * @author Heiko W. 
Rupp + */ +public enum RhqType { + INTEGER(new Class<?>[]{Integer.class,int.class},Boolean.class), + LONG(new Class<?>[]{Long.class,long.class},Long.class), + DOUBLE(new Class<?>[]{Double.class,double.class},Double.class), + STRING(new Class<?>[]{String.class},String.class), + LONG_STRING(new Class<?>[]{},String.class), + PASSWORD(new Class<?>[]{},String.class), + BOOLEAN(new Class<?>[]{Boolean.class, boolean.class},Boolean.class), + FLOAT(new Class<?>[]{Float.class, float.class},Float.class), + FILE(new Class<?>[]{File.class},File.class), + DIRECTORY(new Class<?>[]{},File.class), + VOID(new Class<?>[]{Void.class,void.class},Void.class) + ; + private Class<?>[] fromClasses; + private Class<?> toClass; + + private RhqType(Class<?>[] fromClasses,Class<?> toClass) { + + this.fromClasses = fromClasses; + this.toClass = toClass; + } + + public Class<?>[] getFromClasses() { + return fromClasses; + } + + public Class<?> getToClass() { + return toClass; + } + + public static RhqType findType(Class<?> clazz) { + for (RhqType type : RhqType.values()) { + for (Class from : type.getFromClasses()) { + if (clazz.equals(from)) { + return type; + } + } + } + return null; + } + + public String getRhqName() { + String name = name().toLowerCase(); + if (name.equals("long_string")) { + name = "longString"; + } + return name; + } +} diff --git a/modules/helpers/pluginGen/pom.xml b/modules/helpers/pluginGen/pom.xml index 5aa512c..e7744e4 100644 --- a/modules/helpers/pluginGen/pom.xml +++ b/modules/helpers/pluginGen/pom.xml @@ -19,6 +19,11 @@ <name>RHQ plugin generator</name> <description>Helper to generate plugin skeletons</description>
+ <properties> + <!-- we are using JDK 1.7 here, as JavaFX needs this and the generator is standalone anyway --> + <animal.sniffer.java.signature.artifactId>java17</animal.sniffer.java.signature.artifactId> + </properties> + <build> <plugins>
@@ -104,7 +109,7 @@ <dependency> <groupId>org.rhq.helpers</groupId> <artifactId>rhq-pluginAnnotations</artifactId> - <version>4.8.0-SNAPSHOT</version> + <version>4.9.0-SNAPSHOT</version> </dependency> <dependency> <groupId>com.oracle</groupId> diff --git a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/AnnotationProcessor.java b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/AnnotationProcessor.java new file mode 100644 index 0000000..3c90b7b --- /dev/null +++ b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/AnnotationProcessor.java @@ -0,0 +1,44 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +package org.rhq.helpers.pluginGen; + +import java.util.List; + +/** + * Processor that scans a directory for annotated classes and generates metrics etc. from them. + * @author Heiko W. 
Rupp + */ +public class AnnotationProcessor { + + private List<Class> classList; + private final DirectoryClassLoader classLoader; + + public AnnotationProcessor(String baseDirectory) { + classLoader = new DirectoryClassLoader(); + classLoader.setBaseDir(baseDirectory); + } + + public void populate(Props props) { + classList = classLoader.findClasses(); + + props.populateMetrics(classList); + props.populateOperations(classList); + } +} diff --git a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/DirectoryClassLoader.java b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/DirectoryClassLoader.java new file mode 100644 index 0000000..b4a729f --- /dev/null +++ b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/DirectoryClassLoader.java @@ -0,0 +1,142 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +package org.rhq.helpers.pluginGen; + +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileFilter; +import java.io.FileInputStream; +import java.nio.file.FileVisitor; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Hashtable; +import java.util.List; + +/** + * Classloader to load from a given directory + * @author Heiko W. Rupp + */ +public class DirectoryClassLoader extends ClassLoader { + private Hashtable classes = new Hashtable(); //used to cache already defined classes + private String baseDir; + + @Override + protected Class<?> findClass(String pathName) throws ClassNotFoundException { + + + if (baseDir==null) { + throw new IllegalStateException("Must set baseDir first"); + } + + byte classByte[]; + Class result = null; + + String className = pathName.substring(baseDir.length()+1); // remove base dir + className = className.substring(0,className.length()-6); // remove .class + className = className.replaceAll(File.separator,"."); // change / -> . 
+ + result = (Class) classes.get(className); //checks in cached classes + if (result != null) { + return result; + } + try { + File classFile = new File(pathName); + FileInputStream fis = new FileInputStream(classFile); + + ByteArrayOutputStream byteStream = new ByteArrayOutputStream(); + int nextValue = fis.read(); + while (-1 != nextValue) { + byteStream.write(nextValue); + nextValue = fis.read(); + } + + classByte = byteStream.toByteArray(); + result = defineClass(className, classByte, 0, classByte.length, null); + classes.put(className, result); + return result; + } catch (Exception e) { + return null; + } + } + + public void setBaseDir(String baseDir) { + this.baseDir = baseDir; + } + + public List<Class> findClasses() { + if (baseDir==null) { + throw new IllegalStateException("Must set baseDir first"); + } + + File baseFile = new File(baseDir); + if(!baseFile.isDirectory()) { + throw new IllegalStateException("BaseDir is no directory"); + } + if (!baseFile.canRead()) { + throw new IllegalStateException("BaseDir is not readable"); + } + + List<File> files = walk(baseFile); + + List<Class> classes = new ArrayList<>(); + for (File file : files) { + String fileName = file.getAbsolutePath(); + + Class clazz = null; + try { + clazz = findClass(fileName); + } catch (ClassNotFoundException e) { + e.printStackTrace(); // TODO: Customise this generated block + } + classes.add(clazz); + + } + + return classes; + } + + private List<File> walk(File path) { + + List<File> files = new ArrayList<>(); + + File[] list = path.listFiles(); + + if (list == null) { + return files; + } + + for ( File f : list ) { + if ( f.isDirectory() ) { + List<File> newFiles = walk( f ); + System.out.println( "Dir:" + f.getAbsoluteFile() ); + files.addAll(newFiles); + } + else { + System.out.println( "File:" + f.getAbsoluteFile() ); + if (f.getName().endsWith(".class") && !f.getName().contains("$")) { + files.add(f); + } + + } + } + return files; + } +} diff --git 
a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Generator.java b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Generator.java index e7da952..21d0d4d 100644 --- a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Generator.java +++ b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Generator.java @@ -149,9 +149,12 @@ public class Generator extends Application{ Label label = new Label("Messages:"); msgBox.getChildren().add(label); errorMessage = new Text(); - errorMessage.setFont(Font.font("Arial", FontWeight.SEMI_BOLD, 12)); + errorMessage.setFont(Font.font("Arial", FontWeight.SEMI_BOLD, 15)); errorMessage.setId("errorMessage"); msgBox.getChildren().add(errorMessage); + msgBox.setPadding(new Insets(5)); + msgBox.setSpacing(3); + msgBox.setAlignment(Pos.BASELINE_LEFT); return msgBox; }
@@ -172,94 +175,14 @@ public class Generator extends Application{ // Now add the field itself final Class propType = prop.getType(); if (propType.equals(String.class)) { - final Pattern pattern = Pattern.compile(prop.getValidationRegex()); - - final TextField input = new TextField(); - // Add field leave event to fill in the props with the result - input.focusedProperty().addListener(new ChangeListener<Boolean>() { - @Override - public void changed(ObservableValue<? extends Boolean> observableValue, Boolean oldState, - Boolean newState) { - if (newState) { // User entered input field - descriptionField.setText(prop.getDescription()); - } - else { // User left input field - descriptionField.setText(""); - setPropsValue(prop.getVariableName(),input.getText(), propType); // TODO right place? - } - } - }); - // Add validation of the input - input.textProperty().addListener(new ChangeListener<String>() { - @Override - public void changed(ObservableValue<? extends String> observableValue, String s, String newText) { - Matcher m = pattern.matcher(newText); - if (!m.matches()) { - setErrorMessage("Input does not match " + prop.getValidationRegex()); - } else { - clearErrorMessage(); - } - - } - }); - root.add(input, 1, row); + addStringField(root, descriptionField, row, prop); } else if (propType.equals(Boolean.class) || propType.equals(boolean.class)) { - final ChoiceBox choiceBox = new ChoiceBox(); - choiceBox.getItems().addAll("Yes", "No"); - choiceBox.getSelectionModel().selectLast(); // NO is default - choiceBox.getSelectionModel().selectedItemProperty().addListener(new ChangeListener<String>() { - @Override - public void changed(ObservableValue<? 
extends String> observableValue, String s, String newValue) { - setPropsValue(prop.getVariableName(), newValue.equals("Yes"), propType); - } - }); - Tooltip tooltip = new Tooltip(prop.getDescription()); // TODO make this a hover listener - choiceBox.setTooltip(tooltip); - - root.add(choiceBox, 1, row); + addBooleanField(root, row, prop); } else if (propType.equals(ResourceCategory.class)) { - final ChoiceBox choiceBox = new ChoiceBox(); - for (ResourceCategory cat : ResourceCategory.values()) { - choiceBox.getItems().add(cat.getLowerName()); - } - choiceBox.getSelectionModel().selectLast(); // service is default - choiceBox.getSelectionModel().selectedItemProperty().addListener(new ChangeListener<String>() { - @Override - public void changed(ObservableValue<? extends String> observableValue, String s, String newValue) { - ResourceCategory newCategory = ResourceCategory.valueOf(newValue.toUpperCase()); - setPropsValue(prop.getVariableName(),newCategory,propType); - } - }); - Tooltip tooltip = new Tooltip(prop.getDescription()); // TODO make this a hover listener - choiceBox.setTooltip(tooltip); - root.add(choiceBox,1,row); + addResourceTypeChooser(root, row, prop); } else if (propType.equals(File.class)) { - // Can not add this directly, so add a button to trigger it - final Text text = new Text(); - text.setText("Pick a directory"); - root.add(text,1,row); - Tooltip tooltip = new Tooltip("Pick the (parent) directory where the plugin will be put in."); - Button pickButton = new Button("Pick"); - pickButton.setTooltip(tooltip); - pickButton.setOnAction(new EventHandler<ActionEvent>() { - @Override - public void handle(ActionEvent actionEvent) { - DirectoryChooser chooser = new DirectoryChooser(); - chooser.setTitle("Pick a directory where the plugin will be put in."); - File dir = chooser.showDialog(primaryStage); - if (dir != null) { - String dirName = dir.getAbsolutePath(); - props.setFileSystemRoot(dirName); - clearErrorMessage(); - text.setText(dirName); - } else 
{ - setErrorMessage("No directory selected"); - text.setText("Pick a directory"); - } - } - }); - - root.add(pickButton,2,row); + addDirectoryChooserField(root, row, prop, descriptionField); + }
row++; @@ -269,6 +192,115 @@ public class Generator extends Application{ return row; }
+ private void addDirectoryChooserField(GridPane root, int row, final Prop prop, final Text descriptionField) { + // Can not add this directly, so add a button to trigger it + final TextField input = new TextField(); + root.add(input,1,row); + Tooltip tooltip = new Tooltip(prop.getDescription()); + Button pickButton = new Button("Pick"); + input.setTooltip(tooltip); + pickButton.setTooltip(tooltip); + pickButton.setOnAction(new EventHandler<ActionEvent>() { + @Override + public void handle(ActionEvent actionEvent) { + DirectoryChooser chooser = new DirectoryChooser(); + chooser.setTitle(prop.getDescription()); + File dir = chooser.showDialog(primaryStage); + if (dir != null) { + String dirName = dir.getAbsolutePath(); + setPropsValue(prop.getVariableName(), dirName, String.class); + clearErrorMessage(); + input.setText(dirName); + } else { + setErrorMessage("No directory selected"); + input.setText("Pick a directory"); + } + } + }); + input.focusedProperty().addListener(new ShowFieldDescriptionHandler(prop,descriptionField,input)); + // Add validation of the input + input.textProperty().addListener(new ChangeListener<String>() { + @Override + public void changed(ObservableValue<? 
extends String> observableValue, String s, String newText) { + File file = new File(newText); + if (!file.isDirectory()) { + setErrorMessage(newText + " is no directory"); + } else if (prop.isDirectoryWriteable() && !file.canWrite()) { + setErrorMessage(newText + " is not writable"); + } else if (!prop.isDirectoryWriteable() && !file.canRead()) { + setErrorMessage(newText + " is not readable"); + } else { + clearErrorMessage(); + } + + } + }); + + + + root.add(pickButton,2,row); + } + + private void addResourceTypeChooser(GridPane root, int row, final Prop prop) { + final ChoiceBox choiceBox = new ChoiceBox(); + for (ResourceCategory cat : ResourceCategory.values()) { + choiceBox.getItems().add(cat.getLowerName()); + } + choiceBox.getSelectionModel().selectLast(); // service is default + choiceBox.getSelectionModel().selectedItemProperty().addListener(new ChangeListener<String>() { + @Override + public void changed(ObservableValue<? extends String> observableValue, String s, String newValue) { + ResourceCategory newCategory = ResourceCategory.valueOf(newValue.toUpperCase()); + setPropsValue(prop.getVariableName(),newCategory,prop.getType()); + } + }); + Tooltip tooltip = new Tooltip(prop.getDescription()); // TODO make this a hover listener + choiceBox.setTooltip(tooltip); + root.add(choiceBox,1,row); + } + + private void addBooleanField(GridPane root, int row, final Prop prop) { + final ChoiceBox choiceBox = new ChoiceBox(); + choiceBox.getItems().addAll("Yes", "No"); + choiceBox.getSelectionModel().selectLast(); // NO is default + choiceBox.getSelectionModel().selectedItemProperty().addListener(new ChangeListener<String>() { + @Override + public void changed(ObservableValue<? 
extends String> observableValue, String s, String newValue) { + setPropsValue(prop.getVariableName(), newValue.equals("Yes"), prop.getType()); + } + }); + Tooltip tooltip = new Tooltip(prop.getDescription()); // TODO make this a hover listener + choiceBox.setTooltip(tooltip); + + root.add(choiceBox, 1, row); + } + + private void addStringField(GridPane root, final Text descriptionField, int row, final Prop prop) { + final Pattern pattern = Pattern.compile(prop.getValidationRegex()); + + final TextField input = new TextField(); + if (prop.getDefaultValue()!=null && !prop.getDefaultValue().isEmpty()) { + input.setText(prop.getDefaultValue()); + setPropsValue(prop.getVariableName(),prop.getDefaultValue(),prop.getType()); + } + // Add field leave event to fill in the props with the result + input.focusedProperty().addListener(new ShowFieldDescriptionHandler(prop,descriptionField,input)); + // Add validation of the input + input.textProperty().addListener(new ChangeListener<String>() { + @Override + public void changed(ObservableValue<? extends String> observableValue, String s, String newText) { + Matcher m = pattern.matcher(newText); + if (!m.matches()) { + setErrorMessage("Input does not match " + prop.getValidationRegex()); + } else { + clearErrorMessage(); + } + + } + }); + root.add(input, 1, row); + } + private void setInfoMessage(String message) { errorMessage.setText(message); errorMessage.setFill(Color.DARKGREEN); @@ -297,4 +329,29 @@ public class Generator extends Application{ } }
+ + private class ShowFieldDescriptionHandler implements ChangeListener<Boolean> { + + private Prop prop; + private Text descriptionField; + private TextField input; + + private ShowFieldDescriptionHandler(Prop prop,Text descriptionField, TextField input) { + this.prop = prop; + this.descriptionField = descriptionField; + this.input = input; + } + + @Override + public void changed(ObservableValue<? extends Boolean> observableValue, Boolean oldState, + Boolean newState) { + if (newState) { // User entered input field + descriptionField.setText(prop.getDescription()); + } + else { // User left input field + descriptionField.setText(""); + setPropsValue(prop.getVariableName(),input.getText(), String.class); // TODO right place? + } + } + } } diff --git a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/PluginGen.java b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/PluginGen.java index 64b5509..8722446 100644 --- a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/PluginGen.java +++ b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/PluginGen.java @@ -51,6 +51,19 @@ public class PluginGen { private final Log log = LogFactory.getLog(PluginGen.class);
public static void main(String[] arg) throws Exception { + + if (arg.length>0) { + if (arg[0].equals("-ui")) { + Generator.main(arg); + + } + else { + System.out.println("use option -ui to start the UI version"); + } + System.exit(0); + } + + PluginGen pg = new PluginGen(); pg.run();
@@ -121,10 +134,24 @@ public class PluginGen { String pkg = props.getPackagePrefix() + "." + props.getName(); props.setPkg(pkg);
+ String name = props.getName(); // Type name + + if (props.getComponentClass().contains("{name}")) { + props.setComponentClass(props.getComponentClass().replace("{name}",name)); + } + + if (props.getDiscoveryClass().contains("{name}")) { + props.setDiscoveryClass(props.getDiscoveryClass().replace("{name}",name)); + } + for (Props cProp : props.getChildren()) { cProp.setPkg(pkg); }
+ if (props.getScanForAnnotations()!=null) { + AnnotationProcessor ap = new AnnotationProcessor(props.getScanForAnnotations()); + ap.populate(props); + } }
/** @@ -247,7 +274,7 @@ public class PluginGen { }
boolean success; - File activeDirectory = new File(props.getFileSystemRoot(), props.getName()); + File activeDirectory = new File(props.getFileSystemRoot(), props.getPluginName());
if (!activeDirectory.exists()) { success = activeDirectory.mkdir(); @@ -260,7 +287,7 @@ public class PluginGen { // write pom.xml createFile(props, "pom", "pom.xml", activeDirectory.getAbsolutePath());
- // Create java directory hierarchie + // Create java directory hierarchy String path = activeDirectory.getAbsolutePath() + File.separator + "src" + File.separator + "main" + File.separator;
diff --git a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Prop.java b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Prop.java index 4ed019a..40e729c 100644 --- a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Prop.java +++ b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Prop.java @@ -30,48 +30,62 @@ public enum Prop { PLUGIN_NAME("pluginName", String.class,"Name of the plugin", "\w+" , true ), PLUGIN_DESCRIPTION("pluginDescription", String.class,"Description of the plugin",".*" , true ), PACKAGE("packagePrefix", String.class,"Default Package","[a-zA-Z\.]+",true ), - FILE_ROOT("fileSystemRoot", File.class,"Root directory to put the plugin",".*",true ), + FILE_ROOT("fileSystemRoot", File.class,"Root directory to put the plugin into",".*",true , true,null), RHQ_VERSION("rhqVersion",String.class,"RHQ version to use","[0-9][0-9\.]+",true),
CATEGORY("category", ResourceCategory.class, "Category of the resource type (platform = host level)",null), TYPE_NAME("name", String.class, "Name of the resource type", "\w+"), DESCRIPTION("description", String.class, "Description of the type", ".*"), - DISCOVERY_CLASS("discoveryClass", String.class, "Discovery class", "[A-Z][a-zA-Z0-9]*"), - COMPONENT_CLASS("componentClass", String.class, "Discovery class", "[A-Z][a-zA-Z0-9]*"), + DISCOVERY_CLASS("discoveryClass", String.class, "Name of the Discovery class. '{name}' will be replaced with the type name", "[A-Z][a-zA-Z0-9]*",false,false,"{name}Discovery"), + COMPONENT_CLASS("componentClass", String.class, "Name of the Discovery class. '{name}' will be replaced with the type name", "[A-Z][a-zA-Z0-9]*",false,false,"{name}Component"), IS_SINGLETON("singleton",boolean.class,"Is this type a singleton, which means that" + - " there can only be one resource of that type for the given parent?",null), - HAS_METRICS("hasMetrics",boolean.class,"Does this type support taking metrics?",null), - HAS_OPERATIONS("hasOperations",boolean.class,"Does this type support operations?",null), - HAS_EVENTS("events",boolean.class,"Does this type support events?",null), - HAS_SUPPORT_FACET("supportFacet",boolean.class,"Does this type support the support facet?",null), + " there can only be one resource of that type for the given parent?"), + HAS_METRICS("hasMetrics",boolean.class,"Does this type support taking metrics?"), + HAS_OPERATIONS("hasOperations",boolean.class,"Does this type support operations?"), + HAS_EVENTS("events",boolean.class,"Does this type support events?"), + HAS_SUPPORT_FACET("supportFacet",boolean.class,"Does this type support the support facet?"), RESOURCE_CONFIGURATION("resourceConfiguration",boolean.class,"Does this type support " + - "configuring the resource?",".*"), - CAN_CREATE_CHILDREN("createChildren",boolean.class,"Can the type create child resources?",null), - 
CAN_DELETE_CHILDREN("deleteChildren",boolean.class,"Can the type delete child resources?",null), - - // TODO add the remaining properties from Prop.class - + "configuring the resource?"), + CAN_CREATE_CHILDREN("createChildren",boolean.class,"Can the type create child resources?"), + CAN_DELETE_CHILDREN("deleteChildren",boolean.class,"Can the type delete child resources?"), + USE_EXTENAL_JARS("usesExternalJarsInPlugin",boolean.class,"Will the plugin use external jars in the plugin jar?"), + ALLOW_MANUAL_ADD("manualAddOfResourceType",boolean.class,"Should manually adding resource be supported?"), + USE_LIFECYLE_API("usePluginLifecycleListenerApi",boolean.class,"Should the plugin lifecycle api be supported?"), + DEPENDS_ON_JMX_PLUGIN("dependsOnJmxPlugin",boolean.class,"Does the plugin use JMX and extend the JMX Plugin?"), + DEPENDS_ON_AS7_PLUGIN("dependsOnAs7Plugin",boolean.class,"Does the plugin use DMR and extend the AS7 Plugin?"), + USE_SUPPORT_FACET("supportFacet",boolean.class,"Will the support facet be used?"), + + SCAN_FOR_ANNOTATIONS("scanForAnnotations",File.class,"Directory to scan for plugin annotations to include in type",null, false,false, null) ;
private String variableName; private Class type; private String description; private boolean pluginLevel; + private boolean directoryWriteable; + private String defaultValue; private String validationRegex;
- private Prop(String variableName, Class type, String description, String validationRegex, boolean pluginLevel) { + private Prop(String variableName, Class type, String description, String validationRegex, boolean pluginLevel, boolean directoryWriteable, String defaultValue) { this.variableName = variableName; this.type = type; this.description = description; this.validationRegex = validationRegex; this.pluginLevel = pluginLevel; + this.directoryWriteable = directoryWriteable; + this.defaultValue = defaultValue; + } + + private Prop(String variableName, Class type, String description, String validationRegex, boolean pluginLevel) { + this(variableName,type,description,validationRegex,pluginLevel,false,null); }
private Prop(String variableName, Class type, String description, String validationRegex) { - this.variableName = variableName; - this.type = type; - this.description = description; - this.validationRegex = validationRegex; + this(variableName,type,description,validationRegex,false,false,null); + } + + private Prop(String variableName, Class type, String description) { + this(variableName,type,description,null,false,false,null); }
public String getVariableName() { @@ -111,4 +125,11 @@ public enum Prop { return builder.toString(); }
+ public boolean isDirectoryWriteable() { + return directoryWriteable; + } + + public String getDefaultValue() { + return defaultValue; + } } diff --git a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Props.java b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Props.java index 237f5ad..95be574 100644 --- a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Props.java +++ b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Props.java @@ -18,6 +18,9 @@ */ package org.rhq.helpers.pluginGen;
+import java.lang.annotation.Annotation; +import java.lang.reflect.Field; +import java.lang.reflect.Method; import java.util.HashSet; import java.util.LinkedHashSet; import java.util.List; @@ -26,6 +29,9 @@ import java.util.Set; import org.rhq.helpers.pluginAnnotations.agent.DataType; import org.rhq.helpers.pluginAnnotations.agent.DisplayType; import org.rhq.helpers.pluginAnnotations.agent.Metric; +import org.rhq.helpers.pluginAnnotations.agent.Operation; +import org.rhq.helpers.pluginAnnotations.agent.Parameter; +import org.rhq.helpers.pluginAnnotations.agent.RhqType; import org.rhq.helpers.pluginAnnotations.agent.Units;
/** @@ -33,6 +39,7 @@ import org.rhq.helpers.pluginAnnotations.agent.Units; * * @author Heiko W. Rupp */ +@SuppressWarnings("unused") public class Props {
/** What category is this ? */ @@ -69,7 +76,7 @@ public class Props { private boolean createChildren; /** Can the service delete children ? */ private boolean deleteChildren; - /** Use externals chars in the plugin jar ? */ + /** Use externals jars in the plugin jar ? */ private boolean usesExternalJarsInPlugin; /** Does it support manual add of children ? */ private boolean manualAddOfResourceType; @@ -77,8 +84,12 @@ public class Props { private boolean usePluginLifecycleListenerApi; /** Depends on JMX plugin ? */ private boolean dependsOnJmxPlugin; + /** Depends on AS7 plugin ? */ + private boolean dependsOnAs7Plugin; + /** Directory with java files to scan for plugin annotations */ + private String scanForAnnotations; /** What version of RHQ should this plugin's pom use ? */ - private String rhqVersion = "3.0.0"; + private String rhqVersion = "4.8.0";
/** Embedded children */ private Set<Props> children = new HashSet<Props>(); @@ -336,22 +347,97 @@ public class Props { this.runsInsides = runsInsides; }
+ public boolean isDependsOnAs7Plugin() { + return dependsOnAs7Plugin; + } + + public void setDependsOnAs7Plugin(boolean dependsOnAs7Plugin) { + this.dependsOnAs7Plugin = dependsOnAs7Plugin; + } + + public String getScanForAnnotations() { + return scanForAnnotations; + } + + public void setScanForAnnotations(String scanForAnnotations) { + this.scanForAnnotations = scanForAnnotations; + } + public void populateMetrics(List<Class> classes) { - for (Class<?> clazz : classes) { - Metric metricAnnot = clazz.getAnnotation(Metric.class); - if (metricAnnot != null) { - MetricProps metric = new MetricProps(metricAnnot.property()); + for (Class<?> clazz : classes) { + for (Field field : clazz.getDeclaredFields()) { + Metric metricAnnot = field.getAnnotation(Metric.class); + addMetric(metricAnnot, field.getName()); + } + + for (Method method : clazz.getDeclaredMethods()) { + Metric metricAnnot = method.getAnnotation(Metric.class); + addMetric(metricAnnot, method.getName()); + } + } + } + + public void populateOperations(List<Class> classes) { + for (Class<?> clazz : classes) { + for (Method method : clazz.getDeclaredMethods()) { + Operation operationAnnot = method.getAnnotation(Operation.class); + if (operationAnnot != null) { + String property = operationAnnot.name(); + if (property.isEmpty()) { + property = method.getName(); + } + OperationProps op = new OperationProps(property); + op.setDisplayName(operationAnnot.displayName()); + op.setDescription(operationAnnot.description()); + RhqType type = RhqType.findType(method.getReturnType()); + if (type != RhqType.VOID) { + SimpleProperty simpleProperty = new SimpleProperty(type.getRhqName()); + op.setResult(simpleProperty); + } + + Class[] types = method.getParameterTypes(); + int i=0; + for (Annotation[] annotations : method.getParameterAnnotations() ) { + for (Annotation annotation : annotations) { + if (annotation instanceof Parameter) { + Parameter parameter = (Parameter) annotation; + SimpleProperty simpleProperty = new 
SimpleProperty(parameter.name()); + simpleProperty.setDescription(parameter.description()); + Class typeClass = types[i]; + RhqType rhqType = RhqType.findType(typeClass); + if (parameter.type()!=RhqType.VOID){ + rhqType = parameter.type(); + } + simpleProperty.setType(rhqType.getRhqName()); + op.getParams().add(simpleProperty); + } + } + i++; + } + operations.add(op); + } + + } + } + } + + private void addMetric(Metric metricAnnot, String name) { + if (metricAnnot != null) { + String property = metricAnnot.property(); + if (property.isEmpty()) { + property = name; + } + MetricProps metric = new MetricProps(property); metric.setDisplayName(metricAnnot.displayName()); metric.setDisplayType(metricAnnot.displayType()); metric.setDataType(metricAnnot.dataType()); metric.setDescription(metricAnnot.description()); + metric.setUnits(metricAnnot.units()); metrics.add(metric); - } - - } - } + } + }
- @Override + @Override public String toString() { final StringBuilder sb = new StringBuilder(); sb.append("Props"); @@ -387,7 +473,7 @@ public class Props { return sb.toString(); }
- public static class TypeKey { + public static class TypeKey { private String name; private String pluginName;
diff --git a/modules/helpers/pluginGen/src/main/resources/component.ftl b/modules/helpers/pluginGen/src/main/resources/component.ftl index 1b3dda1..b445008 100644 --- a/modules/helpers/pluginGen/src/main/resources/component.ftl +++ b/modules/helpers/pluginGen/src/main/resources/component.ftl @@ -187,7 +187,6 @@ public class ${props.componentClass} implements ResourceComponent<#if props.pare
<#if props.operations??>
- public void startOperationFacet(OperationContext context) {
} @@ -203,6 +202,13 @@ public class ${props.componentClass} implements ResourceComponent<#if props.pare public OperationResult invokeOperation(String name, Configuration params) throws Exception {
OperationResult res = new OperationResult(); +<#if props.operations?has_content> + <#list props.operations as operation> + if (name.equals("${operation.name}")) { + // TODO implement me + } + </#list> +<#else> if ("dummyOperation".equals(name)) { // TODO implement me
@@ -210,6 +216,7 @@ public class ${props.componentClass} implements ResourceComponent<#if props.pare return res; } </#if> +</#if>
<#if props.resourceConfiguration> diff --git a/modules/helpers/pluginGen/src/main/resources/descriptor.ftl b/modules/helpers/pluginGen/src/main/resources/descriptor.ftl index 85195e8..1fe2a6b 100644 --- a/modules/helpers/pluginGen/src/main/resources/descriptor.ftl +++ b/modules/helpers/pluginGen/src/main/resources/descriptor.ftl @@ -1,7 +1,7 @@ <#-- /* * RHQ Management Platform - * Copyright (C) 2005-2008 Red Hat, Inc. + * Copyright (C) 2005-2013 Red Hat, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify @@ -38,6 +38,9 @@ <#if props.dependsOnJmxPlugin> <depends plugin="JMX" useClasses="true"/> </#if> +<#if props.dependsOnAs7Plugin> + <depends plugin="JBossAS7" useClasses="true"/> +</#if>
<${props.category.lowerName} <#include "descriptorMain.ftl"/>
diff --git a/modules/helpers/pluginGen/src/main/resources/descriptorMain.ftl b/modules/helpers/pluginGen/src/main/resources/descriptorMain.ftl index b650f7a..5579806 100644 --- a/modules/helpers/pluginGen/src/main/resources/descriptorMain.ftl +++ b/modules/helpers/pluginGen/src/main/resources/descriptorMain.ftl @@ -1,7 +1,7 @@ <#-- /* * RHQ Management Platform - * Copyright (C) 2005-2010 Red Hat, Inc. + * Copyright (C) 2005-2013 Red Hat, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify @@ -39,6 +39,11 @@ name="${props.name}" </#list> </runs-inside> </#if> + <#if props.dependsOnAs7Plugin> + <runs-inside> <!-- TODO adjust type --> + <parent-resource-type name="JBossAS7 Standalone Server" plugin="JBossAS7"/> + </runs-inside> + </#if>
<#if props.simpleProps?has_content> <plugin-configuration> @@ -57,14 +62,14 @@ name="${props.name}" </plugin-configuration> </#if>
- <#if props.hasOperations> + <#if props.hasOperations || props.operations?has_content> <#if props.operations?has_content> <#list props.operations as operation> - <operation name="${operation.name}" displayName="${operation.displayName}" description="${operation.description}"> + <operation name="${operation.name}" <#if operation.displayName?has_content>displayName="${operation.displayName}"</#if> description="${operation.description}"> <#if operation.params?has_content> <parameters> <#list operation.params as param> - <c:simple-property name="${param.name}" <#if param.description??>description="${param.description}"</#if>/> + <c:simple-property name="${param.name}" <#if param.description??>description="${param.description}"</#if> type="${param.type}"/> </#list> </parameters> </#if> @@ -82,10 +87,10 @@ name="${props.name}" </#if> </#if>
- <#if props.hasMetrics> + <#if props.hasMetrics || props.metrics?has_content> <#if props.metrics?has_content> <#list props.metrics as metric> - <metric property="${metric.property}" displayName="${metric.displayName}" displayType="${metric.displayType}" units="${metric.units}" dataType="${metric.dataType}" + <metric property="${metric.property}" <#if metric.displayName?has_content>displayName="${metric.displayName}"</#if> displayType="${metric.displayType}" units="${metric.units}" dataType="${metric.dataType}" description="${metric.description}" /> </#list> <#else> diff --git a/modules/helpers/pluginGen/src/main/resources/pom.ftl b/modules/helpers/pluginGen/src/main/resources/pom.ftl index 01daca1..7c799b8 100644 --- a/modules/helpers/pluginGen/src/main/resources/pom.ftl +++ b/modules/helpers/pluginGen/src/main/resources/pom.ftl @@ -2,7 +2,7 @@ <#-- /* * RHQ Management Platform - * Copyright (C) 2005-20012 Red Hat, Inc. + * Copyright (C) 2005-2013 Red Hat, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify @@ -35,10 +35,10 @@ </parent>
<groupId>org.rhq</groupId> - <artifactId>${props.name}-plugin</artifactId> + <artifactId>${props.pluginName}-plugin</artifactId> <packaging>jar</packaging>
- <name>RHQ ${props.name} Plugin</name> + <name>RHQ ${props.pluginName} Plugin</name> <#if props.description??> <description>${props.description}</description> </#if> @@ -86,7 +86,6 @@ </executions> </plugin> </#if> - </plugins> </build>
@@ -232,6 +231,14 @@ <scope>provided</scope> </dependency> </#if> +<#if props.dependsOnAs7Plugin> + <dependency> + <groupId>org.rhq</groupId> + <artifactId>rhq-jboss-as-7-plugin</artifactId> + <version>${r"${project.version}"}</version> + <scope>provided</scope> + </dependency> +</#if>
<!-- TODO add your dependencies here -->
diff --git a/modules/helpers/pluginGen/src/test/java/org/rhq/helpers/pluginGen/test/FooBean.java b/modules/helpers/pluginGen/src/test/java/org/rhq/helpers/pluginGen/test/FooBean.java new file mode 100644 index 0000000..fbc571b --- /dev/null +++ b/modules/helpers/pluginGen/src/test/java/org/rhq/helpers/pluginGen/test/FooBean.java @@ -0,0 +1,55 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +package org.rhq.helpers.pluginGen.test; + +import org.rhq.helpers.pluginAnnotations.agent.DataType; +import org.rhq.helpers.pluginAnnotations.agent.DisplayType; +import org.rhq.helpers.pluginAnnotations.agent.MeasurementType; +import org.rhq.helpers.pluginAnnotations.agent.Metric; +import org.rhq.helpers.pluginAnnotations.agent.Operation; +import org.rhq.helpers.pluginAnnotations.agent.Parameter; +import org.rhq.helpers.pluginAnnotations.agent.RhqType; + +/** + * Just a sample + * @author Heiko W. 
Rupp + */ + +public class FooBean { + + @Metric(description = "How often was this bean invoked", displayType = DisplayType.SUMMARY, measurementType = MeasurementType.DYNAMIC) + int invocationCount; + + @Metric(description = "Just a foo", dataType = DataType.TRAIT) + String lastCommand; + + @Operation(description = "Increase the invocation count") + public int increaseCounter() { + invocationCount++; + return invocationCount; + } + + @Operation(description = "Decrease the counter") + public void decreaseCounter(@Parameter(description = "How much to decrease?", name = "by") int by) { + invocationCount -= by; + } + + +}
commit 2ee2ef305dd71c1f2869a5a8c4e8ebe9f9bae243 Author: Michael Burman yak@iki.fi Date: Tue Jul 16 15:58:05 2013 -0400
committing windows version of RHQ 4.8 storage patch script
diff --git a/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.bat b/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.bat new file mode 100644 index 0000000..e0312dc --- /dev/null +++ b/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.bat @@ -0,0 +1,64 @@ +@echo off +setlocal + +if /i "%4" == "" ( + echo Usage: rhq48-storage-patch.bat ^<rhq-server-dir^> ^<storage-ip-address^> ^<cql-port^> ^<jmx-port^> + exit /B 1 +) + +set RHQ_SERVER_DIR=%1 +set CQL_HOSTNAME=%2 +set CQL_PORT=%3 +set JMX_PORT=%4 +set USERNAME="rhqadmin" +set PASSWORD="rhqadmin" + +set PATCH="apache-cassandra-1.2.4-patch-1.jar" + +rem swap out the Cassandra jar file with the patched version +echo Copying patch file to %RHQ_SERVER_DIR%\rhq-storage\lib +move %RHQ_SERVER_DIR%\rhq-storage\lib\apache-cassandra-1.2.4.jar . +copy %PATCH% %RHQ_SERVER_DIR%\rhq-storage\lib + +rem restart the storage node +echo Starting RHQ Storage node +call %RHQ_SERVER_DIR%\bin\rhqctl.bat start --storage + +rem sleep for a few seconds while Cassandra starts up +echo Waiting for RHQ Storage Node to start up.. 
+rem Sleep is not implemented in all Windows prompts, this one won't work in Vista +choice /n /c y /d y /t 3 + +set RHQ_STORAGE_BIN=%RHQ_SERVER_DIR%\rhq-storage\bin\ +set CQLSH_PATH=%RHQ_STORAGE_BIN%\cqlsh +set NODETOOL_PATH=%RHQ_STORAGE_BIN%\nodetool.bat + +rem run the CQL script +echo Running CQL script to disable table compression +python %CQLSH_PATH% -u %USERNAME% -p %PASSWORD% -f ./disable_compression.cql + +rem rewrite all sstables +echo Rebuilding data files for system keyspace +call %NODETOOL_PATH% -u rhqadmin -pw rhqadmin -p %JMX_PORT% upgradesstables --include-all-sstables system + +echo Rebuilding data files for system_traces keyspace +call %NODETOOL_PATH% -u rhqadmin -pw rhqadmin -p %JMX_PORT% upgradesstables --include-all-sstables system_traces + +echo Rebuilding data files for system_auth keyspace +call %NODETOOL_PATH% -u rhqadmin -pw rhqadmin -p %JMX_PORT% upgradesstables --include-all-sstables system_auth + +echo Rebuilding data files for rhq keyspace +call %NODETOOL_PATH% -u rhqadmin -pw rhqadmin -p %JMX_PORT% upgradesstables --include-all-sstables rhq + +rem flush memtables and commit log to ensure no data loss prior to upgrade +call %NODETOOL_PATH% -u rhqadmin -pw rhqadmin -p %JMX_PORT% drain + +echo Shutting down the RHQ Storage node +call %RHQ_SERVER_DIR%\bin\rhqctl.bat stop --storage + +echo Removing patch file +del %RHQ_SERVER_DIR%\rhq-storage\lib\%PATCH% +move apache-cassandra-1.2.4.jar %RHQ_SERVER_DIR%\rhq-storage\lib + +echo Table compression has been disabled for all keyspaces. You are now ready to upgrade your RHQ installation. +endlocal
commit 9983f60ec49f72b5bb0afcfb452afde616c622a6 Author: Thomas Segismont tsegismo@redhat.com Date: Tue Jul 16 17:51:59 2013 +0200
Fix InterruptibleOperationsTest. Mock resource context needs a ComponentInvocationContext instance.
diff --git a/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/standalone/InterruptibleOperationsTest.java b/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/standalone/InterruptibleOperationsTest.java index 192117c..b0acf4d 100644 --- a/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/standalone/InterruptibleOperationsTest.java +++ b/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/standalone/InterruptibleOperationsTest.java @@ -47,6 +47,7 @@ import org.eclipse.jetty.servlet.ServletHolder; import org.mockito.Mock; import org.mockito.Mockito; import org.mockito.MockitoAnnotations; +import org.rhq.core.pluginapi.component.ComponentInvocationContext; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; @@ -95,6 +96,7 @@ public class InterruptibleOperationsTest { int httpPort = setupJettyServer(); setupResourceContext(httpPort); executorService = Executors.newSingleThreadExecutor(); + when(resourceContext.getComponentInvocationContext()).thenReturn(new MockComponentInvocationContext()); serverComponent.start(resourceContext); }
@@ -160,6 +162,17 @@ public class InterruptibleOperationsTest { assertEquals(operationResult.getSimpleResult(), "Success"); }
+ private static class MockComponentInvocationContext implements ComponentInvocationContext { + @Override + public boolean isInterrupted() { + return false; + } + + @Override + public void markInterrupted() { + } + } + private class RestartJetty implements Runnable { @Override public void run() {
commit fb809005b42f358d3a2090993431769649ade732 Author: Lukas Krejci lkrejci@redhat.com Date: Tue Jul 16 15:54:07 2013 +0200
Updating the list of the devs in the IRC bot
diff --git a/etc/rhq-ircBot/src/main/java/org/rhq/etc/ircbot/RhqIrcBotListener.java b/etc/rhq-ircBot/src/main/java/org/rhq/etc/ircbot/RhqIrcBotListener.java index dd22dbf..cc10d2d 100644 --- a/etc/rhq-ircBot/src/main/java/org/rhq/etc/ircbot/RhqIrcBotListener.java +++ b/etc/rhq-ircBot/src/main/java/org/rhq/etc/ircbot/RhqIrcBotListener.java @@ -77,8 +77,7 @@ public class RhqIrcBotListener extends ListenerAdapter<RhqIrcBot> {
private static final Set<String> JON_DEVS = new HashSet<String>(); static { - JON_DEVS.add("ccrouch"); - JON_DEVS.add("ips"); + JON_DEVS.add("theute"); JON_DEVS.add("jkremser"); JON_DEVS.add("jsanda"); JON_DEVS.add("jshaughn"); @@ -377,4 +376,4 @@ public class RhqIrcBotListener extends ListenerAdapter<RhqIrcBot> { this.docspacePassword = docspacePassword; }
-} \ No newline at end of file +}
commit 98258686d712608aef7a21108f6a5505ffb820b7 Author: Stefan Negrea snegrea@redhat.com Date: Tue Jul 16 08:16:57 2013 -0500
Add justification for the api changes to the Storage Node Manager bean.
diff --git a/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml new file mode 100644 index 0000000..a9ce322 --- /dev/null +++ b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml @@ -0,0 +1,18 @@ +<?xml version="1.0"?> +<differences> + + <difference> + <className>org/rhq/enterprise/server/cloud/StorageNodeManagerRemote</className> + <differenceType>7012</differenceType> <!-- method added to an interface --> + <method>org.rhq.core.domain.util.PageList findNotAcknowledgedStorageNodeAlerts(org.rhq.core.domain.auth.Subject)</method> + <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> + </difference> + + <difference> + <className>org/rhq/enterprise/server/cloud/StorageNodeManagerRemote</className> + <differenceType>7012</differenceType> <!-- method added to an interface --> + <method>org.rhq.core.domain.util.PageList findAllStorageNodeAlerts(org.rhq.core.domain.auth.Subject)</method> + <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> + </difference> + +</differences>
commit 6ad3c87c08631a00cb960a4ffa4c4929329aa6d8 Author: Thomas Segismont tsegismo@redhat.com Date: Tue Jul 16 13:42:50 2013 +0200
Bug 982804 - AS7 reload behavior includes hardcoded timeout of 20 seconds
diff --git a/modules/core/plugin-api/src/main/java/org/rhq/core/pluginapi/inventory/ResourceContext.java b/modules/core/plugin-api/src/main/java/org/rhq/core/pluginapi/inventory/ResourceContext.java index 8c708db..41a1c41 100644 --- a/modules/core/plugin-api/src/main/java/org/rhq/core/pluginapi/inventory/ResourceContext.java +++ b/modules/core/plugin-api/src/main/java/org/rhq/core/pluginapi/inventory/ResourceContext.java @@ -173,6 +173,8 @@ public class ResourceContext<T extends ResourceComponent<?>> { * @param availabilityContext a {@link AvailabilityContext} the plugin can use to interoperate with the * plugin container inventory manager * @param pluginContainerDeployment indicates where the plugin container is running + * @param componentInvocationContext a {@link ComponentInvocationContext} the plugin can use to determine if the + * current component invocation has been canceled or timed out. */ public ResourceContext(Resource resource, T parentResourceComponent, ResourceContext<?> parentResourceContext, ResourceDiscoveryComponent<T> resourceDiscoveryComponent, SystemInfo systemInfo, File temporaryDirectory, diff --git a/modules/core/plugin-container/src/main/java/org/rhq/core/pc/inventory/ResourceContainer.java b/modules/core/plugin-container/src/main/java/org/rhq/core/pc/inventory/ResourceContainer.java index 4cb2cff..a9fe1c2 100644 --- a/modules/core/plugin-container/src/main/java/org/rhq/core/pc/inventory/ResourceContainer.java +++ b/modules/core/plugin-container/src/main/java/org/rhq/core/pc/inventory/ResourceContainer.java @@ -609,6 +609,8 @@ public class ResourceContainer implements Serializable { }
private static class ComponentInvocation implements Callable { + private static final Log LOG = LogFactory.getLog(ComponentInvocation.class); + private final ResourceContainer resourceContainer; private final Method method; private final Object[] args; @@ -669,6 +671,8 @@ public class ResourceContainer implements Serializable {
public void markContextInterrupted() { localContext.markInterrupted(); + LOG.warn("Invocation has been marked interrupted for method [" + method + "] on resource [" + + resourceContainer.getResource() + "]"); } } } diff --git a/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/StandaloneASComponent.java b/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/StandaloneASComponent.java index d399d1b..ccb8ce0 100644 --- a/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/StandaloneASComponent.java +++ b/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/StandaloneASComponent.java @@ -18,6 +18,8 @@ */ package org.rhq.modules.plugins.jbossas7;
+import static java.util.concurrent.TimeUnit.SECONDS; + import java.io.File; import java.util.HashSet; import java.util.Map; @@ -184,39 +186,28 @@ public class StandaloneASComponent<T extends ResourceComponent<?>> extends BaseS return operationResult; }
- private boolean waitUntilReloaded() { + private boolean waitUntilReloaded() throws InterruptedException { boolean reloaded = false; - int count = 0; - while (!reloaded) { Operation op = new ReadAttribute(new Address(), "release-version"); - try{ + try { Result res = getASConnection().execute(op); if (res.isSuccess() && !res.isReloadRequired()) { reloaded = true; - } else if (count > 20) { - break; } } catch (Exception e) { //do absolutely nothing //if an exception is thrown that means the server is still reloading, so consider this //a single failed attempt, equivalent to res.isSuccess == false } - if (!reloaded) { - try { - Thread.sleep(1000); // Wait 1s - } catch (InterruptedException e) { - // ignore + if (context.getComponentInvocationContext().isInterrupted()) { + // Operation canceled or timed out + throw new InterruptedException(); } + Thread.sleep(SECONDS.toMillis(1)); } - count++; } - - if (log.isDebugEnabled()) { - log.debug("waitUntilReloaded: Used " + count + " delay round(s) to reload. Reload=" + reloaded); - } - return reloaded; }
commit 56f845bec82369f3032b565a8caeed751d1a4093 Author: Heiko W. Rupp hwr@redhat.com Date: Tue Jul 16 10:13:13 2013 +0200
Fix test fallout that got introduced by my changes.
diff --git a/modules/enterprise/server/plugins/alert-snmp/src/test/java/org/rhq/enterprise/server/plugins/alertSnmp/SnmpInfoTest.java b/modules/enterprise/server/plugins/alert-snmp/src/test/java/org/rhq/enterprise/server/plugins/alertSnmp/SnmpInfoTest.java index 97b9a05..ecc995d 100644 --- a/modules/enterprise/server/plugins/alert-snmp/src/test/java/org/rhq/enterprise/server/plugins/alertSnmp/SnmpInfoTest.java +++ b/modules/enterprise/server/plugins/alert-snmp/src/test/java/org/rhq/enterprise/server/plugins/alertSnmp/SnmpInfoTest.java @@ -116,11 +116,11 @@ public class SnmpInfoTest { // String representation is constrained (even not strongly). Configuration configuration = new Configuration(); assertEquals(SnmpInfo.load(configuration).toString(), "UnknownHost:" + DEFAULT_PORT - + " (UnknownOID) (DefaultTrapOID)"); + + " (Unknown Binding Prefix) (DefaultTrapOID)"); configuration.setSimpleValue(PARAM_HOST, "pipo"); - assertEquals(SnmpInfo.load(configuration).toString(), "pipo:" + DEFAULT_PORT + " (UnknownOID) (DefaultTrapOID)"); + assertEquals(SnmpInfo.load(configuration).toString(), "pipo:" + DEFAULT_PORT + " (Unknown Binding Prefix) (DefaultTrapOID)"); configuration.setSimpleValue(PARAM_PORT, "35162"); - assertEquals(SnmpInfo.load(configuration).toString(), "pipo:35162 (UnknownOID) (DefaultTrapOID)"); + assertEquals(SnmpInfo.load(configuration).toString(), "pipo:35162 (Unknown Binding Prefix) (DefaultTrapOID)"); configuration.setSimpleValue(PARAM_VARIABLE_BINDING_PREFIX, "molo"); assertEquals(SnmpInfo.load(configuration).toString(), "pipo:35162 (molo) (DefaultTrapOID)"); configuration.setSimpleValue(PARAM_TRAP_OID, "logo"); diff --git a/modules/enterprise/server/plugins/alert-snmp/src/test/java/org/rhq/enterprise/server/plugins/alertSnmp/SnmpSenderTest.java b/modules/enterprise/server/plugins/alert-snmp/src/test/java/org/rhq/enterprise/server/plugins/alertSnmp/SnmpSenderTest.java index 38b36d7..9583a07 100644 --- 
a/modules/enterprise/server/plugins/alert-snmp/src/test/java/org/rhq/enterprise/server/plugins/alertSnmp/SnmpSenderTest.java +++ b/modules/enterprise/server/plugins/alert-snmp/src/test/java/org/rhq/enterprise/server/plugins/alertSnmp/SnmpSenderTest.java @@ -95,6 +95,7 @@ public class SnmpSenderTest { private AlertManagerLocal alertManager;
private TestSnmpSender snmpSender; + private Configuration pluginConfiguration;
@BeforeMethod public void setUp() throws Exception { @@ -114,7 +115,7 @@ public class SnmpSenderTest {
MockitoAnnotations.initMocks(this);
- Configuration pluginConfiguration = new Configuration(); + pluginConfiguration = new Configuration(); pluginConfiguration.setSimpleValue("snmpVersion", "2c"); pluginConfiguration.setSimpleValue("trapOid", TEST_TRAP_OID_PLUGIN_CONFIG); pluginConfiguration.setSimpleValue("community", "public"); @@ -153,7 +154,7 @@ public class SnmpSenderTest { assertNotNull(result); assertEquals(result.getState(), FAILURE); assertEquals(result.getFailureMessages().size(), 1); - String expectedError = SnmpInfo.load(alertParameters).error; + String expectedError = SnmpInfo.load(alertParameters,pluginConfiguration).error; assertNotNull(expectedError); assertEquals(result.getFailureMessages().get(0), expectedError); }
commit 609c93f0d219afd19db976d55c565c95bb96d1ab Author: Heiko W. Rupp hwr@redhat.com Date: Mon Jul 15 11:03:05 2013 +0200
Display tool tips, for pick button and choice boxes
diff --git a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Generator.java b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Generator.java index 06f3fec..e7da952 100644 --- a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Generator.java +++ b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Generator.java @@ -210,11 +210,13 @@ public class Generator extends Application{ choiceBox.getSelectionModel().selectedItemProperty().addListener(new ChangeListener<String>() { @Override public void changed(ObservableValue<? extends String> observableValue, String s, String newValue) { - setPropsValue(prop.getVariableName(),newValue.equals("Yes"),propType); + setPropsValue(prop.getVariableName(), newValue.equals("Yes"), propType); } }); + Tooltip tooltip = new Tooltip(prop.getDescription()); // TODO make this a hover listener + choiceBox.setTooltip(tooltip);
- root.add(choiceBox,1,row); + root.add(choiceBox, 1, row); } else if (propType.equals(ResourceCategory.class)) { final ChoiceBox choiceBox = new ChoiceBox(); for (ResourceCategory cat : ResourceCategory.values()) { @@ -228,14 +230,17 @@ public class Generator extends Application{ setPropsValue(prop.getVariableName(),newCategory,propType); } }); - + Tooltip tooltip = new Tooltip(prop.getDescription()); // TODO make this a hover listener + choiceBox.setTooltip(tooltip); root.add(choiceBox,1,row); } else if (propType.equals(File.class)) { // Can not add this directly, so add a button to trigger it final Text text = new Text(); text.setText("Pick a directory"); root.add(text,1,row); + Tooltip tooltip = new Tooltip("Pick the (parent) directory where the plugin will be put in."); Button pickButton = new Button("Pick"); + pickButton.setTooltip(tooltip); pickButton.setOnAction(new EventHandler<ActionEvent>() { @Override public void handle(ActionEvent actionEvent) { diff --git a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Prop.java b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Prop.java index 73c4cd9..4ed019a 100644 --- a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Prop.java +++ b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Prop.java @@ -38,7 +38,7 @@ public enum Prop { DESCRIPTION("description", String.class, "Description of the type", ".*"), DISCOVERY_CLASS("discoveryClass", String.class, "Discovery class", "[A-Z][a-zA-Z0-9]*"), COMPONENT_CLASS("componentClass", String.class, "Discovery class", "[A-Z][a-zA-Z0-9]*"), - IS_SINGLETON("singleton",Boolean.class,"Is this type a singleton, which means that" + + IS_SINGLETON("singleton",boolean.class,"Is this type a singleton, which means that" + " there can only be one resource of that type for the given parent?",null), HAS_METRICS("hasMetrics",boolean.class,"Does this type support taking metrics?",null), 
HAS_OPERATIONS("hasOperations",boolean.class,"Does this type support operations?",null),
commit f24eb1cfcb98ce8c1c4a9edb00453aef60c7e7dd Author: John Sanda jsanda@redhat.com Date: Mon Jul 15 22:06:57 2013 -0400
copy cassandra-jvm.properties on upgrade. we're no longer using cassandra-env.sh.
diff --git a/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java b/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java index 9a6a1c0..2fec2dc 100644 --- a/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java +++ b/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java @@ -25,10 +25,8 @@
package org.rhq.storage.installer;
-import java.io.BufferedReader; import java.io.File; import java.io.FileInputStream; -import java.io.FileReader; import java.io.IOException; import java.net.InetAddress; import java.util.ArrayList; @@ -37,6 +35,7 @@ import java.util.HashMap; import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Properties;
import javax.management.MBeanServerConnection; import javax.management.ObjectName; @@ -65,6 +64,7 @@ import org.rhq.cassandra.DeploymentOptions; import org.rhq.cassandra.DeploymentOptionsFactory; import org.rhq.cassandra.installer.RMIContextFactory; import org.rhq.core.util.PropertiesFileUpdate; +import org.rhq.core.util.StringUtil; import org.rhq.core.util.exception.ThrowableUtil; import org.rhq.core.util.file.FileUtil;
@@ -244,24 +244,19 @@ public class StorageInstaller { deployer.updateFilePerms();
// For upgrades we will copy the existing cassandra.yaml, - // log4j-server.properties, and cassandra-env.sh file from the existing + // log4j-server.properties, and cassandra-jvm.properties from the existing // storage node installation. Going forward though we need to add support - // for merging in the existing cassandra.yaml with the new one. We also - // need to do something about cassandra-env.sh. There is no easy to parse - // it and merge in changes which is problematic since that is where heap - // settings and JMX port and other JMV options are specified. Maybe we can - // replace it with a cassandra-in.sh that is essentially a properties file - // which we can easily parse and update. + // for merging in the existing cassandra.yaml with the new one. File oldConfDir = new File(existingStorageDir, "conf"); File newConfDir = new File(storageBasedir, "conf");
String cassandraYaml = "cassandra.yaml"; - String cassandraEnv = "cassandra-env.sh"; - File cassandraEnvFile = new File(newConfDir, cassandraEnv); + String cassandraJvmProps = "cassandra-jvm.properties"; + File cassandraJvmPropsFile = new File(newConfDir, cassandraJvmProps); String log4j = "log4j-server.properties";
replaceFile(new File(oldConfDir, cassandraYaml), new File(newConfDir, cassandraYaml)); - replaceFile(new File(oldConfDir, cassandraEnv), cassandraEnvFile); + replaceFile(new File(oldConfDir, cassandraJvmProps), cassandraJvmPropsFile); replaceFile(new File(oldConfDir, log4j), new File(newConfDir, log4j));
log.info("Finished installing RHQ Storage Node."); @@ -274,7 +269,7 @@ public class StorageInstaller {
hostname = (String) config.get("listen_address");
- jmxPort = parseJmxPort(cassandraEnvFile); + jmxPort = parseJmxPort(cassandraJvmPropsFile); } else { if (cmdLine.hasOption("dir")) { File basedir = new File(cmdLine.getOptionValue("dir")); @@ -666,56 +661,26 @@ public class StorageInstaller { } }
- private int parseJmxPort(File cassandraEnvFile) { + private int parseJmxPort(File cassandraJvmOptsFile) { Integer port = null; if (isWindows()) { // TODO return defaultJmxPort; } else { - BufferedReader reader = null; try { - reader = new BufferedReader(new FileReader(cassandraEnvFile)); - String line = reader.readLine(); - - while (line != null) { - if (line.startsWith("JMX_PORT")) { - int startIndex = line.indexOf("JMX_PORT="") + 1; - int endIndex = line.lastIndexOf("""); - - if (startIndex == -1 || endIndex == -1) { - log.error("Failed to parse the JMX port. Make sure that you have the JMX port defined on its " - + "own line as follows, JMX_PORT="<jmx-port>""); - throw new RuntimeException("Cannot determine JMX port"); - } - try { - port = Integer.parseInt(line.substring(startIndex, endIndex)); - } catch (NumberFormatException e) { - log.error("The JMX port must be an integer. [" + port + "] is an invalid value"); - throw new RuntimeException("The JMX port has an invalid value"); - } - return port; - } - line = reader.readLine(); + Properties properties = new Properties(); + properties.load(new FileInputStream(cassandraJvmOptsFile)); + + String jmxPort = properties.getProperty("jmx_port"); + if (StringUtil.isEmpty(jmxPort)) { + log.error("The property [jmx_port] is undefined."); + throw new RuntimeException("Cannot determine JMX port"); } - log.error("Failed to parse the JMX port. Make sure that you have the JMX port defined on its " - + "own line as follows, JMX_PORT="<jmx-port>""); - throw new RuntimeException("Cannot determine JMX port"); + + return Integer.parseInt(jmxPort); } catch (IOException e) { log.error("Failed to parse JMX port. 
There was an unexpected IO error", e); throw new RuntimeException("Failed to parse JMX port due to IO error: " + e.getMessage()); - } finally { - try { - if (reader != null) { - reader.close(); - } - } catch (IOException e) { - if (log.isDebugEnabled()) { - log.debug("An error occurred closing the " + BufferedReader.class.getName() + " used to " - + "parse the JMX port", e); - } else { - log.warn("There was error closing the reader used to parse the JMX port: " + e.getMessage()); - } - } } } }
commit 27138835d3ac9fb10fe18519fbe441fc1979387e Author: John Sanda jsanda@redhat.com Date: Mon Jul 15 21:47:34 2013 -0400
fixing bug in script
diff --git a/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.sh b/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.sh index 7c0b88a..33984d1 100755 --- a/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.sh +++ b/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.sh @@ -19,6 +19,9 @@ CQL_HOSTNAME=$2 CQL_PORT=$3 JMX_PORT=$4
+export CQLSH_HOST=$2 +export CQL_PORT=$3 + PATCH="apache-cassandra-1.2.4-patch-1.jar"
# swap out the Cassandra jar file with the patched version @@ -36,8 +39,6 @@ sleep 3
# run the CQL script echo "Running CQL script to disable table compression" -export CQLSH_HOST=$CQLSH_HOST -export CQL_PORT=$CQL_PORT $RHQ_SERVER_DIR/rhq-storage/bin/cqlsh -u rhqadmin -p rhqadmin -f ./disable_compression.cql
# rewrite all sstables
commit 34514a874e079e19736bbc0fd71db099a1b9b25a Author: John Sanda jsanda@redhat.com Date: Mon Jul 15 16:23:06 2013 -0400
make parsing of heap options more robust
diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java index a0220ba..8d1771d 100644 --- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java @@ -32,18 +32,13 @@ public class StorageNodeConfigDelegate implements ConfigurationFacet {
Configuration config = new Configuration();
- String heapMinProp = properties.getProperty("heap_min"); - String heapMaxProp = properties.getProperty("heap_max"); - String heapNewProp = properties.getProperty("heap_new"); - String threadStackSizeProp = properties.getProperty("thread_stack_size"); String heapDumpOnOOMError = properties.getProperty("heap_dump_on_OOMError"); String heapDumpDir = properties.getProperty("heap_dump_dir");
- config.put(new PropertySimple("minHeapSize", heapMinProp.substring(4))); - config.put(new PropertySimple("maxHeapSize", heapMaxProp.substring(4))); - config.put(new PropertySimple("heapNewSize", heapNewProp.substring(4))); - config.put(new PropertySimple("threadStackSize", threadStackSizeProp.substring(4, - threadStackSizeProp.length() - 1))); + config.put(new PropertySimple("minHeapSize", getHeapMinProp(properties))); + config.put(new PropertySimple("maxHeapSize", getHeapMaxProp(properties))); + config.put(new PropertySimple("heapNewSize", getHeapNewProp(properties))); + config.put(new PropertySimple("threadStackSize", getStackSizeProp(properties)));
if (!StringUtil.isEmpty(heapDumpOnOOMError)) { config.put(new PropertySimple("heapDumpOnOOMError", true)); @@ -61,6 +56,62 @@ public class StorageNodeConfigDelegate implements ConfigurationFacet { return config; }
+ private String getHeapMinProp(Properties properties) { + String value = properties.getProperty("heap_min"); + + if (StringUtil.isEmpty(value)) { + return ""; + } + + if (!value.startsWith("-Xms")) { + return value; + } + + return value.substring(4); + } + + private String getHeapMaxProp(Properties properties) { + String value = properties.getProperty("heap_max"); + + if (StringUtil.isEmpty(value)) { + return ""; + } + + if (!value.startsWith("-Xmx")) { + return value; + } + + return value.substring(4); + } + + private String getHeapNewProp(Properties properties) { + String value = properties.getProperty("heap_new"); + + if (StringUtil.isEmpty(value)) { + return ""; + } + + if (!value.startsWith("-Xmn")) { + return value; + } + + return value.substring(4); + } + + private String getStackSizeProp(Properties properties) { + String value = properties.getProperty("thread_stack_size"); + + if (StringUtil.isEmpty(value)) { + return ""; + } + + if (!(value.startsWith("-Xss") || value.endsWith("k") || value.length() > 5)) { + return value; + } + + return value.substring(4, value.length() - 1); + } + @Override public void updateResourceConfiguration(ConfigurationUpdateReport configurationUpdateReport) { try {
commit 632eb18cc8984f12c794678c6d13a455048a2096 Author: John Sanda jsanda@redhat.com Date: Mon Jul 15 15:35:48 2013 -0400
adding validation checks for heap and thread args
diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java index c87d2ac..a0220ba 100644 --- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java @@ -42,7 +42,8 @@ public class StorageNodeConfigDelegate implements ConfigurationFacet { config.put(new PropertySimple("minHeapSize", heapMinProp.substring(4))); config.put(new PropertySimple("maxHeapSize", heapMaxProp.substring(4))); config.put(new PropertySimple("heapNewSize", heapNewProp.substring(4))); - config.put(new PropertySimple("threadStackSize", threadStackSizeProp.substring(4))); + config.put(new PropertySimple("threadStackSize", threadStackSizeProp.substring(4, + threadStackSizeProp.length() - 1)));
if (!StringUtil.isEmpty(heapDumpOnOOMError)) { config.put(new PropertySimple("heapDumpOnOOMError", true)); @@ -70,6 +71,7 @@ public class StorageNodeConfigDelegate implements ConfigurationFacet {
String maxHeapSize = config.getSimpleValue("maxHeapSize"); if (!StringUtil.isEmpty(maxHeapSize)) { + validateHeapArg("maxHeapSize", maxHeapSize); // We want min and max heap to be the same properties.setProperty("heap_min", "-Xms" + maxHeapSize); properties.setProperty("heap_max", "-Xmx" + maxHeapSize); @@ -77,12 +79,14 @@ public class StorageNodeConfigDelegate implements ConfigurationFacet {
String heapNewSize = config.getSimpleValue("heapNewSize"); if (!StringUtil.isEmpty(heapNewSize)) { + validateHeapArg("heapNewSize", heapNewSize); properties.setProperty("heap_new", "-Xmn" + heapNewSize); }
String threadStackSize = config.getSimpleValue("threadStackSize"); if (!StringUtil.isEmpty(threadStackSize)) { - properties.setProperty("thread_stack_size", "-Xss" + threadStackSize); + validateStackArg(threadStackSize); + properties.setProperty("thread_stack_size", "-Xss" + threadStackSize + "k"); }
PropertySimple heapDumpOnOMMError = config.getSimple("heapDumpOnOOMError"); @@ -102,9 +106,36 @@ public class StorageNodeConfigDelegate implements ConfigurationFacet { propertiesUpdater.update(properties);
configurationUpdateReport.setStatus(ConfigurationUpdateStatus.SUCCESS); + } catch (IllegalArgumentException e) { + configurationUpdateReport.setErrorMessage("No configuration update was applied: " + e.getMessage()); } catch (IOException e) { configurationUpdateReport.setErrorMessageFromThrowable(e); } + } + + private void validateHeapArg(String name, String value) { + if (value.length() < 2) { + throw new IllegalArgumentException(value + " is not a legal value for the property [" + name + "]"); + } + + char[] chars = value.toCharArray(); + for (int i = 0; i < chars.length - 1; ++i) { + if (!Character.isDigit(chars[i])) { + throw new IllegalArgumentException(value + " is not a legal value for the property [" + name + "]"); + } + }
+ char lastChar = Character.toUpperCase(chars[chars.length - 1]); + if (!(lastChar == 'M' || lastChar == 'G')) { + throw new IllegalArgumentException(value + " is not a legal value for the property [" + name + "]"); + } + } + + private void validateStackArg(String value) { + try { + Integer.parseInt(value); + } catch (NumberFormatException e) { + throw new IllegalArgumentException(value + " is not a legal value for the property [threadStackSize]"); + } } } diff --git a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml index c700257..5a60d19 100644 --- a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml +++ b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml @@ -118,12 +118,24 @@ description="The minimum heap size. This value will be used with the -Xms JVM option. This is read only because it is automatically set to the same value as Max Heap Size."/> <c:simple-property name="maxHeapSize" - description="The maximum heap size. This value will be used with the -Xmx JVM option."/> + description="The maximum heap size. This value will be used with the -Xmx JVM option. The + value should be an integer with a suffix of M or G to indicate megabytes or gigabytes."> + <c:constraint> + <c:regex-constraint expression="\d+[mMgG]"/> + </c:constraint> + </c:simple-property> <c:simple-property name="heapNewSize" description="The size of the new generation portion of the heap. This value will be used with - the -Xmn JVM option."/> + the -Xmn JVM option. The value should be an integer with a suffix of M or G to indicate + megabytes or gigabytes."> + <c:constraint> + <c:regex-constraint expression="\d+[mMgG]"/> + </c:constraint> + </c:simple-property> <c:simple-property name="threadStackSize" - description="The thread stack size. This memory is allocated to each thread off heap."/> + type="integer" + description="The thread stack size. 
This memory is allocated to each thread off heap. The + value should be an integer that will be interpreted in kilobytes."/> <c:simple-property name="heapDumpOnOOMError" displayName="Heap Dump on OutOfMemoryError" type="boolean" default="true" description="Generate a heap dump when an OutOfMemoryError occurs"/> diff --git a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeConfigDelegateTest.java b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeConfigDelegateTest.java index ee34f43..64d14b2 100644 --- a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeConfigDelegateTest.java +++ b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeConfigDelegateTest.java @@ -13,8 +13,10 @@ import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test;
import org.rhq.core.domain.configuration.Configuration; +import org.rhq.core.domain.configuration.ConfigurationUpdateStatus; import org.rhq.core.domain.configuration.PropertySimple; import org.rhq.core.pluginapi.configuration.ConfigurationUpdateReport; +import org.rhq.core.util.file.FileUtil;
/** * @author John Sanda @@ -29,6 +31,7 @@ public class StorageNodeConfigDelegateTest { public void initDirs(Method test) throws Exception { File dir = new File(getClass().getResource(".").toURI()); basedir = new File(dir, getClass().getSimpleName() + "/" + test.getName()); + FileUtil.purge(basedir, true); configDelegate = new StorageNodeConfigDelegate(basedir); }
@@ -42,7 +45,7 @@ public class StorageNodeConfigDelegateTest { assertEquals(config.getSimpleValue("minHeapSize"), "512M", "Failed to load property [minHeapSize]"); assertEquals(config.getSimpleValue("maxHeapSize"), "512M", "Failed to load property [maxHepSize]"); assertEquals(config.getSimpleValue("heapNewSize"), "128M", "Failed to load property [heapNewSize]"); - assertEquals(config.getSimpleValue("threadStackSize"), "180k", "Failed to load property [threadStackSize]"); + assertEquals(config.getSimpleValue("threadStackSize"), "180", "Failed to load property [threadStackSize]"); assertEquals(config.getSimple("heapDumpOnOOMError").getBooleanValue(), (Boolean) true, "Failed to load property [heapDumpOnOOMError]"); assertEquals(new File(config.getSimpleValue("heapDumpDir")), binDir(), "Failed to load property [heapDumpDir]"); @@ -56,7 +59,7 @@ public class StorageNodeConfigDelegateTest { config.put(new PropertySimple("minHeapSize", "1024M")); config.put(new PropertySimple("maxHeapSize", "1024M")); config.put(new PropertySimple("heapNewSize", "256M")); - config.put(new PropertySimple("threadStackSize", "240k")); + config.put(new PropertySimple("threadStackSize", "240")); config.put(new PropertySimple("heapDumpOnOOMError", true)); config.put(new PropertySimple("heapDumpDir", confDir()));
@@ -96,6 +99,60 @@ public class StorageNodeConfigDelegateTest { "should be the same as [maxHeapSize]."); }
+ @Test + public void disableHeapDumps() throws Exception { + createDefaultConfig(); + + ConfigurationUpdateReport report = new ConfigurationUpdateReport(Configuration.builder() + .addSimple("heapDumpOnOOMError", false).build()); + + configDelegate.updateResourceConfiguration(report); + + Properties properties = loadCassandraJvmProps(); + + assertEquals(properties.getProperty("heap_dump_on_OOMError"), "", "Failed to disable property " + + "[heapDumpOnOOMError]"); + } + + @Test + public void updateShouldFailWhenMaxHeapSizeIsInvalid() throws Exception { + createDefaultConfig(); + + ConfigurationUpdateReport report = new ConfigurationUpdateReport(Configuration.builder() + .addSimple("maxHeapSize", "256GB").build()); + + configDelegate.updateResourceConfiguration(report); + + assertEquals(report.getStatus(), ConfigurationUpdateStatus.FAILURE, "The configuration update should fail " + + "when [maxHeapSize] has an invalid value."); + } + + @Test + public void updateShouldFailWhenHeapNewSizeIsInvalid() throws Exception { + createDefaultConfig(); + + ConfigurationUpdateReport report = new ConfigurationUpdateReport(Configuration.builder() + .addSimple("heapNewSize", "25^G").build()); + + configDelegate.updateResourceConfiguration(report); + + assertEquals(report.getStatus(), ConfigurationUpdateStatus.FAILURE, "The configuration update should fail " + + "when [heapNewSize] has an invalid value."); + } + + @Test + public void updateShouldFailWhenThreadStackSizeIsInvalid() throws Exception { + createDefaultConfig(); + + ConfigurationUpdateReport report = new ConfigurationUpdateReport(Configuration.builder() + .addSimple("threadStackSize", "128M").build()); + + configDelegate.updateResourceConfiguration(report); + + assertEquals(report.getStatus(), ConfigurationUpdateStatus.FAILURE, "The configuration update should fail " + + "when [threadStackSize] has an invalid value."); + } + private void createDefaultConfig() throws IOException { Properties properties = new Properties(); 
properties.setProperty("heap_min", "-Xms512M");
commit fe38a28bd0ba7df967ec8b6a7f5f2b4a6bb839d6 Author: Thomas Segismont tsegismo@redhat.com Date: Wed Jul 3 12:27:55 2013 +0200
Bug 923400 - Sigar creates high number of blocked threads (unbounded) if mount is gone
diff --git a/modules/core/native-system/src/main/java/org/rhq/core/system/SigarAccess.java b/modules/core/native-system/src/main/java/org/rhq/core/system/SigarAccess.java index 4319f9a..8c7cea9 100644 --- a/modules/core/native-system/src/main/java/org/rhq/core/system/SigarAccess.java +++ b/modules/core/native-system/src/main/java/org/rhq/core/system/SigarAccess.java @@ -1,6 +1,6 @@ /* * RHQ Management Platform - * Copyright (C) 2005-2008 Red Hat, Inc. + * Copyright (C) 2005-2013 Red Hat, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify @@ -13,17 +13,13 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ package org.rhq.core.system;
-import java.lang.reflect.InvocationHandler; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; import java.lang.reflect.Proxy;
-import org.hyperic.sigar.Sigar; import org.hyperic.sigar.SigarProxy;
/** @@ -34,7 +30,6 @@ import org.hyperic.sigar.SigarProxy; * @author John Mazzitelli */ public class SigarAccess { - private static SigarAccessHandler invocationHandler; private static SigarProxy sigarProxy;
@@ -83,35 +78,4 @@ public class SigarAccess {
}
- private static class SigarAccessHandler implements InvocationHandler { - private Sigar sigar; - - public Object invoke(Object proxy, Method meth, Object[] args) throws Throwable { - // its possible in the time between this handler's creation and now, someone disabled the native layer. - // throw a runtime exception if the native system was disabled - if (SystemInfoFactory.isNativeSystemInfoDisabled()) { - throw new SystemInfoException("Native system has been disabled"); - } - - try { - synchronized (this) { - if (sigar == null) { - this.sigar = new Sigar(); - } - - return meth.invoke(sigar, args); - } - } catch (InvocationTargetException e) { - throw e.getTargetException(); - } - } - - public synchronized void close() { - if (this.sigar != null) { - this.sigar.close(); - this.sigar = null; - } - } - } - -} \ No newline at end of file +} diff --git a/modules/core/native-system/src/main/java/org/rhq/core/system/SigarAccessHandler.java b/modules/core/native-system/src/main/java/org/rhq/core/system/SigarAccessHandler.java new file mode 100644 index 0000000..a781641 --- /dev/null +++ b/modules/core/native-system/src/main/java/org/rhq/core/system/SigarAccessHandler.java @@ -0,0 +1,201 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +package org.rhq.core.system; + +import static java.util.concurrent.TimeUnit.MINUTES; +import static java.util.concurrent.TimeUnit.SECONDS; + +import java.lang.management.ManagementFactory; +import java.lang.management.ThreadInfo; +import java.lang.reflect.InvocationHandler; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.locks.ReentrantLock; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.hyperic.sigar.Sigar; + +/** + * An {@link InvocationHandler} for a {@link org.hyperic.sigar.SigarProxy}. + * <p>A single instance of this class will be created by the {@link SigarAccess} class.</p> + * This class holds a shared {@link Sigar} instance and serializes calls. If a thread waits more than + * 'sharedSigarLockMaxWait' seconds, it will be given a new Sigar instance, which will be destroyed at the end of the + * call. + * <p>Every 5 minutes, a background task checks that 'localSigarInstancesWarningThreshold' has not been exceeded. It it + * has, a warning message will be logged, optionally with a thread dump. 
+ * <p>This class is configurable with System properties: + * <ul> + * <li><strong>sharedSigarLockMaxWait</strong>: maximum time in seconds a thread will wait for the shared Sigar lock + * acquistion; defaults to 2 seconds</li> + * <li><strong>localSigarInstancesWarningThreshold</strong>: threshold of currently living Sigar instances at which + * the background task will print warning messages; defaults to 50</li> + * <li><strong>maxLocalSigarInstances</strong>: maximum number of local Sigar instances which can be created, zero + * and negative values being interpreted as 'no limit'; defaults to 50</li> + * <li><strong>threadDumpOnlocalSigarInstancesWarningThreshold</strong>: if set to true (case insensitive), the + * background task will also log a thread dump when <strong>localSigarInstancesWarningThreshold</strong> is met</li> + * </ul> + * </p> + * + * @author Thomas Segismont + */ +class SigarAccessHandler implements InvocationHandler { + private static final Log LOG = LogFactory.getLog(SigarAccessHandler.class); + + private static final String LINE_SEPARATOR = System.getProperty("line.separator"); + private static final int SHARED_SIGAR_LOCK_MAX_WAIT = Integer.getInteger("sharedSigarLockMaxWait", 2); + private static final int LOCAL_SIGAR_INSTANCES_WARNING_THRESHOLD = Integer.getInteger( + "localSigarInstancesWarningThreshold", 50); + private static final int MAX_LOCAL_SIGAR_INSTANCES = Integer.getInteger("maxLocalSigarInstances", 50); + private static final boolean LIMIT_LOCAL_SIGAR_INSTANCES = MAX_LOCAL_SIGAR_INSTANCES > 0; + private static final boolean THREAD_DUMP_ON_SIGAR_INSTANCES_THRESHOLD = Boolean + .getBoolean("threadDumpOnlocalSigarInstancesWarningThreshold"); + + private final SigarFactory sigarFactory; + private final ReentrantLock sharedSigarLock; + private final ReentrantLock localSigarLock; + private final ScheduledExecutorService scheduledExecutorService; + private volatile int localSigarInstancesCount; + private Sigar sharedSigar; + + 
SigarAccessHandler() { + this(new DefaultSigarFactory()); + } + + SigarAccessHandler(SigarFactory sigarFactory) { + this.sigarFactory = sigarFactory; + sharedSigarLock = new ReentrantLock(); + localSigarLock = new ReentrantLock(); + scheduledExecutorService = Executors.newSingleThreadScheduledExecutor(); + scheduledExecutorService.scheduleWithFixedDelay(new ThresholdChecker(), 1, 5, MINUTES); + localSigarInstancesCount = 0; + } + + public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { + // its possible in the time between this handler's creation and now, someone disabled the native layer. + // throw a runtime exception if the native system was disabled + if (SystemInfoFactory.isNativeSystemInfoDisabled()) { + throw new SystemInfoException("Native system has been disabled"); + } + + // Acquire lock for shared Sigar instance. Wait 'sharedSigarLockMaxWait' seconds at most + boolean acquiredLock = sharedSigarLock.tryLock(SHARED_SIGAR_LOCK_MAX_WAIT, SECONDS); + if (acquiredLock) { + if (sharedSigar == null) { + this.sharedSigar = sigarFactory.createSigarInstance(); + } + try { + return method.invoke(sharedSigar, args); + } catch (InvocationTargetException e) { + throw e.getTargetException(); + } finally { + sharedSigarLock.unlock(); + } + } else { + Sigar localSigar = createLocalSigarInstance(); + try { + return method.invoke(localSigar, args); + } catch (InvocationTargetException e) { + throw e.getTargetException(); + } finally { + closeLocalSigarInstance(localSigar); + } + } + } + + private Sigar createLocalSigarInstance() { + localSigarLock.lock(); + try { + if (LIMIT_LOCAL_SIGAR_INSTANCES && localSigarInstancesCount >= MAX_LOCAL_SIGAR_INSTANCES) { + throw new RuntimeException("Too many Sigar instances created"); + } + Sigar sigarInstance = sigarFactory.createSigarInstance(); + localSigarInstancesCount++; + return sigarInstance; + } finally { + localSigarLock.unlock(); + } + } + + private void closeLocalSigarInstance(Sigar sigar) { + 
localSigarLock.lock(); + try { + sigar.close(); + localSigarInstancesCount--; + } finally { + localSigarLock.unlock(); + } + } + + void close() { + if (sharedSigar != null) { + sharedSigarLock.lock(); + try { + sharedSigar.close(); + sharedSigar = null; + } finally { + sharedSigarLock.unlock(); + } + } + scheduledExecutorService.shutdownNow(); + } + + int localSigarInstancesCount() { + return localSigarInstancesCount; + } + + interface SigarFactory { + Sigar createSigarInstance(); + } + + private static class DefaultSigarFactory implements SigarFactory { + @Override + public Sigar createSigarInstance() { + return new Sigar(); + } + } + + private class ThresholdChecker implements Runnable { + @Override + public void run() { + int currentCount = localSigarInstancesCount; + if (currentCount > LOCAL_SIGAR_INSTANCES_WARNING_THRESHOLD) { + StringBuilder sb = new StringBuilder(); + sb.append("There are ").append(currentCount).append(" local Sigar instances currently active. ") + .append("This may indicate that a call to the shared Sigar instance did not complete."); + if (THREAD_DUMP_ON_SIGAR_INSTANCES_THRESHOLD) { + sb.append(LINE_SEPARATOR); + threadDump(sb); + } + LOG.warn(sb.toString()); + } + } + + private void threadDump(StringBuilder sb) { + ThreadInfo[] threadInfos = ManagementFactory.getThreadMXBean().dumpAllThreads(true, true); + for (ThreadInfo threadInfo : threadInfos) { + sb.append(threadInfo); + sb.append(LINE_SEPARATOR); + } + } + } +} diff --git a/modules/core/native-system/src/test/java/org/rhq/core/system/SigarAccessHandlerTest.java b/modules/core/native-system/src/test/java/org/rhq/core/system/SigarAccessHandlerTest.java new file mode 100644 index 0000000..e993725 --- /dev/null +++ b/modules/core/native-system/src/test/java/org/rhq/core/system/SigarAccessHandlerTest.java @@ -0,0 +1,172 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +package org.rhq.core.system; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertTrue; + +import java.lang.reflect.Method; +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; + +import org.hyperic.sigar.Sigar; +import org.hyperic.sigar.SigarProxy; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +/** + * @author Thomas Segismont + */ +@Test(singleThreaded = true) +public class SigarAccessHandlerTest { + private static final int THREAD_POOL_SIZE = 100; + + private Method getSwapMethod; + private Method getFileSystemMapMethod; + private Sigar sigarMock; + private ExecutorService executorService; + private SigarAccessHandler sigarAccessHandler; + + @BeforeClass + private void setFields() throws Exception { + getSwapMethod = SigarProxy.class.getMethod("getSwap", new Class[0]); + getFileSystemMapMethod = 
SigarProxy.class.getMethod("getFileSystemMap", new Class[0]); + sigarMock = mock(Sigar.class); + // Wait 15 seconds on call to getSwap + when(sigarMock.getSwap()).then(new Answer<Object>() { + @Override + public Object answer(InvocationOnMock invocation) throws Throwable { + Thread.sleep(1000 * 15); + return null; + } + }); + } + + @BeforeMethod + public void setUp() throws Exception { + sigarAccessHandler = new SigarAccessHandler(new SigarAccessHandler.SigarFactory() { + @Override + public Sigar createSigarInstance() { + return sigarMock; + } + }); + executorService = Executors.newFixedThreadPool(THREAD_POOL_SIZE); + } + + @AfterMethod + public void tearDown() throws Exception { + sigarAccessHandler.close(); + executorService.shutdownNow(); + + } + + @Test(timeOut = 1000 * 30) + public void testOnDemandSigarInstanceCreation() throws Throwable { + // Start concurrent invocations of the long running getSwap method + for (int i = 0; i < 10; i++) { + executorService.submit(new Runnable() { + @Override + public void run() { + try { + sigarAccessHandler.invoke(null, getSwapMethod, new Object[0]); + } catch (Throwable ignore) { + } + } + }); + } + Thread.sleep(1000 * 8); + + assertEquals(sigarAccessHandler.localSigarInstancesCount(), 9); + } + + @Test(timeOut = 1000 * 30) + public void testSigarInstanceDestruction() throws Throwable { + // Start concurrent invocations of the short running getFileSystemMapMethod method and an invocation of the long + // running getSwap method + for (int i = 0; i < 10; i++) { + if (i == 0) { + executorService.submit(new Runnable() { + @Override + public void run() { + try { + sigarAccessHandler.invoke(null, getSwapMethod, new Object[0]); + } catch (Throwable ignore) { + } + } + }); + } else { + executorService.submit(new Runnable() { + @Override + public void run() { + try { + sigarAccessHandler.invoke(null, getFileSystemMapMethod, new Object[0]); + } catch (Throwable ignore) { + } + } + }); + } + } + Thread.sleep(1000 * 8); + + 
assertEquals(sigarAccessHandler.localSigarInstancesCount(), 0); + } + + @Test(timeOut = 1000 * 30) + public void testMaxSigarInstanceCreation() throws Throwable { + // Start concurrent invocations of the long running getSwap method + List<Future<Throwable>> futures = new LinkedList<Future<Throwable>>(); + for (int i = 0; i < 60; i++) { + futures.add(executorService.submit(new Callable<Throwable>() { + @Override + public Throwable call() throws Exception { + try { + sigarAccessHandler.invoke(null, getSwapMethod, new Object[0]); + } catch (Throwable throwable) { + return throwable; + } + return null; + } + })); + } + Thread.sleep(1000 * 8); + + assertEquals(sigarAccessHandler.localSigarInstancesCount(), 50); + int failedCallsCount = 0; + for (Future<Throwable> future : futures) { + Throwable throwable = future.get(); + if (throwable != null) { + failedCallsCount++; + assertTrue(throwable instanceof RuntimeException); + assertEquals(throwable.getMessage(), "Too many Sigar instances created"); + } + } + assertEquals(failedCallsCount, 9); + } +}
commit 05ff27a9dec1422f46c91f69d524a3b388b3e421 Author: Thomas Segismont tsegismo@redhat.com Date: Mon Jul 15 19:57:08 2013 +0200
Bug 885655 - Datasource - Security Deployment Type is by default set to "Domain and application" instead of "None" as stated in description
Set default value in #createResource if needed. Update resource after creation, as the Security deployment type value is ignored by EAP servers.
diff --git a/modules/plugins/jboss-as-5/src/main/java/org/rhq/plugins/jbossas5/ApplicationServerComponent.java b/modules/plugins/jboss-as-5/src/main/java/org/rhq/plugins/jbossas5/ApplicationServerComponent.java index 0d45594..e0699b0 100644 --- a/modules/plugins/jboss-as-5/src/main/java/org/rhq/plugins/jbossas5/ApplicationServerComponent.java +++ b/modules/plugins/jboss-as-5/src/main/java/org/rhq/plugins/jbossas5/ApplicationServerComponent.java @@ -19,12 +19,15 @@
package org.rhq.plugins.jbossas5;
+import static org.rhq.core.domain.resource.CreateResourceStatus.SUCCESS; + import java.io.File; import java.io.InputStream; import java.io.Serializable; import java.net.URI; import java.net.URISyntaxException; import java.util.ArrayList; +import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -47,13 +50,17 @@ import org.jboss.deployers.spi.management.deploy.ProgressEvent; import org.jboss.deployers.spi.management.deploy.ProgressListener; import org.jboss.managed.api.ComponentType; import org.jboss.managed.api.ManagedComponent; +import org.jboss.managed.api.ManagedProperty; +import org.jboss.metatype.api.values.MetaValue; import org.jboss.metatype.api.values.SimpleValue; import org.jboss.on.common.jbossas.JBPMWorkflowManager; import org.jboss.on.common.jbossas.JBossASPaths; import org.jboss.on.common.jbossas.JmxConnectionHelper;
import org.rhq.core.domain.configuration.Configuration; +import org.rhq.core.domain.configuration.PropertyMap; import org.rhq.core.domain.configuration.PropertySimple; +import org.rhq.core.domain.configuration.definition.ConfigurationDefinition; import org.rhq.core.domain.content.PackageType; import org.rhq.core.domain.content.transfer.DeployPackageStep; import org.rhq.core.domain.content.transfer.DeployPackagesResponse; @@ -81,6 +88,8 @@ import org.rhq.core.pluginapi.inventory.ResourceContext; import org.rhq.core.pluginapi.measurement.MeasurementFacet; import org.rhq.core.pluginapi.operation.OperationFacet; import org.rhq.core.pluginapi.operation.OperationResult; +import org.rhq.plugins.jbossas5.adapter.api.PropertyAdapter; +import org.rhq.plugins.jbossas5.adapter.api.PropertyAdapterFactory; import org.rhq.plugins.jbossas5.connection.LocalProfileServiceConnectionProvider; import org.rhq.plugins.jbossas5.connection.ProfileServiceConnection; import org.rhq.plugins.jbossas5.connection.ProfileServiceConnectionProvider; @@ -88,6 +97,7 @@ import org.rhq.plugins.jbossas5.connection.RemoteProfileServiceConnectionProvide import org.rhq.plugins.jbossas5.helper.CreateChildResourceFacetDelegate; import org.rhq.plugins.jbossas5.helper.InPluginControlActionFacade; import org.rhq.plugins.jbossas5.helper.JBossAS5ConnectionTypeDescriptor; +import org.rhq.plugins.jbossas5.util.ConversionUtils; import org.rhq.plugins.jbossas5.util.ManagedComponentUtils;
/** @@ -300,8 +310,58 @@ public class ApplicationServerComponent<T extends ResourceComponent<?>> implemen
// CreateChildResourceFacet --------------------------------------------
- public CreateResourceReport createResource(CreateResourceReport createResourceReport) { - return this.createChildResourceDelegate.createResource(createResourceReport); + public CreateResourceReport createResource(CreateResourceReport report) { + if (creatingDatasourceOrConnectionFactory(report)) { + report = preCreateDatasourceOrConnectionFactory(report); + } + report = this.createChildResourceDelegate.createResource(report); + if (report.getStatus() != SUCCESS) { + return report; + } + if (creatingDatasourceOrConnectionFactory(report)) { + report = postCreateDatasourceOrConnectionFactory(report); + } + return report; + } + + private CreateResourceReport preCreateDatasourceOrConnectionFactory(CreateResourceReport report) { + Configuration resourceConfiguration = report.getResourceConfiguration(); + PropertyMap securityDomainPropertyMap = (PropertyMap) resourceConfiguration.get("security-domain"); + PropertySimple securityDeploymentType = securityDomainPropertyMap.getSimple("securityDeploymentType"); + if (securityDeploymentType.getStringValue() == null) { + securityDeploymentType.setValue("NONE"); + } + return report; + } + + private CreateResourceReport postCreateDatasourceOrConnectionFactory(CreateResourceReport report) { + Configuration resourceConfiguration = report.getResourceConfiguration(); + ConfigurationDefinition resourceConfigurationDefinition = report.getResourceType() + .getResourceConfigurationDefinition(); + ComponentType componentType = ConversionUtils.getComponentType(report.getResourceType()); + String componentName = report.getResourceKey(); + try { + ManagementView managementView = getConnection().getManagementView(); + ManagedComponent managedComponent = managementView.getComponent(componentName, componentType); + Map<String, ManagedProperty> managedProperties = managedComponent.getProperties(); + ManagedProperty managedProperty = managedProperties.get("security-domain"); + MetaValue metaValue = managedProperty.getValue(); + PropertyAdapter 
propertyAdapter = PropertyAdapterFactory.getPropertyAdapter(metaValue); + propertyAdapter.populateMetaValueFromProperty(resourceConfiguration.get("security-domain"), metaValue, + resourceConfigurationDefinition.get("security-domain")); + managementView.updateComponent(managedComponent); + managementView.load(); + } catch (Exception e) { + report.setErrorMessage("Resource was created but an error occured while updating security-domain property"); + report.setException(e); + } + return report; + } + + private boolean creatingDatasourceOrConnectionFactory(CreateResourceReport report) { + String resourceTypeName = report.getResourceType().getName(); + return Arrays.asList("No Tx Datasource", "Local Tx Datasource", "XA Datasource", "No Tx ConnectionFactory", + "Tx ConnectionFactory").contains(resourceTypeName); }
// ProgressListener --------------------------------------------
commit 68d4c3d2c3c5eb67c09cde41ac78f77f6b32ec33 Author: John Sanda jsanda@redhat.com Date: Mon Jul 15 10:34:28 2013 -0400
[BZ 984433] revert changes for empty directory check
diff --git a/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java b/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java index 386cda7..9a6a1c0 100644 --- a/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java +++ b/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java @@ -55,7 +55,6 @@ import org.apache.commons.cli.PosixParser; import org.apache.commons.exec.DefaultExecutor; import org.apache.commons.exec.Executor; import org.apache.commons.exec.PumpStreamHandler; -import org.apache.commons.io.FileUtils; import org.apache.commons.io.output.NullOutputStream; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -450,7 +449,13 @@ public class StorageInstaller { }
private boolean isDirectoryEmpty(File dir) { - return FileUtils.sizeOf(dir) == 0; + // TODO need to check subdirectories + if (dir.isDirectory()) { + File[] files = dir.listFiles(); + return (files == null || files.length == 0); + } else { + return true; + } }
private int getPort(CommandLine cmdLine, String option, int defaultValue) {
commit 0bc81e9df0976365252b5ca11795b25d78172ec4 Author: John Sanda jsanda@redhat.com Date: Mon Jul 15 10:13:35 2013 -0400
check that properties are set before trying to update them
diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java index e82f290..c87d2ac 100644 --- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java @@ -68,19 +68,36 @@ public class StorageNodeConfigDelegate implements ConfigurationFacet {
Configuration config = configurationUpdateReport.getConfiguration();
- // We want min and max heap to be the same - properties.setProperty("heap_min", "-Xms" + config.getSimpleValue("maxHeapSize")); - properties.setProperty("heap_max", "-Xmx" + config.getSimpleValue("maxHeapSize")); - properties.setProperty("heap_new", "-Xmn" + config.getSimpleValue("heapNewSize")); - properties.setProperty("thread_stack_size", "-Xss" + config.getSimpleValue("threadStackSize")); - - if (config.getSimple("heapDumpOnOOMError").getBooleanValue()) { - properties.setProperty("heap_dump_on_OOMError", "-XX:+HeapDumpOnOutOfMemoryError"); - } else { - properties.setProperty("heap_dump_on_OOMError", ""); + String maxHeapSize = config.getSimpleValue("maxHeapSize"); + if (!StringUtil.isEmpty(maxHeapSize)) { + // We want min and max heap to be the same + properties.setProperty("heap_min", "-Xms" + maxHeapSize); + properties.setProperty("heap_max", "-Xmx" + maxHeapSize); }
- properties.setProperty("heap_dump_dir", config.getSimpleValue("heapDumpDir")); + String heapNewSize = config.getSimpleValue("heapNewSize"); + if (!StringUtil.isEmpty(heapNewSize)) { + properties.setProperty("heap_new", "-Xmn" + heapNewSize); + } + + String threadStackSize = config.getSimpleValue("threadStackSize"); + if (!StringUtil.isEmpty(threadStackSize)) { + properties.setProperty("thread_stack_size", "-Xss" + threadStackSize); + } + + PropertySimple heapDumpOnOMMError = config.getSimple("heapDumpOnOOMError"); + if (heapDumpOnOMMError != null) { + if (heapDumpOnOMMError.getBooleanValue()) { + properties.setProperty("heap_dump_on_OOMError", "-XX:+HeapDumpOnOutOfMemoryError"); + } else { + properties.setProperty("heap_dump_on_OOMError", ""); + } + } + + String heapDumpDir = config.getSimpleValue("heapDumpDir"); + if (!StringUtil.isEmpty(heapDumpDir)) { + properties.setProperty("heap_dump_dir", heapDumpDir); + }
propertiesUpdater.update(properties);
diff --git a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeConfigDelegateTest.java b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeConfigDelegateTest.java index 5c5f089..ee34f43 100644 --- a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeConfigDelegateTest.java +++ b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeConfigDelegateTest.java @@ -23,10 +23,13 @@ public class StorageNodeConfigDelegateTest {
private File basedir;
+ private StorageNodeConfigDelegate configDelegate; + @BeforeMethod public void initDirs(Method test) throws Exception { File dir = new File(getClass().getResource(".").toURI()); basedir = new File(dir, getClass().getSimpleName() + "/" + test.getName()); + configDelegate = new StorageNodeConfigDelegate(basedir); }
@@ -34,7 +37,6 @@ public class StorageNodeConfigDelegateTest { public void loadValidConfig() throws Exception { createDefaultConfig();
- StorageNodeConfigDelegate configDelegate = new StorageNodeConfigDelegate(basedir); Configuration config = configDelegate.loadResourceConfiguration();
assertEquals(config.getSimpleValue("minHeapSize"), "512M", "Failed to load property [minHeapSize]"); @@ -60,11 +62,9 @@ public class StorageNodeConfigDelegateTest {
ConfigurationUpdateReport report = new ConfigurationUpdateReport(config);
- StorageNodeConfigDelegate configDelegate = new StorageNodeConfigDelegate(basedir); configDelegate.updateResourceConfiguration(report);
- Properties properties = new Properties(); - properties.load(new FileInputStream(new File(confDir(), "cassandra-jvm.properties"))); + Properties properties = loadCassandraJvmProps();
assertEquals(properties.getProperty("heap_min"), "-Xms1024M", "Failed to update property [minHeapSize]"); assertEquals(properties.getProperty("heap_max"), "-Xmx1024M", "Failed to update property [maxHeapSize]"); @@ -77,6 +77,25 @@ public class StorageNodeConfigDelegateTest { "Failed to update property [heap_dump_dir]"); }
+ @Test + public void minHeapSizeShouldBeTheSameAsMaxHeapSize() throws Exception { + createDefaultConfig(); + + Configuration config = new Configuration(); + config.put(new PropertySimple("minHeapSize", "512M")); + config.put(new PropertySimple("maxHeapSize", "768M")); + + ConfigurationUpdateReport report = new ConfigurationUpdateReport(config); + + configDelegate.updateResourceConfiguration(report); + + Properties properties = loadCassandraJvmProps(); + + assertEquals(properties.getProperty("heap_max"), "-Xmx768M", "Failed to update property [maxHeapSize]"); + assertEquals(properties.getProperty("heap_min"), "-Xms768M", "Failed to update property [maxHeapSize]. It " + + "should be the same as [maxHeapSize]."); + } + private void createDefaultConfig() throws IOException { Properties properties = new Properties(); properties.setProperty("heap_min", "-Xms512M"); @@ -84,10 +103,18 @@ public class StorageNodeConfigDelegateTest { properties.setProperty("heap_new", "-Xmn128M"); properties.setProperty("thread_stack_size", "-Xss180k"); properties.setProperty("heap_dump_on_OOMError", "-XX:+HeapDumpOnOutOfMemoryError"); + properties.setProperty("heap_dump_dir", binDir().getAbsolutePath());
properties.store(new FileOutputStream(new File(confDir(), "cassandra-jvm.properties")), ""); }
+ private Properties loadCassandraJvmProps() throws Exception { + Properties properties = new Properties(); + properties.load(new FileInputStream(new File(confDir(), "cassandra-jvm.properties"))); + + return properties; + } + private File confDir() { return mkdirIfNecessary(basedir, "conf"); }
commit 46ae96d60f17d0215185c0f79b8e004bdbc9c1ee Author: Lukas Krejci lkrejci@redhat.com Date: Thu May 23 11:03:31 2013 +0200
Trivial: javadoc clean up.
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/configuration/Configuration.java b/modules/core/domain/src/main/java/org/rhq/core/domain/configuration/Configuration.java index 13b1e32..4dae88a 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/configuration/Configuration.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/configuration/Configuration.java @@ -850,10 +850,9 @@ public class Configuration implements Serializable, Cloneable, AbstractPropertyM * * @return a clone of this configuration * - * @throws CloneNotSupportedException - * * @see #deepCopy() */ + @SuppressWarnings("override") //@Override //GWT trips over this, WTH! public Configuration clone() { return deepCopy(); @@ -1002,7 +1001,7 @@ public class Configuration implements Serializable, Cloneable, AbstractPropertyM /** * Getter for the properties reference. * - * @return Map<String, Property> + * @return {@code Map<String, Property>} */ public Map<String, Property> getAllProperties() { return this.properties;
commit b415c386059131057d91060a3bebef41699fcb3d Author: Lukas Krejci lkrejci@redhat.com Date: Thu May 23 10:46:16 2013 +0200
Added missing generic type decls.
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/configuration/Configuration.java b/modules/core/domain/src/main/java/org/rhq/core/domain/configuration/Configuration.java index b006425..13b1e32 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/configuration/Configuration.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/configuration/Configuration.java @@ -271,7 +271,7 @@ public class Configuration implements Serializable, Cloneable, AbstractPropertyM } }
- public static class MapInList<Parent extends AbstractPropertyListBuilder> extends AbstractPropertyMapBuilder<PropertyMap, MapInList<Parent>> { + public static class MapInList<Parent extends AbstractPropertyListBuilder<?>> extends AbstractPropertyMapBuilder<PropertyMap, MapInList<Parent>> { private Parent parent;
public MapInList(Parent parent, String name) { @@ -307,7 +307,7 @@ public class Configuration implements Serializable, Cloneable, AbstractPropertyM } }
- public static class ListInList<Parent extends AbstractPropertyListBuilder> extends AbstractPropertyListBuilder<ListInList<Parent>> { + public static class ListInList<Parent extends AbstractPropertyListBuilder<?>> extends AbstractPropertyListBuilder<ListInList<Parent>> { private Parent parent;
private ListInList(Parent parent, String name, String memberName) {
commit abbbf13986af10b65cf812ce9b0a2a49c297934a Author: Lukas Krejci lkrejci@redhat.com Date: Wed May 22 19:24:00 2013 +0200
A configuration instance builder. This should greatly simplify the process of creating complex configuration objects.
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/configuration/Configuration.java b/modules/core/domain/src/main/java/org/rhq/core/domain/configuration/Configuration.java index b1e6e22..b006425 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/configuration/Configuration.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/configuration/Configuration.java @@ -143,6 +143,244 @@ public class Configuration implements Serializable, Cloneable, AbstractPropertyM public static final String QUERY_DELETE_RAW_CONFIGURATIONS_CONFIGURATION_IDS = "Configuration.deleteRawByConfigurationIds"; public static final String QUERY_DELETE_CONFIGURATIONS_BY_CONFIGURATION_IDs = "Configuration.deleteByConfigurationIdS";
+ private static abstract class AbstractPropertyMapBuilder<T extends AbstractPropertyMap, This extends AbstractPropertyMapBuilder<T, This>> { + private T map; + + protected AbstractPropertyMapBuilder(T map) { + this.map = map; + } + + /** + * Adds a simple property. + * @param name the name of the simple property + * @param value the value of the simple property + * @return continue with the definition + */ + public This addSimple(String name, Object value) { + getMap().put(new PropertySimple(name, value)); + return castThis(); + } + + /** + * Starts defining a new sub list. + * @param name the name of the sub list + * @param memberName the names of the member properties of the sub list + * @return the builder of the list + */ + public Builder.ListInMap<This> openList(String name, String memberName) { + return new Builder.ListInMap<This>(castThis(), name, memberName); + } + + /** + * Starts defining a new sub map. + * @param name the name of the sub map + * @return the builder of the map + */ + public Builder.MapInMap<This> openMap(String name) { + return new Builder.MapInMap<This>(castThis(), name); + } + + protected T getMap() { + return map; + } + + @SuppressWarnings("unchecked") + private This castThis() { + return (This) this; + } + } + + private static abstract class AbstractPropertyListBuilder<This extends AbstractPropertyListBuilder<This>> { + private PropertyList list; + + private AbstractPropertyListBuilder(String name, String memberName) { + this.list = new PropertyList(name); + this.list.memberPropertyName = memberName; + } + + /** + * Adds a simple property. The name of the property is the member name defined by this list. + * @param value the value of the simple property + * @return continue with the definition + */ + public This addSimple(Object value) { + list.add(new PropertySimple(list.memberPropertyName, value)); + return castThis(); + } + + /** + * Adds a number of simple properties. 
The names of the properties are the member name defined by this list. + * @param values the values of the simple properties + * @return continue with the definition + */ + public This addSimples(Object... values) { + for(Object v : values) { + list.add(new PropertySimple(list.memberPropertyName, v)); + } + + return castThis(); + } + + /** + * Starts defining a new sub map. + * @return the builder of the map + */ + public Builder.MapInList<This> openMap() { + return new Builder.MapInList<This>(castThis(), list.memberPropertyName); + } + + /** + * Starts defining a new sub list. + * @param memberName the names of the member properties of the sub list + * @return the builder of the list + */ + public Builder.ListInList<This> openList(String memberName) { + return new Builder.ListInList<This>(castThis(), list.memberPropertyName, memberName); + } + + protected PropertyList getList() { + return list; + } + + @SuppressWarnings("unchecked") + private This castThis() { + return (This) this; + } + } + + /** + * A builder to easily build Configuration instances using a fluent API. + */ + public static class Builder extends AbstractPropertyMapBuilder<Configuration, Builder> { + + public static class MapInMap<Parent extends AbstractPropertyMapBuilder<?, ?>> extends AbstractPropertyMapBuilder<PropertyMap, MapInMap<Parent>> { + private Parent parent; + + private MapInMap(Parent parent, String name) { + super(new PropertyMap(name)); + this.parent = parent; + } + + /** + * Closes the definition of the current map and returns to the parent context. 
+ * @return the parent context + */ + public Parent closeMap() { + parent.getMap().put(getMap()); + return parent; + } + } + + public static class MapInList<Parent extends AbstractPropertyListBuilder> extends AbstractPropertyMapBuilder<PropertyMap, MapInList<Parent>> { + private Parent parent; + + public MapInList(Parent parent, String name) { + super(new PropertyMap(name)); + this.parent = parent; + } + + /** + * Closes the definition of the current map and returns to the parent context. + * @return the parent context + */ + public Parent closeMap() { + parent.getList().add(getMap()); + return parent; + } + } + + public static class ListInMap<Parent extends AbstractPropertyMapBuilder<?, ?>> extends AbstractPropertyListBuilder<ListInMap<Parent>> { + private Parent parent; + + private ListInMap(Parent parent, String name, String memberName) { + super(name, memberName); + this.parent = parent; + } + + /** + * Closes the definition of the current list and returns to the parent context. + * @return the parent context + */ + public Parent closeList() { + parent.getMap().put(getList()); + return parent; + } + } + + public static class ListInList<Parent extends AbstractPropertyListBuilder> extends AbstractPropertyListBuilder<ListInList<Parent>> { + private Parent parent; + + private ListInList(Parent parent, String name, String memberName) { + super(name, memberName); + this.parent = parent; + } + + /** + * Closes the definition of the current list and returns to the parent context. 
+ * @return the parent context + */ + public Parent closeList() { + parent.getList().add(getList()); + return parent; + } + } + + public class RawConfigurationBuilder { + + private RawConfiguration rawConfig; + + public RawConfigurationBuilder() { + rawConfig = new RawConfiguration(); + rawConfig.setConfiguration(getMap()); + } + + public RawConfigurationBuilder withPath(String path) { + rawConfig.setPath(path); + return this; + } + + public RawConfigurationBuilder withContents(String content, String sha256) { + rawConfig.setContents(content, sha256); + return this; + } + + /** + * Closes the definition of the current raw configuration and returns to the parent context. + * @return the parent context + */ + public Builder closeRawConfiguration() { + getMap().getRawConfigurations().add(rawConfig); + return Builder.this; + } + } + + public Builder() { + super(new Configuration()); + } + + public Builder withNotes(String notes) { + getMap().setNotes(notes); + return this; + } + + public Builder withVersion(long version) { + getMap().setVersion(version); + return this; + } + + /** + * Starts defining a new raw configuration that will become part of this configuration. + * @return the builder of the raw configuration + */ + public RawConfigurationBuilder openRawConfiguration() { + return new RawConfigurationBuilder(); + } + + public Configuration build() { + return getMap(); + } + } + @GeneratedValue(generator = "RHQ_CONFIG_ID_SEQ", strategy = GenerationType.AUTO) @Id private int id; @@ -283,6 +521,10 @@ public class Configuration implements Serializable, Cloneable, AbstractPropertyM @Column(name = "MTIME") private long mtime = System.currentTimeMillis();
+ public static Builder builder() { + return new Builder(); + } + public Configuration() { }
diff --git a/modules/core/domain/src/test/java/org/rhq/core/domain/configuration/ConfigurationBuilderTest.java b/modules/core/domain/src/test/java/org/rhq/core/domain/configuration/ConfigurationBuilderTest.java new file mode 100644 index 0000000..ad071ee --- /dev/null +++ b/modules/core/domain/src/test/java/org/rhq/core/domain/configuration/ConfigurationBuilderTest.java @@ -0,0 +1,237 @@ +package org.rhq.core.domain.configuration; + +import org.testng.annotations.Test; + +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertSame; + +/** + * @author Lukas Krejci + */ +@Test +public class ConfigurationBuilderTest { + + public void testConfigurationProperties() { + Configuration config = Configuration.builder().withNotes("notes").withVersion(2).build(); + assert "notes".equals(config.getNotes()) : "Unexpected notes"; + assert 2 == config.getVersion() : "Unexpected version"; + } + + public void testSimples() { + Configuration config = Configuration.builder().addSimple("1", 1).addSimple("2", 2).build(); + + assert config.getSimpleValue("1").equals("1") : "1 != 1"; + assert config.getSimpleValue("2").equals("2") : "2 != 2"; + + assert config.getSimple("1").getConfiguration() == config : "Configuration not set on property"; + } + + public void testListOfSimples() { + Configuration config = Configuration.builder().openList("l1", "m").addSimple(1).addSimple(2).closeList() + .openList("l2", "m").addSimples(1, 2).closeList().build(); + + PropertyList l1 = config.getList("l1"); + PropertyList l2 = config.getList("l2"); + + assert l1 != null : "Could not find l1"; + assert l2 != null : "Could not find l2"; + + assert l1.getConfiguration() == config : "Configuration on l1 not set"; + assert l2.getConfiguration() == config : "Configuration on l2 not set"; + + assert l1.getList().size() == 2 : "Unexpected number of props in the list 1"; + assert l2.getList().size() == 2 : "Unexpected number of props in the list 2"; + + testSimple((PropertySimple) 
l1.getList().get(0), null, l1, "1"); + testSimple((PropertySimple) l1.getList().get(1), null, l1, "2"); + testSimple((PropertySimple) l2.getList().get(0), null, l2, "1"); + testSimple((PropertySimple) l2.getList().get(1), null, l2, "2"); + } + + public void testMapOfSimples() { + Configuration config = Configuration.builder().openMap("m").addSimple("1", 1).addSimple("2", 2).closeMap() + .build(); + + PropertyMap m = config.getMap("m"); + + assert m != null : "Cound not find map"; + + assert m.getConfiguration() == config : "Configuration on the map not set"; + + testSimple(m.getSimple("1"), m, null, "1"); + testSimple(m.getSimple("2"), m, null, "2"); + } + + public void testListOfLists() { + Configuration config = new Configuration.Builder().openList("l", "m").openList("ml1").addSimples(1, 2) + .closeList().openList("ml2").addSimple(1).addSimple(2).closeList().closeList().build(); + + PropertyList l = config.getList("l"); + + assert l != null : "Could not find top-level list"; + + assert l.getList().size() == 2 : "Unexpected number of props in the top level list"; + + PropertyList m1 = (PropertyList) l.getList().get(0); + PropertyList m2 = (PropertyList) l.getList().get(1); + + assert m1.getParentList() == l : "Parent list on m1 not set"; + assert m1.getParentMap() == null : "Unexpected parent map on m1"; + assert m2.getParentList() == l : "Parent list on m2 not set"; + assert m1.getParentMap() == null : "Unexpected parent map on m2"; + + assert m1.getList().size() == 2 : "Unexpected number of props in the list 1"; + assert m2.getList().size() == 2 : "Unexpected number of props in the list 2"; + + testSimple((PropertySimple) m1.getList().get(0), null, m1, "1"); + testSimple((PropertySimple) m1.getList().get(1), null, m1, "2"); + testSimple((PropertySimple) m2.getList().get(0), null, m2, "1"); + testSimple((PropertySimple) m2.getList().get(1), null, m2, "2"); + } + + public void testListOfMaps() { + Configuration config = Configuration.builder().openList("l", 
"m").openMap().addSimple("c1", 1) + .addSimple("c2", 2).closeMap() + .openMap().addSimple("c1", 3).addSimple("c2", 4).closeMap().closeList().build(); + + PropertyList l = config.getList("l"); + + assert l != null : "Could not find top-level list"; + + assert l.getList().size() == 2 : "Unexpected number of props in the top level list"; + + PropertyMap m1 = (PropertyMap) l.getList().get(0); + PropertyMap m2 = (PropertyMap) l.getList().get(1); + + assert m1.getParentList() == l : "Parent list on m1 not set"; + assert m1.getParentMap() == null : "Unexpected parent map on m1"; + assert m2.getParentList() == l : "Parent list on m2 not set"; + assert m1.getParentMap() == null : "Unexpected parent map on m2"; + + assert m1.getMap().size() == 2 : "Unexpected number of props in the list 1"; + assert m2.getMap().size() == 2 : "Unexpected number of props in the list 2"; + + testSimple(m1.getSimple("c1"), m1, null, "1"); + testSimple(m1.getSimple("c2"), m1, null, "2"); + testSimple(m2.getSimple("c1"), m2, null, "3"); + testSimple(m2.getSimple("c2"), m2, null, "4"); + } + + public void testMapOfMaps() { + Configuration config = Configuration.builder().openMap("m").openMap("im1").addSimple("c1", 1).addSimple("c2", 2) + .closeMap().openMap("im2").addSimple("c1", 3).addSimple("c2", 4).closeMap().closeMap().build(); + + PropertyMap m = config.getMap("m"); + + assert m != null : "Could not find the top level map"; + + assert m.getMap().size() == 2 : "Unexpected number of props in the top level map"; + + PropertyMap m1 = m.getMap("im1"); + PropertyMap m2 = m.getMap("im2"); + + assert m1 != null : "Could not find im1"; + assert m2 != null : "Could not find im2"; + + assert m1.getParentList() == null : "Unexpected parent list on m1"; + assert m1.getParentMap() == m : "Unexpected parent map on m1"; + assert m2.getParentList() == null : "Unexpected parent list on m2"; + assert m1.getParentMap() == m : "Unexpected parent map on m2"; + + testSimple(m1.getSimple("c1"), m1, null, "1"); + 
testSimple(m1.getSimple("c2"), m1, null, "2"); + testSimple(m2.getSimple("c1"), m2, null, "3"); + testSimple(m2.getSimple("c2"), m2, null, "4"); + } + + public void testMapOfLists() { + Configuration config = Configuration.builder().openMap("m").openList("il1", "m").addSimples(1, 2) + .closeList().openList("il2", "m").addSimples(3, 4).closeList().closeMap().build(); + + PropertyMap m = config.getMap("m"); + + assert m != null : "Could not find the top level map"; + + assert m.getMap().size() == 2 : "Unexpected number of props in the top level map"; + + PropertyList l1 = m.getList("il1"); + PropertyList l2 = m.getList("il2"); + + assert l1 != null : "Could not find il1"; + assert l2 != null : "Could not find il2"; + + assert l1.getParentList() == null : "Unexpected parent list on l1"; + assert l1.getParentMap() == m : "Unexpected parent map on l1"; + assert l2.getParentList() == null : "Unexpected parent list on l2"; + assert l2.getParentMap() == m : "Unexpected parent map on l2"; + + testSimple((PropertySimple) l1.getList().get(0), null, l1, "1"); + testSimple((PropertySimple) l1.getList().get(1), null, l1, "2"); + testSimple((PropertySimple) l2.getList().get(0), null, l2, "3"); + testSimple((PropertySimple) l2.getList().get(1), null, l2, "4"); + } + + public void testUtterMess() { + Configuration config = Configuration.builder() + .openList("l", "m") // + /**/.openMap() // + /**//**/.openMap("innerMap") // + /**//**//**/.addSimple("c1", 1) // + /**//**//**/.openList("c2", "m") // + /**//**//**/.closeList() // + /**//**/.closeMap() // + /**//**/.addSimple("simple", 2) // + /**//**/.openList("innerList", "m") // + /**//**/.closeList() // + /**/.closeMap() // + /**/.addSimple(3) // + /**/.openList("im") // + /**//**/.openList("iim") // + /**//**/.closeList() // + /**//**/.openMap() // + /**//**/.closeMap() // + /**/.closeList() // + .closeList().build(); + + PropertyList l = config.getList("l"); + PropertyMap lm = (PropertyMap) l.getList().get(0); + PropertyMap 
innerMap = lm.getMap("innerMap"); + PropertySimple c1 = innerMap.getSimple("c1"); + PropertyList c2 = innerMap.getList("c2"); + PropertySimple simple = lm.getSimple("simple"); + PropertyList innerList = lm.getList("innerList"); + PropertySimple ls = (PropertySimple) l.getList().get(1); + PropertyList ll = (PropertyList) l.getList().get(2); + PropertyList lll = (PropertyList) ll.getList().get(0); + PropertyMap llm = (PropertyMap) ll.getList().get(1); + + //all the aspects of the above mess have been tested in the previous tests + //this just really is here to prove the point of how messy our configs can be + + assert c1 != null; + assert c2 != null; + assert simple != null; + assert innerList != null; + assert ls != null; + assert lll != null; + assert llm != null; + } + + public void testRawConfigs() { + Configuration config = Configuration.builder().openRawConfiguration().withPath("a/b") + .withContents("asdf", "123").closeRawConfiguration().build(); + + RawConfiguration r = config.getRawConfigurations().iterator().next(); + assert r.getPath().equals("a/b") : "Unexpected path"; + assert r.getContents().equals("asdf") : "Unexpected contents"; + assert r.getSha256().equals("123") : "Unexpected sha256"; + assert r.getConfiguration() == config : "Unexpected raw config owning configuration"; + } + + private void testSimple(PropertySimple p, PropertyMap expectedParentMap, PropertyList expectedParentList, + String expectedValue) { + assertSame(p.getParentMap(), expectedParentMap, "Unexpected parent map"); + assertSame(p.getParentList(), expectedParentList, "Unexpected parent list"); + assertEquals(p.getStringValue(), expectedValue, "Unexpected value"); + } +}
commit eb3c52f1ba91a70fa7bdb45405849a1af4aa524a Author: Heiko W. Rupp hwr@redhat.com Date: Mon Jul 15 10:18:15 2013 +0200
Add a JavaFX frontend to the PluginGenerator.
diff --git a/modules/helpers/pluginGen/pom.xml b/modules/helpers/pluginGen/pom.xml index e9a5e29..5aa512c 100644 --- a/modules/helpers/pluginGen/pom.xml +++ b/modules/helpers/pluginGen/pom.xml @@ -22,6 +22,14 @@ <build> <plugins>
+ <plugin> + <artifactId>maven-compiler-plugin</artifactId> + <configuration> + <source>1.7</source> + <target>1.7</target> + </configuration> + </plugin> + <plugin> <artifactId>maven-jar-plugin</artifactId> <configuration> @@ -98,6 +106,13 @@ <artifactId>rhq-pluginAnnotations</artifactId> <version>4.8.0-SNAPSHOT</version> </dependency> + <dependency> + <groupId>com.oracle</groupId> + <artifactId>javafx</artifactId> + <version>2.0</version> + <systemPath>${java.home}/lib/jfxrt.jar</systemPath> + <scope>system</scope> + </dependency> </dependencies>
</project> diff --git a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Generator.java b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Generator.java new file mode 100644 index 0000000..06f3fec --- /dev/null +++ b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Generator.java @@ -0,0 +1,295 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +package org.rhq.helpers.pluginGen; + +import java.io.File; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import javafx.application.Application; +import javafx.beans.value.ChangeListener; +import javafx.beans.value.ObservableValue; +import javafx.collections.ObservableList; +import javafx.event.ActionEvent; +import javafx.event.EventHandler; +import javafx.geometry.Insets; +import javafx.geometry.Pos; +import javafx.scene.Node; +import javafx.scene.Scene; +import javafx.scene.control.*; +import javafx.scene.layout.BorderPane; +import javafx.scene.layout.GridPane; +import javafx.scene.layout.HBox; +import javafx.scene.layout.VBox; +import javafx.scene.paint.Color; +import javafx.scene.text.Font; +import javafx.scene.text.FontPosture; +import 
javafx.scene.text.FontWeight; +import javafx.scene.text.Text; +import javafx.scene.text.TextAlignment; +import javafx.stage.DirectoryChooser; +import javafx.stage.Stage; + +/** + * JavaFX version of the plugin generator + * @author Heiko W. Rupp + */ +public class Generator extends Application{ + + Props props = new Props(); + private Text errorMessage; + private Stage primaryStage; + + public static void main(String[] args) { + launch(args); + } + + + @Override + public void start(Stage stage) throws Exception { + primaryStage = stage; + + Button createButton = new Button(); + createButton.setText("Create!"); + createButton.setAlignment(Pos.BOTTOM_RIGHT); + createButton.setOnAction(new EventHandler<ActionEvent>() { + @Override + public void handle(ActionEvent actionEvent) { + + PluginGen pluginGen = new PluginGen(); + pluginGen.postprocess(props); + try { + pluginGen.generate(props); + setInfoMessage("Generated!"); + + } + catch (Exception e) { + setErrorMessage("Error during generation: " + e.getMessage()); + } + } + }); + + GridPane pluginLevelPane = new GridPane(); + pluginLevelPane.setPadding(new Insets(10)); + Text pluginLevelDescription = new Text("Plugin level properties"); + pluginLevelDescription.setTextAlignment(TextAlignment.CENTER); + pluginLevelDescription.setFont(Font.font("Arial", FontWeight.BOLD, 14)); + + Text resourceLevelDescription = new Text("ResourceType level properties"); + resourceLevelDescription.setTextAlignment(TextAlignment.CENTER); + resourceLevelDescription.setFont(Font.font("Arial", FontWeight.BOLD, 14)); + + Text descriptionDescription = new Text("Field description"); + descriptionDescription.setTextAlignment(TextAlignment.CENTER); + descriptionDescription.setFont(Font.font("Arial", FontWeight.BOLD, 14)); + + HBox msgBox = getMessagesBox(); + + GridPane resourceLevelPane = new GridPane(); + resourceLevelPane.setPadding(new Insets(10)); + Text descriptionText = new Text(); + descriptionText.setFont(Font.font("Arial", 
FontPosture.ITALIC,12)); + + VBox innerBox = new VBox(); + innerBox.setAlignment(Pos.CENTER_LEFT); + innerBox.setPadding(new Insets(25, 25, 25, 25)); + innerBox.setSpacing(8); + + addFields(pluginLevelPane, true, descriptionText); + addFields(resourceLevelPane, false, descriptionText); + + ObservableList<Node> children = innerBox.getChildren(); + children.add(pluginLevelDescription); + children.add(pluginLevelPane); + children.add(resourceLevelDescription); + children.add(resourceLevelPane); + + + ScrollPane scrollPane = new ScrollPane(); + scrollPane.setContent(innerBox); + + BorderPane outerBox = new BorderPane(); + outerBox.setTop(msgBox); + outerBox.setPadding(new Insets(5)); + outerBox.setCenter(scrollPane); + + VBox descriptionBox = new VBox(); + descriptionBox.getChildren().add(descriptionDescription); + descriptionBox.getChildren().add(descriptionText); + outerBox.setBottom(descriptionBox); + + outerBox.setRight(createButton); + + + stage.setScene(new Scene(outerBox, 600, 550)); + stage.show(); + } + + private HBox getMessagesBox() { + HBox msgBox = new HBox(); + Label label = new Label("Messages:"); + msgBox.getChildren().add(label); + errorMessage = new Text(); + errorMessage.setFont(Font.font("Arial", FontWeight.SEMI_BOLD, 12)); + errorMessage.setId("errorMessage"); + msgBox.getChildren().add(errorMessage); + return msgBox; + } + + private int addFields(final GridPane root, boolean pluginLevel, final Text descriptionField) { + + int row = 0; + for (final Prop prop : Prop.values()) { + + if (!prop.isPluginLevel()==pluginLevel) { + continue; + } + + // Add the label + String name = prop.readableName(); + Label fieldName = new Label(name); + root.add(fieldName,0,row); + + // Now add the field itself + final Class propType = prop.getType(); + if (propType.equals(String.class)) { + final Pattern pattern = Pattern.compile(prop.getValidationRegex()); + + final TextField input = new TextField(); + // Add field leave event to fill in the props with the result + 
input.focusedProperty().addListener(new ChangeListener<Boolean>() { + @Override + public void changed(ObservableValue<? extends Boolean> observableValue, Boolean oldState, + Boolean newState) { + if (newState) { // User entered input field + descriptionField.setText(prop.getDescription()); + } + else { // User left input field + descriptionField.setText(""); + setPropsValue(prop.getVariableName(),input.getText(), propType); // TODO right place? + } + } + }); + // Add validation of the input + input.textProperty().addListener(new ChangeListener<String>() { + @Override + public void changed(ObservableValue<? extends String> observableValue, String s, String newText) { + Matcher m = pattern.matcher(newText); + if (!m.matches()) { + setErrorMessage("Input does not match " + prop.getValidationRegex()); + } else { + clearErrorMessage(); + } + + } + }); + root.add(input, 1, row); + } else if (propType.equals(Boolean.class) || propType.equals(boolean.class)) { + final ChoiceBox choiceBox = new ChoiceBox(); + choiceBox.getItems().addAll("Yes", "No"); + choiceBox.getSelectionModel().selectLast(); // NO is default + choiceBox.getSelectionModel().selectedItemProperty().addListener(new ChangeListener<String>() { + @Override + public void changed(ObservableValue<? extends String> observableValue, String s, String newValue) { + setPropsValue(prop.getVariableName(),newValue.equals("Yes"),propType); + } + }); + + root.add(choiceBox,1,row); + } else if (propType.equals(ResourceCategory.class)) { + final ChoiceBox choiceBox = new ChoiceBox(); + for (ResourceCategory cat : ResourceCategory.values()) { + choiceBox.getItems().add(cat.getLowerName()); + } + choiceBox.getSelectionModel().selectLast(); // service is default + choiceBox.getSelectionModel().selectedItemProperty().addListener(new ChangeListener<String>() { + @Override + public void changed(ObservableValue<? 
extends String> observableValue, String s, String newValue) { + ResourceCategory newCategory = ResourceCategory.valueOf(newValue.toUpperCase()); + setPropsValue(prop.getVariableName(),newCategory,propType); + } + }); + + root.add(choiceBox,1,row); + } else if (propType.equals(File.class)) { + // Can not add this directly, so add a button to trigger it + final Text text = new Text(); + text.setText("Pick a directory"); + root.add(text,1,row); + Button pickButton = new Button("Pick"); + pickButton.setOnAction(new EventHandler<ActionEvent>() { + @Override + public void handle(ActionEvent actionEvent) { + DirectoryChooser chooser = new DirectoryChooser(); + chooser.setTitle("Pick a directory where the plugin will be put in."); + File dir = chooser.showDialog(primaryStage); + if (dir != null) { + String dirName = dir.getAbsolutePath(); + props.setFileSystemRoot(dirName); + clearErrorMessage(); + text.setText(dirName); + } else { + setErrorMessage("No directory selected"); + text.setText("Pick a directory"); + } + } + }); + + root.add(pickButton,2,row); + } + + row++; + + } + + return row; + } + + private void setInfoMessage(String message) { + errorMessage.setText(message); + errorMessage.setFill(Color.DARKGREEN); + } + private void setErrorMessage(String message) { + errorMessage.setText(message); + errorMessage.setFill(Color.RED); + } + + private void clearErrorMessage() { + errorMessage.setText(""); + errorMessage.setFill(Color.WHITE); + } + + private void setPropsValue(String variableName, Object value, Class type) { + + String var = variableName.substring(0,1).toUpperCase() + variableName.substring(1); + String setterName = "set"+ var; + + try { + Method setter = Props.class.getDeclaredMethod(setterName,type); + setter.invoke(props,value); + } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException e) { + e.printStackTrace(); // TODO: Customise this generated block + setErrorMessage(e.getMessage()); + } + } + +} diff --git 
a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/PluginGen.java b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/PluginGen.java index 223a57f..64b5509 100644 --- a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/PluginGen.java +++ b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/PluginGen.java @@ -115,7 +115,7 @@ public class PluginGen { * Do some post processing over the input received. * @param props The properties just recorded from the user input */ - private void postprocess(Props props) { + protected void postprocess(Props props) {
// Set the package String pkg = props.getPackagePrefix() + "." + props.getName(); @@ -231,7 +231,7 @@ public class PluginGen { * Trigger the generation of the directory hierarchy. * @param props Parameters to take into account */ - private void generate(Props props) { + protected void generate(Props props) {
log.info("Generating...");
diff --git a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Prop.java b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Prop.java new file mode 100644 index 0000000..73c4cd9 --- /dev/null +++ b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Prop.java @@ -0,0 +1,114 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +package org.rhq.helpers.pluginGen; + +import java.io.File; + +/** + * All the properties that can be configured along with type, description and simple validation rules + * @author Heiko W. 
Rupp + */ +public enum Prop { + + PLUGIN_NAME("pluginName", String.class,"Name of the plugin", "\w+" , true ), + PLUGIN_DESCRIPTION("pluginDescription", String.class,"Description of the plugin",".*" , true ), + PACKAGE("packagePrefix", String.class,"Default Package","[a-zA-Z\.]+",true ), + FILE_ROOT("fileSystemRoot", File.class,"Root directory to put the plugin",".*",true ), + RHQ_VERSION("rhqVersion",String.class,"RHQ version to use","[0-9][0-9\.]+",true), + + CATEGORY("category", ResourceCategory.class, "Category of the resource type (platform = host level)",null), + TYPE_NAME("name", String.class, "Name of the resource type", "\w+"), + DESCRIPTION("description", String.class, "Description of the type", ".*"), + DISCOVERY_CLASS("discoveryClass", String.class, "Discovery class", "[A-Z][a-zA-Z0-9]*"), + COMPONENT_CLASS("componentClass", String.class, "Discovery class", "[A-Z][a-zA-Z0-9]*"), + IS_SINGLETON("singleton",Boolean.class,"Is this type a singleton, which means that" + + " there can only be one resource of that type for the given parent?",null), + HAS_METRICS("hasMetrics",boolean.class,"Does this type support taking metrics?",null), + HAS_OPERATIONS("hasOperations",boolean.class,"Does this type support operations?",null), + HAS_EVENTS("events",boolean.class,"Does this type support events?",null), + HAS_SUPPORT_FACET("supportFacet",boolean.class,"Does this type support the support facet?",null), + RESOURCE_CONFIGURATION("resourceConfiguration",boolean.class,"Does this type support " + + "configuring the resource?",".*"), + CAN_CREATE_CHILDREN("createChildren",boolean.class,"Can the type create child resources?",null), + CAN_DELETE_CHILDREN("deleteChildren",boolean.class,"Can the type delete child resources?",null), + + // TODO add the remaining properties from Prop.class + + ; + + private String variableName; + private Class type; + private String description; + private boolean pluginLevel; + private String validationRegex; + + private Prop(String 
variableName, Class type, String description, String validationRegex, boolean pluginLevel) { + this.variableName = variableName; + this.type = type; + this.description = description; + this.validationRegex = validationRegex; + this.pluginLevel = pluginLevel; + } + + private Prop(String variableName, Class type, String description, String validationRegex) { + this.variableName = variableName; + this.type = type; + this.description = description; + this.validationRegex = validationRegex; + } + + public String getVariableName() { + return variableName; + } + + public Class getType() { + return type; + } + + public String getDescription() { + return description; + } + + public boolean isPluginLevel() { + return pluginLevel; + } + + public String getValidationRegex() { + return validationRegex; + } + + public String readableName() { + + String name = name(); + name = name.replaceAll("_", " "); + String[] parts = name.split(" "); + StringBuilder builder = new StringBuilder(); + for (int i = 0; i < parts.length; i++) { + String part = parts[i]; + part = part.substring(0,1).toUpperCase() + part.substring(1).toLowerCase(); + builder.append(part); + if (i < parts.length-1) { + builder.append(" "); + } + } + return builder.toString(); + } + +} diff --git a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Props.java b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Props.java index 484d591..237f5ad 100644 --- a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Props.java +++ b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Props.java @@ -36,7 +36,7 @@ import org.rhq.helpers.pluginAnnotations.agent.Units; public class Props {
/** What category is this ? */ - private ResourceCategory category; + private ResourceCategory category = ResourceCategory.SERVICE; /** The name of this item */ private String name; /** A description of the plugin */
commit 18c42dafe1e9e51a16a5ceba068a1e27cb8788c0 Author: John Sanda jsanda@redhat.com Date: Sun Jul 14 22:36:40 2013 -0400
adding heap dump properties to resource configuration
The heap dump directory defaults to <rhq-storage-basedir>/bin and we are generating heap dumps by default. Also refactoring and updating some of the tests.
diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java index 99ba48b..e82f290 100644 --- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java @@ -11,6 +11,7 @@ import org.rhq.core.domain.configuration.PropertySimple; import org.rhq.core.pluginapi.configuration.ConfigurationFacet; import org.rhq.core.pluginapi.configuration.ConfigurationUpdateReport; import org.rhq.core.util.PropertiesFileUpdate; +import org.rhq.core.util.StringUtil;
/** * @author John Sanda @@ -35,12 +36,27 @@ public class StorageNodeConfigDelegate implements ConfigurationFacet { String heapMaxProp = properties.getProperty("heap_max"); String heapNewProp = properties.getProperty("heap_new"); String threadStackSizeProp = properties.getProperty("thread_stack_size"); + String heapDumpOnOOMError = properties.getProperty("heap_dump_on_OOMError"); + String heapDumpDir = properties.getProperty("heap_dump_dir");
config.put(new PropertySimple("minHeapSize", heapMinProp.substring(4))); config.put(new PropertySimple("maxHeapSize", heapMaxProp.substring(4))); config.put(new PropertySimple("heapNewSize", heapNewProp.substring(4))); config.put(new PropertySimple("threadStackSize", threadStackSizeProp.substring(4)));
+ if (!StringUtil.isEmpty(heapDumpOnOOMError)) { + config.put(new PropertySimple("heapDumpOnOOMError", true)); + } else { + config.put(new PropertySimple("heapDumpOnOOMError", false)); + } + + if (!StringUtil.isEmpty(heapDumpDir)) { + config.put(new PropertySimple("heapDumpDir", heapDumpDir)); + } else { + File basedir = jvmOptsFile.getParentFile().getParentFile(); + config.put(new PropertySimple("heapDumpDir", new File(basedir, "bin").getAbsolutePath())); + } + return config; }
@@ -58,6 +74,14 @@ public class StorageNodeConfigDelegate implements ConfigurationFacet { properties.setProperty("heap_new", "-Xmn" + config.getSimpleValue("heapNewSize")); properties.setProperty("thread_stack_size", "-Xss" + config.getSimpleValue("threadStackSize"));
+ if (config.getSimple("heapDumpOnOOMError").getBooleanValue()) { + properties.setProperty("heap_dump_on_OOMError", "-XX:+HeapDumpOnOutOfMemoryError"); + } else { + properties.setProperty("heap_dump_on_OOMError", ""); + } + + properties.setProperty("heap_dump_dir", config.getSimpleValue("heapDumpDir")); + propertiesUpdater.update(properties);
configurationUpdateReport.setStatus(ConfigurationUpdateStatus.SUCCESS); diff --git a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml index 5713288..c700257 100644 --- a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml +++ b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml @@ -124,6 +124,11 @@ the -Xmn JVM option."/> <c:simple-property name="threadStackSize" description="The thread stack size. This memory is allocated to each thread off heap."/> + <c:simple-property name="heapDumpOnOOMError" displayName="Heap Dump on OutOfMemoryError" type="boolean" + default="true" + description="Generate a heap dump when an OutOfMemoryError occurs"/> + <c:simple-property name="heapDumpDir" displayName="Heap Dump Directory" required="false" + description="The directory in which heap dumps will be written."/> </c:group> </resource-configuration>
diff --git a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeConfigDelegateTest.java b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeConfigDelegateTest.java index f8d7bdc..5c5f089 100644 --- a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeConfigDelegateTest.java +++ b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeConfigDelegateTest.java @@ -6,8 +6,10 @@ import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; +import java.lang.reflect.Method; import java.util.Properties;
+import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test;
import org.rhq.core.domain.configuration.Configuration; @@ -19,16 +21,18 @@ import org.rhq.core.pluginapi.configuration.ConfigurationUpdateReport; */ public class StorageNodeConfigDelegateTest {
+ private File basedir; + + @BeforeMethod + public void initDirs(Method test) throws Exception { + File dir = new File(getClass().getResource(".").toURI()); + basedir = new File(dir, getClass().getSimpleName() + "/" + test.getName()); + }
@Test public void loadValidConfig() throws Exception { - File dir = new File(getClass().getResource(".").toURI()); - File basedir = new File(dir, getClass().getSimpleName() + "/loadValidConfig"); - File confDir = new File(basedir, "conf"); - - confDir.mkdirs(); - createDefaultConfig(confDir); + createDefaultConfig();
StorageNodeConfigDelegate configDelegate = new StorageNodeConfigDelegate(basedir); Configuration config = configDelegate.loadResourceConfiguration(); @@ -37,22 +41,22 @@ public class StorageNodeConfigDelegateTest { assertEquals(config.getSimpleValue("maxHeapSize"), "512M", "Failed to load property [maxHepSize]"); assertEquals(config.getSimpleValue("heapNewSize"), "128M", "Failed to load property [heapNewSize]"); assertEquals(config.getSimpleValue("threadStackSize"), "180k", "Failed to load property [threadStackSize]"); + assertEquals(config.getSimple("heapDumpOnOOMError").getBooleanValue(), (Boolean) true, + "Failed to load property [heapDumpOnOOMError]"); + assertEquals(new File(config.getSimpleValue("heapDumpDir")), binDir(), "Failed to load property [heapDumpDir]"); }
@Test public void updateValidConfig() throws Exception { - File dir = new File(getClass().getResource(".").toURI()); - File basedir = new File(dir, getClass().getSimpleName() + "/updateValidConfig"); - File confDir = new File(basedir, "conf"); - - confDir.mkdirs(); - createDefaultConfig(confDir); + createDefaultConfig();
Configuration config = new Configuration(); config.put(new PropertySimple("minHeapSize", "1024M")); config.put(new PropertySimple("maxHeapSize", "1024M")); config.put(new PropertySimple("heapNewSize", "256M")); config.put(new PropertySimple("threadStackSize", "240k")); + config.put(new PropertySimple("heapDumpOnOOMError", true)); + config.put(new PropertySimple("heapDumpDir", confDir()));
ConfigurationUpdateReport report = new ConfigurationUpdateReport(config);
@@ -60,23 +64,44 @@ public class StorageNodeConfigDelegateTest { configDelegate.updateResourceConfiguration(report);
Properties properties = new Properties(); - properties.load(new FileInputStream(new File(confDir, "cassandra-jvm.properties"))); + properties.load(new FileInputStream(new File(confDir(), "cassandra-jvm.properties")));
assertEquals(properties.getProperty("heap_min"), "-Xms1024M", "Failed to update property [minHeapSize]"); assertEquals(properties.getProperty("heap_max"), "-Xmx1024M", "Failed to update property [maxHeapSize]"); assertEquals(properties.getProperty("heap_new"), "-Xmn256M", "Failed to update property [heapNewSize]"); assertEquals(properties.getProperty("thread_stack_size"), "-Xss240k", "Failed to update property [threadStackSize]"); + assertEquals(properties.getProperty("heap_dump_on_OOMError"), "-XX:+HeapDumpOnOutOfMemoryError", + "Failed to update property [heap_dump_on_OOMError]"); + assertEquals(properties.getProperty("heap_dump_dir"), confDir().getAbsolutePath(), + "Failed to update property [heap_dump_dir]"); }
- private void createDefaultConfig(File confDir) throws IOException { + private void createDefaultConfig() throws IOException { Properties properties = new Properties(); properties.setProperty("heap_min", "-Xms512M"); properties.setProperty("heap_max", "-Xmx512M"); properties.setProperty("heap_new", "-Xmn128M"); properties.setProperty("thread_stack_size", "-Xss180k"); + properties.setProperty("heap_dump_on_OOMError", "-XX:+HeapDumpOnOutOfMemoryError"); + + properties.store(new FileOutputStream(new File(confDir(), "cassandra-jvm.properties")), ""); + } + + private File confDir() { + return mkdirIfNecessary(basedir, "conf"); + } + + private File binDir() { + return mkdirIfNecessary(basedir, "bin"); + }
- properties.store(new FileOutputStream(new File(confDir, "cassandra-jvm.properties")), ""); + private File mkdirIfNecessary(File parent, String path) { + File dir = new File(parent, path); + if (!dir.exists()) { + dir.mkdirs(); + } + return dir; }
}
commit 1130a8be9a9a44c6e4670053bd7347aed4e01f30 Author: John Sanda jsanda@redhat.com Date: Sun Jul 14 09:44:04 2013 -0400
adding initial support for resource config updates of StorageNodeComponent
Also making minHeapSize read-only, as we want it to be the same as maxHeapSize.
diff --git a/modules/plugins/rhq-storage/pom.xml b/modules/plugins/rhq-storage/pom.xml index ea3b9ea..a88f56e 100644 --- a/modules/plugins/rhq-storage/pom.xml +++ b/modules/plugins/rhq-storage/pom.xml @@ -39,103 +39,6 @@
<profiles> <profile> - <id>integration-tests</id> - <activation> - <property> - <name>maven.test.skip</name> - <value>!true</value> - </property> - </activation> - - <build> - <plugins> - - <plugin> - <artifactId>maven-antrun-plugin</artifactId> - <executions> - <execution> - <phase>pre-integration-test</phase> - <configuration> - <target> - <echo>Setting up plugin dependencies...</echo> - <property name="settings.localRepository" location="${user.home}/.m2/repository" /> - <mkdir dir="target/itest" /> - <mkdir dir="target/itest/plugins" /> - <mkdir dir="target/itest/lib" /> - <copy toDir="target/itest/plugins" flatten="true"> - <!-- Plugin dependencies on other plugins --> - <fileset dir="${settings.localRepository}/"> - <include name="org/rhq/rhq-platform-plugin/${project.version}/rhq-platform-plugin-${project.version}.jar" /> - <include name="org/rhq/rhq-jmx-plugin/${project.version}/rhq-jmx-plugin-${project.version}.jar" /> - <include name="org/rhq/rhq-jboss-as-7-jmx-plugin/${project.version}/rhq-jboss-as-7-jmx-plugin-${project.version}.jar" /> - </fileset> - <fileset dir="${project.build.directory}"> - <include name="rhq-rhqserver-plugin-${project.version}.jar" /> - </fileset> - </copy> - <unzip dest="target/itest/lib"> - <fileset dir="${settings.localRepository}/org/hyperic/sigar-dist/${sigar.version}" includes="*.zip" /> - <patternset> - <include name="**/lib/sigar.jar" /> - <include name="**/lib/bcel*.jar" /> - <include name="**/lib/*.so" /> - <include name="**/lib/*.sl" /> - <include name="**/lib/*.dll" /> - <include name="**/lib/*.dylib" /> - </patternset> - </unzip> - <move todir="target/itest/lib" flatten="true"> - <fileset dir="target/itest/lib"> - <include name="**/lib/*" /> - </fileset> - </move> - <delete dir="target/itest/lib/hyperic-sigar-${sigar.version}" /> - </target> - </configuration> - <goals> - <goal>run</goal> - </goals> - </execution> - </executions> - </plugin> - - <plugin> - <artifactId>maven-surefire-plugin</artifactId> - 
<configuration> - <skip>true</skip> - </configuration> - <executions> - <execution> - <id>surefire-it</id> - <phase>integration-test</phase> - <goals> - <goal>test</goal> - </goals> - <configuration> - <skip>${maven.test.skip}</skip> - <excludedGroups>${rhq.testng.excludedGroups}</excludedGroups> - <useSystemClassLoader>false</useSystemClassLoader> - <argLine>${jacoco.integration-test.args} -Dorg.hyperic.sigar.path=${basedir}/target/itest/lib</argLine> - <systemProperties> - <property> - <name>project.artifactId</name> - <value>${project.artifactId}</value> - </property> - <property> - <name>project.version</name> - <value>${project.version}</value> - </property> - </systemProperties> - </configuration> - </execution> - </executions> - </plugin> - - </plugins> - </build> - </profile> - - <profile> <id>dev</id>
<properties> diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java index 737dded..6194146 100644 --- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java @@ -26,9 +26,7 @@ package org.rhq.plugins.storage;
import java.io.File; -import java.io.FileInputStream; import java.util.List; -import java.util.Properties;
import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -62,25 +60,13 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper
@Override public Configuration loadResourceConfiguration() throws Exception { - File confDir = new File(getBasedir(), "conf"); - File jvmOptsFile = new File(confDir, "cassandra-jvm.properties"); - - Properties properties = new Properties(); - properties.load(new FileInputStream(jvmOptsFile)); - - Configuration config = new Configuration(); - - String heapMinProp = properties.getProperty("heap_min"); - String heapMaxProp = properties.getProperty("heap_max"); - String heapNewProp = properties.getProperty("heap_new"); - String threadStackSizeProp = properties.getProperty("thread_stack_size"); - - config.put(new PropertySimple("minHeapSize", heapMinProp.substring(4))); - config.put(new PropertySimple("maxHeapSize", heapMaxProp.substring(4))); - config.put(new PropertySimple("heapNewSize", heapNewProp.substring(4))); - config.put(new PropertySimple("threadStackSize", threadStackSizeProp.substring(4))); + return new StorageNodeConfigDelegate(getBasedir()).loadResourceConfiguration(); + }
- return config; + @Override + public void updateResourceConfiguration(ConfigurationUpdateReport configurationUpdateReport) { + StorageNodeConfigDelegate configDelegate = new StorageNodeConfigDelegate(getBasedir()); + configDelegate.updateResourceConfiguration(configurationUpdateReport); }
private File getBasedir() { @@ -89,10 +75,6 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper }
@Override - public void updateResourceConfiguration(ConfigurationUpdateReport configurationUpdateReport) { - } - - @Override public OperationResult invokeOperation(String name, Configuration parameters) throws Exception { if (name.equals("addNodeMaintenance")) { return nodeAdded(parameters); diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java new file mode 100644 index 0000000..99ba48b --- /dev/null +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java @@ -0,0 +1,69 @@ +package org.rhq.plugins.storage; + +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.util.Properties; + +import org.rhq.core.domain.configuration.Configuration; +import org.rhq.core.domain.configuration.ConfigurationUpdateStatus; +import org.rhq.core.domain.configuration.PropertySimple; +import org.rhq.core.pluginapi.configuration.ConfigurationFacet; +import org.rhq.core.pluginapi.configuration.ConfigurationUpdateReport; +import org.rhq.core.util.PropertiesFileUpdate; + +/** + * @author John Sanda + */ +public class StorageNodeConfigDelegate implements ConfigurationFacet { + + private File jvmOptsFile; + + public StorageNodeConfigDelegate(File basedir) { + File confDir = new File(basedir, "conf"); + jvmOptsFile = new File(confDir, "cassandra-jvm.properties"); + } + + @Override + public Configuration loadResourceConfiguration() throws Exception { + Properties properties = new Properties(); + properties.load(new FileInputStream(jvmOptsFile)); + + Configuration config = new Configuration(); + + String heapMinProp = properties.getProperty("heap_min"); + String heapMaxProp = properties.getProperty("heap_max"); + String heapNewProp = properties.getProperty("heap_new"); + String threadStackSizeProp = properties.getProperty("thread_stack_size"); + + 
config.put(new PropertySimple("minHeapSize", heapMinProp.substring(4))); + config.put(new PropertySimple("maxHeapSize", heapMaxProp.substring(4))); + config.put(new PropertySimple("heapNewSize", heapNewProp.substring(4))); + config.put(new PropertySimple("threadStackSize", threadStackSizeProp.substring(4))); + + return config; + } + + @Override + public void updateResourceConfiguration(ConfigurationUpdateReport configurationUpdateReport) { + try { + PropertiesFileUpdate propertiesUpdater = new PropertiesFileUpdate(jvmOptsFile.getAbsolutePath()); + Properties properties = propertiesUpdater.loadExistingProperties(); + + Configuration config = configurationUpdateReport.getConfiguration(); + + // We want min and max heap to be the same + properties.setProperty("heap_min", "-Xms" + config.getSimpleValue("maxHeapSize")); + properties.setProperty("heap_max", "-Xmx" + config.getSimpleValue("maxHeapSize")); + properties.setProperty("heap_new", "-Xmn" + config.getSimpleValue("heapNewSize")); + properties.setProperty("thread_stack_size", "-Xss" + config.getSimpleValue("threadStackSize")); + + propertiesUpdater.update(properties); + + configurationUpdateReport.setStatus(ConfigurationUpdateStatus.SUCCESS); + } catch (IOException e) { + configurationUpdateReport.setErrorMessageFromThrowable(e); + } + + } +} diff --git a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml index b012f1c..5713288 100644 --- a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml +++ b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml @@ -114,8 +114,9 @@ <resource-configuration> <c:group name="MemorySettings"> <c:simple-property name="minHeapSize" + readOnly="true" description="The minimum heap size. This value will be used with the -Xms JVM option. 
This - should always be the same as Max Heap Size."/> + is read only because it is automatically set to the same value as Max Heap Size."/> <c:simple-property name="maxHeapSize" description="The maximum heap size. This value will be used with the -Xmx JVM option."/> <c:simple-property name="heapNewSize" diff --git a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeConfigDelegateTest.java b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeConfigDelegateTest.java new file mode 100644 index 0000000..f8d7bdc --- /dev/null +++ b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeConfigDelegateTest.java @@ -0,0 +1,82 @@ +package org.rhq.plugins.storage; + +import static org.testng.Assert.assertEquals; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.util.Properties; + +import org.testng.annotations.Test; + +import org.rhq.core.domain.configuration.Configuration; +import org.rhq.core.domain.configuration.PropertySimple; +import org.rhq.core.pluginapi.configuration.ConfigurationUpdateReport; + +/** + * @author John Sanda + */ +public class StorageNodeConfigDelegateTest { + + + + @Test + public void loadValidConfig() throws Exception { + File dir = new File(getClass().getResource(".").toURI()); + File basedir = new File(dir, getClass().getSimpleName() + "/loadValidConfig"); + File confDir = new File(basedir, "conf"); + + confDir.mkdirs(); + createDefaultConfig(confDir); + + StorageNodeConfigDelegate configDelegate = new StorageNodeConfigDelegate(basedir); + Configuration config = configDelegate.loadResourceConfiguration(); + + assertEquals(config.getSimpleValue("minHeapSize"), "512M", "Failed to load property [minHeapSize]"); + assertEquals(config.getSimpleValue("maxHeapSize"), "512M", "Failed to load property [maxHepSize]"); + assertEquals(config.getSimpleValue("heapNewSize"), "128M", "Failed to load property 
[heapNewSize]"); + assertEquals(config.getSimpleValue("threadStackSize"), "180k", "Failed to load property [threadStackSize]"); + } + + @Test + public void updateValidConfig() throws Exception { + File dir = new File(getClass().getResource(".").toURI()); + File basedir = new File(dir, getClass().getSimpleName() + "/updateValidConfig"); + File confDir = new File(basedir, "conf"); + + confDir.mkdirs(); + createDefaultConfig(confDir); + + Configuration config = new Configuration(); + config.put(new PropertySimple("minHeapSize", "1024M")); + config.put(new PropertySimple("maxHeapSize", "1024M")); + config.put(new PropertySimple("heapNewSize", "256M")); + config.put(new PropertySimple("threadStackSize", "240k")); + + ConfigurationUpdateReport report = new ConfigurationUpdateReport(config); + + StorageNodeConfigDelegate configDelegate = new StorageNodeConfigDelegate(basedir); + configDelegate.updateResourceConfiguration(report); + + Properties properties = new Properties(); + properties.load(new FileInputStream(new File(confDir, "cassandra-jvm.properties"))); + + assertEquals(properties.getProperty("heap_min"), "-Xms1024M", "Failed to update property [minHeapSize]"); + assertEquals(properties.getProperty("heap_max"), "-Xmx1024M", "Failed to update property [maxHeapSize]"); + assertEquals(properties.getProperty("heap_new"), "-Xmn256M", "Failed to update property [heapNewSize]"); + assertEquals(properties.getProperty("thread_stack_size"), "-Xss240k", + "Failed to update property [threadStackSize]"); + } + + private void createDefaultConfig(File confDir) throws IOException { + Properties properties = new Properties(); + properties.setProperty("heap_min", "-Xms512M"); + properties.setProperty("heap_max", "-Xmx512M"); + properties.setProperty("heap_new", "-Xmn128M"); + properties.setProperty("thread_stack_size", "-Xss180k"); + + properties.store(new FileOutputStream(new File(confDir, "cassandra-jvm.properties")), ""); + } + +}
commit cabe99fea2aa6ecbd2e26f2040ed70951c451dc2 Author: John Sanda jsanda@redhat.com Date: Sat Jul 13 16:30:19 2013 -0400
initial support for resource configuration with StorageNodeComponent
This commit introduces initial support for reading JVM options.
diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java index cc34fe3..737dded 100644 --- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java @@ -25,7 +25,10 @@
package org.rhq.plugins.storage;
+import java.io.File; +import java.io.FileInputStream; import java.util.List; +import java.util.Properties;
import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -38,6 +41,8 @@ import org.rhq.core.domain.configuration.Configuration; import org.rhq.core.domain.configuration.PropertyList; import org.rhq.core.domain.configuration.PropertyMap; import org.rhq.core.domain.configuration.PropertySimple; +import org.rhq.core.pluginapi.configuration.ConfigurationFacet; +import org.rhq.core.pluginapi.configuration.ConfigurationUpdateReport; import org.rhq.core.pluginapi.operation.OperationFacet; import org.rhq.core.pluginapi.operation.OperationResult; import org.rhq.core.util.exception.ThrowableUtil; @@ -47,7 +52,7 @@ import org.rhq.plugins.cassandra.util.KeyspaceService; /** * @author John Sanda */ -public class StorageNodeComponent extends CassandraNodeComponent implements OperationFacet { +public class StorageNodeComponent extends CassandraNodeComponent implements OperationFacet, ConfigurationFacet {
private Log log = LogFactory.getLog(StorageNodeComponent.class);
@@ -56,6 +61,38 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper private static final String RHQ_KEYSPACE = "rhq";
@Override + public Configuration loadResourceConfiguration() throws Exception { + File confDir = new File(getBasedir(), "conf"); + File jvmOptsFile = new File(confDir, "cassandra-jvm.properties"); + + Properties properties = new Properties(); + properties.load(new FileInputStream(jvmOptsFile)); + + Configuration config = new Configuration(); + + String heapMinProp = properties.getProperty("heap_min"); + String heapMaxProp = properties.getProperty("heap_max"); + String heapNewProp = properties.getProperty("heap_new"); + String threadStackSizeProp = properties.getProperty("thread_stack_size"); + + config.put(new PropertySimple("minHeapSize", heapMinProp.substring(4))); + config.put(new PropertySimple("maxHeapSize", heapMaxProp.substring(4))); + config.put(new PropertySimple("heapNewSize", heapNewProp.substring(4))); + config.put(new PropertySimple("threadStackSize", threadStackSizeProp.substring(4))); + + return config; + } + + private File getBasedir() { + Configuration pluginConfig = getResourceContext().getPluginConfiguration(); + return new File(pluginConfig.getSimpleValue("baseDir")); + } + + @Override + public void updateResourceConfiguration(ConfigurationUpdateReport configurationUpdateReport) { + } + + @Override public OperationResult invokeOperation(String name, Configuration parameters) throws Exception { if (name.equals("addNodeMaintenance")) { return nodeAdded(parameters); diff --git a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml index f02c8e6..b012f1c 100644 --- a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml +++ b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml @@ -111,6 +111,21 @@ </parameters> </operation>
+ <resource-configuration> + <c:group name="MemorySettings"> + <c:simple-property name="minHeapSize" + description="The minimum heap size. This value will be used with the -Xms JVM option. This + should always be the same as Max Heap Size."/> + <c:simple-property name="maxHeapSize" + description="The maximum heap size. This value will be used with the -Xmx JVM option."/> + <c:simple-property name="heapNewSize" + description="The size of the new generation portion of the heap. This value will be used with + the -Xmn JVM option."/> + <c:simple-property name="threadStackSize" + description="The thread stack size. This memory is allocated to each thread off heap."/> + </c:group> + </resource-configuration> + <server name="Cassandra Server JVM" sourcePlugin="JMX" sourceType="JMX Server" discovery="org.rhq.plugins.jmx.EmbeddedJMXServerDiscoveryComponent"
commit 5d1cb191a0ea22dd4f24379597645aeff3934df7 Author: John Sanda jsanda@redhat.com Date: Sat Jul 13 13:09:41 2013 -0400
updating/fixing integration test failure
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/java/org/rhq/cassandra/DeployerITest.java b/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/java/org/rhq/cassandra/DeployerITest.java index 91a5459..76ff090 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/java/org/rhq/cassandra/DeployerITest.java +++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/java/org/rhq/cassandra/DeployerITest.java @@ -103,8 +103,8 @@ public class DeployerITest { }
@Test(dependsOnMethods = "applyConfigChanges") - public void verifyConfigChangesToCassandraEnv() throws Exception { - assertFileDeployedAndUpdated("cassandra-env.sh"); + public void verifyConfigChangesToCassandraJvmOpts() throws Exception { + assertFileDeployedAndUpdated("cassandra-jvm.properties"); }
private void assertFileDeployedAndUpdated(String fileName) throws Exception { diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra-env.sh b/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra-env.sh deleted file mode 100644 index 62dc0f2..0000000 --- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra-env.sh +++ /dev/null @@ -1,247 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -calculate_heap_sizes() -{ - case "`uname`" in - Linux) - system_memory_in_mb=`free -m | awk '/Mem:/ {print $2}'` - system_cpu_cores=`egrep -c 'processor([[:space:]]+):.*' /proc/cpuinfo` - ;; - FreeBSD) - system_memory_in_bytes=`sysctl hw.physmem | awk '{print $2}'` - system_memory_in_mb=`expr $system_memory_in_bytes / 1024 / 1024` - system_cpu_cores=`sysctl hw.ncpu | awk '{print $2}'` - ;; - SunOS) - system_memory_in_mb=`prtconf | awk '/Memory size:/ {print $3}'` - system_cpu_cores=`psrinfo | wc -l` - ;; - Darwin) - system_memory_in_bytes=`sysctl hw.memsize | awk '{print $2}'` - system_memory_in_mb=`expr $system_memory_in_bytes / 1024 / 1024` - system_cpu_cores=`sysctl hw.ncpu | awk '{print $2}'` - ;; - *) - # assume reasonable defaults for e.g. a modern desktop or - # cheap server - system_memory_in_mb="2048" - system_cpu_cores="2" - ;; - esac - - # some systems like the raspberry pi don't report cores, use at least 1 - if [ "$system_cpu_cores" -lt "1" ] - then - system_cpu_cores="1" - fi - - # set max heap size based on the following - # max(min(1/2 ram, 1024MB), min(1/4 ram, 8GB)) - # calculate 1/2 ram and cap to 1024MB - # calculate 1/4 ram and cap to 8192MB - # pick the max - half_system_memory_in_mb=`expr $system_memory_in_mb / 2` - quarter_system_memory_in_mb=`expr $half_system_memory_in_mb / 2` - if [ "$half_system_memory_in_mb" -gt "1024" ] - then - half_system_memory_in_mb="1024" - fi - if [ "$quarter_system_memory_in_mb" -gt "8192" ] - then - quarter_system_memory_in_mb="8192" - fi - if [ "$half_system_memory_in_mb" -gt "$quarter_system_memory_in_mb" ] - then - max_heap_size_in_mb="$half_system_memory_in_mb" - else - max_heap_size_in_mb="$quarter_system_memory_in_mb" - fi - MAX_HEAP_SIZE="${max_heap_size_in_mb}M" - - # Young gen: min(max_sensible_per_modern_cpu_core * num_cores, 1/4 * heap size) - max_sensible_yg_per_core_in_mb="100" - max_sensible_yg_in_mb=`expr $max_sensible_yg_per_core_in_mb "*" $system_cpu_cores` - - desired_yg_in_mb=`expr 
$max_heap_size_in_mb / 4` - - if [ "$desired_yg_in_mb" -gt "$max_sensible_yg_in_mb" ] - then - HEAP_NEWSIZE="${max_sensible_yg_in_mb}M" - else - HEAP_NEWSIZE="${desired_yg_in_mb}M" - fi -} - -# Determine the sort of JVM we'll be running on. - -java_ver_output=`"${JAVA:-java}" -version 2>&1` - -jvmver=`echo "$java_ver_output" | awk -F'"' 'NR==1 {print $2}'` -JVM_VERSION=${jvmver%_*} -JVM_PATCH_VERSION=${jvmver#*_} - -jvm=`echo "$java_ver_output" | awk 'NR==2 {print $1}'` -case "$jvm" in - OpenJDK) - JVM_VENDOR=OpenJDK - # this will be "64-Bit" or "32-Bit" - JVM_ARCH=`echo "$java_ver_output" | awk 'NR==3 {print $2}'` - ;; - "Java(TM)") - JVM_VENDOR=Oracle - # this will be "64-Bit" or "32-Bit" - JVM_ARCH=`echo "$java_ver_output" | awk 'NR==3 {print $3}'` - ;; - *) - # Help fill in other JVM values - JVM_VENDOR=other - JVM_ARCH=unknown - ;; -esac - - -# Override these to set the amount of memory to allocate to the JVM at -# start-up. For production use you may wish to adjust this for your -# environment. MAX_HEAP_SIZE is the total amount of memory dedicated -# to the Java heap; HEAP_NEWSIZE refers to the size of the young -# generation. Both MAX_HEAP_SIZE and HEAP_NEWSIZE should be either set -# or not (if you set one, set the other). -# -# The main trade-off for the young generation is that the larger it -# is, the longer GC pause times will be. The shorter it is, the more -# expensive GC will be (usually). -# -# The example HEAP_NEWSIZE assumes a modern 8-core+ machine for decent pause -# times. If in doubt, and if you do not particularly want to tweak, go with -# 100 MB per physical CPU core. 
- -#MAX_HEAP_SIZE="4G" -#HEAP_NEWSIZE="800M" - -if [ "x$MAX_HEAP_SIZE" = "x" ] && [ "x$HEAP_NEWSIZE" = "x" ]; then - calculate_heap_sizes -else - if [ "x$MAX_HEAP_SIZE" = "x" ] || [ "x$HEAP_NEWSIZE" = "x" ]; then - echo "please set or unset MAX_HEAP_SIZE and HEAP_NEWSIZE in pairs (see cassandra-env.sh)" - exit 1 - fi -fi - -# Specifies the default port over which Cassandra will be available for -# JMX connections. -JMX_PORT="7200" - - -# Here we create the arguments that will get passed to the jvm when -# starting cassandra. - -# enable assertions. disabling this in production will give a modest -# performance benefit (around 5%). -JVM_OPTS="$JVM_OPTS -ea" - -# add the jamm javaagent -if [ "$JVM_VENDOR" != "OpenJDK" -o "$JVM_VERSION" > "1.6.0" ] \ - || [ "$JVM_VERSION" = "1.6.0" -a "$JVM_PATCH_VERSION" -ge 23 ] -then - JVM_OPTS="$JVM_OPTS -javaagent:$CASSANDRA_HOME/lib/jamm-0.2.5.jar" -fi - -# enable thread priorities, primarily so we can give periodic tasks -# a lower priority to avoid interfering with client workload -JVM_OPTS="$JVM_OPTS -XX:+UseThreadPriorities" -# allows lowering thread priority without being root. see -# http://tech.stolsvik.com/2010/01/linux-java-thread-priorities-workaround.htm... -JVM_OPTS="$JVM_OPTS -XX:ThreadPriorityPolicy=42" - -# min and max heap sizes should be set to the same value to avoid -# stop-the-world GC pauses during resize, and so that we can lock the -# heap in memory on startup to prevent any of it from being swapped -# out. 
-JVM_OPTS="$JVM_OPTS -Xms${MAX_HEAP_SIZE}" -JVM_OPTS="$JVM_OPTS -Xmx${MAX_HEAP_SIZE}" -JVM_OPTS="$JVM_OPTS -Xmn${HEAP_NEWSIZE}" -JVM_OPTS="$JVM_OPTS -XX:+HeapDumpOnOutOfMemoryError" - -# set jvm HeapDumpPath with CASSANDRA_HEAPDUMP_DIR -if [ "x$CASSANDRA_HEAPDUMP_DIR" != "x" ]; then - JVM_OPTS="$JVM_OPTS -XX:HeapDumpPath=$CASSANDRA_HEAPDUMP_DIR/cassandra-`date +%s`-pid$$.hprof" -fi - - -startswith() { [ "${1#$2}" != "$1" ]; } - -if [ "`uname`" = "Linux" ] ; then - # reduce the per-thread stack size to minimize the impact of Thrift - # thread-per-client. (Best practice is for client connections to - # be pooled anyway.) Only do so on Linux where it is known to be - # supported. - # u34 and greater need 180k - JVM_OPTS="$JVM_OPTS -Xss180k" -fi -echo "xss = $JVM_OPTS" - -# GC tuning options -JVM_OPTS="$JVM_OPTS -XX:+UseParNewGC" -JVM_OPTS="$JVM_OPTS -XX:+UseConcMarkSweepGC" -JVM_OPTS="$JVM_OPTS -XX:+CMSParallelRemarkEnabled" -JVM_OPTS="$JVM_OPTS -XX:SurvivorRatio=8" -JVM_OPTS="$JVM_OPTS -XX:MaxTenuringThreshold=1" -JVM_OPTS="$JVM_OPTS -XX:CMSInitiatingOccupancyFraction=75" -JVM_OPTS="$JVM_OPTS -XX:+UseCMSInitiatingOccupancyOnly" -JVM_OPTS="$JVM_OPTS -XX:+UseTLAB" -# note: bash evals '1.7.x' as > '1.7' so this is really a >= 1.7 jvm check -if [ "$JVM_VERSION" > "1.7" ] ; then - JVM_OPTS="$JVM_OPTS -XX:+UseCondCardMark" -fi - -# GC logging options -- uncomment to enable -# JVM_OPTS="$JVM_OPTS -XX:+PrintGCDetails" -# JVM_OPTS="$JVM_OPTS -XX:+PrintGCDateStamps" -# JVM_OPTS="$JVM_OPTS -XX:+PrintHeapAtGC" -# JVM_OPTS="$JVM_OPTS -XX:+PrintTenuringDistribution" -# JVM_OPTS="$JVM_OPTS -XX:+PrintGCApplicationStoppedTime" -# JVM_OPTS="$JVM_OPTS -XX:+PrintPromotionFailure" -# JVM_OPTS="$JVM_OPTS -XX:PrintFLSStatistics=1" -# JVM_OPTS="$JVM_OPTS -Xloggc:/var/log/cassandra/gc-`date +%s`.log" -# If you are using JDK 6u34 7u2 or later you can enable GC log rotation -# don't stick the date in the log name if rotation is on. 
-# JVM_OPTS="$JVM_OPTS -Xloggc:/var/log/cassandra/gc.log" -# JVM_OPTS="$JVM_OPTS -XX:+UseGCLogFileRotation" -# JVM_OPTS="$JVM_OPTS -XX:NumberOfGCLogFiles=10" -# JVM_OPTS="$JVM_OPTS -XX:GCLogFileSize=10M" - -# uncomment to have Cassandra JVM listen for remote debuggers/profilers on port 1414 -# JVM_OPTS="$JVM_OPTS -Xdebug -Xnoagent -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=1414" - -# Prefer binding to IPv4 network intefaces (when net.ipv6.bindv6only=1). See -# http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6342561 (short version: -# comment out this entry to enable IPv6 support). -JVM_OPTS="$JVM_OPTS -Djava.net.preferIPv4Stack=true" - -# jmx: metrics and administration interface -# -# add this if you're having trouble connecting: -# JVM_OPTS="$JVM_OPTS -Djava.rmi.server.hostname=<public name>" -# -# see -# https://blogs.oracle.com/jmxetc/entry/troubleshooting_connection_problems_in... -# for more on configuring JMX through firewalls, etc. (Short version: -# get it working with no firewall first.) 
-JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT" -JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl=false" -JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.authenticate=false" -JVM_OPTS="$JVM_OPTS $JVM_EXTRA_OPTS" diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra-jvm.properties b/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra-jvm.properties new file mode 100644 index 0000000..a90d23c --- /dev/null +++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra-jvm.properties @@ -0,0 +1,112 @@ +enable_assertions="-ea" + +# JMX settings +jmx_port=7200 + +use_thread_priorities="-XX:+UseThreadPriorities" + +thread_priority_policy="-XX:ThreadPriorityPolicy=42" + +heap_min=-Xms${MAX_HEAP_SIZE} +heap_max=-Xmx${MAX_HEAP_SIZE} +heap_new=-Xmn${HEAP_NEWSIZE} + +heap_dump_on_OOMError="-XX:+HeapDumpOnOutOfMemoryError" + +heap_dump_dir="" + +thread_stack_size=-Xss180k + +java_agent="" +# Enable jamm when running on Java 6 patch version 23 or higher. +#java_agent="-javaagent:$CASSANDRA_HOME/lib/jamm-0.2.5.jar" + +# GC tuning options +# +# Uses a parallel version of the young generation copying collector alongside +# the default collector. This minimizes pauses by using all available CPUs in +# parallel. The collector is compatible with both the default collector and the +# Concurrent Mark and Sweep (CMS) collector. +# This is a parallel collector option. +use_par_new_gc="-XX:+UseParNewGC" + +# Whether parallel remark enabled (only if ParNewGC) +# This is a parallel collector option. +cms_parallel_remark_enabled="-XX:+CMSParallelRemarkEnabled" + +# Uses the Concurrent Mark and Sweep collector. It is a mostly concurrent +# collector with a low pause time. It is a good match for multi-core systems. +use_concur_mark_sweep_gc="-XX:+UseConcMarkSweepGC" + +# Ratio of eden/survivor space size. The default value is 8. 
+# This is a Garbage First (G1) garbage collector option. +survivor_ratio="-XX:SurvivorRatio=8" + +# Maximum value for tenuring threshold. +# Young collection tuning option. +max_tenuring_threshold="-XX:MaxTenuringThreshold=1" + +# Percentage CMS generation occupancy to start a CMS collection cycle (A +# negative value means that CMSTirggerRatio is used). +# This is a CMS tuning option. +cms_initiating_occupancy_fraction="-XX:CMSInitiatingOccupancyFraction=75" + +# Only use occupancy as a criterion for starting a CMS collection. +# This is a CMS tuning option. +use_cms_initiating_occupancy_only="-XX:+UseCMSInitiatingOccupancyOnly" + +# Use thread local allocation blocks in young space. Enabled by default. +# This is a young collection tuning option. +use_tlab="-XX:+UseTLAB" + +##################### +# GC logging options +##################### +# Print more details at garbage collection +#print_gc_details="-XX:+PrintGCDetails" +print_gc_details="" + +# Prefix logging statements with wall clock time. +#print_gc_datestamps="XX:+PrintGCDateStamps" +print_gc_datestamps="" + +# Print heap layout before and after each garbage collection. +#print_heap_at_gc="-XX:+PrintHeapAtGC" +print_heap_at_gc="" + +# Print tenuring age information. +#print_tenuring_distribution="-XX:+PrintTenuringDistribution" +print_tenuring_distribution="" + +# Print the time the application has been stopped. +#print_gc_application_stopped_time="-XX:+PrintGCApplicationStoppedTime" +print_gc_application_stopped_time="" + +# Print more details about promotion failures. +#print_promotion_failure="-XX:+PrintPromotionFailure" +print_promotion_failure="" + +# Print statistics for CMS' FreeListSpace. +#print_flss_stats="-XX:PrintFLSStatistics=1" +print_flss_stats="" + +# Redirects GC output to file instead of console. 
+#gc_log_file="-Xloggc:/var/log/cassandra/gc-`date +%s`.log" +gc_log_file="" + +############################################################################### +# THE FOLLOWING IS READ ONLY +# Now build the JVM_OPTS variable. The following variables should NOT be +# directly modified. Instead update the preceding variables. The following +# variables build up the JVM_OPTS variable that is read by the cassandra and +# cassandra.bat scripts. +############################################################################### +HEAP_OPTS="${heap_min} ${heap_max} ${heap_new} ${heap_dump_on_OOMError} ${thread_stack_size}" + +GC_TUNING_OPTS="${use_par_new_gc} ${use_concur_mark_sweep_gc} ${cms_parallel_remark_enabled} ${survivor_ratio} ${max_tenuring_threshold} ${cms_initiating_occupancy_fraction} ${use_cms_initiating_occupancy_only} ${use_tlab}" + +GC_LOGGING_OPTS="${print_gc_details} ${print_gc_datestamps} ${print_heap_at_gc} ${print_tenuring_distribution} ${print_gc_application_stopped_time} ${print_promotion_failure} ${print_flss_stats} ${gc_log_file}" + +JMX_OPTS="-Dcom.sun.management.jmxremote.port=${jmx_port} -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false" + +JVM_OPTS="${enable_assertions} ${java_agent} ${use_thread_priorities} ${thread_priority_policy} ${HEAP_OPTS} ${GC_TUNING_OPTS} ${GC_LOGGING_OPTS} ${JMX_OPTS}" diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra.yaml b/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra.yaml index 82778c9..15d08f2 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra.yaml +++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra.yaml @@ -681,7 +681,7 @@ client_encryption_options: # can be: all - all traffic is compressed # dc - traffic between different datacenters is compressed # none - nothing is compressed. 
-internode_compression: all +internode_compression: none
# Enable or disable tcp_nodelay for inter-dc communication. # Disabling it will result in larger (but fewer) network packets being sent,
commit bf78527f20bfeb95fe6040ae5f744bf633d08140 Author: John Sanda jsanda@redhat.com Date: Sat Jul 13 12:46:07 2013 -0400
remove usage of cassandra-env.sh for cassandra start up
We want to manage C* heap settings and other options/arguments passed to the C* JVM command line. On linux, the JVM options are set using the JVM_OPTS shell script variable which is done in cassandra-env.sh. The cassandra-env.sh script however does not lend itself well to our structured, resource configuration. The JVM_OPTS variable is now built up in cassandra-jvm.properties which is a java properties file that also uses legal shell syntax. The file is sourced from the cassandra start script. This paves the way for resource configuration of the Cassandra JVM options.
I am also hoping that we can do something similar for Windows where we can use cassandra-jvm.properties. This would substantially reduce the need for platform specific code in the storage plugin.
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/pom.xml b/modules/common/cassandra-ccm/cassandra-ccm-core/pom.xml index 0b015b4..42f6c8b 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-core/pom.xml +++ b/modules/common/cassandra-ccm/cassandra-ccm-core/pom.xml @@ -167,6 +167,9 @@ <copy file="${settings.localRepository}/org/xerial/snappy/snappy-java/${cassandra.snappy.version}/snappy-java-${cassandra.snappy.version}.jar" todir="${cassandra.dir}/lib"/> <move file="${project.build.outputDirectory}/cassandra/conf" todir="${cassandra.dir}"/> + <delete file="${cassandra.dir}/bin/cassandra"/> + <move file="${project.build.outputDirectory}/cassandra/bin/cassandra" todir="${cassandra.dir}/bin"/> + <delete dir="${project.build.outputDirectory}/cassandra"/> <delete dir="${cassandra.dir}/javadoc"/>
<zip basedir="${cassandra.dir}" destfile="${cassandra.distro.zip}"/> diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/bin/cassandra b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/bin/cassandra new file mode 100755 index 0000000..742d9c0 --- /dev/null +++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/bin/cassandra @@ -0,0 +1,193 @@ +#!/bin/sh +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# OPTIONS: +# -f: start in foreground +# -p <filename>: log the pid to a file (useful to kill it later) +# -v: print version string and exit + +# CONTROLLING STARTUP: +# +# This script relies on few environment variables to determine startup +# behavior, those variables are: +# +# CLASSPATH -- A Java classpath containing everything necessary to run. +# JVM_OPTS -- Additional arguments to the JVM for heap size, etc +# CASSANDRA_CONF -- Directory containing Cassandra configuration files. +# +# As a convenience, a fragment of shell is sourced in order to set one or +# more of these variables. This so-called `include' can be placed in a +# number of locations and will be searched for in order. 
The lowest +# priority search path is the same directory as the startup script, and +# since this is the location of the sample in the project tree, it should +# almost work Out Of The Box. +# +# Any serious use-case though will likely require customization of the +# include. For production installations, it is recommended that you copy +# the sample to one of /usr/share/cassandra/cassandra.in.sh, +# /usr/local/share/cassandra/cassandra.in.sh, or +# /opt/cassandra/cassandra.in.sh and make your modifications there. +# +# Another option is to specify the full path to the include file in the +# environment. For example: +# +# $ CASSANDRA_INCLUDE=/path/to/in.sh cassandra -p /var/run/cass.pid +# +# Note: This is particularly handy for running multiple instances on a +# single installation, or for quick tests. +# +# Finally, developers and enthusiasts who frequently run from an SVN +# checkout, and do not want to locally modify bin/cassandra.in.sh, can put +# a customized include file at ~/.cassandra.in.sh. +# +# If you would rather configure startup entirely from the environment, you +# can disable the include by exporting an empty CASSANDRA_INCLUDE, or by +# ensuring that no include files exist in the aforementioned search list. +# Be aware that you will be entirely responsible for populating the needed +# environment variables. + +# NB: Developers should be aware that this script should remain compatible with +# POSIX sh and Solaris sh. This means, in particular, no $(( )) and no $( ). + +# If an include wasn't specified in the environment, then search for one... +if [ "x$CASSANDRA_INCLUDE" = "x" ]; then + # Locations (in order) to use when searching for an include file. + for include in /usr/share/cassandra/cassandra.in.sh \ + /usr/local/share/cassandra/cassandra.in.sh \ + /opt/cassandra/cassandra.in.sh \ + "$HOME/.cassandra.in.sh" \ + "`dirname $0`/cassandra.in.sh"; do + if [ -r "$include" ]; then + . 
"$include" + break + fi + done +# ...otherwise, source the specified include. +elif [ -r "$CASSANDRA_INCLUDE" ]; then + . "$CASSANDRA_INCLUDE" +fi + +# Use JAVA_HOME if set, otherwise look for java in PATH +if [ -n "$JAVA_HOME" ]; then + JAVA="$JAVA_HOME/bin/java" +else + JAVA=java +fi + +# If numactl is available, use it. For Cassandra, the priority is to +# avoid disk I/O. Even for the purpose of CPU efficiency, we don't +# really have CPU<->data affinity anyway. Also, empirically test that numactl +# works before trying to use it (CASSANDRA-3245). +NUMACTL_ARGS="--interleave=all" +if which numactl >/dev/null 2>/dev/null && numactl $NUMACTL_ARGS ls / >/dev/null 2>/dev/null +then + NUMACTL="numactl $NUMACTL_ARGS" +else + NUMACTL="" +fi + +if [ -z "$CASSANDRA_CONF" -o -z "$CLASSPATH" ]; then + echo "You must set the CASSANDRA_CONF and CLASSPATH vars" >&2 + exit 1 +fi + +if [ -f "$CASSANDRA_CONF/cassandra-env.sh" ]; then + #. "$CASSANDRA_CONF/cassandra-env.sh" + . "$CASSANDRA_CONF/cassandra-jvm.properties" +fi + +# Special-case path variables. +case "`uname`" in + CYGWIN*) + CLASSPATH=`cygpath -p -w "$CLASSPATH"` + CASSANDRA_CONF=`cygpath -p -w "$CASSANDRA_CONF"` + ;; +esac + +launch_service() +{ + pidpath=$1 + foreground=$2 + props=$3 + class=$4 + cassandra_parms="-Dlog4j.configuration=log4j-server.properties -Dlog4j.defaultInitOverride=true" + + if [ "x$pidpath" != "x" ]; then + cassandra_parms="$cassandra_parms -Dcassandra-pidfile=$pidpath" + fi + + # The cassandra-foreground option will tell CassandraDaemon not + # to close stdout/stderr, but it's up to us not to background. + if [ "x$foreground" != "x" ]; then + cassandra_parms="$cassandra_parms -Dcassandra-foreground=yes" + exec $NUMACTL "$JAVA" $JVM_OPTS $cassandra_parms -cp "$CLASSPATH" $props "$class" + # Startup CassandraDaemon, background it, and write the pid. + else + exec $NUMACTL "$JAVA" $JVM_OPTS $cassandra_parms -cp "$CLASSPATH" $props "$class" <&- & + [ ! -z "$pidpath" ] && printf "%d" $! 
> "$pidpath" + true + fi + + return $? +} + +# Parse any command line options. +args=`getopt vfhp:bD: "$@"` +eval set -- "$args" + +classname="org.apache.cassandra.service.CassandraDaemon" + +while true; do + case "$1" in + -p) + pidfile="$2" + shift 2 + ;; + -f) + foreground="yes" + shift + ;; + -h) + echo "Usage: $0 [-f] [-h] [-p pidfile]" + exit 0 + ;; + -v) + "$JAVA" -cp "$CLASSPATH" org.apache.cassandra.tools.GetVersion + exit 0 + ;; + -D) + properties="$properties -D$2" + shift 2 + ;; + --) + shift + break + ;; + *) + echo "Error parsing arguments!" >&2 + exit 1 + ;; + esac +done + +# Start up the service +launch_service "$pidfile" "$foreground" "$properties" "$classname" + +exit $? + +# vi:ai sw=4 ts=4 tw=0 et diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/cassandra-jvm.properties b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/cassandra-jvm.properties new file mode 100644 index 0000000..612c65e --- /dev/null +++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/cassandra-jvm.properties @@ -0,0 +1,112 @@ +enable_assertions="-ea" + +# JMX settings +jmx_port="7299" + +use_thread_priorities="-XX:+UseThreadPriorities" + +thread_priority_policy="-XX:ThreadPriorityPolicy=42" + +heap_min="-Xms256M" +heap_max="-Xms256M" +heap_new="-Xmn64M" + +heap_dump_on_OOMError="-XX:+HeapDumpOnOutOfMemoryError" + +heap_dump_dir="" + +thread_stack_size="-Xss180k" + +java_agent="" +# Enable jamm when running on Java 6 patch version 23 or higher. +#java_agent="-javaagent:$CASSANDRA_HOME/lib/jamm-0.2.5.jar" + +# GC tuning options +# +# Uses a parallel version of the young generation copying collector alongside +# the default collector. This minimizes pauses by using all available CPUs in +# parallel. The collector is compatible with both the default collector and the +# Concurrent Mark and Sweep (CMS) collector. +# This is a parallel collector option. 
+use_par_new_gc="-XX:+UseParNewGC" + +# Whether parallel remark enabled (only if ParNewGC) +# This is a parallel collector option. +cms_parallel_remark_enabled="-XX:+CMSParallelRemarkEnabled" + +# Uses the Concurrent Mark and Sweep collector. It is a mostly concurrent +# collector with a low pause time. It is a good match for multi-core systems. +use_concur_mark_sweep_gc="-XX:+UseConcMarkSweepGC" + +# Ratio of eden/survivor space size. The default value is 8. +# This is a Garbage First (G1) garbage collector option. +survivor_ratio="-XX:SurvivorRatio=8" + +# Maximum value for tenuring threshold. +# Young collection tuning option. +max_tenuring_threshold="-XX:MaxTenuringThreshold=1" + +# Percentage CMS generation occupancy to start a CMS collection cycle (A +# negative value means that CMSTirggerRatio is used). +# This is a CMS tuning option. +cms_initiating_occupancy_fraction="-XX:CMSInitiatingOccupancyFraction=75" + +# Only use occupancy as a criterion for starting a CMS collection. +# This is a CMS tuning option. +use_cms_initiating_occupancy_only="-XX:+UseCMSInitiatingOccupancyOnly" + +# Use thread local allocation blocks in young space. Enabled by default. +# This is a young collection tuning option. +use_tlab="-XX:+UseTLAB" + +##################### +# GC logging options +##################### +# Print more details at garbage collection +#print_gc_details="-XX:+PrintGCDetails" +print_gc_details="" + +# Prefix logging statements with wall clock time. +#print_gc_datestamps="XX:+PrintGCDateStamps" +print_gc_datestamps="" + +# Print heap layout before and after each garbage collection. +#print_heap_at_gc="-XX:+PrintHeapAtGC" +print_heap_at_gc="" + +# Print tenuring age information. +#print_tenuring_distribution="-XX:+PrintTenuringDistribution" +print_tenuring_distribution="" + +# Print the time the application has been stopped. 
+#print_gc_application_stopped_time="-XX:+PrintGCApplicationStoppedTime" +print_gc_application_stopped_time="" + +# Print more details about promotion failures. +#print_promotion_failure="-XX:+PrintPromotionFailure" +print_promotion_failure="" + +# Print statistics for CMS' FreeListSpace. +#print_flss_stats="-XX:PrintFLSStatistics=1" +print_flss_stats="" + +# Redirects GC output to file instead of console. +#gc_log_file="-Xloggc:/var/log/cassandra/gc-`date +%s`.log" +gc_log_file="" + +############################################################################### +# THE FOLLOWING IS READ ONLY +# Now build the JVM_OPTS variable. The following variables should NOT be +# directly modified. Instead update the preceding variables. The following +# variables build up the JVM_OPTS variable that is read by the cassandra and +# cassandra.bat scripts. +############################################################################### +HEAP_OPTS="${heap_min} ${heap_max} ${heap_new} ${heap_dump_on_OOMError} ${thread_stack_size}" + +GC_TUNING_OPTS="${use_par_new_gc} ${use_concur_mark_sweep_gc} ${cms_parallel_remark_enabled} ${survivor_ratio} ${max_tenuring_threshold} ${cms_initiating_occupancy_fraction} ${use_cms_initiating_occupancy_only} ${use_tlab}" + +GC_LOGGING_OPTS="${print_gc_details} ${print_gc_datestamps} ${print_heap_at_gc} ${print_tenuring_distribution} ${print_gc_application_stopped_time} ${print_promotion_failure} ${print_flss_stats} ${gc_log_file}" + +JMX_OPTS="-Dcom.sun.management.jmxremote.port=${jmx_port} -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false" + +JVM_OPTS="${enable_assertions} ${java_agent} ${use_thread_priorities} ${thread_priority_policy} ${HEAP_OPTS} ${GC_TUNING_OPTS} ${GC_LOGGING_OPTS} ${JMX_OPTS}" diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java index 
e85813d..1e31e14 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java +++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java @@ -31,10 +31,12 @@ import java.io.FileWriter; import java.io.IOException; import java.io.InputStream; import java.util.Map; +import java.util.Properties;
import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory;
+import org.rhq.core.util.PropertiesFileUpdate; import org.rhq.core.util.TokenReplacingReader; import org.rhq.core.util.ZipUtil; import org.rhq.core.util.stream.StreamUtil; @@ -83,10 +85,13 @@ public class Deployer {
applyConfigChanges(confDir, "cassandra.yaml", tokens); applyConfigChanges(confDir, "log4j-server.properties", tokens); - applyConfigChanges(confDir, "cassandra-env.sh", tokens); + applyChangesToCassandraJvmProps(confDir, deploymentOptions); +// applyConfigChanges(confDir, "cassandra-env.sh", tokens); }
- private void applyConfigChanges(File confDir, String fileName, Map<String, String> tokens) throws DeploymentException { + private void applyConfigChanges(File confDir, String fileName, Map<String, String> tokens) + throws DeploymentException { + File filteredFile = new File(confDir, fileName); try { if (log.isInfoEnabled()) { @@ -104,6 +109,29 @@ public class Deployer { } }
+ private void applyChangesToCassandraJvmProps(File confDir, DeploymentOptions deploymentOptions) + throws DeploymentException { + + File jvmPropsFile = new File(confDir, "cassandra-jvm.properties"); + try { + log.info("Applying configuration changes to " + jvmPropsFile); + + PropertiesFileUpdate propertiesUpdater = new PropertiesFileUpdate(jvmPropsFile.getAbsolutePath()); + Properties properties = propertiesUpdater.loadExistingProperties(); + + properties.setProperty("heap_min", "-Xms" + deploymentOptions.getHeapSize()); + properties.setProperty("heap_max", "-Xmx" + deploymentOptions.getHeapSize()); + properties.setProperty("heap_new", "-Xmn" + deploymentOptions.getHeapNewSize()); + properties.setProperty("thread_stack_size", "-Xss" + deploymentOptions.getStackSize()); + properties.setProperty("jmx_port", deploymentOptions.getJmxPort().toString()); + + propertiesUpdater.update(properties); + } catch (IOException e) { + log.error("An error occurred while updating " + jvmPropsFile, e); + throw new DeploymentException("An error occurred while updating " + jvmPropsFile, e); + } + } + public void updateFilePerms() { File deployDir = new File(deploymentOptions.getBasedir()); File binDir = new File(deployDir, "bin");
commit 7185fb3fbaf98dde194872657c7362134d342c10 Author: John Sanda jsanda@redhat.com Date: Sat Jul 13 10:08:22 2013 -0400
remove obsolete files that are no longer used
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/deploy.xml b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/deploy.xml deleted file mode 100644 index 19d5647..0000000 --- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/deploy.xml +++ /dev/null @@ -1,221 +0,0 @@ -<project name="rhq_cassandra_bundle" - default="main" - xmlns:rhq="antlib:org.rhq.bundle"> - <rhq:bundle name="${rhq.cassandra.bundle.name}" - version="${rhq.cassandra.bundle.version}" - description="A bundle for deploying RHQ Cassandra nodes."> - - <!-- - NOTE: the name attribute of an rhq:input-property does not support using a dash. - There is a convention where dashes are used in property names in rhq properties files - in the trailing part of a property name. If an rhq:input-property has a corresponding - property in cassandra.properties and contains a dash, the dash will be changed to an - underscore in this file. - --> - - <rhq:input-property name="cluster.name" - description="The name of the cluster. This is used to prevent machines in one logical cluster from joining another" - required="true" - defaultValue="rhqdev" - type="string"/> - - <rhq:input-property name="cluster.dir" - description="The directory in which Cassandra nodes will be installed" - required="true" - defaultValue="" - type="string"/> - - <rhq:input-property name="data.dir" - description="The directory where Cassandra should store data files." - required="true" - defaultValue="data" - type="string"/> - - <rhq:input-property name="commitlog.dir" - description="The directory where Cassandra stores its commit logs." - required="true" - defaultValue="commit_log" - type="string"/> - - <rhq:input-property name="saved.caches.dir" - description="The directory where Cassandra stores saved caches." - required="true" - defaultValue="saved_caches" - type="string"/> - - <rhq:input-property name="log.dir" - description="The directory where Cassandra stores log files." 
- required="true" - defaultValue="logs" - type="string"/> - - <rhq:input-property name="rhq.cassandra.log.file.name" - description="The name of the log file to which Cassandra writes." - required="true" - defaultValue="rhq-storage-node.log" - type="string"/> - - <rhq:input-property name="logging.level" - description="The log4j logging level to use." - required="false" - defaultValue="DEBUG" - type="string"/> - - <rhq:input-property name="seeds" - description="A comma-delimited list of IP addresses/host names that are deemed contact points. Cassandra nodes use this list of hosts to find each other and learn the topology of the ring. If you are running a local development cluster, be sure to have aliases set up for localhost." - required="false" - defaultValue="127.0.0.1" - type="string"/> - - <rhq:input-property name="rhq.cassandra.num_tokens" - description="Defines the number of tokens randomly assigned to a node on the ring. The more tokens, relative to other nodes, the larger the proportion of data that this node will store. You probably want all nodes to have the same number of tokens assuming they have equal hardware capability." - required="false" - defaultValue="256" - type="string"/> - - <rhq:input-property name="jmx.port" - description="The port over which Cassandra listens for JMX connections. Each node should be assigned a unique port." - required="false" - defaultValue="7200" - type="string"/> - - <rhq:input-property name="rhq.cassandra.storage.port" - description="The port used for inter-node communication." - required="false" - defaultValue="7000" - type="string"/> - - <rhq:input-property name="rhq.cassandra.ssl.storage.port" - description="The port used for encrypted inter-node communication." - required="false" - defaultValue="7001" - type="string"/> - - <rhq:input-property name="listen.address" - description="Address used for inter-node communication. Defaults to value of hostname property." 
- required="true" - defaultValue="" - type="string"/> - - <rhq:input-property name="rpc.address" - description="Address used for Thrift RPC client communication. Defaults to value of hostname property." - required="true" - defaultValue="" - type="string"/> - - <rhq:input-property name="cassandra.ring.delay.property" - required="false" - defaultValue="" - type="string"/> - - <rhq:input-property name="cassandra.ring.delay" - description="When a node initializes it contacts a seed and then sleeps for RING_DELAY (milliseconds) to learn about other nodes in the cluster. Cassandra uses a default value of 30 seconds." - required="false" - defaultValue="" - type="string"/> - - <rhq:input-property name="rhq.cassandra.native_transport_max_threads" - description="The maximum number of threads handling native CQL requests." - required="false" - defaultValue="64" - type="integer"/> - - <rhq:input-property name="rhq.cassandra.native_transport_port" - description="The port for the CQL native transport to listen for clients on." - required="false" - defaultValue="9042" - type="integer"/> - - <rhq:input-property name="rhq.cassandra.rpc_port" - description="The port to listen for Thrift clients on." - required="false" - defaultValue="9160" - type="integer"/> - - <rhq:input-property name="rhq.cassandra.authenticator" - description="A class that performs authentication. The value should be a fully qualified class name and implement IAuthenticator." - required="false" - defaultValue="org.rhq.cassandra.auth.SimpleAuthenticator" - type="string"/> - - <rhq:input-property name="rhq.cassandra.authorizer" - description="A class that performs authorization. Used to limit/provide permissions. The value should be a fully qualified class name and implement IAuthorizer." 
- required="false" - defaultValue="org.rhq.cassandra.auth.SimpleAuthorizer" - type="string"/> - - <rhq:input-property name="rhq.cassandra.password.properties.file" - description="The location of the password properties file used by SimpleAuthenticator. If a relative path is specified, its location is resolved relative to Cassandra's bin directory." - required="false" - defaultValue="./../conf/passwd.properties" - type="file"/> - - <rhq:input-property name="rhq.cassandra.access.properties.file" - description="The location of the authorization properties file used by SimpleAuthority. If a relative path is specified, its location is resolved relative to Cassandra's bin directory." - required="false" - defaultValue="./../conf/access.properties" - type="file"/> - - <rhq:input-property name="rhq.cassandra.username" - description="The username with which to authenticate requests to Cassandra." - required="true" - type="string"/> - - <rhq:input-property name="rhq.cassandra.password" - description="The password with which to authenticate requests to Cassandra." 
- required="true" - type="string"/> - - <rhq:input-property name="rhq.cassandra.max.heap.size" - required="true" - defaultValue="${MAX_HEAP_SIZE}"/> - - <rhq:input-property name="rhq.cassandra.heap.new.size" - required="true" - defaultValue="${HEAP_NEWSIZE}"/> - - <rhq:deployment-unit name="cassandra" preinstallTarget="pre-install" postinstallTarget="post-install"> -<!-- - <rhq:file name="dbsetup.script" destinationFile="scripts/dbsetup.script" replace="true"/> ---> - <rhq:archive name="cassandra.zip"> - rhq:replace - <rhq:fileset dir="conf"> - <include name="cassandra.yaml"/> - </rhq:fileset> - <rhq:fileset dir="conf"> - <include name="cassandra-env.sh"/> - </rhq:fileset> - <rhq:fileset dir="conf"> - <include name="log4j-server.properties"/> - </rhq:fileset> - <rhq:fileset dir="conf"> - <include name="passwd.properties"/> - </rhq:fileset> -<!-- - <rhq:fileset dir="scripts"> - <include name="dbsetup.script"/> - </rhq:fileset> ---> - </rhq:replace> - </rhq:archive> - </rhq:deployment-unit> - </rhq:bundle> - - <target name="main"/> - - <target name="pre-install"> - <mkdir dir="${cluster.dir}"/> - - </target> - - <target name="post-install"> - <property name="bin.dir" value="${rhq.deploy.dir}/bin"/> - - <chmod file="${bin.dir}/cassandra" perm="+x"/> - <chmod file="${bin.dir}/cqlsh" perm="+x"/> - <chmod file="${bin.dir}/cassandra-cli" perm="+x"/> - <chmod file="${bin.dir}/nodetool" perm="+x"/> - </target> - -</project> diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/cassandra/cli/dbsetup.script b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/cassandra/cli/dbsetup.script deleted file mode 100644 index 73bc2a7..0000000 --- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/cassandra/cli/dbsetup.script +++ /dev/null @@ -1,40 +0,0 @@ -create keyspace rhq - with placement_strategy = 'org.apache.cassandra.locator.SimpleStrategy' and - strategy_options = {replication_factor:1}; - -use rhq; - -create column family raw_metrics - with 
comparator = DateType and - default_validation_class = DoubleType and - key_validation_class = Int32Type; - -create column family one_hour_metric_data - with comparator = 'CompositeType(DateType, Int32Type)' and - default_validation_class = DoubleType and - key_validation_class = Int32Type; - -create column family six_hour_metric_data - with comparator = 'CompositeType(DateType, Int32Type)' and - default_validation_class = DoubleType and - key_validation_class = Int32Type; - -create column family twenty_four_hour_metric_data - with comparator = 'CompositeType(DateType, Int32Type)' and - default_validation_class = DoubleType and - key_validation_class = Int32Type; - -create column family metrics_work_queue - with comparator = 'CompositeType(DateType, Int32Type)' and - default_validation_class = Int32Type and - key_validation_class = UTF8Type; - -create column family resource_traits - with comparator = 'CompositeType(DateType, Int32Type, Int32Type, UTF8Type, UTF8Type)' and - default_validation_class = UTF8Type and - key_validation_class = Int32Type; - -create column family traits - with comparator = DateType and - default_validation_class = UTF8Type and - key_validation_class = Int32Type; diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/cassandra/cql/create_keyspace.cql b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/cassandra/cql/create_keyspace.cql deleted file mode 100644 index 9df13a0..0000000 --- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/cassandra/cql/create_keyspace.cql +++ /dev/null @@ -1 +0,0 @@ -CREATE KEYSPACE rhq WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}; diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/cassandra/cql/dbsetup.cql b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/cassandra/cql/dbsetup.cql deleted file mode 100644 index 189b35b..0000000 --- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/cassandra/cql/dbsetup.cql +++ 
/dev/null @@ -1,42 +0,0 @@ -CREATE KEYSPACE rhq WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}; - -USE rhq; - -CREATE TABLE raw_metrics ( - schedule_id int, - time timestamp, - value double, - PRIMARY KEY (schedule_id, time) -); - -CREATE TABLE one_hour_metrics ( - schedule_id int, - time timestamp, - type int, - value double, - PRIMARY KEY (schedule_id, time, type) -); - -CREATE TABLE six_hour_metrics ( - schedule_id int, - time timestamp, - type int, - value double, - PRIMARY KEY (schedule_id, time, type) -); - -CREATE TABLE twenty_four_hour_metrics ( - schedule_id int, - time timestamp, - type int, - value double, - PRIMARY KEY (schedule_id, time, type) -); - -CREATE TABLE metrics_index ( - bucket varchar, - time timestamp, - schedule_id int, - null_col boolean, - PRIMARY KEY (bucket, time, schedule_id) -);
commit f9f3f712f702058e0d6ef5f1b99dcbe2162e63a9 Author: Heiko W. Rupp hwr@redhat.com Date: Fri Jul 12 11:09:16 2013 +0200
Downgrade to 1.2.3, as the jboss nexus repo does not find 1.2.5. It is more recent than 1.1.1 anyway
diff --git a/pom.xml b/pom.xml index 6b1d75f..c5cf5a7 100644 --- a/pom.xml +++ b/pom.xml @@ -151,7 +151,7 @@ <augeas.zip.mask>*-${augeas.classifier}.zip</augeas.zip.mask> <ant.contrib.version>1.0b3</ant.contrib.version> <freemarker.version>2.3.18</freemarker.version> - <swagger-annotations.version>1.2.5</swagger-annotations.version> + <swagger-annotations.version>1.2.3</swagger-annotations.version> <powermock.version>1.4.12</powermock.version> <arquillian.version>1.0.3.Final</arquillian.version> <arquillian.jboss.container.version>7.2.0.Final</arquillian.jboss.container.version>
commit c33f3226056333a1aaaa303306d1ce6dadcefe47 Author: Heiko W. Rupp hwr@redhat.com Date: Fri Jul 12 10:35:44 2013 +0200
Bump swagger annotations version to a stable version and remove the snapshot repo. Thanks gaYak for reminding me.
diff --git a/modules/enterprise/server/jar/pom.xml b/modules/enterprise/server/jar/pom.xml index 2127930..7c236c7 100644 --- a/modules/enterprise/server/jar/pom.xml +++ b/modules/enterprise/server/jar/pom.xml @@ -439,7 +439,7 @@ <dependency> <groupId>com.wordnik</groupId> <artifactId>swagger-annotations_2.9.1</artifactId> - <version>1.1.1-SNAPSHOT</version> + <version>${swagger-annotations.version}</version> <scope>provided</scope> </dependency>
@@ -686,15 +686,6 @@
</build>
- <repositories> - <repository> - <!-- TODO change when the annotations are published. This is temporary for the swagger annotations for REST-docu --> - <id>sonatype-oss-snapshot</id> - <name>Sonatype OSS Snapshot repository</name> - <url>https://oss.sonatype.org/content/repositories/snapshots</url> - </repository> - - </repositories>
<profiles>
diff --git a/modules/helpers/rest-docs-generator/pom.xml b/modules/helpers/rest-docs-generator/pom.xml index 9cc4fa1..c672abf 100644 --- a/modules/helpers/rest-docs-generator/pom.xml +++ b/modules/helpers/rest-docs-generator/pom.xml @@ -122,16 +122,6 @@ </plugins> </build>
- <repositories> - <repository> - <!-- TODO temporary for the swagger annotations --> - <id>sonatype-oss-snapshot</id> - <name>Sonatype OSS Snapshot repository</name> - <url>https://oss.sonatype.org/content/repositories/snapshots</url> - </repository> - - </repositories> -
<dependencies> <dependency> @@ -149,7 +139,7 @@ <dependency> <groupId>com.wordnik</groupId> <artifactId>swagger-annotations_2.9.1</artifactId> - <version>1.1.1-SNAPSHOT</version> + <version>${swagger-annotations.version}</version> </dependency> <dependency> <groupId>org.jboss.resteasy</groupId> diff --git a/pom.xml b/pom.xml index 26703d1..6b1d75f 100644 --- a/pom.xml +++ b/pom.xml @@ -151,6 +151,7 @@ <augeas.zip.mask>*-${augeas.classifier}.zip</augeas.zip.mask> <ant.contrib.version>1.0b3</ant.contrib.version> <freemarker.version>2.3.18</freemarker.version> + <swagger-annotations.version>1.2.5</swagger-annotations.version> <powermock.version>1.4.12</powermock.version> <arquillian.version>1.0.3.Final</arquillian.version> <arquillian.jboss.container.version>7.2.0.Final</arquillian.jboss.container.version> @@ -901,7 +902,7 @@ <artifactId>javax.servlet-api</artifactId> <version>3.0.1</version> </dependency> - + <dependency> <groupId>org.yaml</groupId> <artifactId>snakeyaml</artifactId>
commit 524d1b66879fa6d20788505e1c43c0e046e06e48 Author: Stefan Negrea snegrea@redhat.com Date: Fri Jul 12 00:58:48 2013 -0500
Initial implementation for fetching all the alerts triggered for Storage Nodes resource and sub-sources.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 9d073ee..fa51fe1 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -28,8 +28,10 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Date; import java.util.HashMap; +import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.Queue;
import javax.ejb.EJB; import javax.ejb.Stateless; @@ -43,6 +45,7 @@ import org.quartz.JobDataMap; import org.quartz.SimpleTrigger; import org.quartz.Trigger;
+import org.rhq.core.domain.alert.Alert; import org.rhq.core.domain.auth.Subject; import org.rhq.core.domain.authz.Permission; import org.rhq.core.domain.cloud.Server; @@ -51,6 +54,7 @@ import org.rhq.core.domain.cloud.StorageNode.OperationMode; import org.rhq.core.domain.cloud.StorageNodeLoadComposite; import org.rhq.core.domain.common.JobTrigger; import org.rhq.core.domain.configuration.Configuration; +import org.rhq.core.domain.criteria.AlertCriteria; import org.rhq.core.domain.criteria.ResourceGroupCriteria; import org.rhq.core.domain.criteria.StorageNodeCriteria; import org.rhq.core.domain.measurement.MeasurementAggregate; @@ -59,8 +63,11 @@ import org.rhq.core.domain.operation.bean.GroupOperationSchedule; import org.rhq.core.domain.resource.Resource; import org.rhq.core.domain.resource.ResourceType; import org.rhq.core.domain.resource.group.ResourceGroup; +import org.rhq.core.domain.util.PageControl; import org.rhq.core.domain.util.PageList; +import org.rhq.core.domain.util.PageOrdering; import org.rhq.enterprise.server.RHQConstants; +import org.rhq.enterprise.server.alert.AlertManagerLocal; import org.rhq.enterprise.server.auth.SubjectManagerLocal; import org.rhq.enterprise.server.authz.RequiredPermission; import org.rhq.enterprise.server.authz.RequiredPermissions; @@ -110,6 +117,9 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN @EJB private OperationManagerLocal operationManager;
+ @EJB + private AlertManagerLocal alertManager; + @Override public void linkResource(Resource resource) { List<StorageNode> storageNodes = this.getStorageNodes(); @@ -427,7 +437,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN } entityManager.flush(); } - + private StorageNodeLoadComposite.MeasurementAggregateWithUnits getMeasurementAggregateWithUnits(Subject subject, int schedId, MeasurementUnits units, long beginTime, long endTime) { MeasurementAggregate measurementAggregate = measurementManager.getAggregate(subject, schedId, beginTime, @@ -472,4 +482,81 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
operationManager.scheduleGroupOperation(subjectManager.getOverlord(), schedule); } -} + + @Override + public PageList<Alert> findNotAcknowledgedStorageNodeAlerts(Subject subject) { + return findStorageNodeAlerts(subject, false); + } + + @Override + public PageList<Alert> findAllStorageNodeAlerts(Subject subject) { + return findStorageNodeAlerts(subject, true); + } + + /** + * Find the set of alerts related to Storage Node resources and sub-resources. + * + * @param subject subject + * @param allAlerts if [true] then return all alerts; if [false] then return only alerts that are not acknowledged + * @return alerts + */ + private PageList<Alert> findStorageNodeAlerts(Subject subject, boolean allAlerts) { + Integer[] resouceIdsWithAlertDefinitions = findResourcesWithAlertDefinitions(); + PageList<Alert> alerts = new PageList<Alert>(); + + if( resouceIdsWithAlertDefinitions != null && resouceIdsWithAlertDefinitions.length != 0 ){ + AlertCriteria criteria = new AlertCriteria(); + criteria.setPageControl(PageControl.getUnlimitedInstance()); + criteria.addFilterResourceIds(resouceIdsWithAlertDefinitions); + criteria.addSortCtime(PageOrdering.DESC); + + alerts = alertManager.findAlertsByCriteria(subject, criteria); + + if (!allAlerts) { + //select on alerts that are not acknowledge + PageList<Alert> trimmedAlerts = new PageList<Alert>(); + for (Alert alert : alerts) { + if (alert.getAcknowledgeTime() == null || alert.getAcknowledgeTime() <= 0) { + trimmedAlerts.add(alert); + } + } + + alerts = trimmedAlerts; + } + } + + return alerts; + } + + /** + * Return resource Ids for all resources and sub-resources of Storage Nodes that + * have alert definitions. This will be used by the resource criteria to find + * all alerts triggered for storage nodes. 
+ * + * @return + */ + private Integer[] findResourcesWithAlertDefinitions() { + List<Integer> resourceIdsWithAlertDefinitions = new ArrayList<Integer>(); + List<StorageNode> test2 = getStorageNodes(); + + Queue<Resource> unvisitedResources = new LinkedList<Resource>(); + for (StorageNode node : test2) { + if (node.getResource() != null) { + unvisitedResources.add(node.getResource()); + } + } + + while(!unvisitedResources.isEmpty()){ + Resource resource = unvisitedResources.poll(); + if (resource.getAlertDefinitions() != null) { + resourceIdsWithAlertDefinitions.add(resource.getId()); + } + + for(Resource child: resource.getChildResources()){ + unvisitedResources.add(child); + } + } + + return resourceIdsWithAlertDefinitions.toArray(new Integer[resourceIdsWithAlertDefinitions.size()]); + } +} \ No newline at end of file diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java index 5c3f092..a9b2514 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java @@ -22,6 +22,7 @@ import java.util.List;
import javax.ejb.Local;
+import org.rhq.core.domain.alert.Alert; import org.rhq.core.domain.auth.Subject; import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.cloud.StorageNodeLoadComposite; @@ -65,13 +66,29 @@ public interface StorageNodeManagerLocal { * @return list of nodes */ PageList<StorageNode> findStorageNodesByCriteria(Subject subject, StorageNodeCriteria criteria); - + + /** + * Fetches the list of Storage Node related alerts that have not yet been acknowledged. + * + * @param subject subject + * @return storage nodes alerts not acknowledged + */ + PageList<Alert> findNotAcknowledgedStorageNodeAlerts(Subject subject); + + /** + * Fetches all the Storage Node related alerts. + * + * @param subject subject + * @return all storage nodes alerts + */ + PageList<Alert> findAllStorageNodeAlerts(Subject subject); + /** * <p>Prepares the node for subsequent upgrade.</p> * <p> CAUTION: this method will set the RHQ server to maintenance mode, RHQ storage flushes all the data to disk * and backup of all the keyspaces is created</p> * <p>the subject needs to have <code>MANAGE_SETTINGS</code> and <code>MANAGE_INVENTORY</code> permissions.</p> - * + * * @param subject caller * @param storageNode storage node on which the prepareForUpgrade operation should be run */ diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java index d4c9f78..72432db 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java @@ -20,6 +20,7 @@ package org.rhq.enterprise.server.cloud;
import javax.ejb.Remote;
+import org.rhq.core.domain.alert.Alert; import org.rhq.core.domain.auth.Subject; import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.cloud.StorageNodeLoadComposite; @@ -28,7 +29,7 @@ import org.rhq.core.domain.util.PageList;
/** * Remote interface to the manager responsible for creating and managing storage nodes. - * + * * @author Jirka Kremser */ @Remote @@ -36,9 +37,9 @@ public interface StorageNodeManagerRemote {
/** * <p>Returns the summary of load of the storage node.</p> - * + * * <p>the subject needs to have <code>MANAGE_SETTINGS</code> permissions.</p> - * + * * @param subject user that must have proper permissions * @param node storage node entity (it can be a new object, but the id should be set properly) * @param beginTime the start time @@ -58,4 +59,19 @@ public interface StorageNodeManagerRemote { */ PageList<StorageNode> findStorageNodesByCriteria(Subject subject, StorageNodeCriteria criteria);
+ /** + * Fetches the list of Storage Node related alerts that have not yet been acknowledged. + * + * @param subject subject + * @return storage nodes alerts not acknowledged + */ + PageList<Alert> findNotAcknowledgedStorageNodeAlerts(Subject subject); + + /** + * Fetches all the Storage Node related alerts. + * + * @param subject subject + * @return all storage nodes alerts + */ + PageList<Alert> findAllStorageNodeAlerts(Subject subject); }
commit 4674c9c66fef570a0abb3961205056fed3e75097 Author: Stefan Negrea snegrea@redhat.com Date: Thu Jul 11 14:58:52 2013 -0500
Update the calculation for the percentage of disk space used. The percentage now represents the amount of disk used by the Cassandra data files.
diff --git a/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java b/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java index dcfdc37..8995ec0 100644 --- a/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java +++ b/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java @@ -300,7 +300,7 @@ public class AlertDefinitionServerPluginComponent implements ServerPluginCompone AlertCondition ac = new AlertCondition(); ac.setCategory(AlertConditionCategory.THRESHOLD); ac.setComparator(">"); - ac.setThreshold(0.75D); + ac.setThreshold(0.5D);
List<Integer> measurementDefinitionIds = new ArrayList<Integer>(1); for (MeasurementDefinition d : resourceType.getMetricDefinitions()) { diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/StorageServiceComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/StorageServiceComponent.java index 7281f9b..e5a2283 100644 --- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/StorageServiceComponent.java +++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/StorageServiceComponent.java @@ -60,6 +60,8 @@ public class StorageServiceComponent extends ComplexConfigurationResourceCompone private static final String OWNERSHIP_METRIC_NAME = "Ownership"; private static final String PARTITION_DISK_USED_PERCENTAGE_METRIC_NAME = "Calculated.PartitionDiskUsedPercentage"; private static final String DATA_FILE_LOCATIONS_NAME = "AllDataFileLocations"; + private static final String LOAD_NAME = "Load"; + private Log log = LogFactory.getLog(StorageServiceComponent.class); private InetAddress host;
@@ -178,23 +180,30 @@ public class StorageServiceComponent extends ComplexConfigurationResourceCompone } break; } else if (PARTITION_DISK_USED_PERCENTAGE_METRIC_NAME.equals(request.getName())) { - EmsAttribute attribute = bean.getAttribute(DATA_FILE_LOCATIONS_NAME); - Object valueObject = attribute.refresh(); - if (valueObject instanceof String[]) { + + + EmsAttribute loadAttribute = bean.getAttribute(LOAD_NAME); + Object loadValue = loadAttribute.refresh(); + + EmsAttribute dataFileLocationAttribute = bean.getAttribute(DATA_FILE_LOCATIONS_NAME); + Object dataFileLocationValue = dataFileLocationAttribute.refresh(); + + if (loadValue != null && dataFileLocationValue != null && dataFileLocationValue instanceof String[]) { //Please visit for details: https://issues.apache.org/jira/browse/CASSANDRA-2749 //The average usage of all partitions with the data will be reported. //Cassandra selects the partition with most free space for SStable flush and compaction. - report.addData(new MeasurementDataNumeric(request, - getPartitionDiskUsedPercentage((String[]) valueObject))); + double load = Double.parseDouble(loadValue.toString()); + + report.addData(new MeasurementDataNumeric(request, getPartitionDiskUsedPercentage(load, + (String[]) dataFileLocationValue))); } } } }
- private double getPartitionDiskUsedPercentage(String[] paths) { + private double getPartitionDiskUsedPercentage(double dataSize, String[] paths) { List<String> visitedMountPoints = new ArrayList<String>(); long totalDiskSpace = 0; - long totalUsedDiskSpace = 0;
for (String path : paths) { try { @@ -202,7 +211,6 @@ public class StorageServiceComponent extends ComplexConfigurationResourceCompone if (!visitedMountPoints.contains(fileSystemInfo.getMountPoint())) { visitedMountPoints.add(fileSystemInfo.getMountPoint()); totalDiskSpace += fileSystemInfo.getFileSystemUsage().getTotal(); - totalUsedDiskSpace += fileSystemInfo.getFileSystemUsage().getUsed(); } } catch (Exception e) { log.error("Unable to determine file system usage information for data file location " + path, e); @@ -210,7 +218,7 @@ public class StorageServiceComponent extends ComplexConfigurationResourceCompone }
if (totalDiskSpace != 0) { - double rawPercentage = ((double) totalUsedDiskSpace) / ((double) totalDiskSpace); + double rawPercentage = dataSize / ((double) totalDiskSpace); return Math.round(rawPercentage * 100.0) / 100.0; }
diff --git a/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml index 3aa701b..e6bc1da 100644 --- a/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml +++ b/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml @@ -187,8 +187,8 @@ </operation>
<metric property="CurrentGenerationNumber" dataType="trait" displayType="summary" description="Current generation number"/> - <metric property="Calculated.PartitionDiskUsedPercentage" dataType="measurement" units="percentage" displayType="summary" description="Percentage of total disk space used for the partition that contains the data files. - If multiple data locations are specified then this will report the average utilization accross all the partitions."/> + <metric property="Calculated.PartitionDiskUsedPercentage" displayName="Data File Disk Used Percentage" dataType="measurement" units="percentage" displayType="summary" description="Percentage of disk space used by the data files. If multiple data locations are specified then this will report + the average utilization accross all the partitions that contain data files."/> <metric property="ExceptionCount" measurementType="trendsup" dataType="measurement" displayType="summary" description="Exception Count"/> <metric property="Initialized" dataType="trait" displayType="summary" description="Initialized"/> <metric property="Joined" dataType="trait" displayType="summary" description="Joined"/>
commit a1c2dcac50a7a0df8ab17898952e7fa6270dfc3b Author: John Sanda jsanda@redhat.com Date: Thu Jul 11 21:41:34 2013 -0400
use upgradesstables and not scrub command to regenerate sstables
Also execute a drain operation at the end to flush all memtables and commit log to ensure no data loss prior to upgrade.
diff --git a/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.sh b/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.sh index 8a83320..7c0b88a 100755 --- a/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.sh +++ b/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.sh @@ -30,24 +30,31 @@ cp $PATCH $RHQ_SERVER_DIR/rhq-storage/lib echo "Starting RHQ Storage node" $RHQ_SERVER_DIR/bin/rhqctl start --storage
+# sleep for a few seconds while Cassandra starts up +echo "Waiting for RHQ Storage Node to start up..." +sleep 3 + # run the CQL script echo "Running CQL script to disable table compression" export CQLSH_HOST=$CQLSH_HOST export CQL_PORT=$CQL_PORT $RHQ_SERVER_DIR/rhq-storage/bin/cqlsh -u rhqadmin -p rhqadmin -f ./disable_compression.cql
-# scrub all keyspaces +# rewrite all sstables echo "Rebuilding data files for system keyspace" -$RHQ_SERVER_DIR/rhq-storage/bin/nodetool -u rhqadmin -pw rhqadmin -p $JMX_PORT scrub system +$RHQ_SERVER_DIR/rhq-storage/bin/nodetool -u rhqadmin -pw rhqadmin -p $JMX_PORT upgradesstables --include-all-sstables system
echo "Rebuilding data files for system_traces keyspace" -$RHQ_SERVER_DIR/rhq-storage/bin/nodetool -u rhqadmin -pw rhqadmin -p $JMX_PORT scrub system_traces +$RHQ_SERVER_DIR/rhq-storage/bin/nodetool -u rhqadmin -pw rhqadmin -p $JMX_PORT upgradesstables --include-all-sstables system_traces
echo "Rebuilding data files for system_auth keyspace" -$RHQ_SERVER_DIR/rhq-storage/bin/nodetool -u rhqadmin -pw rhqadmin -p $JMX_PORT scrub system_auth +$RHQ_SERVER_DIR/rhq-storage/bin/nodetool -u rhqadmin -pw rhqadmin -p $JMX_PORT upgradesstables --include-all-sstables system_auth
echo "Rebuilding data files for rhq keyspace" -$RHQ_SERVER_DIR/rhq-storage/bin/nodetool -u rhqadmin -pw rhqadmin -p $JMX_PORT scrub rhq +$RHQ_SERVER_DIR/rhq-storage/bin/nodetool -u rhqadmin -pw rhqadmin -p $JMX_PORT upgradesstables --include-all-sstables rhq + +# flush memtables and commit log to ensure no data loss prior to upgrade +$RHQ_SERVER_DIR/rhq-storage/bin/nodetool -u rhqadmin -pw rhqadmin -p $JMX_PORT drain
echo "Shutting down the RHQ Storage node" $RHQ_SERVER_DIR/bin/rhqctl stop
commit 957781e62543ff5023c4b59567ee76c6ab2a9d9a Author: John Mazzitelli mazz@redhat.com Date: Thu Jul 11 17:14:48 2013 -0400
BZ 983552 shorten the deployment names of the ejb3 jar and the startup subsystem
diff --git a/etc/agent-versioning/build.xml b/etc/agent-versioning/build.xml index d6c18a1..7a60e51 100644 --- a/etc/agent-versioning/build.xml +++ b/etc/agent-versioning/build.xml @@ -112,7 +112,7 @@ ant -Dserver.home.dir=/my/server/rhq-agent -Dnew.version=0.0.0.GA -Dnew.build=0 <target name="init-server"> <property name="env.RHQ_SERVER_HOME" location="dev-container"/> <property name="server.home.dir" location="${env.RHQ_SERVER_HOME}"/> - <property name="server.download.dir" location="${server.home.dir}/modules/org/rhq/rhq-enterprise-server-startup-subsystem/main/deployments/rhq.ear/rhq-downloads/rhq-agent"/> + <property name="server.download.dir" location="${server.home.dir}/modules/org/rhq/server-startup/main/deployments/rhq.ear/rhq-downloads/rhq-agent"/> <available property="_server.download.dir.exists" file="${server.download.dir}"/> <fail unless="_server.download.dir.exists" message="Cannot find the server download directory at ${server.download.dir}"/>
diff --git a/modules/enterprise/gui/coregui/src/main/webapp/WEB-INF/web.xml b/modules/enterprise/gui/coregui/src/main/webapp/WEB-INF/web.xml index 6d17177..481073e 100644 --- a/modules/enterprise/gui/coregui/src/main/webapp/WEB-INF/web.xml +++ b/modules/enterprise/gui/coregui/src/main/webapp/WEB-INF/web.xml @@ -1,5 +1,5 @@ <?xml version="1.0"?> - + <web-app xmlns="http://java.sun.com/xml/ns/javaee" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://java.sun.com/xml/ns/javaee http://java.sun.com/xml/ns/javaee/web-app_2_5.xsd" @@ -9,7 +9,7 @@
<context-param> <param-name>resteasy.jndi.resources</param-name> - <param-value>java:app/rhq-enterprise-server-ejb3/InventorySummaryHandler,java:app/rhq-enterprise-server-ejb3/AlertDefinitionHandler,java:app/rhq-enterprise-server-ejb3/ConfigurationHistoryHandler,java:app/rhq-enterprise-server-ejb3/RecentAlertHandler,java:app/rhq-enterprise-server-ejb3/SuspectMetricHandler,java:app/rhq-enterprise-server-ejb3/DriftComplianceHandler,java:app/rhq-enterprise-server-ejb3/RecentOperationsHandler,java:app/rhq-enterprise-server-ejb3/RecentDriftHandler,java:app/rhq-enterprise-server-ejb3/PlatformUtilizationHandler</param-value> + <param-value>java:app/rhq-server/InventorySummaryHandler,java:app/rhq-server/AlertDefinitionHandler,java:app/rhq-server/ConfigurationHistoryHandler,java:app/rhq-server/RecentAlertHandler,java:app/rhq-server/SuspectMetricHandler,java:app/rhq-server/DriftComplianceHandler,java:app/rhq-server/RecentOperationsHandler,java:app/rhq-server/RecentDriftHandler,java:app/rhq-server/PlatformUtilizationHandler</param-value> <!--<description>List of jndi names of EJBs local interfaces that define REST stuff</description>--> </context-param> <context-param> @@ -192,7 +192,7 @@ <servlet> <servlet-name>org.rhq.enterprise.gui.coregui.CoreGUI DriftGWTService</servlet-name> <servlet-class>org.rhq.enterprise.gui.coregui.server.gwt.DriftGWTServiceImpl</servlet-class> - </servlet> + </servlet> <servlet> <servlet-name>org.rhq.enterprise.gui.coregui.CoreGUI PlatformUtilizationGWTService</servlet-name> <servlet-class>org.rhq.enterprise.gui.coregui.server.gwt.PlatformUtilizationGWTServiceImpl</servlet-class> @@ -205,7 +205,7 @@ <servlet-name>org.rhq.enterprise.gui.coregui.CoreGUI TopologyGWTService</servlet-name> <servlet-class>org.rhq.enterprise.gui.coregui.server.gwt.TopologyGWTServiceImpl</servlet-class> </servlet> - +
<servlet> <servlet-name>Resteasy</servlet-name> @@ -373,7 +373,7 @@ <servlet-mapping> <servlet-name>org.rhq.enterprise.gui.coregui.CoreGUI DriftGWTService</servlet-name> <url-pattern>/org.rhq.enterprise.gui.coregui.CoreGUI/DriftGWTService</url-pattern> - </servlet-mapping> + </servlet-mapping> <servlet-mapping> <servlet-name>org.rhq.enterprise.gui.coregui.CoreGUI PlatformUtilizationGWTService</servlet-name> <url-pattern>/org.rhq.enterprise.gui.coregui.CoreGUI/PlatformUtilizationGWTService</url-pattern> @@ -398,7 +398,7 @@ </welcome-file-list>
- <!-- Uncomment the below to automatically redirect all HTTP requests to the + <!-- Uncomment the below to automatically redirect all HTTP requests to the corresponding HTTPS URLs. --> <!-- <security-constraint> diff --git a/modules/enterprise/remoting/cli/src/etc/generate-jaxb-client-types.bat b/modules/enterprise/remoting/cli/src/etc/generate-jaxb-client-types.bat index 2f197478..68e8706 100644 --- a/modules/enterprise/remoting/cli/src/etc/generate-jaxb-client-types.bat +++ b/modules/enterprise/remoting/cli/src/etc/generate-jaxb-client-types.bat @@ -1,16 +1,16 @@ rem This script should consume all the wsdls and compile the JAXB types all rem into one directory.
-call ../../../../dev-container/jbossas/bin/wsconsume.bat -k http://127.0.0.1:7080/rhq-rhq-enterprise-server-ejb3/RoleManagerBean?wsdl -p org.rhq.enterprise.server.ws +call ../../../../dev-container/jbossas/bin/wsconsume.bat -k http://127.0.0.1:7080/rhq-rhq-server/RoleManagerBean?wsdl -p org.rhq.enterprise.server.ws
-call ../../../../dev-container/jbossas/bin/wsconsume.bat -k http://127.0.0.1:7080/rhq-rhq-enterprise-server-ejb3/ContentManagerBean?wsdl -p org.rhq.enterprise.server.ws +call ../../../../dev-container/jbossas/bin/wsconsume.bat -k http://127.0.0.1:7080/rhq-rhq-server/ContentManagerBean?wsdl -p org.rhq.enterprise.server.ws
-call ../../../../dev-container/jbossas/bin/wsconsume.bat -k http://127.0.0.1:7080/rhq-rhq-enterprise-server-ejb3/SubjectManagerBean?wsdl -p org.rhq.enterprise.server.ws +call ../../../../dev-container/jbossas/bin/wsconsume.bat -k http://127.0.0.1:7080/rhq-rhq-server/SubjectManagerBean?wsdl -p org.rhq.enterprise.server.ws
-call ../../../../dev-container/jbossas/bin/wsconsume.bat -k http://127.0.0.1:7080/rhq-rhq-enterprise-server-ejb3/OperationManagerBean?ws... -p org.rhq.enterprise.server.ws +call ../../../../dev-container/jbossas/bin/wsconsume.bat -k http://127.0.0.1:7080/rhq-rhq-server/OperationManagerBean?wsdl -p org.rhq.enterprise.server.ws
-call ../../../../dev-container/jbossas/bin/wsconsume.bat -k http://127.0.0.1:7080/rhq-rhq-enterprise-server-ejb3/RepoManagerBean?wsdl -p org.rhq.enterprise.server.ws +call ../../../../dev-container/jbossas/bin/wsconsume.bat -k http://127.0.0.1:7080/rhq-rhq-server/RepoManagerBean?wsdl -p org.rhq.enterprise.server.ws
-call ../../../../dev-container/jbossas/bin/wsconsume.bat -k http://127.0.0.1:7080/rhq-rhq-enterprise-server-ejb3/ConfigurationManagerBea... -p org.rhq.enterprise.server.ws +call ../../../../dev-container/jbossas/bin/wsconsume.bat -k http://127.0.0.1:7080/rhq-rhq-server/ConfigurationManagerBean?wsdl -p org.rhq.enterprise.server.ws
-call ../../../../dev-container/jbossas/bin/wsconsume.bat -k http://127.0.0.1:7080/rhq-rhq-enterprise-server-ejb3/ResourceManagerBean?wsd... -p org.rhq.enterprise.server.ws \ No newline at end of file +call ../../../../dev-container/jbossas/bin/wsconsume.bat -k http://127.0.0.1:7080/rhq-rhq-server/ResourceManagerBean?wsdl -p org.rhq.enterprise.server.ws \ No newline at end of file diff --git a/modules/enterprise/remoting/cli/src/etc/generate-jaxb-client-types.sh b/modules/enterprise/remoting/cli/src/etc/generate-jaxb-client-types.sh index e229fb4..f83293b 100644 --- a/modules/enterprise/remoting/cli/src/etc/generate-jaxb-client-types.sh +++ b/modules/enterprise/remoting/cli/src/etc/generate-jaxb-client-types.sh @@ -3,16 +3,16 @@ # This script should consume all the wsdls and compile the JAXB types all # into one directory.
-../../../../dev-container/jbossas/bin/wsconsume.sh -k http://localhost:7080/rhq-rhq-enterprise-server-ejb3/RoleManagerBean?wsdl -p org.rhq.enterprise.server.ws +../../../../dev-container/jbossas/bin/wsconsume.sh -k http://localhost:7080/rhq-rhq-server/RoleManagerBean?wsdl -p org.rhq.enterprise.server.ws
-../../../../dev-container/jbossas/bin/wsconsume.sh -k http://localhost:7080/rhq-rhq-enterprise-server-ejb3/ContentManagerBean?wsdl -p org.rhq.enterprise.server.ws +../../../../dev-container/jbossas/bin/wsconsume.sh -k http://localhost:7080/rhq-rhq-server/ContentManagerBean?wsdl -p org.rhq.enterprise.server.ws
-../../../../dev-container/jbossas/bin/wsconsume.sh -k http://localhost:7080/rhq-rhq-enterprise-server-ejb3/SubjectManagerBean?wsdl -p org.rhq.enterprise.server.ws +../../../../dev-container/jbossas/bin/wsconsume.sh -k http://localhost:7080/rhq-rhq-server/SubjectManagerBean?wsdl -p org.rhq.enterprise.server.ws
-../../../../dev-container/jbossas/bin/wsconsume.sh -k http://localhost:7080/rhq-rhq-enterprise-server-ejb3/OperationManagerBean?ws... -p org.rhq.enterprise.server.ws +../../../../dev-container/jbossas/bin/wsconsume.sh -k http://localhost:7080/rhq-rhq-server/OperationManagerBean?wsdl -p org.rhq.enterprise.server.ws
-../../../../dev-container/jbossas/bin/wsconsume.sh -k http://localhost:7080/rhq-rhq-enterprise-server-ejb3/RepoManagerBean?wsdl -p org.rhq.enterprise.server.ws +../../../../dev-container/jbossas/bin/wsconsume.sh -k http://localhost:7080/rhq-rhq-server/RepoManagerBean?wsdl -p org.rhq.enterprise.server.ws
-../../../../dev-container/jbossas/bin/wsconsume.sh -k http://localhost:7080/rhq-rhq-enterprise-server-ejb3/ConfigurationManagerBea... -p org.rhq.enterprise.server.ws +../../../../dev-container/jbossas/bin/wsconsume.sh -k http://localhost:7080/rhq-rhq-server/ConfigurationManagerBean?wsdl -p org.rhq.enterprise.server.ws
-../../../../dev-container/jbossas/bin/wsconsume.sh -k http://localhost:7080/rhq-rhq-enterprise-server-ejb3/ResourceManagerBean?wsd... -p org.rhq.enterprise.server.ws \ No newline at end of file +../../../../dev-container/jbossas/bin/wsconsume.sh -k http://localhost:7080/rhq-rhq-server/ResourceManagerBean?wsdl -p org.rhq.enterprise.server.ws \ No newline at end of file diff --git a/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml b/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml index bf06886..a81b6cd 100644 --- a/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml +++ b/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml @@ -80,7 +80,7 @@ <property name="jboss.modules.dir" location="${project.build.outputDirectory}/modules" /> <mkdir dir="${jboss.modules.dir}" />
- <property name="rhq.ear.dir" location="${jboss.modules.dir}/org/rhq/rhq-enterprise-server-startup-subsystem/main/deployments/${rhq.earName}" /> + <property name="rhq.ear.dir" location="${jboss.modules.dir}/org/rhq/server-startup/main/deployments/${rhq.earName}" />
<condition property="use-postgres" value="true"> <contains string="${rhq.server.database.type-mapping}" substring="postgres" casesensitive="false" /> @@ -164,7 +164,7 @@ <fail unless="jboss.home.is.valid" message="${jboss.home} does not contain a valid JBossAS installation - aborting!" />
<echo>Adding main EAR app module to ${jboss.modules.dir} ...</echo> - <unzip src="${settings.localRepository}/org/rhq/rhq-enterprise-server-startup-subsystem/${project.version}/rhq-enterprise-server-startup-subsystem-${project.version}.zip" + <unzip src="${settings.localRepository}/org/rhq/server-startup/${project.version}/server-startup-${project.version}.zip" dest="${jboss.modules.dir}" />
<echo>Adding installer module to ${jboss.modules.dir} ...</echo> diff --git a/modules/enterprise/server/appserver/src/main/scripts/storage_setup.groovy b/modules/enterprise/server/appserver/src/main/scripts/storage_setup.groovy index ccb90b9..857c268 100644 --- a/modules/enterprise/server/appserver/src/main/scripts/storage_setup.groovy +++ b/modules/enterprise/server/appserver/src/main/scripts/storage_setup.groovy @@ -168,11 +168,11 @@ def prepareModulesDir(basedir) { ) ant.symlink(link: "$modulesDir/org/rhq/rhq-server-control", resource: "$defaultModulesDir/org/rhq/rhq-server-control")
- def downloadsDir = "$modulesDir/org/rhq/rhq-enterprise-server-startup-subsystem/main/deployments/rhq.ear/rhq-downloads" + def downloadsDir = "$modulesDir/org/rhq/server-startup/main/deployments/rhq.ear/rhq-downloads" ant.mkdir(dir: downloadsDir) ant.symlink( link: "$downloadsDir/rhq-agent", - resource: "$defaultModulesDir/org/rhq/rhq-enterprise-server-startup-subsystem/main/deployments/rhq.ear/rhq-downloads/rhq-agent" + resource: "$defaultModulesDir/org/rhq/server-startup/main/deployments/rhq.ear/rhq-downloads/rhq-agent" ) }
diff --git a/modules/enterprise/server/client-api/src/test/java/org/rhq/enterprise/client/test/LocalClientTest.java b/modules/enterprise/server/client-api/src/test/java/org/rhq/enterprise/client/test/LocalClientTest.java index 6e2fcf6..f350843 100644 --- a/modules/enterprise/server/client-api/src/test/java/org/rhq/enterprise/client/test/LocalClientTest.java +++ b/modules/enterprise/server/client-api/src/test/java/org/rhq/enterprise/client/test/LocalClientTest.java @@ -47,7 +47,7 @@ import org.rhq.enterprise.client.LocalClient; import org.rhq.test.JMockTest;
/** - * + * * * @author Lukas Krejci */ @@ -59,29 +59,29 @@ public class LocalClientTest extends JMockTest { return CONTEXT_MOCK_FOR_TEST; } } - + public static Context CONTEXT_MOCK_FOR_TEST = null; - + @BeforeClass public void setUpNaming() { System.setProperty(Context.INITIAL_CONTEXT_FACTORY, FakeContextFactory.class.getName()); } - + /** * Needs to be called from within a test method so that the "context" variable is available. - * + * * @throws NamingException */ private void setupFakeJndiLookup() throws NamingException { CONTEXT_MOCK_FOR_TEST = context.mock(Context.class); - + context.checking(new Expectations() {{ allowing(CONTEXT_MOCK_FOR_TEST).lookup(with(any(String.class))); will(new CustomAction("Fake JNDI lookup") {
@Override public Object invoke(Invocation invocation) throws Throwable { - // JNDI name format like: java:global/rhq/rhq-enterprise-server-ejb3/SystemManagerBean!org.rhq.enterprise.server.system.SystemManagerLocal + // JNDI name format like: java:global/rhq/rhq-server/SystemManagerBean!org.rhq.enterprise.server.system.SystemManagerLocal String jndiName = (String) invocation.getParameter(0); String beanName = jndiName.substring(0,jndiName.indexOf('!')); beanName = beanName.substring(beanName.lastIndexOf('/') + 1); @@ -105,7 +105,7 @@ public class LocalClientTest extends JMockTest { }); } }); - + allowing(CONTEXT_MOCK_FOR_TEST).close(); }}); } @@ -130,15 +130,15 @@ public class LocalClientTest extends JMockTest { @Test public void testResilienceAgainstContextClassloaders() throws Exception { setupFakeJndiLookup(); - + ClassLoader origCl = Thread.currentThread().getContextClassLoader(); try { ClassLoader differentCl = new URLClassLoader(new URL[0], getClass().getClassLoader()); - + Thread.currentThread().setContextClassLoader(differentCl);
LocalClient lc = new LocalClient(null); - + //this call creates the proxy and is theoretically prone to the context classloader Object am = lc.getScriptingAPI().get(RhqManager.AlertManager);
diff --git a/modules/enterprise/server/ear/pom.xml b/modules/enterprise/server/ear/pom.xml index 0e4bc83..e2a4cdb 100644 --- a/modules/enterprise/server/ear/pom.xml +++ b/modules/enterprise/server/ear/pom.xml @@ -238,7 +238,7 @@ <ejbModule> <groupId>${project.groupId}</groupId> <artifactId>rhq-enterprise-server</artifactId> - <bundleFileName>rhq-enterprise-server-ejb3.jar</bundleFileName> + <bundleFileName>rhq-server.jar</bundleFileName> </ejbModule>
<ejbClientModule> @@ -832,7 +832,7 @@ <rhq.rootDir>../../../..</rhq.rootDir> <rhq.containerDir>${rhq.rootDir}/${rhq.devContainerServerPath}</rhq.containerDir> <rhq.deploymentName>${project.build.finalName}.ear</rhq.deploymentName> - <rhq.deploymentDir>${rhq.containerDir}/modules/org/rhq/rhq-enterprise-server-startup-subsystem/main/deployments/${rhq.deploymentName}</rhq.deploymentDir> + <rhq.deploymentDir>${rhq.containerDir}/modules/org/rhq/server-startup/main/deployments/${rhq.deploymentName}</rhq.deploymentDir> </properties>
<build> diff --git a/modules/enterprise/server/ear/src/main/application/META-INF/jboss-deployment-structure.xml b/modules/enterprise/server/ear/src/main/application/META-INF/jboss-deployment-structure.xml index d0a665d..80a648f 100644 --- a/modules/enterprise/server/ear/src/main/application/META-INF/jboss-deployment-structure.xml +++ b/modules/enterprise/server/ear/src/main/application/META-INF/jboss-deployment-structure.xml @@ -33,7 +33,7 @@ </sub-deployment>
<!-- This corresponds to the Server EJB3 JAR --> - <sub-deployment name="rhq-enterprise-server-ejb3.jar"> + <sub-deployment name="rhq-server.jar"> <dependencies> <module name="org.jboss.msc" export="true" /> <module name="org.jboss.as.controller" export="true" /> diff --git a/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java b/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java index 284ddbd..160b5dd 100644 --- a/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java +++ b/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java @@ -49,7 +49,7 @@ import org.rhq.enterprise.server.installer.ServerInstallUtil.SupportedDatabaseTy */ public class InstallerServiceImpl implements InstallerService {
- private static final String RHQ_EXTENSION_NAME = "org.rhq.rhq-enterprise-server-startup-subsystem"; + private static final String RHQ_EXTENSION_NAME = "org.rhq.server-startup"; private static final String RHQ_SUBSYSTEM_NAME = "rhq-startup"; private static final String EAR_NAME = "rhq.ear"; private static final String SYSPROP_PROPFILE = "rhq.server.properties-file"; diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/client/security/test/JndiAccessTest.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/client/security/test/JndiAccessTest.java index 334e768..1cdc1d5 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/client/security/test/JndiAccessTest.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/client/security/test/JndiAccessTest.java @@ -34,7 +34,7 @@ import org.rhq.enterprise.server.system.SystemManagerBean; import org.rhq.enterprise.server.util.LookupUtil;
/** - * + * * * @author Lukas Krejci */ @@ -59,21 +59,21 @@ public class JndiAccessTest extends ScriptableAbstractEJB3Test { public void testEjbsAccessibleThroughPrivilegedCode() { LookupUtil.getSubjectManager().getOverlord(); } - + public void testEjbsAccessibleThroughLocalClient() throws ScriptException, IOException { Subject overlord = LookupUtil.getSubjectManager().getOverlord(); - + ScriptEngine engine = getEngine(overlord); - + engine.eval("SubjectManager.getSubjectByName('rhqadmin');"); } - + public void testLocalEjbsInaccessibleThroughJndiLookup() throws ScriptException, IOException { Subject overlord = LookupUtil.getSubjectManager().getOverlord(); - + ScriptEngine engine = getEngine(overlord); - - String jndiName = "java:global/rhq/rhq-enterprise-server-ejb3/" + + String jndiName = "java:global/rhq/rhq-server/" + SystemManagerBean.class.getSimpleName() + "!" + SystemManagerBean.class.getName().replace("Bean", "Local"); @@ -85,19 +85,19 @@ public class JndiAccessTest extends ScriptableAbstractEJB3Test { + jndiName + "');\n" + "systemManager.isDebugModeEnabled();"); - + Assert.fail("The script shouldn't have been able to call local SLSB method."); } catch (ScriptException e) { checkIsDesiredSecurityException(e); } } - + public void testLocalEjbsInaccessibleThroughJndiLookupWithCustomUrlPackages() throws ScriptException, IOException { Subject overlord = LookupUtil.getSubjectManager().getOverlord();
ScriptEngine engine = getEngine(overlord);
- String jndiName = "java:global/rhq/rhq-enterprise-server-ejb3/" + String jndiName = "java:global/rhq/rhq-server/" + SystemManagerBean.class.getSimpleName() + "!" + SystemManagerBean.class.getName().replace("Bean", "Local"); @@ -122,10 +122,10 @@ public class JndiAccessTest extends ScriptableAbstractEJB3Test {
public void testRemoteEjbsInaccessibleThroughJndiLookup() throws ScriptException, IOException { Subject overlord = LookupUtil.getSubjectManager().getOverlord(); - + ScriptEngine engine = getEngine(overlord);
- String jndiName = "java:global/rhq/rhq-enterprise-server-ejb3/" + String jndiName = "java:global/rhq/rhq-server/" + SystemManagerBean.class.getSimpleName() + "!" + SystemManagerBean.class.getName().replace("Bean", "Remote"); @@ -137,28 +137,28 @@ public class JndiAccessTest extends ScriptableAbstractEJB3Test { + jndiName + "');\n" + "systemManager.getSystemSettings(subject);"); - + Assert.fail("The script shouldn't have been able to call remote SLSB method directly."); } catch (ScriptException e) { checkIsDesiredSecurityException(e); } } - + public void testScriptCantUseSessionManagerMethods() throws Exception {
- Subject overlord = LookupUtil.getSubjectManager().getOverlord(); - + Subject overlord = LookupUtil.getSubjectManager().getOverlord(); + final ScriptEngine engine = getEngine(overlord); - + class G { private String sessionManager = "" + "org.rhq.enterprise.server.auth.SessionManager.getInstance()."; - + public void testInvoke(String methodCall) throws ScriptException { String code = sessionManager + methodCall;
try { - engine.eval(code); + engine.eval(code); Assert.fail("The script shouldn't have been able to call a method on a SessionManager: " + methodCall); } catch (ScriptException e) { checkIsDesiredSecurityException(e); @@ -166,7 +166,7 @@ public class JndiAccessTest extends ScriptableAbstractEJB3Test { } }; G manager = new G(); - + manager.testInvoke("getLastAccess(0);"); manager.testInvoke("getOverlord()"); manager.testInvoke("getSubject(2);"); @@ -175,27 +175,27 @@ public class JndiAccessTest extends ScriptableAbstractEJB3Test { manager.testInvoke("put(new org.rhq.core.domain.auth.Subject());"); manager.testInvoke("put(new org.rhq.core.domain.auth.Subject(), 0);"); } - + public void testScriptCantObtainRawJDBCConnectionsWithoutCredentials() throws Exception { Subject overlord = LookupUtil.getSubjectManager().getOverlord(); - + ScriptEngine engine = getEngine(overlord); - + try { engine.eval("" + "var ctx = new javax.naming.InitialContext();\n" + "var datasource = ctx.lookup('" + RHQConstants.DATASOURCE_JNDI_NAME + "');\n" + "con = datasource.getConnection();"); - + Assert.fail("The script shouldn't have been able to obtain the datasource from the JNDI."); } catch (ScriptException e) { checkIsDesiredSecurityException(e); } } - + public void testScriptCantUseEntityManager() throws Exception { - Subject overlord = LookupUtil.getSubjectManager().getOverlord(); - + Subject overlord = LookupUtil.getSubjectManager().getOverlord(); + ScriptEngine engine = getEngine(overlord);
try { @@ -204,12 +204,12 @@ public class JndiAccessTest extends ScriptableAbstractEJB3Test { + "var entityManagerFactory = ctx.lookup('" + RHQConstants.ENTITY_MANAGER_JNDI_NAME + "');\n" + "var entityManager = entityManagerFactory.createEntityManager();\n" + "entityManager.find(java.lang.Class.forName('org.rhq.core.domain.resource.Resource'), java.lang.Integer.valueOf('10001'));"); - + Assert.fail("The script shouldn't have been able to use the EntityManager."); } catch (ScriptException e) { checkIsDesiredSecurityException(e); - } - + } + //try harder with manually specifying the initial context factory try { engine.eval("" @@ -220,20 +220,20 @@ public class JndiAccessTest extends ScriptableAbstractEJB3Test { + "var entityManagerFactory = ctx.lookup('" + RHQConstants.ENTITY_MANAGER_JNDI_NAME + "');\n" + "var entityManager = entityManagerFactory.createEntityManager();\n" + "entityManager.find(java.lang.Class.forName('org.rhq.core.domain.resource.Resource'), java.lang.Integer.valueOf('10001'));"); - + Assert.fail("The script shouldn't have been able to use the EntityManager even using custom initial context factory."); } catch (ScriptException e) { checkIsDesiredSecurityException(e); - } + } }
public void testProxyFactoryWorksWithSecuredScriptEngine() throws Exception { - Subject overlord = LookupUtil.getSubjectManager().getOverlord(); - + Subject overlord = LookupUtil.getSubjectManager().getOverlord(); + ScriptEngine engine = getEngine(overlord); - + try { - engine.eval("var resource = ProxyFactory.getResource(10001);"); + engine.eval("var resource = ProxyFactory.getResource(10001);"); } catch (ScriptException e) { //if the script fails (there is no resource with ID 10001) //it should not be because of an access control exception @@ -243,10 +243,10 @@ public class JndiAccessTest extends ScriptableAbstractEJB3Test {
//THIS IS A NEW REQUIREMENT THAT DOESN'T CURRENTLY WORK... // public void testInitialContextFactoryBuilderNotReplaceableUsingScripts() throws Exception { - // Subject overlord = LookupUtil.getSubjectManager().getOverlord(); - // + // Subject overlord = LookupUtil.getSubjectManager().getOverlord(); + // // ScriptEngine engine = getEngine(overlord); - // + // // try { // engine.eval("" // + "var mgrCls = java.lang.Class.forName('javax.naming.spi.NamingManager');\n" @@ -262,11 +262,11 @@ public class JndiAccessTest extends ScriptableAbstractEJB3Test { // NamingHack.bruteForceInitialContextFactoryBuilder(); // } // } - + private static void checkIsDesiredSecurityException(ScriptException e) { String message = e.getMessage(); String permissionTrace = "org.rhq.allow.server.internals.access"; - + if (!message.contains(permissionTrace)) { Assert .fail( @@ -278,7 +278,7 @@ public class JndiAccessTest extends ScriptableAbstractEJB3Test { private static void checkIsNotASecurityException(ScriptException e) { String message = e.getMessage(); String permissionTrace = "org.rhq.allow.server.internals.access"; - + if (message.contains(permissionTrace)) { Assert .fail( diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java index 9ddba97..4dfbd46 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java @@ -124,20 +124,20 @@ public abstract class AbstractEJB3Test extends Arquillian { // we need a way to deploy dependent ears needed to support the server/jar classes. But // building this jar up (as is done in core/domain) was too difficult due to the huge number // of dependencies. 
It was easier, and made sense, to use the already built rhq.ear - // and run as true integration tests. We do thin rhq.ear by removing all of the WAR files, and + // and run as true integration tests. We do thin rhq.ear by removing all of the WAR files, and // deploy only the EJB jars, and the services, which are really the objects under test.
@Deployment protected static EnterpriseArchive getBaseDeployment() {
- // Ensure the test working dir exists + // Ensure the test working dir exists tmpdirRoot.mkdirs();
// deploy the test classes in their own jar, under /lib JavaArchive testClassesJar = ShrinkWrap.create(JavaArchive.class, "test-classes.jar"); testClassesJar = addClasses(testClassesJar, new File("target/test-classes/org"), null);
- // add non itests-2 RHQ classes used by the test classes, as well as needed resources + // add non itests-2 RHQ classes used by the test classes, as well as needed resources testClassesJar.addClass(ThrowableUtil.class); testClassesJar.addClass(MessageDigestGenerator.class); testClassesJar.addClass(StreamUtil.class); @@ -340,22 +340,22 @@ public abstract class AbstractEJB3Test extends Arquillian { .withoutTransitivity().asSingle(EnterpriseArchive.class); // merge rhq.ear into testEar but include only the EJB jars and the supporting libraries. Note that we // don't include the services sar because tests are responsible for prepare/unprepare of all required services, - // we don't want the production services performing any unexpected work. + // we don't want the production services performing any unexpected work. testEar = testEar.merge(rhqEar, Filters.include("/lib.*|/rhq.*ejb3\.jar.*")); // remove startup beans and shutdown listeners, we don't want this to be a full server deployment. The tests // start/stop what they need, typically with test services or mocks. testEar.delete(ArchivePaths - .create("/rhq-enterprise-server-ejb3.jar/org/rhq/enterprise/server/core/StartupBean.class")); + .create("/rhq-server.jar/org/rhq/enterprise/server/core/StartupBean.class")); testEar.delete(ArchivePaths - .create("/rhq-enterprise-server-ejb3.jar/org/rhq/enterprise/server/core/StartupBean$1.class")); + .create("/rhq-server.jar/org/rhq/enterprise/server/core/StartupBean$1.class")); testEar.delete(ArchivePaths - .create("/rhq-enterprise-server-ejb3.jar/org/rhq/enterprise/server/core/ShutdownListener.class")); + .create("/rhq-server.jar/org/rhq/enterprise/server/core/ShutdownListener.class"));
//replace the above startup beans with stripped down versions testEar.add(new ClassAsset(StrippedDownStartupBean.class), ArchivePaths - .create("/rhq-enterprise-server-ejb3.jar/org/rhq/enterprise/server/test/StrippedDownStartupBean.class")); + .create("/rhq-server.jar/org/rhq/enterprise/server/test/StrippedDownStartupBean.class")); testEar.add(new ClassAsset(StrippedDownStartupBeanPreparation.class), ArchivePaths - .create("/rhq-enterprise-server-ejb3.jar/org/rhq/enterprise/server/test/" + .create("/rhq-server.jar/org/rhq/enterprise/server/test/" + "StrippedDownStartupBeanPreparation.class")); testEar.addAsManifestResource(new ByteArrayAsset("<beans/>".getBytes()), ArchivePaths.create("beans.xml"));
@@ -368,7 +368,7 @@ public abstract class AbstractEJB3Test extends Arquillian { // add the application xml declaring the ejb jars testEar.setApplicationXML("application.xml");
- // add additional 3rd party dependent jars needed to support test classes + // add additional 3rd party dependent jars needed to support test classes Collection thirdPartyDeps = new ArrayList(); thirdPartyDeps.add("joda-time:joda-time"); thirdPartyDeps.add("org.jboss.shrinkwrap:shrinkwrap-impl-base"); @@ -496,15 +496,15 @@ public abstract class AbstractEJB3Test extends Arquillian { /** * <p>DO NOT OVERRIDE.</p> * <p>DO NOT DEFINE AN @BeforeMethod</p> - * + * * Instead, override {@link #beforeMethod()}. If you must override, for example, if you * need to use special attributes on your annotation, then ensure you protect the code with * and {@link #inContainer()} call. */ @BeforeMethod protected void __beforeMethod(Method method) throws Throwable { - // Note that Arquillian calls the testng BeforeMethod twice (as of 1.0.2.Final, once - // out of container and once in container. In general the expectation is to execute it + // Note that Arquillian calls the testng BeforeMethod twice (as of 1.0.2.Final, once + // out of container and once in container. In general the expectation is to execute it // one time, and doing it in container allows for the expected injections and context. if (inContainer()) { try { @@ -539,7 +539,7 @@ public abstract class AbstractEJB3Test extends Arquillian { /** * <p>DO NOT OVERRIDE.</p> * <p>DO NOT DEFINE AN @AfterMethod</p> - * + * * Instead, override {@link #afterMethod()}. */ @AfterMethod(alwaysRun = true) @@ -559,33 +559,33 @@ public abstract class AbstractEJB3Test extends Arquillian { }
protected boolean inContainer() { - // If the injection is done we're running in the container. + // If the injection is done we're running in the container. return (null != initialContext); }
/** - * Override Point! Do not implement a @BeforeMethod, instead override this method. + * Override Point! Do not implement a @BeforeMethod, instead override this method. */ protected void beforeMethod() throws Exception { // do nothing if we're not overridden }
/** - * Override Point! Do not implement a @BeforeMethod, instead override this method. + * Override Point! Do not implement a @BeforeMethod, instead override this method. */ protected void beforeMethod(Method method) throws Exception { // do nothing if we're not overridden }
/** - * Override Point! Do not implement an @AfterMethod, instead override this method. note: alwaysRun=true + * Override Point! Do not implement an @AfterMethod, instead override this method. note: alwaysRun=true */ protected void afterMethod() throws Exception { // do nothing if we're not overridden }
/** - * Override Point! Do not implement an @AfterMethod, instead override this method. note: alwaysRun=true + * Override Point! Do not implement an @AfterMethod, instead override this method. note: alwaysRun=true */ protected void afterMethod(ITestResult result, Method meth) throws Exception { // do nothing if we're not overridden @@ -706,7 +706,7 @@ public abstract class AbstractEJB3Test extends Arquillian { }
/* The old AbstractEJB3Test impl extended AssertJUnit. Continue to support the used methods - * with various call-thru methods. + * with various call-thru methods. */
protected void assertNotNull(Object o) { @@ -1020,7 +1020,7 @@ public abstract class AbstractEJB3Test extends Arquillian { scanner.setAgentPluginDir(pluginDirPath); // we don't want to scan for these scanner.setServerPluginDir("ignore no plugins here"); // we don't want to scan for these scanner.setUserPluginDir("ignore no plugins here"); // we don't want to scan for these - scanner.setScanPeriod("9999999"); // we want to manually scan - don't allow for auto-scan to happen + scanner.setScanPeriod("9999999"); // we want to manually scan - don't allow for auto-scan to happen
return preparePluginScannerService(scanner); } @@ -1028,7 +1028,7 @@ public abstract class AbstractEJB3Test extends Arquillian { /** * Note that the standard plugin scanner service is deployed automatically with the test rhq ear, * this is only necessary if you want a custom service. - * + * * @param scannerService */ public PluginDeploymentScannerMBean preparePluginScannerService(PluginDeploymentScannerMBean scannerService) { @@ -1072,7 +1072,7 @@ public abstract class AbstractEJB3Test extends Arquillian { * annotations by creating sessions for different users with different permissions. * * @param subject a JON subject - * @return the session activated subject, a copy of the subject passed in. + * @return the session activated subject, a copy of the subject passed in. */ public Subject createSession(Subject subject) { return SessionManager.getInstance().put(subject); @@ -1086,9 +1086,9 @@ public abstract class AbstractEJB3Test extends Arquillian { * A utility for writing out various objects that need to be persisted for use between * tests. Arquillian (1.0.2) basically "new"s the testng test class on each test, so instance * variables can not be used between tests. Instead, the db or this mechanism needs to be used. - * + * * The file will be placed in the standard temp dir. If it already exists it will be replaced. - * + * * @param filename Do not include the directory. The value will be prepended with the class name. * @param objects * @throws Exception @@ -1106,9 +1106,9 @@ public abstract class AbstractEJB3Test extends Arquillian { /** * A utility for reading in objects written with {@link #writeObjects(String, Object...). They are * placed in the result List in the same order they were written. - * + * * @param filename The same filename used in the write. Do not include the directory. - * @param numObjects the number of objects to read out. Can be less than total written, not greater. + * @param numObjects the number of objects to read out. 
Can be less than total written, not greater. * @throws Exception */ protected List<Object> readObjects(String filename, int numObjects) throws Exception { @@ -1130,16 +1130,16 @@ public abstract class AbstractEJB3Test extends Arquillian {
/** * A utility for cleaning up files created with {@link #writeObjects(String, Object...). - * - * @param filename The same filename used in the write. Do not include the directory. - * @return true if deleted, false otherwise. + * + * @param filename The same filename used in the write. Do not include the directory. + * @return true if deleted, false otherwise. */ protected boolean deleteObjects(String filename) { File file = new File(getTempDir(), filename); return file.delete(); }
- /** + /** * @return a temp directory for testing that is specific to this test class. Specifically tmpdirRoot/this.getClass().getSimpleName(). */ public File getTempDir() { diff --git a/modules/enterprise/server/itests-2/src/test/resources/application.xml b/modules/enterprise/server/itests-2/src/test/resources/application.xml index 06b81e6..e28f755 100644 --- a/modules/enterprise/server/itests-2/src/test/resources/application.xml +++ b/modules/enterprise/server/itests-2/src/test/resources/application.xml @@ -6,6 +6,6 @@ <ejb>rhq-core-domain-ejb3.jar</ejb> </module> <module> - <ejb>rhq-enterprise-server-ejb3.jar</ejb> - </module> + <ejb>rhq-server.jar</ejb> + </module> </application> \ No newline at end of file diff --git a/modules/enterprise/server/itests-2/src/test/resources/jboss-deployment-structure.xml b/modules/enterprise/server/itests-2/src/test/resources/jboss-deployment-structure.xml index 48a80f2..1b5ceb9 100644 --- a/modules/enterprise/server/itests-2/src/test/resources/jboss-deployment-structure.xml +++ b/modules/enterprise/server/itests-2/src/test/resources/jboss-deployment-structure.xml @@ -20,7 +20,7 @@ </deployment>
<!-- This corresponds to the Server EJB3 JAR --> - <sub-deployment name="rhq-enterprise-server-ejb3.jar"> + <sub-deployment name="rhq-server.jar"> <dependencies> <module name="org.jboss.msc" export="true" /> <module name="org.jboss.as.controller" export="true" /> diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/remote/RemoteSafeInvocationHandler.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/remote/RemoteSafeInvocationHandler.java index 3a435ef..2602cf8 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/remote/RemoteSafeInvocationHandler.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/remote/RemoteSafeInvocationHandler.java @@ -44,8 +44,8 @@ import org.rhq.enterprise.server.safeinvoker.HibernateDetachUtility; * Although, we execute only locals to bypass the serialization performed by a remote invocation. Even * though this handler is co-located, for remotes, remoting will serialize the return data immediately. * This is bad for us because since we return domain objects we ned to scrub the data, removing - * hibernate proxies (see {@link HibernateDetachUtility}. - * + * hibernate proxies (see {@link HibernateDetachUtility}. + * * @author Greg Hinkle * @autor Jay Shaughnessy */ @@ -103,7 +103,7 @@ public class RemoteSafeInvocationHandler implements ServerInvocationHandler { Object target = ic.lookup(jndiName); Method m = target.getClass().getMethod(methodInfo[1], sig);
- // switch to the local + // switch to the local jndiName = getLocalJNDIName(remoteClass); target = ic.lookup(jndiName);
@@ -146,12 +146,12 @@ public class RemoteSafeInvocationHandler implements ServerInvocationHandler { }
private static <T> String getLocalJNDIName(Class<?> remoteClass) { - return ("java:global/rhq/rhq-enterprise-server-ejb3/" + remoteClass.getSimpleName().replace("Remote", "Bean") + return ("java:global/rhq/rhq-server/" + remoteClass.getSimpleName().replace("Remote", "Bean") + "!" + remoteClass.getName().replace("Remote", "Local")); }
private static <T> String getRemoteJNDIName(Class<?> remoteClass) { - return ("java:global/rhq/rhq-enterprise-server-ejb3/" + remoteClass.getSimpleName().replace("Remote", "Bean") + return ("java:global/rhq/rhq-server/" + remoteClass.getSimpleName().replace("Remote", "Bean") + "!" + remoteClass.getName()); }
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/LookupUtil.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/LookupUtil.java index 7b60e13..d597363 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/LookupUtil.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/LookupUtil.java @@ -482,7 +482,7 @@ public final class LookupUtil { public static TopologyManagerLocal getTopologyManager() { return lookupLocal(TopologyManagerBean.class); } - + public static StorageNodeManagerLocal getStorageNodeManager() { return lookupLocal(StorageNodeManagerBean.class); } @@ -636,10 +636,10 @@ public final class LookupUtil {
/** * Generic method to lookup an Ejb bean by name and the interface name - * + * * @param beanName the name of the EJB bean * @param interfaceName the full class name of either the remote or local interface - * + * * @return the bean accessed through specified interface */ public static Object getEjb(String beanName, String interfaceName) { @@ -656,7 +656,7 @@ public final class LookupUtil { // Private Methods
private static String getLocalJNDIName(String beanName, String interfaceName) { - return "java:global/rhq/rhq-enterprise-server-ejb3/" + beanName + "!" + interfaceName; + return "java:global/rhq/rhq-server/" + beanName + "!" + interfaceName; }
private static <T> String getLocalJNDIName(@NotNull @@ -673,7 +673,7 @@ public final class LookupUtil { */ private static <T> String getRemoteJNDIName(@NotNull Class<? extends T> beanClass) { - return ("java:global/rhq/rhq-enterprise-server-ejb3/" + beanClass.getSimpleName() + "!" + beanClass.getName() + return ("java:global/rhq/rhq-server/" + beanClass.getSimpleName() + "!" + beanClass.getName() .replace("Bean", "Remote")); }
diff --git a/modules/enterprise/server/jar/src/main/resources/server-comm-configuration-overrides.properties b/modules/enterprise/server/jar/src/main/resources/server-comm-configuration-overrides.properties index 91ccc85..f529be9 100644 --- a/modules/enterprise/server/jar/src/main/resources/server-comm-configuration-overrides.properties +++ b/modules/enterprise/server/jar/src/main/resources/server-comm-configuration-overrides.properties @@ -1,5 +1,5 @@ # location of the plugin jars and other files agents can download directly from sever -rhq.server.agent-files-directory=${rhq.server.home}/modules/org/rhq/rhq-enterprise-server-startup-subsystem/main/deployments/rhq.ear/rhq-downloads +rhq.server.agent-files-directory=${rhq.server.home}/modules/org/rhq/server-startup/main/deployments/rhq.ear/rhq-downloads # location where data files can be stored rhq.communications.data-directory=${jboss.server.data.dir} # global limit on number of concurrent incoming messages allowed diff --git a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Install.java b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Install.java index df07b5a..bb6aa40 100644 --- a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Install.java +++ b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Install.java @@ -354,7 +354,7 @@ public class Install extends AbstractInstall {
private File getAgentInstaller() { File agentDownloadDir = new File(getBaseDir(), - "modules/org/rhq/rhq-enterprise-server-startup-subsystem/main/deployments/rhq.ear/rhq-downloads/rhq-agent"); + "modules/org/rhq/server-startup/main/deployments/rhq.ear/rhq-downloads/rhq-agent"); return agentDownloadDir.listFiles(new FileFilter() { @Override public boolean accept(File file) { diff --git a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Upgrade.java b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Upgrade.java index 37667c6..9245baf 100644 --- a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Upgrade.java +++ b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Upgrade.java @@ -571,7 +571,7 @@ public class Upgrade extends AbstractInstall {
private File getFileDownload(String directory, final String fileMatch) { File downloadDir = new File(getBaseDir(), - "modules/org/rhq/rhq-enterprise-server-startup-subsystem/main/deployments/rhq.ear/rhq-downloads/" + "modules/org/rhq/server-startup/main/deployments/rhq.ear/rhq-downloads/" + directory); return downloadDir.listFiles(new FileFilter() { @Override diff --git a/modules/enterprise/server/startup-subsystem/pom.xml b/modules/enterprise/server/startup-subsystem/pom.xml index 3b5b2ba..e69df2e 100644 --- a/modules/enterprise/server/startup-subsystem/pom.xml +++ b/modules/enterprise/server/startup-subsystem/pom.xml @@ -13,7 +13,7 @@ <relativePath>../../../../pom.xml</relativePath> </parent>
- <artifactId>rhq-enterprise-server-startup-subsystem</artifactId> + <artifactId>server-startup</artifactId>
<name>RHQ Server Startup AS7 Subsystem</name>
@@ -106,7 +106,7 @@ <artifactId>jboss-as-ee</artifactId> <scope>provided</scope> </dependency> - + <!-- our startup subsystem will be deploying this --> <dependency> <groupId>org.rhq</groupId> diff --git a/pom.xml b/pom.xml index d52dcc7..26703d1 100644 --- a/pom.xml +++ b/pom.xml @@ -42,7 +42,7 @@ <rhq.defaultDevContainerPath>dev-container</rhq.defaultDevContainerPath> <rhq.devContainerServerPath>${rhq.defaultDevContainerPath}/rhq-server</rhq.devContainerServerPath> <!-- path relative to under dev container path where the EAR is deployed --> - <rhq.earDeployDir>modules/org/rhq/rhq-enterprise-server-startup-subsystem/main/deployments/${rhq.earName}</rhq.earDeployDir> + <rhq.earDeployDir>modules/org/rhq/server-startup/main/deployments/${rhq.earName}</rhq.earDeployDir> <!-- path relative to under dev container path where other things are stored --> <rhq.agentPluginDir>${rhq.earDeployDir}/rhq-downloads/rhq-plugins</rhq.agentPluginDir> <rhq.serverPluginDir>${rhq.earDeployDir}/rhq-serverplugins</rhq.serverPluginDir>
commit d3336fcd24c36f257a0702b13114b95146d44d83 Author: John Mazzitelli mazz@redhat.com Date: Thu Jul 11 14:04:39 2013 -0400
cassandra-installer uses a commons-io 2.1 API, we need to put this up front in classpath in eclipse
diff --git a/.classpath b/.classpath index f09f8ed..e5e7a21 100644 --- a/.classpath +++ b/.classpath @@ -217,6 +217,7 @@ <classpathentry kind="src" path="modules/enterprise/server/data-migration/src/test/java"/> <classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.7"/> <classpathentry exported="true" kind="var" path="M2_REPO/org/apache/httpcomponents/httpclient/4.2.3/httpclient-4.2.3.jar" sourcepath="M2_REPO/org/apache/httpcomponents/httpclient/4.2.3/httpclient-4.2.3-sources.jar"/> + <classpathentry exported="true" kind="var" path="M2_REPO/commons-io/commons-io/2.1/commons-io-2.1.jar" sourcepath="M2_REPO/commons-io/commons-io/2.1/commons-io-2.1-sources.jar"/> <classpathentry exported="true" kind="var" path="M2_REPO/org/apache/httpcomponents/httpcore/4.2.2/httpcore-4.2.2.jar" sourcepath="M2_REPO/org/apache/httpcomponents/httpcore/4.2.2/httpcore-4.2.2-sources.jar"/> <classpathentry exported="true" kind="var" path="M2_REPO/org/apache/httpcomponents/httpmime/4.2.3/httpmime-4.2.3.jar" sourcepath="M2_REPO/org/apache/httpcomponents/httpmime/4.2.3/httpmime-4.2.3-sources.jar"/> <classpathentry exported="true" kind="var" path="M2_REPO/org/jboss/integration/jboss-profileservice-spi/5.1.0.SP1/jboss-profileservice-spi-5.1.0.SP1.jar"/>
commit 794d564b16b697870ca92040f27acbcdcad63522 Author: Rémy Maucherat rmaucher@redhat.com Date: Thu Jul 11 15:33:55 2013 +0200
BZ863502: Fix OOM connecting to Tomcat instance. Patch by Thomas Segismont.
diff --git a/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatServerComponent.java b/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatServerComponent.java index f1e2c51..1b595db 100644 --- a/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatServerComponent.java +++ b/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatServerComponent.java @@ -1,24 +1,20 @@ /* * RHQ Management Platform - * Copyright (C) 2005-2010 Red Hat, Inc. + * Copyright (C) 2005-2013 Red Hat, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation, and/or the GNU Lesser - * General Public License, version 2.1, also as published by the Free - * Software Foundation. + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License and the GNU Lesser General Public License - * for more details. + * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * and the GNU Lesser General Public License along with this program; - * if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ package org.jboss.on.plugins.tomcat;
@@ -195,10 +191,9 @@ public class TomcatServerComponent<T extends ResourceComponent<?>> implements JM // to have a version compatible local install and set the install path to the local path, even though // the server url was remote. String catalinaHome = pluginConfig.getSimpleValue(PLUGIN_CONFIG_CATALINA_HOME_PATH, null); - boolean hasLocalJars = new File(catalinaHome).isDirectory(); - - if (hasLocalJars) { - connectionSettings.setLibraryURI(catalinaHome); + File libDir = getLibDir(catalinaHome); + if (libDir != null) { + connectionSettings.setLibraryURI(libDir.getAbsolutePath()); connectionFactory.discoverServerClasses(connectionSettings);
// Tell EMS to make copies of jar files so that the ems classloader doesn't lock @@ -276,6 +271,22 @@ public class TomcatServerComponent<T extends ResourceComponent<?>> implements JM return connection; }
+ private File getLibDir(String catalinaHome) { + if (catalinaHome != null) { + // Tomcat 6 and Tomcat 7 have Catalina JARS in CATALINA_HOME/lib + File libDir = new File(catalinaHome, "lib"); + if (libDir.isDirectory()) { + return libDir; + } + // Tomcat 5.5 has Catalina JARS in CATALINA_HOME/server/lib + libDir = new File(catalinaHome, "server" + File.separator + "lib"); + if (libDir.isDirectory()) { + return libDir; + } + } + return null; + } + public Configuration getPluginConfiguration() { return resourceContext.getPluginConfiguration(); } @@ -485,4 +496,4 @@ public class TomcatServerComponent<T extends ResourceComponent<?>> implements JM operationParameters.put(shutdownScriptEnv); } } -} \ No newline at end of file +}
commit 6b054aa471107a3894fc45ea2ed97c7f8615562f Author: Thomas Segismont tsegismo@redhat.com Date: Thu Jul 11 15:29:55 2013 +0200
Improve CassandraNodeComponent availability check performance.
Keep a reference of the ProcessInfo instance and get a new one only if needed.
diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java index 90afafb..0037bfe 100644 --- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java +++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java @@ -1,30 +1,25 @@ /* * RHQ Management Platform - * Copyright (C) 2005-2012 Red Hat, Inc. + * Copyright (C) 2005-2013 Red Hat, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation, and/or the GNU Lesser - * General Public License, version 2.1, also as published by the Free - * Software Foundation. + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License and the GNU Lesser General Public License - * for more details. + * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * and the GNU Lesser General Public License along with this program; - * if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ package org.rhq.plugins.cassandra;
-import static org.rhq.core.domain.measurement.AvailabilityType.DOWN; -import static org.rhq.core.domain.measurement.AvailabilityType.UNKNOWN; -import static org.rhq.core.domain.measurement.AvailabilityType.UP; +import static java.util.concurrent.TimeUnit.NANOSECONDS; +import static java.util.concurrent.TimeUnit.SECONDS; import static org.rhq.core.system.OperatingSystemType.WINDOWS;
import java.io.File; @@ -65,6 +60,7 @@ import org.rhq.core.pluginapi.util.ProcessExecutionUtility; import org.rhq.core.system.ProcessExecution; import org.rhq.core.system.ProcessExecutionResults; import org.rhq.core.system.ProcessInfo; +import org.rhq.core.system.ProcessInfo.ProcessInfoSnapshot; import org.rhq.core.system.SystemInfo; import org.rhq.core.util.StringUtil; import org.rhq.core.util.exception.ThrowableUtil; @@ -75,17 +71,19 @@ import org.rhq.plugins.jmx.JMXServerComponent; * @author John Sanda */ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent<?>> implements OperationFacet { - - private Log log = LogFactory.getLog(CassandraNodeComponent.class); + private static final Log LOG = LogFactory.getLog(CassandraNodeComponent.class);
private Session cassandraSession; private String host; + private ProcessInfo processInfo;
@SuppressWarnings("rawtypes") @Override public void start(ResourceContext context) throws Exception { super.start(context);
+ processInfo = context.getNativeProcess(); + host = context.getPluginConfiguration().getSimpleValue("host", "localhost"); String clusterName = context.getPluginConfiguration().getSimpleValue("clusterName", "unknown"); String username = context.getPluginConfiguration().getSimpleValue("username", "cassandra"); @@ -98,7 +96,7 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent nativePort = Integer.parseInt(context.getPluginConfiguration() .getSimpleValue("nativeTransportPort", "9042")); } catch (Exception e) { - log.debug("Native transport port parsing failed...", e); + LOG.debug("Native transport port parsing failed...", e); }
@@ -115,42 +113,39 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent
this.cassandraSession = clusterBuilder.build().connect(clusterName); } catch (Exception e) { - log.error("Connect to Cassandra " + host + ":" + nativePort, e); + LOG.error("Connect to Cassandra " + host + ":" + nativePort, e); throw e; } };
@Override public void stop() { - log.info("Shutting down Cassandra client"); + processInfo = null; + LOG.info("Shutting down Cassandra client"); cassandraSession.getCluster().shutdown(); - log.info("Shutdown is complete"); + LOG.info("Shutdown is complete"); }
@Override public AvailabilityType getAvailability() { - long start = System.currentTimeMillis(); + long start = System.nanoTime(); try { - ResourceContext<?> context = getResourceContext(); - ProcessInfo processInfo = context.getNativeProcess(); - - if (processInfo == null) { - return UNKNOWN; - } else { - // It is safe to read prior snapshot as getNativeProcess always return a fresh instance - // ProcessInfoSnapshot processInfoSnaphot = processInfo.freshSnapshot(); - if (processInfo.priorSnaphot().isRunning()) { - return UP; - } else { - return DOWN; - } + // Get a fresh snapshot of the process + ProcessInfoSnapshot processInfoSnapshot = (processInfo == null) ? null : processInfo.freshSnapshot(); + if (processInfoSnapshot == null || !processInfoSnapshot.isRunning()) { + processInfo = getResourceContext().getNativeProcess(); + // Safe to get prior snapshot here, we've just recreated the process info instance + processInfoSnapshot = (processInfo == null) ? null : processInfo.priorSnaphot(); } + return (processInfoSnapshot != null && processInfoSnapshot.isRunning()) ? AvailabilityType.UP + : AvailabilityType.DOWN; } finally { - long end = System.currentTimeMillis(); - long totalTime = end - start; - log.debug("Finished availability check in " + totalTime + " ms"); - if (totalTime > (1000 * 5)) { - log.warn("Availability check exceeded five seconds. Total time was " + totalTime + " ms"); + long totalTimeMillis = NANOSECONDS.toMillis(System.nanoTime() - start); + if (LOG.isDebugEnabled()) { + LOG.debug("Finished availability check in " + totalTimeMillis + " ms"); + } + if (totalTimeMillis > SECONDS.toMillis(5)) { + LOG.warn("Availability check exceeded five seconds. Total time was " + totalTimeMillis + " ms"); } } } @@ -175,28 +170,28 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent protected OperationResult shutdownNode() { ResourceContext<?> context = getResourceContext();
- if (log.isInfoEnabled()) { - log.info("Starting shutdown operation on " + CassandraNodeComponent.class.getName() + + if (LOG.isInfoEnabled()) { + LOG.info("Starting shutdown operation on " + CassandraNodeComponent.class.getName() + " with resource key " + context.getResourceKey()); } EmsConnection emsConnection = getEmsConnection(); EmsBean storageService = emsConnection.getBean("org.apache.cassandra.db:type=StorageService"); Class[] emptyParams = new Class[0];
- if (log.isDebugEnabled()) { - log.debug("Disabling thrift..."); + if (LOG.isDebugEnabled()) { + LOG.debug("Disabling thrift..."); } EmsOperation operation = storageService.getOperation("stopRPCServer", emptyParams); operation.invoke((Object[]) emptyParams);
- if (log.isDebugEnabled()) { - log.debug("Disabling gossip..."); + if (LOG.isDebugEnabled()) { + LOG.debug("Disabling gossip..."); } operation = storageService.getOperation("stopGossiping", emptyParams); operation.invoke((Object[]) emptyParams);
- if (log.isDebugEnabled()) { - log.debug("Initiating drain..."); + if (LOG.isDebugEnabled()) { + LOG.debug("Initiating drain..."); } operation = storageService.getOperation("drain", emptyParams); operation.invoke((Object[]) emptyParams); @@ -207,7 +202,7 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent process.kill("KILL"); return new OperationResult("Successfully shut down Cassandra daemon with pid " + pid); } catch (SigarException e) { - log.warn("Failed to shut down Cassandra node with pid " + pid, e); + LOG.warn("Failed to shut down Cassandra node with pid " + pid, e); OperationResult failure = new OperationResult("Failed to shut down Cassandra node with pid " + pid); failure.setErrorMessage(ThrowableUtil.getAllMessages(e)); return failure; @@ -256,7 +251,7 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent try { updateSeedsList(addresses); } catch (Exception e) { - log.error("An error occurred while updating the seeds list property", e); + LOG.error("An error occurred while updating the seeds list property", e); Throwable rootCause = ThrowableUtil.getRootCause(e); result.setErrorMessage(ThrowableUtil.getStackAsString(rootCause)); } @@ -277,7 +272,7 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent try { addresses = convertToIPAddresses(seeds); } catch (UnknownHostException e) { - log.error("Failed to update seeds list", e); + LOG.error("Failed to update seeds list", e); throw new IOException("Failed to update seeds list. Make sure that " + seeds + " is a list of valid " + "hostnames or IP addresses that can be resolved.", e); } @@ -329,7 +324,7 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent if (!yamlFile.delete()) { String msg = "Failed to delete [" + yamlFile + "] in preparation of writing updated configuration. 
The " + "changes will be aborted."; - log.error(msg); + LOG.error(msg); deleteYamlBackupFile(yamlFileBackup); throw new IOException(msg); } @@ -339,8 +334,8 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent yaml.dump(cassandraConfig, writer); deleteYamlBackupFile(yamlFileBackup); } catch (Exception e) { - log.error("An error occurred while trying to write the updated configuration back to " + yamlFile, e); - log.error("Reverting changes to " + yamlFile); + LOG.error("An error occurred while trying to write the updated configuration back to " + yamlFile, e); + LOG.error("Reverting changes to " + yamlFile);
if (yamlFile.delete()) { StreamUtil.copy(new FileInputStream(yamlFileBackup), new FileOutputStream(yamlFile)); @@ -348,7 +343,7 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent } else { String msg = "Failed updates to " + yamlFile.getName() + " cannot be rolled back. The file cannot be " + "deleted. " + yamlFile + " should be replaced by " + yamlFileBackup; - log.error(msg); + LOG.error(msg); throw new IOException(msg); } } finally { @@ -367,8 +362,8 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent
private void deleteYamlBackupFile(File yamlBackup) { if (!yamlBackup.delete()) { - log.warn("Failed to delete Cassandra configuration backup file [" + yamlBackup + "]. This file " + - "should be deleted."); + LOG.warn("Failed to delete Cassandra configuration backup file [" + yamlBackup + "]. This file " + + "should be deleted."); } }
commit 3f21a52f2eeef0e5b46f9002980e158ff26f21d9 Author: John Sanda jsanda@redhat.com Date: Thu Jul 11 09:18:02 2013 -0400
don't use $TMPDIR since it might not be defined on all platforms
diff --git a/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.sh b/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.sh index 947155f..8a83320 100755 --- a/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.sh +++ b/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.sh @@ -23,7 +23,7 @@ PATCH="apache-cassandra-1.2.4-patch-1.jar"
# swap out the Cassandra jar file with the patched version echo "Copying patch file to $RHQ_SERVER_DIR/rhq-storage/lib" -mv $RHQ_SERVER_DIR/rhq-storage/lib/apache-cassandra-1.2.4.jar $TMPDIR +mv $RHQ_SERVER_DIR/rhq-storage/lib/apache-cassandra-1.2.4.jar . cp $PATCH $RHQ_SERVER_DIR/rhq-storage/lib
# restart the storage node @@ -54,6 +54,6 @@ $RHQ_SERVER_DIR/bin/rhqctl stop
echo "Removing patch file" rm $RHQ_SERVER_DIR/rhq-storage/lib/$PATCH -mv $TMPDIR/apache-cassandra-1.2.4.jar $RHQ_SERVER_DIR/rhq-storage/lib +mv ./apache-cassandra-1.2.4.jar $RHQ_SERVER_DIR/rhq-storage/lib
echo "Table compression has been disabled for all keyspaces. You are now ready to upgrade your RHQ installation."
commit 9356e5fe02c664fc758376166a95ed4b85a603cb Author: Heiko W. Rupp hwr@redhat.com Date: Thu Jul 11 11:19:45 2013 +0200
BZ 966294 Further improvements. Also work around BZ 983275
diff --git a/modules/enterprise/server/plugins/alert-snmp/src/main/java/org/rhq/enterprise/server/plugins/alertSnmp/SnmpInfo.java b/modules/enterprise/server/plugins/alert-snmp/src/main/java/org/rhq/enterprise/server/plugins/alertSnmp/SnmpInfo.java index 7b174cc..296696c 100644 --- a/modules/enterprise/server/plugins/alert-snmp/src/main/java/org/rhq/enterprise/server/plugins/alertSnmp/SnmpInfo.java +++ b/modules/enterprise/server/plugins/alert-snmp/src/main/java/org/rhq/enterprise/server/plugins/alertSnmp/SnmpInfo.java @@ -37,6 +37,7 @@ public class SnmpInfo {
// Default remote port for SNMP trap static final String DEFAULT_PORT = "162"; + private static final String DEFAULT_RHQ_BINDING = "1.3.6.1.4.1.18016.2.1";
final public String host; final public String port; @@ -84,8 +85,11 @@ public class SnmpInfo { if (host==null || host.isEmpty()) { host = preferences.getSimpleValue("defaultTargetHost",null); } - String port = configuration.getSimpleValue(PARAM_PORT, DEFAULT_PORT); - String oid = configuration.getSimpleValue(PARAM_VARIABLE_BINDING_PREFIX, null); // required + String port = configuration.getSimpleValue(PARAM_PORT, null); + if (port==null||port.isEmpty()) { + port = preferences.getSimpleValue("defaultPort",DEFAULT_PORT); + } + String oid = configuration.getSimpleValue(PARAM_VARIABLE_BINDING_PREFIX, DEFAULT_RHQ_BINDING); // required String trapOid = configuration.getSimpleValue(PARAM_TRAP_OID, null); return new SnmpInfo(host, port, oid, trapOid);
@@ -94,7 +98,7 @@ public class SnmpInfo { @Override public String toString() { String hostString = (host == null ? "UnknownHost" : host); - String oidString = (oid == null ? "UnknownOID" : oid); + String oidString = (oid == null ? "Unknown Binding Prefix" : oid); String trapOidString = (trapOid == null ? "DefaultTrapOID" : trapOid); return hostString + ":" + port + " (" + oidString + ") (" + trapOidString + ")"; } diff --git a/modules/enterprise/server/plugins/alert-snmp/src/main/resources/META-INF/rhq-serverplugin.xml b/modules/enterprise/server/plugins/alert-snmp/src/main/resources/META-INF/rhq-serverplugin.xml index 3f7a35c..8e2a4cc 100644 --- a/modules/enterprise/server/plugins/alert-snmp/src/main/resources/META-INF/rhq-serverplugin.xml +++ b/modules/enterprise/server/plugins/alert-snmp/src/main/resources/META-INF/rhq-serverplugin.xml @@ -25,7 +25,7 @@ </c:property-options> </c:simple-property>
- <c:simple-property name="defaultTargetHost" displayName="Default trap target host" required="false"/> + <c:simple-property name="defaultTargetHost" displayName="Default trap target host" required="false" default="localhost"/> <c:simple-property name="defaultPort" displayName="Default trap target port" required="false" type="integer" default="162" defaultValue="162"/> <c:simple-property name="transport" defaultValue="UDP"> @@ -35,7 +35,7 @@ </c:property-options> </c:simple-property>
- <c:simple-property name="trapOid" displayName="Trap OID" description="OID for the trap sent" type="string" + <c:simple-property name="trapOid" displayName="Trap OID" description="OID of the trap sent" type="string" default="1.3.6.1.4.1.18016.2.1.2.0.1"/> <c:simple-property name="community" type="string" default="public" description="Community - v1 and v2c only" required="false"/>
@@ -43,10 +43,10 @@
<c:simple-property name="genericId" required="false" default="6" type="integer" description="Set the generic trap type. Default is 6 (=Enterprise specific)."/> - <c:simple-property name="enterpriseOid" required="false" default="1.3.6.1.4.1.18016.2.3" + <c:simple-property name="specificId" required="false" default="0" type="integer" + description="Enterprise-specific ID of the trap. If this is set, the generic ID must be set to 6."/> + <c:simple-property name="enterpriseOid" required="false" default="1.3.6.1.4.1.18016.2.3" description="OID of the sender, identifies the type of managed object generating the trap. Default is enterprise.jboss.rhq.rhqServer"/> - <c:simple-property name="specificId" required="false" default="0" type="integer" - description="Enterprise-specific ID of the trap. If this is set, the generic ID must be set to 6."/> <c:simple-property name="agentAddress" description="Address of our SNMP agent (=the RHQ server)" required="false"/> </c:group> <!-- @@ -91,7 +91,7 @@ sender-wide preferences"/> <c:simple-property name="port" type="integer" required="false" default="162" description="Trap target port"/> <c:simple-property name="oid" displayName="Variable bindings prefix" type="string" required="true" - defaultValue="1.3.6.1.4.1.18016.2.1"> + defaultValue="1.3.6.1.4.1.18016.2.1" default="1.3.6.1.4.1.18016.2.1"> <c:description> <![CDATA[ <p>RHQ will send alert notification details as a list of variable bindings in the @@ -103,11 +103,11 @@ <c:simple-property name="trapOid" required="false"> <c:description> <![CDATA[ - <p>If set, the value of this paramater will override the value of the Trap Oid parameter defined globally + <p>If set, the value of this parameter will override the value of the Trap Oid parameter defined globally in the server plugin configuration. 
The Trap OID is sent as the second variable binding in the SNMP trap PDU.</p> <p><strong>Only applicable to v2c and v3 traps.</strong></p> - <p><strong>Do not confuse this paramater with 'Variable bindings prefix'.</strong></p> + <p><strong>Do not confuse this parameter with 'Variable bindings prefix'.</strong></p> ]]> </c:description> </c:simple-property>
commit 9320e9df35f7dfeba8c6ddcf0dc16df11b2f162b Author: John Sanda jsanda@redhat.com Date: Wed Jul 10 22:10:35 2013 -0400
add verification to updateSeedsList resource operation
The updateSeedsList operation will convert each seed to an InetAddress to 1) make sure it is a valid hostname/IP address and 2) convert it to an IP address to ensure the seeds property in cassandra.yaml uses IP addresses.
diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java index 5e7692b..90afafb 100644 --- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java +++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java @@ -32,6 +32,8 @@ import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.FileWriter; import java.io.IOException; +import java.net.InetAddress; +import java.net.UnknownHostException; import java.util.ArrayList; import java.util.Date; import java.util.List; @@ -63,7 +65,6 @@ import org.rhq.core.pluginapi.util.ProcessExecutionUtility; import org.rhq.core.system.ProcessExecution; import org.rhq.core.system.ProcessExecutionResults; import org.rhq.core.system.ProcessInfo; -import org.rhq.core.system.ProcessInfo.ProcessInfoSnapshot; import org.rhq.core.system.SystemInfo; import org.rhq.core.util.StringUtil; import org.rhq.core.util.exception.ThrowableUtil; @@ -271,7 +272,16 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent return addresses; }
- protected void updateSeedsList(List<String> addresses) throws IOException { + protected void updateSeedsList(List<String> seeds) throws IOException { + List<String> addresses = null; + try { + addresses = convertToIPAddresses(seeds); + } catch (UnknownHostException e) { + log.error("Failed to update seeds list", e); + throw new IOException("Failed to update seeds list. Make sure that " + seeds + " is a list of valid " + + "hostnames or IP addresses that can be resolved.", e); + } + ResourceContext<?> context = getResourceContext(); Configuration pluginConfig = context.getPluginConfiguration();
@@ -346,6 +356,15 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent } }
+ private List<String> convertToIPAddresses(List<String> seeds) throws UnknownHostException { + List<String> ipAddresses = new ArrayList<String>(seeds.size()); + for (String seed : seeds) { + InetAddress address = InetAddress.getByName(seed); + ipAddresses.add(address.getHostAddress()); + } + return ipAddresses; + } + private void deleteYamlBackupFile(File yamlBackup) { if (!yamlBackup.delete()) { log.warn("Failed to delete Cassandra configuration backup file [" + yamlBackup + "]. This file " + diff --git a/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml index cac0a8d..3aa701b 100644 --- a/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml +++ b/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml @@ -75,7 +75,11 @@ </results> </operation>
- <operation name="updateSeedsList" description="Updatess the node's seeds property in cassandra.yaml"> + <operation name="updateSeedsList" + description="Updates the node's seeds property in cassandra.yaml. seedsList must be a list of valid + hostnames or IP addresses that are reachable from the agent machine. Note that if hostnames are + specified they will be converted to IP addresses so that the seeds property will consist solely of IP + addresses."> <parameters> <c:list-property name="seedsList"> <c:simple-property name="seed" type="string"/>
commit 6430ccb39805a3b1de2fe7a57d303a6b640ce059 Author: John Sanda jsanda@redhat.com Date: Wed Jul 10 21:32:43 2013 -0400
adding custom java dbupgrade task for updating storage node addresss
Forgot to include this file in my previous commit.
diff --git a/modules/core/dbutils/src/main/java/org/rhq/core/db/upgrade/StorageNodeAddressUpgradeTask.java b/modules/core/dbutils/src/main/java/org/rhq/core/db/upgrade/StorageNodeAddressUpgradeTask.java new file mode 100644 index 0000000..9fc90ed --- /dev/null +++ b/modules/core/dbutils/src/main/java/org/rhq/core/db/upgrade/StorageNodeAddressUpgradeTask.java @@ -0,0 +1,54 @@ +package org.rhq.core.db.upgrade; + +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.sql.Connection; +import java.sql.SQLException; +import java.util.List; + +import org.rhq.core.db.DatabaseType; +import org.rhq.core.db.DbUtilsI18NFactory; +import org.rhq.core.db.DbUtilsI18NResourceKeys; + +import mazz.i18n.Logger; + +/** + * Updates the address field of storage node entities to ensure we are storing IP addresses and not hostnames. We want + * to store the IP address since that is what Cassandra uses for inter-node communication. JMX operations that return + * nodes will return the node IP addresses and not hostnames. 
+ * + * @author John Sanda + */ +public class StorageNodeAddressUpgradeTask implements DatabaseUpgradeTask { + + private final Logger log = DbUtilsI18NFactory.getLogger(StorageNodeAddressUpgradeTask.class); + + @Override + public void execute(DatabaseType databaseType, Connection connection) throws SQLException { + String sql = "SELECT id, address FROM rhq_storage_node"; + + log.debug(DbUtilsI18NResourceKeys.EXECUTING_SQL, sql); + List<Object[]> results = databaseType.executeSelectSql(connection, sql); + + Integer id = null; + String storageNodeAddress = null; + try { + for (Object[] row : results) { + id = (Integer) row[0]; + storageNodeAddress = (String) row[1]; + InetAddress address = InetAddress.getByName(storageNodeAddress); + if (!storageNodeAddress.equals(address.getHostAddress())) { + log.debug(DbUtilsI18NResourceKeys.MESSAGE, "Updating address for StorageNode[id= " + id + ", ]" + + "address= " + storageNodeAddress + "]"); + String update = "UPDATE rhq_storage_node SET address = '" + address.getHostAddress() + "' " + + "WHERE id = " + id; + log.debug(DbUtilsI18NResourceKeys.EXECUTING_SQL, update); + databaseType.executeSql(connection, update); + } + } + } catch (UnknownHostException e) { + throw new RuntimeException("Failed to look up IP address for StorageNode[id =" + id + ", address=" + + storageNodeAddress + "]", e); + } + } +}
commit b1c1f6de18af6c1e598268944e2b41100cbb1a3d Author: John Sanda jsanda@redhat.com Date: Wed Jul 10 21:23:06 2013 -0400
make sure that we store the IP address of storage nodes and not hostnames
When doing cluster maintenance on the server, we make calls to get C* node addresses. C* returns IP addresses since it uses IP addresses for gossip; consequently, we want to make sure we store the IP address to avoid having to perform any hostname/IP address mapping.
diff --git a/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java b/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java index d919ee3..386cda7 100644 --- a/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java +++ b/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java @@ -132,7 +132,8 @@ public class StorageInstaller {
Option hostname = new Option("n", "hostname", true, "The hostname or IP address on which the node will listen for " - + "requests. If not specified, defaults to the hostname for localhost."); + + "requests. Note that if a hostname is specified, the IP address is used. Defaults to the IP " + + "address of the local host (which depending on hostname configuration may not be localhost)."); hostname.setArgName("HOSTNAME");
Option seeds = new Option("s", "seeds", true, "A comma-delimited list of hostnames or IP addresses that " @@ -282,7 +283,7 @@ public class StorageInstaller { }
if (cmdLine.hasOption("n")) { - hostname = cmdLine.getOptionValue("n"); + hostname = InetAddress.getByName(cmdLine.getOptionValue("n")).getHostAddress(); } else { hostname = InetAddress.getLocalHost().getHostAddress(); } diff --git a/modules/core/dbutils/pom.xml b/modules/core/dbutils/pom.xml index 356ff48..483688f 100644 --- a/modules/core/dbutils/pom.xml +++ b/modules/core/dbutils/pom.xml @@ -17,7 +17,7 @@ <description>Database schema setup, upgrade and other utilities</description>
<properties> - <db.schema.version>2.133</db.schema.version> + <db.schema.version>2.134</db.schema.version> <rhq.ds.type-mapping>${rhq.test.ds.type-mapping}</rhq.ds.type-mapping> <rhq.ds.server-name>${rhq.test.ds.server-name}</rhq.ds.server-name> <rhq.ds.db-name>${rhq.test.ds.db-name}</rhq.ds.db-name> diff --git a/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml b/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml index 0e1afd3..bb5fa50 100644 --- a/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml +++ b/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml @@ -2074,6 +2074,11 @@ </schema-directSQL> </schemaSpec>
+ <schemaSpec version="2.134"> + <!-- Make sure existing storage nodes are storing IP addresses and not hostnames --> + <schema-javaTask className="StorageNodeAddressUpgradeTask" /> + </schemaSpec> + </dbupgrade> </target> </project>
commit d124d4589120090628c53c04c6767b948c7a270a Author: John Sanda jsanda@redhat.com Date: Wed Jul 10 14:59:57 2013 -0400
remove C* native library dependencies
JNA and lz4-java have been removed from our C* distro. The distro now includes a version of snappy-java that excludes the native components. We still have to include snappy-java because C* still has a runtime dependency on snappy-java classes.
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/pom.xml b/modules/common/cassandra-ccm/cassandra-ccm-core/pom.xml index 0f6eaca..0b015b4 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-core/pom.xml +++ b/modules/common/cassandra-ccm/cassandra-ccm-core/pom.xml @@ -97,9 +97,6 @@ <exclude>src/main/resources/cassandra/conf/cassandra-env.sh</exclude> </excludes> </resource> - <!--<resource>--> - <!--<directory>src/main/cassandra/cql</directory>--> - <!--</resource>--> </resources> <delimiters> <delimiter>${*}</delimiter> @@ -110,6 +107,13 @@
<plugin> <artifactId>maven-antrun-plugin</artifactId> + <dependencies> + <dependency> + <groupId>org.xerial.snappy</groupId> + <artifactId>snappy-java</artifactId> + <version>${cassandra.snappy.version}</version> + </dependency> + </dependencies> <executions> <execution> <!-- @@ -158,12 +162,13 @@ <property name="cassandra.distro.filename" value="cassandra.zip"/> <property name="cassandra.distro.zip" value="${project.build.outputDirectory}/${cassandra.distro.filename}"/> - <copy file="${settings.localRepository}/net/java/dev/jna/platform/${cassandra.jna.version}/platform-${cassandra.jna.version}.jar" - todir="${cassandra.dir}/lib"/> - <copy file="${settings.localRepository}/net/java/dev/jna/jna/${cassandra.jna.version}/jna-${cassandra.jna.version}.jar" + <delete file="${cassandra.dir}/lib/lz4-1.1.0.jar"/> + <delete file="${cassandra.dir}/lib/snappy-java-1.0.4.1.jar"/> + <copy file="${settings.localRepository}/org/xerial/snappy/snappy-java/${cassandra.snappy.version}/snappy-java-${cassandra.snappy.version}.jar" todir="${cassandra.dir}/lib"/> <move file="${project.build.outputDirectory}/cassandra/conf" todir="${cassandra.dir}"/> <delete dir="${cassandra.dir}/javadoc"/> + <zip basedir="${cassandra.dir}" destfile="${cassandra.distro.zip}"/> <delete dir="${cassandra.dir}"/> <delete file="${project.build.outputDirectory}/deploy.xml"/> @@ -262,65 +267,5 @@ </plugins> </build> </profile> - - <!-- - Cassandra uses the snappy-java compression library, and it uses a native library that - is packaged in the snappy-java JAR. Running on Mac OS X with Java 7 will result in, - - NoClassDefFoundError Could not initialize class org.xerial.snappy.Snappy - - due to the file name extension that the Java 7 JVM looks for on Mac OS X. This issue - was logged and fixed under https://github.com/xerial/snappy-java/issues/6. Cassandra - however does not yet bundle a newer version of snappy-java. 
This profile is activated - when running on Mac OS X and replaces the packaged version of snappy-java with a newer - version so that snappy compression can still be used during development. Note that - this is **not** an issue when running on Java 6. - - - jsanda 10/03/2012 - --> - <profile> - <id>snappy-mac-workaround</id> - <activation> - <os> - <family>Mac</family> - </os> - </activation> - <build> - <plugins> - <plugin> - <artifactId>maven-antrun-plugin</artifactId> - <inherited>false</inherited> - <dependencies> - <dependency> - <groupId>org.xerial.snappy</groupId> - <artifactId>snappy-java</artifactId> - <version>1.1.0-M3</version> - </dependency> - </dependencies> - <executions> - <execution> - <id>setup-pkg-mac</id> - <phase>process-resources</phase> - <goals> - <goal>run</goal> - </goals> - <configuration> - <target> - <property name="cassandra.dir" - value="${project.build.outputDirectory}/cassandra-${cassandra.version}"/> - <property name="cassandra.lib.dir" value="${cassandra.dir}/lib"/> - <property name="snappy.jar.original" value="${cassandra.lib.dir}/snappy-java-1.0.4.1.jar"/> - <property name="snappy.jar.updated" - value="${local.repo}/org/xerial/snappy/snappy-java/${cassandra.driver.snappy.version}/snappy-java-${cassandra.driver.snappy.version}.jar"/> - <delete file="${snappy.jar.original}"/> - <copy file="${snappy.jar.updated}" todir="${cassandra.lib.dir}"/> - </target> - </configuration> - </execution> - </executions> - </plugin> - </plugins> - </build> - </profile> </profiles> </project> diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/rhq.cassandra.yaml b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/rhq.cassandra.yaml index bae3ecc..298db9d 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/rhq.cassandra.yaml +++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/rhq.cassandra.yaml @@ -681,7 +681,7 
@@ client_encryption_options: # can be: all - all traffic is compressed # dc - traffic between different datacenters is compressed # none - nothing is compressed. -internode_compression: all +internode_compression: none
# Enable or disable tcp_nodelay for inter-dc communication. # Disabling it will result in larger (but fewer) network packets being sent, diff --git a/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java b/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java index 8062d6a..d919ee3 100644 --- a/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java +++ b/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java @@ -32,6 +32,7 @@ import java.io.FileReader; import java.io.IOException; import java.net.InetAddress; import java.util.ArrayList; +import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Locale; @@ -54,6 +55,7 @@ import org.apache.commons.cli.PosixParser; import org.apache.commons.exec.DefaultExecutor; import org.apache.commons.exec.Executor; import org.apache.commons.exec.PumpStreamHandler; +import org.apache.commons.io.FileUtils; import org.apache.commons.io.output.NullOutputStream; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -90,6 +92,8 @@ public class StorageInstaller {
private final Log log = LogFactory.getLog(StorageInstaller.class);
+ private final String VERIFY_DATA_DIRS_EMPTY = "verify-data-dirs-empty"; + private Options options;
private File serverBasedir; @@ -191,12 +195,15 @@ public class StorageInstaller { + "where the existing RHQ server is installed."); upgradeOption.setArgName("RHQ_SERVER_DIR");
+ Option verifyDataDirsEmptyOption = new Option(null, VERIFY_DATA_DIRS_EMPTY, true, "Will cause the installer " + + "to abort if any of the data directories is not empty. Defaults to true."); + options = new Options().addOption(new Option("h", "help", false, "Show this message.")).addOption(hostname) .addOption(seeds).addOption(jmxPortOption).addOption(startOption).addOption(checkStatus) .addOption(commitLogOption).addOption(dataDirOption).addOption(savedCachesDirOption) .addOption(nativeTransportPortOption).addOption(storagePortOption).addOption(sslStoragePortOption) .addOption(basedirOption).addOption(heapSizeOption).addOption(heapNewSizeOption).addOption(stackSizeOption) - .addOption(upgradeOption); + .addOption(upgradeOption).addOption(verifyDataDirsEmptyOption); }
public int run(CommandLine cmdLine) throws Exception { @@ -296,21 +303,25 @@ public class StorageInstaller { File dataDirFile = new File(dataDir); File savedCachesDirFile = new File(savedCachesDir);
- // validate the three data directories are empty - if they are not, we are probably stepping on another storage node - if (!isDirectoryEmpty(commitLogDirFile)) { - log.error("Commitlog directory is not empty. It should not exist for a new Storage Node [" - + commitLogDirFile.getAbsolutePath() + "]"); - return STATUS_DATA_DIR_NOT_EMPTY; - } - if (!isDirectoryEmpty(dataDirFile)) { - log.error("Data directory is not empty. It should not exist for a new Storage Node [" - + dataDirFile.getAbsolutePath() + "]"); - return STATUS_DATA_DIR_NOT_EMPTY; - } - if (!isDirectoryEmpty(savedCachesDirFile)) { - log.error("Saved caches directory is not empty. It should not exist for a new Storage Node [" - + savedCachesDirFile.getAbsolutePath() + "]"); - return STATUS_DATA_DIR_NOT_EMPTY; + boolean verifyDataDirsEmpty = Boolean.valueOf(cmdLine.getOptionValue(VERIFY_DATA_DIRS_EMPTY, "true")); + if (verifyDataDirsEmpty) { + // validate the three data directories are empty - if they are not, we are probably stepping on + // another storage node + if (!isDirectoryEmpty(commitLogDirFile)) { + log.error("Commitlog directory is not empty. It should not exist for a new Storage Node [" + + commitLogDirFile.getAbsolutePath() + "]"); + return STATUS_DATA_DIR_NOT_EMPTY; + } + if (!isDirectoryEmpty(dataDirFile)) { + log.error("Data directory is not empty. It should not exist for a new Storage Node [" + + dataDirFile.getAbsolutePath() + "]"); + return STATUS_DATA_DIR_NOT_EMPTY; + } + if (!isDirectoryEmpty(savedCachesDirFile)) { + log.error("Saved caches directory is not empty. It should not exist for a new Storage Node [" + + savedCachesDirFile.getAbsolutePath() + "]"); + return STATUS_DATA_DIR_NOT_EMPTY; + } }
jmxPort = getPort(cmdLine, "jmx-port", defaultJmxPort); @@ -438,12 +449,7 @@ public class StorageInstaller { }
private boolean isDirectoryEmpty(File dir) { - if (dir.isDirectory()) { - File[] files = dir.listFiles(); - return (files == null || files.length == 0); - } else { - return true; - } + return FileUtils.sizeOf(dir) == 0; }
private int getPort(CommandLine cmdLine, String option, int defaultValue) { @@ -709,12 +715,22 @@ public class StorageInstaller { }
public void printUsage() { - Options options = getOptions(); HelpFormatter helpFormatter = new HelpFormatter(); String syntax = "rhq-storage-installer.sh|bat [options]"; String header = "";
- helpFormatter.printHelp(syntax, header, options, null); + helpFormatter.printHelp(syntax, header, getHelpOptions(), null); + } + + public Options getHelpOptions() { + Options helpOptions = new Options(); + for (Option option : (Collection<Option>)options.getOptions()) { + if (option.getLongOpt().equals(VERIFY_DATA_DIRS_EMPTY)) { + continue; + } + helpOptions.addOption(option); + } + return helpOptions; }
public Options getOptions() { diff --git a/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ClusterBuilder.java b/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ClusterBuilder.java index 4687b4b..14f12e1 100644 --- a/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ClusterBuilder.java +++ b/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ClusterBuilder.java @@ -94,9 +94,10 @@ public class ClusterBuilder { * @see com.datastax.driver.core.Cluster.Builder#build() */ public Cluster build() { - if (compression == null && !isIBMJRE()) { - builder.withCompression(ProtocolOptions.Compression.SNAPPY); - } +// if (compression == null && !isIBMJRE()) { +// builder.withCompression(ProtocolOptions.Compression.SNAPPY); +// } + builder.withCompression(ProtocolOptions.Compression.NONE); return builder.build(); }
diff --git a/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml b/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml index cfe13be..bf06886 100644 --- a/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml +++ b/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml @@ -243,7 +243,7 @@ toDir="${jboss.modules.dir}/com/datastax/cassandra/cassandra-driver-core/main" verbose="true"/> <copy file="${settings.localRepository}/org/slf4j/slf4j-log4j12/1.7.2/slf4j-log4j12-1.7.2.jar" toDir="${jboss.modules.dir}/com/datastax/cassandra/cassandra-driver-core/main" verbose="true"/> - <copy file="${settings.localRepository}/org/xerial/snappy/snappy-java/1.0.5-M3-p1/snappy-java-1.0.5-M3-p1.jar" + <copy file="${settings.localRepository}/org/xerial/snappy/snappy-java/${cassandra.snappy.version}/snappy-java-${cassandra.snappy.version}.jar" toDir="${jboss.modules.dir}/com/datastax/cassandra/cassandra-driver-core/main" verbose="true"/>
<echo file="${jboss.modules.dir}/com/datastax/cassandra/cassandra-driver-core/main/module.xml"><![CDATA[<?xml version="1.0" encoding="UTF-8"?> @@ -258,7 +258,7 @@ <resource-root path="metrics-core-2.2.0.jar"/> <resource-root path="slf4j-api-1.7.2.jar"/> <resource-root path="slf4j-log4j12-1.7.2.jar"/> - <resource-root path="snappy-java-1.0.5-M3-p1.jar"/> + <resource-root path="snappy-java-${cassandra.snappy.version}.jar"/> <resource-root path="avro-1.4.0-cassandra-1.jar"/> </resources>
diff --git a/modules/enterprise/server/appserver/src/main/scripts/storage_setup.groovy b/modules/enterprise/server/appserver/src/main/scripts/storage_setup.groovy index f8fae7d..ccb90b9 100644 --- a/modules/enterprise/server/appserver/src/main/scripts/storage_setup.groovy +++ b/modules/enterprise/server/appserver/src/main/scripts/storage_setup.groovy @@ -97,6 +97,7 @@ heap-new-size=${heapNewSize} hostname=127.0.0.$nodeId jmx-port=${defaultJmxPort + (nodeId - 1)} seeds=${seeds} +verify-data-dirs-empty=false """ ) } diff --git a/modules/enterprise/server/itests-2/pom.xml b/modules/enterprise/server/itests-2/pom.xml index 1819f02..a3e9c7b 100644 --- a/modules/enterprise/server/itests-2/pom.xml +++ b/modules/enterprise/server/itests-2/pom.xml @@ -326,7 +326,7 @@ toDir="${jboss.modules.dir}/com/datastax/cassandra/cassandra-driver-core/main" verbose="true"/> <copy file="${settings.localRepository}/org/slf4j/slf4j-log4j12/1.7.2/slf4j-log4j12-1.7.2.jar" toDir="${jboss.modules.dir}/com/datastax/cassandra/cassandra-driver-core/main" verbose="true"/> - <copy file="${settings.localRepository}/org/xerial/snappy/snappy-java/1.0.5-M3-p1/snappy-java-1.0.5-M3-p1.jar" + <copy file="${settings.localRepository}/org/xerial/snappy/snappy-java/${cassandra.snappy.version}/snappy-java-${cassandra.snappy.version}.jar" toDir="${jboss.modules.dir}/com/datastax/cassandra/cassandra-driver-core/main" verbose="true"/>
<echo file="${jboss.modules.dir}/com/datastax/cassandra/cassandra-driver-core/main/module.xml"><![CDATA[<?xml version="1.0" encoding="UTF-8"?> @@ -341,7 +341,7 @@ <resource-root path="metrics-core-2.2.0.jar"/> <resource-root path="slf4j-api-1.7.2.jar"/> <resource-root path="slf4j-log4j12-1.7.2.jar"/> - <resource-root path="snappy-java-1.0.5-M3-p1.jar"/> + <resource-root path="snappy-java-${cassandra.snappy.version}.jar"/> <resource-root path="avro-1.4.0-cassandra-1.jar"/> </resources>
diff --git a/modules/plugins/cassandra/pom.xml b/modules/plugins/cassandra/pom.xml index 76a781d..da90f09 100644 --- a/modules/plugins/cassandra/pom.xml +++ b/modules/plugins/cassandra/pom.xml @@ -27,7 +27,7 @@ <dependency> <groupId>org.xerial.snappy</groupId> <artifactId>snappy-java</artifactId> - <version>1.0.5-M3-p1</version> + <version>${cassandra.snappy.version}</version> </dependency> <dependency> <groupId>org.apache.cassandra</groupId> @@ -141,7 +141,7 @@ <artifactItem> <groupId>org.xerial.snappy</groupId> <artifactId>snappy-java</artifactId> - <version>1.0.5-M3-p1</version> + <version>${cassandra.snappy.version}</version> </artifactItem> <artifactItem> <groupId>org.codehaus.jackson</groupId> diff --git a/pom.xml b/pom.xml index 99c1bdc..d52dcc7 100644 --- a/pom.xml +++ b/pom.xml @@ -179,7 +179,7 @@ <cassandra.thrift.version>0.7.0</cassandra.thrift.version> <cassandra.driver.version>1.0.0-rhq-1.2.4</cassandra.driver.version> <cassandra.driver.netty.version>3.6.3.Final</cassandra.driver.netty.version> - <cassandra.driver.snappy.version>1.0.5-M3-p1</cassandra.driver.snappy.version> + <cassandra.snappy.version>1.0.4.1-rhq-p1</cassandra.snappy.version> <cassandra.snakeyaml.version>1.6</cassandra.snakeyaml.version>
<rhq.db.admin.username>rhqadmin</rhq.db.admin.username>
commit 3783018826b0ebbe069751842cde15e75bc29bb2 Author: Jirka Kremser jkremser@redhat.com Date: Wed Jul 10 18:44:17 2013 +0200
[BZ 966293] - Add validation for expression when creating Dynagroup Definition - adding a check to GroupDefinitionManagerBean.validate() method. Also making sure the right message is correctly propagated to the web UI.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/definitions/GroupDefinitionDataSource.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/definitions/GroupDefinitionDataSource.java index 74484dd..fed6e32 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/definitions/GroupDefinitionDataSource.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/definitions/GroupDefinitionDataSource.java @@ -130,7 +130,13 @@ public class GroupDefinitionDataSource extends RPCDataSource<GroupDefinition, Re @Override public void onFailure(Throwable caught) { Map<String, String> errors = new HashMap<String, String>(); - errors.put("name", MSG.view_dynagroup_definitionAlreadyExists()); + String msg = caught.getMessage(); + String cannotParse = "Cannot parse the expression: "; + if (msg != null && msg.contains(cannotParse)) { + errors.put("expression", msg.substring(msg.indexOf(cannotParse) + cannotParse.length())); + } else { + errors.put("name", MSG.view_dynagroup_definitionAlreadyExists()); + } response.setErrors(errors); response.setStatus(RPCResponse.STATUS_VALIDATION_ERROR); processResponse(request.getRequestId(), response); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/definitions/SingleGroupDefinitionView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/definitions/SingleGroupDefinitionView.java index 1afa30d..7b36717 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/definitions/SingleGroupDefinitionView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/definitions/SingleGroupDefinitionView.java @@ 
-199,6 +199,7 @@ public class SingleGroupDefinitionView extends EnhancedVLayout implements Bookma @Override public void execute(DSResponse response, Object rawData, DSRequest request) { boolean hasDuplicateNameError = false; + boolean hasParseExpressionError = false; if (form.isNewRecord()) { Record[] results = response.getData(); if (results.length != 1) { @@ -212,10 +213,14 @@ public class SingleGroupDefinitionView extends EnhancedVLayout implements Bookma String errorValue = (String) thisEntry.getValue(); CoreGUI.getErrorHandler().handleError(errorValue); hasDuplicateNameError = true; + } else if (fieldKey.equals("expression")) { + String errorValue = (String) thisEntry.getValue(); + CoreGUI.getErrorHandler().handleError(errorValue); + hasParseExpressionError = true; } }
- if (!hasDuplicateNameError) { + if (!hasDuplicateNameError && !hasParseExpressionError) { CoreGUI.getErrorHandler().handleError( MSG.view_dynagroup_singleSaveFailure(String.valueOf(results.length))); } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/definition/GroupDefinitionManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/definition/GroupDefinitionManagerBean.java index a276efb..373368c 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/definition/GroupDefinitionManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/definition/GroupDefinitionManagerBean.java @@ -175,11 +175,6 @@ public class GroupDefinitionManagerBean implements GroupDefinitionManagerLocal, throw new GroupDefinitionUpdateException(gde.getMessage()); }
- ExpressionEvaluator evaluator = new ExpressionEvaluator(); - for (String expression : groupDefinition.getExpressionAsList()) { - evaluator.addExpression(expression); - } - RecursivityChangeType changeType = RecursivityChangeType.None; GroupDefinition attachedGroupDefinition = null; try { @@ -251,6 +246,15 @@ public class GroupDefinitionManagerBean implements GroupDefinitionManagerLocal, if (definition.getExpression() == null || definition.getExpression().isEmpty()) { throw new GroupDefinitionException("Expression is empty"); } + + try { + ExpressionEvaluator evaluator = new ExpressionEvaluator(); + for (String expression : definition.getExpressionAsList()) { + evaluator.addExpression(expression); + } + } catch (InvalidExpressionException e) { + throw new GroupDefinitionException("Cannot parse the expression: " + e.getMessage()); + }
Query query = entityManager.createNamedQuery(GroupDefinition.QUERY_FIND_BY_NAME); query.setParameter("name", name);
commit 0d1662bbbec85dbbb820cc8e778963015027caa2 Author: Jirka Kremser jkremser@redhat.com Date: Wed Jul 10 17:33:48 2013 +0200
[BZ 976265] - Unable to remove one dynagroup definition - Annotating method GroupDefinitionManagerBean.updateGroupDefinition() with @TransactionAttribute.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/definition/GroupDefinitionManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/definition/GroupDefinitionManagerBean.java index bb425f6..a276efb 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/definition/GroupDefinitionManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/definition/GroupDefinitionManagerBean.java @@ -162,6 +162,8 @@ public class GroupDefinitionManagerBean implements GroupDefinitionManagerLocal, }
@RequiredPermission(Permission.MANAGE_INVENTORY) + @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) + // required for the recalculation thread (same like calculateGroupMembership) this fixes BZ 976265 public GroupDefinition updateGroupDefinition(Subject subject, GroupDefinition groupDefinition) throws GroupDefinitionAlreadyExistsException, GroupDefinitionUpdateException, InvalidExpressionException, ResourceGroupUpdateException {
commit 627c48a7a18a03d9a91032a32c1833d4fcbae86a Author: Jirka Kremser jkremser@redhat.com Date: Wed Jul 10 17:10:04 2013 +0200
[BZ 974501] - It's possible to create a dynaGroup definition without expression via CLI - check whether expression is null or an empty string
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/definition/GroupDefinitionManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/definition/GroupDefinitionManagerBean.java index bb2587f..bb425f6 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/definition/GroupDefinitionManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/definition/GroupDefinitionManagerBean.java @@ -230,27 +230,25 @@ public class GroupDefinitionManagerBean implements GroupDefinitionManagerLocal, if (name.equals("")) { throw new GroupDefinitionException("Name is a required property"); } - if (name.length() > 100) { throw new GroupDefinitionException("Name is limited to 100 characters"); } - if (description.length() > 100) { throw new GroupDefinitionException("Description is limited to 100 characters"); } - if (name.contains("<") || name.contains("$") || name.contains("'") || name.contains("{") || name.contains("[")) { throw new GroupDefinitionException("Name must not contain <,$,',[,{ characters"); } - if (definition.getRecalculationInterval() < 0) { throw new GroupDefinitionException("Recalculation interval cannot be negative"); } - if (definition.getRecalculationInterval() > 0 && definition.getRecalculationInterval() < 60 * 1000) { throw new GroupDefinitionException( "Recalculation interval cannot be a positive number lower than 1 minute (60000ms)"); } + if (definition.getExpression() == null || definition.getExpression().isEmpty()) { + throw new GroupDefinitionException("Expression is empty"); + }
Query query = entityManager.createNamedQuery(GroupDefinition.QUERY_FIND_BY_NAME); query.setParameter("name", name);
commit d1f030ca0033ce7d7a7ea4ab223214789118de99 Author: John Sanda jsanda@redhat.com Date: Wed Jul 10 10:48:16 2013 -0400
add some basic argument validation
The script now takes as arguments the rhq server directory, the ip address of the storage node, the cql port, and the jmx port.
diff --git a/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.sh b/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.sh index 756ba20..947155f 100755 --- a/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.sh +++ b/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.sh @@ -1,14 +1,23 @@ #!/bin/bash
-if [ "x$1" = "x" ]; then - echo "Usage: $0 <rhq-server-dir>" +function usage() { + echo "Usage: $0 <rhq-server-dir> <storage-ip-address> <cql-port> <jmx-port>" +} + +if [ $# -ne 4 ]; then + usage + exit 1 +fi + +if [[ "x$1" = "x" ]] || [[ "x$2" = "x" ]] || [[ "x$3" = "x" ]] || [[ "x$4" = "x" ]]; then + usage exit 1 fi
RHQ_SERVER_DIR=$1 CQL_HOSTNAME=$2 -CQL_PORT=9142 -JMX_PORT=7299 +CQL_PORT=$3 +JMX_PORT=$4
PATCH="apache-cassandra-1.2.4-patch-1.jar"
commit 08f7934eecde748578f075d26018fcc348693fae Author: Jirka Kremser jkremser@redhat.com Date: Wed Jul 10 16:44:00 2013 +0200
[BZ 977782] - Incorrect error message is shown when creating a new dynaGroup definition with incorrect name - added regexp validator to the coregui.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/definitions/SingleGroupDefinitionView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/definitions/SingleGroupDefinitionView.java index 99e0192..1afa30d 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/definitions/SingleGroupDefinitionView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/definitions/SingleGroupDefinitionView.java @@ -56,6 +56,8 @@ import com.smartgwt.client.widgets.form.fields.events.ChangedEvent; import com.smartgwt.client.widgets.form.fields.events.ChangedHandler; import com.smartgwt.client.widgets.form.fields.events.FormItemClickHandler; import com.smartgwt.client.widgets.form.fields.events.FormItemIconClickEvent; +import com.smartgwt.client.widgets.form.validator.RegExpValidator; +import com.smartgwt.client.widgets.form.validator.Validator; import com.smartgwt.client.widgets.grid.CellFormatter; import com.smartgwt.client.widgets.grid.HoverCustomizer; import com.smartgwt.client.widgets.grid.ListGrid; @@ -82,10 +84,10 @@ import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; import org.rhq.enterprise.gui.coregui.client.inventory.groups.ResourceGroupsDataSource; import org.rhq.enterprise.gui.coregui.client.inventory.groups.definitions.GroupDefinitionExpressionBuilder.AddExpressionHandler; import org.rhq.enterprise.gui.coregui.client.util.StringUtility; -import org.rhq.enterprise.gui.coregui.client.util.message.Message; -import org.rhq.enterprise.gui.coregui.client.util.message.Message.Severity; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedIButton; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; +import org.rhq.enterprise.gui.coregui.client.util.message.Message; +import 
org.rhq.enterprise.gui.coregui.client.util.message.Message.Severity;
/** * @author Joseph Marques @@ -136,6 +138,10 @@ public class SingleGroupDefinitionView extends EnhancedVLayout implements Bookma description.setValue(groupDefinition.getDescription()); recalculationInterval.setValue(groupDefinition.getRecalculationInterval() / (60 * 1000)); expression.setValue(groupDefinition.getExpression()); + + Validator nameValidator = new RegExpValidator("^[^\<\$\'\{\[]{1,100}$"); + nameValidator.setErrorMessage("Name must not contain following characters: < $ ' [ {"); + name.setValidators(nameValidator);
final DynamicForm form = new DynamicForm(); form.setFields(id, name, description, templateSelectorTitleSpacer, templateSelector, expression, recursive,
commit fa8e8225c6777c5d7e57c25aae3b80a180cbc58b Author: Jirka Kremser jkremser@redhat.com Date: Wed Jul 10 14:54:21 2013 +0200
[BZ 975502] - It's possible to create a dynaGroup definition with recalculation interval < 1 minute via CLI - added simple checks to the GroupDefinitionManagerBean.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/definition/GroupDefinitionManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/definition/GroupDefinitionManagerBean.java index 8713928..bb2587f 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/definition/GroupDefinitionManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/definition/GroupDefinitionManagerBean.java @@ -242,6 +242,15 @@ public class GroupDefinitionManagerBean implements GroupDefinitionManagerLocal, if (name.contains("<") || name.contains("$") || name.contains("'") || name.contains("{") || name.contains("[")) { throw new GroupDefinitionException("Name must not contain <,$,',[,{ characters"); } + + if (definition.getRecalculationInterval() < 0) { + throw new GroupDefinitionException("Recalculation interval cannot be negative"); + } + + if (definition.getRecalculationInterval() > 0 && definition.getRecalculationInterval() < 60 * 1000) { + throw new GroupDefinitionException( + "Recalculation interval cannot be a positive number lower than 1 minute (60000ms)"); + }
Query query = entityManager.createNamedQuery(GroupDefinition.QUERY_FIND_BY_NAME); query.setParameter("name", name);
commit d61c77462f80bf1f8164341e58783ab75d3ff5f0 Author: Stefan Negrea snegrea@redhat.com Date: Wed Jul 10 09:17:57 2013 -0500
AllDataFileLocations is not a metric, so there is no need to change the collection interval.
diff --git a/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java b/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java index 1cff654..dcfdc37 100644 --- a/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java +++ b/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java @@ -62,7 +62,6 @@ public class AlertDefinitionServerPluginComponent implements ServerPluginCompone private final Log log = LogFactory.getLog(AlertDefinitionServerPluginComponent.class);
private static final String PARTITION_DISK_USED_PERCENTAGE_METRIC_NAME = "Calculated.PartitionDiskUsedPercentage"; - private static final String DATA_FILE_LOCATIONS_NAME = "AllDataFileLocations";
static private final List<InjectedTemplate> injectedTemplates; static private final InjectedTemplate storageNodeHighHeapTemplate; @@ -309,8 +308,6 @@ public class AlertDefinitionServerPluginComponent implements ServerPluginCompone measurementDefinitionIds.add(d.getId()); ac.setMeasurementDefinition(d); ac.setName(d.getDisplayName()); - } else if (DATA_FILE_LOCATIONS_NAME.equals(d.getName())) { - measurementDefinitionIds.add(d.getId()); } } assert null != ac.getMeasurementDefinition() : "Did not find expected measurement definition " @@ -331,7 +328,8 @@ public class AlertDefinitionServerPluginComponent implements ServerPluginCompone // is set by default. MeasurementScheduleManagerLocal measurementManager = LookupUtil.getMeasurementScheduleManager(); measurementManager.updateDefaultCollectionIntervalAndEnablementForMeasurementDefinitions( - subjectManager.getOverlord(), ArrayUtils.toPrimitive(measurementDefinitionIds.toArray(new Integer[2])), + subjectManager.getOverlord(), + ArrayUtils.toPrimitive(measurementDefinitionIds.toArray(new Integer[measurementDefinitionIds.size()])), 60000L, true, true);
return newTemplateId;
commit 5b0415a3703ce56bbc27fd348c9808e446207fed Author: Thomas Segismont tsegismo@redhat.com Date: Wed Jul 10 15:52:07 2013 +0200
Bug 910646 - Unable to add more than 100 resources to a Compatible Group
Add integration test and throw UnsupportedOperationException if criteria has unlimited PageControl
diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/resource/test/GroupMemberCandidateResourcesTest.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/resource/test/GroupMemberCandidateResourcesTest.java new file mode 100644 index 0000000..3447799 --- /dev/null +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/resource/test/GroupMemberCandidateResourcesTest.java @@ -0,0 +1,190 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +package org.rhq.enterprise.server.resource.test; + +import static org.rhq.core.domain.util.PageOrdering.ASC; + +import java.util.Arrays; + +import javax.ejb.EJBException; + +import org.testng.annotations.Test; + +import org.rhq.core.domain.auth.Subject; +import org.rhq.core.domain.criteria.ResourceCriteria; +import org.rhq.core.domain.resource.Resource; +import org.rhq.core.domain.resource.ResourceType; +import org.rhq.core.domain.util.PageControl; +import org.rhq.core.domain.util.PageList; +import org.rhq.enterprise.server.auth.SubjectManagerLocal; +import org.rhq.enterprise.server.resource.ResourceManagerLocal; +import org.rhq.enterprise.server.test.AbstractEJB3Test; +import org.rhq.enterprise.server.test.TransactionCallback; +import org.rhq.enterprise.server.util.LookupUtil; +import org.rhq.enterprise.server.util.SessionTestHelper; + +/** + * @author Thomas Segismont + */ +public class GroupMemberCandidateResourcesTest extends AbstractEJB3Test { + + private static final String GROUP_MEMBER_CANDIDATE_NAME_PREFIX = GroupMemberCandidateResourcesTest.class + .getSimpleName() + "-"; + + private Subject overlord; + + private ResourceManagerLocal resourceManager; + + @Override + protected void beforeMethod() throws Exception { + SubjectManagerLocal subjectManager = LookupUtil.getSubjectManager(); + overlord = subjectManager.getOverlord(); + resourceManager = LookupUtil.getResourceManager(); + } + + @Test + public void testNoCandidates() { + executeInTransaction(new TransactionCallback() { + @Override + public void execute() throws Exception { + ResourceCriteria criteria = new ResourceCriteria(); + criteria.setCaseSensitive(false); + criteria.addFilterName(GROUP_MEMBER_CANDIDATE_NAME_PREFIX); + + PageList<Resource> groupMemberCandidateResources = 
resourceManager.findGroupMemberCandidateResources( + overlord, criteria, new int[0]); + + assertEquals(0, groupMemberCandidateResources.getTotalSize()); + assertEquals(0, groupMemberCandidateResources.size()); + } + }); + } + + @Test + public void testNoCandidatesWithExclusions() { + executeInTransaction(new TransactionCallback() { + @Override + public void execute() throws Exception { + ResourceCriteria criteria = new ResourceCriteria(); + criteria.setCaseSensitive(false); + criteria.addFilterName(GROUP_MEMBER_CANDIDATE_NAME_PREFIX); + int[] alreadySelectedResourceIds = new int[100]; + for (int i = 0; i < alreadySelectedResourceIds.length; i++) { + alreadySelectedResourceIds[i] = i; + } + + PageList<Resource> groupMemberCandidateResources = resourceManager.findGroupMemberCandidateResources( + overlord, criteria, alreadySelectedResourceIds); + + assertEquals(0, groupMemberCandidateResources.getTotalSize()); + assertEquals(0, groupMemberCandidateResources.size()); + } + }); + } + + @Test + public void testUnlimitedPageControl() { + executeInTransaction(new TransactionCallback() { + @Override + public void execute() throws Exception { + ResourceCriteria criteria = new ResourceCriteria(); + criteria.setPageControl(PageControl.getUnlimitedInstance()); + + try { + PageList<Resource> groupMemberCandidateResources = resourceManager + .findGroupMemberCandidateResources(overlord, criteria, new int[0]); + fail("findGroupMemberCandidateResources should throw UnsupportedOperationException"); + } catch (EJBException e) { + assertTrue(e.getCausedByException() instanceof UnsupportedOperationException); + } + } + }); + } + + @Test + public void testPageControl() { + executeInTransaction(new TransactionCallback() { + @Override + public void execute() throws Exception { + int resourcesCount = 100; + createTestResources(resourcesCount); + ResourceCriteria criteria = new ResourceCriteria(); + criteria.setCaseSensitive(false); + 
criteria.addFilterName(GROUP_MEMBER_CANDIDATE_NAME_PREFIX); + criteria.addSortName(ASC); + int pageNumber = 5; + int pageSize = 7; + criteria.setPaging(pageNumber, pageSize); + + PageList<Resource> groupMemberCandidateResources = resourceManager.findGroupMemberCandidateResources( + overlord, criteria, new int[0]); + + assertEquals(resourcesCount, groupMemberCandidateResources.getTotalSize()); + assertEquals(pageSize, groupMemberCandidateResources.size()); + for (Resource groupMemberCandidateResource : groupMemberCandidateResources) { + assertTrue(groupMemberCandidateResource.getName().startsWith(GROUP_MEMBER_CANDIDATE_NAME_PREFIX)); + } + assertEquals(pageNumber, groupMemberCandidateResources.getPageControl().getPageNumber()); + assertEquals(pageSize, groupMemberCandidateResources.getPageControl().getPageSize()); + } + }); + } + + @Test + public void testPageControlWithExclusions() { + executeInTransaction(new TransactionCallback() { + @Override + public void execute() throws Exception { + int resourcesCount = 100; + int[] resourcesIds = createTestResources(resourcesCount); + ResourceCriteria criteria = new ResourceCriteria(); + criteria.setCaseSensitive(false); + criteria.addFilterName(GROUP_MEMBER_CANDIDATE_NAME_PREFIX); + criteria.addSortName(ASC); + int pageNumber = 5; + int pageSize = 7; + criteria.setPaging(pageNumber, pageSize); + int[] alreadySelectedResourceIds = Arrays.copyOfRange(resourcesIds, 13, 59); + + PageList<Resource> groupMemberCandidateResources = resourceManager.findGroupMemberCandidateResources( + overlord, criteria, alreadySelectedResourceIds); + + assertEquals(resourcesCount, groupMemberCandidateResources.getTotalSize()); + assertEquals(pageSize, groupMemberCandidateResources.size()); + for (Resource groupMemberCandidateResource : groupMemberCandidateResources) { + assertTrue(groupMemberCandidateResource.getName().startsWith(GROUP_MEMBER_CANDIDATE_NAME_PREFIX)); + } + assertEquals(pageNumber, 
groupMemberCandidateResources.getPageControl().getPageNumber()); + assertEquals(pageSize, groupMemberCandidateResources.getPageControl().getPageSize()); + } + }); + } + + private int[] createTestResources(int resourcesCount) { + int[] resourcesIds = new int[resourcesCount]; + ResourceType testResourceType = SessionTestHelper.createNewResourceType(em); + for (int i = 0; i < resourcesCount; i++) { + resourcesIds[i] = SessionTestHelper.createNewResource(em, GROUP_MEMBER_CANDIDATE_NAME_PREFIX + i, + testResourceType).getId(); + } + return resourcesIds; + } + +} diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/ResourceManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/ResourceManagerBean.java index 0c8594d..0aa09a4 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/ResourceManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/ResourceManagerBean.java @@ -2930,10 +2930,14 @@ public class ResourceManagerBean implements ResourceManagerLocal, ResourceManage public PageList<Resource> findGroupMemberCandidateResources(Subject subject, ResourceCriteria criteria, int[] alreadySelectedResourceIds) {
+ PageControl originalPageControl = getPageControl(criteria); + if (originalPageControl.isUnlimited()) { + throw new UnsupportedOperationException("Supplied criteria has an unlimited PageControl"); + } + Set<Integer> alreadySelectedResourceIdSet = new HashSet<Integer>( - ArrayUtils.wrapInList(alreadySelectedResourceIds)); + ArrayUtils.wrapInList(alreadySelectedResourceIds == null ? new int[0] : alreadySelectedResourceIds));
- PageControl originalPageControl = getPageControl(criteria); PageControl pageControl = (PageControl) originalPageControl.clone(); criteria.setPageControl(pageControl);
commit 07e921660b7336a96593c71b3bb57624f7645396 Author: Heiko W. Rupp hwr@redhat.com Date: Wed Jul 10 09:05:05 2013 +0200
Don't bail out on empty plugin description. Bump default pom version
diff --git a/modules/helpers/pluginGen/src/main/resources/descriptor.ftl b/modules/helpers/pluginGen/src/main/resources/descriptor.ftl index a62af11..85195e8 100644 --- a/modules/helpers/pluginGen/src/main/resources/descriptor.ftl +++ b/modules/helpers/pluginGen/src/main/resources/descriptor.ftl @@ -22,7 +22,11 @@ <?xml version="1.0"?> <plugin name="${props.pluginName}" displayName="${props.pluginName}Plugin" +<#if props.pluginDescription??> description="${props.pluginDescription}" +<#else> + description="TODO provide a description for the plugin" +</#if> <#if props.usePluginLifecycleListenerApi> pluginLifecycleListener="${props.componentClass}" </#if> diff --git a/modules/helpers/pluginGen/src/main/resources/pom.ftl b/modules/helpers/pluginGen/src/main/resources/pom.ftl index 2434ddd..01daca1 100644 --- a/modules/helpers/pluginGen/src/main/resources/pom.ftl +++ b/modules/helpers/pluginGen/src/main/resources/pom.ftl @@ -31,7 +31,7 @@ <parent> <groupId>org.rhq</groupId> <artifactId>rhq-plugins-parent</artifactId> - <version><#if props.rhqVersion??>${props.rhqVersion}<#else >4.5.0-SNAPSHOT</#if></version><!-- TODO adjust RHQ version --> + <version><#if props.rhqVersion??>${props.rhqVersion}<#else >4.9.0-SNAPSHOT</#if></version><!-- TODO adjust RHQ version --> </parent>
<groupId>org.rhq</groupId> @@ -49,8 +49,8 @@ <plugin> <artifactId>maven-compiler-plugin</artifactId> <configuration> - <source>1.5</source> - <target>1.5</target> + <source>1.6</source> + <target>1.6</target> </configuration> </plugin>
commit 206bc37502ef973145cf689b3a077722bb8bf2e9 Author: Heiko W. Rupp hwr@redhat.com Date: Wed Jul 10 08:49:08 2013 +0200
BZ 981809 Disable compression when wrapping. Also some cleanup in the file.
diff --git a/modules/enterprise/gui/rest-war/src/main/java/org/rhq/enterprise/rest/JsonPFilter.java b/modules/enterprise/gui/rest-war/src/main/java/org/rhq/enterprise/rest/JsonPFilter.java index 3a785fd..8d43f03 100644 --- a/modules/enterprise/gui/rest-war/src/main/java/org/rhq/enterprise/rest/JsonPFilter.java +++ b/modules/enterprise/gui/rest-war/src/main/java/org/rhq/enterprise/rest/JsonPFilter.java @@ -1,6 +1,6 @@ /* * RHQ Management Platform - * Copyright (C) 2005-2012 Red Hat, Inc. + * Copyright (C) 2005-2013 Red Hat, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify @@ -13,8 +13,8 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
package org.rhq.enterprise.rest; @@ -45,6 +45,9 @@ import javax.servlet.http.HttpServletResponseWrapper;
/** * A filter to wrap json answers as jsonp + * For this to happen, the user has to pass ?<filter.jsonp.callback>=<name> in the url like + * <pre>http://localhost:7080/rest/metric/data/10001/raw.json?jsonp=foo</pre> + * The <filter.jsonp.callback> is defined in web.xml and defaults to 'jsonp'. * @author Heiko W. Rupp */ public class JsonPFilter implements Filter { @@ -132,7 +135,6 @@ public class JsonPFilter implements Filter { private static class JsonPRequestWrapper extends HttpServletRequestWrapper { int contentLength; BufferedReader reader; - String method; ByteArrayInputStream bais; Map<String, String> headers = new HashMap<String, String>();
@@ -147,6 +149,11 @@ public class JsonPFilter implements Filter { Enumeration headers = request.getHeaderNames(); while (headers.hasMoreElements()) { String key = (String) headers.nextElement(); + if (key.equalsIgnoreCase("Accept-Encoding")) { + // Filter Content codings like compression, as we would end up + // with compressed inner data and uncompressed wrapper + continue; + } String value = request.getHeader(key); this.headers.put(key, value); } @@ -157,14 +164,6 @@ public class JsonPFilter implements Filter { }
-/* - public void setBody(String body) { - bais = new ByteArrayInputStream(body.getBytes()); - contentLength = body.length(); - headers.put("content-length", Integer.toString(contentLength)); - } -*/ - @Override public BufferedReader getReader() throws IOException { reader = new BufferedReader(new InputStreamReader(bais));
commit 1d75e54e136107c19d0b1163feb69e64993f7944 Author: Heiko W. Rupp hwr@redhat.com Date: Tue Jul 9 17:46:46 2013 +0200
Bump pom versions. Allow to start the generator via mvn exec:java
diff --git a/modules/helpers/pluginAnnotations/pom.xml b/modules/helpers/pluginAnnotations/pom.xml index be5e976..7ce1859 100644 --- a/modules/helpers/pluginAnnotations/pom.xml +++ b/modules/helpers/pluginAnnotations/pom.xml @@ -7,14 +7,14 @@ <parent> <groupId>org.rhq</groupId> <artifactId>rhq-parent</artifactId> - <version>4.8.0-SNAPSHOT</version> + <version>4.9.0-SNAPSHOT</version> <relativePath>../../../pom.xml</relativePath> </parent>
<groupId>org.rhq.helpers</groupId> <artifactId>rhq-pluginAnnotations</artifactId> <packaging>jar</packaging> - <version>4.8.0-SNAPSHOT</version> + <version>4.9.0-SNAPSHOT</version>
<name>RHQ plugin annotations</name> <description>Annotations to help generate plugin descriptors</description> diff --git a/modules/helpers/pluginGen/pom.xml b/modules/helpers/pluginGen/pom.xml index fb2cce6..e9a5e29 100644 --- a/modules/helpers/pluginGen/pom.xml +++ b/modules/helpers/pluginGen/pom.xml @@ -7,14 +7,14 @@ <parent> <groupId>org.rhq</groupId> <artifactId>rhq-parent</artifactId> - <version>4.8.0-SNAPSHOT</version> + <version>4.9.0-SNAPSHOT</version> <relativePath>../../../pom.xml</relativePath> </parent>
<groupId>org.rhq.helpers</groupId> <artifactId>rhq-pluginGen</artifactId> <packaging>jar</packaging> - <version>4.8.0-SNAPSHOT</version> + <version>4.9.0-SNAPSHOT</version>
<name>RHQ plugin generator</name> <description>Helper to generate plugin skeletons</description> @@ -62,6 +62,22 @@ <version>2.1</version> </plugin>
+ <plugin> + <groupId>org.codehaus.mojo</groupId> + <artifactId>exec-maven-plugin</artifactId> + <version>1.2.1</version> + <executions> + <execution> + <goals> + <goal>java</goal> + </goals> + </execution> + </executions> + <configuration> + <mainClass>org.rhq.helpers.pluginGen.PluginGen</mainClass> + </configuration> + </plugin> + </plugins>
</build>
commit fd4316e643321b5571389e5707771e69722e2c13 Author: Mike Thompson mithomps@redhat.com Date: Tue Jul 9 14:19:41 2013 -0700
[BZ 924725] Graph refresh also refreshes screen behind graph dialog.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3GraphListView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3GraphListView.java index 0e63274..204d2ec 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3GraphListView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3GraphListView.java @@ -142,7 +142,6 @@ public abstract class CompositeGroupD3GraphListView extends EnhancedVLayout impl Log.warn("# of charted graphs: " + measurementForEachResource.size()); } drawGraph(); - redraw(); } });
commit 2fc86b70b19ae7aea2f25bb52c0318dd40eb08eb Author: Mike Thompson mithomps@redhat.com Date: Tue Jul 9 14:11:35 2013 -0700
On multi-resource graph normalize units to highest scale (ex. GB instead of MB units if possible).
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3GraphListView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3GraphListView.java index a5a0c9d..0e63274 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3GraphListView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3GraphListView.java @@ -32,6 +32,7 @@ import org.rhq.core.domain.criteria.ResourceGroupCriteria; import org.rhq.core.domain.measurement.MeasurementDefinition; import org.rhq.core.domain.measurement.MeasurementUnits; import org.rhq.core.domain.measurement.composite.MeasurementDataNumericHighLowComposite; +import org.rhq.core.domain.measurement.composite.MeasurementNumericValueAndUnits; import org.rhq.core.domain.resource.Resource; import org.rhq.core.domain.resource.ResourceType; import org.rhq.core.domain.resource.group.ResourceGroup; @@ -47,6 +48,7 @@ import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.ButtonBarDat import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.RedrawGraphs; import org.rhq.enterprise.gui.coregui.client.inventory.resource.type.ResourceTypeRepository; import org.rhq.enterprise.gui.coregui.client.util.Log; +import org.rhq.enterprise.gui.coregui.client.util.MeasurementConverterClient; import org.rhq.enterprise.gui.coregui.client.util.async.Command; import org.rhq.enterprise.gui.coregui.client.util.async.CountDownLatch; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedHLayout; @@ -77,6 +79,7 @@ public abstract class CompositeGroupD3GraphListView extends EnhancedVLayout impl private MeasurementDefinition definition; private 
MeasurementUserPreferences measurementUserPreferences; private ButtonBarDateTimeRangeEditor buttonBarDateTimeRangeEditor; + private String adjustedMeasurementUnits; /** * measurementForEachResource is a list of a list of single Measurement data for multiple resources. */ @@ -292,9 +295,7 @@ public abstract class CompositeGroupD3GraphListView extends EnhancedVLayout impl return definition.getName(); }
- public String getYAxisUnits() { - return definition.getUnits().toString(); - } +
public String getXAxisTitle() { return MSG.view_charts_time_axis_label(); @@ -353,7 +354,11 @@ public abstract class CompositeGroupD3GraphListView extends EnhancedVLayout impl for (MeasurementDataNumericHighLowComposite measurement : measurementList) { if (!Double.isNaN(measurement.getValue())) { sb.append("{ "x":" + measurement.getTimestamp() + ","); - sb.append(" "y":" + MeasurementUnits.scaleUp(measurement.getValue(), definition.getUnits()) + "},"); + MeasurementNumericValueAndUnits dataValue = normalizeUnitsAndValues(measurement.getValue(), + definition.getUnits()); + //sb.append(" "y":" + MeasurementUnits.scaleUp(measurement.getValue(), definition.getUnits()) + "},"); + sb.append(" "y":" + dataValue.getValue() + "},"); + adjustedMeasurementUnits = dataValue.getUnits().toString(); } } sb.setLength(sb.length() - 1); // delete the last ',' @@ -380,6 +385,29 @@ public abstract class CompositeGroupD3GraphListView extends EnhancedVLayout impl return sb.toString(); }
+ protected MeasurementNumericValueAndUnits normalizeUnitsAndValues(double value, MeasurementUnits measurementUnits) { + MeasurementNumericValueAndUnits newValue = MeasurementConverterClient.fit(value, measurementUnits); + MeasurementNumericValueAndUnits returnValue; + + // adjust for percentage numbers + if (measurementUnits.equals(MeasurementUnits.PERCENTAGE)) { + returnValue = new MeasurementNumericValueAndUnits(newValue.getValue() * 100, newValue.getUnits()); + } else { + returnValue = new MeasurementNumericValueAndUnits(newValue.getValue(), newValue.getUnits()); + } + + return returnValue; + } + + public String getYAxisUnits() { + if (adjustedMeasurementUnits == null) { + Log.warn("ResourceMetricD3GraphView.adjustedMeasurementUnits is populated by getJsonMetrics. Make sure it is called first."); + return ""; + } else { + return adjustedMeasurementUnits; + } + } + protected String getXAxisTimeFormatHoursMinutes() { return MSG.chart_xaxis_time_format_hours_minutes(); }
commit 746007a5df10d15da2e42891b9921ef3d7fc1bcc Author: Mike Thompson mithomps@redhat.com Date: Tue Jul 9 13:33:59 2013 -0700
[BZ 980014] Syntax error for Empty group charts.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/MetricGraphData.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/MetricGraphData.java index fca4e80..ecd3b46 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/MetricGraphData.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/MetricGraphData.java @@ -294,9 +294,10 @@ public class MetricGraphData implements JsonMetricProducer { * @todo: future: this should really use GSON or some Json marshaller */ public String getJsonMetrics() { - StringBuilder sb = new StringBuilder("["); + StringBuilder sb = new StringBuilder(); boolean gotAdjustedMeasurementUnits = false; if (null != metricData) { + sb = new StringBuilder("["); long firstBarTime = metricData.get(0).getTimestamp(); long secondBarTime = metricData.get(1).getTimestamp(); long barDuration = secondBarTime - firstBarTime; @@ -341,8 +342,8 @@ public class MetricGraphData implements JsonMetricProducer { } } sb.setLength(sb.length() - 1); // delete the last ',' + sb.append("]"); } - sb.append("]"); Log.debug("Json data for: "+getChartTitle()); Log.debug(sb.toString()); return sb.toString();
commit e4acf0fc299e0c37173cf70f573f583bcb81a714 Author: Mike Thompson mithomps@redhat.com Date: Tue Jul 9 13:02:08 2013 -0700
Remove nvd3.js charting library in favor of base d3.js.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupNvD3MultiLineGraph.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupNvD3MultiLineGraph.java deleted file mode 100644 index d2ae7ef..0000000 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupNvD3MultiLineGraph.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * RHQ Management Platform - * Copyright (C) 2005-2012 Red Hat, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ -package org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table; - - - -/** - * A MultiLine version of the Composite group single metric multiple resource charts. 
- * - * @author Mike Thompson - */ -@Deprecated -public final class CompositeGroupNvD3MultiLineGraph extends CompositeGroupD3GraphListView -{ - - public CompositeGroupNvD3MultiLineGraph(int groupId, int defId, boolean isAutogroup) - { - super(groupId, defId, isAutogroup); - } - - - - @Override - public native void drawJsniChart() /*-{ - console.log("Draw nvd3 charts for composite multiline graph"); - var chartId = this.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getChartId()(), - chartHandle = "#mChart-"+chartId, - chartSelection = chartHandle + " svg", - yAxisUnits = this.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getYAxisUnits()(), - xAxisLabel = this.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getXAxisTitle()(), - xAxisTimeFormat = this.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3MultiLineGraph::getXAxisTimeFormatHoursMinutes()(); - json = eval(this.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getJsonMetrics()()); - - $wnd.nv.addGraph(function() { - var chart = $wnd.nv.models.lineChart(); - - chart.xAxis.axisLabel(xAxisLabel) - .tickFormat(function(d) { return $wnd.d3.time.format(xAxisTimeFormat)(new Date(d)) }); - - chart.yAxis - .axisLabel(yAxisUnits) - .tickFormat($wnd.d3.format('.02f')); - - $wnd.d3.select(chartSelection) - .datum(json) - .transition().duration(300) - .call(chart); - - $wnd.nv.utils.windowResize(chart.update); - - return chart; - }); - - }-*/; - - - -} diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/CoreGUI.gwt.xml b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/CoreGUI.gwt.xml index 51bec0a..8ef78e0 100644 --- 
a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/CoreGUI.gwt.xml +++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/CoreGUI.gwt.xml @@ -58,7 +58,6 @@ <script src="/coregui/js/jquery-1.7.2.min.js"/> <script src="/coregui/js/jquery.sparkline-2.1.min.js"/> <script src="/coregui/js/d3.v3.min.js"/> - <script src="/coregui/js/nv.d3.min.js"/> <script src="/coregui/js/jquery.tipsy.js"/>
diff --git a/modules/enterprise/gui/coregui/src/main/webapp/js/nv.d3.js b/modules/enterprise/gui/coregui/src/main/webapp/js/nv.d3.js deleted file mode 100755 index 62ca71e..0000000 --- a/modules/enterprise/gui/coregui/src/main/webapp/js/nv.d3.js +++ /dev/null @@ -1,13048 +0,0 @@ -(function(){ - -var nv = window.nv || {}; - -nv.version = '0.0.1a'; -nv.dev = true //set false when in production - -window.nv = nv; - -nv.tooltip = {}; // For the tooltip system -nv.utils = {}; // Utility subsystem -nv.models = {}; //stores all the possible models/components -nv.charts = {}; //stores all the ready to use charts -nv.graphs = []; //stores all the graphs currently on the page -nv.logs = {}; //stores some statistics and potential error messages - -nv.dispatch = d3.dispatch('render_start', 'render_end'); - -// ************************************************************************* -// Development render timers - disabled if dev = false - -if (nv.dev) { - nv.dispatch.on('render_start', function(e) { - nv.logs.startTime = +new Date(); - }); - - nv.dispatch.on('render_end', function(e) { - nv.logs.endTime = +new Date(); - nv.logs.totalTime = nv.logs.endTime - nv.logs.startTime; - nv.log('total', nv.logs.totalTime); // used for development, to keep track of graph generation times - }); -} - -// ******************************************** -// Public Core NV functions - -// Logs all arguments, and returns the last so you can test things in place -nv.log = function() { - if (nv.dev && console.log && console.log.apply) - console.log.apply(console, arguments) - else if (nv.dev && console.log && Function.prototype.bind) { - var log = Function.prototype.bind.call(console.log, console); - log.apply(console, arguments); - } - return arguments[arguments.length - 1]; -}; - - -nv.render = function render(step) { - step = step || 1; // number of graphs to generate in each timeout loop - - nv.render.active = true; - nv.dispatch.render_start(); - - setTimeout(function() { - var chart, graph; 
- - for (var i = 0; i < step && (graph = nv.render.queue[i]); i++) { - chart = graph.generate(); - if (typeof graph.callback == typeof(Function)) graph.callback(chart); - nv.graphs.push(chart); - } - - nv.render.queue.splice(0, i); - - if (nv.render.queue.length) setTimeout(arguments.callee, 0); - else { nv.render.active = false; nv.dispatch.render_end(); } - }, 0); -}; - -nv.render.active = false; -nv.render.queue = []; - -nv.addGraph = function(obj) { - if (typeof arguments[0] === typeof(Function)) - obj = {generate: arguments[0], callback: arguments[1]}; - - nv.render.queue.push(obj); - - if (!nv.render.active) nv.render(); -}; - -nv.identity = function(d) { return d; }; - -nv.strip = function(s) { return s.replace(/(\s|&)/g,''); }; - -function daysInMonth(month,year) { - return (new Date(year, month+1, 0)).getDate(); -} - -function d3_time_range(floor, step, number) { - return function(t0, t1, dt) { - var time = floor(t0), times = []; - if (time < t0) step(time); - if (dt > 1) { - while (time < t1) { - var date = new Date(+time); - if ((number(date) % dt === 0)) times.push(date); - step(time); - } - } else { - while (time < t1) { times.push(new Date(+time)); step(time); } - } - return times; - }; -} - -d3.time.monthEnd = function(date) { - return new Date(date.getFullYear(), date.getMonth(), 0); -}; - -d3.time.monthEnds = d3_time_range(d3.time.monthEnd, function(date) { - date.setUTCDate(date.getUTCDate() + 1); - date.setDate(daysInMonth(date.getMonth() + 1, date.getFullYear())); - }, function(date) { - return date.getMonth(); - } -); - - -/***** - * A no-frills tooltip implementation. - *****/ - - -(function() { - - var nvtooltip = window.nv.tooltip = {}; - - nvtooltip.show = function(pos, content, gravity, dist, parentContainer, classes) { - - var container = document.createElement('div'); - container.className = 'nvtooltip ' + (classes ? 
classes : 'xy-tooltip'); - - gravity = gravity || 's'; - dist = dist || 20; - - var body = parentContainer; - if ( !parentContainer || parentContainer.tagName.match(/g|svg/i)) { - //If the parent element is an SVG element, place tooltip in the <body> element. - body = document.getElementsByTagName('body')[0]; - } - - container.innerHTML = content; - container.style.left = 0; - container.style.top = 0; - container.style.opacity = 0; - - body.appendChild(container); - - var height = parseInt(container.offsetHeight), - width = parseInt(container.offsetWidth), - windowWidth = nv.utils.windowSize().width, - windowHeight = nv.utils.windowSize().height, - scrollTop = window.scrollY, - scrollLeft = window.scrollX, - left, top; - - windowHeight = window.innerWidth >= document.body.scrollWidth ? windowHeight : windowHeight - 16; - windowWidth = window.innerHeight >= document.body.scrollHeight ? windowWidth : windowWidth - 16; - - var tooltipTop = function ( Elem ) { - var offsetTop = top; - do { - if( !isNaN( Elem.offsetTop ) ) { - offsetTop += (Elem.offsetTop); - } - } while( Elem = Elem.offsetParent ); - return offsetTop; - } - - var tooltipLeft = function ( Elem ) { - var offsetLeft = left; - do { - if( !isNaN( Elem.offsetLeft ) ) { - offsetLeft += (Elem.offsetLeft); - } - } while( Elem = Elem.offsetParent ); - return offsetLeft; - } - - switch (gravity) { - case 'e': - left = pos[0] - width - dist; - top = pos[1] - (height / 2); - var tLeft = tooltipLeft(container); - var tTop = tooltipTop(container); - if (tLeft < scrollLeft) left = pos[0] + dist > scrollLeft ? 
pos[0] + dist : scrollLeft - tLeft + left; - if (tTop < scrollTop) top = scrollTop - tTop + top; - if (tTop + height > scrollTop + windowHeight) top = scrollTop + windowHeight - tTop + top - height; - break; - case 'w': - left = pos[0] + dist; - top = pos[1] - (height / 2); - if (tLeft + width > windowWidth) left = pos[0] - width - dist; - if (tTop < scrollTop) top = scrollTop + 5; - if (tTop + height > scrollTop + windowHeight) top = scrollTop - height - 5; - break; - case 'n': - left = pos[0] - (width / 2) - 5; - top = pos[1] + dist; - var tLeft = tooltipLeft(container); - var tTop = tooltipTop(container); - if (tLeft < scrollLeft) left = scrollLeft + 5; - if (tLeft + width > windowWidth) left = left - width/2 + 5; - if (tTop + height > scrollTop + windowHeight) top = scrollTop + windowHeight - tTop + top - height; - break; - case 's': - left = pos[0] - (width / 2); - top = pos[1] - height - dist; - var tLeft = tooltipLeft(container); - var tTop = tooltipTop(container); - if (tLeft < scrollLeft) left = scrollLeft + 5; - if (tLeft + width > windowWidth) left = left - width/2 + 5; - if (scrollTop > tTop) top = scrollTop; - break; - } - - - container.style.left = left+'px'; - container.style.top = top+'px'; - container.style.opacity = 1; - container.style.position = 'absolute'; //fix scroll bar issue - container.style.pointerEvents = 'none'; //fix scroll bar issue - - return container; - }; - - nvtooltip.cleanup = function() { - - // Find the tooltips, mark them for removal by this class (so others cleanups won't find it) - var tooltips = document.getElementsByClassName('nvtooltip'); - var purging = []; - while(tooltips.length) { - purging.push(tooltips[0]); - tooltips[0].style.transitionDelay = '0 !important'; - tooltips[0].style.opacity = 0; - tooltips[0].className = 'nvtooltip-pending-removal'; - } - - - setTimeout(function() { - - while (purging.length) { - var removeMe = purging.pop(); - removeMe.parentNode.removeChild(removeMe); - } - }, 500); - }; - - -})(); 
- -nv.utils.windowSize = function() { - // Sane defaults - var size = {width: 640, height: 480}; - - // Earlier IE uses Doc.body - if (document.body && document.body.offsetWidth) { - size.width = document.body.offsetWidth; - size.height = document.body.offsetHeight; - } - - // IE can use depending on mode it is in - if (document.compatMode=='CSS1Compat' && - document.documentElement && - document.documentElement.offsetWidth ) { - size.width = document.documentElement.offsetWidth; - size.height = document.documentElement.offsetHeight; - } - - // Most recent browsers use - if (window.innerWidth && window.innerHeight) { - size.width = window.innerWidth; - size.height = window.innerHeight; - } - return (size); -}; - - - -// Easy way to bind multiple functions to window.onresize -// TODO: give a way to remove a function after its bound, other than removing all of them -nv.utils.windowResize = function(fun){ - var oldresize = window.onresize; - - window.onresize = function(e) { - if (typeof oldresize == 'function') oldresize(e); - fun(e); - } -} - -// Backwards compatible way to implement more d3-like coloring of graphs. -// If passed an array, wrap it in a function which implements the old default -// behavior -nv.utils.getColor = function(color) { - if (!arguments.length) return nv.utils.defaultColor(); //if you pass in nothing, get default colors back - - if( Object.prototype.toString.call( color ) === '[object Array]' ) - return function(d, i) { return d.color || color[i % color.length]; }; - else - return color; - //can't really help it if someone passes rubbish as color -} - -// Default color chooser uses the index of an object as before. 
-nv.utils.defaultColor = function() { - var colors = d3.scale.category20().range(); - return function(d, i) { return d.color || colors[i % colors.length] }; -} - - -// Returns a color function that takes the result of 'getKey' for each series and -// looks for a corresponding color from the dictionary, -nv.utils.customTheme = function(dictionary, getKey, defaultColors) { - getKey = getKey || function(series) { return series.key }; // use default series.key if getKey is undefined - defaultColors = defaultColors || d3.scale.category20().range(); //default color function - - var defIndex = defaultColors.length; //current default color (going in reverse) - - return function(series, index) { - var key = getKey(series); - - if (!defIndex) defIndex = defaultColors.length; //used all the default colors, start over - - if (typeof dictionary[key] !== "undefined") - return (typeof dictionary[key] === "function") ? dictionary[key]() : dictionary[key]; - else - return defaultColors[--defIndex]; // no match in dictionary, use default color - } -} - - - -// From the PJAX example on d3js.org, while this is not really directly needed -// it's a very cool method for doing pjax, I may expand upon it a little bit, -// open to suggestions on anything that may be useful -nv.utils.pjax = function(links, content) { - d3.selectAll(links).on("click", function() { - history.pushState(this.href, this.textContent, this.href); - load(this.href); - d3.event.preventDefault(); - }); - - function load(href) { - d3.html(href, function(fragment) { - var target = d3.select(content).node(); - target.parentNode.replaceChild(d3.select(fragment).select(content).node(), target); - nv.utils.pjax(links, content); - }); - } - - d3.select(window).on("popstate", function() { - if (d3.event.state) load(d3.event.state); - }); -} - -/* For situations where we want to approximate the width in pixels for an SVG:text element. -Most common instance is when the element is in a display:none; container. 
-Forumla is : text.length * font-size * constant_factor -*/ -nv.utils.calcApproxTextWidth = function (svgTextElem) { - if (svgTextElem instanceof d3.selection) { - var fontSize = parseInt(svgTextElem.style("font-size").replace("px","")); - var textLength = svgTextElem.text().length; - - return textLength * fontSize * 0.5; - } - return 0; -}; -nv.models.axis = function() { - - //============================================================ - // Public Variables with Default Settings - //------------------------------------------------------------ - - var axis = d3.svg.axis() - ; - - var margin = {top: 0, right: 0, bottom: 0, left: 0} - , width = 75 //only used for tickLabel currently - , height = 60 //only used for tickLabel currently - , scale = d3.scale.linear() - , axisLabelText = null - , showMaxMin = true //TODO: showMaxMin should be disabled on all ordinal scaled axes - , highlightZero = true - , rotateLabels = 0 - , rotateYLabel = true - , staggerLabels = false - , isOrdinal = false - , ticks = null - ; - - axis - .scale(scale) - .orient('bottom') - .tickFormat(function(d) { return d }) - ; - - //============================================================ - - - //============================================================ - // Private Variables - //------------------------------------------------------------ - - var scale0; - - //============================================================ - - - function chart(selection) { - selection.each(function(data) { - var container = d3.select(this); - - - //------------------------------------------------------------ - // Setup containers and skeleton of chart - - var wrap = container.selectAll('g.nv-wrap.nv-axis').data([data]); - var wrapEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-axis'); - var gEnter = wrapEnter.append('g'); - var g = wrap.select('g') - - //------------------------------------------------------------ - - - if (ticks !== null) - axis.ticks(ticks); - else if (axis.orient() == 
'top' || axis.orient() == 'bottom') - axis.ticks(Math.abs(scale.range()[1] - scale.range()[0]) / 100); - - - //TODO: consider calculating width/height based on whether or not label is added, for reference in charts using this component - - - d3.transition(g) - .call(axis); - - scale0 = scale0 || axis.scale(); - - var fmt = axis.tickFormat(); - if (fmt == null) { - fmt = scale0.tickFormat(); - } - - var axisLabel = g.selectAll('text.nv-axislabel') - .data([axisLabelText || null]); - axisLabel.exit().remove(); - switch (axis.orient()) { - case 'top': - axisLabel.enter().append('text').attr('class', 'nv-axislabel'); - var w = (scale.range().length==2) ? scale.range()[1] : (scale.range()[scale.range().length-1]+(scale.range()[1]-scale.range()[0])); - axisLabel - .attr('text-anchor', 'middle') - .attr('y', 0) - .attr('x', w/2); - if (showMaxMin) { - var axisMaxMin = wrap.selectAll('g.nv-axisMaxMin') - .data(scale.domain()); - axisMaxMin.enter().append('g').attr('class', 'nv-axisMaxMin').append('text'); - axisMaxMin.exit().remove(); - axisMaxMin - .attr('transform', function(d,i) { - return 'translate(' + scale(d) + ',0)' - }) - .select('text') - .attr('dy', '0em') - .attr('y', -axis.tickPadding()) - .attr('text-anchor', 'middle') - .text(function(d,i) { - var v = fmt(d); - return ('' + v).match('NaN') ? '' : v; - }); - d3.transition(axisMaxMin) - .attr('transform', function(d,i) { - return 'translate(' + scale.range()[i] + ',0)' - }); - } - break; - case 'bottom': - var xLabelMargin = 36; - var maxTextWidth = 30; - var xTicks = g.selectAll('g').select("text"); - if (rotateLabels%360) { - //Calculate the longest xTick width - xTicks.each(function(d,i){ - var width = this.getBBox().width; - if(width > maxTextWidth) maxTextWidth = width; - }); - //Convert to radians before calculating sin. Add 30 to margin for healthy padding. - var sin = Math.abs(Math.sin(rotateLabels*Math.PI/180)); - var xLabelMargin = (sin ? 
sin*maxTextWidth : maxTextWidth)+30; - //Rotate all xTicks - xTicks - .attr('transform', function(d,i,j) { return 'rotate(' + rotateLabels + ' 0,0)' }) - .attr('text-anchor', rotateLabels%360 > 0 ? 'start' : 'end'); - } - axisLabel.enter().append('text').attr('class', 'nv-axislabel'); - var w = (scale.range().length==2) ? scale.range()[1] : (scale.range()[scale.range().length-1]+(scale.range()[1]-scale.range()[0])); - axisLabel - .attr('text-anchor', 'middle') - .attr('y', xLabelMargin) - .attr('x', w/2); - if (showMaxMin) { - //if (showMaxMin && !isOrdinal) { - var axisMaxMin = wrap.selectAll('g.nv-axisMaxMin') - //.data(scale.domain()) - .data([scale.domain()[0], scale.domain()[scale.domain().length - 1]]); - axisMaxMin.enter().append('g').attr('class', 'nv-axisMaxMin').append('text'); - axisMaxMin.exit().remove(); - axisMaxMin - .attr('transform', function(d,i) { - return 'translate(' + (scale(d) + (isOrdinal ? scale.rangeBand() / 2 : 0)) + ',0)' - }) - .select('text') - .attr('dy', '.71em') - .attr('y', axis.tickPadding()) - .attr('transform', function(d,i,j) { return 'rotate(' + rotateLabels + ' 0,0)' }) - .attr('text-anchor', rotateLabels ? (rotateLabels%360 > 0 ? 'start' : 'end') : 'middle') - .text(function(d,i) { - var v = fmt(d); - return ('' + v).match('NaN') ? '' : v; - }); - d3.transition(axisMaxMin) - .attr('transform', function(d,i) { - //return 'translate(' + scale.range()[i] + ',0)' - //return 'translate(' + scale(d) + ',0)' - return 'translate(' + (scale(d) + (isOrdinal ? scale.rangeBand() / 2 : 0)) + ',0)' - }); - } - if (staggerLabels) - xTicks - .attr('transform', function(d,i) { return 'translate(0,' + (i % 2 == 0 ? '0' : '12') + ')' }); - - break; - case 'right': - axisLabel.enter().append('text').attr('class', 'nv-axislabel'); - axisLabel - .attr('text-anchor', rotateYLabel ? 'middle' : 'begin') - .attr('transform', rotateYLabel ? 'rotate(90)' : '') - .attr('y', rotateYLabel ? 
(-Math.max(margin.right,width) + 12) : -10) //TODO: consider calculating this based on largest tick width... OR at least expose this on chart - .attr('x', rotateYLabel ? (scale.range()[0] / 2) : axis.tickPadding()); - if (showMaxMin) { - var axisMaxMin = wrap.selectAll('g.nv-axisMaxMin') - .data(scale.domain()); - axisMaxMin.enter().append('g').attr('class', 'nv-axisMaxMin').append('text') - .style('opacity', 0); - axisMaxMin.exit().remove(); - axisMaxMin - .attr('transform', function(d,i) { - return 'translate(0,' + scale(d) + ')' - }) - .select('text') - .attr('dy', '.32em') - .attr('y', 0) - .attr('x', axis.tickPadding()) - .attr('text-anchor', 'start') - .text(function(d,i) { - var v = fmt(d); - return ('' + v).match('NaN') ? '' : v; - }); - d3.transition(axisMaxMin) - .attr('transform', function(d,i) { - return 'translate(0,' + scale.range()[i] + ')' - }) - .select('text') - .style('opacity', 1); - } - break; - case 'left': - /* - //For dynamically placing the label. Can be used with dynamically-sized chart axis margins - var yTicks = g.selectAll('g').select("text"); - yTicks.each(function(d,i){ - var labelPadding = this.getBBox().width + axis.tickPadding() + 16; - if(labelPadding > width) width = labelPadding; - }); - */ - axisLabel.enter().append('text').attr('class', 'nv-axislabel'); - axisLabel - .attr('text-anchor', rotateYLabel ? 'middle' : 'end') - .attr('transform', rotateYLabel ? 'rotate(-90)' : '') - .attr('y', rotateYLabel ? (-Math.max(margin.left,width) + 12) : -10) //TODO: consider calculating this based on largest tick width... OR at least expose this on chart - .attr('x', rotateYLabel ? 
(-scale.range()[0] / 2) : -axis.tickPadding()); - if (showMaxMin) { - var axisMaxMin = wrap.selectAll('g.nv-axisMaxMin') - .data(scale.domain()); - axisMaxMin.enter().append('g').attr('class', 'nv-axisMaxMin').append('text') - .style('opacity', 0); - axisMaxMin.exit().remove(); - axisMaxMin - .attr('transform', function(d,i) { - return 'translate(0,' + scale0(d) + ')' - }) - .select('text') - .attr('dy', '.32em') - .attr('y', 0) - .attr('x', -axis.tickPadding()) - .attr('text-anchor', 'end') - .text(function(d,i) { - var v = fmt(d); - return ('' + v).match('NaN') ? '' : v; - }); - d3.transition(axisMaxMin) - .attr('transform', function(d,i) { - return 'translate(0,' + scale.range()[i] + ')' - }) - .select('text') - .style('opacity', 1); - } - break; - } - axisLabel - .text(function(d) { return d }); - - - if (showMaxMin && (axis.orient() === 'left' || axis.orient() === 'right')) { - //check if max and min overlap other values, if so, hide the values that overlap - g.selectAll('g') // the g's wrapping each tick - .each(function(d,i) { - d3.select(this).select('text').attr('opacity', 1); - if (scale(d) < scale.range()[1] + 10 || scale(d) > scale.range()[0] - 10) { // 10 is assuming text height is 16... if d is 0, leave it! - if (d > 1e-10 || d < -1e-10) // accounts for minor floating point errors... though could be problematic if the scale is EXTREMELY SMALL - d3.select(this).attr('opacity', 0); - - d3.select(this).select('text').attr('opacity', 0); // Don't remove the ZERO line!! - } - }); - - //if Max and Min = 0 only show min, Issue #281 - if (scale.domain()[0] == scale.domain()[1] && scale.domain()[0] == 0) - wrap.selectAll('g.nv-axisMaxMin') - .style('opacity', function(d,i) { return !i ? 
1 : 0 }); - - } - - if (showMaxMin && (axis.orient() === 'top' || axis.orient() === 'bottom')) { - var maxMinRange = []; - wrap.selectAll('g.nv-axisMaxMin') - .each(function(d,i) { - try { - if (i) // i== 1, max position - maxMinRange.push(scale(d) - this.getBBox().width - 4) //assuming the max and min labels are as wide as the next tick (with an extra 4 pixels just in case) - else // i==0, min position - maxMinRange.push(scale(d) + this.getBBox().width + 4) - }catch (err) { - if (i) // i== 1, max position - maxMinRange.push(scale(d) - 4) //assuming the max and min labels are as wide as the next tick (with an extra 4 pixels just in case) - else // i==0, min position - maxMinRange.push(scale(d) + 4) - } - }); - g.selectAll('g') // the g's wrapping each tick - .each(function(d,i) { - if (scale(d) < maxMinRange[0] || scale(d) > maxMinRange[1]) { - if (d > 1e-10 || d < -1e-10) // accounts for minor floating point errors... though could be problematic if the scale is EXTREMELY SMALL - d3.select(this).remove(); - else - d3.select(this).select('text').remove(); // Don't remove the ZERO line!! - } - }); - } - - - //highlight zero line ... Maybe should not be an option and should just be in CSS? 
- if (highlightZero) - g.selectAll('.tick') - .filter(function(d) { return !parseFloat(Math.round(d.__data__*100000)/1000000) && (d.__data__ !== undefined) }) //this is because sometimes the 0 tick is a very small fraction, TODO: think of cleaner technique - .classed('zero', true); - - //store old scales for use in transitions on update - scale0 = scale.copy(); - - }); - - return chart; - } - - - //============================================================ - // Expose Public Variables - //------------------------------------------------------------ - - // expose chart's sub-components - chart.axis = axis; - - d3.rebind(chart, axis, 'orient', 'tickValues', 'tickSubdivide', 'tickSize', 'tickPadding', 'tickFormat'); - d3.rebind(chart, scale, 'domain', 'range', 'rangeBand', 'rangeBands'); //these are also accessible by chart.scale(), but added common ones directly for ease of use - - chart.margin = function(_) { - if(!arguments.length) return margin; - margin.top = typeof _.top != 'undefined' ? _.top : margin.top; - margin.right = typeof _.right != 'undefined' ? _.right : margin.right; - margin.bottom = typeof _.bottom != 'undefined' ? _.bottom : margin.bottom; - margin.left = typeof _.left != 'undefined' ? 
_.left : margin.left; - return chart; - } - - chart.width = function(_) { - if (!arguments.length) return width; - width = _; - return chart; - }; - - chart.ticks = function(_) { - if (!arguments.length) return ticks; - ticks = _; - return chart; - }; - - chart.height = function(_) { - if (!arguments.length) return height; - height = _; - return chart; - }; - - chart.axisLabel = function(_) { - if (!arguments.length) return axisLabelText; - axisLabelText = _; - return chart; - } - - chart.showMaxMin = function(_) { - if (!arguments.length) return showMaxMin; - showMaxMin = _; - return chart; - } - - chart.highlightZero = function(_) { - if (!arguments.length) return highlightZero; - highlightZero = _; - return chart; - } - - chart.scale = function(_) { - if (!arguments.length) return scale; - scale = _; - axis.scale(scale); - isOrdinal = typeof scale.rangeBands === 'function'; - d3.rebind(chart, scale, 'domain', 'range', 'rangeBand', 'rangeBands'); - return chart; - } - - chart.rotateYLabel = function(_) { - if(!arguments.length) return rotateYLabel; - rotateYLabel = _; - return chart; - } - - chart.rotateLabels = function(_) { - if(!arguments.length) return rotateLabels; - rotateLabels = _; - return chart; - } - - chart.staggerLabels = function(_) { - if (!arguments.length) return staggerLabels; - staggerLabels = _; - return chart; - }; - - - //============================================================ - - - return chart; -} - -// Chart design based on the recommendations of Stephen Few. Implementation -// based on the work of Clint Ivy, Jamie Love, and Jason Davies. 
-// http://projects.instantcognition.com/protovis/bulletchart/ - -nv.models.bullet = function() { - - //============================================================ - // Public Variables with Default Settings - //------------------------------------------------------------ - - var margin = {top: 0, right: 0, bottom: 0, left: 0} - , orient = 'left' // TODO top & bottom - , reverse = false - , ranges = function(d) { return d.ranges } - , markers = function(d) { return d.markers } - , measures = function(d) { return d.measures } - , forceX = [0] // List of numbers to Force into the X scale (ie. 0, or a max / min, etc.) - , width = 380 - , height = 30 - , tickFormat = null - , color = nv.utils.getColor(['#1f77b4']) - , dispatch = d3.dispatch('elementMouseover', 'elementMouseout') - ; - - //============================================================ - - - function chart(selection) { - selection.each(function(d, i) { - var availableWidth = width - margin.left - margin.right, - availableHeight = height - margin.top - margin.bottom, - container = d3.select(this); - - var rangez = ranges.call(this, d, i).slice().sort(d3.descending), - markerz = markers.call(this, d, i).slice().sort(d3.descending), - measurez = measures.call(this, d, i).slice().sort(d3.descending); - - - //------------------------------------------------------------ - // Setup Scales - - // Compute the new x-scale. - var x1 = d3.scale.linear() - .domain( d3.extent(d3.merge([forceX, rangez])) ) - .range(reverse ? [availableWidth, 0] : [0, availableWidth]); - - // Retrieve the old x-scale, if this is an update. - var x0 = this.__chart__ || d3.scale.linear() - .domain([0, Infinity]) - .range(x1.range()); - - // Stash the new scale. 
- this.__chart__ = x1; - - - var rangeMin = d3.min(rangez), //rangez[2] - rangeMax = d3.max(rangez), //rangez[0] - rangeAvg = rangez[1]; - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Setup containers and skeleton of chart - - var wrap = container.selectAll('g.nv-wrap.nv-bullet').data([d]); - var wrapEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-bullet'); - var gEnter = wrapEnter.append('g'); - var g = wrap.select('g'); - - gEnter.append('rect').attr('class', 'nv-range nv-rangeMax'); - gEnter.append('rect').attr('class', 'nv-range nv-rangeAvg'); - gEnter.append('rect').attr('class', 'nv-range nv-rangeMin'); - gEnter.append('rect').attr('class', 'nv-measure'); - gEnter.append('path').attr('class', 'nv-markerTriangle'); - - wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')'); - - //------------------------------------------------------------ - - - - var w0 = function(d) { return Math.abs(x0(d) - x0(0)) }, // TODO: could optimize by precalculating x0(0) and x1(0) - w1 = function(d) { return Math.abs(x1(d) - x1(0)) }; - var xp0 = function(d) { return d < 0 ? x0(d) : x0(0) }, - xp1 = function(d) { return d < 0 ? x1(d) : x1(0) }; - - - g.select('rect.nv-rangeMax') - .attr('height', availableHeight) - .attr('width', w1(rangeMax > 0 ? rangeMax : rangeMin)) - .attr('x', xp1(rangeMax > 0 ? rangeMax : rangeMin)) - .datum(rangeMax > 0 ? rangeMax : rangeMin) - /* - .attr('x', rangeMin < 0 ? - rangeMax > 0 ? - x1(rangeMin) - : x1(rangeMax) - : x1(0)) - */ - - g.select('rect.nv-rangeAvg') - .attr('height', availableHeight) - .attr('width', w1(rangeAvg)) - .attr('x', xp1(rangeAvg)) - .datum(rangeAvg) - /* - .attr('width', rangeMax <= 0 ? - x1(rangeMax) - x1(rangeAvg) - : x1(rangeAvg) - x1(rangeMin)) - .attr('x', rangeMax <= 0 ? 
- x1(rangeAvg) - : x1(rangeMin)) - */ - - g.select('rect.nv-rangeMin') - .attr('height', availableHeight) - .attr('width', w1(rangeMax)) - .attr('x', xp1(rangeMax)) - .attr('width', w1(rangeMax > 0 ? rangeMin : rangeMax)) - .attr('x', xp1(rangeMax > 0 ? rangeMin : rangeMax)) - .datum(rangeMax > 0 ? rangeMin : rangeMax) - /* - .attr('width', rangeMax <= 0 ? - x1(rangeAvg) - x1(rangeMin) - : x1(rangeMax) - x1(rangeAvg)) - .attr('x', rangeMax <= 0 ? - x1(rangeMin) - : x1(rangeAvg)) - */ - - g.select('rect.nv-measure') - .style('fill', color) - .attr('height', availableHeight / 3) - .attr('y', availableHeight / 3) - .attr('width', measurez < 0 ? - x1(0) - x1(measurez[0]) - : x1(measurez[0]) - x1(0)) - .attr('x', xp1(measurez)) - .on('mouseover', function() { - dispatch.elementMouseover({ - value: measurez[0], - label: 'Current', - pos: [x1(measurez[0]), availableHeight/2] - }) - }) - .on('mouseout', function() { - dispatch.elementMouseout({ - value: measurez[0], - label: 'Current' - }) - }) - - var h3 = availableHeight / 6; - if (markerz[0]) { - g.selectAll('path.nv-markerTriangle') - .attr('transform', function(d) { return 'translate(' + x1(markerz[0]) + ',' + (availableHeight / 2) + ')' }) - .attr('d', 'M0,' + h3 + 'L' + h3 + ',' + (-h3) + ' ' + (-h3) + ',' + (-h3) + 'Z') - .on('mouseover', function() { - dispatch.elementMouseover({ - value: markerz[0], - label: 'Previous', - pos: [x1(markerz[0]), availableHeight/2] - }) - }) - .on('mouseout', function() { - dispatch.elementMouseout({ - value: markerz[0], - label: 'Previous' - }) - }); - } else { - g.selectAll('path.nv-markerTriangle').remove(); - } - - - wrap.selectAll('.nv-range') - .on('mouseover', function(d,i) { - var label = !i ? "Maximum" : i == 1 ? "Mean" : "Minimum"; - - dispatch.elementMouseover({ - value: d, - label: label, - pos: [x1(d), availableHeight/2] - }) - }) - .on('mouseout', function(d,i) { - var label = !i ? "Maximum" : i == 1 ? 
"Mean" : "Minimum"; - - dispatch.elementMouseout({ - value: d, - label: label - }) - }) - -/* // THIS IS THE PREVIOUS BULLET IMPLEMENTATION, WILL REMOVE SHORTLY - // Update the range rects. - var range = g.selectAll('rect.nv-range') - .data(rangez); - - range.enter().append('rect') - .attr('class', function(d, i) { return 'nv-range nv-s' + i; }) - .attr('width', w0) - .attr('height', availableHeight) - .attr('x', reverse ? x0 : 0) - .on('mouseover', function(d,i) { - dispatch.elementMouseover({ - value: d, - label: (i <= 0) ? 'Maximum' : (i > 1) ? 'Minimum' : 'Mean', //TODO: make these labels a variable - pos: [x1(d), availableHeight/2] - }) - }) - .on('mouseout', function(d,i) { - dispatch.elementMouseout({ - value: d, - label: (i <= 0) ? 'Minimum' : (i >=1) ? 'Maximum' : 'Mean' //TODO: make these labels a variable - }) - }) - - d3.transition(range) - .attr('x', reverse ? x1 : 0) - .attr('width', w1) - .attr('height', availableHeight); - - - // Update the measure rects. - var measure = g.selectAll('rect.nv-measure') - .data(measurez); - - measure.enter().append('rect') - .attr('class', function(d, i) { return 'nv-measure nv-s' + i; }) - .style('fill', function(d,i) { return color(d,i ) }) - .attr('width', w0) - .attr('height', availableHeight / 3) - .attr('x', reverse ? x0 : 0) - .attr('y', availableHeight / 3) - .on('mouseover', function(d) { - dispatch.elementMouseover({ - value: d, - label: 'Current', //TODO: make these labels a variable - pos: [x1(d), availableHeight/2] - }) - }) - .on('mouseout', function(d) { - dispatch.elementMouseout({ - value: d, - label: 'Current' //TODO: make these labels a variable - }) - }) - - d3.transition(measure) - .attr('width', w1) - .attr('height', availableHeight / 3) - .attr('x', reverse ? x1 : 0) - .attr('y', availableHeight / 3); - - - - // Update the marker lines. 
- var marker = g.selectAll('path.nv-markerTriangle') - .data(markerz); - - var h3 = availableHeight / 6; - marker.enter().append('path') - .attr('class', 'nv-markerTriangle') - .attr('transform', function(d) { return 'translate(' + x0(d) + ',' + (availableHeight / 2) + ')' }) - .attr('d', 'M0,' + h3 + 'L' + h3 + ',' + (-h3) + ' ' + (-h3) + ',' + (-h3) + 'Z') - .on('mouseover', function(d,i) { - dispatch.elementMouseover({ - value: d, - label: 'Previous', - pos: [x1(d), availableHeight/2] - }) - }) - .on('mouseout', function(d,i) { - dispatch.elementMouseout({ - value: d, - label: 'Previous' - }) - }); - - d3.transition(marker) - .attr('transform', function(d) { return 'translate(' + (x1(d) - x1(0)) + ',' + (availableHeight / 2) + ')' }); - - marker.exit().remove(); -*/ - - }); - - // d3.timer.flush(); // Not needed? - - return chart; - } - - - //============================================================ - // Expose Public Variables - //------------------------------------------------------------ - - chart.dispatch = dispatch; - - // left, right, top, bottom - chart.orient = function(_) { - if (!arguments.length) return orient; - orient = _; - reverse = orient == 'right' || orient == 'bottom'; - return chart; - }; - - // ranges (bad, satisfactory, good) - chart.ranges = function(_) { - if (!arguments.length) return ranges; - ranges = _; - return chart; - }; - - // markers (previous, goal) - chart.markers = function(_) { - if (!arguments.length) return markers; - markers = _; - return chart; - }; - - // measures (actual, forecast) - chart.measures = function(_) { - if (!arguments.length) return measures; - measures = _; - return chart; - }; - - chart.forceX = function(_) { - if (!arguments.length) return forceX; - forceX = _; - return chart; - }; - - chart.width = function(_) { - if (!arguments.length) return width; - width = _; - return chart; - }; - - chart.height = function(_) { - if (!arguments.length) return height; - height = _; - return chart; - }; - - 
chart.margin = function(_) { - if (!arguments.length) return margin; - margin.top = typeof _.top != 'undefined' ? _.top : margin.top; - margin.right = typeof _.right != 'undefined' ? _.right : margin.right; - margin.bottom = typeof _.bottom != 'undefined' ? _.bottom : margin.bottom; - margin.left = typeof _.left != 'undefined' ? _.left : margin.left; - return chart; - }; - - chart.tickFormat = function(_) { - if (!arguments.length) return tickFormat; - tickFormat = _; - return chart; - }; - - chart.color = function(_) { - if (!arguments.length) return color; - color = nv.utils.getColor(_); - return chart; - }; - - //============================================================ - - - return chart; -}; - - - -// Chart design based on the recommendations of Stephen Few. Implementation -// based on the work of Clint Ivy, Jamie Love, and Jason Davies. -// http://projects.instantcognition.com/protovis/bulletchart/ -nv.models.bulletChart = function() { - - //============================================================ - // Public Variables with Default Settings - //------------------------------------------------------------ - - var bullet = nv.models.bullet() - ; - - var orient = 'left' // TODO top & bottom - , reverse = false - , margin = {top: 5, right: 40, bottom: 20, left: 120} - , ranges = function(d) { return d.ranges } - , markers = function(d) { return d.markers } - , measures = function(d) { return d.measures } - , width = null - , height = 55 - , tickFormat = null - , tooltips = true - , tooltip = function(key, x, y, e, graph) { - return '<h3>' + x + '</h3>' + - '<p>' + y + '</p>' - } - , noData = 'No Data Available.' 
- , dispatch = d3.dispatch('tooltipShow', 'tooltipHide') - ; - - //============================================================ - - - //============================================================ - // Private Variables - //------------------------------------------------------------ - - var showTooltip = function(e, offsetElement) { - var left = e.pos[0] + ( offsetElement.offsetLeft || 0 ) + margin.left, - top = e.pos[1] + ( offsetElement.offsetTop || 0) + margin.top, - content = tooltip(e.key, e.label, e.value, e, chart); - - nv.tooltip.show([left, top], content, e.value < 0 ? 'e' : 'w', null, offsetElement); - }; - - //============================================================ - - - function chart(selection) { - selection.each(function(d, i) { - var container = d3.select(this); - - var availableWidth = (width || parseInt(container.style('width')) || 960) - - margin.left - margin.right, - availableHeight = height - margin.top - margin.bottom, - that = this; - - - chart.update = function() { chart(selection) }; - chart.container = this; - - //------------------------------------------------------------ - // Display No Data message if there's nothing to show. 
- - if (!d || !ranges.call(this, d, i)) { - var noDataText = container.selectAll('.nv-noData').data([noData]); - - noDataText.enter().append('text') - .attr('class', 'nvd3 nv-noData') - .attr('dy', '-.7em') - .style('text-anchor', 'middle'); - - noDataText - .attr('x', margin.left + availableWidth / 2) - .attr('y', 18 + margin.top + availableHeight / 2) - .text(function(d) { return d }); - - return chart; - } else { - container.selectAll('.nv-noData').remove(); - } - - //------------------------------------------------------------ - - - - var rangez = ranges.call(this, d, i).slice().sort(d3.descending), - markerz = markers.call(this, d, i).slice().sort(d3.descending), - measurez = measures.call(this, d, i).slice().sort(d3.descending); - - - //------------------------------------------------------------ - // Setup containers and skeleton of chart - - var wrap = container.selectAll('g.nv-wrap.nv-bulletChart').data([d]); - var wrapEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-bulletChart'); - var gEnter = wrapEnter.append('g'); - var g = wrap.select('g'); - - gEnter.append('g').attr('class', 'nv-bulletWrap'); - gEnter.append('g').attr('class', 'nv-titles'); - - wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')'); - - //------------------------------------------------------------ - - - // Compute the new x-scale. - var x1 = d3.scale.linear() - .domain([0, Math.max(rangez[0], markerz[0], measurez[0])]) // TODO: need to allow forceX and forceY, and xDomain, yDomain - .range(reverse ? [availableWidth, 0] : [0, availableWidth]); - - // Retrieve the old x-scale, if this is an update. - var x0 = this.__chart__ || d3.scale.linear() - .domain([0, Infinity]) - .range(x1.range()); - - // Stash the new scale. - this.__chart__ = x1; - - /* - // Derive width-scales from the x-scales. 
- var w0 = bulletWidth(x0), - w1 = bulletWidth(x1); - - function bulletWidth(x) { - var x0 = x(0); - return function(d) { - return Math.abs(x(d) - x(0)); - }; - } - - function bulletTranslate(x) { - return function(d) { - return 'translate(' + x(d) + ',0)'; - }; - } - */ - - var w0 = function(d) { return Math.abs(x0(d) - x0(0)) }, // TODO: could optimize by precalculating x0(0) and x1(0) - w1 = function(d) { return Math.abs(x1(d) - x1(0)) }; - - - var title = gEnter.select('.nv-titles').append('g') - .attr('text-anchor', 'end') - .attr('transform', 'translate(-6,' + (height - margin.top - margin.bottom) / 2 + ')'); - title.append('text') - .attr('class', 'nv-title') - .text(function(d) { return d.title; }); - - title.append('text') - .attr('class', 'nv-subtitle') - .attr('dy', '1em') - .text(function(d) { return d.subtitle; }); - - - - bullet - .width(availableWidth) - .height(availableHeight) - - var bulletWrap = g.select('.nv-bulletWrap'); - - d3.transition(bulletWrap).call(bullet); - - - - // Compute the tick format. - var format = tickFormat || x1.tickFormat( availableWidth / 100 ); - - // Update the tick groups. - var tick = g.selectAll('g.nv-tick') - .data(x1.ticks( availableWidth / 50 ), function(d) { - return this.textContent || format(d); - }); - - // Initialize the ticks with the old scale, x0. - var tickEnter = tick.enter().append('g') - .attr('class', 'nv-tick') - .attr('transform', function(d) { return 'translate(' + x0(d) + ',0)' }) - .style('opacity', 1e-6); - - tickEnter.append('line') - .attr('y1', availableHeight) - .attr('y2', availableHeight * 7 / 6); - - tickEnter.append('text') - .attr('text-anchor', 'middle') - .attr('dy', '1em') - .attr('y', availableHeight * 7 / 6) - .text(format); - - - // Transition the updating ticks to the new scale, x1. 
- var tickUpdate = d3.transition(tick) - .attr('transform', function(d) { return 'translate(' + x1(d) + ',0)' }) - .style('opacity', 1); - - tickUpdate.select('line') - .attr('y1', availableHeight) - .attr('y2', availableHeight * 7 / 6); - - tickUpdate.select('text') - .attr('y', availableHeight * 7 / 6); - - // Transition the exiting ticks to the new scale, x1. - d3.transition(tick.exit()) - .attr('transform', function(d) { return 'translate(' + x1(d) + ',0)' }) - .style('opacity', 1e-6) - .remove(); - - - //============================================================ - // Event Handling/Dispatching (in chart's scope) - //------------------------------------------------------------ - - dispatch.on('tooltipShow', function(e) { - e.key = d.title; - if (tooltips) showTooltip(e, that.parentNode); - }); - - //============================================================ - - }); - - d3.timer.flush(); - - return chart; - } - - - //============================================================ - // Event Handling/Dispatching (out of chart's scope) - //------------------------------------------------------------ - - bullet.dispatch.on('elementMouseover.tooltip', function(e) { - dispatch.tooltipShow(e); - }); - - bullet.dispatch.on('elementMouseout.tooltip', function(e) { - dispatch.tooltipHide(e); - }); - - dispatch.on('tooltipHide', function() { - if (tooltips) nv.tooltip.cleanup(); - }); - - //============================================================ - - - //============================================================ - // Expose Public Variables - //------------------------------------------------------------ - - chart.dispatch = dispatch; - chart.bullet = bullet; - - d3.rebind(chart, bullet, 'color'); - - // left, right, top, bottom - chart.orient = function(x) { - if (!arguments.length) return orient; - orient = x; - reverse = orient == 'right' || orient == 'bottom'; - return chart; - }; - - // ranges (bad, satisfactory, good) - chart.ranges = function(x) { - if 
(!arguments.length) return ranges; - ranges = x; - return chart; - }; - - // markers (previous, goal) - chart.markers = function(x) { - if (!arguments.length) return markers; - markers = x; - return chart; - }; - - // measures (actual, forecast) - chart.measures = function(x) { - if (!arguments.length) return measures; - measures = x; - return chart; - }; - - chart.width = function(x) { - if (!arguments.length) return width; - width = x; - return chart; - }; - - chart.height = function(x) { - if (!arguments.length) return height; - height = x; - return chart; - }; - - chart.margin = function(_) { - if (!arguments.length) return margin; - margin.top = typeof _.top != 'undefined' ? _.top : margin.top; - margin.right = typeof _.right != 'undefined' ? _.right : margin.right; - margin.bottom = typeof _.bottom != 'undefined' ? _.bottom : margin.bottom; - margin.left = typeof _.left != 'undefined' ? _.left : margin.left; - return chart; - }; - - chart.tickFormat = function(x) { - if (!arguments.length) return tickFormat; - tickFormat = x; - return chart; - }; - - chart.tooltips = function(_) { - if (!arguments.length) return tooltips; - tooltips = _; - return chart; - }; - - chart.tooltipContent = function(_) { - if (!arguments.length) return tooltip; - tooltip = _; - return chart; - }; - - chart.noData = function(_) { - if (!arguments.length) return noData; - noData = _; - return chart; - }; - - //============================================================ - - - return chart; -}; - - - -nv.models.cumulativeLineChart = function() { - - //============================================================ - // Public Variables with Default Settings - //------------------------------------------------------------ - - var lines = nv.models.line() - , xAxis = nv.models.axis() - , yAxis = nv.models.axis() - , legend = nv.models.legend() - , controls = nv.models.legend() - ; - - var margin = {top: 30, right: 30, bottom: 50, left: 60} - , color = nv.utils.defaultColor() - , width = 
null - , height = null - , showLegend = true - , tooltips = true - , showControls = true - , rescaleY = true - , tooltip = function(key, x, y, e, graph) { - return '<h3>' + key + '</h3>' + - '<p>' + y + ' at ' + x + '</p>' - } - , x //can be accessed via chart.xScale() - , y //can be accessed via chart.yScale() - , id = lines.id() - , state = { index: 0, rescaleY: rescaleY } - , defaultState = null - , noData = 'No Data Available.' - , average = function(d) { return d.average } - , dispatch = d3.dispatch('tooltipShow', 'tooltipHide', 'stateChange', 'changeState') - ; - - xAxis - .orient('bottom') - .tickPadding(7) - ; - yAxis - .orient('left') - ; - - //============================================================ - - - //============================================================ - // Private Variables - //------------------------------------------------------------ - - var dx = d3.scale.linear() - , index = {i: 0, x: 0} - ; - - var showTooltip = function(e, offsetElement) { - var left = e.pos[0] + ( offsetElement.offsetLeft || 0 ), - top = e.pos[1] + ( offsetElement.offsetTop || 0), - x = xAxis.tickFormat()(lines.x()(e.point, e.pointIndex)), - y = yAxis.tickFormat()(lines.y()(e.point, e.pointIndex)), - content = tooltip(e.series.key, x, y, e, chart); - - nv.tooltip.show([left, top], content, null, null, offsetElement); - }; - -/* - //Moved to see if we can get better behavior to fix issue #315 - var indexDrag = d3.behavior.drag() - .on('dragstart', dragStart) - .on('drag', dragMove) - .on('dragend', dragEnd); - - function dragStart(d,i) { - d3.select(chart.container) - .style('cursor', 'ew-resize'); - } - - function dragMove(d,i) { - d.x += d3.event.dx; - d.i = Math.round(dx.invert(d.x)); - - d3.select(this).attr('transform', 'translate(' + dx(d.i) + ',0)'); - chart.update(); - } - - function dragEnd(d,i) { - d3.select(chart.container) - .style('cursor', 'auto'); - chart.update(); - } -*/ - - //============================================================ - - - 
function chart(selection) { - selection.each(function(data) { - var container = d3.select(this).classed('nv-chart-' + id, true), - that = this; - - var availableWidth = (width || parseInt(container.style('width')) || 960) - - margin.left - margin.right, - availableHeight = (height || parseInt(container.style('height')) || 400) - - margin.top - margin.bottom; - - - chart.update = function() { container.transition().call(chart) }; - chart.container = this; - - //set state.disabled - state.disabled = data.map(function(d) { return !!d.disabled }); - - if (!defaultState) { - var key; - defaultState = {}; - for (key in state) { - if (state[key] instanceof Array) - defaultState[key] = state[key].slice(0); - else - defaultState[key] = state[key]; - } - } - - var indexDrag = d3.behavior.drag() - .on('dragstart', dragStart) - .on('drag', dragMove) - .on('dragend', dragEnd); - - - function dragStart(d,i) { - d3.select(chart.container) - .style('cursor', 'ew-resize'); - } - - function dragMove(d,i) { - index.x = d3.event.x; - index.i = Math.round(dx.invert(index.x)); - updateZero(); - } - - function dragEnd(d,i) { - d3.select(chart.container) - .style('cursor', 'auto'); - - // update state and send stateChange with new index - state.index = index.i; - dispatch.stateChange(state); - } - - - - - //------------------------------------------------------------ - // Display No Data message if there's nothing to show. 
- - if (!data || !data.length || !data.filter(function(d) { return d.values.length }).length) { - var noDataText = container.selectAll('.nv-noData').data([noData]); - - noDataText.enter().append('text') - .attr('class', 'nvd3 nv-noData') - .attr('dy', '-.7em') - .style('text-anchor', 'middle'); - - noDataText - .attr('x', margin.left + availableWidth / 2) - .attr('y', margin.top + availableHeight / 2) - .text(function(d) { return d }); - - return chart; - } else { - container.selectAll('.nv-noData').remove(); - } - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Setup Scales - - x = lines.xScale(); - y = lines.yScale(); - - - if (!rescaleY) { - var seriesDomains = data - .filter(function(series) { return !series.disabled }) - .map(function(series,i) { - var initialDomain = d3.extent(series.values, lines.y()); - - //account for series being disabled when losing 95% or more - if (initialDomain[0] < -.95) initialDomain[0] = -.95; - - return [ - (initialDomain[0] - initialDomain[1]) / (1 + initialDomain[1]), - (initialDomain[1] - initialDomain[0]) / (1 + initialDomain[0]) - ]; - }); - - var completeDomain = [ - d3.min(seriesDomains, function(d) { return d[0] }), - d3.max(seriesDomains, function(d) { return d[1] }) - ] - - lines.yDomain(completeDomain); - } else { - lines.yDomain(null); - } - - - dx .domain([0, data[0].values.length - 1]) //Assumes all series have same length - .range([0, availableWidth]) - .clamp(true); - - //------------------------------------------------------------ - - - var data = indexify(index.i, data); - - - //------------------------------------------------------------ - // Setup containers and skeleton of chart - - var wrap = container.selectAll('g.nv-wrap.nv-cumulativeLine').data([data]); - var gEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-cumulativeLine').append('g'); - var g = wrap.select('g'); - - gEnter.append('g').attr('class', 
'nv-x nv-axis'); - gEnter.append('g').attr('class', 'nv-y nv-axis'); - gEnter.append('g').attr('class', 'nv-background'); - gEnter.append('g').attr('class', 'nv-linesWrap'); - gEnter.append('g').attr('class', 'nv-avgLinesWrap'); - gEnter.append('g').attr('class', 'nv-legendWrap'); - gEnter.append('g').attr('class', 'nv-controlsWrap'); - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Legend - - if (showLegend) { - legend.width(availableWidth); - - g.select('.nv-legendWrap') - .datum(data) - .call(legend); - - if ( margin.top != legend.height()) { - margin.top = legend.height(); - availableHeight = (height || parseInt(container.style('height')) || 400) - - margin.top - margin.bottom; - } - - g.select('.nv-legendWrap') - .attr('transform', 'translate(0,' + (-margin.top) +')') - } - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Controls - - if (showControls) { - var controlsData = [ - { key: 'Re-scale y-axis', disabled: !rescaleY } - ]; - - controls.width(140).color(['#444', '#444', '#444']); - g.select('.nv-controlsWrap') - .datum(controlsData) - .attr('transform', 'translate(0,' + (-margin.top) +')') - .call(controls); - } - - //------------------------------------------------------------ - - - wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')'); - - - // Show error if series goes below 100% - var tempDisabled = data.filter(function(d) { return d.tempDisabled }); - - wrap.select('.tempDisabled').remove(); //clean-up and prevent duplicates - if (tempDisabled.length) { - wrap.append('text').attr('class', 'tempDisabled') - .attr('x', availableWidth / 2) - .attr('y', '-.71em') - .style('text-anchor', 'end') - .text(tempDisabled.map(function(d) { return d.key }).join(', ') + ' values cannot be calculated for this time period.'); - } - - 
//------------------------------------------------------------ - // Main Chart Component(s) - - gEnter.select('.nv-background') - .append('rect'); - - g.select('.nv-background rect') - .attr('width', availableWidth) - .attr('height', availableHeight); - - lines - //.x(function(d) { return d.x }) - .y(function(d) { return d.display.y }) - .width(availableWidth) - .height(availableHeight) - .color(data.map(function(d,i) { - return d.color || color(d, i); - }).filter(function(d,i) { return !data[i].disabled && !data[i].tempDisabled; })); - - - - var linesWrap = g.select('.nv-linesWrap') - .datum(data.filter(function(d) { return !d.disabled && !d.tempDisabled })); - - //d3.transition(linesWrap).call(lines); - linesWrap.call(lines); - - /*Handle average lines [AN-612] ----------------------------*/ - - //Store a series index number in the data array. - data.forEach(function(d,i) { - d.seriesIndex = i; - }); - - var avgLineData = data.filter(function(d) { - return !d.disabled && !!average(d); - }); - - var avgLines = g.select(".nv-avgLinesWrap").selectAll("line") - .data(avgLineData, function(d) { return d.key; }); - - avgLines.enter() - .append('line') - .style('stroke-width',2) - .style('stroke-dasharray','10,10') - .style('stroke',function (d,i) { - return lines.color()(d,d.seriesIndex); - }) - .attr('x1',0) - .attr('x2',availableWidth) - .attr('y1', function(d) { return y(average(d)); }) - .attr('y2', function(d) { return y(average(d)); }); - - avgLines - .attr('x1',0) - .attr('x2',availableWidth) - .attr('y1', function(d) { return y(average(d)); }) - .attr('y2', function(d) { return y(average(d)); }); - - avgLines.exit().remove(); - - //Create index line ----------------------------------------- - - var indexLine = linesWrap.selectAll('.nv-indexLine') - .data([index]); - indexLine.enter().append('rect').attr('class', 'nv-indexLine') - .attr('width', 3) - .attr('x', -2) - .attr('fill', 'red') - .attr('fill-opacity', .5) - .call(indexDrag) - - indexLine - 
.attr('transform', function(d) { return 'translate(' + dx(d.i) + ',0)' }) - .attr('height', availableHeight) - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Setup Axes - - xAxis - .scale(x) - //Suggest how many ticks based on the chart width and D3 should listen (70 is the optimal number for MM/DD/YY dates) - .ticks( Math.min(data[0].values.length,availableWidth/70) ) - .tickSize(-availableHeight, 0); - - g.select('.nv-x.nv-axis') - .attr('transform', 'translate(0,' + y.range()[0] + ')'); - d3.transition(g.select('.nv-x.nv-axis')) - .call(xAxis); - - - yAxis - .scale(y) - .ticks( availableHeight / 36 ) - .tickSize( -availableWidth, 0); - - d3.transition(g.select('.nv-y.nv-axis')) - .call(yAxis); - - //------------------------------------------------------------ - - - //============================================================ - // Event Handling/Dispatching (in chart's scope) - //------------------------------------------------------------ - - - function updateZero() { - indexLine - .data([index]); - - container.call(chart); - } - - g.select('.nv-background rect') - .on('click', function() { - index.x = d3.mouse(this)[0]; - index.i = Math.round(dx.invert(index.x)); - - // update state and send stateChange with new index - state.index = index.i; - dispatch.stateChange(state); - - updateZero(); - }); - - lines.dispatch.on('elementClick', function(e) { - index.i = e.pointIndex; - index.x = dx(index.i); - - // update state and send stateChange with new index - state.index = index.i; - dispatch.stateChange(state); - - updateZero(); - }); - - controls.dispatch.on('legendClick', function(d,i) { - d.disabled = !d.disabled; - rescaleY = !d.disabled; - - state.rescaleY = rescaleY; - dispatch.stateChange(state); - - //selection.transition().call(chart); - chart.update(); - }); - - - legend.dispatch.on('legendClick', function(d,i) { - d.disabled = !d.disabled; - - if 
(!data.filter(function(d) { return !d.disabled }).length) { - data.map(function(d) { - d.disabled = false; - wrap.selectAll('.nv-series').classed('disabled', false); - return d; - }); - } - - state.disabled = data.map(function(d) { return !!d.disabled }); - dispatch.stateChange(state); - - //selection.transition().call(chart); - chart.update(); - }); - - legend.dispatch.on('legendDblclick', function(d) { - //Double clicking should always enable current series, and disabled all others. - data.forEach(function(d) { - d.disabled = true; - }); - d.disabled = false; - - state.disabled = data.map(function(d) { return !!d.disabled }); - dispatch.stateChange(state); - chart.update(); - }); - - -/* - // - legend.dispatch.on('legendMouseover', function(d, i) { - d.hover = true; - selection.transition().call(chart) - }); - - legend.dispatch.on('legendMouseout', function(d, i) { - d.hover = false; - selection.transition().call(chart) - }); -*/ - - dispatch.on('tooltipShow', function(e) { - if (tooltips) showTooltip(e, that.parentNode); - }); - - - // Update chart from a state object passed to event handler - dispatch.on('changeState', function(e) { - - if (typeof e.disabled !== 'undefined') { - data.forEach(function(series,i) { - series.disabled = e.disabled[i]; - }); - - state.disabled = e.disabled; - } - - - if (typeof e.index !== 'undefined') { - index.i = e.index; - index.x = dx(index.i); - - state.index = e.index; - - indexLine - .data([index]); - } - - - if (typeof e.rescaleY !== 'undefined') { - rescaleY = e.rescaleY; - } - - chart.update(); - }); - - //============================================================ - - }); - - return chart; - } - - - //============================================================ - // Event Handling/Dispatching (out of chart's scope) - //------------------------------------------------------------ - - lines.dispatch.on('elementMouseover.tooltip', function(e) { - e.pos = [e.pos[0] + margin.left, e.pos[1] + margin.top]; - 
dispatch.tooltipShow(e); - }); - - lines.dispatch.on('elementMouseout.tooltip', function(e) { - dispatch.tooltipHide(e); - }); - - dispatch.on('tooltipHide', function() { - if (tooltips) nv.tooltip.cleanup(); - }); - - //============================================================ - - - //============================================================ - // Expose Public Variables - //------------------------------------------------------------ - - // expose chart's sub-components - chart.dispatch = dispatch; - chart.lines = lines; - chart.legend = legend; - chart.xAxis = xAxis; - chart.yAxis = yAxis; - - d3.rebind(chart, lines, 'defined', 'isArea', 'x', 'y', 'size', 'xDomain', 'yDomain', 'forceX', 'forceY', 'interactive', 'clipEdge', 'clipVoronoi', 'id'); - - chart.margin = function(_) { - if (!arguments.length) return margin; - margin.top = typeof _.top != 'undefined' ? _.top : margin.top; - margin.right = typeof _.right != 'undefined' ? _.right : margin.right; - margin.bottom = typeof _.bottom != 'undefined' ? _.bottom : margin.bottom; - margin.left = typeof _.left != 'undefined' ? 
_.left : margin.left; - return chart; - }; - - chart.width = function(_) { - if (!arguments.length) return width; - width = _; - return chart; - }; - - chart.height = function(_) { - if (!arguments.length) return height; - height = _; - return chart; - }; - - chart.color = function(_) { - if (!arguments.length) return color; - color = nv.utils.getColor(_); - legend.color(color); - return chart; - }; - - chart.rescaleY = function(_) { - if (!arguments.length) return rescaleY; - rescaleY = _ - return rescaleY; - }; - - chart.showControls = function(_) { - if (!arguments.length) return showControls; - showControls = _; - return chart; - }; - - chart.showLegend = function(_) { - if (!arguments.length) return showLegend; - showLegend = _; - return chart; - }; - - chart.tooltips = function(_) { - if (!arguments.length) return tooltips; - tooltips = _; - return chart; - }; - - chart.tooltipContent = function(_) { - if (!arguments.length) return tooltip; - tooltip = _; - return chart; - }; - - chart.state = function(_) { - if (!arguments.length) return state; - state = _; - return chart; - }; - - chart.defaultState = function(_) { - if (!arguments.length) return defaultState; - defaultState = _; - return chart; - }; - - chart.noData = function(_) { - if (!arguments.length) return noData; - noData = _; - return chart; - }; - - chart.average = function(_) { - if(!arguments.length) return average; - average = _; - return chart; - }; - - //============================================================ - - - //============================================================ - // Functions - //------------------------------------------------------------ - - /* Normalize the data according to an index point. 
*/ - function indexify(idx, data) { - return data.map(function(line, i) { - if (!line.values) { - return line; - } - var v = lines.y()(line.values[idx], idx); - - //TODO: implement check below, and disable series if series loses 100% or more cause divide by 0 issue - if (v < -.95) { - //if a series loses more than 100%, calculations fail.. anything close can cause major distortion (but is mathematically correct till it hits 100) - line.tempDisabled = true; - return line; - } - - line.tempDisabled = false; - - line.values = line.values.map(function(point, pointIndex) { - point.display = {'y': (lines.y()(point, pointIndex) - v) / (1 + v) }; - return point; - }) - - return line; - }) - } - - //============================================================ - - - return chart; -} -//TODO: consider deprecating by adding necessary features to multiBar model -nv.models.discreteBar = function() { - - //============================================================ - // Public Variables with Default Settings - //------------------------------------------------------------ - - var margin = {top: 0, right: 0, bottom: 0, left: 0} - , width = 960 - , height = 500 - , id = Math.floor(Math.random() * 10000) //Create semi-unique ID in case user doesn't select one - , x = d3.scale.ordinal() - , y = d3.scale.linear() - , getX = function(d) { return d.x } - , getY = function(d) { return d.y } - , forceY = [0] // 0 is forced by default.. this makes sense for the majority of bar graphs... 
user can always do chart.forceY([]) to remove - , color = nv.utils.defaultColor() - , showValues = false - , valueFormat = d3.format(',.2f') - , xDomain - , yDomain - , dispatch = d3.dispatch('chartClick', 'elementClick', 'elementDblClick', 'elementMouseover', 'elementMouseout') - , rectClass = 'discreteBar' - ; - - //============================================================ - - - //============================================================ - // Private Variables - //------------------------------------------------------------ - - var x0, y0; - - //============================================================ - - - function chart(selection) { - selection.each(function(data) { - var availableWidth = width - margin.left - margin.right, - availableHeight = height - margin.top - margin.bottom, - container = d3.select(this); - - - //add series index to each data point for reference - data = data.map(function(series, i) { - series.values = series.values.map(function(point) { - point.series = i; - return point; - }); - return series; - }); - - - //------------------------------------------------------------ - // Setup Scales - - // remap and flatten the data for use in calculating the scales' domains - var seriesData = (xDomain && yDomain) ? [] : // if we know xDomain and yDomain, no need to calculate - data.map(function(d) { - return d.values.map(function(d,i) { - return { x: getX(d,i), y: getY(d,i), y0: d.y0 } - }) - }); - - x .domain(xDomain || d3.merge(seriesData).map(function(d) { return d.x })) - .rangeBands([0, availableWidth], .1); - - y .domain(yDomain || d3.extent(d3.merge(seriesData).map(function(d) { return d.y }).concat(forceY))); - - - // If showValues, pad the Y axis range to account for label height - if (showValues) y.range([availableHeight - (y.domain()[0] < 0 ? 12 : 0), y.domain()[1] > 0 ? 
12 : 0]); - else y.range([availableHeight, 0]); - - //store old scales if they exist - x0 = x0 || x; - y0 = y0 || y.copy().range([y(0),y(0)]); - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Setup containers and skeleton of chart - - var wrap = container.selectAll('g.nv-wrap.nv-discretebar').data([data]); - var wrapEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-discretebar'); - var gEnter = wrapEnter.append('g'); - var g = wrap.select('g'); - - gEnter.append('g').attr('class', 'nv-groups'); - - wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')'); - - //------------------------------------------------------------ - - - - //TODO: by definition, the discrete bar should not have multiple groups, will modify/remove later - var groups = wrap.select('.nv-groups').selectAll('.nv-group') - .data(function(d) { return d }, function(d) { return d.key }); - groups.enter().append('g') - .style('stroke-opacity', 1e-6) - .style('fill-opacity', 1e-6); - d3.transition(groups.exit()) - .style('stroke-opacity', 1e-6) - .style('fill-opacity', 1e-6) - .remove(); - groups - .attr('class', function(d,i) { return 'nv-group nv-series-' + i }) - .classed('hover', function(d) { return d.hover }); - d3.transition(groups) - .style('stroke-opacity', 1) - .style('fill-opacity', .75); - - - var bars = groups.selectAll('g.nv-bar') - .data(function(d) { return d.values }); - - bars.exit().remove(); - - - var barsEnter = bars.enter().append('g') - .attr('transform', function(d,i,j) { - return 'translate(' + (x(getX(d,i)) + x.rangeBand() * .05 ) + ', ' + y(0) + ')' - }) - .on('mouseover', function(d,i) { //TODO: figure out why j works above, but not here - d3.select(this).classed('hover', true); - dispatch.elementMouseover({ - value: getY(d,i), - point: d, - series: data[d.series], - pos: [x(getX(d,i)) + (x.rangeBand() * (d.series + .5) / data.length), y(getY(d,i))], // 
TODO: Figure out why the value appears to be shifted - pointIndex: i, - seriesIndex: d.series, - e: d3.event - }); - }) - .on('mouseout', function(d,i) { - d3.select(this).classed('hover', false); - dispatch.elementMouseout({ - value: getY(d,i), - point: d, - series: data[d.series], - pointIndex: i, - seriesIndex: d.series, - e: d3.event - }); - }) - .on('click', function(d,i) { - dispatch.elementClick({ - value: getY(d,i), - point: d, - series: data[d.series], - pos: [x(getX(d,i)) + (x.rangeBand() * (d.series + .5) / data.length), y(getY(d,i))], // TODO: Figure out why the value appears to be shifted - pointIndex: i, - seriesIndex: d.series, - e: d3.event - }); - d3.event.stopPropagation(); - }) - .on('dblclick', function(d,i) { - dispatch.elementDblClick({ - value: getY(d,i), - point: d, - series: data[d.series], - pos: [x(getX(d,i)) + (x.rangeBand() * (d.series + .5) / data.length), y(getY(d,i))], // TODO: Figure out why the value appears to be shifted - pointIndex: i, - seriesIndex: d.series, - e: d3.event - }); - d3.event.stopPropagation(); - }); - - barsEnter.append('rect') - .attr('height', 0) - .attr('width', x.rangeBand() * .9 / data.length ) - - if (showValues) { - barsEnter.append('text') - .attr('text-anchor', 'middle') - bars.select('text') - .attr('x', x.rangeBand() * .9 / 2) - .attr('y', function(d,i) { return getY(d,i) < 0 ? y(getY(d,i)) - y(0) + 12 : -4 }) - .text(function(d,i) { return valueFormat(getY(d,i)) }); - } else { - bars.selectAll('text').remove(); - } - - bars - .attr('class', function(d,i) { return getY(d,i) < 0 ? 
'nv-bar negative' : 'nv-bar positive' }) - .style('fill', function(d,i) { return d.color || color(d,i) }) - .style('stroke', function(d,i) { return d.color || color(d,i) }) - .select('rect') - .attr('class', rectClass) - .attr('width', x.rangeBand() * .9 / data.length); - d3.transition(bars) - //.delay(function(d,i) { return i * 1200 / data[0].values.length }) - .attr('transform', function(d,i) { - var left = x(getX(d,i)) + x.rangeBand() * .05, - top = getY(d,i) < 0 ? - y(0) : - y(0) - y(getY(d,i)) < 1 ? - y(0) - 1 : //make 1 px positive bars show up above y=0 - y(getY(d,i)); - - return 'translate(' + left + ', ' + top + ')' - }) - .select('rect') - .attr('height', function(d,i) { - return Math.max(Math.abs(y(getY(d,i)) - y(0)) || 1) - }); - - - //store old scales for use in transitions on update - x0 = x.copy(); - y0 = y.copy(); - - }); - - return chart; - } - - - //============================================================ - // Expose Public Variables - //------------------------------------------------------------ - - chart.dispatch = dispatch; - - chart.x = function(_) { - if (!arguments.length) return getX; - getX = _; - return chart; - }; - - chart.y = function(_) { - if (!arguments.length) return getY; - getY = _; - return chart; - }; - - chart.margin = function(_) { - if (!arguments.length) return margin; - margin.top = typeof _.top != 'undefined' ? _.top : margin.top; - margin.right = typeof _.right != 'undefined' ? _.right : margin.right; - margin.bottom = typeof _.bottom != 'undefined' ? _.bottom : margin.bottom; - margin.left = typeof _.left != 'undefined' ? 
_.left : margin.left; - return chart; - }; - - chart.width = function(_) { - if (!arguments.length) return width; - width = _; - return chart; - }; - - chart.height = function(_) { - if (!arguments.length) return height; - height = _; - return chart; - }; - - chart.xScale = function(_) { - if (!arguments.length) return x; - x = _; - return chart; - }; - - chart.yScale = function(_) { - if (!arguments.length) return y; - y = _; - return chart; - }; - - chart.xDomain = function(_) { - if (!arguments.length) return xDomain; - xDomain = _; - return chart; - }; - - chart.yDomain = function(_) { - if (!arguments.length) return yDomain; - yDomain = _; - return chart; - }; - - chart.forceY = function(_) { - if (!arguments.length) return forceY; - forceY = _; - return chart; - }; - - chart.color = function(_) { - if (!arguments.length) return color; - color = nv.utils.getColor(_); - return chart; - }; - - chart.id = function(_) { - if (!arguments.length) return id; - id = _; - return chart; - }; - - chart.showValues = function(_) { - if (!arguments.length) return showValues; - showValues = _; - return chart; - }; - - chart.valueFormat= function(_) { - if (!arguments.length) return valueFormat; - valueFormat = _; - return chart; - }; - - chart.rectClass= function(_) { - if (!arguments.length) return rectClass; - rectClass = _; - return chart; - } - //============================================================ - - - return chart; -} - -nv.models.discreteBarChart = function() { - - //============================================================ - // Public Variables with Default Settings - //------------------------------------------------------------ - - var discretebar = nv.models.discreteBar() - , xAxis = nv.models.axis() - , yAxis = nv.models.axis() - ; - - var margin = {top: 15, right: 10, bottom: 50, left: 60} - , width = null - , height = null - , color = nv.utils.getColor() - , staggerLabels = false - , tooltips = true - , tooltip = function(key, x, y, e, graph) { - 
return '<h3>' + x + '</h3>' + - '<p>' + y + '</p>' - } - , x - , y - , noData = "No Data Available." - , dispatch = d3.dispatch('tooltipShow', 'tooltipHide', 'beforeUpdate') - ; - - xAxis - .orient('bottom') - .highlightZero(false) - .showMaxMin(false) - .tickFormat(function(d) { return d }) - ; - yAxis - .orient('left') - .tickFormat(d3.format(',.1f')) - ; - - //============================================================ - - - //============================================================ - // Private Variables - //------------------------------------------------------------ - - var showTooltip = function(e, offsetElement) { - var left = e.pos[0] + ( offsetElement.offsetLeft || 0 ), - top = e.pos[1] + ( offsetElement.offsetTop || 0), - x = xAxis.tickFormat()(discretebar.x()(e.point, e.pointIndex)), - y = yAxis.tickFormat()(discretebar.y()(e.point, e.pointIndex)), - content = tooltip(e.series.key, x, y, e, chart); - - nv.tooltip.show([left, top], content, e.value < 0 ? 'n' : 's', null, offsetElement); - }; - - //============================================================ - - - function chart(selection) { - selection.each(function(data) { - var container = d3.select(this), - that = this; - - var availableWidth = (width || parseInt(container.style('width')) || 960) - - margin.left - margin.right, - availableHeight = (height || parseInt(container.style('height')) || 400) - - margin.top - margin.bottom; - - - chart.update = function() { dispatch.beforeUpdate(); container.transition().call(chart); }; - chart.container = this; - - - //------------------------------------------------------------ - // Display No Data message if there's nothing to show. 
- - if (!data || !data.length || !data.filter(function(d) { return d.values.length }).length) { - var noDataText = container.selectAll('.nv-noData').data([noData]); - - noDataText.enter().append('text') - .attr('class', 'nvd3 nv-noData') - .attr('dy', '-.7em') - .style('text-anchor', 'middle'); - - noDataText - .attr('x', margin.left + availableWidth / 2) - .attr('y', margin.top + availableHeight / 2) - .text(function(d) { return d }); - - return chart; - } else { - container.selectAll('.nv-noData').remove(); - } - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Setup Scales - - x = discretebar.xScale(); - y = discretebar.yScale(); - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Setup containers and skeleton of chart - - var wrap = container.selectAll('g.nv-wrap.nv-discreteBarWithAxes').data([data]); - var gEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-discreteBarWithAxes').append('g'); - var defsEnter = gEnter.append('defs'); - var g = wrap.select('g'); - - gEnter.append('g').attr('class', 'nv-x nv-axis'); - gEnter.append('g').attr('class', 'nv-y nv-axis'); - gEnter.append('g').attr('class', 'nv-barsWrap'); - - g.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')'); - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Main Chart Component(s) - - discretebar - .width(availableWidth) - .height(availableHeight); - - - var barsWrap = g.select('.nv-barsWrap') - .datum(data.filter(function(d) { return !d.disabled })) - - d3.transition(barsWrap).call(discretebar); - - //------------------------------------------------------------ - - - - defsEnter.append('clipPath') - .attr('id', 'nv-x-label-clip-' + discretebar.id()) - .append('rect'); - - g.select('#nv-x-label-clip-' 
+ discretebar.id() + ' rect') - .attr('width', x.rangeBand() * (staggerLabels ? 2 : 1)) - .attr('height', 16) - .attr('x', -x.rangeBand() / (staggerLabels ? 1 : 2 )); - - - //------------------------------------------------------------ - // Setup Axes - - xAxis - .scale(x) - .ticks( availableWidth / 100 ) - .tickSize(-availableHeight, 0); - - g.select('.nv-x.nv-axis') - .attr('transform', 'translate(0,' + (y.range()[0] + ((discretebar.showValues() && y.domain()[0] < 0) ? 16 : 0)) + ')'); - //d3.transition(g.select('.nv-x.nv-axis')) - g.select('.nv-x.nv-axis').transition().duration(0) - .call(xAxis); - - - var xTicks = g.select('.nv-x.nv-axis').selectAll('g'); - - if (staggerLabels) { - xTicks - .selectAll('text') - .attr('transform', function(d,i,j) { return 'translate(0,' + (j % 2 == 0 ? '5' : '17') + ')' }) - } - - yAxis - .scale(y) - .ticks( availableHeight / 36 ) - .tickSize( -availableWidth, 0); - - d3.transition(g.select('.nv-y.nv-axis')) - .call(yAxis); - - //------------------------------------------------------------ - - - //============================================================ - // Event Handling/Dispatching (in chart's scope) - //------------------------------------------------------------ - - dispatch.on('tooltipShow', function(e) { - if (tooltips) showTooltip(e, that.parentNode); - }); - - //============================================================ - - - }); - - return chart; - } - - //============================================================ - // Event Handling/Dispatching (out of chart's scope) - //------------------------------------------------------------ - - discretebar.dispatch.on('elementMouseover.tooltip', function(e) { - e.pos = [e.pos[0] + margin.left, e.pos[1] + margin.top]; - dispatch.tooltipShow(e); - }); - - discretebar.dispatch.on('elementMouseout.tooltip', function(e) { - dispatch.tooltipHide(e); - }); - - dispatch.on('tooltipHide', function() { - if (tooltips) nv.tooltip.cleanup(); - }); - - 
//============================================================ - - - //============================================================ - // Expose Public Variables - //------------------------------------------------------------ - - // expose chart's sub-components - chart.dispatch = dispatch; - chart.discretebar = discretebar; - chart.xAxis = xAxis; - chart.yAxis = yAxis; - - d3.rebind(chart, discretebar, 'x', 'y', 'xDomain', 'yDomain', 'forceX', 'forceY', 'id', 'showValues', 'valueFormat'); - - chart.margin = function(_) { - if (!arguments.length) return margin; - margin.top = typeof _.top != 'undefined' ? _.top : margin.top; - margin.right = typeof _.right != 'undefined' ? _.right : margin.right; - margin.bottom = typeof _.bottom != 'undefined' ? _.bottom : margin.bottom; - margin.left = typeof _.left != 'undefined' ? _.left : margin.left; - return chart; - }; - - chart.width = function(_) { - if (!arguments.length) return width; - width = _; - return chart; - }; - - chart.height = function(_) { - if (!arguments.length) return height; - height = _; - return chart; - }; - - chart.color = function(_) { - if (!arguments.length) return color; - color = nv.utils.getColor(_); - discretebar.color(color); - return chart; - }; - - chart.staggerLabels = function(_) { - if (!arguments.length) return staggerLabels; - staggerLabels = _; - return chart; - }; - - chart.tooltips = function(_) { - if (!arguments.length) return tooltips; - tooltips = _; - return chart; - }; - - chart.tooltipContent = function(_) { - if (!arguments.length) return tooltip; - tooltip = _; - return chart; - }; - - chart.noData = function(_) { - if (!arguments.length) return noData; - noData = _; - return chart; - }; - - //============================================================ - - - return chart; -} - -nv.models.distribution = function() { - - //============================================================ - // Public Variables with Default Settings - 
//------------------------------------------------------------ - - var margin = {top: 0, right: 0, bottom: 0, left: 0} - , width = 400 //technically width or height depending on x or y.... - , size = 8 - , axis = 'x' // 'x' or 'y'... horizontal or vertical - , getData = function(d) { return d[axis] } // defaults d.x or d.y - , color = nv.utils.defaultColor() - , scale = d3.scale.linear() - , domain - ; - - //============================================================ - - - //============================================================ - // Private Variables - //------------------------------------------------------------ - - var scale0; - - //============================================================ - - - function chart(selection) { - selection.each(function(data) { - var availableLength = width - (axis === 'x' ? margin.left + margin.right : margin.top + margin.bottom), - naxis = axis == 'x' ? 'y' : 'x', - container = d3.select(this); - - - //------------------------------------------------------------ - // Setup Scales - - scale0 = scale0 || scale; - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Setup containers and skeleton of chart - - var wrap = container.selectAll('g.nv-distribution').data([data]); - var wrapEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-distribution'); - var gEnter = wrapEnter.append('g'); - var g = wrap.select('g'); - - wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')') - - //------------------------------------------------------------ - - - var distWrap = g.selectAll('g.nv-dist') - .data(function(d) { return d }, function(d) { return d.key }); - - distWrap.enter().append('g'); - distWrap - .attr('class', function(d,i) { return 'nv-dist nv-series-' + i }) - .style('stroke', function(d,i) { return color(d, i) }); - - var dist = distWrap.selectAll('line.nv-dist' + axis) - .data(function(d) { return d.values }) - 
dist.enter().append('line') - .attr(axis + '1', function(d,i) { return scale0(getData(d,i)) }) - .attr(axis + '2', function(d,i) { return scale0(getData(d,i)) }) - d3.transition(distWrap.exit().selectAll('line.nv-dist' + axis)) - .attr(axis + '1', function(d,i) { return scale(getData(d,i)) }) - .attr(axis + '2', function(d,i) { return scale(getData(d,i)) }) - .style('stroke-opacity', 0) - .remove(); - dist - .attr('class', function(d,i) { return 'nv-dist' + axis + ' nv-dist' + axis + '-' + i }) - .attr(naxis + '1', 0) - .attr(naxis + '2', size); - d3.transition(dist) - .attr(axis + '1', function(d,i) { return scale(getData(d,i)) }) - .attr(axis + '2', function(d,i) { return scale(getData(d,i)) }) - - - scale0 = scale.copy(); - - }); - - return chart; - } - - - //============================================================ - // Expose Public Variables - //------------------------------------------------------------ - - chart.margin = function(_) { - if (!arguments.length) return margin; - margin.top = typeof _.top != 'undefined' ? _.top : margin.top; - margin.right = typeof _.right != 'undefined' ? _.right : margin.right; - margin.bottom = typeof _.bottom != 'undefined' ? _.bottom : margin.bottom; - margin.left = typeof _.left != 'undefined' ? 
_.left : margin.left; - return chart; - }; - - chart.width = function(_) { - if (!arguments.length) return width; - width = _; - return chart; - }; - - chart.axis = function(_) { - if (!arguments.length) return axis; - axis = _; - return chart; - }; - - chart.size = function(_) { - if (!arguments.length) return size; - size = _; - return chart; - }; - - chart.getData = function(_) { - if (!arguments.length) return getData; - getData = d3.functor(_); - return chart; - }; - - chart.scale = function(_) { - if (!arguments.length) return scale; - scale = _; - return chart; - }; - - chart.color = function(_) { - if (!arguments.length) return color; - color = nv.utils.getColor(_); - return chart; - }; - - //============================================================ - - - return chart; -} -//TODO: consider deprecating and using multibar with single series for this -nv.models.historicalBar = function() { - - //============================================================ - // Public Variables with Default Settings - //------------------------------------------------------------ - - var margin = {top: 0, right: 0, bottom: 0, left: 0} - , width = 960 - , height = 500 - , id = Math.floor(Math.random() * 10000) //Create semi-unique ID in case user doesn't select one - , x = d3.scale.linear() - , y = d3.scale.linear() - , getX = function(d) { return d.x } - , getY = function(d) { return d.y } - , forceX = [] - , forceY = [0] - , padData = false - , clipEdge = true - , color = nv.utils.defaultColor() - , xDomain - , yDomain - , dispatch = d3.dispatch('chartClick', 'elementClick', 'elementDblClick', 'elementMouseover', 'elementMouseout') - ; - - //============================================================ - - - function chart(selection) { - selection.each(function(data) { - var availableWidth = width - margin.left - margin.right, - availableHeight = height - margin.top - margin.bottom, - container = d3.select(this); - - - 
//------------------------------------------------------------ - // Setup Scales - - x .domain(xDomain || d3.extent(data[0].values.map(getX).concat(forceX) )) - - if (padData) - x.range([availableWidth * .5 / data[0].values.length, availableWidth * (data[0].values.length - .5) / data[0].values.length ]); - else - x.range([0, availableWidth]); - - y .domain(yDomain || d3.extent(data[0].values.map(getY).concat(forceY) )) - .range([availableHeight, 0]); - - // If scale's domain don't have a range, slightly adjust to make one... so a chart can show a single data point - if (x.domain()[0] === x.domain()[1] || y.domain()[0] === y.domain()[1]) singlePoint = true; - if (x.domain()[0] === x.domain()[1]) - x.domain()[0] ? - x.domain([x.domain()[0] - x.domain()[0] * 0.01, x.domain()[1] + x.domain()[1] * 0.01]) - : x.domain([-1,1]); - - if (y.domain()[0] === y.domain()[1]) - y.domain()[0] ? - y.domain([y.domain()[0] + y.domain()[0] * 0.01, y.domain()[1] - y.domain()[1] * 0.01]) - : y.domain([-1,1]); - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Setup containers and skeleton of chart - - var wrap = container.selectAll('g.nv-wrap.nv-bar').data([data[0].values]); - var wrapEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-bar'); - var defsEnter = wrapEnter.append('defs'); - var gEnter = wrapEnter.append('g'); - var g = wrap.select('g'); - - gEnter.append('g').attr('class', 'nv-bars'); - - wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')'); - - //------------------------------------------------------------ - - - container - .on('click', function(d,i) { - dispatch.chartClick({ - data: d, - index: i, - pos: d3.event, - id: id - }); - }); - - - defsEnter.append('clipPath') - .attr('id', 'nv-chart-clip-path-' + id) - .append('rect'); - - wrap.select('#nv-chart-clip-path-' + id + ' rect') - .attr('width', availableWidth) - .attr('height', 
availableHeight); - - g .attr('clip-path', clipEdge ? 'url(#nv-chart-clip-path-' + id + ')' : ''); - - - - var bars = wrap.select('.nv-bars').selectAll('.nv-bar') - .data(function(d) { return d }); - - bars.exit().remove(); - - - var barsEnter = bars.enter().append('rect') - //.attr('class', function(d,i,j) { return (getY(d,i) < 0 ? 'nv-bar negative' : 'nv-bar positive') + ' nv-bar-' + j + '-' + i }) - .attr('x', 0 ) - .attr('y', function(d,i) { return y(Math.max(0, getY(d,i))) }) - .attr('height', function(d,i) { return Math.abs(y(getY(d,i)) - y(0)) }) - .on('mouseover', function(d,i) { - d3.select(this).classed('hover', true); - dispatch.elementMouseover({ - point: d, - series: data[0], - pos: [x(getX(d,i)), y(getY(d,i))], // TODO: Figure out why the value appears to be shifted - pointIndex: i, - seriesIndex: 0, - e: d3.event - }); - - }) - .on('mouseout', function(d,i) { - d3.select(this).classed('hover', false); - dispatch.elementMouseout({ - point: d, - series: data[0], - pointIndex: i, - seriesIndex: 0, - e: d3.event - }); - }) - .on('click', function(d,i) { - dispatch.elementClick({ - //label: d[label], - value: getY(d,i), - data: d, - index: i, - pos: [x(getX(d,i)), y(getY(d,i))], - e: d3.event, - id: id - }); - d3.event.stopPropagation(); - }) - .on('dblclick', function(d,i) { - dispatch.elementDblClick({ - //label: d[label], - value: getY(d,i), - data: d, - index: i, - pos: [x(getX(d,i)), y(getY(d,i))], - e: d3.event, - id: id - }); - d3.event.stopPropagation(); - }); - - bars - .attr('fill', function(d,i) { return color(d, i); }) - .attr('class', function(d,i,j) { return (getY(d,i) < 0 ? 
'nv-bar negative' : 'nv-bar positive') + ' nv-bar-' + j + '-' + i }) - .attr('transform', function(d,i) { return 'translate(' + (x(getX(d,i)) - availableWidth / data[0].values.length * .45) + ',0)'; }) //TODO: better width calculations that don't assume always uniform data spacing;w - .attr('width', (availableWidth / data[0].values.length) * .9 ) - - - d3.transition(bars) - //.attr('y', function(d,i) { return y(Math.max(0, getY(d,i))) }) - .attr('y', function(d,i) { - return getY(d,i) < 0 ? - y(0) : - y(0) - y(getY(d,i)) < 1 ? - y(0) - 1 : - y(getY(d,i)) - }) - .attr('height', function(d,i) { return Math.max(Math.abs(y(getY(d,i)) - y(0)),1) }); - //.order(); // not sure if this makes any sense for this model - - }); - - return chart; - } - - - //============================================================ - // Expose Public Variables - //------------------------------------------------------------ - - chart.dispatch = dispatch; - - chart.x = function(_) { - if (!arguments.length) return getX; - getX = _; - return chart; - }; - - chart.y = function(_) { - if (!arguments.length) return getY; - getY = _; - return chart; - }; - - chart.margin = function(_) { - if (!arguments.length) return margin; - margin.top = typeof _.top != 'undefined' ? _.top : margin.top; - margin.right = typeof _.right != 'undefined' ? _.right : margin.right; - margin.bottom = typeof _.bottom != 'undefined' ? _.bottom : margin.bottom; - margin.left = typeof _.left != 'undefined' ? 
_.left : margin.left; - return chart; - }; - - chart.width = function(_) { - if (!arguments.length) return width; - width = _; - return chart; - }; - - chart.height = function(_) { - if (!arguments.length) return height; - height = _; - return chart; - }; - - chart.xScale = function(_) { - if (!arguments.length) return x; - x = _; - return chart; - }; - - chart.yScale = function(_) { - if (!arguments.length) return y; - y = _; - return chart; - }; - - chart.xDomain = function(_) { - if (!arguments.length) return xDomain; - xDomain = _; - return chart; - }; - - chart.yDomain = function(_) { - if (!arguments.length) return yDomain; - yDomain = _; - return chart; - }; - - chart.forceX = function(_) { - if (!arguments.length) return forceX; - forceX = _; - return chart; - }; - - chart.forceY = function(_) { - if (!arguments.length) return forceY; - forceY = _; - return chart; - }; - - chart.padData = function(_) { - if (!arguments.length) return padData; - padData = _; - return chart; - }; - - chart.clipEdge = function(_) { - if (!arguments.length) return clipEdge; - clipEdge = _; - return chart; - }; - - chart.color = function(_) { - if (!arguments.length) return color; - color = nv.utils.getColor(_); - return chart; - }; - - chart.id = function(_) { - if (!arguments.length) return id; - id = _; - return chart; - }; - - //============================================================ - - - return chart; -} - -nv.models.historicalBarChart = function() { - - //============================================================ - // Public Variables with Default Settings - //------------------------------------------------------------ - - var bars = nv.models.historicalBar() - , xAxis = nv.models.axis() - , yAxis = nv.models.axis() - , legend = nv.models.legend() - ; - - - var margin = {top: 30, right: 90, bottom: 50, left: 90} - , color = nv.utils.defaultColor() - , width = null - , height = null - , showLegend = false - , showXAxis = true - , showYAxis = true - , 
rightAlignYAxis = false - , tooltips = true - , tooltip = function(key, x, y, e, graph) { - return '<h3>' + key + '</h3>' + - '<p>' + y + ' at ' + x + '</p>' - } - , x - , y - , state = {} - , defaultState = null - , noData = 'No Data Available.' - , dispatch = d3.dispatch('tooltipShow', 'tooltipHide', 'stateChange', 'changeState') - ; - - xAxis - .orient('bottom') - .tickPadding(7) - ; - yAxis - .orient( (rightAlignYAxis) ? 'right' : 'left') - ; - - //============================================================ - - - //============================================================ - // Private Variables - //------------------------------------------------------------ - - var showTooltip = function(e, offsetElement) { - - // New addition to calculate position if SVG is scaled with viewBox, may move TODO: consider implementing everywhere else - if (offsetElement) { - var svg = d3.select(offsetElement).select('svg'); - var viewBox = (svg.node()) ? svg.attr('viewBox') : null; - if (viewBox) { - viewBox = viewBox.split(' '); - var ratio = parseInt(svg.style('width')) / viewBox[2]; - e.pos[0] = e.pos[0] * ratio; - e.pos[1] = e.pos[1] * ratio; - } - } - - var left = e.pos[0] + ( offsetElement.offsetLeft || 0 ), - top = e.pos[1] + ( offsetElement.offsetTop || 0), - x = xAxis.tickFormat()(bars.x()(e.point, e.pointIndex)), - y = yAxis.tickFormat()(bars.y()(e.point, e.pointIndex)), - content = tooltip(e.series.key, x, y, e, chart); - - nv.tooltip.show([left, top], content, null, null, offsetElement); - }; - - //============================================================ - - - function chart(selection) { - selection.each(function(data) { - var container = d3.select(this), - that = this; - - var availableWidth = (width || parseInt(container.style('width')) || 960) - - margin.left - margin.right, - availableHeight = (height || parseInt(container.style('height')) || 400) - - margin.top - margin.bottom; - - - chart.update = function() { chart(selection) }; - chart.container = 
this; - - //set state.disabled - state.disabled = data.map(function(d) { return !!d.disabled }); - - if (!defaultState) { - var key; - defaultState = {}; - for (key in state) { - if (state[key] instanceof Array) - defaultState[key] = state[key].slice(0); - else - defaultState[key] = state[key]; - } - } - - //------------------------------------------------------------ - // Display noData message if there's nothing to show. - - if (!data || !data.length || !data.filter(function(d) { return d.values.length }).length) { - var noDataText = container.selectAll('.nv-noData').data([noData]); - - noDataText.enter().append('text') - .attr('class', 'nvd3 nv-noData') - .attr('dy', '-.7em') - .style('text-anchor', 'middle'); - - noDataText - .attr('x', margin.left + availableWidth / 2) - .attr('y', margin.top + availableHeight / 2) - .text(function(d) { return d }); - - return chart; - } else { - container.selectAll('.nv-noData').remove(); - } - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Setup Scales - - x = bars.xScale(); - y = bars.yScale(); - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Setup containers and skeleton of chart - - var wrap = container.selectAll('g.nv-wrap.nv-lineChart').data([data]); - var gEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-lineChart').append('g'); - var g = wrap.select('g'); - - gEnter.append('g').attr('class', 'nv-x nv-axis'); - gEnter.append('g').attr('class', 'nv-y nv-axis'); - gEnter.append('g').attr('class', 'nv-barsWrap'); - gEnter.append('g').attr('class', 'nv-legendWrap'); - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Legend - - if (showLegend) { - legend.width(availableWidth); - - g.select('.nv-legendWrap') - .datum(data) - .call(legend); - 
- if ( margin.top != legend.height()) { - margin.top = legend.height(); - availableHeight = (height || parseInt(container.style('height')) || 400) - - margin.top - margin.bottom; - } - - wrap.select('.nv-legendWrap') - .attr('transform', 'translate(0,' + (-margin.top) +')') - } - - //------------------------------------------------------------ - - wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')'); - - if (rightAlignYAxis) { - g.select(".nv-y.nv-axis") - .attr("transform", "translate(" + availableWidth + ",0)"); - } - - - //------------------------------------------------------------ - // Main Chart Component(s) - - bars - .width(availableWidth) - .height(availableHeight) - .color(data.map(function(d,i) { - return d.color || color(d, i); - }).filter(function(d,i) { return !data[i].disabled })); - - - var barsWrap = g.select('.nv-barsWrap') - .datum(data.filter(function(d) { return !d.disabled })) - - d3.transition(barsWrap).call(bars); - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Setup Axes - - if (showXAxis) { - xAxis - .scale(x) - .tickSize(-availableHeight, 0); - - g.select('.nv-x.nv-axis') - .attr('transform', 'translate(0,' + y.range()[0] + ')'); - g.select('.nv-x.nv-axis') - .transition() - .call(xAxis); - } - - if (showYAxis) { - yAxis - .scale(y) - .ticks( availableHeight / 36 ) - .tickSize( -availableWidth, 0); - - g.select('.nv-y.nv-axis') - .transition().duration(0) - .call(yAxis); - } - //------------------------------------------------------------ - - - //============================================================ - // Event Handling/Dispatching (in chart's scope) - //------------------------------------------------------------ - - legend.dispatch.on('legendClick', function(d,i) { - d.disabled = !d.disabled; - - if (!data.filter(function(d) { return !d.disabled }).length) { - data.map(function(d) { - d.disabled = false; - 
wrap.selectAll('.nv-series').classed('disabled', false); - return d; - }); - } - - state.disabled = data.map(function(d) { return !!d.disabled }); - dispatch.stateChange(state); - - selection.transition().call(chart); - }); - - legend.dispatch.on('legendDblclick', function(d) { - //Double clicking should always enable current series, and disabled all others. - data.forEach(function(d) { - d.disabled = true; - }); - d.disabled = false; - - state.disabled = data.map(function(d) { return !!d.disabled }); - dispatch.stateChange(state); - chart.update(); - }); - -/* - legend.dispatch.on('legendMouseover', function(d, i) { - d.hover = true; - selection.transition().call(chart) - }); - - legend.dispatch.on('legendMouseout', function(d, i) { - d.hover = false; - selection.transition().call(chart) - }); -*/ - - dispatch.on('tooltipShow', function(e) { - if (tooltips) showTooltip(e, that.parentNode); - }); - - - dispatch.on('changeState', function(e) { - - if (typeof e.disabled !== 'undefined') { - data.forEach(function(series,i) { - series.disabled = e.disabled[i]; - }); - - state.disabled = e.disabled; - } - - selection.call(chart); - }); - - //============================================================ - - }); - - return chart; - } - - - //============================================================ - // Event Handling/Dispatching (out of chart's scope) - //------------------------------------------------------------ - - bars.dispatch.on('elementMouseover.tooltip', function(e) { - e.pos = [e.pos[0] + margin.left, e.pos[1] + margin.top]; - dispatch.tooltipShow(e); - }); - - bars.dispatch.on('elementMouseout.tooltip', function(e) { - dispatch.tooltipHide(e); - }); - - dispatch.on('tooltipHide', function() { - if (tooltips) nv.tooltip.cleanup(); - }); - - //============================================================ - - - //============================================================ - // Expose Public Variables - 
//------------------------------------------------------------ - - // expose chart's sub-components - chart.dispatch = dispatch; - chart.bars = bars; - chart.legend = legend; - chart.xAxis = xAxis; - chart.yAxis = yAxis; - - d3.rebind(chart, bars, 'defined', 'isArea', 'x', 'y', 'size', 'xScale', 'yScale', 'xDomain', 'yDomain', 'forceX', 'forceY', 'interactive', 'clipEdge', 'clipVoronoi', 'id', 'interpolate'); - - chart.margin = function(_) { - if (!arguments.length) return margin; - margin.top = typeof _.top != 'undefined' ? _.top : margin.top; - margin.right = typeof _.right != 'undefined' ? _.right : margin.right; - margin.bottom = typeof _.bottom != 'undefined' ? _.bottom : margin.bottom; - margin.left = typeof _.left != 'undefined' ? _.left : margin.left; - return chart; - }; - - chart.width = function(_) { - if (!arguments.length) return width; - width = _; - return chart; - }; - - chart.height = function(_) { - if (!arguments.length) return height; - height = _; - return chart; - }; - - chart.color = function(_) { - if (!arguments.length) return color; - color = nv.utils.getColor(_); - legend.color(color); - return chart; - }; - - chart.showLegend = function(_) { - if (!arguments.length) return showLegend; - showLegend = _; - return chart; - }; - - chart.showXAxis = function(_) { - if (!arguments.length) return showXAxis; - showXAxis = _; - return chart; - }; - - chart.showYAxis = function(_) { - if (!arguments.length) return showYAxis; - showYAxis = _; - return chart; - }; - - chart.rightAlignYAxis = function(_) { - if(!arguments.length) return rightAlignYAxis; - rightAlignYAxis = _; - yAxis.orient( (_) ? 
'right' : 'left'); - return chart; - }; - - chart.tooltips = function(_) { - if (!arguments.length) return tooltips; - tooltips = _; - return chart; - }; - - chart.tooltipContent = function(_) { - if (!arguments.length) return tooltip; - tooltip = _; - return chart; - }; - - chart.state = function(_) { - if (!arguments.length) return state; - state = _; - return chart; - }; - - chart.defaultState = function(_) { - if (!arguments.length) return defaultState; - defaultState = _; - return chart; - }; - - chart.noData = function(_) { - if (!arguments.length) return noData; - noData = _; - return chart; - }; - - //============================================================ - - - return chart; -} -nv.models.indentedTree = function() { - - //============================================================ - // Public Variables with Default Settings - //------------------------------------------------------------ - - var margin = {top: 0, right: 0, bottom: 0, left: 0} //TODO: implement, maybe as margin on the containing div - , width = 960 - , height = 500 - , color = nv.utils.defaultColor() - , id = Math.floor(Math.random() * 10000) - , header = true - , filterZero = false - , noData = "No Data Available." 
- , childIndent = 20 - , columns = [{key:'key', label: 'Name', type:'text'}] //TODO: consider functions like chart.addColumn, chart.removeColumn, instead of a block like this - , tableClass = null - , iconOpen = 'images/grey-plus.png' //TODO: consider removing this and replacing with a '+' or '-' unless user defines images - , iconClose = 'images/grey-minus.png' - , dispatch = d3.dispatch('elementClick', 'elementDblclick', 'elementMouseover', 'elementMouseout') - ; - - //============================================================ - - var idx = 0; - - function chart(selection) { - selection.each(function(data) { - var depth = 1, - container = d3.select(this); - - var tree = d3.layout.tree() - .children(function(d) { return d.values }) - .size([height, childIndent]); //Not sure if this is needed now that the result is HTML - - chart.update = function() { container.transition().duration(600).call(chart) }; - - - //------------------------------------------------------------ - // Display No Data message if there's nothing to show. - if (!data[0]) data[0] = {key: noData}; - - //------------------------------------------------------------ - - - var nodes = tree.nodes(data[0]); - - // nodes.map(function(d) { - // d.id = i++; - // }) - - //------------------------------------------------------------ - // Setup containers and skeleton of chart - - var wrap = d3.select(this).selectAll('div').data([[nodes]]); - var wrapEnter = wrap.enter().append('div').attr('class', 'nvd3 nv-wrap nv-indentedtree'); - var tableEnter = wrapEnter.append('table'); - var table = wrap.select('table').attr('width', '100%').attr('class', tableClass); - - //------------------------------------------------------------ - - - if (header) { - var thead = tableEnter.append('thead'); - - var theadRow1 = thead.append('tr'); - - columns.forEach(function(column) { - theadRow1 - .append('th') - .attr('width', column.width ? column.width : '10%') - .style('text-align', column.type == 'numeric' ? 
'right' : 'left') - .append('span') - .text(column.label); - }); - } - - - var tbody = table.selectAll('tbody') - .data(function(d) { return d }); - tbody.enter().append('tbody'); - - - - //compute max generations - depth = d3.max(nodes, function(node) { return node.depth }); - tree.size([height, depth * childIndent]); //TODO: see if this is necessary at all - - - // Update the nodes⊠- var node = tbody.selectAll('tr') - // .data(function(d) { return d; }, function(d) { return d.id || (d.id == ++i)}); - .data(function(d) { return d.filter(function(d) { return (filterZero && !d.children) ? filterZero(d) : true; } )}, function(d,i) { return d.id || (d.id || ++idx)}); - //.style('display', 'table-row'); //TODO: see if this does anything - - node.exit().remove(); - - node.select('img.nv-treeicon') - .attr('src', icon) - .classed('folded', folded); - - var nodeEnter = node.enter().append('tr'); - - - columns.forEach(function(column, index) { - - var nodeName = nodeEnter.append('td') - .style('padding-left', function(d) { return (index ? 0 : d.depth * childIndent + 12 + (icon(d) ? 0 : 16)) + 'px' }, 'important') //TODO: check why I did the ternary here - .style('text-align', column.type == 'numeric' ? 'right' : 'left'); - - - if (index == 0) { - nodeName.append('img') - .classed('nv-treeicon', true) - .classed('nv-folded', folded) - .attr('src', icon) - .style('width', '14px') - .style('height', '14px') - .style('padding', '0 1px') - .style('display', function(d) { return icon(d) ? 'inline-block' : 'none'; }) - .on('click', click); - } - - - nodeName.append('span') - .attr('class', d3.functor(column.classes) ) - .text(function(d) { return column.format ? column.format(d) : - (d[column.key] || '-') }); - - if (column.showCount) { - nodeName.append('span') - .attr('class', 'nv-childrenCount'); - - node.selectAll('span.nv-childrenCount').text(function(d) { - return ((d.values && d.values.length) || (d._values && d._values.length)) ? 
//If this is a parent - '(' + ((d.values && (d.values.filter(function(d) { return filterZero ? filterZero(d) : true; }).length)) //If children are in values check its children and filter - || (d._values && d._values.filter(function(d) { return filterZero ? filterZero(d) : true; }).length) //Otherwise, do the same, but with the other name, _values... - || 0) + ')' //This is the catch-all in case there are no children after a filter - : '' //If this is not a parent, just give an empty string - }); - } - - if (column.click) - nodeName.select('span').on('click', column.click); - - }); - - node - .order() - .on('click', function(d) { - dispatch.elementClick({ - row: this, //TODO: decide whether or not this should be consistent with scatter/line events or should be an html link (a href) - data: d, - pos: [d.x, d.y] - }); - }) - .on('dblclick', function(d) { - dispatch.elementDblclick({ - row: this, - data: d, - pos: [d.x, d.y] - }); - }) - .on('mouseover', function(d) { - dispatch.elementMouseover({ - row: this, - data: d, - pos: [d.x, d.y] - }); - }) - .on('mouseout', function(d) { - dispatch.elementMouseout({ - row: this, - data: d, - pos: [d.x, d.y] - }); - }); - - - - - // Toggle children on click. - function click(d, _, unshift) { - d3.event.stopPropagation(); - - if(d3.event.shiftKey && !unshift) { - //If you shift-click, it'll toggle fold all the children, instead of itself - d3.event.shiftKey = false; - d.values && d.values.forEach(function(node){ - if (node.values || node._values) { - click(node, 0, true); - } - }); - return true; - } - if(!hasChildren(d)) { - //download file - //window.location.href = d.url; - return true; - } - if (d.values) { - d._values = d.values; - d.values = null; - } else { - d.values = d._values; - d._values = null; - } - chart.update(); - } - - - function icon(d) { - return (d._values && d._values.length) ? iconOpen : (d.values && d.values.length) ? 
iconClose : ''; - } - - function folded(d) { - return (d._values && d._values.length); - } - - function hasChildren(d) { - var values = d.values || d._values; - - return (values && values.length); - } - - - }); - - return chart; - } - - - //============================================================ - // Expose Public Variables - //------------------------------------------------------------ - - chart.margin = function(_) { - if (!arguments.length) return margin; - margin.top = typeof _.top != 'undefined' ? _.top : margin.top; - margin.right = typeof _.right != 'undefined' ? _.right : margin.right; - margin.bottom = typeof _.bottom != 'undefined' ? _.bottom : margin.bottom; - margin.left = typeof _.left != 'undefined' ? _.left : margin.left; - return chart; - }; - - chart.width = function(_) { - if (!arguments.length) return width; - width = _; - return chart; - }; - - chart.height = function(_) { - if (!arguments.length) return height; - height = _; - return chart; - }; - - chart.color = function(_) { - if (!arguments.length) return color; - color = nv.utils.getColor(_); - scatter.color(color); - return chart; - }; - - chart.id = function(_) { - if (!arguments.length) return id; - id = _; - return chart; - }; - - chart.header = function(_) { - if (!arguments.length) return header; - header = _; - return chart; - }; - - chart.noData = function(_) { - if (!arguments.length) return noData; - noData = _; - return chart; - }; - - chart.filterZero = function(_) { - if (!arguments.length) return filterZero; - filterZero = _; - return chart; - }; - - chart.columns = function(_) { - if (!arguments.length) return columns; - columns = _; - return chart; - }; - - chart.tableClass = function(_) { - if (!arguments.length) return tableClass; - tableClass = _; - return chart; - }; - - chart.iconOpen = function(_){ - if (!arguments.length) return iconOpen; - iconOpen = _; - return chart; - } - - chart.iconClose = function(_){ - if (!arguments.length) return iconClose; - iconClose 
= _; - return chart; - } - - //============================================================ - - - return chart; -};nv.models.legend = function() { - - //============================================================ - // Public Variables with Default Settings - //------------------------------------------------------------ - - var margin = {top: 5, right: 0, bottom: 5, left: 0} - , width = 400 - , height = 20 - , getKey = function(d) { return d.key } - , color = nv.utils.defaultColor() - , align = true - , dispatch = d3.dispatch('legendClick', 'legendDblclick', 'legendMouseover', 'legendMouseout') - ; - - //============================================================ - - - function chart(selection) { - selection.each(function(data) { - var availableWidth = width - margin.left - margin.right, - container = d3.select(this); - - - //------------------------------------------------------------ - // Setup containers and skeleton of chart - - var wrap = container.selectAll('g.nv-legend').data([data]); - var gEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-legend').append('g'); - var g = wrap.select('g'); - - wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')'); - - //------------------------------------------------------------ - - - var series = g.selectAll('.nv-series') - .data(function(d) { return d }); - var seriesEnter = series.enter().append('g').attr('class', 'nv-series') - .on('mouseover', function(d,i) { - dispatch.legendMouseover(d,i); //TODO: Make consistent with other event objects - }) - .on('mouseout', function(d,i) { - dispatch.legendMouseout(d,i); - }) - .on('click', function(d,i) { - dispatch.legendClick(d,i); - }) - .on('dblclick', function(d,i) { - dispatch.legendDblclick(d,i); - }); - seriesEnter.append('circle') - .style('stroke-width', 2) - .attr('r', 5); - seriesEnter.append('text') - .attr('text-anchor', 'start') - .attr('dy', '.32em') - .attr('dx', '8'); - series.classed('disabled', function(d) { return d.disabled }); 
- series.exit().remove(); - series.select('circle') - .style('fill', function(d,i) { return d.color || color(d,i)}) - .style('stroke', function(d,i) { return d.color || color(d, i) }); - series.select('text').text(getKey); - - - //TODO: implement fixed-width and max-width options (max-width is especially useful with the align option) - - // NEW ALIGNING CODE, TODO: clean up - if (align) { - - var seriesWidths = []; - series.each(function(d,i) { - var legendText = d3.select(this).select('text'); - var svgComputedTextLength = legendText.node().getComputedTextLength() - || nv.utils.calcApproxTextWidth(legendText); - seriesWidths.push(svgComputedTextLength + 28); // 28 is ~ the width of the circle plus some padding - }); - - //nv.log('Series Widths: ', JSON.stringify(seriesWidths)); - - var seriesPerRow = 0; - var legendWidth = 0; - var columnWidths = []; - - while ( legendWidth < availableWidth && seriesPerRow < seriesWidths.length) { - columnWidths[seriesPerRow] = seriesWidths[seriesPerRow]; - legendWidth += seriesWidths[seriesPerRow++]; - } - - - while ( legendWidth > availableWidth && seriesPerRow > 1 ) { - columnWidths = []; - seriesPerRow--; - - for (k = 0; k < seriesWidths.length; k++) { - if (seriesWidths[k] > (columnWidths[k % seriesPerRow] || 0) ) - columnWidths[k % seriesPerRow] = seriesWidths[k]; - } - - legendWidth = columnWidths.reduce(function(prev, cur, index, array) { - return prev + cur; - }); - } - //console.log(columnWidths, legendWidth, seriesPerRow); - - var xPositions = []; - for (var i = 0, curX = 0; i < seriesPerRow; i++) { - xPositions[i] = curX; - curX += columnWidths[i]; - } - - series - .attr('transform', function(d, i) { - return 'translate(' + xPositions[i % seriesPerRow] + ',' + (5 + Math.floor(i / seriesPerRow) * 20) + ')'; - }); - - //position legend as far right as possible within the total width - g.attr('transform', 'translate(' + (width - margin.right - legendWidth) + ',' + margin.top + ')'); - - height = margin.top + margin.bottom 
+ (Math.ceil(seriesWidths.length / seriesPerRow) * 20); - - } else { - - var ypos = 5, - newxpos = 5, - maxwidth = 0, - xpos; - series - .attr('transform', function(d, i) { - var length = d3.select(this).select('text').node().getComputedTextLength() + 28; - xpos = newxpos; - - if (width < margin.left + margin.right + xpos + length) { - newxpos = xpos = 5; - ypos += 20; - } - - newxpos += length; - if (newxpos > maxwidth) maxwidth = newxpos; - - return 'translate(' + xpos + ',' + ypos + ')'; - }); - - //position legend as far right as possible within the total width - g.attr('transform', 'translate(' + (width - margin.right - maxwidth) + ',' + margin.top + ')'); - - height = margin.top + margin.bottom + ypos + 15; - - } - - }); - - return chart; - } - - - //============================================================ - // Expose Public Variables - //------------------------------------------------------------ - - chart.dispatch = dispatch; - - chart.margin = function(_) { - if (!arguments.length) return margin; - margin.top = typeof _.top != 'undefined' ? _.top : margin.top; - margin.right = typeof _.right != 'undefined' ? _.right : margin.right; - margin.bottom = typeof _.bottom != 'undefined' ? _.bottom : margin.bottom; - margin.left = typeof _.left != 'undefined' ? 
_.left : margin.left; - return chart; - }; - - chart.width = function(_) { - if (!arguments.length) return width; - width = _; - return chart; - }; - - chart.height = function(_) { - if (!arguments.length) return height; - height = _; - return chart; - }; - - chart.key = function(_) { - if (!arguments.length) return getKey; - getKey = _; - return chart; - }; - - chart.color = function(_) { - if (!arguments.length) return color; - color = nv.utils.getColor(_); - return chart; - }; - - chart.align = function(_) { - if (!arguments.length) return align; - align = _; - return chart; - }; - - //============================================================ - - - return chart; -} - -nv.models.line = function() { - - //============================================================ - // Public Variables with Default Settings - //------------------------------------------------------------ - - var scatter = nv.models.scatter() - ; - - var margin = {top: 0, right: 0, bottom: 0, left: 0} - , width = 960 - , height = 500 - , color = nv.utils.defaultColor() // a function that returns a color - , getX = function(d) { return d.x } // accessor to get the x value from a data point - , getY = function(d) { return d.y } // accessor to get the y value from a data point - , defined = function(d,i) { return !isNaN(getY(d,i)) && getY(d,i) !== null } // allows a line to be not continuous when it is not defined - , isArea = function(d) { return d.area } // decides if a line is an area or just a line - , clipEdge = false // if true, masks lines within x and y scale - , x //can be accessed via chart.xScale() - , y //can be accessed via chart.yScale() - , interpolate = "linear" // controls the line interpolation - ; - - scatter - .size(16) // default size - .sizeDomain([16,256]) //set to speed up calculation, needs to be unset if there is a custom size accessor - ; - - //============================================================ - - - 
//============================================================ - // Private Variables - //------------------------------------------------------------ - - var x0, y0 //used to store previous scales - ; - - //============================================================ - - - function chart(selection) { - selection.each(function(data) { - var availableWidth = width - margin.left - margin.right, - availableHeight = height - margin.top - margin.bottom, - container = d3.select(this); - - //------------------------------------------------------------ - // Setup Scales - - x = scatter.xScale(); - y = scatter.yScale(); - - x0 = x0 || x; - y0 = y0 || y; - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Setup containers and skeleton of chart - - var wrap = container.selectAll('g.nv-wrap.nv-line').data([data]); - var wrapEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-line'); - var defsEnter = wrapEnter.append('defs'); - var gEnter = wrapEnter.append('g'); - var g = wrap.select('g') - - gEnter.append('g').attr('class', 'nv-groups'); - gEnter.append('g').attr('class', 'nv-scatterWrap'); - - wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')'); - - //------------------------------------------------------------ - - - - - scatter - .width(availableWidth) - .height(availableHeight) - - var scatterWrap = wrap.select('.nv-scatterWrap'); - //.datum(data); // Data automatically trickles down from the wrap - - d3.transition(scatterWrap).call(scatter); - - - - defsEnter.append('clipPath') - .attr('id', 'nv-edge-clip-' + scatter.id()) - .append('rect'); - - wrap.select('#nv-edge-clip-' + scatter.id() + ' rect') - .attr('width', availableWidth) - .attr('height', availableHeight); - - g .attr('clip-path', clipEdge ? 'url(#nv-edge-clip-' + scatter.id() + ')' : ''); - scatterWrap - .attr('clip-path', clipEdge ? 
'url(#nv-edge-clip-' + scatter.id() + ')' : ''); - - - - - var groups = wrap.select('.nv-groups').selectAll('.nv-group') - .data(function(d) { return d }, function(d) { return d.key }); - groups.enter().append('g') - .style('stroke-opacity', 1e-6) - .style('fill-opacity', 1e-6); - d3.transition(groups.exit()) - .style('stroke-opacity', 1e-6) - .style('fill-opacity', 1e-6) - .remove(); - groups - .attr('class', function(d,i) { return 'nv-group nv-series-' + i }) - .classed('hover', function(d) { return d.hover }) - .style('fill', function(d,i){ return color(d, i) }) - .style('stroke', function(d,i){ return color(d, i)}); - d3.transition(groups) - .style('stroke-opacity', 1) - .style('fill-opacity', .5); - - - - var areaPaths = groups.selectAll('path.nv-area') - .data(function(d) { return isArea(d) ? [d] : [] }); // this is done differently than lines because I need to check if series is an area - areaPaths.enter().append('path') - .attr('class', 'nv-area') - .attr('d', function(d) { - return d3.svg.area() - .interpolate(interpolate) - .defined(defined) - .x(function(d,i) { return x0(getX(d,i)) }) - .y0(function(d,i) { return y0(getY(d,i)) }) - .y1(function(d,i) { return y0( y.domain()[0] <= 0 ? y.domain()[1] >= 0 ? 0 : y.domain()[1] : y.domain()[0] ) }) - //.y1(function(d,i) { return y0(0) }) //assuming 0 is within y domain.. may need to tweak this - .apply(this, [d.values]) - }); - d3.transition(groups.exit().selectAll('path.nv-area')) - .attr('d', function(d) { - return d3.svg.area() - .interpolate(interpolate) - .defined(defined) - .x(function(d,i) { return x(getX(d,i)) }) - .y0(function(d,i) { return y(getY(d,i)) }) - .y1(function(d,i) { return y( y.domain()[0] <= 0 ? y.domain()[1] >= 0 ? 0 : y.domain()[1] : y.domain()[0] ) }) - //.y1(function(d,i) { return y0(0) }) //assuming 0 is within y domain.. 
may need to tweak this - .apply(this, [d.values]) - }); - d3.transition(areaPaths) - .attr('d', function(d) { - return d3.svg.area() - .interpolate(interpolate) - .defined(defined) - .x(function(d,i) { return x(getX(d,i)) }) - .y0(function(d,i) { return y(getY(d,i)) }) - .y1(function(d,i) { return y( y.domain()[0] <= 0 ? y.domain()[1] >= 0 ? 0 : y.domain()[1] : y.domain()[0] ) }) - //.y1(function(d,i) { return y0(0) }) //assuming 0 is within y domain.. may need to tweak this - .apply(this, [d.values]) - }); - - - - var linePaths = groups.selectAll('path.nv-line') - .data(function(d) { return [d.values] }); - linePaths.enter().append('path') - .attr('class', 'nv-line') - .attr('d', - d3.svg.line() - .interpolate(interpolate) - .defined(defined) - .x(function(d,i) { return x0(getX(d,i)) }) - .y(function(d,i) { return y0(getY(d,i)) }) - ); - d3.transition(groups.exit().selectAll('path.nv-line')) - .attr('d', - d3.svg.line() - .interpolate(interpolate) - .defined(defined) - .x(function(d,i) { return x(getX(d,i)) }) - .y(function(d,i) { return y(getY(d,i)) }) - ); - d3.transition(linePaths) - .attr('d', - d3.svg.line() - .interpolate(interpolate) - .defined(defined) - .x(function(d,i) { return x(getX(d,i)) }) - .y(function(d,i) { return y(getY(d,i)) }) - ); - - - - //store old scales for use in transitions on update - x0 = x.copy(); - y0 = y.copy(); - - }); - - return chart; - } - - - //============================================================ - // Expose Public Variables - //------------------------------------------------------------ - - chart.dispatch = scatter.dispatch; - chart.scatter = scatter; - - d3.rebind(chart, scatter, 'id', 'interactive', 'size', 'xScale', 'yScale', 'zScale', 'xDomain', 'yDomain', 'sizeDomain', 'forceX', 'forceY', 'forceSize', 'clipVoronoi', 'clipRadius', 'padData'); - - chart.margin = function(_) { - if (!arguments.length) return margin; - margin.top = typeof _.top != 'undefined' ? 
_.top : margin.top; - margin.right = typeof _.right != 'undefined' ? _.right : margin.right; - margin.bottom = typeof _.bottom != 'undefined' ? _.bottom : margin.bottom; - margin.left = typeof _.left != 'undefined' ? _.left : margin.left; - return chart; - }; - - chart.width = function(_) { - if (!arguments.length) return width; - width = _; - return chart; - }; - - chart.height = function(_) { - if (!arguments.length) return height; - height = _; - return chart; - }; - - chart.x = function(_) { - if (!arguments.length) return getX; - getX = _; - scatter.x(_); - return chart; - }; - - chart.y = function(_) { - if (!arguments.length) return getY; - getY = _; - scatter.y(_); - return chart; - }; - - chart.clipEdge = function(_) { - if (!arguments.length) return clipEdge; - clipEdge = _; - return chart; - }; - - chart.color = function(_) { - if (!arguments.length) return color; - color = nv.utils.getColor(_); - scatter.color(color); - return chart; - }; - - chart.interpolate = function(_) { - if (!arguments.length) return interpolate; - interpolate = _; - return chart; - }; - - chart.defined = function(_) { - if (!arguments.length) return defined; - defined = _; - return chart; - }; - - chart.isArea = function(_) { - if (!arguments.length) return isArea; - isArea = d3.functor(_); - return chart; - }; - - //============================================================ - - - return chart; -} - -nv.models.lineChart = function() { - - //============================================================ - // Public Variables with Default Settings - //------------------------------------------------------------ - - var lines = nv.models.line() - , xAxis = nv.models.axis() - , yAxis = nv.models.axis() - , legend = nv.models.legend() - ; - -//set margin.right to 23 to fit dates on the x-axis within the chart - var margin = {top: 30, right: 20, bottom: 50, left: 60} - , color = nv.utils.defaultColor() - , width = null - , height = null - , showLegend = true - , showXAxis = true - , 
showYAxis = true - , rightAlignYAxis = false - , tooltips = true - , tooltip = function(key, x, y, e, graph) { - return '<h3>' + key + '</h3>' + - '<p>' + y + ' at ' + x + '</p>' - } - , x - , y - , state = {} - , defaultState = null - , noData = 'No Data Available.' - , dispatch = d3.dispatch('tooltipShow', 'tooltipHide', 'stateChange', 'changeState') - ; - - xAxis - .orient('bottom') - .tickPadding(7) - ; - yAxis - .orient((rightAlignYAxis) ? 'right' : 'left') - ; - - //============================================================ - - - //============================================================ - // Private Variables - //------------------------------------------------------------ - - var showTooltip = function(e, offsetElement) { - - // New addition to calculate position if SVG is scaled with viewBox, may move TODO: consider implementing everywhere else - if (offsetElement) { - var svg = d3.select(offsetElement).select('svg'); - var viewBox = (svg.node()) ? svg.attr('viewBox') : null; - if (viewBox) { - viewBox = viewBox.split(' '); - var ratio = parseInt(svg.style('width')) / viewBox[2]; - e.pos[0] = e.pos[0] * ratio; - e.pos[1] = e.pos[1] * ratio; - } - } - - var left = e.pos[0] + ( offsetElement.offsetLeft || 0 ), - top = e.pos[1] + ( offsetElement.offsetTop || 0), - x = xAxis.tickFormat()(lines.x()(e.point, e.pointIndex)), - y = yAxis.tickFormat()(lines.y()(e.point, e.pointIndex)), - content = tooltip(e.series.key, x, y, e, chart); - - nv.tooltip.show([left, top], content, null, null, offsetElement); - }; - - //============================================================ - - - function chart(selection) { - selection.each(function(data) { - var container = d3.select(this), - that = this; - - var availableWidth = (width || parseInt(container.style('width')) || 960) - - margin.left - margin.right, - availableHeight = (height || parseInt(container.style('height')) || 400) - - margin.top - margin.bottom; - - - chart.update = function() { 
container.transition().call(chart) }; - chart.container = this; - - //set state.disabled - state.disabled = data.map(function(d) { return !!d.disabled }); - - if (!defaultState) { - var key; - defaultState = {}; - for (key in state) { - if (state[key] instanceof Array) - defaultState[key] = state[key].slice(0); - else - defaultState[key] = state[key]; - } - } - - //------------------------------------------------------------ - // Display noData message if there's nothing to show. - - if (!data || !data.length || !data.filter(function(d) { return d.values.length }).length) { - var noDataText = container.selectAll('.nv-noData').data([noData]); - - noDataText.enter().append('text') - .attr('class', 'nvd3 nv-noData') - .attr('dy', '-.7em') - .style('text-anchor', 'middle'); - - noDataText - .attr('x', margin.left + availableWidth / 2) - .attr('y', margin.top + availableHeight / 2) - .text(function(d) { return d }); - - return chart; - } else { - container.selectAll('.nv-noData').remove(); - } - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Setup Scales - - x = lines.xScale(); - y = lines.yScale(); - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Setup containers and skeleton of chart - - var wrap = container.selectAll('g.nv-wrap.nv-lineChart').data([data]); - var gEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-lineChart').append('g'); - var g = wrap.select('g'); - - gEnter.append('g').attr('class', 'nv-x nv-axis'); - gEnter.append('g').attr('class', 'nv-y nv-axis'); - gEnter.append('g').attr('class', 'nv-linesWrap'); - gEnter.append('g').attr('class', 'nv-legendWrap'); - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Legend - - if (showLegend) { - legend.width(availableWidth); - - 
g.select('.nv-legendWrap') - .datum(data) - .call(legend); - - if ( margin.top != legend.height()) { - margin.top = legend.height(); - availableHeight = (height || parseInt(container.style('height')) || 400) - - margin.top - margin.bottom; - } - - wrap.select('.nv-legendWrap') - .attr('transform', 'translate(0,' + (-margin.top) +')') - } - - //------------------------------------------------------------ - - wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')'); - - if (rightAlignYAxis) { - g.select(".nv-y.nv-axis") - .attr("transform", "translate(" + availableWidth + ",0)"); - } - - //------------------------------------------------------------ - // Main Chart Component(s) - - lines - .width(availableWidth) - .height(availableHeight) - .color(data.map(function(d,i) { - return d.color || color(d, i); - }).filter(function(d,i) { return !data[i].disabled })); - - - var linesWrap = g.select('.nv-linesWrap') - .datum(data.filter(function(d) { return !d.disabled })) - - d3.transition(linesWrap).call(lines); - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Setup Axes - - if (showXAxis) { - xAxis - .scale(x) - .ticks( availableWidth / 100 ) - .tickSize(-availableHeight, 0); - - g.select('.nv-x.nv-axis') - .attr('transform', 'translate(0,' + y.range()[0] + ')'); - d3.transition(g.select('.nv-x.nv-axis')) - .call(xAxis); - } - - if (showYAxis) { - yAxis - .scale(y) - .ticks( availableHeight / 36 ) - .tickSize( -availableWidth, 0); - - d3.transition(g.select('.nv-y.nv-axis')) - .call(yAxis); - } - //------------------------------------------------------------ - - - //============================================================ - // Event Handling/Dispatching (in chart's scope) - //------------------------------------------------------------ - - legend.dispatch.on('legendClick', function(d,i) { - d.disabled = !d.disabled; - - if (!data.filter(function(d) { return 
!d.disabled }).length) { - data.map(function(d) { - d.disabled = false; - wrap.selectAll('.nv-series').classed('disabled', false); - return d; - }); - } - - state.disabled = data.map(function(d) { return !!d.disabled }); - dispatch.stateChange(state); - - // container.transition().call(chart); - chart.update(); - }); - - legend.dispatch.on('legendDblclick', function(d) { - //Double clicking should always enable current series, and disabled all others. - data.forEach(function(d) { - d.disabled = true; - }); - d.disabled = false; - - state.disabled = data.map(function(d) { return !!d.disabled }); - dispatch.stateChange(state); - chart.update(); - }); - - -/* - legend.dispatch.on('legendMouseover', function(d, i) { - d.hover = true; - selection.transition().call(chart) - }); - - legend.dispatch.on('legendMouseout', function(d, i) { - d.hover = false; - selection.transition().call(chart) - }); -*/ - - dispatch.on('tooltipShow', function(e) { - if (tooltips) showTooltip(e, that.parentNode); - }); - - - dispatch.on('changeState', function(e) { - - if (typeof e.disabled !== 'undefined') { - data.forEach(function(series,i) { - series.disabled = e.disabled[i]; - }); - - state.disabled = e.disabled; - } - - chart.update(); - }); - - //============================================================ - - }); - - return chart; - } - - - //============================================================ - // Event Handling/Dispatching (out of chart's scope) - //------------------------------------------------------------ - - lines.dispatch.on('elementMouseover.tooltip', function(e) { - e.pos = [e.pos[0] + margin.left, e.pos[1] + margin.top]; - dispatch.tooltipShow(e); - }); - - lines.dispatch.on('elementMouseout.tooltip', function(e) { - dispatch.tooltipHide(e); - }); - - dispatch.on('tooltipHide', function() { - if (tooltips) nv.tooltip.cleanup(); - }); - - //============================================================ - - - 
//============================================================ - // Expose Public Variables - //------------------------------------------------------------ - - // expose chart's sub-components - chart.dispatch = dispatch; - chart.lines = lines; - chart.legend = legend; - chart.xAxis = xAxis; - chart.yAxis = yAxis; - - d3.rebind(chart, lines, 'defined', 'isArea', 'x', 'y', 'size', 'xScale', 'yScale', 'xDomain', 'yDomain', 'forceX', 'forceY', 'interactive', 'clipEdge', 'clipVoronoi', 'id', 'interpolate'); - - chart.margin = function(_) { - if (!arguments.length) return margin; - margin.top = typeof _.top != 'undefined' ? _.top : margin.top; - margin.right = typeof _.right != 'undefined' ? _.right : margin.right; - margin.bottom = typeof _.bottom != 'undefined' ? _.bottom : margin.bottom; - margin.left = typeof _.left != 'undefined' ? _.left : margin.left; - return chart; - }; - - chart.width = function(_) { - if (!arguments.length) return width; - width = _; - return chart; - }; - - chart.height = function(_) { - if (!arguments.length) return height; - height = _; - return chart; - }; - - chart.color = function(_) { - if (!arguments.length) return color; - color = nv.utils.getColor(_); - legend.color(color); - return chart; - }; - - chart.showLegend = function(_) { - if (!arguments.length) return showLegend; - showLegend = _; - return chart; - }; - - chart.showXAxis = function(_) { - if (!arguments.length) return showXAxis; - showXAxis = _; - return chart; - }; - - chart.showYAxis = function(_) { - if (!arguments.length) return showYAxis; - showYAxis = _; - return chart; - }; - - chart.rightAlignYAxis = function(_) { - if(!arguments.length) return rightAlignYAxis; - rightAlignYAxis = _; - yAxis.orient( (_) ? 
'right' : 'left'); - return chart; - }; - - chart.tooltips = function(_) { - if (!arguments.length) return tooltips; - tooltips = _; - return chart; - }; - - chart.tooltipContent = function(_) { - if (!arguments.length) return tooltip; - tooltip = _; - return chart; - }; - - chart.state = function(_) { - if (!arguments.length) return state; - state = _; - return chart; - }; - - chart.defaultState = function(_) { - if (!arguments.length) return defaultState; - defaultState = _; - return chart; - }; - - chart.noData = function(_) { - if (!arguments.length) return noData; - noData = _; - return chart; - }; - - //============================================================ - - - return chart; -} - -nv.models.linePlusBarChart = function() { - - //============================================================ - // Public Variables with Default Settings - //------------------------------------------------------------ - - var lines = nv.models.line() - , bars = nv.models.historicalBar() - , xAxis = nv.models.axis() - , y1Axis = nv.models.axis() - , y2Axis = nv.models.axis() - , legend = nv.models.legend() - ; - - var margin = {top: 30, right: 60, bottom: 50, left: 60} - , width = null - , height = null - , getX = function(d) { return d.x } - , getY = function(d) { return d.y } - , color = nv.utils.defaultColor() - , showLegend = true - , tooltips = true - , tooltip = function(key, x, y, e, graph) { - return '<h3>' + key + '</h3>' + - '<p>' + y + ' at ' + x + '</p>'; - } - , x - , y1 - , y2 - , state = {} - , defaultState = null - , noData = "No Data Available." 
- , dispatch = d3.dispatch('tooltipShow', 'tooltipHide', 'stateChange', 'changeState') - ; - - bars - .padData(true) - ; - lines - .clipEdge(false) - .padData(true) - ; - xAxis - .orient('bottom') - .tickPadding(7) - .highlightZero(false) - ; - y1Axis - .orient('left') - ; - y2Axis - .orient('right') - ; - - //============================================================ - - - //============================================================ - // Private Variables - //------------------------------------------------------------ - - var showTooltip = function(e, offsetElement) { - var left = e.pos[0] + ( offsetElement.offsetLeft || 0 ), - top = e.pos[1] + ( offsetElement.offsetTop || 0), - x = xAxis.tickFormat()(lines.x()(e.point, e.pointIndex)), - y = (e.series.bar ? y1Axis : y2Axis).tickFormat()(lines.y()(e.point, e.pointIndex)), - content = tooltip(e.series.key, x, y, e, chart); - - nv.tooltip.show([left, top], content, e.value < 0 ? 'n' : 's', null, offsetElement); - } - ; - - //------------------------------------------------------------ - - - - function chart(selection) { - selection.each(function(data) { - var container = d3.select(this), - that = this; - - var availableWidth = (width || parseInt(container.style('width')) || 960) - - margin.left - margin.right, - availableHeight = (height || parseInt(container.style('height')) || 400) - - margin.top - margin.bottom; - - chart.update = function() { container.transition().call(chart); }; - // chart.container = this; - - //set state.disabled - state.disabled = data.map(function(d) { return !!d.disabled }); - - if (!defaultState) { - var key; - defaultState = {}; - for (key in state) { - if (state[key] instanceof Array) - defaultState[key] = state[key].slice(0); - else - defaultState[key] = state[key]; - } - } - - //------------------------------------------------------------ - // Display No Data message if there's nothing to show. 
- - if (!data || !data.length || !data.filter(function(d) { return d.values.length }).length) { - var noDataText = container.selectAll('.nv-noData').data([noData]); - - noDataText.enter().append('text') - .attr('class', 'nvd3 nv-noData') - .attr('dy', '-.7em') - .style('text-anchor', 'middle'); - - noDataText - .attr('x', margin.left + availableWidth / 2) - .attr('y', margin.top + availableHeight / 2) - .text(function(d) { return d }); - - return chart; - } else { - container.selectAll('.nv-noData').remove(); - } - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Setup Scales - - var dataBars = data.filter(function(d) { return !d.disabled && d.bar }); - var dataLines = data.filter(function(d) { return !d.bar }); // removed the !d.disabled clause here to fix Issue #240 - - //x = xAxis.scale(); - x = dataLines.filter(function(d) { return !d.disabled; }).length && dataLines.filter(function(d) { return !d.disabled; })[0].values.length ? lines.xScale() : bars.xScale(); - //x = dataLines.filter(function(d) { return !d.disabled; }).length ? 
lines.xScale() : bars.xScale(); //old code before change above - y1 = bars.yScale(); - y2 = lines.yScale(); - - //------------------------------------------------------------ - - //------------------------------------------------------------ - // Setup containers and skeleton of chart - - var wrap = d3.select(this).selectAll('g.nv-wrap.nv-linePlusBar').data([data]); - var gEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-linePlusBar').append('g'); - var g = wrap.select('g'); - - gEnter.append('g').attr('class', 'nv-x nv-axis'); - gEnter.append('g').attr('class', 'nv-y1 nv-axis'); - gEnter.append('g').attr('class', 'nv-y2 nv-axis'); - gEnter.append('g').attr('class', 'nv-barsWrap'); - gEnter.append('g').attr('class', 'nv-linesWrap'); - gEnter.append('g').attr('class', 'nv-legendWrap'); - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Legend - - if (showLegend) { - legend.width( availableWidth / 2 ); - - g.select('.nv-legendWrap') - .datum(data.map(function(series) { - series.originalKey = series.originalKey === undefined ? series.key : series.originalKey; - series.key = series.originalKey + (series.bar ? 
' (left axis)' : ' (right axis)'); - return series; - })) - .call(legend); - - if ( margin.top != legend.height()) { - margin.top = legend.height(); - availableHeight = (height || parseInt(container.style('height')) || 400) - - margin.top - margin.bottom; - } - - g.select('.nv-legendWrap') - .attr('transform', 'translate(' + ( availableWidth / 2 ) + ',' + (-margin.top) +')'); - } - - //------------------------------------------------------------ - - - wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')'); - - - //------------------------------------------------------------ - // Main Chart Component(s) - - - lines - .width(availableWidth) - .height(availableHeight) - .color(data.map(function(d,i) { - return d.color || color(d, i); - }).filter(function(d,i) { return !data[i].disabled && !data[i].bar })) - - bars - .width(availableWidth) - .height(availableHeight) - .color(data.map(function(d,i) { - return d.color || color(d, i); - }).filter(function(d,i) { return !data[i].disabled && data[i].bar })) - - - - var barsWrap = g.select('.nv-barsWrap') - .datum(dataBars.length ? dataBars : [{values:[]}]) - - var linesWrap = g.select('.nv-linesWrap') - .datum(dataLines[0] && !dataLines[0].disabled ? dataLines : [{values:[]}] ); - //.datum(!dataLines[0].disabled ? 
dataLines : [{values:dataLines[0].values.map(function(d) { return [d[0], null] }) }] ); - - d3.transition(barsWrap).call(bars); - d3.transition(linesWrap).call(lines); - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Setup Axes - - xAxis - .scale(x) - .ticks( availableWidth / 100 ) - .tickSize(-availableHeight, 0); - - g.select('.nv-x.nv-axis') - .attr('transform', 'translate(0,' + y1.range()[0] + ')'); - d3.transition(g.select('.nv-x.nv-axis')) - .call(xAxis); - - - y1Axis - .scale(y1) - .ticks( availableHeight / 36 ) - .tickSize(-availableWidth, 0); - - d3.transition(g.select('.nv-y1.nv-axis')) - .style('opacity', dataBars.length ? 1 : 0) - .call(y1Axis); - - - y2Axis - .scale(y2) - .ticks( availableHeight / 36 ) - .tickSize(dataBars.length ? 0 : -availableWidth, 0); // Show the y2 rules only if y1 has none - - g.select('.nv-y2.nv-axis') - .style('opacity', dataLines.length ? 1 : 0) - .attr('transform', 'translate(' + availableWidth + ',0)'); - //.attr('transform', 'translate(' + x.range()[1] + ',0)'); - - d3.transition(g.select('.nv-y2.nv-axis')) - .call(y2Axis); - - //------------------------------------------------------------ - - - //============================================================ - // Event Handling/Dispatching (in chart's scope) - //------------------------------------------------------------ - - legend.dispatch.on('legendClick', function(d,i) { - d.disabled = !d.disabled; - - if (!data.filter(function(d) { return !d.disabled }).length) { - data.map(function(d) { - d.disabled = false; - wrap.selectAll('.nv-series').classed('disabled', false); - return d; - }); - } - - state.disabled = data.map(function(d) { return !!d.disabled }); - dispatch.stateChange(state); - - chart.update(); - }); - - legend.dispatch.on('legendDblclick', function(d) { - //Double clicking should always enable current series, and disabled all others. 
- data.forEach(function(d) { - d.disabled = true; - }); - d.disabled = false; - - state.disabled = data.map(function(d) { return !!d.disabled }); - dispatch.stateChange(state); - chart.update(); - }); - - - dispatch.on('tooltipShow', function(e) { - if (tooltips) showTooltip(e, that.parentNode); - }); - - - // Update chart from a state object passed to event handler - dispatch.on('changeState', function(e) { - - if (typeof e.disabled !== 'undefined') { - data.forEach(function(series,i) { - series.disabled = e.disabled[i]; - }); - - state.disabled = e.disabled; - } - - chart.update(); - }); - - //============================================================ - - - }); - - return chart; - } - - - //============================================================ - // Event Handling/Dispatching (out of chart's scope) - //------------------------------------------------------------ - - lines.dispatch.on('elementMouseover.tooltip', function(e) { - e.pos = [e.pos[0] + margin.left, e.pos[1] + margin.top]; - dispatch.tooltipShow(e); - }); - - lines.dispatch.on('elementMouseout.tooltip', function(e) { - dispatch.tooltipHide(e); - }); - - bars.dispatch.on('elementMouseover.tooltip', function(e) { - e.pos = [e.pos[0] + margin.left, e.pos[1] + margin.top]; - dispatch.tooltipShow(e); - }); - - bars.dispatch.on('elementMouseout.tooltip', function(e) { - dispatch.tooltipHide(e); - }); - - dispatch.on('tooltipHide', function() { - if (tooltips) nv.tooltip.cleanup(); - }); - - //============================================================ - - - //============================================================ - // Expose Public Variables - //------------------------------------------------------------ - - // expose chart's sub-components - chart.dispatch = dispatch; - chart.legend = legend; - chart.lines = lines; - chart.bars = bars; - chart.xAxis = xAxis; - chart.y1Axis = y1Axis; - chart.y2Axis = y2Axis; - - d3.rebind(chart, lines, 'defined', 'size', 'clipVoronoi', 'interpolate'); - 
//TODO: consider rebinding x, y and some other stuff, and simply do soemthign lile bars.x(lines.x()), etc. - //d3.rebind(chart, lines, 'x', 'y', 'size', 'xDomain', 'yDomain', 'forceX', 'forceY', 'interactive', 'clipEdge', 'clipVoronoi', 'id'); - - chart.x = function(_) { - if (!arguments.length) return getX; - getX = _; - lines.x(_); - bars.x(_); - return chart; - }; - - chart.y = function(_) { - if (!arguments.length) return getY; - getY = _; - lines.y(_); - bars.y(_); - return chart; - }; - - chart.margin = function(_) { - if (!arguments.length) return margin; - margin.top = typeof _.top != 'undefined' ? _.top : margin.top; - margin.right = typeof _.right != 'undefined' ? _.right : margin.right; - margin.bottom = typeof _.bottom != 'undefined' ? _.bottom : margin.bottom; - margin.left = typeof _.left != 'undefined' ? _.left : margin.left; - return chart; - }; - - chart.width = function(_) { - if (!arguments.length) return width; - width = _; - return chart; - }; - - chart.height = function(_) { - if (!arguments.length) return height; - height = _; - return chart; - }; - - chart.color = function(_) { - if (!arguments.length) return color; - color = nv.utils.getColor(_); - legend.color(color); - return chart; - }; - - chart.showLegend = function(_) { - if (!arguments.length) return showLegend; - showLegend = _; - return chart; - }; - - chart.tooltips = function(_) { - if (!arguments.length) return tooltips; - tooltips = _; - return chart; - }; - - chart.tooltipContent = function(_) { - if (!arguments.length) return tooltip; - tooltip = _; - return chart; - }; - - chart.state = function(_) { - if (!arguments.length) return state; - state = _; - return chart; - }; - - chart.defaultState = function(_) { - if (!arguments.length) return defaultState; - defaultState = _; - return chart; - }; - - chart.noData = function(_) { - if (!arguments.length) return noData; - noData = _; - return chart; - }; - - //============================================================ - - - 
return chart; -} - -nv.models.lineWithFocusChart = function() { - - //============================================================ - // Public Variables with Default Settings - //------------------------------------------------------------ - - var lines = nv.models.line() - , lines2 = nv.models.line() - , xAxis = nv.models.axis() - , yAxis = nv.models.axis() - , x2Axis = nv.models.axis() - , y2Axis = nv.models.axis() - , legend = nv.models.legend() - , brush = d3.svg.brush() - ; - - var margin = {top: 30, right: 30, bottom: 30, left: 60} - , margin2 = {top: 0, right: 30, bottom: 20, left: 60} - , color = nv.utils.defaultColor() - , width = null - , height = null - , height2 = 100 - , x - , y - , x2 - , y2 - , showLegend = true - , brushExtent = null - , tooltips = true - , tooltip = function(key, x, y, e, graph) { - return '<h3>' + key + '</h3>' + - '<p>' + y + ' at ' + x + '</p>' - } - , noData = "No Data Available." - , dispatch = d3.dispatch('tooltipShow', 'tooltipHide', 'brush') - ; - - lines - .clipEdge(true) - ; - lines2 - .interactive(false) - ; - xAxis - .orient('bottom') - .tickPadding(5) - ; - yAxis - .orient('left') - ; - x2Axis - .orient('bottom') - .tickPadding(5) - ; - y2Axis - .orient('left') - ; - //============================================================ - - - //============================================================ - // Private Variables - //------------------------------------------------------------ - - var showTooltip = function(e, offsetElement) { - var left = e.pos[0] + ( offsetElement.offsetLeft || 0 ), - top = e.pos[1] + ( offsetElement.offsetTop || 0), - x = xAxis.tickFormat()(lines.x()(e.point, e.pointIndex)), - y = yAxis.tickFormat()(lines.y()(e.point, e.pointIndex)), - content = tooltip(e.series.key, x, y, e, chart); - - nv.tooltip.show([left, top], content, null, null, offsetElement); - }; - - //============================================================ - - - function chart(selection) { - selection.each(function(data) { - 
var container = d3.select(this), - that = this; - - var availableWidth = (width || parseInt(container.style('width')) || 960) - - margin.left - margin.right, - availableHeight1 = (height || parseInt(container.style('height')) || 400) - - margin.top - margin.bottom - height2, - availableHeight2 = height2 - margin2.top - margin2.bottom; - - chart.update = function() { container.transition().call(chart) }; - chart.container = this; - - - //------------------------------------------------------------ - // Display No Data message if there's nothing to show. - - if (!data || !data.length || !data.filter(function(d) { return d.values.length }).length) { - var noDataText = container.selectAll('.nv-noData').data([noData]); - - noDataText.enter().append('text') - .attr('class', 'nvd3 nv-noData') - .attr('dy', '-.7em') - .style('text-anchor', 'middle'); - - noDataText - .attr('x', margin.left + availableWidth / 2) - .attr('y', margin.top + availableHeight1 / 2) - .text(function(d) { return d }); - - return chart; - } else { - container.selectAll('.nv-noData').remove(); - } - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Setup Scales - - x = lines.xScale(); - y = lines.yScale(); - x2 = lines2.xScale(); - y2 = lines2.yScale(); - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Setup containers and skeleton of chart - - var wrap = container.selectAll('g.nv-wrap.nv-lineWithFocusChart').data([data]); - var gEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-lineWithFocusChart').append('g'); - var g = wrap.select('g'); - - gEnter.append('g').attr('class', 'nv-legendWrap'); - - var focusEnter = gEnter.append('g').attr('class', 'nv-focus'); - focusEnter.append('g').attr('class', 'nv-x nv-axis'); - focusEnter.append('g').attr('class', 'nv-y nv-axis'); - focusEnter.append('g').attr('class', 
'nv-linesWrap'); - - var contextEnter = gEnter.append('g').attr('class', 'nv-context'); - contextEnter.append('g').attr('class', 'nv-x nv-axis'); - contextEnter.append('g').attr('class', 'nv-y nv-axis'); - contextEnter.append('g').attr('class', 'nv-linesWrap'); - contextEnter.append('g').attr('class', 'nv-brushBackground'); - contextEnter.append('g').attr('class', 'nv-x nv-brush'); - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Legend - - if (showLegend) { - legend.width(availableWidth); - - g.select('.nv-legendWrap') - .datum(data) - .call(legend); - - if ( margin.top != legend.height()) { - margin.top = legend.height(); - availableHeight1 = (height || parseInt(container.style('height')) || 400) - - margin.top - margin.bottom - height2; - } - - g.select('.nv-legendWrap') - .attr('transform', 'translate(0,' + (-margin.top) +')') - } - - //------------------------------------------------------------ - - - wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')'); - - - //------------------------------------------------------------ - // Main Chart Component(s) - - lines - .width(availableWidth) - .height(availableHeight1) - .color( - data - .map(function(d,i) { - return d.color || color(d, i); - }) - .filter(function(d,i) { - return !data[i].disabled; - }) - ); - - lines2 - .defined(lines.defined()) - .width(availableWidth) - .height(availableHeight2) - .color( - data - .map(function(d,i) { - return d.color || color(d, i); - }) - .filter(function(d,i) { - return !data[i].disabled; - }) - ); - - g.select('.nv-context') - .attr('transform', 'translate(0,' + ( availableHeight1 + margin.bottom + margin2.top) + ')') - - var contextLinesWrap = g.select('.nv-context .nv-linesWrap') - .datum(data.filter(function(d) { return !d.disabled })) - - d3.transition(contextLinesWrap).call(lines2); - - //------------------------------------------------------------ - - - /* - 
var focusLinesWrap = g.select('.nv-focus .nv-linesWrap') - .datum(data.filter(function(d) { return !d.disabled })) - - d3.transition(focusLinesWrap).call(lines); - */ - - - //------------------------------------------------------------ - // Setup Main (Focus) Axes - - xAxis - .scale(x) - .ticks( availableWidth / 100 ) - .tickSize(-availableHeight1, 0); - - yAxis - .scale(y) - .ticks( availableHeight1 / 36 ) - .tickSize( -availableWidth, 0); - - g.select('.nv-focus .nv-x.nv-axis') - .attr('transform', 'translate(0,' + availableHeight1 + ')'); - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Setup Brush - - brush - .x(x2) - .on('brush', onBrush); - - if (brushExtent) brush.extent(brushExtent); - - var brushBG = g.select('.nv-brushBackground').selectAll('g') - .data([brushExtent || brush.extent()]) - - var brushBGenter = brushBG.enter() - .append('g'); - - brushBGenter.append('rect') - .attr('class', 'left') - .attr('x', 0) - .attr('y', 0) - .attr('height', availableHeight2); - - brushBGenter.append('rect') - .attr('class', 'right') - .attr('x', 0) - .attr('y', 0) - .attr('height', availableHeight2); - - gBrush = g.select('.nv-x.nv-brush') - .call(brush); - gBrush.selectAll('rect') - //.attr('y', -5) - .attr('height', availableHeight2); - gBrush.selectAll('.resize').append('path').attr('d', resizePath); - - onBrush(); - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Setup Secondary (Context) Axes - - x2Axis - .scale(x2) - .ticks( availableWidth / 100 ) - .tickSize(-availableHeight2, 0); - - g.select('.nv-context .nv-x.nv-axis') - .attr('transform', 'translate(0,' + y2.range()[0] + ')'); - d3.transition(g.select('.nv-context .nv-x.nv-axis')) - .call(x2Axis); - - - y2Axis - .scale(y2) - .ticks( availableHeight2 / 36 ) - .tickSize( -availableWidth, 0); - - 
d3.transition(g.select('.nv-context .nv-y.nv-axis')) - .call(y2Axis); - - g.select('.nv-context .nv-x.nv-axis') - .attr('transform', 'translate(0,' + y2.range()[0] + ')'); - - //------------------------------------------------------------ - - - //============================================================ - // Event Handling/Dispatching (in chart's scope) - //------------------------------------------------------------ - - legend.dispatch.on('legendClick', function(d,i) { - d.disabled = !d.disabled; - - if (!data.filter(function(d) { return !d.disabled }).length) { - data.map(function(d) { - d.disabled = false; - wrap.selectAll('.nv-series').classed('disabled', false); - return d; - }); - } - - container.transition().call(chart); - }); - - dispatch.on('tooltipShow', function(e) { - if (tooltips) showTooltip(e, that.parentNode); - }); - - //============================================================ - - - //============================================================ - // Functions - //------------------------------------------------------------ - - // Taken from crossfilter (http://square.github.com/crossfilter/) - function resizePath(d) { - var e = +(d == 'e'), - x = e ? 1 : -1, - y = availableHeight2 / 3; - return 'M' + (.5 * x) + ',' + y - + 'A6,6 0 0 ' + e + ' ' + (6.5 * x) + ',' + (y + 6) - + 'V' + (2 * y - 6) - + 'A6,6 0 0 ' + e + ' ' + (.5 * x) + ',' + (2 * y) - + 'Z' - + 'M' + (2.5 * x) + ',' + (y + 8) - + 'V' + (2 * y - 8) - + 'M' + (4.5 * x) + ',' + (y + 8) - + 'V' + (2 * y - 8); - } - - - function updateBrushBG() { - if (!brush.empty()) brush.extent(brushExtent); - brushBG - .data([brush.empty() ? x2.domain() : brushExtent]) - .each(function(d,i) { - var leftWidth = x2(d[0]) - x.range()[0], - rightWidth = x.range()[1] - x2(d[1]); - d3.select(this).select('.left') - .attr('width', leftWidth < 0 ? 0 : leftWidth); - - d3.select(this).select('.right') - .attr('x', x2(d[1])) - .attr('width', rightWidth < 0 ? 
0 : rightWidth); - }); - } - - - function onBrush() { - brushExtent = brush.empty() ? null : brush.extent(); - extent = brush.empty() ? x2.domain() : brush.extent(); - - - dispatch.brush({extent: extent, brush: brush}); - - - updateBrushBG(); - - // Update Main (Focus) - var focusLinesWrap = g.select('.nv-focus .nv-linesWrap') - .datum( - data - .filter(function(d) { return !d.disabled }) - .map(function(d,i) { - return { - key: d.key, - values: d.values.filter(function(d,i) { - return lines.x()(d,i) >= extent[0] && lines.x()(d,i) <= extent[1]; - }) - } - }) - ); - d3.transition(focusLinesWrap).call(lines); - - - // Update Main (Focus) Axes - d3.transition(g.select('.nv-focus .nv-x.nv-axis')) - .call(xAxis); - d3.transition(g.select('.nv-focus .nv-y.nv-axis')) - .call(yAxis); - } - - //============================================================ - - - }); - - return chart; - } - - - //============================================================ - // Event Handling/Dispatching (out of chart's scope) - //------------------------------------------------------------ - - lines.dispatch.on('elementMouseover.tooltip', function(e) { - e.pos = [e.pos[0] + margin.left, e.pos[1] + margin.top]; - dispatch.tooltipShow(e); - }); - - lines.dispatch.on('elementMouseout.tooltip', function(e) { - dispatch.tooltipHide(e); - }); - - dispatch.on('tooltipHide', function() { - if (tooltips) nv.tooltip.cleanup(); - }); - - //============================================================ - - - //============================================================ - // Expose Public Variables - //------------------------------------------------------------ - - // expose chart's sub-components - chart.dispatch = dispatch; - chart.legend = legend; - chart.lines = lines; - chart.lines2 = lines2; - chart.xAxis = xAxis; - chart.yAxis = yAxis; - chart.x2Axis = x2Axis; - chart.y2Axis = y2Axis; - - d3.rebind(chart, lines, 'defined', 'isArea', 'size', 'xDomain', 'yDomain', 'forceX', 'forceY', 'interactive', 
'clipEdge', 'clipVoronoi', 'id'); - - chart.x = function(_) { - if (!arguments.length) return lines.x; - lines.x(_); - lines2.x(_); - return chart; - }; - - chart.y = function(_) { - if (!arguments.length) return lines.y; - lines.y(_); - lines2.y(_); - return chart; - }; - - chart.margin = function(_) { - if (!arguments.length) return margin; - margin.top = typeof _.top != 'undefined' ? _.top : margin.top; - margin.right = typeof _.right != 'undefined' ? _.right : margin.right; - margin.bottom = typeof _.bottom != 'undefined' ? _.bottom : margin.bottom; - margin.left = typeof _.left != 'undefined' ? _.left : margin.left; - return chart; - }; - - chart.margin2 = function(_) { - if (!arguments.length) return margin2; - margin2 = _; - return chart; - }; - - chart.width = function(_) { - if (!arguments.length) return width; - width = _; - return chart; - }; - - chart.height = function(_) { - if (!arguments.length) return height; - height = _; - return chart; - }; - - chart.height2 = function(_) { - if (!arguments.length) return height2; - height2 = _; - return chart; - }; - - chart.color = function(_) { - if (!arguments.length) return color; - color =nv.utils.getColor(_); - legend.color(color); - return chart; - }; - - chart.showLegend = function(_) { - if (!arguments.length) return showLegend; - showLegend = _; - return chart; - }; - - chart.tooltips = function(_) { - if (!arguments.length) return tooltips; - tooltips = _; - return chart; - }; - - chart.tooltipContent = function(_) { - if (!arguments.length) return tooltip; - tooltip = _; - return chart; - }; - - chart.interpolate = function(_) { - if (!arguments.length) return lines.interpolate(); - lines.interpolate(_); - lines2.interpolate(_); - return chart; - }; - - chart.noData = function(_) { - if (!arguments.length) return noData; - noData = _; - return chart; - }; - - // Chart has multiple similar Axes, to prevent code duplication, probably need to link all axis functions manually like below - 
chart.xTickFormat = function(_) { - if (!arguments.length) return xAxis.tickFormat(); - xAxis.tickFormat(_); - x2Axis.tickFormat(_); - return chart; - }; - - chart.yTickFormat = function(_) { - if (!arguments.length) return yAxis.tickFormat(); - yAxis.tickFormat(_); - y2Axis.tickFormat(_); - return chart; - }; - - //============================================================ - - - return chart; -} - -nv.models.linePlusBarWithFocusChart = function() { - - //============================================================ - // Public Variables with Default Settings - //------------------------------------------------------------ - - var lines = nv.models.line() - , lines2 = nv.models.line() - , bars = nv.models.historicalBar() - , bars2 = nv.models.historicalBar() - , xAxis = nv.models.axis() - , x2Axis = nv.models.axis() - , y1Axis = nv.models.axis() - , y2Axis = nv.models.axis() - , y3Axis = nv.models.axis() - , y4Axis = nv.models.axis() - , legend = nv.models.legend() - , brush = d3.svg.brush() - ; - - var margin = {top: 30, right: 30, bottom: 30, left: 60} - , margin2 = {top: 0, right: 30, bottom: 20, left: 60} - , width = null - , height = null - , height2 = 100 - , getX = function(d) { return d.x } - , getY = function(d) { return d.y } - , color = nv.utils.defaultColor() - , showLegend = true - , extent - , brushExtent = null - , tooltips = true - , tooltip = function(key, x, y, e, graph) { - return '<h3>' + key + '</h3>' + - '<p>' + y + ' at ' + x + '</p>'; - } - , x - , x2 - , y1 - , y2 - , y3 - , y4 - , noData = "No Data Available." 
- , dispatch = d3.dispatch('tooltipShow', 'tooltipHide', 'brush') - ; - - lines - .clipEdge(true) - ; - lines2 - .interactive(false) - ; - xAxis - .orient('bottom') - .tickPadding(5) - ; - y1Axis - .orient('left') - ; - y2Axis - .orient('right') - ; - x2Axis - .orient('bottom') - .tickPadding(5) - ; - y3Axis - .orient('left') - ; - y4Axis - .orient('right') - ; - - //============================================================ - - - //============================================================ - // Private Variables - //------------------------------------------------------------ - - var showTooltip = function(e, offsetElement) { - if (extent) { - e.pointIndex += Math.ceil(extent[0]); - } - var left = e.pos[0] + ( offsetElement.offsetLeft || 0 ), - top = e.pos[1] + ( offsetElement.offsetTop || 0), - x = xAxis.tickFormat()(lines.x()(e.point, e.pointIndex)), - y = (e.series.bar ? y1Axis : y2Axis).tickFormat()(lines.y()(e.point, e.pointIndex)), - content = tooltip(e.series.key, x, y, e, chart); - - nv.tooltip.show([left, top], content, e.value < 0 ? 'n' : 's', null, offsetElement); - }; - - //------------------------------------------------------------ - - - - function chart(selection) { - selection.each(function(data) { - var container = d3.select(this), - that = this; - - var availableWidth = (width || parseInt(container.style('width')) || 960) - - margin.left - margin.right, - availableHeight1 = (height || parseInt(container.style('height')) || 400) - - margin.top - margin.bottom - height2, - availableHeight2 = height2 - margin2.top - margin2.bottom; - - chart.update = function() { container.transition().call(chart); }; - chart.container = this; - - - //------------------------------------------------------------ - // Display No Data message if there's nothing to show. 
- - if (!data || !data.length || !data.filter(function(d) { return d.values.length }).length) { - var noDataText = container.selectAll('.nv-noData').data([noData]); - - noDataText.enter().append('text') - .attr('class', 'nvd3 nv-noData') - .attr('dy', '-.7em') - .style('text-anchor', 'middle'); - - noDataText - .attr('x', margin.left + availableWidth / 2) - .attr('y', margin.top + availableHeight1 / 2) - .text(function(d) { return d }); - - return chart; - } else { - container.selectAll('.nv-noData').remove(); - } - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Setup Scales - - var dataBars = data.filter(function(d) { return !d.disabled && d.bar }); - var dataLines = data.filter(function(d) { return !d.bar }); // removed the !d.disabled clause here to fix Issue #240 - - x = bars.xScale(); - x2 = x2Axis.scale(); - y1 = bars.yScale(); - y2 = lines.yScale(); - y3 = bars2.yScale(); - y4 = lines2.yScale(); - - var series1 = data - .filter(function(d) { return !d.disabled && d.bar }) - .map(function(d) { - return d.values.map(function(d,i) { - return { x: getX(d,i), y: getY(d,i) } - }) - }); - - var series2 = data - .filter(function(d) { return !d.disabled && !d.bar }) - .map(function(d) { - return d.values.map(function(d,i) { - return { x: getX(d,i), y: getY(d,i) } - }) - }); - - x .range([0, availableWidth]); - - x2 .domain(d3.extent(d3.merge(series1.concat(series2)), function(d) { return d.x } )) - .range([0, availableWidth]); - - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Setup containers and skeleton of chart - - var wrap = container.selectAll('g.nv-wrap.nv-linePlusBar').data([data]); - var gEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-linePlusBar').append('g'); - var g = wrap.select('g'); - - gEnter.append('g').attr('class', 'nv-legendWrap'); - - var focusEnter 
= gEnter.append('g').attr('class', 'nv-focus'); - focusEnter.append('g').attr('class', 'nv-x nv-axis'); - focusEnter.append('g').attr('class', 'nv-y1 nv-axis'); - focusEnter.append('g').attr('class', 'nv-y2 nv-axis'); - focusEnter.append('g').attr('class', 'nv-barsWrap'); - focusEnter.append('g').attr('class', 'nv-linesWrap'); - - var contextEnter = gEnter.append('g').attr('class', 'nv-context'); - contextEnter.append('g').attr('class', 'nv-x nv-axis'); - contextEnter.append('g').attr('class', 'nv-y1 nv-axis'); - contextEnter.append('g').attr('class', 'nv-y2 nv-axis'); - contextEnter.append('g').attr('class', 'nv-barsWrap'); - contextEnter.append('g').attr('class', 'nv-linesWrap'); - contextEnter.append('g').attr('class', 'nv-brushBackground'); - contextEnter.append('g').attr('class', 'nv-x nv-brush'); - - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Legend - - if (showLegend) { - legend.width( availableWidth / 2 ); - - g.select('.nv-legendWrap') - .datum(data.map(function(series) { - series.originalKey = series.originalKey === undefined ? series.key : series.originalKey; - series.key = series.originalKey + (series.bar ? 
' (left axis)' : ' (right axis)'); - return series; - })) - .call(legend); - - if ( margin.top != legend.height()) { - margin.top = legend.height(); - availableHeight1 = (height || parseInt(container.style('height')) || 400) - - margin.top - margin.bottom - height2; - } - - g.select('.nv-legendWrap') - .attr('transform', 'translate(' + ( availableWidth / 2 ) + ',' + (-margin.top) +')'); - } - - //------------------------------------------------------------ - - - wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')'); - - - //------------------------------------------------------------ - // Context Components - - bars2 - .width(availableWidth) - .height(availableHeight2) - .color(data.map(function(d,i) { - return d.color || color(d, i); - }).filter(function(d,i) { return !data[i].disabled && data[i].bar })); - - lines2 - .width(availableWidth) - .height(availableHeight2) - .color(data.map(function(d,i) { - return d.color || color(d, i); - }).filter(function(d,i) { return !data[i].disabled && !data[i].bar })); - - var bars2Wrap = g.select('.nv-context .nv-barsWrap') - .datum(dataBars.length ? dataBars : [{values:[]}]); - - var lines2Wrap = g.select('.nv-context .nv-linesWrap') - .datum(!dataLines[0].disabled ? 
dataLines : [{values:[]}]); - - g.select('.nv-context') - .attr('transform', 'translate(0,' + ( availableHeight1 + margin.bottom + margin2.top) + ')') - - d3.transition(bars2Wrap).call(bars2); - d3.transition(lines2Wrap).call(lines2); - - //------------------------------------------------------------ - - - - //------------------------------------------------------------ - // Setup Brush - - brush - .x(x2) - .on('brush', onBrush); - - if (brushExtent) brush.extent(brushExtent); - - var brushBG = g.select('.nv-brushBackground').selectAll('g') - .data([brushExtent || brush.extent()]) - - var brushBGenter = brushBG.enter() - .append('g'); - - brushBGenter.append('rect') - .attr('class', 'left') - .attr('x', 0) - .attr('y', 0) - .attr('height', availableHeight2); - - brushBGenter.append('rect') - .attr('class', 'right') - .attr('x', 0) - .attr('y', 0) - .attr('height', availableHeight2); - - var gBrush = g.select('.nv-x.nv-brush') - .call(brush); - gBrush.selectAll('rect') - //.attr('y', -5) - .attr('height', availableHeight2); - gBrush.selectAll('.resize').append('path').attr('d', resizePath); - - //------------------------------------------------------------ - - //------------------------------------------------------------ - // Setup Secondary (Context) Axes - - x2Axis - .ticks( availableWidth / 100 ) - .tickSize(-availableHeight2, 0); - - g.select('.nv-context .nv-x.nv-axis') - .attr('transform', 'translate(0,' + y3.range()[0] + ')'); - d3.transition(g.select('.nv-context .nv-x.nv-axis')) - .call(x2Axis); - - - y3Axis - .scale(y3) - .ticks( availableHeight2 / 36 ) - .tickSize( -availableWidth, 0); - - g.select('.nv-context .nv-y1.nv-axis') - .style('opacity', dataBars.length ? 1 : 0) - .attr('transform', 'translate(0,' + x2.range()[0] + ')'); - - d3.transition(g.select('.nv-context .nv-y1.nv-axis')) - .call(y3Axis); - - - y4Axis - .scale(y4) - .ticks( availableHeight2 / 36 ) - .tickSize(dataBars.length ? 
0 : -availableWidth, 0); // Show the y2 rules only if y1 has none - - g.select('.nv-context .nv-y2.nv-axis') - .style('opacity', dataLines.length ? 1 : 0) - .attr('transform', 'translate(' + x2.range()[1] + ',0)'); - - d3.transition(g.select('.nv-context .nv-y2.nv-axis')) - .call(y4Axis); - - //------------------------------------------------------------ - - //============================================================ - // Event Handling/Dispatching (in chart's scope) - //------------------------------------------------------------ - - legend.dispatch.on('legendClick', function(d,i) { - d.disabled = !d.disabled; - - if (!data.filter(function(d) { return !d.disabled }).length) { - data.map(function(d) { - d.disabled = false; - wrap.selectAll('.nv-series').classed('disabled', false); - return d; - }); - } - - chart.update(); - }); - - dispatch.on('tooltipShow', function(e) { - if (tooltips) showTooltip(e, that.parentNode); - }); - - //============================================================ - - - //============================================================ - // Functions - //------------------------------------------------------------ - - // Taken from crossfilter (http://square.github.com/crossfilter/) - function resizePath(d) { - var e = +(d == 'e'), - x = e ? 1 : -1, - y = availableHeight2 / 3; - return 'M' + (.5 * x) + ',' + y - + 'A6,6 0 0 ' + e + ' ' + (6.5 * x) + ',' + (y + 6) - + 'V' + (2 * y - 6) - + 'A6,6 0 0 ' + e + ' ' + (.5 * x) + ',' + (2 * y) - + 'Z' - + 'M' + (2.5 * x) + ',' + (y + 8) - + 'V' + (2 * y - 8) - + 'M' + (4.5 * x) + ',' + (y + 8) - + 'V' + (2 * y - 8); - } - - - function updateBrushBG() { - if (!brush.empty()) brush.extent(brushExtent); - brushBG - .data([brush.empty() ? x2.domain() : brushExtent]) - .each(function(d,i) { - var leftWidth = x2(d[0]) - x2.range()[0], - rightWidth = x2.range()[1] - x2(d[1]); - d3.select(this).select('.left') - .attr('width', leftWidth < 0 ? 
0 : leftWidth); - - d3.select(this).select('.right') - .attr('x', x2(d[1])) - .attr('width', rightWidth < 0 ? 0 : rightWidth); - }); - } - - - function onBrush() { - brushExtent = brush.empty() ? null : brush.extent(); - extent = brush.empty() ? x2.domain() : brush.extent(); - - - dispatch.brush({extent: extent, brush: brush}); - - updateBrushBG(); - - - //------------------------------------------------------------ - // Prepare Main (Focus) Bars and Lines - - bars - .width(availableWidth) - .height(availableHeight1) - .color(data.map(function(d,i) { - return d.color || color(d, i); - }).filter(function(d,i) { return !data[i].disabled && data[i].bar })); - - - lines - .width(availableWidth) - .height(availableHeight1) - .color(data.map(function(d,i) { - return d.color || color(d, i); - }).filter(function(d,i) { return !data[i].disabled && !data[i].bar })); - - var focusBarsWrap = g.select('.nv-focus .nv-barsWrap') - .datum(!dataBars.length ? [{values:[]}] : - dataBars - .map(function(d,i) { - return { - key: d.key, - values: d.values.filter(function(d,i) { - return bars.x()(d,i) >= extent[0] && bars.x()(d,i) <= extent[1]; - }) - } - }) - ); - - var focusLinesWrap = g.select('.nv-focus .nv-linesWrap') - .datum(dataLines[0].disabled ? 
[{values:[]}] : - dataLines - .map(function(d,i) { - return { - key: d.key, - values: d.values.filter(function(d,i) { - return lines.x()(d,i) >= extent[0] && lines.x()(d,i) <= extent[1]; - }) - } - }) - ); - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Update Main (Focus) X Axis - - if (dataBars.length) { - x = bars.xScale(); - } else { - x = lines.xScale(); - } - - xAxis - .scale(x) - .ticks( availableWidth / 100 ) - .tickSize(-availableHeight1, 0); - - xAxis.domain([Math.ceil(extent[0]), Math.floor(extent[1])]); - - d3.transition(g.select('.nv-x.nv-axis')) - .call(xAxis); - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Update Main (Focus) Bars and Lines - - d3.transition(focusBarsWrap).call(bars); - d3.transition(focusLinesWrap).call(lines); - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Setup and Update Main (Focus) Y Axes - - g.select('.nv-focus .nv-x.nv-axis') - .attr('transform', 'translate(0,' + y1.range()[0] + ')'); - - - y1Axis - .scale(y1) - .ticks( availableHeight1 / 36 ) - .tickSize(-availableWidth, 0); - - g.select('.nv-focus .nv-y1.nv-axis') - .style('opacity', dataBars.length ? 1 : 0); - - - y2Axis - .scale(y2) - .ticks( availableHeight1 / 36 ) - .tickSize(dataBars.length ? 0 : -availableWidth, 0); // Show the y2 rules only if y1 has none - - g.select('.nv-focus .nv-y2.nv-axis') - .style('opacity', dataLines.length ? 
1 : 0) - .attr('transform', 'translate(' + x.range()[1] + ',0)'); - - d3.transition(g.select('.nv-focus .nv-y1.nv-axis')) - .call(y1Axis); - d3.transition(g.select('.nv-focus .nv-y2.nv-axis')) - .call(y2Axis); - } - - //============================================================ - - onBrush(); - - }); - - return chart; - } - - - //============================================================ - // Event Handling/Dispatching (out of chart's scope) - //------------------------------------------------------------ - - lines.dispatch.on('elementMouseover.tooltip', function(e) { - e.pos = [e.pos[0] + margin.left, e.pos[1] + margin.top]; - dispatch.tooltipShow(e); - }); - - lines.dispatch.on('elementMouseout.tooltip', function(e) { - dispatch.tooltipHide(e); - }); - - bars.dispatch.on('elementMouseover.tooltip', function(e) { - e.pos = [e.pos[0] + margin.left, e.pos[1] + margin.top]; - dispatch.tooltipShow(e); - }); - - bars.dispatch.on('elementMouseout.tooltip', function(e) { - dispatch.tooltipHide(e); - }); - - dispatch.on('tooltipHide', function() { - if (tooltips) nv.tooltip.cleanup(); - }); - - //============================================================ - - - //============================================================ - // Expose Public Variables - //------------------------------------------------------------ - - // expose chart's sub-components - chart.dispatch = dispatch; - chart.legend = legend; - chart.lines = lines; - chart.lines2 = lines2; - chart.bars = bars; - chart.bars2 = bars2; - chart.xAxis = xAxis; - chart.x2Axis = x2Axis; - chart.y1Axis = y1Axis; - chart.y2Axis = y2Axis; - chart.y3Axis = y3Axis; - chart.y4Axis = y4Axis; - - d3.rebind(chart, lines, 'defined', 'size', 'clipVoronoi', 'interpolate'); - //TODO: consider rebinding x, y and some other stuff, and simply do soemthign lile bars.x(lines.x()), etc. 
- //d3.rebind(chart, lines, 'x', 'y', 'size', 'xDomain', 'yDomain', 'forceX', 'forceY', 'interactive', 'clipEdge', 'clipVoronoi', 'id'); - - chart.x = function(_) { - if (!arguments.length) return getX; - getX = _; - lines.x(_); - bars.x(_); - return chart; - }; - - chart.y = function(_) { - if (!arguments.length) return getY; - getY = _; - lines.y(_); - bars.y(_); - return chart; - }; - - chart.margin = function(_) { - if (!arguments.length) return margin; - margin.top = typeof _.top != 'undefined' ? _.top : margin.top; - margin.right = typeof _.right != 'undefined' ? _.right : margin.right; - margin.bottom = typeof _.bottom != 'undefined' ? _.bottom : margin.bottom; - margin.left = typeof _.left != 'undefined' ? _.left : margin.left; - return chart; - }; - - chart.width = function(_) { - if (!arguments.length) return width; - width = _; - return chart; - }; - - chart.height = function(_) { - if (!arguments.length) return height; - height = _; - return chart; - }; - - chart.color = function(_) { - if (!arguments.length) return color; - color = nv.utils.getColor(_); - legend.color(color); - return chart; - }; - - chart.showLegend = function(_) { - if (!arguments.length) return showLegend; - showLegend = _; - return chart; - }; - - chart.tooltips = function(_) { - if (!arguments.length) return tooltips; - tooltips = _; - return chart; - }; - - chart.tooltipContent = function(_) { - if (!arguments.length) return tooltip; - tooltip = _; - return chart; - }; - - chart.noData = function(_) { - if (!arguments.length) return noData; - noData = _; - return chart; - }; - - chart.brushExtent = function(_) { - if (!arguments.length) return brushExtent; - brushExtent = _; - return chart; - }; - - - //============================================================ - - - return chart; -} - -nv.models.multiBar = function() { - - //============================================================ - // Public Variables with Default Settings - 
//------------------------------------------------------------ - - var margin = {top: 0, right: 0, bottom: 0, left: 0} - , width = 960 - , height = 500 - , x = d3.scale.ordinal() - , y = d3.scale.linear() - , id = Math.floor(Math.random() * 10000) //Create semi-unique ID in case user doesn't select one - , getX = function(d) { return d.x } - , getY = function(d) { return d.y } - , forceY = [0] // 0 is forced by default.. this makes sense for the majority of bar graphs... user can always do chart.forceY([]) to remove - , clipEdge = true - , stacked = false - , color = nv.utils.defaultColor() - , hideable = false - , barColor = null // adding the ability to set the color for each rather than the whole group - , disabled // used in conjunction with barColor to communicate from multiBarHorizontalChart what series are disabled - , delay = 1200 - , drawTime = 500 - , xDomain - , yDomain - , dispatch = d3.dispatch('chartClick', 'elementClick', 'elementDblClick', 'elementMouseover', 'elementMouseout') - ; - - //============================================================ - - - //============================================================ - // Private Variables - //------------------------------------------------------------ - - var x0, y0 //used to store previous scales - ; - - //============================================================ - - - function chart(selection) { - selection.each(function(data) { - var availableWidth = width - margin.left - margin.right, - availableHeight = height - margin.top - margin.bottom, - container = d3.select(this); - - if(hideable && data.length) hideable = [{ - values: data[0].values.map(function(d) { - return { - x: d.x, - y: 0, - series: d.series, - size: 0.01 - };} - )}]; - - if (stacked) - data = d3.layout.stack() - .offset('zero') - .values(function(d){ return d.values }) - .y(getY) - (!data.length && hideable ? 
hideable : data); - - - //add series index to each data point for reference - data = data.map(function(series, i) { - series.values = series.values.map(function(point) { - point.series = i; - return point; - }); - return series; - }); - - - //------------------------------------------------------------ - // HACK for negative value stacking - if (stacked) - data[0].values.map(function(d,i) { - var posBase = 0, negBase = 0; - data.map(function(d) { - var f = d.values[i] - f.size = Math.abs(f.y); - if (f.y<0) { - f.y1 = negBase; - negBase = negBase - f.size; - } else - { - f.y1 = f.size + posBase; - posBase = posBase + f.size; - } - }); - }); - - //------------------------------------------------------------ - // Setup Scales - - // remap and flatten the data for use in calculating the scales' domains - var seriesData = (xDomain && yDomain) ? [] : // if we know xDomain and yDomain, no need to calculate - data.map(function(d) { - return d.values.map(function(d,i) { - return { x: getX(d,i), y: getY(d,i), y0: d.y0, y1: d.y1 } - }) - }); - - x .domain(d3.merge(seriesData).map(function(d) { return d.x })) - .rangeBands([0, availableWidth], .1); - - //y .domain(yDomain || d3.extent(d3.merge(seriesData).map(function(d) { return d.y + (stacked ? d.y1 : 0) }).concat(forceY))) - y .domain(yDomain || d3.extent(d3.merge(seriesData).map(function(d) { return stacked ? (d.y > 0 ? d.y1 : d.y1 + d.y ) : d.y }).concat(forceY))) - .range([availableHeight, 0]); - - // If scale's domain don't have a range, slightly adjust to make one... so a chart can show a single data point - if (x.domain()[0] === x.domain()[1] || y.domain()[0] === y.domain()[1]) singlePoint = true; - if (x.domain()[0] === x.domain()[1]) - x.domain()[0] ? - x.domain([x.domain()[0] - x.domain()[0] * 0.01, x.domain()[1] + x.domain()[1] * 0.01]) - : x.domain([-1,1]); - - if (y.domain()[0] === y.domain()[1]) - y.domain()[0] ? 
- y.domain([y.domain()[0] + y.domain()[0] * 0.01, y.domain()[1] - y.domain()[1] * 0.01]) - : y.domain([-1,1]); - - - x0 = x0 || x; - y0 = y0 || y; - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Setup containers and skeleton of chart - - var wrap = container.selectAll('g.nv-wrap.nv-multibar').data([data]); - var wrapEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-multibar'); - var defsEnter = wrapEnter.append('defs'); - var gEnter = wrapEnter.append('g'); - var g = wrap.select('g') - - gEnter.append('g').attr('class', 'nv-groups'); - - wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')'); - - //------------------------------------------------------------ - - - - defsEnter.append('clipPath') - .attr('id', 'nv-edge-clip-' + id) - .append('rect'); - wrap.select('#nv-edge-clip-' + id + ' rect') - .attr('width', availableWidth) - .attr('height', availableHeight); - - g .attr('clip-path', clipEdge ? 'url(#nv-edge-clip-' + id + ')' : ''); - - - - var groups = wrap.select('.nv-groups').selectAll('.nv-group') - .data(function(d) { return d }, function(d) { return d.key }); - groups.enter().append('g') - .style('stroke-opacity', 1e-6) - .style('fill-opacity', 1e-6); - - - groups.exit() - .selectAll('rect.nv-bar') - .transition() - .delay(function(d,i) { return i * delay/ data[0].values.length }) - .attr('y', function(d) { return stacked ? y0(d.y0) : y0(0) }) - .attr('height', 0) - .remove(); - groups - .attr('class', function(d,i) { return 'nv-group nv-series-' + i }) - .classed('hover', function(d) { return d.hover }) - .style('fill', function(d,i){ return color(d, i) }) - .style('stroke', function(d,i){ return color(d, i) }); - d3.transition(groups) - .style('stroke-opacity', 1) - .style('fill-opacity', .75); - - - var bars = groups.selectAll('rect.nv-bar') - .data(function(d) { return (hideable && !data.length) ? 
hideable.values : d.values }); - - bars.exit().remove(); - - - var barsEnter = bars.enter().append('rect') - .attr('class', function(d,i) { return getY(d,i) < 0 ? 'nv-bar negative' : 'nv-bar positive'}) - .attr('x', function(d,i,j) { - return stacked ? 0 : (j * x.rangeBand() / data.length ) - }) - .attr('y', function(d) { return y0(stacked ? d.y0 : 0) }) - .attr('height', 0) - .attr('width', x.rangeBand() / (stacked ? 1 : data.length) ); - bars - .style('fill', function(d,i,j){ return color(d, j, i); }) - .style('stroke', function(d,i,j){ return color(d, j, i); }) - .on('mouseover', function(d,i) { //TODO: figure out why j works above, but not here - d3.select(this).classed('hover', true); - dispatch.elementMouseover({ - value: getY(d,i), - point: d, - series: data[d.series], - pos: [x(getX(d,i)) + (x.rangeBand() * (stacked ? data.length / 2 : d.series + .5) / data.length), y(getY(d,i) + (stacked ? d.y0 : 0))], // TODO: Figure out why the value appears to be shifted - pointIndex: i, - seriesIndex: d.series, - e: d3.event - }); - }) - .on('mouseout', function(d,i) { - d3.select(this).classed('hover', false); - dispatch.elementMouseout({ - value: getY(d,i), - point: d, - series: data[d.series], - pointIndex: i, - seriesIndex: d.series, - e: d3.event - }); - }) - .on('click', function(d,i) { - dispatch.elementClick({ - value: getY(d,i), - point: d, - series: data[d.series], - pos: [x(getX(d,i)) + (x.rangeBand() * (stacked ? data.length / 2 : d.series + .5) / data.length), y(getY(d,i) + (stacked ? d.y0 : 0))], // TODO: Figure out why the value appears to be shifted - pointIndex: i, - seriesIndex: d.series, - e: d3.event - }); - d3.event.stopPropagation(); - }) - .on('dblclick', function(d,i) { - dispatch.elementDblClick({ - value: getY(d,i), - point: d, - series: data[d.series], - pos: [x(getX(d,i)) + (x.rangeBand() * (stacked ? data.length / 2 : d.series + .5) / data.length), y(getY(d,i) + (stacked ? 
d.y0 : 0))], // TODO: Figure out why the value appears to be shifted - pointIndex: i, - seriesIndex: d.series, - e: d3.event - }); - d3.event.stopPropagation(); - }); - bars - .attr('class', function(d,i) { return getY(d,i) < 0 ? 'nv-bar negative' : 'nv-bar positive'}) - .attr('transform', function(d,i) { return 'translate(' + x(getX(d,i)) + ',0)'; }) - - if (barColor) { - if (!disabled) disabled = data.map(function() { return true }); - bars - //.style('fill', barColor) - //.style('stroke', barColor) - //.style('fill', function(d,i,j) { return d3.rgb(barColor(d,i)).darker(j).toString(); }) - //.style('stroke', function(d,i,j) { return d3.rgb(barColor(d,i)).darker(j).toString(); }) - .style('fill', function(d,i,j) { return d3.rgb(barColor(d,i)).darker( disabled.map(function(d,i) { return i }).filter(function(d,i){ return !disabled[i] })[j] ).toString(); }) - .style('stroke', function(d,i,j) { return d3.rgb(barColor(d,i)).darker( disabled.map(function(d,i) { return i }).filter(function(d,i){ return !disabled[i] })[j] ).toString(); }); - } - - - if (stacked) - bars.transition() - - .delay(function(d,i) { return i * delay / data[0].values.length }) - .attr('y', function(d,i) { - - return y((stacked ? d.y1 : 0)); - }) - .attr('height', function(d,i) { - return Math.max(Math.abs(y(d.y + (stacked ? d.y0 : 0)) - y((stacked ? d.y0 : 0))),1); - }) - .each('end', function() { - d3.select(this).transition().duration(drawTime) - .attr('x', function(d,i) { - return stacked ? 0 : (d.series * x.rangeBand() / data.length ) - }) - .attr('width', x.rangeBand() / (stacked ? 1 : data.length) ); - }) - else - d3.transition(bars).duration(drawTime) - .delay(function(d,i) { return i * delay/ data[0].values.length }) - .attr('x', function(d,i) { - return d.series * x.rangeBand() / data.length - }) - .attr('width', x.rangeBand() / data.length) - .each('end', function() { - d3.select(this).transition().duration(drawTime) - .attr('y', function(d,i) { - return getY(d,i) < 0 ? 
- y(0) : - y(0) - y(getY(d,i)) < 1 ? - y(0) - 1 : - y(getY(d,i)) || 0; - }) - .attr('height', function(d,i) { - return Math.max(Math.abs(y(getY(d,i)) - y(0)),1) || 0; - }); - }) - - - //store old scales for use in transitions on update - x0 = x.copy(); - y0 = y.copy(); - - }); - - return chart; - } - - - //============================================================ - // Expose Public Variables - //------------------------------------------------------------ - - chart.dispatch = dispatch; - - chart.x = function(_) { - if (!arguments.length) return getX; - getX = _; - return chart; - }; - - chart.y = function(_) { - if (!arguments.length) return getY; - getY = _; - return chart; - }; - - chart.margin = function(_) { - if (!arguments.length) return margin; - margin.top = typeof _.top != 'undefined' ? _.top : margin.top; - margin.right = typeof _.right != 'undefined' ? _.right : margin.right; - margin.bottom = typeof _.bottom != 'undefined' ? _.bottom : margin.bottom; - margin.left = typeof _.left != 'undefined' ? 
_.left : margin.left; - return chart; - }; - - chart.width = function(_) { - if (!arguments.length) return width; - width = _; - return chart; - }; - - chart.height = function(_) { - if (!arguments.length) return height; - height = _; - return chart; - }; - - chart.xScale = function(_) { - if (!arguments.length) return x; - x = _; - return chart; - }; - - chart.yScale = function(_) { - if (!arguments.length) return y; - y = _; - return chart; - }; - - chart.xDomain = function(_) { - if (!arguments.length) return xDomain; - xDomain = _; - return chart; - }; - - chart.yDomain = function(_) { - if (!arguments.length) return yDomain; - yDomain = _; - return chart; - }; - - chart.forceY = function(_) { - if (!arguments.length) return forceY; - forceY = _; - return chart; - }; - - chart.stacked = function(_) { - if (!arguments.length) return stacked; - stacked = _; - return chart; - }; - - chart.clipEdge = function(_) { - if (!arguments.length) return clipEdge; - clipEdge = _; - return chart; - }; - - chart.color = function(_) { - if (!arguments.length) return color; - color = nv.utils.getColor(_); - return chart; - }; - - chart.barColor = function(_) { - if (!arguments.length) return barColor; - barColor = nv.utils.getColor(_); - return chart; - }; - - chart.disabled = function(_) { - if (!arguments.length) return disabled; - disabled = _; - return chart; - }; - - chart.id = function(_) { - if (!arguments.length) return id; - id = _; - return chart; - }; - - chart.hideable = function(_) { - if (!arguments.length) return hideable; - hideable = _; - return chart; - }; - - chart.delay = function(_) { - if (!arguments.length) return delay; - delay = _; - return chart; - }; - - chart.drawTime = function(_) { - if (!arguments.length) return drawTime; - drawTime = _; - return chart; - }; - - //============================================================ - - - return chart; -} - -nv.models.multiBarChart = function() { - - 
//============================================================ - // Public Variables with Default Settings - //------------------------------------------------------------ - - var multibar = nv.models.multiBar() - , xAxis = nv.models.axis() - , yAxis = nv.models.axis() - , legend = nv.models.legend() - , controls = nv.models.legend() - ; - - var margin = {top: 30, right: 20, bottom: 50, left: 60} - , width = null - , height = null - , color = nv.utils.defaultColor() - , showControls = true - , showLegend = true - , reduceXTicks = true // if false a tick will show for every data point - , staggerLabels = false - , rotateLabels = 0 - , tooltips = true - , tooltip = function(key, x, y, e, graph) { - return '<h3>' + key + '</h3>' + - '<p>' + y + ' on ' + x + '</p>' - } - , x //can be accessed via chart.xScale() - , y //can be accessed via chart.yScale() - , state = { stacked: false } - , defaultState = null - , noData = "No Data Available." - , dispatch = d3.dispatch('tooltipShow', 'tooltipHide', 'stateChange', 'changeState') - , controlWidth = function() { return showControls ? 180 : 0 } - ; - - multibar - .stacked(false) - ; - xAxis - .orient('bottom') - .tickPadding(7) - .highlightZero(true) - .showMaxMin(false) - .tickFormat(function(d) { return d }) - ; - yAxis - .orient('left') - .tickFormat(d3.format(',.1f')) - ; - - //============================================================ - - - //============================================================ - // Private Variables - //------------------------------------------------------------ - - var showTooltip = function(e, offsetElement) { - var left = e.pos[0] + ( offsetElement.offsetLeft || 0 ), - top = e.pos[1] + ( offsetElement.offsetTop || 0), - x = xAxis.tickFormat()(multibar.x()(e.point, e.pointIndex)), - y = yAxis.tickFormat()(multibar.y()(e.point, e.pointIndex)), - content = tooltip(e.series.key, x, y, e, chart); - - nv.tooltip.show([left, top], content, e.value < 0 ? 
'n' : 's', null, offsetElement); - }; - - //============================================================ - - - function chart(selection) { - selection.each(function(data) { - var container = d3.select(this), - that = this; - - var availableWidth = (width || parseInt(container.style('width')) || 960) - - margin.left - margin.right, - availableHeight = (height || parseInt(container.style('height')) || 400) - - margin.top - margin.bottom; - - chart.update = function() { container.transition().call(chart) }; - chart.container = this; - - //set state.disabled - state.disabled = data.map(function(d) { return !!d.disabled }); - - if (!defaultState) { - var key; - defaultState = {}; - for (key in state) { - if (state[key] instanceof Array) - defaultState[key] = state[key].slice(0); - else - defaultState[key] = state[key]; - } - } - //------------------------------------------------------------ - // Display noData message if there's nothing to show. - - if (!data || !data.length || !data.filter(function(d) { return d.values.length }).length) { - var noDataText = container.selectAll('.nv-noData').data([noData]); - - noDataText.enter().append('text') - .attr('class', 'nvd3 nv-noData') - .attr('dy', '-.7em') - .style('text-anchor', 'middle'); - - noDataText - .attr('x', margin.left + availableWidth / 2) - .attr('y', margin.top + availableHeight / 2) - .text(function(d) { return d }); - - return chart; - } else { - container.selectAll('.nv-noData').remove(); - } - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Setup Scales - - x = multibar.xScale(); - y = multibar.yScale(); - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Setup containers and skeleton of chart - - var wrap = container.selectAll('g.nv-wrap.nv-multiBarWithLegend').data([data]); - var gEnter = wrap.enter().append('g').attr('class', 
'nvd3 nv-wrap nv-multiBarWithLegend').append('g'); - var g = wrap.select('g'); - - gEnter.append('g').attr('class', 'nv-x nv-axis'); - gEnter.append('g').attr('class', 'nv-y nv-axis'); - gEnter.append('g').attr('class', 'nv-barsWrap'); - gEnter.append('g').attr('class', 'nv-legendWrap'); - gEnter.append('g').attr('class', 'nv-controlsWrap'); - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Legend - - if (showLegend) { - legend.width(availableWidth - controlWidth()); - - if (multibar.barColor()) - data.forEach(function(series,i) { - series.color = d3.rgb('#ccc').darker(i * 1.5).toString(); - }) - - g.select('.nv-legendWrap') - .datum(data) - .call(legend); - - if ( margin.top != legend.height()) { - margin.top = legend.height(); - availableHeight = (height || parseInt(container.style('height')) || 400) - - margin.top - margin.bottom; - } - - g.select('.nv-legendWrap') - .attr('transform', 'translate(' + controlWidth() + ',' + (-margin.top) +')'); - } - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Controls - - if (showControls) { - var controlsData = [ - { key: 'Grouped', disabled: multibar.stacked() }, - { key: 'Stacked', disabled: !multibar.stacked() } - ]; - - controls.width(controlWidth()).color(['#444', '#444', '#444']); - g.select('.nv-controlsWrap') - .datum(controlsData) - .attr('transform', 'translate(0,' + (-margin.top) +')') - .call(controls); - } - - //------------------------------------------------------------ - - - wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')'); - - - //------------------------------------------------------------ - // Main Chart Component(s) - - multibar - .disabled(data.map(function(series) { return series.disabled })) - .width(availableWidth) - .height(availableHeight) - .color(data.map(function(d,i) { - return d.color || 
color(d, i); - }).filter(function(d,i) { return !data[i].disabled })) - - - var barsWrap = g.select('.nv-barsWrap') - .datum(data.filter(function(d) { return !d.disabled })) - - d3.transition(barsWrap).call(multibar); - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Setup Axes - - xAxis - .scale(x) - .ticks( availableWidth / 100 ) - .tickSize(-availableHeight, 0); - - g.select('.nv-x.nv-axis') - .attr('transform', 'translate(0,' + y.range()[0] + ')'); - d3.transition(g.select('.nv-x.nv-axis')) - .call(xAxis); - - var xTicks = g.select('.nv-x.nv-axis > g').selectAll('g'); - - xTicks - .selectAll('line, text') - .style('opacity', 1) - - if (staggerLabels) { - var getTranslate = function(x,y) { - return "translate(" + x + "," + y + ")"; - }; - - var staggerUp = 5, staggerDown = 17; //pixels to stagger by - // Issue #140 - xTicks - .selectAll("text") - .attr('transform', function(d,i,j) { - return getTranslate(0, (j % 2 == 0 ? staggerUp : staggerDown)); - }); - - var totalInBetweenTicks = d3.selectAll(".nv-x.nv-axis .nv-wrap g g text")[0].length; - g.selectAll(".nv-x.nv-axis .nv-axisMaxMin text") - .attr("transform", function(d,i) { - return getTranslate(0, (i === 0 || totalInBetweenTicks % 2 !== 0) ? staggerDown : staggerUp); - }); - } - - - if (reduceXTicks) - xTicks - .filter(function(d,i) { - return i % Math.ceil(data[0].values.length / (availableWidth / 100)) !== 0; - }) - .selectAll('text, line') - .style('opacity', 0); - - if(rotateLabels) - xTicks - .selectAll('text') - .attr('transform', 'rotate(' + rotateLabels + ' 0,0)') - .attr('text-anchor', rotateLabels > 0 ? 
'start' : 'end'); - - g.select('.nv-x.nv-axis').selectAll('g.nv-axisMaxMin text') - .style('opacity', 1); - - yAxis - .scale(y) - .ticks( availableHeight / 36 ) - .tickSize( -availableWidth, 0); - - d3.transition(g.select('.nv-y.nv-axis')) - .call(yAxis); - - //------------------------------------------------------------ - - - - //============================================================ - // Event Handling/Dispatching (in chart's scope) - //------------------------------------------------------------ - - legend.dispatch.on('legendClick', function(d,i) { - d.disabled = !d.disabled; - - if (!data.filter(function(d) { return !d.disabled }).length) { - data.map(function(d) { - d.disabled = false; - wrap.selectAll('.nv-series').classed('disabled', false); - return d; - }); - } - - state.disabled = data.map(function(d) { return !!d.disabled }); - dispatch.stateChange(state); - - chart.update(); - }); - - legend.dispatch.on('legendDblclick', function(d) { - //Double clicking should always enable current series, and disabled all others. 
- data.forEach(function(d) { - d.disabled = true; - }); - d.disabled = false; - - state.disabled = data.map(function(d) { return !!d.disabled }); - dispatch.stateChange(state); - chart.update(); - }); - - - controls.dispatch.on('legendClick', function(d,i) { - if (!d.disabled) return; - controlsData = controlsData.map(function(s) { - s.disabled = true; - return s; - }); - d.disabled = false; - - switch (d.key) { - case 'Grouped': - multibar.stacked(false); - break; - case 'Stacked': - multibar.stacked(true); - break; - } - - state.stacked = multibar.stacked(); - dispatch.stateChange(state); - - chart.update(); - }); - - dispatch.on('tooltipShow', function(e) { - if (tooltips) showTooltip(e, that.parentNode) - }); - - // Update chart from a state object passed to event handler - dispatch.on('changeState', function(e) { - - if (typeof e.disabled !== 'undefined') { - data.forEach(function(series,i) { - series.disabled = e.disabled[i]; - }); - - state.disabled = e.disabled; - } - - if (typeof e.stacked !== 'undefined') { - multibar.stacked(e.stacked); - state.stacked = e.stacked; - } - - chart.update(); - }); - - //============================================================ - - - }); - - return chart; - } - - - //============================================================ - // Event Handling/Dispatching (out of chart's scope) - //------------------------------------------------------------ - - multibar.dispatch.on('elementMouseover.tooltip', function(e) { - e.pos = [e.pos[0] + margin.left, e.pos[1] + margin.top]; - dispatch.tooltipShow(e); - }); - - multibar.dispatch.on('elementMouseout.tooltip', function(e) { - dispatch.tooltipHide(e); - }); - dispatch.on('tooltipHide', function() { - if (tooltips) nv.tooltip.cleanup(); - }); - - //============================================================ - - - //============================================================ - // Expose Public Variables - //------------------------------------------------------------ - - // expose 
chart's sub-components - chart.dispatch = dispatch; - chart.multibar = multibar; - chart.legend = legend; - chart.xAxis = xAxis; - chart.yAxis = yAxis; - - d3.rebind(chart, multibar, 'x', 'y', 'xDomain', 'yDomain', 'forceX', 'forceY', 'clipEdge', 'id', 'stacked', 'delay', 'barColor'); - - chart.margin = function(_) { - if (!arguments.length) return margin; - margin.top = typeof _.top != 'undefined' ? _.top : margin.top; - margin.right = typeof _.right != 'undefined' ? _.right : margin.right; - margin.bottom = typeof _.bottom != 'undefined' ? _.bottom : margin.bottom; - margin.left = typeof _.left != 'undefined' ? _.left : margin.left; - return chart; - }; - - chart.width = function(_) { - if (!arguments.length) return width; - width = _; - return chart; - }; - - chart.height = function(_) { - if (!arguments.length) return height; - height = _; - return chart; - }; - - chart.color = function(_) { - if (!arguments.length) return color; - color = nv.utils.getColor(_); - legend.color(color); - return chart; - }; - - chart.showControls = function(_) { - if (!arguments.length) return showControls; - showControls = _; - return chart; - }; - - chart.showLegend = function(_) { - if (!arguments.length) return showLegend; - showLegend = _; - return chart; - }; - - chart.reduceXTicks= function(_) { - if (!arguments.length) return reduceXTicks; - reduceXTicks = _; - return chart; - }; - - chart.rotateLabels = function(_) { - if (!arguments.length) return rotateLabels; - rotateLabels = _; - return chart; - } - - chart.staggerLabels = function(_) { - if (!arguments.length) return staggerLabels; - staggerLabels = _; - return chart; - }; - - chart.tooltip = function(_) { - if (!arguments.length) return tooltip; - tooltip = _; - return chart; - }; - - chart.tooltips = function(_) { - if (!arguments.length) return tooltips; - tooltips = _; - return chart; - }; - - chart.tooltipContent = function(_) { - if (!arguments.length) return tooltip; - tooltip = _; - return chart; - }; - - 
chart.state = function(_) { - if (!arguments.length) return state; - state = _; - return chart; - }; - - chart.defaultState = function(_) { - if (!arguments.length) return defaultState; - defaultState = _; - return chart; - }; - - chart.noData = function(_) { - if (!arguments.length) return noData; - noData = _; - return chart; - }; - - //============================================================ - - - return chart; -} - -nv.models.multiBarHorizontal = function() { - - //============================================================ - // Public Variables with Default Settings - //------------------------------------------------------------ - - var margin = {top: 0, right: 0, bottom: 0, left: 0} - , width = 960 - , height = 500 - , id = Math.floor(Math.random() * 10000) //Create semi-unique ID in case user doesn't select one - , x = d3.scale.ordinal() - , y = d3.scale.linear() - , getX = function(d) { return d.x } - , getY = function(d) { return d.y } - , forceY = [0] // 0 is forced by default.. this makes sense for the majority of bar graphs... 
user can always do chart.forceY([]) to remove - , color = nv.utils.defaultColor() - , barColor = null // adding the ability to set the color for each rather than the whole group - , disabled // used in conjunction with barColor to communicate from multiBarHorizontalChart what series are disabled - , stacked = false - , showValues = false - , valuePadding = 60 - , valueFormat = d3.format(',.2f') - , delay = 1200 - , xDomain - , yDomain - , dispatch = d3.dispatch('chartClick', 'elementClick', 'elementDblClick', 'elementMouseover', 'elementMouseout') - ; - - //============================================================ - - - //============================================================ - // Private Variables - //------------------------------------------------------------ - - var x0, y0 //used to store previous scales - ; - - //============================================================ - - - function chart(selection) { - selection.each(function(data) { - var availableWidth = width - margin.left - margin.right, - availableHeight = height - margin.top - margin.bottom, - container = d3.select(this); - - - if (stacked) - data = d3.layout.stack() - .offset('zero') - .values(function(d){ return d.values }) - .y(getY) - (data); - - - //add series index to each data point for reference - data = data.map(function(series, i) { - series.values = series.values.map(function(point) { - point.series = i; - return point; - }); - return series; - }); - - - - //------------------------------------------------------------ - // HACK for negative value stacking - if (stacked) - data[0].values.map(function(d,i) { - var posBase = 0, negBase = 0; - data.map(function(d) { - var f = d.values[i] - f.size = Math.abs(f.y); - if (f.y<0) { - f.y1 = negBase - f.size; - negBase = negBase - f.size; - } else - { - f.y1 = posBase; - posBase = posBase + f.size; - } - }); - }); - - - - //------------------------------------------------------------ - // Setup Scales - - // remap and flatten the data 
for use in calculating the scales' domains - var seriesData = (xDomain && yDomain) ? [] : // if we know xDomain and yDomain, no need to calculate - data.map(function(d) { - return d.values.map(function(d,i) { - return { x: getX(d,i), y: getY(d,i), y0: d.y0, y1: d.y1 } - }) - }); - - x .domain(xDomain || d3.merge(seriesData).map(function(d) { return d.x })) - .rangeBands([0, availableHeight], .1); - - //y .domain(yDomain || d3.extent(d3.merge(seriesData).map(function(d) { return d.y + (stacked ? d.y0 : 0) }).concat(forceY))) - y .domain(yDomain || d3.extent(d3.merge(seriesData).map(function(d) { return stacked ? (d.y > 0 ? d.y1 + d.y : d.y1 ) : d.y }).concat(forceY))) - - if (showValues && !stacked) - y.range([(y.domain()[0] < 0 ? valuePadding : 0), availableWidth - (y.domain()[1] > 0 ? valuePadding : 0) ]); - else - y.range([0, availableWidth]); - - x0 = x0 || x; - y0 = y0 || d3.scale.linear().domain(y.domain()).range([y(0),y(0)]); - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Setup containers and skeleton of chart - - var wrap = d3.select(this).selectAll('g.nv-wrap.nv-multibarHorizontal').data([data]); - var wrapEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-multibarHorizontal'); - var defsEnter = wrapEnter.append('defs'); - var gEnter = wrapEnter.append('g'); - var g = wrap.select('g'); - - gEnter.append('g').attr('class', 'nv-groups'); - - wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')'); - - //------------------------------------------------------------ - - - - var groups = wrap.select('.nv-groups').selectAll('.nv-group') - .data(function(d) { return d }, function(d) { return d.key }); - groups.enter().append('g') - .style('stroke-opacity', 1e-6) - .style('fill-opacity', 1e-6); - d3.transition(groups.exit()) - .style('stroke-opacity', 1e-6) - .style('fill-opacity', 1e-6) - .remove(); - groups - .attr('class', function(d,i) { 
return 'nv-group nv-series-' + i }) - .classed('hover', function(d) { return d.hover }) - .style('fill', function(d,i){ return color(d, i) }) - .style('stroke', function(d,i){ return color(d, i) }); - d3.transition(groups) - .style('stroke-opacity', 1) - .style('fill-opacity', .75); - - - var bars = groups.selectAll('g.nv-bar') - .data(function(d) { return d.values }); - - bars.exit().remove(); - - - var barsEnter = bars.enter().append('g') - .attr('transform', function(d,i,j) { - return 'translate(' + y0(stacked ? d.y0 : 0) + ',' + (stacked ? 0 : (j * x.rangeBand() / data.length ) + x(getX(d,i))) + ')' - }); - - barsEnter.append('rect') - .attr('width', 0) - .attr('height', x.rangeBand() / (stacked ? 1 : data.length) ) - - bars - .on('mouseover', function(d,i) { //TODO: figure out why j works above, but not here - d3.select(this).classed('hover', true); - dispatch.elementMouseover({ - value: getY(d,i), - point: d, - series: data[d.series], - pos: [ y(getY(d,i) + (stacked ? d.y0 : 0)), x(getX(d,i)) + (x.rangeBand() * (stacked ? data.length / 2 : d.series + .5) / data.length) ], - pointIndex: i, - seriesIndex: d.series, - e: d3.event - }); - }) - .on('mouseout', function(d,i) { - d3.select(this).classed('hover', false); - dispatch.elementMouseout({ - value: getY(d,i), - point: d, - series: data[d.series], - pointIndex: i, - seriesIndex: d.series, - e: d3.event - }); - }) - .on('click', function(d,i) { - dispatch.elementClick({ - value: getY(d,i), - point: d, - series: data[d.series], - pos: [x(getX(d,i)) + (x.rangeBand() * (stacked ? data.length / 2 : d.series + .5) / data.length), y(getY(d,i) + (stacked ? d.y0 : 0))], // TODO: Figure out why the value appears to be shifted - pointIndex: i, - seriesIndex: d.series, - e: d3.event - }); - d3.event.stopPropagation(); - }) - .on('dblclick', function(d,i) { - dispatch.elementDblClick({ - value: getY(d,i), - point: d, - series: data[d.series], - pos: [x(getX(d,i)) + (x.rangeBand() * (stacked ? 
data.length / 2 : d.series + .5) / data.length), y(getY(d,i) + (stacked ? d.y0 : 0))], // TODO: Figure out why the value appears to be shifted - pointIndex: i, - seriesIndex: d.series, - e: d3.event - }); - d3.event.stopPropagation(); - }); - - - barsEnter.append('text'); - - if (showValues && !stacked) { - bars.select('text') - .attr('text-anchor', function(d,i) { return getY(d,i) < 0 ? 'end' : 'start' }) - .attr('y', x.rangeBand() / (data.length * 2)) - .attr('dy', '.32em') - .text(function(d,i) { return valueFormat(getY(d,i)) }) - d3.transition(bars) - //.delay(function(d,i) { return i * delay / data[0].values.length }) - .select('text') - .attr('x', function(d,i) { return getY(d,i) < 0 ? -4 : y(getY(d,i)) - y(0) + 4 }) - } else { - //bars.selectAll('text').remove(); - bars.selectAll('text').text(''); - } - - bars - .attr('class', function(d,i) { return getY(d,i) < 0 ? 'nv-bar negative' : 'nv-bar positive'}) - - if (barColor) { - if (!disabled) disabled = data.map(function() { return true }); - bars - //.style('fill', barColor) - //.style('stroke', barColor) - //.style('fill', function(d,i,j) { return d3.rgb(barColor(d,i)).darker(j).toString(); }) - //.style('stroke', function(d,i,j) { return d3.rgb(barColor(d,i)).darker(j).toString(); }) - .style('fill', function(d,i,j) { return d3.rgb(barColor(d,i)).darker( disabled.map(function(d,i) { return i }).filter(function(d,i){ return !disabled[i] })[j] ).toString(); }) - .style('stroke', function(d,i,j) { return d3.rgb(barColor(d,i)).darker( disabled.map(function(d,i) { return i }).filter(function(d,i){ return !disabled[i] })[j] ).toString(); }); - } - - if (stacked) - d3.transition(bars) - //.delay(function(d,i) { return i * delay / data[0].values.length }) - .attr('transform', function(d,i) { - //return 'translate(' + y(d.y0) + ',0)' - //return 'translate(' + y(d.y0) + ',' + x(getX(d,i)) + ')' - return 'translate(' + y(d.y1) + ',' + x(getX(d,i)) + ')' - }) - .select('rect') - .attr('width', function(d,i) { - return 
Math.abs(y(getY(d,i) + d.y0) - y(d.y0)) - }) - .attr('height', x.rangeBand() ); - else - d3.transition(bars) - //.delay(function(d,i) { return i * delay / data[0].values.length }) - .attr('transform', function(d,i) { - //TODO: stacked must be all positive or all negative, not both? - return 'translate(' + - (getY(d,i) < 0 ? y(getY(d,i)) : y(0)) - + ',' + - (d.series * x.rangeBand() / data.length - + - x(getX(d,i)) ) - + ')' - }) - .select('rect') - .attr('height', x.rangeBand() / data.length ) - .attr('width', function(d,i) { - return Math.max(Math.abs(y(getY(d,i)) - y(0)),1) - }); - - - //store old scales for use in transitions on update - x0 = x.copy(); - y0 = y.copy(); - - }); - - return chart; - } - - - //============================================================ - // Expose Public Variables - //------------------------------------------------------------ - - chart.dispatch = dispatch; - - chart.x = function(_) { - if (!arguments.length) return getX; - getX = _; - return chart; - }; - - chart.y = function(_) { - if (!arguments.length) return getY; - getY = _; - return chart; - }; - - chart.margin = function(_) { - if (!arguments.length) return margin; - margin.top = typeof _.top != 'undefined' ? _.top : margin.top; - margin.right = typeof _.right != 'undefined' ? _.right : margin.right; - margin.bottom = typeof _.bottom != 'undefined' ? _.bottom : margin.bottom; - margin.left = typeof _.left != 'undefined' ? 
_.left : margin.left; - return chart; - }; - - chart.width = function(_) { - if (!arguments.length) return width; - width = _; - return chart; - }; - - chart.height = function(_) { - if (!arguments.length) return height; - height = _; - return chart; - }; - - chart.xScale = function(_) { - if (!arguments.length) return x; - x = _; - return chart; - }; - - chart.yScale = function(_) { - if (!arguments.length) return y; - y = _; - return chart; - }; - - chart.xDomain = function(_) { - if (!arguments.length) return xDomain; - xDomain = _; - return chart; - }; - - chart.yDomain = function(_) { - if (!arguments.length) return yDomain; - yDomain = _; - return chart; - }; - - chart.forceY = function(_) { - if (!arguments.length) return forceY; - forceY = _; - return chart; - }; - - chart.stacked = function(_) { - if (!arguments.length) return stacked; - stacked = _; - return chart; - }; - - chart.color = function(_) { - if (!arguments.length) return color; - color = nv.utils.getColor(_); - return chart; - }; - - chart.barColor = function(_) { - if (!arguments.length) return barColor; - barColor = nv.utils.getColor(_); - return chart; - }; - - chart.disabled = function(_) { - if (!arguments.length) return disabled; - disabled = _; - return chart; - }; - - chart.id = function(_) { - if (!arguments.length) return id; - id = _; - return chart; - }; - - chart.delay = function(_) { - if (!arguments.length) return delay; - delay = _; - return chart; - }; - - chart.showValues = function(_) { - if (!arguments.length) return showValues; - showValues = _; - return chart; - }; - - chart.valueFormat= function(_) { - if (!arguments.length) return valueFormat; - valueFormat = _; - return chart; - }; - - chart.valuePadding = function(_) { - if (!arguments.length) return valuePadding; - valuePadding = _; - return chart; - }; - - //============================================================ - - - return chart; -} - -nv.models.multiBarHorizontalChart = function() { - - 
  //============================================================
  // Public Variables with Default Settings
  //------------------------------------------------------------

  // Composed sub-models: the horizontal multibar renderer, its two axes,
  // a legend, and a second legend instance reused as the Grouped/Stacked controls.
  var multibar = nv.models.multiBarHorizontal()
    , xAxis = nv.models.axis()
    , yAxis = nv.models.axis()
    , legend = nv.models.legend().height(30)
    , controls = nv.models.legend().height(30)
    ;

  var margin = {top: 30, right: 20, bottom: 50, left: 60}
    , width = null                     // null => measured from the container at render time
    , height = null                    // null => measured from the container at render time
    , color = nv.utils.defaultColor()
    , showControls = true
    , showLegend = true
    , stacked = false
    , tooltips = true
    , tooltip = function(key, x, y, e, graph) {
        return '<h3>' + key + ' - ' + x + '</h3>' +
               '<p>' + y + '</p>'
      }
    , x //can be accessed via chart.xScale()
    , y //can be accessed via chart.yScale()
    , state = { stacked: stacked }     // externally visible state, mirrored via stateChange/changeState
    , defaultState = null              // snapshot of the initial state, taken on first render
    , noData = 'No Data Available.'
    , dispatch = d3.dispatch('tooltipShow', 'tooltipHide', 'stateChange', 'changeState')
    , controlWidth = function() { return showControls ? 180 : 0 }
    ;

  multibar
    .stacked(stacked)
    ;
  // Horizontal layout: categories run down the left axis, values along the bottom.
  xAxis
    .orient('left')
    .tickPadding(5)
    .highlightZero(false)
    .showMaxMin(false)
    .tickFormat(function(d) { return d })
    ;
  yAxis
    .orient('bottom')
    .tickFormat(d3.format(',.1f'))
    ;

  //============================================================


  //============================================================
  // Private Variables
  //------------------------------------------------------------

  // Renders the tooltip for a hovered bar, positioned relative to offsetElement.
  var showTooltip = function(e, offsetElement) {
    var left = e.pos[0] + ( offsetElement.offsetLeft || 0 ),
        top = e.pos[1] + ( offsetElement.offsetTop || 0),
        x = xAxis.tickFormat()(multibar.x()(e.point, e.pointIndex)),
        y = yAxis.tickFormat()(multibar.y()(e.point, e.pointIndex)),
        content = tooltip(e.series.key, x, y, e, chart);

    // Negative bars grow leftward, so anchor the tooltip on the east side for them.
    nv.tooltip.show([left, top], content, e.value < 0 ? 'e' : 'w', null, offsetElement);
  };

  //============================================================


  function chart(selection) {
    selection.each(function(data) {
      var container = d3.select(this),
          that = this;

      var availableWidth = (width || parseInt(container.style('width')) || 960)
                             - margin.left - margin.right,
          availableHeight = (height || parseInt(container.style('height')) || 400)
                             - margin.top - margin.bottom;

      chart.update = function() { container.transition().call(chart) };
      chart.container = this;

      //set state.disabled
      state.disabled = data.map(function(d) { return !!d.disabled });

      // Snapshot the initial state once (copying arrays) so it can be restored later.
      if (!defaultState) {
        var key;
        defaultState = {};
        for (key in state) {
          if (state[key] instanceof Array)
            defaultState[key] = state[key].slice(0);
          else
            defaultState[key] = state[key];
        }
      }

      //------------------------------------------------------------
      // Display No Data message if there's nothing to show.

      if (!data || !data.length || !data.filter(function(d) { return d.values.length }).length) {
        var noDataText = container.selectAll('.nv-noData').data([noData]);

        noDataText.enter().append('text')
          .attr('class', 'nvd3 nv-noData')
          .attr('dy', '-.7em')
          .style('text-anchor', 'middle');

        noDataText
          .attr('x', margin.left + availableWidth / 2)
          .attr('y', margin.top + availableHeight / 2)
          .text(function(d) { return d });

        return chart;
      } else {
        container.selectAll('.nv-noData').remove();
      }

      //------------------------------------------------------------


      //------------------------------------------------------------
      // Setup Scales
      // The scales are owned by the multibar sub-model; grab references for the axes.

      x = multibar.xScale();
      y = multibar.yScale();

      //------------------------------------------------------------


      //------------------------------------------------------------
      // Setup containers and skeleton of chart

      var wrap = container.selectAll('g.nv-wrap.nv-multiBarHorizontalChart').data([data]);
      var gEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-multiBarHorizontalChart').append('g');
      var g = wrap.select('g');

      gEnter.append('g').attr('class', 'nv-x nv-axis');
      gEnter.append('g').attr('class', 'nv-y nv-axis');
      gEnter.append('g').attr('class', 'nv-barsWrap');
      gEnter.append('g').attr('class', 'nv-legendWrap');
      gEnter.append('g').attr('class', 'nv-controlsWrap');

      //------------------------------------------------------------


      //------------------------------------------------------------
      // Legend

      if (showLegend) {
        legend.width(availableWidth - controlWidth());

        // When a per-bar color override is active, grey out the legend swatches.
        if (multibar.barColor())
          data.forEach(function(series,i) {
            series.color = d3.rgb('#ccc').darker(i * 1.5).toString();
          })

        g.select('.nv-legendWrap')
          .datum(data)
          .call(legend);

        // Grow the top margin if the legend needs more room than reserved.
        if ( margin.top != legend.height()) {
          margin.top = legend.height();
          availableHeight = (height || parseInt(container.style('height')) || 400)
                              - margin.top - margin.bottom;
        }

        g.select('.nv-legendWrap')
          .attr('transform', 'translate(' + controlWidth() + ',' + (-margin.top) +')');
      }

      //------------------------------------------------------------


      //------------------------------------------------------------
      // Controls (Grouped / Stacked toggle)

      if (showControls) {
        var controlsData = [
          { key: 'Grouped', disabled: multibar.stacked() },
          { key: 'Stacked', disabled: !multibar.stacked() }
        ];

        controls.width(controlWidth()).color(['#444', '#444', '#444']);
        g.select('.nv-controlsWrap')
          .datum(controlsData)
          .attr('transform', 'translate(0,' + (-margin.top) +')')
          .call(controls);
      }

      //------------------------------------------------------------


      wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');


      //------------------------------------------------------------
      // Main Chart Component(s)

      multibar
        .disabled(data.map(function(series) { return series.disabled }))
        .width(availableWidth)
        .height(availableHeight)
        .color(data.map(function(d,i) {
          return d.color || color(d, i);
        }).filter(function(d,i) { return !data[i].disabled }))


      var barsWrap = g.select('.nv-barsWrap')
          .datum(data.filter(function(d) { return !d.disabled }))

      d3.transition(barsWrap).call(multibar);

      //------------------------------------------------------------


      //------------------------------------------------------------
      // Setup Axes
      // Note: xAxis draws the (vertical) category axis, yAxis the (horizontal)
      // value axis — orientation was configured above.

      xAxis
        .scale(x)
        .ticks( availableHeight / 24 )
        .tickSize(-availableWidth, 0);

      d3.transition(g.select('.nv-x.nv-axis'))
        .call(xAxis);

      var xTicks = g.select('.nv-x.nv-axis').selectAll('g');

      xTicks
        .selectAll('line, text')
        .style('opacity', 1)


      yAxis
        .scale(y)
        .ticks( availableWidth / 100 )
        .tickSize( -availableHeight, 0);

      g.select('.nv-y.nv-axis')
        .attr('transform', 'translate(0,' + availableHeight + ')');
      d3.transition(g.select('.nv-y.nv-axis'))
        .call(yAxis);

      //------------------------------------------------------------



      //============================================================
      // Event Handling/Dispatching (in chart's scope)
      //------------------------------------------------------------

      legend.dispatch.on('legendClick', function(d,i) {
        d.disabled = !d.disabled;

        // If every series ended up disabled, re-enable them all.
        if (!data.filter(function(d) { return !d.disabled }).length) {
          data.map(function(d) {
            d.disabled = false;
            wrap.selectAll('.nv-series').classed('disabled', false);
            return d;
          });
        }

        state.disabled = data.map(function(d) { return !!d.disabled });
        dispatch.stateChange(state);

        chart.update();
      });

      legend.dispatch.on('legendDblclick', function(d) {
        //Double clicking should always enable current series, and disable all others.
        data.forEach(function(d) {
          d.disabled = true;
        });
        d.disabled = false;

        state.disabled = data.map(function(d) { return !!d.disabled });
        dispatch.stateChange(state);
        chart.update();
      });

      controls.dispatch.on('legendClick', function(d,i) {
        if (!d.disabled) return;
        // controlsData is function-scoped (var inside the showControls branch),
        // so it is visible here; mark all entries disabled, then enable the clicked one.
        controlsData = controlsData.map(function(s) {
          s.disabled = true;
          return s;
        });
        d.disabled = false;

        switch (d.key) {
          case 'Grouped':
            multibar.stacked(false);
            break;
          case 'Stacked':
            multibar.stacked(true);
            break;
        }

        state.stacked = multibar.stacked();
        dispatch.stateChange(state);

        chart.update();
      });

      dispatch.on('tooltipShow', function(e) {
        if (tooltips) showTooltip(e, that.parentNode);
      });

      // Update chart from a state object passed to event handler
      dispatch.on('changeState', function(e) {

        if (typeof e.disabled !== 'undefined') {
          data.forEach(function(series,i) {
            series.disabled = e.disabled[i];
          });

          state.disabled = e.disabled;
        }

        if (typeof e.stacked !== 'undefined') {
          multibar.stacked(e.stacked);
          state.stacked = e.stacked;
        }

        selection.call(chart);
      });
      //============================================================


    });

    return chart;
  }


  //============================================================
  // Event Handling/Dispatching (out of chart's scope)
  //------------------------------------------------------------

  // Shift bar-local tooltip coordinates into chart space before re-dispatching.
  multibar.dispatch.on('elementMouseover.tooltip', function(e) {
    e.pos = [e.pos[0] + margin.left, e.pos[1] + margin.top];
    dispatch.tooltipShow(e);
  });

  multibar.dispatch.on('elementMouseout.tooltip', function(e) {
    dispatch.tooltipHide(e);
  });
  dispatch.on('tooltipHide', function() {
    if (tooltips) nv.tooltip.cleanup();
  });

  //============================================================


  //============================================================
  // Expose Public Variables
  //------------------------------------------------------------

  // expose chart's sub-components
  chart.dispatch = dispatch;
  chart.multibar = multibar;
  chart.legend = legend;
  chart.xAxis = xAxis;
  chart.yAxis = yAxis;

  // Delegate these accessors straight through to the multibar sub-model.
  d3.rebind(chart, multibar, 'x', 'y', 'xDomain', 'yDomain', 'forceX', 'forceY', 'clipEdge', 'id', 'delay', 'showValues', 'valueFormat', 'stacked', 'barColor');

  // Margin setter merges per-side values, leaving unspecified sides untouched.
  chart.margin = function(_) {
    if (!arguments.length) return margin;
    margin.top = typeof _.top != 'undefined' ? _.top : margin.top;
    margin.right = typeof _.right != 'undefined' ? _.right : margin.right;
    margin.bottom = typeof _.bottom != 'undefined' ? _.bottom : margin.bottom;
    margin.left = typeof _.left != 'undefined' ? _.left : margin.left;
    return chart;
  };

  chart.width = function(_) {
    if (!arguments.length) return width;
    width = _;
    return chart;
  };

  chart.height = function(_) {
    if (!arguments.length) return height;
    height = _;
    return chart;
  };

  chart.color = function(_) {
    if (!arguments.length) return color;
    color = nv.utils.getColor(_);
    legend.color(color);
    return chart;
  };

  chart.showControls = function(_) {
    if (!arguments.length) return showControls;
    showControls = _;
    return chart;
  };

  chart.showLegend = function(_) {
    if (!arguments.length) return showLegend;
    showLegend = _;
    return chart;
  };

  chart.tooltip = function(_) {
    if (!arguments.length) return tooltip;
    tooltip = _;
    return chart;
  };

  chart.tooltips = function(_) {
    if (!arguments.length) return tooltips;
    tooltips = _;
    return chart;
  };

  // Alias of chart.tooltip, kept for API compatibility with other nvd3 charts.
  chart.tooltipContent = function(_) {
    if (!arguments.length) return tooltip;
    tooltip = _;
    return chart;
  };

  chart.state = function(_) {
    if (!arguments.length) return state;
    state = _;
    return chart;
  };

  chart.defaultState = function(_) {
    if (!arguments.length) return defaultState;
    defaultState = _;
    return chart;
  };

  chart.noData = function(_) {
    if (!arguments.length) return noData;
    noData = _;
    return chart;
  };
//============================================================ - - - return chart; -} -nv.models.multiChart = function() { - - //============================================================ - // Public Variables with Default Settings - //------------------------------------------------------------ - - var margin = {top: 30, right: 20, bottom: 50, left: 60}, - color = d3.scale.category20().range(), - width = null, - height = null, - showLegend = true, - tooltips = true, - tooltip = function(key, x, y, e, graph) { - return '<h3>' + key + '</h3>' + - '<p>' + y + ' at ' + x + '</p>' - }, - x, y; //can be accessed via chart.lines.[x/y]Scale() - - //============================================================ - // Private Variables - //------------------------------------------------------------ - - var x = d3.scale.linear(), - yScale1 = d3.scale.linear(), - yScale2 = d3.scale.linear(), - - lines1 = nv.models.line().yScale(yScale1), - lines2 = nv.models.line().yScale(yScale2), - - bars1 = nv.models.multiBar().stacked(false).yScale(yScale1), - bars2 = nv.models.multiBar().stacked(false).yScale(yScale2), - - stack1 = nv.models.stackedArea().yScale(yScale1), - stack2 = nv.models.stackedArea().yScale(yScale2), - - xAxis = nv.models.axis().scale(x).orient('bottom').tickPadding(5), - yAxis1 = nv.models.axis().scale(yScale1).orient('left'), - yAxis2 = nv.models.axis().scale(yScale2).orient('right'), - - legend = nv.models.legend().height(30), - dispatch = d3.dispatch('tooltipShow', 'tooltipHide'); - - var showTooltip = function(e, offsetElement) { - var left = e.pos[0] + ( offsetElement.offsetLeft || 0 ), - top = e.pos[1] + ( offsetElement.offsetTop || 0), - x = xAxis.tickFormat()(lines1.x()(e.point, e.pointIndex)), - y = ((e.series.yAxis == 2) ? 
yAxis2 : yAxis1).tickFormat()(lines1.y()(e.point, e.pointIndex)), - content = tooltip(e.series.key, x, y, e, chart); - - nv.tooltip.show([left, top], content, undefined, undefined, offsetElement.offsetParent); - }; - - function chart(selection) { - selection.each(function(data) { - var container = d3.select(this), - that = this; - - chart.update = function() { container.transition().call(chart); }; - chart.container = this; - - var availableWidth = (width || parseInt(container.style('width')) || 960) - - margin.left - margin.right, - availableHeight = (height || parseInt(container.style('height')) || 400) - - margin.top - margin.bottom; - - var dataLines1 = data.filter(function(d) {return !d.disabled && d.type == 'line' && d.yAxis == 1}) - var dataLines2 = data.filter(function(d) {return !d.disabled && d.type == 'line' && d.yAxis == 2}) - var dataBars1 = data.filter(function(d) {return !d.disabled && d.type == 'bar' && d.yAxis == 1}) - var dataBars2 = data.filter(function(d) {return !d.disabled && d.type == 'bar' && d.yAxis == 2}) - var dataStack1 = data.filter(function(d) {return !d.disabled && d.type == 'area' && d.yAxis == 1}) - var dataStack2 = data.filter(function(d) {return !d.disabled && d.type == 'area' && d.yAxis == 2}) - - var series1 = data.filter(function(d) {return !d.disabled && d.yAxis == 1}) - .map(function(d) { - return d.values.map(function(d,i) { - return { x: d.x, y: d.y } - }) - }) - - var series2 = data.filter(function(d) {return !d.disabled && d.yAxis == 2}) - .map(function(d) { - return d.values.map(function(d,i) { - return { x: d.x, y: d.y } - }) - }) - - x .domain(d3.extent(d3.merge(series1.concat(series2)), function(d) { return d.x } )) - .range([0, availableWidth]); - - var wrap = container.selectAll('g.wrap.multiChart').data([data]); - var gEnter = wrap.enter().append('g').attr('class', 'wrap nvd3 multiChart').append('g'); - - gEnter.append('g').attr('class', 'x axis'); - gEnter.append('g').attr('class', 'y1 axis'); - 
gEnter.append('g').attr('class', 'y2 axis'); - gEnter.append('g').attr('class', 'lines1Wrap'); - gEnter.append('g').attr('class', 'lines2Wrap'); - gEnter.append('g').attr('class', 'bars1Wrap'); - gEnter.append('g').attr('class', 'bars2Wrap'); - gEnter.append('g').attr('class', 'stack1Wrap'); - gEnter.append('g').attr('class', 'stack2Wrap'); - gEnter.append('g').attr('class', 'legendWrap'); - - var g = wrap.select('g'); - - if (showLegend) { - legend.width( availableWidth / 2 ); - - g.select('.legendWrap') - .datum(data.map(function(series) { - series.originalKey = series.originalKey === undefined ? series.key : series.originalKey; - series.key = series.originalKey + (series.yAxis == 1 ? '' : ' (right axis)'); - return series; - })) - .call(legend); - - if ( margin.top != legend.height()) { - margin.top = legend.height(); - availableHeight = (height || parseInt(container.style('height')) || 400) - - margin.top - margin.bottom; - } - - g.select('.legendWrap') - .attr('transform', 'translate(' + ( availableWidth / 2 ) + ',' + (-margin.top) +')'); - } - - - lines1 - .width(availableWidth) - .height(availableHeight) - .interpolate("monotone") - .color(data.map(function(d,i) { - return d.color || color[i % color.length]; - }).filter(function(d,i) { return !data[i].disabled && data[i].yAxis == 1 && data[i].type == 'line'})); - - lines2 - .width(availableWidth) - .height(availableHeight) - .interpolate("monotone") - .color(data.map(function(d,i) { - return d.color || color[i % color.length]; - }).filter(function(d,i) { return !data[i].disabled && data[i].yAxis == 2 && data[i].type == 'line'})); - - bars1 - .width(availableWidth) - .height(availableHeight) - .color(data.map(function(d,i) { - return d.color || color[i % color.length]; - }).filter(function(d,i) { return !data[i].disabled && data[i].yAxis == 1 && data[i].type == 'bar'})); - - bars2 - .width(availableWidth) - .height(availableHeight) - .color(data.map(function(d,i) { - return d.color || color[i % color.length]; 
- }).filter(function(d,i) { return !data[i].disabled && data[i].yAxis == 2 && data[i].type == 'bar'})); - - stack1 - .width(availableWidth) - .height(availableHeight) - .color(data.map(function(d,i) { - return d.color || color[i % color.length]; - }).filter(function(d,i) { return !data[i].disabled && data[i].yAxis == 1 && data[i].type == 'area'})); - - stack2 - .width(availableWidth) - .height(availableHeight) - .color(data.map(function(d,i) { - return d.color || color[i % color.length]; - }).filter(function(d,i) { return !data[i].disabled && data[i].yAxis == 2 && data[i].type == 'area'})); - - g.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')'); - - - var lines1Wrap = g.select('.lines1Wrap') - .datum(dataLines1) - var bars1Wrap = g.select('.bars1Wrap') - .datum(dataBars1) - var stack1Wrap = g.select('.stack1Wrap') - .datum(dataStack1) - - var lines2Wrap = g.select('.lines2Wrap') - .datum(dataLines2) - var bars2Wrap = g.select('.bars2Wrap') - .datum(dataBars2) - var stack2Wrap = g.select('.stack2Wrap') - .datum(dataStack2) - - var extraValue1 = dataStack1.length ? dataStack1.map(function(a){return a.values}).reduce(function(a,b){ - return a.map(function(aVal,i){return {x: aVal.x, y: aVal.y + b[i].y}}) - }).concat([{x:0, y:0}]) : [] - var extraValue2 = dataStack2.length ? 
dataStack2.map(function(a){return a.values}).reduce(function(a,b){ - return a.map(function(aVal,i){return {x: aVal.x, y: aVal.y + b[i].y}}) - }).concat([{x:0, y:0}]) : [] - - yScale1 .domain(d3.extent(d3.merge(series1).concat(extraValue1), function(d) { return d.y } )) - .range([0, availableHeight]) - - yScale2 .domain(d3.extent(d3.merge(series2).concat(extraValue2), function(d) { return d.y } )) - .range([0, availableHeight]) - - lines1.yDomain(yScale1.domain()) - bars1.yDomain(yScale1.domain()) - stack1.yDomain(yScale1.domain()) - - lines2.yDomain(yScale2.domain()) - bars2.yDomain(yScale2.domain()) - stack2.yDomain(yScale2.domain()) - - if(dataStack1.length){d3.transition(stack1Wrap).call(stack1);} - if(dataStack2.length){d3.transition(stack2Wrap).call(stack2);} - - if(dataBars1.length){d3.transition(bars1Wrap).call(bars1);} - if(dataBars2.length){d3.transition(bars2Wrap).call(bars2);} - - if(dataLines1.length){d3.transition(lines1Wrap).call(lines1);} - if(dataLines2.length){d3.transition(lines2Wrap).call(lines2);} - - - - xAxis - .ticks( availableWidth / 100 ) - .tickSize(-availableHeight, 0); - - g.select('.x.axis') - .attr('transform', 'translate(0,' + availableHeight + ')'); - d3.transition(g.select('.x.axis')) - .call(xAxis); - - yAxis1 - .ticks( availableHeight / 36 ) - .tickSize( -availableWidth, 0); - - - d3.transition(g.select('.y1.axis')) - .call(yAxis1); - - yAxis2 - .ticks( availableHeight / 36 ) - .tickSize( -availableWidth, 0); - - d3.transition(g.select('.y2.axis')) - .call(yAxis2); - - g.select('.y2.axis') - .style('opacity', series2.length ? 
1 : 0) - .attr('transform', 'translate(' + x.range()[1] + ',0)'); - - legend.dispatch.on('legendClick', function(d,i) { - d.disabled = !d.disabled; - - if (!data.filter(function(d) { return !d.disabled }).length) { - data.map(function(d) { - d.disabled = false; - wrap.selectAll('.series').classed('disabled', false); - return d; - }); - } - chart.update(); - }); - - dispatch.on('tooltipShow', function(e) { - if (tooltips) showTooltip(e, that.parentNode); - }); - - }); - - return chart; - } - - - //============================================================ - // Event Handling/Dispatching (out of chart's scope) - //------------------------------------------------------------ - - lines1.dispatch.on('elementMouseover.tooltip', function(e) { - e.pos = [e.pos[0] + margin.left, e.pos[1] + margin.top]; - dispatch.tooltipShow(e); - }); - - lines1.dispatch.on('elementMouseout.tooltip', function(e) { - dispatch.tooltipHide(e); - }); - - lines2.dispatch.on('elementMouseover.tooltip', function(e) { - e.pos = [e.pos[0] + margin.left, e.pos[1] + margin.top]; - dispatch.tooltipShow(e); - }); - - lines2.dispatch.on('elementMouseout.tooltip', function(e) { - dispatch.tooltipHide(e); - }); - - bars1.dispatch.on('elementMouseover.tooltip', function(e) { - e.pos = [e.pos[0] + margin.left, e.pos[1] + margin.top]; - dispatch.tooltipShow(e); - }); - - bars1.dispatch.on('elementMouseout.tooltip', function(e) { - dispatch.tooltipHide(e); - }); - - bars2.dispatch.on('elementMouseover.tooltip', function(e) { - e.pos = [e.pos[0] + margin.left, e.pos[1] + margin.top]; - dispatch.tooltipShow(e); - }); - - bars2.dispatch.on('elementMouseout.tooltip', function(e) { - dispatch.tooltipHide(e); - }); - - stack1.dispatch.on('tooltipShow', function(e) { - //disable tooltips when value ~= 0 - //// TODO: consider removing points from voronoi that have 0 value instead of this hack - if (!Math.round(stack1.y()(e.point) * 100)) { // 100 will not be good for very small numbers... 
will have to think about making this valu dynamic, based on data range - setTimeout(function() { d3.selectAll('.point.hover').classed('hover', false) }, 0); - return false; - } - - e.pos = [e.pos[0] + margin.left, e.pos[1] + margin.top], - dispatch.tooltipShow(e); - }); - - stack1.dispatch.on('tooltipHide', function(e) { - dispatch.tooltipHide(e); - }); - - stack2.dispatch.on('tooltipShow', function(e) { - //disable tooltips when value ~= 0 - //// TODO: consider removing points from voronoi that have 0 value instead of this hack - if (!Math.round(stack2.y()(e.point) * 100)) { // 100 will not be good for very small numbers... will have to think about making this valu dynamic, based on data range - setTimeout(function() { d3.selectAll('.point.hover').classed('hover', false) }, 0); - return false; - } - - e.pos = [e.pos[0] + margin.left, e.pos[1] + margin.top], - dispatch.tooltipShow(e); - }); - - stack2.dispatch.on('tooltipHide', function(e) { - dispatch.tooltipHide(e); - }); - - lines1.dispatch.on('elementMouseover.tooltip', function(e) { - e.pos = [e.pos[0] + margin.left, e.pos[1] + margin.top]; - dispatch.tooltipShow(e); - }); - - lines1.dispatch.on('elementMouseout.tooltip', function(e) { - dispatch.tooltipHide(e); - }); - - lines2.dispatch.on('elementMouseover.tooltip', function(e) { - e.pos = [e.pos[0] + margin.left, e.pos[1] + margin.top]; - dispatch.tooltipShow(e); - }); - - lines2.dispatch.on('elementMouseout.tooltip', function(e) { - dispatch.tooltipHide(e); - }); - - dispatch.on('tooltipHide', function() { - if (tooltips) nv.tooltip.cleanup(); - }); - - - - //============================================================ - // Global getters and setters - //------------------------------------------------------------ - - chart.dispatch = dispatch; - chart.lines1 = lines1; - chart.lines2 = lines2; - chart.bars1 = bars1; - chart.bars2 = bars2; - chart.stack1 = stack1; - chart.stack2 = stack2; - chart.xAxis = xAxis; - chart.yAxis1 = yAxis1; - chart.yAxis2 = 
yAxis2; - - chart.x = function(_) { - if (!arguments.length) return getX; - getX = _; - lines1.x(_); - bars1.x(_); - return chart; - }; - - chart.y = function(_) { - if (!arguments.length) return getY; - getY = _; - lines1.y(_); - bars1.y(_); - return chart; - }; - - chart.margin = function(_) { - if (!arguments.length) return margin; - margin = _; - return chart; - }; - - chart.width = function(_) { - if (!arguments.length) return width; - width = _; - return chart; - }; - - chart.height = function(_) { - if (!arguments.length) return height; - height = _; - return chart; - }; - - chart.color = function(_) { - if (!arguments.length) return color; - color = _; - legend.color(_); - return chart; - }; - - chart.showLegend = function(_) { - if (!arguments.length) return showLegend; - showLegend = _; - return chart; - }; - - chart.tooltips = function(_) { - if (!arguments.length) return tooltips; - tooltips = _; - return chart; - }; - - chart.tooltipContent = function(_) { - if (!arguments.length) return tooltip; - tooltip = _; - return chart; - }; - - return chart; -} - - -nv.models.ohlcBar = function() { - - //============================================================ - // Public Variables with Default Settings - //------------------------------------------------------------ - - var margin = {top: 0, right: 0, bottom: 0, left: 0} - , width = 960 - , height = 500 - , id = Math.floor(Math.random() * 10000) //Create semi-unique ID in case user doesn't select one - , x = d3.scale.linear() - , y = d3.scale.linear() - , getX = function(d) { return d.x } - , getY = function(d) { return d.y } - , getOpen = function(d) { return d.open } - , getClose = function(d) { return d.close } - , getHigh = function(d) { return d.high } - , getLow = function(d) { return d.low } - , forceX = [] - , forceY = [] - , padData = false // If true, adds half a data points width to front and back, for lining up a line chart with a bar chart - , clipEdge = true - , color = 
nv.utils.defaultColor() - , xDomain - , yDomain - , dispatch = d3.dispatch('chartClick', 'elementClick', 'elementDblClick', 'elementMouseover', 'elementMouseout') - ; - - //============================================================ - - //============================================================ - // Private Variables - //------------------------------------------------------------ - - //TODO: store old scales for transitions - - //============================================================ - - - function chart(selection) { - selection.each(function(data) { - var availableWidth = width - margin.left - margin.right, - availableHeight = height - margin.top - margin.bottom, - container = d3.select(this); - - - //------------------------------------------------------------ - // Setup Scales - - x .domain(xDomain || d3.extent(data[0].values.map(getX).concat(forceX) )); - - if (padData) - x.range([availableWidth * .5 / data[0].values.length, availableWidth * (data[0].values.length - .5) / data[0].values.length ]); - else - x.range([0, availableWidth]); - - y .domain(yDomain || [ - d3.min(data[0].values.map(getLow).concat(forceY)), - d3.max(data[0].values.map(getHigh).concat(forceY)) - ]) - .range([availableHeight, 0]); - - // If scale's domain don't have a range, slightly adjust to make one... so a chart can show a single data point - if (x.domain()[0] === x.domain()[1] || y.domain()[0] === y.domain()[1]) singlePoint = true; - if (x.domain()[0] === x.domain()[1]) - x.domain()[0] ? - x.domain([x.domain()[0] - x.domain()[0] * 0.01, x.domain()[1] + x.domain()[1] * 0.01]) - : x.domain([-1,1]); - - if (y.domain()[0] === y.domain()[1]) - y.domain()[0] ? 
- y.domain([y.domain()[0] + y.domain()[0] * 0.01, y.domain()[1] - y.domain()[1] * 0.01]) - : y.domain([-1,1]); - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Setup containers and skeleton of chart - - var wrap = d3.select(this).selectAll('g.nv-wrap.nv-ohlcBar').data([data[0].values]); - var wrapEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-ohlcBar'); - var defsEnter = wrapEnter.append('defs'); - var gEnter = wrapEnter.append('g'); - var g = wrap.select('g'); - - gEnter.append('g').attr('class', 'nv-ticks'); - - wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')'); - - //------------------------------------------------------------ - - - container - .on('click', function(d,i) { - dispatch.chartClick({ - data: d, - index: i, - pos: d3.event, - id: id - }); - }); - - - defsEnter.append('clipPath') - .attr('id', 'nv-chart-clip-path-' + id) - .append('rect'); - - wrap.select('#nv-chart-clip-path-' + id + ' rect') - .attr('width', availableWidth) - .attr('height', availableHeight); - - g .attr('clip-path', clipEdge ? 'url(#nv-chart-clip-path-' + id + ')' : ''); - - - - var ticks = wrap.select('.nv-ticks').selectAll('.nv-tick') - .data(function(d) { return d }); - - ticks.exit().remove(); - - - var ticksEnter = ticks.enter().append('path') - .attr('class', function(d,i,j) { return (getOpen(d,i) > getClose(d,i) ? 
'nv-tick negative' : 'nv-tick positive') + ' nv-tick-' + j + '-' + i }) - .attr('d', function(d,i) { - var w = (availableWidth / data[0].values.length) * .9; - return 'm0,0l0,' - + (y(getOpen(d,i)) - - y(getHigh(d,i))) - + 'l' - + (-w/2) - + ',0l' - + (w/2) - + ',0l0,' - + (y(getLow(d,i)) - y(getOpen(d,i))) - + 'l0,' - + (y(getClose(d,i)) - - y(getLow(d,i))) - + 'l' - + (w/2) - + ',0l' - + (-w/2) - + ',0z'; - }) - .attr('transform', function(d,i) { return 'translate(' + x(getX(d,i)) + ',' + y(getHigh(d,i)) + ')'; }) - //.attr('fill', function(d,i) { return color[0]; }) - //.attr('stroke', function(d,i) { return color[0]; }) - //.attr('x', 0 ) - //.attr('y', function(d,i) { return y(Math.max(0, getY(d,i))) }) - //.attr('height', function(d,i) { return Math.abs(y(getY(d,i)) - y(0)) }) - .on('mouseover', function(d,i) { - d3.select(this).classed('hover', true); - dispatch.elementMouseover({ - point: d, - series: data[0], - pos: [x(getX(d,i)), y(getY(d,i))], // TODO: Figure out why the value appears to be shifted - pointIndex: i, - seriesIndex: 0, - e: d3.event - }); - - }) - .on('mouseout', function(d,i) { - d3.select(this).classed('hover', false); - dispatch.elementMouseout({ - point: d, - series: data[0], - pointIndex: i, - seriesIndex: 0, - e: d3.event - }); - }) - .on('click', function(d,i) { - dispatch.elementClick({ - //label: d[label], - value: getY(d,i), - data: d, - index: i, - pos: [x(getX(d,i)), y(getY(d,i))], - e: d3.event, - id: id - }); - d3.event.stopPropagation(); - }) - .on('dblclick', function(d,i) { - dispatch.elementDblClick({ - //label: d[label], - value: getY(d,i), - data: d, - index: i, - pos: [x(getX(d,i)), y(getY(d,i))], - e: d3.event, - id: id - }); - d3.event.stopPropagation(); - }); - - ticks - .attr('class', function(d,i,j) { return (getOpen(d,i) > getClose(d,i) ? 
'nv-tick negative' : 'nv-tick positive') + ' nv-tick-' + j + '-' + i }) - d3.transition(ticks) - .attr('transform', function(d,i) { return 'translate(' + x(getX(d,i)) + ',' + y(getHigh(d,i)) + ')'; }) - .attr('d', function(d,i) { - var w = (availableWidth / data[0].values.length) * .9; - return 'm0,0l0,' - + (y(getOpen(d,i)) - - y(getHigh(d,i))) - + 'l' - + (-w/2) - + ',0l' - + (w/2) - + ',0l0,' - + (y(getLow(d,i)) - - y(getOpen(d,i))) - + 'l0,' - + (y(getClose(d,i)) - - y(getLow(d,i))) - + 'l' - + (w/2) - + ',0l' - + (-w/2) - + ',0z'; - }) - //.attr('width', (availableWidth / data[0].values.length) * .9 ) - - - //d3.transition(ticks) - //.attr('y', function(d,i) { return y(Math.max(0, getY(d,i))) }) - //.attr('height', function(d,i) { return Math.abs(y(getY(d,i)) - y(0)) }); - //.order(); // not sure if this makes any sense for this model - - }); - - return chart; - } - - - //============================================================ - // Expose Public Variables - //------------------------------------------------------------ - - chart.dispatch = dispatch; - - chart.x = function(_) { - if (!arguments.length) return getX; - getX = _; - return chart; - }; - - chart.y = function(_) { - if (!arguments.length) return getY; - getY = _; - return chart; - }; - - chart.open = function(_) { - if (!arguments.length) return getOpen; - getOpen = _; - return chart; - }; - - chart.close = function(_) { - if (!arguments.length) return getClose; - getClose = _; - return chart; - }; - - chart.high = function(_) { - if (!arguments.length) return getHigh; - getHigh = _; - return chart; - }; - - chart.low = function(_) { - if (!arguments.length) return getLow; - getLow = _; - return chart; - }; - - chart.margin = function(_) { - if (!arguments.length) return margin; - margin.top = typeof _.top != 'undefined' ? _.top : margin.top; - margin.right = typeof _.right != 'undefined' ? _.right : margin.right; - margin.bottom = typeof _.bottom != 'undefined' ? 
_.bottom : margin.bottom; - margin.left = typeof _.left != 'undefined' ? _.left : margin.left; - return chart; - }; - - chart.width = function(_) { - if (!arguments.length) return width; - width = _; - return chart; - }; - - chart.height = function(_) { - if (!arguments.length) return height; - height = _; - return chart; - }; - - chart.xScale = function(_) { - if (!arguments.length) return x; - x = _; - return chart; - }; - - chart.yScale = function(_) { - if (!arguments.length) return y; - y = _; - return chart; - }; - - chart.xDomain = function(_) { - if (!arguments.length) return xDomain; - xDomain = _; - return chart; - }; - - chart.yDomain = function(_) { - if (!arguments.length) return yDomain; - yDomain = _; - return chart; - }; - - chart.forceX = function(_) { - if (!arguments.length) return forceX; - forceX = _; - return chart; - }; - - chart.forceY = function(_) { - if (!arguments.length) return forceY; - forceY = _; - return chart; - }; - - chart.padData = function(_) { - if (!arguments.length) return padData; - padData = _; - return chart; - }; - - chart.clipEdge = function(_) { - if (!arguments.length) return clipEdge; - clipEdge = _; - return chart; - }; - - chart.color = function(_) { - if (!arguments.length) return color; - color = nv.utils.getColor(_); - return chart; - }; - - chart.id = function(_) { - if (!arguments.length) return id; - id = _; - return chart; - }; - - //============================================================ - - - return chart; -} -nv.models.pie = function() { - - //============================================================ - // Public Variables with Default Settings - //------------------------------------------------------------ - - var margin = {top: 0, right: 0, bottom: 0, left: 0} - , width = 500 - , height = 500 - , getValues = function(d) { return d.values } - , getX = function(d) { return d.x } - , getY = function(d) { return d.y } - , getDescription = function(d) { return d.description } - , id = 
Math.floor(Math.random() * 10000) //Create semi-unique ID in case user doesn't select one - , color = nv.utils.defaultColor() - , valueFormat = d3.format(',.2f') - , showLabels = true - , pieLabelsOutside = true - , donutLabelsOutside = false - , labelThreshold = .02 //if slice percentage is under this, don't show label - , donut = false - , labelSunbeamLayout = false - , startAngle = false - , endAngle = false - , donutRatio = 0.5 - , dispatch = d3.dispatch('chartClick', 'elementClick', 'elementDblClick', 'elementMouseover', 'elementMouseout') - ; - - //============================================================ - - - function chart(selection) { - selection.each(function(data) { - var availableWidth = width - margin.left - margin.right, - availableHeight = height - margin.top - margin.bottom, - radius = Math.min(availableWidth, availableHeight) / 2, - arcRadius = radius-(radius / 5), - container = d3.select(this); - - - //------------------------------------------------------------ - // Setup containers and skeleton of chart - - //var wrap = container.selectAll('.nv-wrap.nv-pie').data([data]); - var wrap = container.selectAll('.nv-wrap.nv-pie').data([getValues(data[0])]); - var wrapEnter = wrap.enter().append('g').attr('class','nvd3 nv-wrap nv-pie nv-chart-' + id); - var gEnter = wrapEnter.append('g'); - var g = wrap.select('g'); - - gEnter.append('g').attr('class', 'nv-pie'); - - wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')'); - g.select('.nv-pie').attr('transform', 'translate(' + availableWidth / 2 + ',' + availableHeight / 2 + ')'); - - //------------------------------------------------------------ - - - container - .on('click', function(d,i) { - dispatch.chartClick({ - data: d, - index: i, - pos: d3.event, - id: id - }); - }); - - - var arc = d3.svg.arc() - .outerRadius(arcRadius); - - if (startAngle) arc.startAngle(startAngle) - if (endAngle) arc.endAngle(endAngle); - if (donut) arc.innerRadius(radius * donutRatio); - - // Setup 
the Pie chart and choose the data element - var pie = d3.layout.pie() - .sort(null) - .value(function(d) { return d.disabled ? 0 : getY(d) }); - - var slices = wrap.select('.nv-pie').selectAll('.nv-slice') - .data(pie); - - slices.exit().remove(); - - var ae = slices.enter().append('g') - .attr('class', 'nv-slice') - .on('mouseover', function(d,i){ - d3.select(this).classed('hover', true); - dispatch.elementMouseover({ - label: getX(d.data), - value: getY(d.data), - point: d.data, - pointIndex: i, - pos: [d3.event.pageX, d3.event.pageY], - id: id - }); - }) - .on('mouseout', function(d,i){ - d3.select(this).classed('hover', false); - dispatch.elementMouseout({ - label: getX(d.data), - value: getY(d.data), - point: d.data, - index: i, - id: id - }); - }) - .on('click', function(d,i) { - dispatch.elementClick({ - label: getX(d.data), - value: getY(d.data), - point: d.data, - index: i, - pos: d3.event, - id: id - }); - d3.event.stopPropagation(); - }) - .on('dblclick', function(d,i) { - dispatch.elementDblClick({ - label: getX(d.data), - value: getY(d.data), - point: d.data, - index: i, - pos: d3.event, - id: id - }); - d3.event.stopPropagation(); - }); - - slices - .attr('fill', function(d,i) { return color(d, i); }) - .attr('stroke', function(d,i) { return color(d, i); }); - - var paths = ae.append('path') - .each(function(d) { this._current = d; }); - //.attr('d', arc); - - d3.transition(slices.select('path')) - .attr('d', arc) - .attrTween('d', arcTween); - - if (showLabels) { - // This does the normal label - var labelsArc = d3.svg.arc().innerRadius(0); - - if (pieLabelsOutside){ labelsArc = arc; } - - if (donutLabelsOutside) { labelsArc = d3.svg.arc().outerRadius(arc.outerRadius()); } - - ae.append("g").classed("nv-label", true) - .each(function(d, i) { - var group = d3.select(this); - - group - .attr('transform', function(d) { - if (labelSunbeamLayout) { - d.outerRadius = arcRadius + 10; // Set Outer Coordinate - d.innerRadius = arcRadius + 15; // Set Inner 
Coordinate - var rotateAngle = (d.startAngle + d.endAngle) / 2 * (180 / Math.PI); - if ((d.startAngle+d.endAngle)/2 < Math.PI) { - rotateAngle -= 90; - } else { - rotateAngle += 90; - } - return 'translate(' + labelsArc.centroid(d) + ') rotate(' + rotateAngle + ')'; - } else { - d.outerRadius = radius + 10; // Set Outer Coordinate - d.innerRadius = radius + 15; // Set Inner Coordinate - return 'translate(' + labelsArc.centroid(d) + ')' - } - }); - - group.append('rect') - .style('stroke', '#fff') - .style('fill', '#fff') - .attr("rx", 3) - .attr("ry", 3); - - group.append('text') - .style('text-anchor', labelSunbeamLayout ? ((d.startAngle + d.endAngle) / 2 < Math.PI ? 'start' : 'end') : 'middle') //center the text on it's origin or begin/end if orthogonal aligned - .style('fill', '#000') - - - }); - - slices.select(".nv-label").transition() - .attr('transform', function(d) { - if (labelSunbeamLayout) { - d.outerRadius = arcRadius + 10; // Set Outer Coordinate - d.innerRadius = arcRadius + 15; // Set Inner Coordinate - var rotateAngle = (d.startAngle + d.endAngle) / 2 * (180 / Math.PI); - if ((d.startAngle+d.endAngle)/2 < Math.PI) { - rotateAngle -= 90; - } else { - rotateAngle += 90; - } - return 'translate(' + labelsArc.centroid(d) + ') rotate(' + rotateAngle + ')'; - } else { - d.outerRadius = radius + 10; // Set Outer Coordinate - d.innerRadius = radius + 15; // Set Inner Coordinate - return 'translate(' + labelsArc.centroid(d) + ')' - } - }); - - slices.each(function(d, i) { - var slice = d3.select(this); - - slice - .select(".nv-label text") - .style('text-anchor', labelSunbeamLayout ? ((d.startAngle + d.endAngle) / 2 < Math.PI ? 'start' : 'end') : 'middle') //center the text on it's origin or begin/end if orthogonal aligned - .text(function(d, i) { - var percent = (d.endAngle - d.startAngle) / (2 * Math.PI); - return (d.value && percent > labelThreshold) ? 
getX(d.data) : ''; - }); - - var textBox = slice.select('text').node().getBBox(); - slice.select(".nv-label rect") - .attr("width", textBox.width + 10) - .attr("height", textBox.height + 10) - .attr("transform", function() { - return "translate(" + [textBox.x - 5, textBox.y - 5] + ")"; - }); - }); - } - - - // Computes the angle of an arc, converting from radians to degrees. - function angle(d) { - var a = (d.startAngle + d.endAngle) * 90 / Math.PI - 90; - return a > 90 ? a - 180 : a; - } - - function arcTween(a) { - a.endAngle = isNaN(a.endAngle) ? 0 : a.endAngle; - a.startAngle = isNaN(a.startAngle) ? 0 : a.startAngle; - if (!donut) a.innerRadius = 0; - var i = d3.interpolate(this._current, a); - this._current = i(0); - return function(t) { - return arc(i(t)); - }; - } - - function tweenPie(b) { - b.innerRadius = 0; - var i = d3.interpolate({startAngle: 0, endAngle: 0}, b); - return function(t) { - return arc(i(t)); - }; - } - - }); - - return chart; - } - - - //============================================================ - // Expose Public Variables - //------------------------------------------------------------ - - chart.dispatch = dispatch; - - chart.margin = function(_) { - if (!arguments.length) return margin; - margin.top = typeof _.top != 'undefined' ? _.top : margin.top; - margin.right = typeof _.right != 'undefined' ? _.right : margin.right; - margin.bottom = typeof _.bottom != 'undefined' ? _.bottom : margin.bottom; - margin.left = typeof _.left != 'undefined' ? 
_.left : margin.left; - return chart; - }; - - chart.width = function(_) { - if (!arguments.length) return width; - width = _; - return chart; - }; - - chart.height = function(_) { - if (!arguments.length) return height; - height = _; - return chart; - }; - - chart.values = function(_) { - if (!arguments.length) return getValues; - getValues = _; - return chart; - }; - - chart.x = function(_) { - if (!arguments.length) return getX; - getX = _; - return chart; - }; - - chart.y = function(_) { - if (!arguments.length) return getY; - getY = d3.functor(_); - return chart; - }; - - chart.description = function(_) { - if (!arguments.length) return getDescription; - getDescription = _; - return chart; - }; - - chart.showLabels = function(_) { - if (!arguments.length) return showLabels; - showLabels = _; - return chart; - }; - - chart.labelSunbeamLayout = function(_) { - if (!arguments.length) return labelSunbeamLayout; - labelSunbeamLayout = _; - return chart; - }; - - chart.donutLabelsOutside = function(_) { - if (!arguments.length) return donutLabelsOutside; - donutLabelsOutside = _; - return chart; - }; - - chart.pieLabelsOutside = function(_) { - if (!arguments.length) return pieLabelsOutside; - pieLabelsOutside = _; - return chart; - }; - - chart.donut = function(_) { - if (!arguments.length) return donut; - donut = _; - return chart; - }; - - chart.donutRatio = function(_) { - if (!arguments.length) return donutRatio; - donutRatio = _; - return chart; - }; - - chart.startAngle = function(_) { - if (!arguments.length) return startAngle; - startAngle = _; - return chart; - }; - - chart.endAngle = function(_) { - if (!arguments.length) return endAngle; - endAngle = _; - return chart; - }; - - chart.id = function(_) { - if (!arguments.length) return id; - id = _; - return chart; - }; - - chart.color = function(_) { - if (!arguments.length) return color; - color = nv.utils.getColor(_); - return chart; - }; - - chart.valueFormat = function(_) { - if (!arguments.length) 
return valueFormat; - valueFormat = _; - return chart; - }; - - chart.labelThreshold = function(_) { - if (!arguments.length) return labelThreshold; - labelThreshold = _; - return chart; - }; - //============================================================ - - - return chart; -} -nv.models.pieChart = function() { - - //============================================================ - // Public Variables with Default Settings - //------------------------------------------------------------ - - var pie = nv.models.pie() - , legend = nv.models.legend() - ; - - var margin = {top: 30, right: 20, bottom: 20, left: 20} - , width = null - , height = null - , showLegend = true - , color = nv.utils.defaultColor() - , tooltips = true - , tooltip = function(key, y, e, graph) { - return '<h3>' + key + '</h3>' + - '<p>' + y + '</p>' - } - , state = {} - , defaultState = null - , noData = "No Data Available." - , dispatch = d3.dispatch('tooltipShow', 'tooltipHide', 'stateChange', 'changeState') - ; - - //============================================================ - - - //============================================================ - // Private Variables - //------------------------------------------------------------ - - var showTooltip = function(e, offsetElement) { - var tooltipLabel = pie.description()(e.point) || pie.x()(e.point) - var left = e.pos[0] + ( (offsetElement && offsetElement.offsetLeft) || 0 ), - top = e.pos[1] + ( (offsetElement && offsetElement.offsetTop) || 0), - y = pie.valueFormat()(pie.y()(e.point)), - content = tooltip(tooltipLabel, y, e, chart); - - nv.tooltip.show([left, top], content, e.value < 0 ? 
'n' : 's', null, offsetElement); - }; - - //============================================================ - - - function chart(selection) { - selection.each(function(data) { - var container = d3.select(this), - that = this; - - var availableWidth = (width || parseInt(container.style('width')) || 960) - - margin.left - margin.right, - availableHeight = (height || parseInt(container.style('height')) || 400) - - margin.top - margin.bottom; - - chart.update = function() { container.transition().call(chart); }; - chart.container = this; - - //set state.disabled - state.disabled = data[0].map(function(d) { return !!d.disabled }); - - if (!defaultState) { - var key; - defaultState = {}; - for (key in state) { - if (state[key] instanceof Array) - defaultState[key] = state[key].slice(0); - else - defaultState[key] = state[key]; - } - } - - //------------------------------------------------------------ - // Display No Data message if there's nothing to show. - - if (!data[0] || !data[0].length) { - var noDataText = container.selectAll('.nv-noData').data([noData]); - - noDataText.enter().append('text') - .attr('class', 'nvd3 nv-noData') - .attr('dy', '-.7em') - .style('text-anchor', 'middle'); - - noDataText - .attr('x', margin.left + availableWidth / 2) - .attr('y', margin.top + availableHeight / 2) - .text(function(d) { return d }); - - return chart; - } else { - container.selectAll('.nv-noData').remove(); - } - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Setup containers and skeleton of chart - - var wrap = container.selectAll('g.nv-wrap.nv-pieChart').data([data]); - var gEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-pieChart').append('g'); - var g = wrap.select('g'); - - gEnter.append('g').attr('class', 'nv-pieWrap'); - gEnter.append('g').attr('class', 'nv-legendWrap'); - - //------------------------------------------------------------ - - - 
//------------------------------------------------------------ - // Legend - - if (showLegend) { - legend - .width( availableWidth ) - .key(pie.x()); - - wrap.select('.nv-legendWrap') - .datum(pie.values()(data[0])) - .call(legend); - - if ( margin.top != legend.height()) { - margin.top = legend.height(); - availableHeight = (height || parseInt(container.style('height')) || 400) - - margin.top - margin.bottom; - } - - wrap.select('.nv-legendWrap') - .attr('transform', 'translate(0,' + (-margin.top) +')'); - } - - //------------------------------------------------------------ - - - wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')'); - - - //------------------------------------------------------------ - // Main Chart Component(s) - - pie - .width(availableWidth) - .height(availableHeight); - - - var pieWrap = g.select('.nv-pieWrap') - .datum(data); - - d3.transition(pieWrap).call(pie); - - //------------------------------------------------------------ - - - //============================================================ - // Event Handling/Dispatching (in chart's scope) - //------------------------------------------------------------ - - legend.dispatch.on('legendClick', function(d,i, that) { - d.disabled = !d.disabled; - - if (!pie.values()(data[0]).filter(function(d) { return !d.disabled }).length) { - pie.values()(data[0]).map(function(d) { - d.disabled = false; - wrap.selectAll('.nv-series').classed('disabled', false); - return d; - }); - } - - state.disabled = data[0].map(function(d) { return !!d.disabled }); - dispatch.stateChange(state); - - chart.update(); - }); - - pie.dispatch.on('elementMouseout.tooltip', function(e) { - dispatch.tooltipHide(e); - }); - - // Update chart from a state object passed to event handler - dispatch.on('changeState', function(e) { - - if (typeof e.disabled !== 'undefined') { - data[0].forEach(function(series,i) { - series.disabled = e.disabled[i]; - }); - - state.disabled = e.disabled; - } - - 
chart.update(); - }); - - //============================================================ - - - }); - - return chart; - } - - //============================================================ - // Event Handling/Dispatching (out of chart's scope) - //------------------------------------------------------------ - - pie.dispatch.on('elementMouseover.tooltip', function(e) { - e.pos = [e.pos[0] + margin.left, e.pos[1] + margin.top]; - dispatch.tooltipShow(e); - }); - - dispatch.on('tooltipShow', function(e) { - if (tooltips) showTooltip(e); - }); - - dispatch.on('tooltipHide', function() { - if (tooltips) nv.tooltip.cleanup(); - }); - - //============================================================ - - - //============================================================ - // Expose Public Variables - //------------------------------------------------------------ - - // expose chart's sub-components - chart.legend = legend; - chart.dispatch = dispatch; - chart.pie = pie; - - d3.rebind(chart, pie, 'valueFormat', 'values', 'x', 'y', 'description', 'id', 'showLabels', 'donutLabelsOutside', 'pieLabelsOutside', 'donut', 'donutRatio', 'labelThreshold'); - - chart.margin = function(_) { - if (!arguments.length) return margin; - margin.top = typeof _.top != 'undefined' ? _.top : margin.top; - margin.right = typeof _.right != 'undefined' ? _.right : margin.right; - margin.bottom = typeof _.bottom != 'undefined' ? _.bottom : margin.bottom; - margin.left = typeof _.left != 'undefined' ? 
_.left : margin.left; - return chart; - }; - - chart.width = function(_) { - if (!arguments.length) return width; - width = _; - return chart; - }; - - chart.height = function(_) { - if (!arguments.length) return height; - height = _; - return chart; - }; - - chart.color = function(_) { - if (!arguments.length) return color; - color = nv.utils.getColor(_); - legend.color(color); - pie.color(color); - return chart; - }; - - chart.showLegend = function(_) { - if (!arguments.length) return showLegend; - showLegend = _; - return chart; - }; - - chart.tooltips = function(_) { - if (!arguments.length) return tooltips; - tooltips = _; - return chart; - }; - - chart.tooltipContent = function(_) { - if (!arguments.length) return tooltip; - tooltip = _; - return chart; - }; - - chart.state = function(_) { - if (!arguments.length) return state; - state = _; - return chart; - }; - - chart.defaultState = function(_) { - if (!arguments.length) return defaultState; - defaultState = _; - return chart; - }; - - chart.noData = function(_) { - if (!arguments.length) return noData; - noData = _; - return chart; - }; - - //============================================================ - - - return chart; -} - -nv.models.scatter = function() { - - //============================================================ - // Public Variables with Default Settings - //------------------------------------------------------------ - - var margin = {top: 0, right: 0, bottom: 0, left: 0} - , width = 960 - , height = 500 - , color = nv.utils.defaultColor() // chooses color - , id = Math.floor(Math.random() * 100000) //Create semi-unique ID incase user doesn't select one - , x = d3.scale.linear() - , y = d3.scale.linear() - , z = d3.scale.linear() //linear because d3.svg.shape.size is treated as area - , getX = function(d) { return d.x } // accessor to get the x value - , getY = function(d) { return d.y } // accessor to get the y value - , getSize = function(d) { return d.size || 1} // accessor to get the 
point size - , getShape = function(d) { return d.shape || 'circle' } // accessor to get point shape - , onlyCircles = true // Set to false to use shapes - , forceX = [] // List of numbers to Force into the X scale (ie. 0, or a max / min, etc.) - , forceY = [] // List of numbers to Force into the Y scale - , forceSize = [] // List of numbers to Force into the Size scale - , interactive = true // If true, plots a voronoi overlay for advanced point intersection - , pointKey = null - , pointActive = function(d) { return !d.notActive } // any points that return false will be filtered out - , padData = false // If true, adds half a data points width to front and back, for lining up a line chart with a bar chart - , padDataOuter = .1 //outerPadding to imitate ordinal scale outer padding - , clipEdge = false // if true, masks points within x and y scale - , clipVoronoi = true // if true, masks each point with a circle... can turn off to slightly increase performance - , clipRadius = function() { return 25 } // function to get the radius for voronoi point clips - , xDomain = null // Override x domain (skips the calculation from data) - , yDomain = null // Override y domain - , sizeDomain = null // Override point size domain - , sizeRange = null - , singlePoint = false - , dispatch = d3.dispatch('elementClick', 'elementMouseover', 'elementMouseout') - , useVoronoi = true - ; - - //============================================================ - - - //============================================================ - // Private Variables - //------------------------------------------------------------ - - var x0, y0, z0 // used to store previous scales - , timeoutID - , needsUpdate = false // Flag for when the points are visually updating, but the interactive layer is behind, to disable tooltips - ; - - //============================================================ - - - function chart(selection) { - selection.each(function(data) { - var availableWidth = width - margin.left - 
margin.right, - availableHeight = height - margin.top - margin.bottom, - container = d3.select(this); - - //add series index to each data point for reference - data = data.map(function(series, i) { - series.values = series.values.map(function(point) { - point.series = i; - return point; - }); - return series; - }); - - //------------------------------------------------------------ - // Setup Scales - - // remap and flatten the data for use in calculating the scales' domains - var seriesData = (xDomain && yDomain && sizeDomain) ? [] : // if we know xDomain and yDomain and sizeDomain, no need to calculate.... if Size is constant remember to set sizeDomain to speed up performance - d3.merge( - data.map(function(d) { - return d.values.map(function(d,i) { - return { x: getX(d,i), y: getY(d,i), size: getSize(d,i) } - }) - }) - ); - - x .domain(xDomain || d3.extent(seriesData.map(function(d) { return d.x; }).concat(forceX))) - - if (padData && data[0]) - x.range([(availableWidth * padDataOuter + availableWidth) / (2 *data[0].values.length), availableWidth - availableWidth * (1 + padDataOuter) / (2 * data[0].values.length) ]); - //x.range([availableWidth * .5 / data[0].values.length, availableWidth * (data[0].values.length - .5) / data[0].values.length ]); - else - x.range([0, availableWidth]); - - y .domain(yDomain || d3.extent(seriesData.map(function(d) { return d.y }).concat(forceY))) - .range([availableHeight, 0]); - - z .domain(sizeDomain || d3.extent(seriesData.map(function(d) { return d.size }).concat(forceSize))) - .range(sizeRange || [16, 256]); - - // If scale's domain don't have a range, slightly adjust to make one... so a chart can show a single data point - if (x.domain()[0] === x.domain()[1] || y.domain()[0] === y.domain()[1]) singlePoint = true; - if (x.domain()[0] === x.domain()[1]) - x.domain()[0] ? 
- x.domain([x.domain()[0] - x.domain()[0] * 0.01, x.domain()[1] + x.domain()[1] * 0.01]) - : x.domain([-1,1]); - - if (y.domain()[0] === y.domain()[1]) - y.domain()[0] ? - y.domain([y.domain()[0] + y.domain()[0] * 0.01, y.domain()[1] - y.domain()[1] * 0.01]) - : y.domain([-1,1]); - - if ( isNaN(x.domain()[0])) { - x.domain([-1,1]); - } - - if ( isNaN(y.domain()[0])) { - y.domain([-1,1]); - } - - - x0 = x0 || x; - y0 = y0 || y; - z0 = z0 || z; - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Setup containers and skeleton of chart - - var wrap = container.selectAll('g.nv-wrap.nv-scatter').data([data]); - var wrapEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-scatter nv-chart-' + id + (singlePoint ? ' nv-single-point' : '')); - var defsEnter = wrapEnter.append('defs'); - var gEnter = wrapEnter.append('g'); - var g = wrap.select('g'); - - gEnter.append('g').attr('class', 'nv-groups'); - gEnter.append('g').attr('class', 'nv-point-paths'); - - wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')'); - - //------------------------------------------------------------ - - - defsEnter.append('clipPath') - .attr('id', 'nv-edge-clip-' + id) - .append('rect'); - - wrap.select('#nv-edge-clip-' + id + ' rect') - .attr('width', availableWidth) - .attr('height', availableHeight); - - g .attr('clip-path', clipEdge ? 'url(#nv-edge-clip-' + id + ')' : ''); - - - function updateInteractiveLayer() { - - if (!interactive) return false; - - var eventElements; - - var vertices = d3.merge(data.map(function(group, groupIndex) { - return group.values - .map(function(point, pointIndex) { - // *Adding noise to make duplicates very unlikely - // *Injecting series and point index for reference - /* *Adding a 'jitter' to the points, because there's an issue in d3.geom.voronoi. 
- */ - var pX = getX(point,pointIndex) + Math.random() * 1e-7; - var pY = getY(point,pointIndex) + Math.random() * 1e-7; - - return [x(pX), - y(pY), - groupIndex, - pointIndex, point]; //temp hack to add noise untill I think of a better way so there are no duplicates - }) - .filter(function(pointArray, pointIndex) { - return pointActive(pointArray[4], pointIndex); // Issue #237.. move filter to after map, so pointIndex is correct! - }) - }) - ); - - - - //inject series and point index for reference into voronoi - if (useVoronoi === true) { - - if (clipVoronoi) { - var pointClipsEnter = wrap.select('defs').selectAll('.nv-point-clips') - .data([id]) - .enter(); - - pointClipsEnter.append('clipPath') - .attr('class', 'nv-point-clips') - .attr('id', 'nv-points-clip-' + id); - - var pointClips = wrap.select('#nv-points-clip-' + id).selectAll('circle') - .data(vertices); - pointClips.enter().append('circle') - .attr('r', clipRadius); - pointClips.exit().remove(); - pointClips - .attr('cx', function(d) { return d[0] }) - .attr('cy', function(d) { return d[1] }); - - wrap.select('.nv-point-paths') - .attr('clip-path', 'url(#nv-points-clip-' + id + ')'); - } - - - if(vertices.length) { - // Issue #283 - Adding 2 dummy points to the voronoi b/c voronoi requires min 3 points to work - vertices.push([x.range()[0] - 20, y.range()[0] - 20, null, null]); - vertices.push([x.range()[1] + 20, y.range()[1] + 20, null, null]); - vertices.push([x.range()[0] - 20, y.range()[0] + 20, null, null]); - vertices.push([x.range()[1] + 20, y.range()[1] - 20, null, null]); - } - - var bounds = d3.geom.polygon([ - [-10,-10], - [-10,height + 10], - [width + 10,height + 10], - [width + 10,-10] - ]); - - var voronoi = d3.geom.voronoi(vertices).map(function(d, i) { - return { - 'data': bounds.clip(d), - 'series': vertices[i][2], - 'point': vertices[i][3] - } - }); - - - var pointPaths = wrap.select('.nv-point-paths').selectAll('path') - .data(voronoi); - pointPaths.enter().append('path') - 
.attr('class', function(d,i) { return 'nv-path-'+i; }); - pointPaths.exit().remove(); - pointPaths - .attr('d', function(d) { - if (d.data.length === 0) - return 'M 0 0' - else - return 'M' + d.data.join('L') + 'Z'; - }); - - pointPaths - .on('click', function(d) { - if (needsUpdate) return 0; - var series = data[d.series], - point = series.values[d.point]; - - dispatch.elementClick({ - point: point, - series: series, - pos: [x(getX(point, d.point)) + margin.left, y(getY(point, d.point)) + margin.top], - seriesIndex: d.series, - pointIndex: d.point - }); - }) - .on('mouseover', function(d) { - if (needsUpdate) return 0; - var series = data[d.series], - point = series.values[d.point]; - - dispatch.elementMouseover({ - point: point, - series: series, - pos: [x(getX(point, d.point)) + margin.left, y(getY(point, d.point)) + margin.top], - seriesIndex: d.series, - pointIndex: d.point - }); - }) - .on('mouseout', function(d, i) { - if (needsUpdate) return 0; - var series = data[d.series], - point = series.values[d.point]; - - dispatch.elementMouseout({ - point: point, - series: series, - seriesIndex: d.series, - pointIndex: d.point - }); - }); - - - } else { - /* - // bring data in form needed for click handlers - var dataWithPoints = vertices.map(function(d, i) { - return { - 'data': d, - 'series': vertices[i][2], - 'point': vertices[i][3] - } - }); - */ - - // add event handlers to points instead voronoi paths - wrap.select('.nv-groups').selectAll('.nv-group') - .selectAll('.nv-point') - //.data(dataWithPoints) - //.style('pointer-events', 'auto') // recativate events, disabled by css - .on('click', function(d,i) { - //nv.log('test', d, i); - if (needsUpdate || !data[d.series]) return 0; //check if this is a dummy point - var series = data[d.series], - point = series.values[i]; - - dispatch.elementClick({ - point: point, - series: series, - pos: [x(getX(point, i)) + margin.left, y(getY(point, i)) + margin.top], - seriesIndex: d.series, - pointIndex: i - }); - }) - 
.on('mouseover', function(d,i) { - if (needsUpdate || !data[d.series]) return 0; //check if this is a dummy point - var series = data[d.series], - point = series.values[i]; - - dispatch.elementMouseover({ - point: point, - series: series, - pos: [x(getX(point, i)) + margin.left, y(getY(point, i)) + margin.top], - seriesIndex: d.series, - pointIndex: i - }); - }) - .on('mouseout', function(d,i) { - if (needsUpdate || !data[d.series]) return 0; //check if this is a dummy point - var series = data[d.series], - point = series.values[i]; - - dispatch.elementMouseout({ - point: point, - series: series, - seriesIndex: d.series, - pointIndex: i - }); - }); - } - - needsUpdate = false; - } - - needsUpdate = true; - - var groups = wrap.select('.nv-groups').selectAll('.nv-group') - .data(function(d) { return d }, function(d) { return d.key }); - groups.enter().append('g') - .style('stroke-opacity', 1e-6) - .style('fill-opacity', 1e-6); - d3.transition(groups.exit()) - .style('stroke-opacity', 1e-6) - .style('fill-opacity', 1e-6) - .remove(); - groups - .attr('class', function(d,i) { return 'nv-group nv-series-' + i }) - .classed('hover', function(d) { return d.hover }); - d3.transition(groups) - .style('fill', function(d,i) { return color(d, i) }) - .style('stroke', function(d,i) { return color(d, i) }) - .style('stroke-opacity', 1) - .style('fill-opacity', .5); - - - if (onlyCircles) { - - var points = groups.selectAll('circle.nv-point') - .data(function(d) { return d.values }, pointKey); - points.enter().append('circle') - .attr('cx', function(d,i) { return x0(getX(d,i)) }) - .attr('cy', function(d,i) { return y0(getY(d,i)) }) - .attr('r', function(d,i) { return Math.sqrt(z(getSize(d,i))/Math.PI) }); - points.exit().remove(); - groups.exit().selectAll('path.nv-point').transition() - .attr('cx', function(d,i) { return x(getX(d,i)) }) - .attr('cy', function(d,i) { return y(getY(d,i)) }) - .remove(); - points.each(function(d,i) { - d3.select(this) - .classed('nv-point', true) 
- .classed('nv-point-' + i, true); - }); - points.transition() - .attr('cx', function(d,i) { return x(getX(d,i)) }) - .attr('cy', function(d,i) { return y(getY(d,i)) }) - .attr('r', function(d,i) { return Math.sqrt(z(getSize(d,i))/Math.PI) }); - - } else { - - var points = groups.selectAll('path.nv-point') - .data(function(d) { return d.values }); - points.enter().append('path') - .attr('transform', function(d,i) { - return 'translate(' + x0(getX(d,i)) + ',' + y0(getY(d,i)) + ')' - }) - .attr('d', - d3.svg.symbol() - .type(getShape) - .size(function(d,i) { return z(getSize(d,i)) }) - ); - points.exit().remove(); - d3.transition(groups.exit().selectAll('path.nv-point')) - .attr('transform', function(d,i) { - return 'translate(' + x(getX(d,i)) + ',' + y(getY(d,i)) + ')' - }) - .remove(); - points.each(function(d,i) { - d3.select(this) - .classed('nv-point', true) - .classed('nv-point-' + i, true); - }); - points.transition() - .attr('transform', function(d,i) { - //nv.log(d,i,getX(d,i), x(getX(d,i))); - return 'translate(' + x(getX(d,i)) + ',' + y(getY(d,i)) + ')' - }) - .attr('d', - d3.svg.symbol() - .type(getShape) - .size(function(d,i) { return z(getSize(d,i)) }) - ); - } - - - // Delay updating the invisible interactive layer for smoother animation - clearTimeout(timeoutID); // stop repeat calls to updateInteractiveLayer - timeoutID = setTimeout(updateInteractiveLayer, 300); - //updateInteractiveLayer(); - - //store old scales for use in transitions on update - x0 = x.copy(); - y0 = y.copy(); - z0 = z.copy(); - - }); - - return chart; - } - - - //============================================================ - // Event Handling/Dispatching (out of chart's scope) - //------------------------------------------------------------ - - dispatch.on('elementMouseover.point', function(d) { - if (interactive) - d3.select('.nv-chart-' + id + ' .nv-series-' + d.seriesIndex + ' .nv-point-' + d.pointIndex) - .classed('hover', true); - }); - - dispatch.on('elementMouseout.point', 
function(d) { - if (interactive) - d3.select('.nv-chart-' + id + ' .nv-series-' + d.seriesIndex + ' .nv-point-' + d.pointIndex) - .classed('hover', false); - }); - - //============================================================ - - - //============================================================ - // Expose Public Variables - //------------------------------------------------------------ - - chart.dispatch = dispatch; - - chart.x = function(_) { - if (!arguments.length) return getX; - getX = d3.functor(_); - return chart; - }; - - chart.y = function(_) { - if (!arguments.length) return getY; - getY = d3.functor(_); - return chart; - }; - - chart.size = function(_) { - if (!arguments.length) return getSize; - getSize = d3.functor(_); - return chart; - }; - - chart.margin = function(_) { - if (!arguments.length) return margin; - margin.top = typeof _.top != 'undefined' ? _.top : margin.top; - margin.right = typeof _.right != 'undefined' ? _.right : margin.right; - margin.bottom = typeof _.bottom != 'undefined' ? _.bottom : margin.bottom; - margin.left = typeof _.left != 'undefined' ? 
_.left : margin.left; - return chart; - }; - - chart.width = function(_) { - if (!arguments.length) return width; - width = _; - return chart; - }; - - chart.height = function(_) { - if (!arguments.length) return height; - height = _; - return chart; - }; - - chart.xScale = function(_) { - if (!arguments.length) return x; - x = _; - return chart; - }; - - chart.yScale = function(_) { - if (!arguments.length) return y; - y = _; - return chart; - }; - - chart.zScale = function(_) { - if (!arguments.length) return z; - z = _; - return chart; - }; - - chart.xDomain = function(_) { - if (!arguments.length) return xDomain; - xDomain = _; - return chart; - }; - - chart.yDomain = function(_) { - if (!arguments.length) return yDomain; - yDomain = _; - return chart; - }; - - chart.sizeDomain = function(_) { - if (!arguments.length) return sizeDomain; - sizeDomain = _; - return chart; - }; - - chart.sizeRange = function(_) { - if (!arguments.length) return sizeRange; - sizeRange = _; - return chart; - }; - - chart.forceX = function(_) { - if (!arguments.length) return forceX; - forceX = _; - return chart; - }; - - chart.forceY = function(_) { - if (!arguments.length) return forceY; - forceY = _; - return chart; - }; - - chart.forceSize = function(_) { - if (!arguments.length) return forceSize; - forceSize = _; - return chart; - }; - - chart.interactive = function(_) { - if (!arguments.length) return interactive; - interactive = _; - return chart; - }; - - chart.pointKey = function(_) { - if (!arguments.length) return pointKey; - pointKey = _; - return chart; - }; - - chart.pointActive = function(_) { - if (!arguments.length) return pointActive; - pointActive = _; - return chart; - }; - - chart.padData = function(_) { - if (!arguments.length) return padData; - padData = _; - return chart; - }; - - chart.padDataOuter = function(_) { - if (!arguments.length) return padDataOuter; - padDataOuter = _; - return chart; - }; - - chart.clipEdge = function(_) { - if (!arguments.length) 
return clipEdge; - clipEdge = _; - return chart; - }; - - chart.clipVoronoi= function(_) { - if (!arguments.length) return clipVoronoi; - clipVoronoi = _; - return chart; - }; - - chart.useVoronoi= function(_) { - if (!arguments.length) return useVoronoi; - useVoronoi = _; - if (useVoronoi === false) { - clipVoronoi = false; - } - return chart; - }; - - chart.clipRadius = function(_) { - if (!arguments.length) return clipRadius; - clipRadius = _; - return chart; - }; - - chart.color = function(_) { - if (!arguments.length) return color; - color = nv.utils.getColor(_); - return chart; - }; - - chart.shape = function(_) { - if (!arguments.length) return getShape; - getShape = _; - return chart; - }; - - chart.onlyCircles = function(_) { - if (!arguments.length) return onlyCircles; - onlyCircles = _; - return chart; - }; - - chart.id = function(_) { - if (!arguments.length) return id; - id = _; - return chart; - }; - - chart.singlePoint = function(_) { - if (!arguments.length) return singlePoint; - singlePoint = _; - return chart; - }; - - //============================================================ - - - return chart; -} - -nv.models.scatterChart = function() { - - //============================================================ - // Public Variables with Default Settings - //------------------------------------------------------------ - - var scatter = nv.models.scatter() - , xAxis = nv.models.axis() - , yAxis = nv.models.axis() - , legend = nv.models.legend() - , controls = nv.models.legend() - , distX = nv.models.distribution() - , distY = nv.models.distribution() - ; - - var margin = {top: 30, right: 20, bottom: 50, left: 75} - , width = null - , height = null - , color = nv.utils.defaultColor() - , x = d3.fisheye ? d3.fisheye.scale(d3.scale.linear).distortion(0) : scatter.xScale() - , y = d3.fisheye ? 
d3.fisheye.scale(d3.scale.linear).distortion(0) : scatter.yScale() - , xPadding = 0 - , yPadding = 0 - , showDistX = false - , showDistY = false - , showLegend = true - , showControls = !!d3.fisheye - , fisheye = 0 - , pauseFisheye = false - , tooltips = true - , tooltipX = function(key, x, y) { return '<strong>' + x + '</strong>' } - , tooltipY = function(key, x, y) { return '<strong>' + y + '</strong>' } - //, tooltip = function(key, x, y) { return '<h3>' + key + '</h3>' } - , tooltip = null - , state = {} - , defaultState = null - , dispatch = d3.dispatch('tooltipShow', 'tooltipHide', 'stateChange', 'changeState') - , noData = "No Data Available." - ; - - scatter - .xScale(x) - .yScale(y) - ; - xAxis - .orient('bottom') - .tickPadding(10) - ; - yAxis - .orient('left') - .tickPadding(10) - ; - distX - .axis('x') - ; - distY - .axis('y') - ; - - //============================================================ - - - //============================================================ - // Private Variables - //------------------------------------------------------------ - - var x0, y0; - - var showTooltip = function(e, offsetElement) { - //TODO: make tooltip style an option between single or dual on axes (maybe on all charts with axes?) 
- - var left = e.pos[0] + ( offsetElement.offsetLeft || 0 ), - top = e.pos[1] + ( offsetElement.offsetTop || 0), - leftX = e.pos[0] + ( offsetElement.offsetLeft || 0 ), - topX = y.range()[0] + margin.top + ( offsetElement.offsetTop || 0), - leftY = x.range()[0] + margin.left + ( offsetElement.offsetLeft || 0 ), - topY = e.pos[1] + ( offsetElement.offsetTop || 0), - xVal = xAxis.tickFormat()(scatter.x()(e.point, e.pointIndex)), - yVal = yAxis.tickFormat()(scatter.y()(e.point, e.pointIndex)); - - if( tooltipX != null ) - nv.tooltip.show([leftX, topX], tooltipX(e.series.key, xVal, yVal, e, chart), 'n', 1, offsetElement, 'x-nvtooltip'); - if( tooltipY != null ) - nv.tooltip.show([leftY, topY], tooltipY(e.series.key, xVal, yVal, e, chart), 'e', 1, offsetElement, 'y-nvtooltip'); - if( tooltip != null ) - nv.tooltip.show([left, top], tooltip(e.series.key, xVal, yVal, e, chart), e.value < 0 ? 'n' : 's', null, offsetElement); - }; - - var controlsData = [ - { key: 'Magnify', disabled: true } - ]; - - //============================================================ - - - function chart(selection) { - selection.each(function(data) { - var container = d3.select(this), - that = this; - - var availableWidth = (width || parseInt(container.style('width')) || 960) - - margin.left - margin.right, - availableHeight = (height || parseInt(container.style('height')) || 400) - - margin.top - margin.bottom; - - chart.update = function() { container.transition().call(chart); }; - // chart.container = this; - - //set state.disabled - state.disabled = data.map(function(d) { return !!d.disabled }); - - if (!defaultState) { - var key; - defaultState = {}; - for (key in state) { - if (state[key] instanceof Array) - defaultState[key] = state[key].slice(0); - else - defaultState[key] = state[key]; - } - } - - //------------------------------------------------------------ - // Display noData message if there's nothing to show. 
- - if (!data || !data.length || !data.filter(function(d) { return d.values.length }).length) { - var noDataText = container.selectAll('.nv-noData').data([noData]); - - noDataText.enter().append('text') - .attr('class', 'nvd3 nv-noData') - .attr('dy', '-.7em') - .style('text-anchor', 'middle'); - - noDataText - .attr('x', margin.left + availableWidth / 2) - .attr('y', margin.top + availableHeight / 2) - .text(function(d) { return d }); - - return chart; - } else { - container.selectAll('.nv-noData').remove(); - } - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Setup Scales - - x0 = x0 || x; - y0 = y0 || y; - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Setup containers and skeleton of chart - - var wrap = container.selectAll('g.nv-wrap.nv-scatterChart').data([data]); - var wrapEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-scatterChart nv-chart-' + scatter.id()); - var gEnter = wrapEnter.append('g'); - var g = wrap.select('g'); - - // background for pointer events - gEnter.append('rect').attr('class', 'nvd3 nv-background'); - - gEnter.append('g').attr('class', 'nv-x nv-axis'); - gEnter.append('g').attr('class', 'nv-y nv-axis'); - gEnter.append('g').attr('class', 'nv-scatterWrap'); - gEnter.append('g').attr('class', 'nv-distWrap'); - gEnter.append('g').attr('class', 'nv-legendWrap'); - gEnter.append('g').attr('class', 'nv-controlsWrap'); - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Legend - - if (showLegend) { - legend.width( availableWidth / 2 ); - - wrap.select('.nv-legendWrap') - .datum(data) - .call(legend); - - if ( margin.top != legend.height()) { - margin.top = legend.height(); - availableHeight = (height || parseInt(container.style('height')) || 400) - - 
margin.top - margin.bottom; - } - - wrap.select('.nv-legendWrap') - .attr('transform', 'translate(' + (availableWidth / 2) + ',' + (-margin.top) +')'); - } - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Controls - - if (showControls) { - controls.width(180).color(['#444']); - g.select('.nv-controlsWrap') - .datum(controlsData) - .attr('transform', 'translate(0,' + (-margin.top) +')') - .call(controls); - } - - //------------------------------------------------------------ - - - wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')'); - - - //------------------------------------------------------------ - // Main Chart Component(s) - - scatter - .width(availableWidth) - .height(availableHeight) - .color(data.map(function(d,i) { - return d.color || color(d, i); - }).filter(function(d,i) { return !data[i].disabled })) - .xDomain(null) - .yDomain(null) - - wrap.select('.nv-scatterWrap') - .datum(data.filter(function(d) { return !d.disabled })) - .call(scatter); - - - //Adjust for x and y padding - if (xPadding) { - var xRange = x.domain()[1] - x.domain()[0]; - scatter.xDomain([x.domain()[0] - (xPadding * xRange), x.domain()[1] + (xPadding * xRange)]); - } - - if (yPadding) { - var yRange = y.domain()[1] - y.domain()[0]; - scatter.yDomain([y.domain()[0] - (yPadding * yRange), y.domain()[1] + (yPadding * yRange)]); - } - - wrap.select('.nv-scatterWrap') - .datum(data.filter(function(d) { return !d.disabled })) - .call(scatter); - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Setup Axes - - xAxis - .scale(x) - .ticks( xAxis.ticks() && xAxis.ticks().length ? 
xAxis.ticks() : availableWidth / 100 ) - .tickSize( -availableHeight , 0); - - g.select('.nv-x.nv-axis') - .attr('transform', 'translate(0,' + y.range()[0] + ')') - .call(xAxis); - - - yAxis - .scale(y) - .ticks( yAxis.ticks() && yAxis.ticks().length ? yAxis.ticks() : availableHeight / 36 ) - .tickSize( -availableWidth, 0); - - g.select('.nv-y.nv-axis') - .call(yAxis); - - - if (showDistX) { - distX - .getData(scatter.x()) - .scale(x) - .width(availableWidth) - .color(data.map(function(d,i) { - return d.color || color(d, i); - }).filter(function(d,i) { return !data[i].disabled })); - gEnter.select('.nv-distWrap').append('g') - .attr('class', 'nv-distributionX'); - g.select('.nv-distributionX') - .attr('transform', 'translate(0,' + y.range()[0] + ')') - .datum(data.filter(function(d) { return !d.disabled })) - .call(distX); - } - - if (showDistY) { - distY - .getData(scatter.y()) - .scale(y) - .width(availableHeight) - .color(data.map(function(d,i) { - return d.color || color(d, i); - }).filter(function(d,i) { return !data[i].disabled })); - gEnter.select('.nv-distWrap').append('g') - .attr('class', 'nv-distributionY'); - g.select('.nv-distributionY') - .attr('transform', 'translate(-' + distY.size() + ',0)') - .datum(data.filter(function(d) { return !d.disabled })) - .call(distY); - } - - //------------------------------------------------------------ - - - - - if (d3.fisheye) { - g.select('.nv-background') - .attr('width', availableWidth) - .attr('height', availableHeight); - - g.select('.nv-background').on('mousemove', updateFisheye); - g.select('.nv-background').on('click', function() { pauseFisheye = !pauseFisheye;}); - scatter.dispatch.on('elementClick.freezeFisheye', function() { - pauseFisheye = !pauseFisheye; - }); - } - - - function updateFisheye() { - if (pauseFisheye) { - g.select('.nv-point-paths').style('pointer-events', 'all'); - return false; - } - - g.select('.nv-point-paths').style('pointer-events', 'none' ); - - var mouse = d3.mouse(this); - 
x.distortion(fisheye).focus(mouse[0]); - y.distortion(fisheye).focus(mouse[1]); - - g.select('.nv-scatterWrap') - .call(scatter); - - g.select('.nv-x.nv-axis').call(xAxis); - g.select('.nv-y.nv-axis').call(yAxis); - g.select('.nv-distributionX') - .datum(data.filter(function(d) { return !d.disabled })) - .call(distX); - g.select('.nv-distributionY') - .datum(data.filter(function(d) { return !d.disabled })) - .call(distY); - } - - - - //============================================================ - // Event Handling/Dispatching (in chart's scope) - //------------------------------------------------------------ - - controls.dispatch.on('legendClick', function(d,i) { - d.disabled = !d.disabled; - - fisheye = d.disabled ? 0 : 2.5; - g.select('.nv-background') .style('pointer-events', d.disabled ? 'none' : 'all'); - g.select('.nv-point-paths').style('pointer-events', d.disabled ? 'all' : 'none' ); - - if (d.disabled) { - x.distortion(fisheye).focus(0); - y.distortion(fisheye).focus(0); - - g.select('.nv-scatterWrap').call(scatter); - g.select('.nv-x.nv-axis').call(xAxis); - g.select('.nv-y.nv-axis').call(yAxis); - } else { - pauseFisheye = false; - } - - chart.update(); - }); - - legend.dispatch.on('legendClick', function(d,i, that) { - d.disabled = !d.disabled; - - if (!data.filter(function(d) { return !d.disabled }).length) { - data.map(function(d) { - d.disabled = false; - wrap.selectAll('.nv-series').classed('disabled', false); - return d; - }); - } - - state.disabled = data.map(function(d) { return !!d.disabled }); - dispatch.stateChange(state); - - chart.update(); - }); - - legend.dispatch.on('legendDblclick', function(d) { - //Double clicking should always enable current series, and disabled all others. 
- data.forEach(function(d) { - d.disabled = true; - }); - d.disabled = false; - - state.disabled = data.map(function(d) { return !!d.disabled }); - dispatch.stateChange(state); - chart.update(); - }); - - - /* - legend.dispatch.on('legendMouseover', function(d, i) { - d.hover = true; - chart(selection); - }); - - legend.dispatch.on('legendMouseout', function(d, i) { - d.hover = false; - chart(selection); - }); - */ - - scatter.dispatch.on('elementMouseover.tooltip', function(e) { - d3.select('.nv-chart-' + scatter.id() + ' .nv-series-' + e.seriesIndex + ' .nv-distx-' + e.pointIndex) - .attr('y1', function(d,i) { return e.pos[1] - availableHeight;}); - d3.select('.nv-chart-' + scatter.id() + ' .nv-series-' + e.seriesIndex + ' .nv-disty-' + e.pointIndex) - .attr('x2', e.pos[0] + distX.size()); - - e.pos = [e.pos[0] + margin.left, e.pos[1] + margin.top]; - dispatch.tooltipShow(e); - }); - - dispatch.on('tooltipShow', function(e) { - if (tooltips) showTooltip(e, that.parentNode); - }); - - // Update chart from a state object passed to event handler - dispatch.on('changeState', function(e) { - - if (typeof e.disabled !== 'undefined') { - data.forEach(function(series,i) { - series.disabled = e.disabled[i]; - }); - - state.disabled = e.disabled; - } - - chart.update(); - }); - - //============================================================ - - - //store old scales for use in transitions on update - x0 = x.copy(); - y0 = y.copy(); - - - }); - - return chart; - } - - - //============================================================ - // Event Handling/Dispatching (out of chart's scope) - //------------------------------------------------------------ - - scatter.dispatch.on('elementMouseout.tooltip', function(e) { - dispatch.tooltipHide(e); - - d3.select('.nv-chart-' + scatter.id() + ' .nv-series-' + e.seriesIndex + ' .nv-distx-' + e.pointIndex) - .attr('y1', 0); - d3.select('.nv-chart-' + scatter.id() + ' .nv-series-' + e.seriesIndex + ' .nv-disty-' + e.pointIndex) - 
.attr('x2', distY.size()); - }); - dispatch.on('tooltipHide', function() { - if (tooltips) nv.tooltip.cleanup(); - }); - - //============================================================ - - - //============================================================ - // Expose Public Variables - //------------------------------------------------------------ - - // expose chart's sub-components - chart.dispatch = dispatch; - chart.scatter = scatter; - chart.legend = legend; - chart.controls = controls; - chart.xAxis = xAxis; - chart.yAxis = yAxis; - chart.distX = distX; - chart.distY = distY; - - d3.rebind(chart, scatter, 'id', 'interactive', 'pointActive', 'x', 'y', 'shape', 'size', 'xScale', 'yScale', 'zScale', 'xDomain', 'yDomain', 'sizeDomain', 'sizeRange', 'forceX', 'forceY', 'forceSize', 'clipVoronoi', 'clipRadius', 'useVoronoi'); - - chart.margin = function(_) { - if (!arguments.length) return margin; - margin.top = typeof _.top != 'undefined' ? _.top : margin.top; - margin.right = typeof _.right != 'undefined' ? _.right : margin.right; - margin.bottom = typeof _.bottom != 'undefined' ? _.bottom : margin.bottom; - margin.left = typeof _.left != 'undefined' ? 
_.left : margin.left; - return chart; - }; - - chart.width = function(_) { - if (!arguments.length) return width; - width = _; - return chart; - }; - - chart.height = function(_) { - if (!arguments.length) return height; - height = _; - return chart; - }; - - chart.color = function(_) { - if (!arguments.length) return color; - color = nv.utils.getColor(_); - legend.color(color); - distX.color(color); - distY.color(color); - return chart; - }; - - chart.showDistX = function(_) { - if (!arguments.length) return showDistX; - showDistX = _; - return chart; - }; - - chart.showDistY = function(_) { - if (!arguments.length) return showDistY; - showDistY = _; - return chart; - }; - - chart.showControls = function(_) { - if (!arguments.length) return showControls; - showControls = _; - return chart; - }; - - chart.showLegend = function(_) { - if (!arguments.length) return showLegend; - showLegend = _; - return chart; - }; - - chart.fisheye = function(_) { - if (!arguments.length) return fisheye; - fisheye = _; - return chart; - }; - - chart.xPadding = function(_) { - if (!arguments.length) return xPadding; - xPadding = _; - return chart; - }; - - chart.yPadding = function(_) { - if (!arguments.length) return yPadding; - yPadding = _; - return chart; - }; - - chart.tooltips = function(_) { - if (!arguments.length) return tooltips; - tooltips = _; - return chart; - }; - - chart.tooltipContent = function(_) { - if (!arguments.length) return tooltip; - tooltip = _; - return chart; - }; - - chart.tooltipXContent = function(_) { - if (!arguments.length) return tooltipX; - tooltipX = _; - return chart; - }; - - chart.tooltipYContent = function(_) { - if (!arguments.length) return tooltipY; - tooltipY = _; - return chart; - }; - - chart.state = function(_) { - if (!arguments.length) return state; - state = _; - return chart; - }; - - chart.defaultState = function(_) { - if (!arguments.length) return defaultState; - defaultState = _; - return chart; - }; - - chart.noData = 
function(_) { - if (!arguments.length) return noData; - noData = _; - return chart; - }; - - //============================================================ - - - return chart; -} - -nv.models.scatterPlusLineChart = function() { - - //============================================================ - // Public Variables with Default Settings - //------------------------------------------------------------ - - var scatter = nv.models.scatter() - , xAxis = nv.models.axis() - , yAxis = nv.models.axis() - , legend = nv.models.legend() - , controls = nv.models.legend() - , distX = nv.models.distribution() - , distY = nv.models.distribution() - ; - - var margin = {top: 30, right: 20, bottom: 50, left: 75} - , width = null - , height = null - , color = nv.utils.defaultColor() - , x = d3.fisheye ? d3.fisheye.scale(d3.scale.linear).distortion(0) : scatter.xScale() - , y = d3.fisheye ? d3.fisheye.scale(d3.scale.linear).distortion(0) : scatter.yScale() - , showDistX = false - , showDistY = false - , showLegend = true - , showControls = !!d3.fisheye - , fisheye = 0 - , pauseFisheye = false - , tooltips = true - , tooltipX = function(key, x, y) { return '<strong>' + x + '</strong>' } - , tooltipY = function(key, x, y) { return '<strong>' + y + '</strong>' } - , tooltip = function(key, x, y, date) { return '<h3>' + key + '</h3>' - + '<p>' + date + '</p>' } - //, tooltip = null - , state = {} - , defaultState = null - , dispatch = d3.dispatch('tooltipShow', 'tooltipHide', 'stateChange', 'changeState') - , noData = "No Data Available." 
- ; - - scatter - .xScale(x) - .yScale(y) - ; - xAxis - .orient('bottom') - .tickPadding(10) - ; - yAxis - .orient('left') - .tickPadding(10) - ; - distX - .axis('x') - ; - distY - .axis('y') - ; - - //============================================================ - - - //============================================================ - // Private Variables - //------------------------------------------------------------ - - var x0, y0; - - var showTooltip = function(e, offsetElement) { - //TODO: make tooltip style an option between single or dual on axes (maybe on all charts with axes?) - - var left = e.pos[0] + ( offsetElement.offsetLeft || 0 ), - top = e.pos[1] + ( offsetElement.offsetTop || 0), - leftX = e.pos[0] + ( offsetElement.offsetLeft || 0 ), - topX = y.range()[0] + margin.top + ( offsetElement.offsetTop || 0), - leftY = x.range()[0] + margin.left + ( offsetElement.offsetLeft || 0 ), - topY = e.pos[1] + ( offsetElement.offsetTop || 0), - xVal = xAxis.tickFormat()(scatter.x()(e.point, e.pointIndex)), - yVal = yAxis.tickFormat()(scatter.y()(e.point, e.pointIndex)); - - if( tooltipX != null ) - nv.tooltip.show([leftX, topX], tooltipX(e.series.key, xVal, yVal, e, chart), 'n', 1, offsetElement, 'x-nvtooltip'); - if( tooltipY != null ) - nv.tooltip.show([leftY, topY], tooltipY(e.series.key, xVal, yVal, e, chart), 'e', 1, offsetElement, 'y-nvtooltip'); - if( tooltip != null ) - nv.tooltip.show([left, top], tooltip(e.series.key, xVal, yVal, e.point.tooltip, e, chart), e.value < 0 ? 
'n' : 's', null, offsetElement); - }; - - var controlsData = [ - { key: 'Magnify', disabled: true } - ]; - - //============================================================ - - - function chart(selection) { - selection.each(function(data) { - var container = d3.select(this), - that = this; - - var availableWidth = (width || parseInt(container.style('width')) || 960) - - margin.left - margin.right, - availableHeight = (height || parseInt(container.style('height')) || 400) - - margin.top - margin.bottom; - - chart.update = function() { container.transition().call(chart); }; - chart.container = this; - - //set state.disabled - state.disabled = data.map(function(d) { return !!d.disabled }); - - if (!defaultState) { - var key; - defaultState = {}; - for (key in state) { - if (state[key] instanceof Array) - defaultState[key] = state[key].slice(0); - else - defaultState[key] = state[key]; - } - } - - //------------------------------------------------------------ - // Display noData message if there's nothing to show. 
- - if (!data || !data.length || !data.filter(function(d) { return d.values.length }).length) { - var noDataText = container.selectAll('.nv-noData').data([noData]); - - noDataText.enter().append('text') - .attr('class', 'nvd3 nv-noData') - .attr('dy', '-.7em') - .style('text-anchor', 'middle'); - - noDataText - .attr('x', margin.left + availableWidth / 2) - .attr('y', margin.top + availableHeight / 2) - .text(function(d) { return d }); - - return chart; - } else { - container.selectAll('.nv-noData').remove(); - } - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Setup Scales - - x = scatter.xScale(); - y = scatter.yScale(); - - x0 = x0 || x; - y0 = y0 || y; - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Setup containers and skeleton of chart - - var wrap = container.selectAll('g.nv-wrap.nv-scatterChart').data([data]); - var wrapEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-scatterChart nv-chart-' + scatter.id()); - var gEnter = wrapEnter.append('g'); - var g = wrap.select('g') - - // background for pointer events - gEnter.append('rect').attr('class', 'nvd3 nv-background') - - gEnter.append('g').attr('class', 'nv-x nv-axis'); - gEnter.append('g').attr('class', 'nv-y nv-axis'); - gEnter.append('g').attr('class', 'nv-scatterWrap'); - gEnter.append('g').attr('class', 'nv-regressionLinesWrap'); - gEnter.append('g').attr('class', 'nv-distWrap'); - gEnter.append('g').attr('class', 'nv-legendWrap'); - gEnter.append('g').attr('class', 'nv-controlsWrap'); - - wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')'); - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Legend - - if (showLegend) { - legend.width( availableWidth / 2 ); - - wrap.select('.nv-legendWrap') - 
.datum(data) - .call(legend); - - if ( margin.top != legend.height()) { - margin.top = legend.height(); - availableHeight = (height || parseInt(container.style('height')) || 400) - - margin.top - margin.bottom; - } - - wrap.select('.nv-legendWrap') - .attr('transform', 'translate(' + (availableWidth / 2) + ',' + (-margin.top) +')'); - } - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Controls - - if (showControls) { - controls.width(180).color(['#444']); - g.select('.nv-controlsWrap') - .datum(controlsData) - .attr('transform', 'translate(0,' + (-margin.top) +')') - .call(controls); - } - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Main Chart Component(s) - - scatter - .width(availableWidth) - .height(availableHeight) - .color(data.map(function(d,i) { - return d.color || color(d, i); - }).filter(function(d,i) { return !data[i].disabled })) - - wrap.select('.nv-scatterWrap') - .datum(data.filter(function(d) { return !d.disabled })) - .call(scatter); - - - wrap.select('.nv-regressionLinesWrap') - .attr('clip-path', 'url(#nv-edge-clip-' + scatter.id() + ')'); - - var regWrap = wrap.select('.nv-regressionLinesWrap').selectAll('.nv-regLines') - .data(function(d) { return d }); - - var reglines = regWrap.enter() - .append('g').attr('class', 'nv-regLines') - .append('line').attr('class', 'nv-regLine') - .style('stroke-opacity', 0); - - //d3.transition(regWrap.selectAll('.nv-regLines line')) - regWrap.selectAll('.nv-regLines line') - .attr('x1', x.range()[0]) - .attr('x2', x.range()[1]) - .attr('y1', function(d,i) { return y(x.domain()[0] * d.slope + d.intercept) }) - .attr('y2', function(d,i) { return y(x.domain()[1] * d.slope + d.intercept) }) - .style('stroke', function(d,i,j) { return color(d,j) }) - .style('stroke-opacity', function(d,i) { - return (d.disabled || typeof d.slope === 
'undefined' || typeof d.intercept === 'undefined') ? 0 : 1 - }); - - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Setup Axes - - xAxis - .scale(x) - .ticks( xAxis.ticks() ? xAxis.ticks() : availableWidth / 100 ) - .tickSize( -availableHeight , 0); - - g.select('.nv-x.nv-axis') - .attr('transform', 'translate(0,' + y.range()[0] + ')') - .call(xAxis); - - - yAxis - .scale(y) - .ticks( yAxis.ticks() ? yAxis.ticks() : availableHeight / 36 ) - .tickSize( -availableWidth, 0); - - g.select('.nv-y.nv-axis') - .call(yAxis); - - - if (showDistX) { - distX - .getData(scatter.x()) - .scale(x) - .width(availableWidth) - .color(data.map(function(d,i) { - return d.color || color(d, i); - }).filter(function(d,i) { return !data[i].disabled })); - gEnter.select('.nv-distWrap').append('g') - .attr('class', 'nv-distributionX'); - g.select('.nv-distributionX') - .attr('transform', 'translate(0,' + y.range()[0] + ')') - .datum(data.filter(function(d) { return !d.disabled })) - .call(distX); - } - - if (showDistY) { - distY - .getData(scatter.y()) - .scale(y) - .width(availableHeight) - .color(data.map(function(d,i) { - return d.color || color(d, i); - }).filter(function(d,i) { return !data[i].disabled })); - gEnter.select('.nv-distWrap').append('g') - .attr('class', 'nv-distributionY'); - g.select('.nv-distributionY') - .attr('transform', 'translate(-' + distY.size() + ',0)') - .datum(data.filter(function(d) { return !d.disabled })) - .call(distY); - } - - //------------------------------------------------------------ - - - - - if (d3.fisheye) { - g.select('.nv-background') - .attr('width', availableWidth) - .attr('height', availableHeight); - - g.select('.nv-background').on('mousemove', updateFisheye); - g.select('.nv-background').on('click', function() { pauseFisheye = !pauseFisheye;}); - scatter.dispatch.on('elementClick.freezeFisheye', function() { - pauseFisheye = !pauseFisheye; - }); - } 
- - - function updateFisheye() { - if (pauseFisheye) { - g.select('.nv-point-paths').style('pointer-events', 'all'); - return false; - } - - g.select('.nv-point-paths').style('pointer-events', 'none' ); - - var mouse = d3.mouse(this); - x.distortion(fisheye).focus(mouse[0]); - y.distortion(fisheye).focus(mouse[1]); - - g.select('.nv-scatterWrap') - .datum(data.filter(function(d) { return !d.disabled })) - .call(scatter); - g.select('.nv-x.nv-axis').call(xAxis); - g.select('.nv-y.nv-axis').call(yAxis); - g.select('.nv-distributionX') - .datum(data.filter(function(d) { return !d.disabled })) - .call(distX); - g.select('.nv-distributionY') - .datum(data.filter(function(d) { return !d.disabled })) - .call(distY); - } - - - - //============================================================ - // Event Handling/Dispatching (in chart's scope) - //------------------------------------------------------------ - - controls.dispatch.on('legendClick', function(d,i) { - d.disabled = !d.disabled; - - fisheye = d.disabled ? 0 : 2.5; - g.select('.nv-background') .style('pointer-events', d.disabled ? 'none' : 'all'); - g.select('.nv-point-paths').style('pointer-events', d.disabled ? 
'all' : 'none' ); - - if (d.disabled) { - x.distortion(fisheye).focus(0); - y.distortion(fisheye).focus(0); - - g.select('.nv-scatterWrap').call(scatter); - g.select('.nv-x.nv-axis').call(xAxis); - g.select('.nv-y.nv-axis').call(yAxis); - } else { - pauseFisheye = false; - } - - chart.update(); - }); - - legend.dispatch.on('legendClick', function(d,i, that) { - d.disabled = !d.disabled; - - if (!data.filter(function(d) { return !d.disabled }).length) { - data.map(function(d) { - d.disabled = false; - wrap.selectAll('.nv-series').classed('disabled', false); - return d; - }); - } - - state.disabled = data.map(function(d) { return !!d.disabled }); - dispatch.stateChange(state); - - chart.update(); - }); - - legend.dispatch.on('legendDblclick', function(d) { - //Double clicking should always enable current series, and disabled all others. - data.forEach(function(d) { - d.disabled = true; - }); - d.disabled = false; - - state.disabled = data.map(function(d) { return !!d.disabled }); - dispatch.stateChange(state); - chart.update(); - }); - - - /* - legend.dispatch.on('legendMouseover', function(d, i) { - d.hover = true; - chart(selection); - }); - - legend.dispatch.on('legendMouseout', function(d, i) { - d.hover = false; - chart(selection); - }); - */ - - scatter.dispatch.on('elementMouseover.tooltip', function(e) { - d3.select('.nv-chart-' + scatter.id() + ' .nv-series-' + e.seriesIndex + ' .nv-distx-' + e.pointIndex) - .attr('y1', e.pos[1] - availableHeight); - d3.select('.nv-chart-' + scatter.id() + ' .nv-series-' + e.seriesIndex + ' .nv-disty-' + e.pointIndex) - .attr('x2', e.pos[0] + distX.size()); - - e.pos = [e.pos[0] + margin.left, e.pos[1] + margin.top]; - dispatch.tooltipShow(e); - }); - - dispatch.on('tooltipShow', function(e) { - if (tooltips) showTooltip(e, that.parentNode); - }); - - // Update chart from a state object passed to event handler - dispatch.on('changeState', function(e) { - - if (typeof e.disabled !== 'undefined') { - 
data.forEach(function(series,i) { - series.disabled = e.disabled[i]; - }); - - state.disabled = e.disabled; - } - - chart.update(); - }); - - //============================================================ - - - //store old scales for use in transitions on update - x0 = x.copy(); - y0 = y.copy(); - - - }); - - return chart; - } - - - //============================================================ - // Event Handling/Dispatching (out of chart's scope) - //------------------------------------------------------------ - - scatter.dispatch.on('elementMouseout.tooltip', function(e) { - dispatch.tooltipHide(e); - - d3.select('.nv-chart-' + scatter.id() + ' .nv-series-' + e.seriesIndex + ' .nv-distx-' + e.pointIndex) - .attr('y1', 0); - d3.select('.nv-chart-' + scatter.id() + ' .nv-series-' + e.seriesIndex + ' .nv-disty-' + e.pointIndex) - .attr('x2', distY.size()); - }); - dispatch.on('tooltipHide', function() { - if (tooltips) nv.tooltip.cleanup(); - }); - - //============================================================ - - - //============================================================ - // Expose Public Variables - //------------------------------------------------------------ - - // expose chart's sub-components - chart.dispatch = dispatch; - chart.scatter = scatter; - chart.legend = legend; - chart.controls = controls; - chart.xAxis = xAxis; - chart.yAxis = yAxis; - chart.distX = distX; - chart.distY = distY; - - d3.rebind(chart, scatter, 'id', 'interactive', 'pointActive', 'x', 'y', 'shape', 'size', 'xScale', 'yScale', 'zScale', 'xDomain', 'yDomain', 'sizeDomain', 'sizeRange', 'forceX', 'forceY', 'forceSize', 'clipVoronoi', 'clipRadius', 'useVoronoi'); - - chart.margin = function(_) { - if (!arguments.length) return margin; - margin.top = typeof _.top != 'undefined' ? _.top : margin.top; - margin.right = typeof _.right != 'undefined' ? _.right : margin.right; - margin.bottom = typeof _.bottom != 'undefined' ? 
_.bottom : margin.bottom; - margin.left = typeof _.left != 'undefined' ? _.left : margin.left; - return chart; - }; - - chart.width = function(_) { - if (!arguments.length) return width; - width = _; - return chart; - }; - - chart.height = function(_) { - if (!arguments.length) return height; - height = _; - return chart; - }; - - chart.color = function(_) { - if (!arguments.length) return color; - color = nv.utils.getColor(_); - legend.color(color); - distX.color(color); - distY.color(color); - return chart; - }; - - chart.showDistX = function(_) { - if (!arguments.length) return showDistX; - showDistX = _; - return chart; - }; - - chart.showDistY = function(_) { - if (!arguments.length) return showDistY; - showDistY = _; - return chart; - }; - - chart.showControls = function(_) { - if (!arguments.length) return showControls; - showControls = _; - return chart; - }; - - chart.showLegend = function(_) { - if (!arguments.length) return showLegend; - showLegend = _; - return chart; - }; - - chart.fisheye = function(_) { - if (!arguments.length) return fisheye; - fisheye = _; - return chart; - }; - - chart.tooltips = function(_) { - if (!arguments.length) return tooltips; - tooltips = _; - return chart; - }; - - chart.tooltipContent = function(_) { - if (!arguments.length) return tooltip; - tooltip = _; - return chart; - }; - - chart.tooltipXContent = function(_) { - if (!arguments.length) return tooltipX; - tooltipX = _; - return chart; - }; - - chart.tooltipYContent = function(_) { - if (!arguments.length) return tooltipY; - tooltipY = _; - return chart; - }; - - chart.state = function(_) { - if (!arguments.length) return state; - state = _; - return chart; - }; - - chart.defaultState = function(_) { - if (!arguments.length) return defaultState; - defaultState = _; - return chart; - }; - - chart.noData = function(_) { - if (!arguments.length) return noData; - noData = _; - return chart; - }; - - //============================================================ - - - 
return chart; -} - -nv.models.sparkline = function() { - - //============================================================ - // Public Variables with Default Settings - //------------------------------------------------------------ - - var margin = {top: 2, right: 0, bottom: 2, left: 0} - , width = 400 - , height = 32 - , animate = true - , x = d3.scale.linear() - , y = d3.scale.linear() - , getX = function(d) { return d.x } - , getY = function(d) { return d.y } - , color = nv.utils.getColor(['#000']) - , xDomain - , yDomain - ; - - //============================================================ - - - function chart(selection) { - selection.each(function(data) { - var availableWidth = width - margin.left - margin.right, - availableHeight = height - margin.top - margin.bottom, - container = d3.select(this); - - - //------------------------------------------------------------ - // Setup Scales - - x .domain(xDomain || d3.extent(data, getX )) - .range([0, availableWidth]); - - y .domain(yDomain || d3.extent(data, getY )) - .range([availableHeight, 0]); - - //------------------------------------------------------------ - - - //------------------------------------------------------------ - // Setup containers and skeleton of chart - - var wrap = container.selectAll('g.nv-wrap.nv-sparkline').data([data]); - var wrapEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-sparkline'); - var gEnter = wrapEnter.append('g'); - var g = wrap.select('g'); - - wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')') - - //------------------------------------------------------------ - - - var paths = wrap.selectAll('path') - .data(function(d) { return [d] }); - paths.enter().append('path'); - paths.exit().remove(); - paths - .style('stroke', function(d,i) { return d.color || color(d, i) }) - .attr('d', d3.svg.line() - .x(function(d,i) { return x(getX(d,i)) }) - .y(function(d,i) { return y(getY(d,i)) }) - ); - - - // TODO: Add CURRENT data point (Need Min, 
Mac, Current / Most recent) - var points = wrap.selectAll('circle.nv-point') - .data(function(data) { - var yValues = data.map(function(d, i) { return getY(d,i); }); - function pointIndex(index) { - if (index != -1) { - var result = data[index]; - result.pointIndex = index; - return result; - } else { - return null; - } - } - var maxPoint = pointIndex(yValues.lastIndexOf(y.domain()[1])), - minPoint = pointIndex(yValues.indexOf(y.domain()[0])), - currentPoint = pointIndex(yValues.length - 1); - return [minPoint, maxPoint, currentPoint].filter(function (d) {return d != null;}); - }); - points.enter().append('circle'); - points.exit().remove(); - points - .attr('cx', function(d,i) { return x(getX(d,d.pointIndex)) }) - .attr('cy', function(d,i) { return y(getY(d,d.pointIndex)) }) - .attr('r', 2) - .attr('class', function(d,i) { - return getX(d, d.pointIndex) == x.domain()[1] ? 'nv-point nv-currentValue' : - getY(d, d.pointIndex) == y.domain()[0] ? 'nv-point nv-minValue' : 'nv-point nv-maxValue' - }); - }); - - return chart; - } - - - //============================================================ - // Expose Public Variables - //------------------------------------------------------------ - - chart.margin = function(_) { - if (!arguments.length) return margin; - margin.top = typeof _.top != 'undefined' ? _.top : margin.top; - margin.right = typeof _.right != 'undefined' ? _.right : margin.right; - margin.bottom = typeof _.bottom != 'undefined' ? _.bottom : margin.bottom; - margin.left = typeof _.left != 'undefined' ? 
_.left : margin.left; - return chart; - }; - - chart.width = function(_) { - if (!arguments.length) return width; - width = _; - return chart; - }; - - chart.height = function(_) { - if (!arguments.length) return height; - height = _; - return chart; - }; - - chart.x = function(_) { - if (!arguments.length) return getX; - getX = d3.functor(_); - return chart; - }; - - chart.y = function(_) { - if (!arguments.length) return getY; - getY = d3.functor(_); - return chart; - }; - - chart.xScale = function(_) { - if (!arguments.length) return x; - x = _; - return chart; - }; - - chart.yScale = function(_) { - if (!arguments.length) return y; - y = _; - return chart; - }; - - chart.xDomain = function(_) { - if (!arguments.length) return xDomain; - xDomain = _; - return chart; - }; - - chart.yDomain = function(_) { - if (!arguments.length) return yDomain; - yDomain = _; - return chart; - }; - - chart.animate = function(_) { - if (!arguments.length) return animate; - animate = _; - return chart; - }; - - chart.color = function(_) { - if (!arguments.length) return color; - color = nv.utils.getColor(_); - return chart; - }; - - //============================================================ - - - return chart; -} - -nv.models.sparklinePlus = function() { - - //============================================================ - // Public Variables with Default Settings - //------------------------------------------------------------ - - var sparkline = nv.models.sparkline(); - - var margin = {top: 15, right: 100, bottom: 10, left: 50} - , width = null - , height = null - , x - , y - , index = [] - , paused = false - , xTickFormat = d3.format(',r') - , yTickFormat = d3.format(',.2f') - , showValue = true - , alignValue = true - , rightAlignValue = false - , noData = "No Data Available." 
;

//============================================================


// Render function for the sparklinePlus chart. Invoked as selection.call(chart);
// draws one sparkline per selected container, plus the current-value label and
// the mouse-hover crosshair. NOTE(review): closure variables referenced here
// (margin, width, height, sparkline, noData, paused, index, xTickFormat,
// yTickFormat, alignValue, rightAlignValue, x, y) are declared above this
// chunk — confirm against the enclosing nv.models.sparklinePlus factory.
function chart(selection) {
  selection.each(function(data) {
    var container = d3.select(this);

    // Fall back to the container's CSS size, then 960x400, when no explicit
    // width/height was configured.
    var availableWidth = (width  || parseInt(container.style('width')) || 960)
                           - margin.left - margin.right,
        availableHeight = (height || parseInt(container.style('height')) || 400)
                           - margin.top - margin.bottom;

    // Re-running chart(selection) re-renders with the current settings.
    chart.update = function() { chart(selection) };
    chart.container = this;

    //------------------------------------------------------------
    // Display No Data message if there's nothing to show.

    if (!data || !data.length) {
      var noDataText = container.selectAll('.nv-noData').data([noData]);

      noDataText.enter().append('text')
        .attr('class', 'nvd3 nv-noData')
        .attr('dy', '-.7em')
        .style('text-anchor', 'middle');

      noDataText
        .attr('x', margin.left + availableWidth / 2)
        .attr('y', margin.top + availableHeight / 2)
        .text(function(d) { return d });

      return chart;
    } else {
      container.selectAll('.nv-noData').remove();
    }

    // Value of the last data point; shown as the big label next to the line.
    var currentValue = sparkline.y()(data[data.length-1], data.length-1);

    //------------------------------------------------------------

    //------------------------------------------------------------
    // Setup Scales (owned by the inner sparkline model)

    x = sparkline.xScale();
    y = sparkline.yScale();

    //------------------------------------------------------------

    //------------------------------------------------------------
    // Setup containers and skeleton of chart
    // selectAll().data([data]) creates the skeleton once; re-renders reuse it.

    var wrap = container.selectAll('g.nv-wrap.nv-sparklineplus').data([data]);
    var wrapEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-sparklineplus');
    var gEnter = wrapEnter.append('g');
    var g = wrap.select('g');

    gEnter.append('g').attr('class', 'nv-sparklineWrap');
    gEnter.append('g').attr('class', 'nv-valueWrap');
    gEnter.append('g').attr('class', 'nv-hoverArea');

    wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');

    //------------------------------------------------------------

    //------------------------------------------------------------
    // Main Chart Component(s): delegate the line itself to the sparkline model.

    var sparklineWrap = g.select('.nv-sparklineWrap');

    sparkline
      .width(availableWidth)
      .height(availableHeight);

    sparklineWrap
      .call(sparkline);

    //------------------------------------------------------------

    // Current-value label, right- or left-aligned per rightAlignValue, and
    // vertically aligned to the last point's y when alignValue is set.
    var valueWrap = g.select('.nv-valueWrap');

    var value = valueWrap.selectAll('.nv-currentValue')
        .data([currentValue]);

    value.enter().append('text').attr('class', 'nv-currentValue')
        .attr('dx', rightAlignValue ? -8 : 8)
        .attr('dy', '.9em')
        .style('text-anchor', rightAlignValue ? 'end' : 'start');

    value
        .attr('x', availableWidth + (rightAlignValue ? margin.right : 0))
        .attr('y', alignValue ? function(d) { return y(d) } : 0)
        .style('fill', sparkline.color()(data[data.length-1], data.length-1))
        .text(yTickFormat(currentValue));

    // Invisible hover rect: mousemove tracks the crosshair, click toggles
    // `paused` (freezes the crosshair), mouseout clears it.
    gEnter.select('.nv-hoverArea').append('rect')
        .on('mousemove', sparklineHover)
        .on('click', function() { paused = !paused })
        .on('mouseout', function() { index = []; updateValueLine(); });
        //.on('mouseout', function() { index = null; updateValueLine(); });

    // Oversize the hover rect to cover the margins as well as the plot area.
    g.select('.nv-hoverArea rect')
        .attr('transform', function(d) { return 'translate(' + -margin.left + ',' + -margin.top + ')' })
        .attr('width', availableWidth + margin.left + margin.right)
        .attr('height', availableHeight + margin.top);

    // Draw (or fade out) the vertical hover line plus the x/y value labels for
    // the point whose index is in `index`.
    function updateValueLine() { //index is currently global (within the chart), may or may not keep it that way
      if (paused) return;

      var hoverValue = g.selectAll('.nv-hoverValue').data(index)

      var hoverEnter = hoverValue.enter()
        .append('g').attr('class', 'nv-hoverValue')
          .style('stroke-opacity', 0)
          .style('fill-opacity', 0);

      hoverValue.exit()
        .transition().duration(250)
          .style('stroke-opacity', 0)
          .style('fill-opacity', 0)
          .remove();

      hoverValue
          .attr('transform', function(d) { return 'translate(' + x(sparkline.x()(data[d],d)) + ',0)' })
        .transition().duration(250)
          .style('stroke-opacity', 1)
          .style('fill-opacity', 1);

      // Nothing hovered: leave after the exit transition has been scheduled.
      if (!index.length) return;

      hoverEnter.append('line')
          .attr('x1', 0)
          .attr('y1', -margin.top)
          .attr('x2', 0)
          .attr('y2', availableHeight);

      hoverEnter.append('text').attr('class', 'nv-xValue')
          .attr('x', -6)
          .attr('y', -margin.top)
          .attr('text-anchor', 'end')
          .attr('dy', '.9em')

      g.select('.nv-hoverValue .nv-xValue')
          .text(xTickFormat(sparkline.x()(data[index[0]], index[0])));

      hoverEnter.append('text').attr('class', 'nv-yValue')
          .attr('x', 6)
          .attr('y', -margin.top)
          .attr('text-anchor', 'start')
          .attr('dy', '.9em')

      g.select('.nv-hoverValue .nv-yValue')
          .text(yTickFormat(sparkline.y()(data[index[0]], index[0])));

    }

    // Mouse handler: map the pointer x position back through the scale and
    // select the nearest data point (linear scan) for the crosshair.
    function sparklineHover() {
      if (paused) return;

      var pos = d3.mouse(this)[0] - margin.left;

      function getClosestIndex(data, x) {
        var distance = Math.abs(sparkline.x()(data[0], 0) - x);
        var closestIndex = 0;
        for (var i = 0; i < data.length; i++){
          if (Math.abs(sparkline.x()(data[i], i) - x) < distance) {
            distance = Math.abs(sparkline.x()(data[i], i) - x);
            closestIndex = i;
          }
        }
        return closestIndex;
      }

      index = [getClosestIndex(data, Math.round(x.invert(pos)))];

      updateValueLine();
    }

  });

  return chart;
}


//============================================================
// Expose Public Variables
//------------------------------------------------------------

// expose chart's sub-components
chart.sparkline = sparkline;

// Proxy the sparkline model's accessors onto the wrapper chart.
d3.rebind(chart, sparkline, 'x', 'y', 'xScale', 'yScale', 'color');

// Partial-object margin setter: only the sides present on `_` are updated.
chart.margin = function(_) {
  if (!arguments.length) return margin;
  margin.top    = typeof _.top    != 'undefined' ? _.top    : margin.top;
  margin.right  = typeof _.right  != 'undefined' ? _.right  : margin.right;
  margin.bottom = typeof _.bottom != 'undefined' ?
_.bottom : margin.bottom;
  margin.left   = typeof _.left   != 'undefined' ? _.left   : margin.left;
  return chart;
};

chart.width = function(_) {
  if (!arguments.length) return width;
  width = _;
  return chart;
};

chart.height = function(_) {
  if (!arguments.length) return height;
  height = _;
  return chart;
};

// Formatter applied to the hover x-value label.
chart.xTickFormat = function(_) {
  if (!arguments.length) return xTickFormat;
  xTickFormat = _;
  return chart;
};

// Formatter applied to the current-value and hover y-value labels.
chart.yTickFormat = function(_) {
  if (!arguments.length) return yTickFormat;
  yTickFormat = _;
  return chart;
};

chart.showValue = function(_) {
  if (!arguments.length) return showValue;
  showValue = _;
  return chart;
};

chart.alignValue = function(_) {
  if (!arguments.length) return alignValue;
  alignValue = _;
  return chart;
};

chart.rightAlignValue = function(_) {
  if (!arguments.length) return rightAlignValue;
  rightAlignValue = _;
  return chart;
};

chart.noData = function(_) {
  if (!arguments.length) return noData;
  noData = _;
  return chart;
};

//============================================================


return chart;
}

// Stacked-area model: stacks the series with d3.layout.stack and renders one
// SVG <path> per series, layering an nv.models.scatter on top for point-level
// interaction events.
nv.models.stackedArea = function() {

  //============================================================
  // Public Variables with Default Settings
  //------------------------------------------------------------

  var margin = {top: 0, right: 0, bottom: 0, left: 0}
    , width = 960
    , height = 500
    , color = nv.utils.defaultColor() // a function that computes the color
    , id = Math.floor(Math.random() * 100000) //Create semi-unique ID in case user doesn't select one
    , getX = function(d) { return d.x } // accessor to get the x value from a data point
    , getY = function(d) { return d.y } // accessor to get the y value from a data point
    , style = 'stack'
    , offset = 'zero'
    , order = 'default'
    , interpolate = 'linear' // controls the line interpolation
    , clipEdge = false // if true, masks lines within x and y scale
    , x //can be accessed via chart.xScale()
    , y //can be accessed via chart.yScale()
    , scatter = nv.models.scatter()
    , dispatch = d3.dispatch('tooltipShow', 'tooltipHide', 'areaClick', 'areaMouseover', 'areaMouseout')
    ;

  scatter
    .size(2.2) // default size
    .sizeDomain([2.2,2.2]) // all the same size by default
    ;

  /************************************
   * offset:
   *   'wiggle' (stream)
   *   'zero' (stacked)
   *   'expand' (normalize to 100%)
   *   'silhouette' (simple centered)
   *
   * order:
   *   'inside-out' (stream)
   *   'default' (input order)
   ************************************/

  //============================================================


  // Render function. Invoked as selection.call(chart) with data shaped as
  // [{ key, values: [...], disabled?, color? }, ...].
  function chart(selection) {
    selection.each(function(data) {
      var availableWidth = width - margin.left - margin.right,
          availableHeight = height - margin.top - margin.bottom,
          container = d3.select(this);

      //------------------------------------------------------------
      // Setup Scales (owned by the inner scatter model)

      x = scatter.xScale();
      y = scatter.yScale();

      //------------------------------------------------------------


      // Injecting point index into each point because d3.layout.stack().out does not give index
      // ***Also storing getY(d,i) as stackedY so that it can be set to 0 if series is disabled
      data = data.map(function(aseries, i) {
        aseries.values = aseries.values.map(function(d, j) {
          d.index = j;
          d.stackedY = aseries.disabled ? 0 : getY(d,j);
          return d;
        })
        return aseries;
      });

      // Run the stack layout; .out() writes the computed baseline (y0) and
      // thickness (y) onto each point as d.display instead of the default y/y0.
      data = d3.layout.stack()
                .order(order)
                .offset(offset)
                .values(function(d) { return d.values })  //TODO: make values customizeable in EVERY model in this fashion
                .x(getX)
                .y(function(d) { return d.stackedY })
                .out(function(d, y0, y) {
                  d.display = {
                    y: y,
                    y0: y0
                  };
                })
              (data);


      //------------------------------------------------------------
      // Setup containers and skeleton of chart

      var wrap = container.selectAll('g.nv-wrap.nv-stackedarea').data([data]);
      var wrapEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-stackedarea');
      var defsEnter = wrapEnter.append('defs');
      var gEnter = wrapEnter.append('g');
      var g = wrap.select('g');

      gEnter.append('g').attr('class', 'nv-areaWrap');
      gEnter.append('g').attr('class', 'nv-scatterWrap');

      wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');

      //------------------------------------------------------------


      // Scatter points sit on top of the stack: y is the stacked top edge.
      // Disabled series are filtered out of both the color list and the data.
      scatter
        .width(availableWidth)
        .height(availableHeight)
        .x(getX)
        .y(function(d) { return d.display.y + d.display.y0 })
        .forceY([0])
        .color(data.map(function(d,i) {
          return d.color || color(d, i);
        }).filter(function(d,i) { return !data[i].disabled }));


      var scatterWrap = g.select('.nv-scatterWrap')
          .datum(data.filter(function(d) { return !d.disabled }))

      //d3.transition(scatterWrap).call(scatter);
      scatterWrap.call(scatter);


      // Optional clip path keyed by this chart's semi-unique id.
      defsEnter.append('clipPath')
          .attr('id', 'nv-edge-clip-' + id)
        .append('rect');

      wrap.select('#nv-edge-clip-' + id + ' rect')
          .attr('width', availableWidth)
          .attr('height', availableHeight);

      g   .attr('clip-path', clipEdge ? 'url(#nv-edge-clip-' + id + ')' : '');

      // Area generator for the real shapes, and a zero-height variant used to
      // collapse exiting paths before removal.
      var area = d3.svg.area()
          .x(function(d,i)  { return x(getX(d,i)) })
          .y0(function(d) { return y(d.display.y0) })
          .y1(function(d) { return y(d.display.y + d.display.y0) })
          .interpolate(interpolate);

      var zeroArea = d3.svg.area()
          .x(function(d,i)  { return x(getX(d,i)) })
          .y0(function(d) { return y(d.display.y0) })
          .y1(function(d) { return y(d.display.y0) });


      // One path per series; enter selection wires up hover/click dispatch.
      var path = g.select('.nv-areaWrap').selectAll('path.nv-area')
          .data(function(d) { return d });
          //.data(function(d) { return d }, function(d) { return d.key });
      path.enter().append('path').attr('class', function(d,i) { return 'nv-area nv-area-' + i })
          .on('mouseover', function(d,i) {
            d3.select(this).classed('hover', true);
            dispatch.areaMouseover({
              point: d,
              series: d.key,
              pos: [d3.event.pageX, d3.event.pageY],
              seriesIndex: i
            });
          })
          .on('mouseout', function(d,i) {
            d3.select(this).classed('hover', false);
            dispatch.areaMouseout({
              point: d,
              series: d.key,
              pos: [d3.event.pageX, d3.event.pageY],
              seriesIndex: i
            });
          })
          .on('click', function(d,i) {
            d3.select(this).classed('hover', false);
            dispatch.areaClick({
              point: d,
              series: d.key,
              pos: [d3.event.pageX, d3.event.pageY],
              seriesIndex: i
            });
          })
      //d3.transition(path.exit())
      path.exit()
          .attr('d', function(d,i) { return zeroArea(d.values,i) })
          .remove();
      path
          .style('fill', function(d,i){ return d.color || color(d, i) })
          .style('stroke', function(d,i){ return d.color || color(d, i) });
      //d3.transition(path)
      path
          .attr('d', function(d,i) { return area(d.values,i) })


      //============================================================
      // Event Handling/Dispatching (in chart's scope)
      //------------------------------------------------------------

      // Mirror scatter-point hover onto the matching area path.
      // NOTE(review): the '.nv-chart-<id>' ancestor selector isn't added by
      // this block — confirm it exists, otherwise these selects match nothing.
      scatter.dispatch.on('elementMouseover.area', function(e) {
        g.select('.nv-chart-' + id + ' .nv-area-' + e.seriesIndex).classed('hover', true);
      });
      scatter.dispatch.on('elementMouseout.area', function(e) {
        g.select('.nv-chart-' + id + ' .nv-area-' + e.seriesIndex).classed('hover', false);
      });

      //============================================================

    });


    return chart;
  }


  //============================================================
  // Event Handling/Dispatching (out of chart's scope)
  //------------------------------------------------------------

  // Re-dispatch scatter events under this model's own event names, shifting
  // tooltip positions into the margined coordinate space.
  scatter.dispatch.on('elementClick.area', function(e) {
    dispatch.areaClick(e);
  })
  scatter.dispatch.on('elementMouseover.tooltip', function(e) {
    e.pos = [e.pos[0] + margin.left, e.pos[1] + margin.top],  // NOTE: comma operator, both expressions run
    dispatch.tooltipShow(e);
  });
  scatter.dispatch.on('elementMouseout.tooltip', function(e) {
    dispatch.tooltipHide(e);
  });

  //============================================================


  //============================================================
  // Global getters and setters
  //------------------------------------------------------------

  chart.dispatch = dispatch;
  chart.scatter = scatter;

  // Proxy the scatter model's accessors onto this chart.
  d3.rebind(chart, scatter, 'interactive', 'size', 'xScale', 'yScale', 'zScale', 'xDomain', 'yDomain', 'sizeDomain', 'forceX', 'forceY', 'forceSize', 'clipVoronoi', 'clipRadius');

  chart.x = function(_) {
    if (!arguments.length) return getX;
    getX = d3.functor(_);
    return chart;
  };

  chart.y = function(_) {
    if (!arguments.length) return getY;
    getY = d3.functor(_);
    return chart;
  }  // NOTE(review): missing trailing semicolon (relies on ASI), unlike the sibling setters

  // Partial-object margin setter: only the sides present on `_` are updated.
  chart.margin = function(_) {
    if (!arguments.length) return margin;
    margin.top    = typeof _.top    != 'undefined' ? _.top    : margin.top;
    margin.right  = typeof _.right  != 'undefined' ? _.right  : margin.right;
    margin.bottom = typeof _.bottom != 'undefined' ? _.bottom : margin.bottom;
    margin.left   = typeof _.left   != 'undefined' ?
_.left : margin.left; - return chart; - }; - - chart.width = function(_) { - if (!arguments.length) return width; - width = _; - return chart; - }; - - chart.height = function(_) { - if (!arguments.length) return height; - height = _; - return chart; - }; - - chart.clipEdge = function(_) { - if (!arguments.length) return clipEdge; - clipEdge = _; - return chart; - }; - - chart.color = function(_) { - if (!arguments.length) return color; - color = nv.utils.getColor(_); - return chart; - }; - - chart.offset = function(_) { - if (!arguments.length) return offset; - offset = _; - return chart; - }; - - chart.order = function(_) { - if (!arguments.length) return order; - order = _; - return chart; - }; - - //shortcut for offset + order - chart.style = function(_) { - if (!arguments.length) return style; - style = _; - - switch (style) { - case 'stack': - chart.offset('zero'); - chart.order('default'); - break; - case 'stream': - chart.offset('wiggle'); - chart.order('inside-out'); - break; - case 'stream-center': - chart.offset('silhouette'); - chart.order('inside-out'); - break; - case 'expand': - chart.offset('expand'); - chart.order('default'); - break; - } - - return chart; - }; - - chart.interpolate = function(_) { - if (!arguments.length) return interpolate; - interpolate = _; - return interpolate; - - }; - - //============================================================ - - - return chart; -} - -nv.models.stackedAreaChart = function() { - - //============================================================ - // Public Variables with Default Settings - //------------------------------------------------------------ - - var stacked = nv.models.stackedArea() - , xAxis = nv.models.axis() - , yAxis = nv.models.axis() - , legend = nv.models.legend() - , controls = nv.models.legend() - ; - - var margin = {top: 30, right: 25, bottom: 50, left: 60} - , width = null - , height = null - , color = nv.utils.defaultColor() // a function that takes in d, i and returns color - , 
showControls = true
    , showLegend = true
    , tooltips = true
    , tooltip = function(key, x, y, e, graph) {
        return '<h3>' + key + '</h3>' +
               '<p>' +  y + ' on ' + x + '</p>'
      }
    , x //can be accessed via chart.xScale()
    , y //can be accessed via chart.yScale()
    , yAxisTickFormat = d3.format(',.2f')
    , state = { style: stacked.style() }
    , defaultState = null
    , noData = 'No Data Available.'
    , dispatch = d3.dispatch('tooltipShow', 'tooltipHide', 'stateChange', 'changeState')
    , controlWidth = 250
    ;

  xAxis
    .orient('bottom')
    .tickPadding(7)
    ;
  yAxis
    .orient('left')
    ;
  // Only treat points as hover-active when their (possibly normalized) y value
  // rounds to something non-zero at 2 decimal places.
  stacked.scatter
    .pointActive(function(d) {
      //console.log(stacked.y()(d), !!Math.round(stacked.y()(d) * 100));
      return !!Math.round(stacked.y()(d) * 100);
    })
    ;

  //============================================================


  //============================================================
  // Private Variables
  //------------------------------------------------------------

  // Builds and shows the tooltip for a scatter hover event; the cardinal
  // direction flips to 'n' for negative values so the tooltip stays visible.
  var showTooltip = function(e, offsetElement) {
    var left = e.pos[0] + ( offsetElement.offsetLeft || 0 ),
        top = e.pos[1] + ( offsetElement.offsetTop || 0),
        x = xAxis.tickFormat()(stacked.x()(e.point, e.pointIndex)),
        y = yAxis.tickFormat()(stacked.y()(e.point, e.pointIndex)),
        content = tooltip(e.series.key, x, y, e, chart);

    nv.tooltip.show([left, top], content, e.value < 0 ? 'n' : 's', null, offsetElement);
  };

  //============================================================


  // Render function. Invoked as selection.call(chart); lays out legend,
  // controls, the stacked-area body, and both axes.
  function chart(selection) {
    selection.each(function(data) {
      var container = d3.select(this),
          that = this;

      // Fall back to the container's CSS size, then 960x400, when no explicit
      // width/height was configured.
      var availableWidth = (width  || parseInt(container.style('width')) || 960)
                             - margin.left - margin.right,
          availableHeight = (height || parseInt(container.style('height')) || 400)
                             - margin.top - margin.bottom;

      chart.update = function() { container.transition().call(chart); };
      chart.container = this;

      //set state.disabled
      state.disabled = data.map(function(d) { return !!d.disabled });

      // Snapshot the initial state once, deep-copying array values, so it can
      // serve as the reset baseline.
      if (!defaultState) {
        var key;
        defaultState = {};
        for (key in state) {
          if (state[key] instanceof Array)
            defaultState[key] = state[key].slice(0);
          else
            defaultState[key] = state[key];
        }
      }

      //------------------------------------------------------------
      // Display No Data message if there's nothing to show.

      if (!data || !data.length || !data.filter(function(d) { return d.values.length }).length) {
        var noDataText = container.selectAll('.nv-noData').data([noData]);

        noDataText.enter().append('text')
          .attr('class', 'nvd3 nv-noData')
          .attr('dy', '-.7em')
          .style('text-anchor', 'middle');

        noDataText
          .attr('x', margin.left + availableWidth / 2)
          .attr('y', margin.top + availableHeight / 2)
          .text(function(d) { return d });

        return chart;
      } else {
        container.selectAll('.nv-noData').remove();
      }

      //------------------------------------------------------------


      //------------------------------------------------------------
      // Setup Scales (owned by the stacked-area model)

      x = stacked.xScale();
      y = stacked.yScale();

      //------------------------------------------------------------


      //------------------------------------------------------------
      // Setup containers and skeleton of chart

      var wrap = container.selectAll('g.nv-wrap.nv-stackedAreaChart').data([data]);
      var gEnter = wrap.enter().append('g').attr('class', 'nvd3 nv-wrap nv-stackedAreaChart').append('g');
      var g = wrap.select('g');

      gEnter.append('g').attr('class', 'nv-x nv-axis');
      gEnter.append('g').attr('class', 'nv-y nv-axis');
      gEnter.append('g').attr('class', 'nv-stackedWrap');
      gEnter.append('g').attr('class', 'nv-legendWrap');
      gEnter.append('g').attr('class', 'nv-controlsWrap');

      //------------------------------------------------------------


      //------------------------------------------------------------
      // Legend (drawn in the top margin; margin.top grows to fit it)

      if (showLegend) {
        legend
          .width( availableWidth - controlWidth );

        g.select('.nv-legendWrap')
            .datum(data)
            .call(legend);

        if ( margin.top != legend.height()) {
          margin.top = legend.height();
          availableHeight = (height || parseInt(container.style('height')) || 400)
                             - margin.top - margin.bottom;
        }

        g.select('.nv-legendWrap')
            .attr('transform', 'translate(' + controlWidth + ',' + (-margin.top) +')');
      }

      //------------------------------------------------------------


      //------------------------------------------------------------
      // Controls (Stacked / Stream / Expanded, rendered via a second legend)

      if (showControls) {
        var controlsData = [
          { key: 'Stacked', disabled: stacked.offset() != 'zero' },
          { key: 'Stream', disabled: stacked.offset() != 'wiggle' },
          { key: 'Expanded', disabled: stacked.offset() != 'expand' }
        ];

        controls
          .width( controlWidth )
          .color(['#444', '#444', '#444']);

        g.select('.nv-controlsWrap')
            .datum(controlsData)
            .call(controls);

        // Top margin must clear whichever of controls/legend is taller.
        if ( margin.top != Math.max(controls.height(), legend.height()) ) {
          margin.top = Math.max(controls.height(), legend.height());
          availableHeight = (height || parseInt(container.style('height')) || 400)
                             - margin.top - margin.bottom;
        }

        g.select('.nv-controlsWrap')
            .attr('transform', 'translate(0,' + (-margin.top) +')');
      }

      //------------------------------------------------------------


      wrap.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');


      //------------------------------------------------------------
      // Main Chart Component(s)

      stacked
        .width(availableWidth)
        .height(availableHeight)

      var stackedWrap = g.select('.nv-stackedWrap')
          .datum(data);
      //d3.transition(stackedWrap).call(stacked);
      stackedWrap.call(stacked);

      //------------------------------------------------------------


      //------------------------------------------------------------
      // Setup Axes (tick counts scale with the available space)

      xAxis
        .scale(x)
        .ticks( availableWidth / 100 )
        .tickSize( -availableHeight, 0);

      g.select('.nv-x.nv-axis')
          .attr('transform', 'translate(0,' + availableHeight + ')');
      //d3.transition(g.select('.nv-x.nv-axis'))
      g.select('.nv-x.nv-axis')
        .transition().duration(0)
          .call(xAxis);

      // Stream ('wiggle') mode hides y ticks; expand mode switches to percent.
      yAxis
        .scale(y)
        .ticks(stacked.offset() == 'wiggle' ? 0 : availableHeight / 36)
        .tickSize(-availableWidth, 0)
        .setTickFormat(stacked.offset() == 'expand' ? d3.format('%') : yAxisTickFormat);

      //d3.transition(g.select('.nv-y.nv-axis'))
      g.select('.nv-y.nv-axis')
        .transition().duration(0)
          .call(yAxis);

      //------------------------------------------------------------


      //============================================================
      // Event Handling/Dispatching (in chart's scope)
      //------------------------------------------------------------

      // Clicking an area isolates that series; clicking when it is the only
      // enabled series re-enables all of them.
      stacked.dispatch.on('areaClick.toggle', function(e) {
        if (data.filter(function(d) { return !d.disabled }).length === 1)
          data = data.map(function(d) {
            d.disabled = false;
            return d
          });
        else
          data = data.map(function(d,i) {
            d.disabled = (i != e.seriesIndex);
            return d
          });

        state.disabled = data.map(function(d) { return !!d.disabled });
        dispatch.stateChange(state);

        //selection.transition().call(chart);
        chart.update();
      });

      // Legend click toggles a series; if that would disable every series,
      // re-enable all of them instead.
      legend.dispatch.on('legendClick', function(d,i) {
        d.disabled = !d.disabled;

        if (!data.filter(function(d) { return !d.disabled }).length) {
          data.map(function(d) {
            d.disabled = false;
            return d;
          });
        }
- state.disabled = data.map(function(d) { return !!d.disabled }); - dispatch.stateChange(state); - - //selection.transition().call(chart); - chart.update(); - }); - - legend.dispatch.on('legendDblclick', function(d) { - //Double clicking should always enable current series, and disabled all others. - data.forEach(function(d) { - d.disabled = true; - }); - d.disabled = false; - - state.disabled = data.map(function(d) { return !!d.disabled }); - dispatch.stateChange(state); - chart.update(); - }); - - controls.dispatch.on('legendClick', function(d,i) { - if (!d.disabled) return; - - controlsData = controlsData.map(function(s) { - s.disabled = true; - return s; - }); - d.disabled = false; - - switch (d.key) { - case 'Stacked': - stacked.style('stack'); - break; - case 'Stream': - stacked.style('stream'); - break; - case 'Expanded': - stacked.style('expand'); - break; - } - - state.style = stacked.style(); - dispatch.stateChange(state); - - //selection.transition().call(chart); - chart.update(); - }); - - dispatch.on('tooltipShow', function(e) { - if (tooltips) showTooltip(e, that.parentNode); - }); - - // Update chart from a state object passed to event handler - dispatch.on('changeState', function(e) { - - if (typeof e.disabled !== 'undefined') { - data.forEach(function(series,i) { - series.disabled = e.disabled[i]; - }); - - state.disabled = e.disabled; - } - - if (typeof e.style !== 'undefined') { - stacked.style(e.style); - } - - chart.update(); - }); - - }); - - - return chart; - } - - - //============================================================ - // Event Handling/Dispatching (out of chart's scope) - //------------------------------------------------------------ - - stacked.dispatch.on('tooltipShow', function(e) { - //disable tooltips when value ~= 0 - //// TODO: consider removing points from voronoi that have 0 value instead of this hack - /* - if (!Math.round(stacked.y()(e.point) * 100)) { // 100 will not be good for very small numbers... 
will have to think about making this valu dynamic, based on data range - setTimeout(function() { d3.selectAll('.point.hover').classed('hover', false) }, 0); - return false; - } - */ - - e.pos = [e.pos[0] + margin.left, e.pos[1] + margin.top], - dispatch.tooltipShow(e); - }); - - stacked.dispatch.on('tooltipHide', function(e) { - dispatch.tooltipHide(e); - }); - - dispatch.on('tooltipHide', function() { - if (tooltips) nv.tooltip.cleanup(); - }); - - //============================================================ - - - //============================================================ - // Expose Public Variables - //------------------------------------------------------------ - - // expose chart's sub-components - chart.dispatch = dispatch; - chart.stacked = stacked; - chart.legend = legend; - chart.controls = controls; - chart.xAxis = xAxis; - chart.yAxis = yAxis; - - d3.rebind(chart, stacked, 'x', 'y', 'size', 'xScale', 'yScale', 'xDomain', 'yDomain', 'sizeDomain', 'interactive', 'offset', 'order', 'style', 'clipEdge', 'forceX', 'forceY', 'forceSize', 'interpolate'); - - chart.margin = function(_) { - if (!arguments.length) return margin; - margin.top = typeof _.top != 'undefined' ? _.top : margin.top; - margin.right = typeof _.right != 'undefined' ? _.right : margin.right; - margin.bottom = typeof _.bottom != 'undefined' ? _.bottom : margin.bottom; - margin.left = typeof _.left != 'undefined' ? 
_.left : margin.left; - return chart; - }; - - chart.width = function(_) { - if (!arguments.length) return getWidth; - width = _; - return chart; - }; - - chart.height = function(_) { - if (!arguments.length) return getHeight; - height = _; - return chart; - }; - - chart.color = function(_) { - if (!arguments.length) return color; - color = nv.utils.getColor(_); - legend.color(color); - stacked.color(color); - return chart; - }; - - chart.showControls = function(_) { - if (!arguments.length) return showControls; - showControls = _; - return chart; - }; - - chart.showLegend = function(_) { - if (!arguments.length) return showLegend; - showLegend = _; - return chart; - }; - - chart.tooltip = function(_) { - if (!arguments.length) return tooltip; - tooltip = _; - return chart; - }; - - chart.tooltips = function(_) { - if (!arguments.length) return tooltips; - tooltips = _; - return chart; - }; - - chart.tooltipContent = function(_) { - if (!arguments.length) return tooltip; - tooltip = _; - return chart; - }; - - chart.state = function(_) { - if (!arguments.length) return state; - state = _; - return chart; - }; - - chart.defaultState = function(_) { - if (!arguments.length) return defaultState; - defaultState = _; - return chart; - }; - - chart.noData = function(_) { - if (!arguments.length) return noData; - noData = _; - return chart; - }; - - yAxis.setTickFormat = yAxis.tickFormat; - - yAxis.tickFormat = function(_) { - if (!arguments.length) return yAxisTickFormat; - yAxisTickFormat = _; - return yAxis; - }; - - //============================================================ - - return chart; -} -})(); \ No newline at end of file diff --git a/modules/enterprise/gui/coregui/src/main/webapp/js/nv.d3.min.js b/modules/enterprise/gui/coregui/src/main/webapp/js/nv.d3.min.js deleted file mode 100755 index 309108b..0000000 --- a/modules/enterprise/gui/coregui/src/main/webapp/js/nv.d3.min.js +++ /dev/null @@ -1,6 +0,0 @@ -(function(){function t(e,t){return(new 
Date(t,e+1,0)).getDate()}function n(e,t,n){return function(r,i,s){var o=e(r),u=[];o<r&&t(o);if(s>1)while(o<i){var a=new Date(+o);n(a)%s===0&&u.push(a),t(o)}else while(o<i)u.push(new Date(+o)),t(o);return u}}var e=window.nv||{};e.version="0.0.1a",e.dev=!0,window.nv=e,e.tooltip={},e.utils={},e.models={},e.charts={},e.graphs=[],e.logs={},e.dispatch=d3.dispatch("render_start","render_end"),e.dev&&(e.dispatch.on("render_start",function(t){e.logs.startTime=+(new Date)}),e.dispatch.on("render_end",function(t){e.logs.endTime=+(new Date),e.logs.totalTime=e.logs.endTime-e.logs.startTime,e.log("total",e.logs.totalTime)})),e.log=function(){if(e.dev&&console.log&&console.log.apply)console.log.apply(console,arguments);else if(e.dev&&console.log&&Function.prototype.bind){var t=Function.prototype.bind.call(console.log,console);t.apply(console,arguments)}return arguments[arguments.length-1]},e.render=function(n){n=n||1,e.render.active=!0,e.dispatch.rend er_start(),setTimeout(function(){var t,r;for(var i=0;i<n&&(r=e.render.queue[i]);i++)t=r.generate(),typeof r.callback==typeof Function&&r.callback(t),e.graphs.push(t);e.render.queue.splice(0,i),e.render.queue.length?setTimeout(arguments.callee,0):(e.render.active=!1,e.dispatch.render_end())},0)},e.render.active=!1,e.render.queue=[],e.addGraph=function(t){typeof arguments[0]==typeof Function&&(t={generate:arguments[0],callback:arguments[1]}),e.render.queue.push(t),e.render.active||e.render()},e.identity=function(e){return e},e.strip=function(e){return e.replace(/(\s|&)/g,"")},d3.time.monthEnd=function(e){return new Date(e.getFullYear(),e.getMonth(),0)},d3.time.monthEnds=n(d3.time.monthEnd,function(e){e.setUTCDate(e.getUTCDate()+1),e.setDate(t(e.getMonth()+1,e.getFullYear()))},function(e){return e.getMonth()}),function(){var t=window.nv.tooltip={};t.show=function(t,n,r,i,s,o){var u=document.createElement("div");u.className="nvtooltip "+(o?o:"xy-tooltip"),r=r||"s",i=i||20;var a= 
s;if(!s||s.tagName.match(/g|svg/i))a=document.getElementsByTagName("body")[0];u.innerHTML=n,u.style.left=0,u.style.top=0,u.style.opacity=0,a.appendChild(u);var f=parseInt(u.offsetHeight),l=parseInt(u.offsetWidth),c=e.utils.windowSize().width,h=e.utils.windowSize().height,p=window.scrollY,d=window.scrollX,v,m;h=window.innerWidth>=document.body.scrollWidth?h:h-16,c=window.innerHeight>=document.body.scrollHeight?c:c-16;var g=function(e){var t=m;do isNaN(e.offsetTop)||(t+=e.offsetTop);while(e=e.offsetParent);return t},y=function(e){var t=v;do isNaN(e.offsetLeft)||(t+=e.offsetLeft);while(e=e.offsetParent);return t};switch(r){case"e":v=t[0]-l-i,m=t[1]-f/2;var b=y(u),w=g(u);b<d&&(v=t[0]+i>d?t[0]+i:d-b+v),w<p&&(m=p-w+m),w+f>p+h&&(m=p+h-w+m-f);break;case"w":v=t[0]+i,m=t[1]-f/2,b+l>c&&(v=t[0]-l-i),w<p&&(m=p+5),w+f>p+h&&(m=p-f-5);break;case"n":v=t[0]-l/2-5,m=t[1]+i;var b=y(u),w=g(u);b<d&&(v=d+5),b+l>c&&(v=v-l/2+5),w+f>p+h&&(m=p+h-w+m-f);break;case"s":v=t[0]-l/2,m=t[1]-f-i;var b=y(u),w= g(u);b<d&&(v=d+5),b+l>c&&(v=v-l/2+5),p>w&&(m=p)}return u.style.left=v+"px",u.style.top=m+"px",u.style.opacity=1,u.style.position="absolute",u.style.pointerEvents="none",u},t.cleanup=function(){var e=document.getElementsByClassName("nvtooltip"),t=[];while(e.length)t.push(e[0]),e[0].style.transitionDelay="0 !important",e[0].style.opacity=0,e[0].className="nvtooltip-pending-removal";setTimeout(function(){while(t.length){var e=t.pop();e.parentNode.removeChild(e)}},500)}}(),e.utils.windowSize=function(){var e={width:640,height:480};return document.body&&document.body.offsetWidth&&(e.width=document.body.offsetWidth,e.height=document.body.offsetHeight),document.compatMode=="CSS1Compat"&&document.documentElement&&document.documentElement.offsetWidth&&(e.width=document.documentElement.offsetWidth,e.height=document.documentElement.offsetHeight),window.innerWidth&&window.innerHeight&&(e.width=window.innerWidth,e.height=window.innerHeight),e},e.utils.windowResize=function(e){var t=windo 
w.onresize;window.onresize=function(n){typeof t=="function"&&t(n),e(n)}},e.utils.getColor=function(t){return arguments.length?Object.prototype.toString.call(t)==="[object Array]"?function(e,n){return e.color||t[n%t.length]}:t:e.utils.defaultColor()},e.utils.defaultColor=function(){var e=d3.scale.category20().range();return function(t,n){return t.color||e[n%e.length]}},e.utils.customTheme=function(e,t,n){t=t||function(e){return e.key},n=n||d3.scale.category20().range();var r=n.length;return function(i,s){var o=t(i);return r||(r=n.length),typeof e[o]!="undefined"?typeof e[o]=="function"?e[o]():e[o]:n[--r]}},e.utils.pjax=function(t,n){function r(r){d3.html(r,function(r){var i=d3.select(n).node();i.parentNode.replaceChild(d3.select(r).select(n).node(),i),e.utils.pjax(t,n)})}d3.selectAll(t).on("click",function(){history.pushState(this.href,this.textContent,this.href),r(this.href),d3.event.preventDefault()}),d3.select(window).on("popstate",function(){d3.event.state&&r(d3.event.sta te)})},e.utils.calcApproxTextWidth=function(e){if(e instanceof d3.selection){var t=parseInt(e.style("font-size").replace("px","")),n=e.text().length;return n*t*.5}return 0},e.models.axis=function(){function d(r){return r.each(function(r){var d=d3.select(this),v=d.selectAll("g.nv-wrap.nv-axis").data([r]),m=v.enter().append("g").attr("class","nvd3 nv-wrap nv-axis"),g=m.append("g"),y=v.select("g");h!==null?e.ticks(h):(e.orient()=="top"||e.orient()=="bottom")&&e.ticks(Math.abs(i.range()[1]-i.range()[0])/100),d3.transition(y).call(e),p=p||e.scale();var b=e.tickFormat();b==null&&(b=p.tickFormat());var w=y.selectAll("text.nv-axislabel").data([s||null]);w.exit().remove();switch(e.orient()){case"top":w.enter().append("text").attr("class","nv-axislabel");var E=i.range().length==2?i.range()[1]:i.range()[i.range().length-1]+(i.range()[1]-i.range()[0]);w.attr("text-anchor","middle").attr("y",0).attr("x",E/2);if(o){var S=v.selectAll("g.nv-axisMaxMin").data(i.domain());S.enter().append("g" 
).attr("class","nv-axisMaxMin").append("text"),S.exit().remove(),S.attr("transform",function(e,t){return"translate("+i(e)+",0)"}).select("text").attr("dy","0em").attr("y",-e.tickPadding()).attr("text-anchor","middle").text(function(e,t){var n=b(e);return(""+n).match("NaN")?"":n}),d3.transition(S).attr("transform",function(e,t){return"translate("+i.range()[t]+",0)"})}break;case"bottom":var x=36,T=30,N=y.selectAll("g").select("text");if(a%360){N.each(function(e,t){var n=this.getBBox().width;n>T&&(T=n)});var C=Math.abs(Math.sin(a*Math.PI/180)),x=(C?C*T:T)+30;N.attr("transform",function(e,t,n){return"rotate("+a+" 0,0)"}).attr("text-anchor",a%360>0?"start":"end")}w.enter().append("text").attr("class","nv-axislabel");var E=i.range().length==2?i.range()[1]:i.range()[i.range().length-1]+(i.range()[1]-i.range()[0]);w.attr("text-anchor","middle").attr("y",x).attr("x",E/2);if(o){var S=v.selectAll("g.nv-axisMaxMin").data([i.domain()[0],i.domain()[i.domain().length-1]]);S.enter().append( "g").attr("class","nv-axisMaxMin").append("text"),S.exit().remove(),S.attr("transform",function(e,t){return"translate("+(i(e)+(c?i.rangeBand()/2:0))+",0)"}).select("text").attr("dy",".71em").attr("y",e.tickPadding()).attr("transform",function(e,t,n){return"rotate("+a+" 0,0)"}).attr("text-anchor",a?a%360>0?"start":"end":"middle").text(function(e,t){var n=b(e);return(""+n).match("NaN")?"":n}),d3.transition(S).attr("transform",function(e,t){return"translate("+(i(e)+(c?i.rangeBand()/2:0))+",0)"})}l&&N.attr("transform",function(e,t){return"translate(0,"+(t%2==0?"0":"12")+")"});break;case"right":w.enter().append("text").attr("class","nv-axislabel"),w.attr("text-anchor",f?"middle":"begin").attr("transform",f?"rotate(90)":"").attr("y",f?-Math.max(t.right,n)+12:-10).attr("x",f?i.range()[0]/2:e.tickPadding());if(o){var S=v.selectAll("g.nv-axisMaxMin").data(i.domain());S.enter().append("g").attr("class","nv-axisMaxMin").append("text").style("opacity",0),S.exit().remove(),S.attr("transf 
orm",function(e,t){return"translate(0,"+i(e)+")"}).select("text").attr("dy",".32em").attr("y",0).attr("x",e.tickPadding()).attr("text-anchor","start").text(function(e,t){var n=b(e);return(""+n).match("NaN")?"":n}),d3.transition(S).attr("transform",function(e,t){return"translate(0,"+i.range()[t]+")"}).select("text").style("opacity",1)}break;case"left":w.enter().append("text").attr("class","nv-axislabel"),w.attr("text-anchor",f?"middle":"end").attr("transform",f?"rotate(-90)":"").attr("y",f?-Math.max(t.left,n)+12:-10).attr("x",f?-i.range()[0]/2:-e.tickPadding());if(o){var S=v.selectAll("g.nv-axisMaxMin").data(i.domain());S.enter().append("g").attr("class","nv-axisMaxMin").append("text").style("opacity",0),S.exit().remove(),S.attr("transform",function(e,t){return"translate(0,"+p(e)+")"}).select("text").attr("dy",".32em").attr("y",0).attr("x",-e.tickPadding()).attr("text-anchor","end").text(function(e,t){var n=b(e);return(""+n).match("NaN")?"":n}),d3.transition(S).attr("transfor m",function(e,t){return"translate(0,"+i.range()[t]+")"}).select("text").style("opacity",1)}}w.text(function(e){return e}),o&&(e.orient()==="left"||e.orient()==="right")&&(y.selectAll("g").each(function(e,t){d3.select(this).select("text").attr("opacity",1);if(i(e)<i.range()[1]+10||i(e)>i.range()[0]-10)(e>1e-10||e<-1e-10)&&d3.select(this).attr("opacity",0),d3.select(this).select("text").attr("opacity",0)}),i.domain()[0]==i.domain()[1]&&i.domain()[0]==0&&v.selectAll("g.nv-axisMaxMin").style("opacity",function(e,t){return t?0:1}));if(o&&(e.orient()==="top"||e.orient()==="bottom")){var k=[];v.selectAll("g.nv-axisMaxMin").each(function(e,t){try{t?k.push(i(e)-this.getBBox().width-4):k.push(i(e)+this.getBBox().width+4)}catch(n){t?k.push(i(e)-4):k.push(i(e)+4)}}),y.selectAll("g").each(function(e,t){if(i(e)<k[0]||i(e)>k[1])e>1e-10||e<-1e-10?d3.select(this).remove():d3.select(this).select("text").remove()})}u&&y.selectAll(".tick").filter(function(e){return!parseFloat(Math.round(e.__dat 
a__*1e5)/1e6)&&e.__data__!==undefined}).classed("zero",!0),p=i.copy()}),d}var e=d3.svg.axis(),t={top:0,right:0,bottom:0,left:0},n=75,r=60,i=d3.scale.linear(),s=null,o=!0,u=!0,a=0,f=!0,l=!1,c=!1,h=null;e.scale(i).orient("bottom").tickFormat(function(e){return e});var p;return d.axis=e,d3.rebind(d,e,"orient","tickValues","tickSubdivide","tickSize","tickPadding","tickFormat"),d3.rebind(d,i,"domain","range","rangeBand","rangeBands"),d.margin=function(e){return arguments.length?(t.top=typeof e.top!="undefined"?e.top:t.top,t.right=typeof e.right!="undefined"?e.right:t.right,t.bottom=typeof e.bottom!="undefined"?e.bottom:t.bottom,t.left=typeof e.left!="undefined"?e.left:t.left,d):t},d.width=function(e){return arguments.length?(n=e,d):n},d.ticks=function(e){return arguments.length?(h=e,d):h},d.height=function(e){return arguments.length?(r=e,d):r},d.axisLabel=function(e){return arguments.length?(s=e,d):s},d.showMaxMin=function(e){return arguments.length?(o=e,d):o},d.highlightZero=fun ction(e){return arguments.length?(u=e,d):u},d.scale=function(t){return arguments.length?(i=t,e.scale(i),c=typeof i.rangeBands=="function",d3.rebind(d,i,"domain","range","rangeBand","rangeBands"),d):i},d.rotateYLabel=function(e){return arguments.length?(f=e,d):f},d.rotateLabels=function(e){return arguments.length?(a=e,d):a},d.staggerLabels=function(e){return arguments.length?(l=e,d):l},d},e.models.bullet=function(){function p(e){return e.each(function(e,n){var l=a-t.left-t.right,p=f-t.top-t.bottom,d=d3.select(this),v=i.call(this,e,n).slice().sort(d3.descending),m=s.call(this,e,n).slice().sort(d3.descending),g=o.call(this,e,n).slice().sort(d3.descending),y=d3.scale.linear().domain(d3.extent(d3.merge([u,v]))).range(r?[l,0]:[0,l]),b=this.__chart__||d3.scale.linear().domain([0,Infinity]).range(y.range());this.__chart__=y;var w=d3.min(v),E=d3.max(v),S=v[1],x=d.selectAll("g.nv-wrap.nv-bullet").data([e]),T=x.enter().append("g").attr("class","nvd3 nv-wrap nv-bullet"),N=T.append("g"), 
C=x.select("g");N.append("rect").attr("class","nv-range nv-rangeMax"),N.append("rect").attr("class","nv-range nv-rangeAvg"),N.append("rect").attr("class","nv-range nv-rangeMin"),N.append("rect").attr("class","nv-measure"),N.append("path").attr("class","nv-markerTriangle"),x.attr("transform","translate("+t.left+","+t.top+")");var k=function(e){return Math.abs(b(e)-b(0))},L=function(e){return Math.abs(y(e)-y(0))},A=function(e){return e<0?b(e):b(0)},O=function(e){return e<0?y(e):y(0)};C.select("rect.nv-rangeMax").attr("height",p).attr("width",L(E>0?E:w)).attr("x",O(E>0?E:w)).datum(E>0?E:w),C.select("rect.nv-rangeAvg").attr("height",p).attr("width",L(S)).attr("x",O(S)).datum(S),C.select("rect.nv-rangeMin").attr("height",p).attr("width",L(E)).attr("x",O(E)).attr("width",L(E>0?w:E)).attr("x",O(E>0?w:E)).datum(E>0?w:E),C.select("rect.nv-measure").style("fill",c).attr("height",p/3).attr("y",p/3).attr("width",g<0?y(0)-y(g[0]):y(g[0])-y(0)).attr("x",O(g)).on("mouseover",function(){h.e lementMouseover({value:g[0],label:"Current",pos:[y(g[0]),p/2]})}).on("mouseout",function(){h.elementMouseout({value:g[0],label:"Current"})});var M=p/6;m[0]?C.selectAll("path.nv-markerTriangle").attr("transform",function(e){return"translate("+y(m[0])+","+p/2+")"}).attr("d","M0,"+M+"L"+M+","+ -M+" "+ -M+","+ -M+"Z").on("mouseover",function(){h.elementMouseover({value:m[0],label:"Previous",pos:[y(m[0]),p/2]})}).on("mouseout",function(){h.elementMouseout({value:m[0],label:"Previous"})}):C.selectAll("path.nv-markerTriangle").remove(),x.selectAll(".nv-range").on("mouseover",function(e,t){var n=t?t==1?"Mean":"Minimum":"Maximum";h.elementMouseover({value:e,label:n,pos:[y(e),p/2]})}).on("mouseout",function(e,t){var n=t?t==1?"Mean":"Minimum":"Maximum";h.elementMouseout({value:e,label:n})})}),p}var t={top:0,right:0,bottom:0,left:0},n="left",r=!1,i=function(e){return e.ranges},s=function(e){return e.markers},o=function(e){return e.measures},u=[0],a=380,f=30,l=null,c=e.utils.getColor(["# 
1f77b4"]),h=d3.dispatch("elementMouseover","elementMouseout");return p.dispatch=h,p.orient=function(e){return arguments.length?(n=e,r=n=="right"||n=="bottom",p):n},p.ranges=function(e){return arguments.length?(i=e,p):i},p.markers=function(e){return arguments.length?(s=e,p):s},p.measures=function(e){return arguments.length?(o=e,p):o},p.forceX=function(e){return arguments.length?(u=e,p):u},p.width=function(e){return arguments.length?(a=e,p):a},p.height=function(e){return arguments.length?(f=e,p):f},p.margin=function(e){return arguments.length?(t.top=typeof e.top!="undefined"?e.top:t.top,t.right=typeof e.right!="undefined"?e.right:t.right,t.bottom=typeof e.bottom!="undefined"?e.bottom:t.bottom,t.left=typeof e.left!="undefined"?e.left:t.left,p):t},p.tickFormat=function(e){return arguments.length?(l=e,p):l},p.color=function(t){return arguments.length?(c=e.utils.getColor(t),p):c},p},e.models.bulletChart=function(){function m(e){return e.each(function(n,h){var g=d3.select(this),y=( a||parseInt(g.style("width"))||960)-i.left-i.right,b=f-i.top-i.bottom,w=this;m.update=function(){m(e)},m.container=this;if(!n||!s.call(this,n,h)){var E=g.selectAll(".nv-noData").data([p]);return E.enter().append("text").attr("class","nvd3 nv-noData").attr("dy","-.7em").style("text-anchor","middle"),E.attr("x",i.left+y/2).attr("y",18+i.top+b/2).text(function(e){return e}),m}g.selectAll(".nv-noData").remove();var S=s.call(this,n,h).slice().sort(d3.descending),x=o.call(this,n,h).slice().sort(d3.descending),T=u.call(this,n,h).slice().sort(d3.descending),N=g.selectAll("g.nv-wrap.nv-bulletChart").data([n]),C=N.enter().append("g").attr("class","nvd3 nv-wrap nv-bulletChart"),k=C.append("g"),L=N.select("g");k.append("g").attr("class","nv-bulletWrap"),k.append("g").attr("class","nv-titles"),N.attr("transform","translate("+i.left+","+i.top+")");var A=d3.scale.linear().domain([0,Math.max(S[0],x[0],T[0])]).range(r?[y,0]:[0,y]),O=this.__chart__||d3.scale.linear().domain([0,Infinity]).rang 
e(A.range());this.__chart__=A;var M=function(e){return Math.abs(O(e)-O(0))},_=function(e){return Math.abs(A(e)-A(0))},D=k.select(".nv-titles").append("g").attr("text-anchor","end").attr("transform","translate(-6,"+(f-i.top-i.bottom)/2+")");D.append("text").attr("class","nv-title").text(function(e){return e.title}),D.append("text").attr("class","nv-subtitle").attr("dy","1em").text(function(e){return e.subtitle}),t.width(y).height(b);var P=L.select(".nv-bulletWrap");d3.transition(P).call(t);var H=l||A.tickFormat(y/100),B=L.selectAll("g.nv-tick").data(A.ticks(y/50),function(e){return this.textContent||H(e)}),j=B.enter().append("g").attr("class","nv-tick").attr("transform",function(e){return"translate("+O(e)+",0)"}).style("opacity",1e-6);j.append("line").attr("y1",b).attr("y2",b*7/6),j.append("text").attr("text-anchor","middle").attr("dy","1em").attr("y",b*7/6).text(H);var F=d3.transition(B).attr("transform",function(e){return"translate("+A(e)+",0)"}).style("opacity",1);F.select ("line").attr("y1",b).attr("y2",b*7/6),F.select("text").attr("y",b*7/6),d3.transition(B.exit()).attr("transform",function(e){return"translate("+A(e)+",0)"}).style("opacity",1e-6).remove(),d.on("tooltipShow",function(e){e.key=n.title,c&&v(e,w.parentNode)})}),d3.timer.flush(),m}var t=e.models.bullet(),n="left",r=!1,i={top:5,right:40,bottom:20,left:120},s=function(e){return e.ranges},o=function(e){return e.markers},u=function(e){return e.measures},a=null,f=55,l=null,c=!0,h=function(e,t,n,r,i){return"<h3>"+t+"</h3>"+"<p>"+n+"</p>"},p="No Data Available.",d=d3.dispatch("tooltipShow","tooltipHide"),v=function(t,n){var r=t.pos[0]+(n.offsetLeft||0)+i.left,s=t.pos[1]+(n.offsetTop||0)+i.top,o=h(t.key,t.label,t.value,t,m);e.tooltip.show([r,s],o,t.value<0?"e":"w",null,n)};return t.dispatch.on("elementMouseover.tooltip",function(e){d.tooltipShow(e)}),t.dispatch.on("elementMouseout.tooltip",function(e){d.tooltipHide(e)}),d.on("tooltipHide",function(){c&&e.tooltip.cleanup()}),m.dispatch=d, 
m.bullet=t,d3.rebind(m,t,"color"),m.orient=function(e){return arguments.length?(n=e,r=n=="right"||n=="bottom",m):n},m.ranges=function(e){return arguments.length?(s=e,m):s},m.markers=function(e){return arguments.length?(o=e,m):o},m.measures=function(e){return arguments.length?(u=e,m):u},m.width=function(e){return arguments.length?(a=e,m):a},m.height=function(e){return arguments.length?(f=e,m):f},m.margin=function(e){return arguments.length?(i.top=typeof e.top!="undefined"?e.top:i.top,i.right=typeof e.right!="undefined"?e.right:i.right,i.bottom=typeof e.bottom!="undefined"?e.bottom:i.bottom,i.left=typeof e.left!="undefined"?e.left:i.left,m):i},m.tickFormat=function(e){return arguments.length?(l=e,m):l},m.tooltips=function(e){return arguments.length?(c=e,m):c},m.tooltipContent=function(e){return arguments.length?(h=e,m):h},m.noData=function(e){return arguments.length?(p=e,m):p},m},e.models.cumulativeLineChart=function(){function C(e){return e.each(function(e){function D(e,t){d3 .select(C.container).style("cursor","ew-resize")}function P(e,t){T.x=d3.event.x,T.i=Math.round(x.invert(T.x)),J()}function H(e,t){d3.select(C.container).style("cursor","auto"),y.index=T.i,S.stateChange(y)}function J(){$.data([T]),d.call(C)}var d=d3.select(this).classed("nv-chart-"+g,!0),L=this,A=(a||parseInt(d.style("width"))||960)-o.left-o.right,O=(f||parseInt(d.style("height"))||400)-o.top-o.bottom;C.update=function(){d.transition().call(C)},C.container=this,y.disabled=e.map(function(e){return!!e.disabled});if(!b){var M;b={};for(M in y)y[M]instanceof Array?b[M]=y[M].slice(0):b[M]=y[M]}var _=d3.behavior.drag().on("dragstart",D).on("drag",P).on("dragend",H);if(!e||!e.length||!e.filter(function(e){return e.values.length}).length){var B=d.selectAll(".nv-noData").data([w]);return B.enter().append("text").attr("class","nvd3 nv-noData").attr("dy","-.7em").style("text-anchor","middle"),B.attr("x",o.left+A/2).attr("y",o.top+O/2).text(function(e){return e}),C}d.selectAll(".nv-noData 
").remove(),v=t.xScale(),m=t.yScale();if(!p){var j=e.filter(function(e){return!e.disabled}).map(function(e,n){var r=d3.extent(e.values,t.y());return r[0]<-0.95&&(r[0]=-0.95),[(r[0]-r[1])/(1+r[1]),(r[1]-r[0])/(1+r[0])]}),F=[d3.min(j,function(e){return e[0]}),d3.max(j,function(e){return e[1]})];t.yDomain(F)}else t.yDomain(null);x.domain([0,e[0].values.length-1]).range([0,A]).clamp(!0);var e=k(T.i,e),I=d.selectAll("g.nv-wrap.nv-cumulativeLine").data([e]),q=I.enter().append("g").attr("class","nvd3 nv-wrap nv-cumulativeLine").append("g"),R=I.select("g");q.append("g").attr("class","nv-x nv-axis"),q.append("g").attr("class","nv-y nv-axis"),q.append("g").attr("class","nv-background"),q.append("g").attr("class","nv-linesWrap"),q.append("g").attr("class","nv-avgLinesWrap"),q.append("g").attr("class","nv-legendWrap"),q.append("g").attr("class","nv-controlsWrap"),l&&(i.width(A),R.select(".nv-legendWrap").datum(e).call(i),o.top!=i.height()&&(o.top=i.height(),O=(f||parseInt(d.style("heigh t"))||400)-o.top-o.bottom),R.select(".nv-legendWrap").attr("transform","translate(0,"+ -o.top+")"));if(h){var U=[{key:"Re-scale y-axis",disabled:!p}];s.width(140).color(["#444","#444","#444"]),R.select(".nv-controlsWrap").datum(U).attr("transform","translate(0,"+ -o.top+")").call(s)}I.attr("transform","translate("+o.left+","+o.top+")");var z=e.filter(function(e){return e.tempDisabled});I.select(".tempDisabled").remove(),z.length&&I.append("text").attr("class","tempDisabled").attr("x",A/2).attr("y","-.71em").style("text-anchor","end").text(z.map(function(e){return e.key}).join(", ")+" values cannot be calculated for this time period."),q.select(".nv-background").append("rect"),R.select(".nv-background rect").attr("width",A).attr("height",O),t.y(function(e){return e.display.y}).width(A).height(O).color(e.map(function(e,t){return e.color||u(e,t)}).filter(function(t,n){return!e[n].disabled&&!e[n].tempDisabled}));var W=R.select(".nv-linesWrap").datum(e.filter(function(e){return!e 
.disabled&&!e.tempDisabled}));W.call(t),e.forEach(function(e,t){e.seriesIndex=t});var X=e.filter(function(e){return!e.disabled&&!!E(e)}),V=R.select(".nv-avgLinesWrap").selectAll("line").data(X,function(e){return e.key});V.enter().append("line").style("stroke-width",2).style("stroke-dasharray","10,10").style("stroke",function(e,n){return t.color()(e,e.seriesIndex)}).attr("x1",0).attr("x2",A).attr("y1",function(e){return m(E(e))}).attr("y2",function(e){return m(E(e))}),V.attr("x1",0).attr("x2",A).attr("y1",function(e){return m(E(e))}).attr("y2",function(e){return m(E(e))}),V.exit().remove();var $=W.selectAll(".nv-indexLine").data([T]);$.enter().append("rect").attr("class","nv-indexLine").attr("width",3).attr("x",-2).attr("fill","red").attr("fill-opacity",.5).call(_),$.attr("transform",function(e){return"translate("+x(e.i)+",0)"}).attr("height",O),n.scale(v).ticks(Math.min(e[0].values.length,A/70)).tickSize(-O,0),R.select(".nv-x.nv-axis").attr("transform","translate(0,"+m.range ()[0]+")"),d3.transition(R.select(".nv-x.nv-axis")).call(n),r.scale(m).ticks(O/36).tickSize(-A,0),d3.transition(R.select(".nv-y.nv-axis")).call(r),R.select(".nv-background rect").on("click",function(){T.x=d3.mouse(this)[0],T.i=Math.round(x.invert(T.x)),y.index=T.i,S.stateChange(y),J()}),t.dispatch.on("elementClick",function(e){T.i=e.pointIndex,T.x=x(T.i),y.index=T.i,S.stateChange(y),J()}),s.dispatch.on("legendClick",function(e,t){e.disabled=!e.disabled,p=!e.disabled,y.rescaleY=p,S.stateChange(y),C.update()}),i.dispatch.on("legendClick",function(t,n){t.disabled=!t.disabled,e.filter(function(e){return!e.disabled}).length||e.map(function(e){return e.disabled=!1,I.selectAll(".nv-series").classed("disabled",!1),e}),y.disabled=e.map(function(e){return!!e.disabled}),S.stateChange(y),C.update()}),i.dispatch.on("legendDblclick",function(t){e.forEach(function(e){e.disabled=!0}),t.disabled=!1,y.disabled=e.map(function(e){return!!e.disabled}),S.stateChange(y),C.update()}),S.on("tooltipS 
how",function(e){c&&N(e,L.parentNode)}),S.on("changeState",function(t){typeof t.disabled!="undefined"&&(e.forEach(function(e,n){e.disabled=t.disabled[n]}),y.disabled=t.disabled),typeof t.index!="undefined"&&(T.i=t.index,T.x=x(T.i),y.index=t.index,$.data([T])),typeof t.rescaleY!="undefined"&&(p=t.rescaleY),C.update()})}),C}function k(e,n){return n.map(function(n,r){if(!n.values)return n;var i=t.y()(n.values[e],e);return i<-0.95?(n.tempDisabled=!0,n):(n.tempDisabled=!1,n.values=n.values.map(function(e,n){return e.display={y:(t.y()(e,n)-i)/(1+i)},e}),n)})}var t=e.models.line(),n=e.models.axis(),r=e.models.axis(),i=e.models.legend(),s=e.models.legend(),o={top:30,right:30,bottom:50,left:60},u=e.utils.defaultColor(),a=null,f=null,l=!0,c=!0,h=!0,p=!0,d=function(e,t,n,r,i){return"<h3>"+e+"</h3>"+"<p>"+n+" at "+t+"</p>"},v,m,g=t.id(),y={index:0,rescaleY:p},b=null,w="No Data Available.",E=function(e){return e.average},S=d3.dispatch("tooltipShow","tooltipHide","stateChange","changeStat e");n.orient("bottom").tickPadding(7),r.orient("left");var x=d3.scale.linear(),T={i:0,x:0},N=function(i,s){var o=i.pos[0]+(s.offsetLeft||0),u=i.pos[1]+(s.offsetTop||0),a=n.tickFormat()(t.x()(i.point,i.pointIndex)),f=r.tickFormat()(t.y()(i.point,i.pointIndex)),l=d(i.series.key,a,f,i,C);e.tooltip.show([o,u],l,null,null,s)};return t.dispatch.on("elementMouseover.tooltip",function(e){e.pos=[e.pos[0]+o.left,e.pos[1]+o.top],S.tooltipShow(e)}),t.dispatch.on("elementMouseout.tooltip",function(e){S.tooltipHide(e)}),S.on("tooltipHide",function(){c&&e.tooltip.cleanup()}),C.dispatch=S,C.lines=t,C.legend=i,C.xAxis=n,C.yAxis=r,d3.rebind(C,t,"defined","isArea","x","y","size","xDomain","yDomain","forceX","forceY","interactive","clipEdge","clipVoronoi","id"),C.margin=function(e){return arguments.length?(o.top=typeof e.top!="undefined"?e.top:o.top,o.right=typeof e.right!="undefined"?e.right:o.right,o.bottom=typeof e.bottom!="undefined"?e.bottom:o.bottom,o.left=typeof e.left!="undefined"?e.lef 
t:o.left,C):o},C.width=function(e){return arguments.length?(a=e,C):a},C.height=function(e){return arguments.length?(f=e,C):f},C.color=function(t){return arguments.length?(u=e.utils.getColor(t),i.color(u),C):u},C.rescaleY=function(e){return arguments.length?(p=e,p):p},C.showControls=function(e){return arguments.length?(h=e,C):h},C.showLegend=function(e){return arguments.length?(l=e,C):l},C.tooltips=function(e){return arguments.length?(c=e,C):c},C.tooltipContent=function(e){return arguments.length?(d=e,C):d},C.state=function(e){return arguments.length?(y=e,C):y},C.defaultState=function(e){return arguments.length?(b=e,C):b},C.noData=function(e){return arguments.length?(w=e,C):w},C.average=function(e){return arguments.length?(E=e,C):E},C},e.models.discreteBar=function(){function b(e){return e.each(function(e){var i=n-t.left-t.right,b=r-t.top-t.bottom,w=d3.select(this);e=e.map(function(e,t){return e.values=e.values.map(function(e){return e.series=t,e}),e});var E=p&&d?[]:e.map(fun ction(e){return e.values.map(function(e,t){return{x:u(e,t),y:a(e,t),y0:e.y0}})});s.domain(p||d3.merge(E).map(function(e){return e.x})).rangeBands([0,i],.1),o.domain(d||d3.extent(d3.merge(E).map(function(e){return e.y}).concat(f))),c?o.range([b-(o.domain()[0]<0?12:0),o.domain()[1]>0?12:0]):o.range([b,0]),g=g||s,y=y||o.copy().range([o(0),o(0)]);var S=w.selectAll("g.nv-wrap.nv-discretebar").data([e]),T=S.enter().append("g").attr("class","nvd3 nv-wrap nv-discretebar"),N=T.append("g"),C=S.select("g");N.append("g").attr("class","nv-groups"),S.attr("transform","translate("+t.left+","+t.top+")");var k=S.select(".nv-groups").selectAll(".nv-group").data(function(e){return e},function(e){return e.key});k.enter().append("g").style("stroke-opacity",1e-6).style("fill-opacity",1e-6),d3.transition(k.exit()).style("stroke-opacity",1e-6).style("fill-opacity",1e-6).remove(),k.attr("class",function(e,t){return"nv-group nv-series-"+t}).classed("hover",function(e){return e.hover}),d3.transition(k 
).style("stroke-opacity",1).style("fill-opacity",.75);var L=k.selectAll("g.nv-bar").data(function(e){return e.values});L.exit().remove();var A=L.enter().append("g").attr("transform",function(e,t,n){return"translate("+(s(u(e,t))+s.rangeBand()*.05)+", "+o(0)+")"}).on("mouseover",function(t,n){d3.select(this).classed("hover",!0),v.elementMouseover({value:a(t,n),point:t,series:e[t.series],pos:[s(u(t,n))+s.rangeBand()*(t.series+.5)/e.length,o(a(t,n))],pointIndex:n,seriesIndex:t.series,e:d3.event})}).on("mouseout",function(t,n){d3.select(this).classed("hover",!1),v.elementMouseout({value:a(t,n),point:t,series:e[t.series],pointIndex:n,seriesIndex:t.series,e:d3.event})}).on("click",function(t,n){v.elementClick({value:a(t,n),point:t,series:e[t.series],pos:[s(u(t,n))+s.rangeBand()*(t.series+.5)/e.length,o(a(t,n))],pointIndex:n,seriesIndex:t.series,e:d3.event}),d3.event.stopPropagation()}).on("dblclick",function(t,n){v.elementDblClick({value:a(t,n),point:t,series:e[t.series],pos:[s(u(t ,n))+s.rangeBand()*(t.series+.5)/e.length,o(a(t,n))],pointIndex:n,seriesIndex:t.series,e:d3.event}),d3.event.stopPropagation()});A.append("rect").attr("height",0).attr("width",s.rangeBand()*.9/e.length),c?(A.append("text").attr("text-anchor","middle"),L.select("text").attr("x",s.rangeBand()*.9/2).attr("y",function(e,t){return a(e,t)<0?o(a(e,t))-o(0)+12:-4}).text(function(e,t){return h(a(e,t))})):L.selectAll("text").remove(),L.attr("class",function(e,t){return a(e,t)<0?"nv-bar negative":"nv-bar positive"}).style("fill",function(e,t){return e.color||l(e,t)}).style("stroke",function(e,t){return e.color||l(e,t)}).select("rect").attr("class",m).attr("width",s.rangeBand()*.9/e.length),d3.transition(L).attr("transform",function(e,t){var n=s(u(e,t))+s.rangeBand()*.05,r=a(e,t)<0?o(0):o(0)-o(a(e,t))<1?o(0)-1:o(a(e,t));return"translate("+n+", "+r+")"}).select("rect").attr("height",function(e,t){return Math.max(Math.abs(o(a(e,t))-o(0))||1)}),g=s.copy(),y=o.copy()}),b}var t={top:0,right: 
0,bottom:0,left:0},n=960,r=500,i=Math.floor(Math.random()*1e4),s=d3.scale.ordinal(),o=d3.scale.linear(),u=function(e){return e.x},a=function(e){return e.y},f=[0],l=e.utils.defaultColor(),c=!1,h=d3.format(",.2f"),p,d,v=d3.dispatch("chartClick","elementClick","elementDblClick","elementMouseover","elementMouseout"),m="discreteBar",g,y;return b.dispatch=v,b.x=function(e){return arguments.length?(u=e,b):u},b.y=function(e){return arguments.length?(a=e,b):a},b.margin=function(e){return arguments.length?(t.top=typeof e.top!="undefined"?e.top:t.top,t.right=typeof e.right!="undefined"?e.right:t.right,t.bottom=typeof e.bottom!="undefined"?e.bottom:t.bottom,t.left=typeof e.left!="undefined"?e.left:t.left,b):t},b.width=function(e){return arguments.length?(n=e,b):n},b.height=function(e){return arguments.length?(r=e,b):r},b.xScale=function(e){return arguments.length?(s=e,b):s},b.yScale=function(e){return arguments.length?(o=e,b):o},b.xDomain=function(e){return arguments.length?(p=e,b):p},b .yDomain=function(e){return arguments.length?(d=e,b):d},b.forceY=function(e){return arguments.length?(f=e,b):f},b.color=function(t){return arguments.length?(l=e.utils.getColor(t),b):l},b.id=function(e){return arguments.length?(i=e,b):i},b.showValues=function(e){return arguments.length?(c=e,b):c},b.valueFormat=function(e){return arguments.length?(h=e,b):h},b.rectClass=function(e){return arguments.length?(m=e,b):m},b},e.models.discreteBarChart=function(){function m(e){return e.each(function(e){var u=d3.select(this),l=this,g=(s||parseInt(u.style("width"))||960)-i.left-i.right,b=(o||parseInt(u.style("height"))||400)-i.top-i.bottom;m.update=function(){d.beforeUpdate(),u.transition().call(m)},m.container=this;if(!e||!e.length||!e.filter(function(e){return e.values.length}).length){var w=u.selectAll(".nv-noData").data([p]);return w.enter().append("text").attr("class","nvd3 nv-noData").attr("dy","-.7em").style("text-anchor","middle"),w.attr("x",i.left+g/2).attr("y",i.top+b/2).text(f unction(e){return 
e}),m}u.selectAll(".nv-noData").remove(),c=t.xScale(),h=t.yScale();var E=u.selectAll("g.nv-wrap.nv-discreteBarWithAxes").data([e]),S=E.enter().append("g").attr("class","nvd3 nv-wrap nv-discreteBarWithAxes").append("g"),T=S.append("defs"),N=E.select("g");S.append("g").attr("class","nv-x nv-axis"),S.append("g").attr("class","nv-y nv-axis"),S.append("g").attr("class","nv-barsWrap"),N.attr("transform","translate("+i.left+","+i.top+")"),t.width(g).height(b);var C=N.select(".nv-barsWrap").datum(e.filter(function(e){return!e.disabled}));d3.transition(C).call(t),T.append("clipPath").attr("id","nv-x-label-clip-"+t.id()).append("rect"),N.select("#nv-x-label-clip-"+t.id()+" rect").attr("width",c.rangeBand()*(a?2:1)).attr("height",16).attr("x",-c.rangeBand()/(a?1:2)),n.scale(c).ticks(g/100).tickSize(-b,0),N.select(".nv-x.nv-axis").attr("transform","translate(0,"+(h.range()[0]+(t.showValues()&&h.domain()[0]<0?16:0))+")"),N.select(".nv-x.nv-axis").transition().duration(0 ).call(n);var k=N.select(".nv-x.nv-axis").selectAll("g");a&&k.selectAll("text").attr("transform",function(e,t,n){return"translate(0,"+ -(n%2==0?"5":"17")+")"}),r.scale(h).ticks(b/36).tickSize(-g,0),d3.transition(N.select(".nv-y.nv-axis")).call(r),d.on("tooltipShow",function(e){f&&v(e,l.parentNode)})}),m}var t=e.models.discreteBar(),n=e.models.axis(),r=e.models.axis(),i={top:15,right:10,bottom:50,left:60},s=null,o=null,u=e.utils.getColor(),a=!1,f=!0,l=function(e,t,n,r,i){return"<h3>"+t+"</h3>"+"<p>"+n+"</p>"},c,h,p="No Data Available.",d=d3.dispatch("tooltipShow","tooltipHide","beforeUpdate");n.orient("bottom").highlightZero(!1).showMaxMin(!1).tickFormat(function(e){return e}),r.orient("left").tickFormat(d3.format(",.1f"));var v=function(i,s){var o=i.pos[0]+(s.offsetLeft||0),u=i.pos[1]+(s.offsetTop||0),a=n.tickFormat()(t.x()(i.point,i.pointIndex)),f=r.tickFormat()(t.y()(i.point,i.pointIndex)),c=l(i.series.key,a,f,i,m);e.tooltip.show([o,u],c,i.value<0?"n":"s",null,s)};return 
t.dispatch.on("elementMouseover.tooltip",function(e){e.pos=[e.pos[0]+i.left,e.pos[1]+i.top],d.tooltipShow(e)}),t.dispatch.on("elementMous eout.tooltip",function(e){d.tooltipHide(e)}),d.on("tooltipHide",function(){f&&e.tooltip.cleanup()}),m.dispatch=d,m.discretebar=t,m.xAxis=n,m.yAxis=r,d3.rebind(m,t,"x","y","xDomain","yDomain","forceX","forceY","id","showValues","valueFormat"),m.margin=function(e){return arguments.length?(i.top=typeof e.top!="undefined"?e.top:i.top,i.right=typeof e.right!="undefined"?e.right:i.right,i.bottom=typeof e.bottom!="undefined"?e.bottom:i.bottom,i.left=typeof e.left!="undefined"?e.left:i.left,m):i},m.width=function(e){return arguments.length?(s=e,m):s},m.height=function(e){return arguments.length?(o=e,m):o},m.color=function(n){return arguments.length?(u=e.utils.getColor(n),t.color(u),m):u},m.staggerLabels=function(e){return arguments.length?(a=e,m):a},m.tooltips=function(e){return arguments.length?(f=e,m):f},m.tooltipContent=function(e){return arguments.length?(l=e,m):l},m.noData=function(e){return arguments.length?(p=e,m):p},m},e.models.distribution=function(){function l(e){return e. 
each(function(e){var a=n-(i==="x"?t.left+t.right:t.top+t.bottom),l=i=="x"?"y":"x",c=d3.select(this);f=f||u;var h=c.selectAll("g.nv-distribution").data([e]),p=h.enter().append("g").attr("class","nvd3 nv-distribution"),d=p.append("g"),v=h.select("g");h.attr("transform","translate("+t.left+","+t.top+")");var m=v.selectAll("g.nv-dist").data(function(e){return e},function(e){return e.key});m.enter().append("g"),m.attr("class",function(e,t){return"nv-dist nv-series-"+t}).style("stroke",function(e,t){return o(e,t)});var g=m.selectAll("line.nv-dist"+i).data(function(e){return e.values});g.enter().append("line").attr(i+"1",function(e,t){return f(s(e,t))}).attr(i+"2",function(e,t){return f(s(e,t))}),d3.transition(m.exit().selectAll("line.nv-dist"+i)).attr(i+"1",function(e,t){return u(s(e,t))}).attr(i+"2",function(e,t){return u(s(e,t))}).style("stroke-opacity",0).remove(),g.attr("class",function(e,t){return"nv-dist"+i+" nv-dist"+i+"-"+t}).attr(l+"1",0).attr(l+"2",r),d3.transition(g).at tr(i+"1",function(e,t){return u(s(e,t))}).attr(i+"2",function(e,t){return u(s(e,t))}),f=u.copy()}),l}var t={top:0,right:0,bottom:0,left:0},n=400,r=8,i="x",s=function(e){return e[i]},o=e.utils.defaultColor(),u=d3.scale.linear(),a,f;return l.margin=function(e){return arguments.length?(t.top=typeof e.top!="undefined"?e.top:t.top,t.right=typeof e.right!="undefined"?e.right:t.right,t.bottom=typeof e.bottom!="undefined"?e.bottom:t.bottom,t.left=typeof e.left!="undefined"?e.left:t.left,l):t},l.width=function(e){return arguments.length?(n=e,l):n},l.axis=function(e){return arguments.length?(i=e,l):i},l.size=function(e){return arguments.length?(r=e,l):r},l.getData=function(e){return arguments.length?(s=d3.functor(e),l):s},l.scale=function(e){return arguments.length?(u=e,l):u},l.color=function(t){return arguments.length?(o=e.utils.getColor(t),l):o},l},e.models.historicalBar=function(){function g(e){return e.each(function(e){var g=n-t.left-t.right,b=r-t.top-t.bottom,w=d3.select(this);s. 
domain(d||d3.extent(e[0].values.map(u).concat(f))),c?s.range([g*.5/e[0].values.length,g*(e[0].values.length-.5)/e[0].values.length]):s.range([0,g]),o.domain(v||d3.extent(e[0].values.map(a).concat(l))).range([b,0]);if(s.domain()[0]===s.domain()[1]||o.domain()[0]===o.domain()[1])singlePoint=!0;s.domain()[0]===s.domain()[1]&&(s.domain()[0]?s.domain([s.domain()[0]-s.domain()[0]*.01,s.domain()[1]+s.domain()[1]*.01]):s.domain([-1,1])),o.domain()[0]===o.domain()[1]&&(o.domain()[0]?o.domain([o.domain()[0]+o.domain()[0]*.01,o.domain()[1]-o.domain()[1]*.01]):o.domain([-1,1]));var E=w.selectAll("g.nv-wrap.nv-bar").data([e[0].values]),S=E.enter().append("g").attr("class","nvd3 nv-wrap nv-bar"),T=S.append("defs"),N=S.append("g"),C=E.select("g");N.append("g").attr("class","nv-bars"),E.attr("transform","translate("+t.left+","+t.top+")"),w.on("click",function(e,t){m.chartClick({data:e,index:t,pos:d3.event,id:i})}),T.append("clipPath").attr("id","nv-chart-clip-path-"+i).append("rect"),E.sele ct("#nv-chart-clip-path-"+i+" rect").attr("width",g).attr("height",b),C.attr("clip-path",h?"url(#nv-chart-clip-path-"+i+")":"");var k=E.select(".nv-bars").selectAll(".nv-bar").data(function(e){return e});k.exit().remove();var L=k.enter().append("rect").attr("x",0).attr("y",function(e,t){return o(Math.max(0,a(e,t)))}).attr("height",function(e,t){return Math.abs(o(a(e,t))-o(0))}).on("mouseover",function(t,n){d3.select(this).classed("hover",!0),m.elementMouseover({point:t,series:e[0],pos:[s(u(t,n)),o(a(t,n))],pointIndex:n,seriesIndex:0,e:d3.event})}).on("mouseout",function(t,n){d3.select(this).classed("hover",!1),m.elementMouseout({point:t,series:e[0],pointIndex:n,seriesIndex:0,e:d3.event})}).on("click",function(e,t){m.elementClick({value:a(e,t),data:e,index:t,pos:[s(u(e,t)),o(a(e,t))],e:d3.event,id:i}),d3.event.stopPropagation()}).on("dblclick",function(e,t){m.elementDblClick({value:a(e,t),data:e,index:t,pos:[s(u(e,t)),o(a(e,t))],e:d3.event,id:i}),d3.event.stopPropagation()}); 
k.attr("fill",function(e,t){return p(e,t)}).attr("class",function(e,t,n){return(a(e,t)<0?"nv-bar negative":"nv-bar positive")+" nv-bar-"+n+"-"+t}).attr("transform",function(t,n){return"translate("+(s(u(t,n))-g/e[0].values.length*.45)+",0)"}).attr("width",g/e[0].values.length*.9),d3.transition(k).attr("y",function(e,t){return a(e,t)<0?o(0):o(0)-o(a(e,t))<1?o(0)-1:o(a(e,t))}).attr("height",function(e,t){return Math.max(Math.abs(o(a(e,t))-o(0)),1)})}),g}var t={top:0,right:0,bottom:0,left:0},n=960,r=500,i=Math.floor(Math.random()*1e4),s=d3.scale.linear(),o=d3.scale.linear(),u=function(e){return e.x},a=function(e){return e.y},f=[],l=[0],c=!1,h=!0,p=e.utils.defaultColor(),d,v,m=d3.dispatch("chartClick","elementClick","elementDblClick","elementMouseover","elementMouseout");return g.dispatch=m,g.x=function(e){return arguments.length?(u=e,g):u},g.y=function(e){return arguments.length?(a=e,g):a},g.margin=function(e){return arguments.length?(t.top=typeof e.top!="undefined"?e.top:t.top, t.right=typeof e.right!="undefined"?e.right:t.right,t.bottom=typeof e.bottom!="undefined"?e.bottom:t.bottom,t.left=typeof e.left!="undefined"?e.left:t.left,g):t},g.width=function(e){return arguments.length?(n=e,g):n},g.height=function(e){return arguments.length?(r=e,g):r},g.xScale=function(e){return arguments.length?(s=e,g):s},g.yScale=function(e){return arguments.length?(o=e,g):o},g.xDomain=function(e){return arguments.length?(d=e,g):d},g.yDomain=function(e){return arguments.length?(v=e,g):v},g.forceX=function(e){return arguments.length?(f=e,g):f},g.forceY=function(e){return arguments.length?(l=e,g):l},g.padData=function(e){return arguments.length?(c=e,g):c},g.clipEdge=function(e){return arguments.length?(h=e,g):h},g.color=function(t){return arguments.length?(p=e.utils.getColor(t),g):p},g.id=function(e){return arguments.length?(i=e,g):i},g},e.models.historicalBarChart=function(){function S(e){return e.each(function(d){var T=d3.select(this),N=this,C=(u||parseInt(T.style("wid 
th"))||960)-s.left-s.right,k=(a||parseInt(T.style("height"))||400)-s.top-s.bottom;S.update=function(){S(e)},S.container=this,g.disabled=d.map(function(e){return!!e.disabled});if(!y){var L;y={};for(L in g)g[L]instanceof Array?y[L]=g[L].slice(0):y[L]=g[L]}if(!d||!d.length||!d.filter(function(e){return e.values.length}).length){var A=T.selectAll(".nv-noData").data([b]);return A.enter().append("text").attr("class","nvd3 nv-noData").attr("dy","-.7em").style("text-anchor","middle"),A.attr("x",s.left+C/2).attr("y",s.top+k/2).text(function(e){return e}),S}T.selectAll(".nv-noData").remove(),v=t.xScale(),m=t.yScale();var O=T.selectAll("g.nv-wrap.nv-lineChart").data([d]),M=O.enter().append("g").attr("class","nvd3 nv-wrap nv-lineChart").append("g"),_=O.select("g");M.append("g").attr("class","nv-x nv-axis"),M.append("g").attr("class","nv-y nv-axis"),M.append("g").attr("class","nv-barsWrap"),M.append("g").attr("class","nv-legendWrap"),f&&(i.width(C),_.select(".nv-legendWrap").datum(d).cal l(i),s.top!=i.height()&&(s.top=i.height(),k=(a||parseInt(T.style("height"))||400)-s.top-s.bottom),O.select(".nv-legendWrap").attr("transform","translate(0,"+ -s.top+")")),O.attr("transform","translate("+s.left+","+s.top+")"),h&&_.select(".nv-y.nv-axis").attr("transform","translate("+C+",0)"),t.width(C).height(k).color(d.map(function(e,t){return e.color||o(e,t)}).filter(function(e,t){return!d[t].disabled}));var D=_.select(".nv-barsWrap").datum(d.filter(function(e){return!e.disabled}));d3.transition(D).call(t),l&&(n.scale(v).tickSize(-k,0),_.select(".nv-x.nv-axis").attr("transform","translate(0,"+m.range()[0]+")"),_.select(".nv-x.nv-axis").transition().call(n)),c&&(r.scale(m).ticks(k/36).tickSize(-C,0),_.select(".nv-y.nv-axis").transition().duration(0).call(r)),i.dispatch.on("legendClick",function(t,n){t.disabled=!t.disabled,d.filter(function(e){return!e.disabled}).length||d.map(function(e){return e.disabled=!1,O.selectAll(".nv-series").classed("disabled",!1),e}),g.disabled=d. 
map(function(e){return!!e.disabled}),w.stateChange(g),e.transition().call(S)}),i.dispatch.on("legendDblclick",function(e){d.forEach(function(e){e.disabled=!0}),e.disabled=!1,g.disabled=d.map(function(e){return!!e.disabled}),w.stateChange(g),S.update()}),w.on("tooltipShow",function(e){p&&E(e,N.parentNode)}),w.on("changeState",function(t){typeof t.disabled!="undefined"&&(d.forEach(function(e,n){e.disabled=t.disabled[n]}),g.disabled=t.disabled),e.call(S)})}),S}var t=e.models.historicalBar(),n=e.models.axis(),r=e.models.axis(),i=e.models.legend(),s={top:30,right:90,bottom:50,left:90},o=e.utils.defaultColor(),u=null,a=null,f=!1,l=!0,c=!0,h=!1,p=!0,d=function(e,t,n,r,i){return"<h3>"+e+"</h3>"+"<p>"+n+" at "+t+"</p>"},v,m,g={},y=null,b="No Data Available.",w=d3.dispatch("tooltipShow","tooltipHide","stateChange","changeState");n.orient("bottom").tickPadding(7),r.orient(h?"right":"left");var E=function(i,s){if(s){var o=d3.select(s).select("svg"),u=o.node()?o.attr("viewBox"):null;if(u ){u=u.split(" ");var a=parseInt(o.style("width"))/u[2];i.pos[0]=i.pos[0]*a,i.pos[1]=i.pos[1]*a}}var f=i.pos[0]+(s.offsetLeft||0),l=i.pos[1]+(s.offsetTop||0),c=n.tickFormat()(t.x()(i.point,i.pointIndex)),h=r.tickFormat()(t.y()(i.point,i.pointIndex)),p=d(i.series.key,c,h,i,S);e.tooltip.show([f,l],p,null,null,s)};return t.dispatch.on("elementMouseover.tooltip",function(e){e.pos=[e.pos[0]+s.left,e.pos[1]+s.top],w.tooltipShow(e)}),t.dispatch.on("elementMouseout.tooltip",function(e){w.tooltipHide(e)}),w.on("tooltipHide",function(){p&&e.tooltip.cleanup()}),S.dispatch=w,S.bars=t,S.legend=i,S.xAxis=n,S.yAxis=r,d3.rebind(S,t,"defined","isArea","x","y","size","xScale","yScale","xDomain","yDomain","forceX","forceY","interactive","clipEdge","clipVoronoi","id","interpolate"),S.margin=function(e){return arguments.length?(s.top=typeof e.top!="undefined"?e.top:s.top,s.right=typeof e.right!="undefined"?e.right:s.right,s.bottom=typeof e.bottom!="undefined"?e.bottom:s.bottom,s.left=typeof e.lef 
t!="undefined"?e.left:s.left,S):s},S.width=function(e){return arguments.length?(u=e,S):u},S.height=function(e){return arguments.length?(a=e,S):a},S.color=function(t){return arguments.length?(o=e.utils.getColor(t),i.color(o),S):o},S.showLegend=function(e){return arguments.length?(f=e,S):f},S.showXAxis=function(e){return arguments.length?(l=e,S):l},S.showYAxis=function(e){return arguments.length?(c=e,S):c},S.rightAlignYAxis=function(e){return arguments.length?(h=e,r.orient(e?"right":"left"),S):h},S.tooltips=function(e){return arguments.length?(p=e,S):p},S.tooltipContent=function(e){return arguments.length?(d=e,S):d},S.state=function(e){return arguments.length?(g=e,S):g},S.defaultState=function(e){return arguments.length?(y=e,S):y},S.noData=function(e){return arguments.length?(b=e,S):b},S},e.models.indentedTree=function(){function m(e){return e.each(function(e){function C(e,t,n){d3.event.stopPropagation();if(d3.event.shiftKey&&!n)return d3.event.shiftKey=!1,e.values&&e.values.f orEach(function(e){(e.values||e._values)&&C(e,0,!0)}),!0;if(!A(e))return!0;e.values?(e._values=e.values,e.values=null):(e.values=e._values,e._values=null),m.update()}function k(e){return e._values&&e._values.length?h:e.values&&e.values.length?p:""}function L(e){return e._values&&e._values.length}function A(e){var t=e.values||e._values;return t&&t.length}var t=1,n=d3.select(this),i=d3.layout.tree().children(function(e){return e.values}).size([r,f]);m.update=function(){n.transition().duration(600).call(m)},e[0]||(e[0]={key:a});var s=i.nodes(e[0]),g=d3.select(this).selectAll("div").data([[s]]),y=g.enter().append("div").attr("class","nvd3 nv-wrap nv-indentedtree"),b=y.append("table"),w=g.select("table").attr("width","100%").attr("class",c);if(o){var E=b.append("thead"),S=E.append("tr");l.forEach(function(e){S.append("th").attr("width",e.width?e.width:"10%").style("text-align",e.type=="numeric"?"right":"left").append("span").text(e.label)})}var x=w.selectAll("tbody").data(functio n(e){return 
e});x.enter().append("tbody"),t=d3.max(s,function(e){return e.depth}),i.size([r,t*f]);var T=x.selectAll("tr").data(function(e){return e.filter(function(e){return u&&!e.children?u(e):!0})},function(e,t){return e.id||e.id||++v});T.exit().remove(),T.select("img.nv-treeicon").attr("src",k).classed("folded",L);var N=T.enter().append("tr");l.forEach(function(e,t){var n=N.append("td").style("padding-left",function(e){return(t?0:e.depth*f+12+(k(e)?0:16))+"px"},"important").style("text-align",e.type=="numeric"?"right":"left");t==0&&n.append("img").classed("nv-treeicon",!0).classed("nv-folded",L).attr("src",k).style("width","14px").style("height","14px").style("padding","0 1px").style("display",function(e){return k(e)?"inline-block":"none"}).on("click",C),n.append("span").attr("class",d3.functor(e.classes)).text(function(t){return e.format?e.format(t):t[e.key]||"-"}),e.showCount&&(n.append("span").attr("class","nv-childrenCount"),T.selectAll("span.nv-childrenCount").text(f unction(e){return e.values&&e.values.length||e._values&&e._values.length?"("+(e.values&&e.values.filter(function(e){return u?u(e):!0}).length||e._values&&e._values.filter(function(e){return u?u(e):!0}).length||0)+")":""})),e.click&&n.select("span").on("click",e.click)}),T.order().on("click",function(e){d.elementClick({row:this,data:e,pos:[e.x,e.y]})}).on("dblclick",function(e){d.elementDblclick({row:this,data:e,pos:[e.x,e.y]})}).on("mouseover",function(e){d.elementMouseover({row:this,data:e,pos:[e.x,e.y]})}).on("mouseout",function(e){d.elementMouseout({row:this,data:e,pos:[e.x,e.y]})})}),m}var t={top:0,right:0,bottom:0,left:0},n=960,r=500,i=e.utils.defaultColor(),s=Math.floor(Math.random()*1e4),o=!0,u=!1,a="No Data Available.",f=20,l=[{key:"key",label:"Name",type:"text"}],c=null,h="images/grey-plus.png",p="images/grey-minus.png",d=d3.dispatch("elementClick","elementDblclick","elementMouseover","elementMouseout"),v=0;return m.margin=function(e){return arguments.length?(t.top= typeof 
e.top!="undefined"?e.top:t.top,t.right=typeof e.right!="undefined"?e.right:t.right,t.bottom=typeof e.bottom!="undefined"?e.bottom:t.bottom,t.left=typeof e.left!="undefined"?e.left:t.left,m):t},m.width=function(e){return arguments.length?(n=e,m):n},m.height=function(e){return arguments.length?(r=e,m):r},m.color=function(t){return arguments.length?(i=e.utils.getColor(t),scatter.color(i),m):i},m.id=function(e){return arguments.length?(s=e,m):s},m.header=function(e){return arguments.length?(o=e,m):o},m.noData=function(e){return arguments.length?(a=e,m):a},m.filterZero=function(e){return arguments.length?(u=e,m):u},m.columns=function(e){return arguments.length?(l=e,m):l},m.tableClass=function(e){return arguments.length?(c=e,m):c},m.iconOpen=function(e){return arguments.length?(h=e,m):h},m.iconClose=function(e){return arguments.length?(p=e,m):p},m},e.models.legend=function(){function a(f){return f.each(function(a){var f=n-t.left-t.right,l=d3.select(this),c=l.selectAll("g.nv -legend").data([a]),h=c.enter().append("g").attr("class","nvd3 nv-legend").append("g"),p=c.select("g");c.attr("transform","translate("+t.left+","+t.top+")");var d=p.selectAll(".nv-series").data(function(e){return e}),v=d.enter().append("g").attr("class","nv-series").on("mouseover",function(e,t){u.legendMouseover(e,t)}).on("mouseout",function(e,t){u.legendMouseout(e,t)}).on("click",function(e,t){u.legendClick(e,t)}).on("dblclick",function(e,t){u.legendDblclick(e,t)});v.append("circle").style("stroke-width",2).attr("r",5),v.append("text").attr("text-anchor","start").attr("dy",".32em").attr("dx","8"),d.classed("disabled",function(e){return e.disabled}),d.exit().remove(),d.select("circle").style("fill",function(e,t){return e.color||s(e,t)}).style("stroke",function(e,t){return e.color||s(e,t)}),d.select("text").text(i);if(o){var m=[];d.each(function(t,n){var r=d3.select(this).select("text"),i=r.node().getComputedTextLength()||e.utils.calcApproxTextWidth(r);m.push(i+28)});var g=0, 
y=0,b=[];while(y<f&&g<m.length)b[g]=m[g],y+=m[g++];while(y>f&&g>1){b=[],g--;for(k=0;k<m.length;k++)m[k]>(b[k%g]||0)&&(b[k%g]=m[k]);y=b.reduce(function(e,t,n,r){return e+t})}var w=[];for(var E=0,S=0;E<g;E++)w[E]=S,S+=b[E];d.attr("transform",function(e,t){return"translate("+w[t%g]+","+(5+Math.floor(t/g)*20)+")"}),p.attr("transform","translate("+(n-t.right-y)+","+t.top+")"),r=t.top+t.bottom+Math.ceil(m.length/g)*20}else{var x=5,T=5,N=0,C;d.attr("transform",function(e,r){var i=d3.select(this).select("text").node().getComputedTextLength()+28;return C=T,n<t.left+t.right+C+i&&(T=C=5,x+=20),T+=i,T>N&&(N=T),"translate("+C+","+x+")"}),p.attr("transform","translate("+(n-t.right-N)+","+t.top+")"),r=t.top+t.bottom+x+15}}),a}var t={top:5,right:0,bottom:5,left:0},n=400,r=20,i=function(e){return e.key},s=e.utils.defaultColor(),o=!0,u=d3.dispatch("legendClick","legendDblclick","legendMouseover","legendMouseout");return a.dispatch=u,a.margin=function(e){return arguments.length?(t.top=typeof e .top!="undefined"?e.top:t.top,t.right=typeof e.right!="undefined"?e.right:t.right,t.bottom=typeof e.bottom!="undefined"?e.bottom:t.bottom,t.left=typeof e.left!="undefined"?e.left:t.left,a):t},a.width=function(e){return arguments.length?(n=e,a):n},a.height=function(e){return arguments.length?(r=e,a):r},a.key=function(e){return arguments.length?(i=e,a):i},a.color=function(t){return arguments.length?(s=e.utils.getColor(t),a):s},a.align=function(e){return arguments.length?(o=e,a):o},a},e.models.line=function(){function m(e){return e.each(function(e){var m=r-n.left-n.right,g=i-n.top-n.bottom,b=d3.select(this);c=t.xScale(),h=t.yScale(),d=d||c,v=v||h;var w=b.selectAll("g.nv-wrap.nv-line").data([e]),E=w.enter().append("g").attr("class","nvd3 nv-wrap nv-line"),S=E.append("defs"),T=E.append("g"),N=w.select("g");T.append("g").attr("class","nv-groups"),T.append("g").attr("class","nv-scatterWrap"),w.attr("transform","translate("+n.left+","+n.top+")"),t.width(m).height(g);var C=w.select(" 
.nv-scatterWrap");d3.transition(C).call(t),S.append("clipPath").attr("id","nv-edge-clip-"+t.id()).append("rect"),w.select("#nv-edge-clip-"+t.id()+" rect").attr("width",m).attr("height",g),N.attr("clip-path",l?"url(#nv-edge-clip-"+t.id()+")":""),C.attr("clip-path",l?"url(#nv-edge-clip-"+t.id()+")":"");var k=w.select(".nv-groups").selectAll(".nv-group").data(function(e){return e},function(e){return e.key});k.enter().append("g").style("stroke-opacity",1e-6).style("fill-opacity",1e-6),d3.transition(k.exit()).style("stroke-opacity",1e-6).style("fill-opacity",1e-6).remove(),k.attr("class",function(e,t){return"nv-group nv-series-"+t}).classed("hover",function(e){return e.hover}).style("fill",function(e,t){return s(e,t)}).style("stroke",function(e,t){return s(e,t)}),d3.transition(k).style("stroke-opacity",1).style("fill-opacity",.5);var L=k.selectAll("path.nv-area").data(function(e){return f(e)?[e]:[]});L.enter().append("path").attr("class","nv-area").attr("d",function(e){return d3. svg.area().interpolate(p).defined(a).x(function(e,t){return d(o(e,t))}).y0(function(e,t){return v(u(e,t))}).y1(function(e,t){return v(h.domain()[0]<=0?h.domain()[1]>=0?0:h.domain()[1]:h.domain()[0])}).apply(this,[e.values])}),d3.transition(k.exit().selectAll("path.nv-area")).attr("d",function(e){return d3.svg.area().interpolate(p).defined(a).x(function(e,t){return c(o(e,t))}).y0(function(e,t){return h(u(e,t))}).y1(function(e,t){return h(h.domain()[0]<=0?h.domain()[1]>=0?0:h.domain()[1]:h.domain()[0])}).apply(this,[e.values])}),d3.transition(L).attr("d",function(e){return d3.svg.area().interpolate(p).defined(a).x(function(e,t){return c(o(e,t))}).y0(function(e,t){return h(u(e,t))}).y1(function(e,t){return h(h.domain()[0]<=0?h.domain()[1]>=0?0:h.domain()[1]:h.domain()[0])}).apply(this,[e.values])});var A=k.selectAll("path.nv-line").data(function(e){return[e.values]});A.enter().append("path").attr("class","nv-line").attr("d",d3.svg.line().interpolate(p).defined(a).x(function(e,t ){return 
d(o(e,t))}).y(function(e,t){return v(u(e,t))})),d3.transition(k.exit().selectAll("path.nv-line")).attr("d",d3.svg.line().interpolate(p).defined(a).x(function(e,t){return c(o(e,t))}).y(function(e,t){return h(u(e,t))})),d3.transition(A).attr("d",d3.svg.line().interpolate(p).defined(a).x(function(e,t){return c(o(e,t))}).y(function(e,t){return h(u(e,t))})),d=c.copy(),v=h.copy()}),m}var t=e.models.scatter(),n={top:0,right:0,bottom:0,left:0},r=960,i=500,s=e.utils.defaultColor(),o=function(e){return e.x},u=function(e){return e.y},a=function(e,t){return!isNaN(u(e,t))&&u(e,t)!==null},f=function(e){return e.area},l=!1,c,h,p="linear";t.size(16).sizeDomain([16,256]);var d,v;return m.dispatch=t.dispatch,m.scatter=t,d3.rebind(m,t,"id","interactive","size","xScale","yScale","zScale","xDomain","yDomain","sizeDomain","forceX","forceY","forceSize","clipVoronoi","clipRadius","padData"),m.margin=function(e){return arguments.length?(n.top=typeof e.top!="undefined"?e.top:n.top,n.right=ty peof e.right!="undefined"?e.right:n.right,n.bottom=typeof e.bottom!="undefined"?e.bottom:n.bottom,n.left=typeof e.left!="undefined"?e.left:n.left,m):n},m.width=function(e){return arguments.length?(r=e,m):r},m.height=function(e){return arguments.length?(i=e,m):i},m.x=function(e){return arguments.length?(o=e,t.x(e),m):o},m.y=function(e){return arguments.length?(u=e,t.y(e),m):u},m.clipEdge=function(e){return arguments.length?(l=e,m):l},m.color=function(n){return arguments.length?(s=e.utils.getColor(n),t.color(s),m):s},m.interpolate=function(e){return arguments.length?(p=e,m):p},m.defined=function(e){return arguments.length?(a=e,m):a},m.isArea=function(e){return arguments.length?(f=d3.functor(e),m):f},m},e.models.lineChart=function(){function S(e){return e.each(function(e){var d=d3.select(this),T=this,N=(u||parseInt(d.style("width"))||960)-s.left-s.right,C=(a||parseInt(d.style("height"))||400)-s.top-s.bottom;S.update=function(){d.transition().call(S)},S.container=this,g.disabled 
=e.map(function(e){return!!e.disabled});if(!y){var k;y={};for(k in g)g[k]instanceof Array?y[k]=g[k].slice(0):y[k]=g[k]}if(!e||!e.length||!e.filter(function(e){return e.values.length}).length){var L=d.selectAll(".nv-noData").data([b]);return L.enter().append("text").attr("class","nvd3 nv-noData").attr("dy","-.7em").style("text-anchor","middle"),L.attr("x",s.left+N/2).attr("y",s.top+C/2).text(function(e){return e}),S}d.selectAll(".nv-noData").remove(),v=t.xScale(),m=t.yScale();var A=d.selectAll("g.nv-wrap.nv-lineChart").data([e]),O=A.enter().append("g").attr("class","nvd3 nv-wrap nv-lineChart").append("g"),M=A.select("g");O.append("g").attr("class","nv-x nv-axis"),O.append("g").attr("class","nv-y nv-axis"),O.append("g").attr("class","nv-linesWrap"),O.append("g").attr("class","nv-legendWrap"),f&&(i.width(N),M.select(".nv-legendWrap").datum(e).call(i),s.top!=i.height()&&(s.top=i.height(),C=(a||parseInt(d.style("height"))||400)-s.top-s.bottom),A.select(".nv-legendWrap").attr("tra nsform","translate(0,"+ -s.top+")")),A.attr("transform","translate("+s.left+","+s.top+")"),h&&M.select(".nv-y.nv-axis").attr("transform","translate("+N+",0)"),t.width(N).height(C).color(e.map(function(e,t){return e.color||o(e,t)}).filter(function(t,n){return!e[n].disabled}));var _=M.select(".nv-linesWrap").datum(e.filter(function(e){return!e.disabled}));d3.transition(_).call(t),l&&(n.scale(v).ticks(N/100).tickSize(-C,0),M.select(".nv-x.nv-axis").attr("transform","translate(0,"+m.range()[0]+")"),d3.transition(M.select(".nv-x.nv-axis")).call(n)),c&&(r.scale(m).ticks(C/36).tickSize(-N,0),d3.transition(M.select(".nv-y.nv-axis")).call(r)),i.dispatch.on("legendClick",function(t,n){t.disabled=!t.disabled,e.filter(function(e){return!e.disabled}).length||e.map(function(e){return e.disabled=!1,A.selectAll(".nv-series").classed("disabled",!1),e}),g.disabled=e.map(function(e){return!!e.disabled}),w.stateChange(g),S.update()}),i.dispatch.on("legendDblclick",function(t){e.forEach(function 
(e){e.disabled=!0}),t.disabled=!1,g.disabled=e.map(function(e){return!!e.disabled}),w.stateChange(g),S.update()}),w.on("tooltipShow",function(e){p&&E(e,T.parentNode)}),w.on("changeState",function(t){typeof t.disabled!="undefined"&&(e.forEach(function(e,n){e.disabled=t.disabled[n]}),g.disabled=t.disabled),S.update()})}),S}var t=e.models.line(),n=e.models.axis(),r=e.models.axis(),i=e.models.legend(),s={top:30,right:20,bottom:50,left:60},o=e.utils.defaultColor(),u=null,a=null,f=!0,l=!0,c=!0,h=!1,p=!0,d=function(e,t,n,r,i){return"<h3>"+e+"</h3>"+"<p>"+n+" at "+t+"</p>"},v,m,g={},y=null,b="No Data Available.",w=d3.dispatch("tooltipShow","tooltipHide","stateChange","changeState");n.orient("bottom").tickPadding(7),r.orient(h?"right":"left");var E=function(i,s){if(s){var o=d3.select(s).select("svg"),u=o.node()?o.attr("viewBox"):null;if(u){u=u.split(" ");var a=parseInt(o.style("width"))/u[2];i.pos[0]=i.pos[0]*a,i.pos[1]=i.pos[1]*a}}var f=i.pos[0]+(s.offsetLeft||0),l=i.pos[1]+(s.offse tTop||0),c=n.tickFormat()(t.x()(i.point,i.pointIndex)),h=r.tickFormat()(t.y()(i.point,i.pointIndex)),p=d(i.series.key,c,h,i,S);e.tooltip.show([f,l],p,null,null,s)};return t.dispatch.on("elementMouseover.tooltip",function(e){e.pos=[e.pos[0]+s.left,e.pos[1]+s.top],w.tooltipShow(e)}),t.dispatch.on("elementMouseout.tooltip",function(e){w.tooltipHide(e)}),w.on("tooltipHide",function(){p&&e.tooltip.cleanup()}),S.dispatch=w,S.lines=t,S.legend=i,S.xAxis=n,S.yAxis=r,d3.rebind(S,t,"defined","isArea","x","y","size","xScale","yScale","xDomain","yDomain","forceX","forceY","interactive","clipEdge","clipVoronoi","id","interpolate"),S.margin=function(e){return arguments.length?(s.top=typeof e.top!="undefined"?e.top:s.top,s.right=typeof e.right!="undefined"?e.right:s.right,s.bottom=typeof e.bottom!="undefined"?e.bottom:s.bottom,s.left=typeof e.left!="undefined"?e.left:s.left,S):s},S.width=function(e){return arguments.length?(u=e,S):u},S.height=function(e){return arguments.length?(a=e,S):a},S 
.color=function(t){return arguments.length?(o=e.utils.getColor(t),i.color(o),S):o},S.showLegend=function(e){return arguments.length?(f=e,S):f},S.showXAxis=function(e){return arguments.length?(l=e,S):l},S.showYAxis=function(e){return arguments.length?(c=e,S):c},S.rightAlignYAxis=function(e){return arguments.length?(h=e,r.orient(e?"right":"left"),S):h},S.tooltips=function(e){return arguments.length?(p=e,S):p},S.tooltipContent=function(e){return arguments.length?(d=e,S):d},S.state=function(e){return arguments.length?(g=e,S):g},S.defaultState=function(e){return arguments.length?(y=e,S):y},S.noData=function(e){return arguments.length?(b=e,S):b},S},e.models.linePlusBarChart=function(){function T(e){return e.each(function(e){var l=d3.select(this),c=this,v=(a||parseInt(l.style("width"))||960)-u.left-u.right,N=(f||parseInt(l.style("height"))||400)-u.top-u.bottom;T.update=function(){l.transition().call(T)},b.disabled=e.map(function(e){return!!e.disabled});if(!w){var C;w={};for(C in b) b[C]instanceof Array?w[C]=b[C].slice(0):w[C]=b[C]}if(!e||!e.length||!e.filter(function(e){return e.values.length}).length){var k=l.selectAll(".nv-noData").data([E]);return k.enter().append("text").attr("class","nvd3 nv-noData").attr("dy","-.7em").style("text-anchor","middle"),k.attr("x",u.left+v/2).attr("y",u.top+N/2).text(function(e){return e}),T}l.selectAll(".nv-noData").remove();var L=e.filter(function(e){return!e.disabled&&e.bar}),A=e.filter(function(e){return!e.bar});m=A.filter(function(e){return!e.disabled}).length&&A.filter(function(e){return!e.disabled})[0].values.length?t.xScale():n.xScale(),g=n.yScale(),y=t.yScale();var O=d3.select(this).selectAll("g.nv-wrap.nv-linePlusBar").data([e]),M=O.enter().append("g").attr("class","nvd3 nv-wrap nv-linePlusBar").append("g"),_=O.select("g");M.append("g").attr("class","nv-x nv-axis"),M.append("g").attr("class","nv-y1 nv-axis"),M.append("g").attr("class","nv-y2 nv-axis"),M.append("g").attr("class","nv-barsWrap"),M.append("g").at 
tr("class","nv-linesWrap"),M.append("g").attr("class","nv-legendWrap"),p&&(o.width(v/2),_.select(".nv-legendWrap").datum(e.map(function(e){return e.originalKey=e.originalKey===undefined?e.key:e.originalKey,e.key=e.originalKey+(e.bar?" (left axis)":" (right axis)"),e})).call(o),u.top!=o.height()&&(u.top=o.height(),N=(f||parseInt(l.style("height"))||400)-u.top-u.bottom),_.select(".nv-legendWrap").attr("transform","translate("+v/2+","+ -u.top+")")),O.attr("transform","translate("+u.left+","+u.top+")"),t.width(v).height(N).color(e.map(function(e,t){return e.color||h(e,t)}).filter(function(t,n){return!e[n].disabled&&!e[n].bar})),n.width(v).height(N).color(e.map(function(e,t){return e.color||h(e,t)}).filter(function(t,n){return!e[n].disabled&&e[n].bar}));var D=_.select(".nv-barsWrap").datum(L.length?L:[{values:[]}]),P=_.select(".nv-linesWrap").datum(A[0]&&!A[0].disabled?A:[{values:[]}]);d3.transition(D).call(n),d3.transition(P).call(t),r.scale(m).ticks(v/100).tickSize(-N,0),_.sele ct(".nv-x.nv-axis").attr("transform","translate(0,"+g.range()[0]+")"),d3.transition(_.select(".nv-x.nv-axis")).call(r),i.scale(g).ticks(N/36).tickSize(-v,0),d3.transition(_.select(".nv-y1.nv-axis")).style("opacity",L.length?1:0).call(i),s.scale(y).ticks(N/36).tickSize(L.length?0:-v,0),_.select(".nv-y2.nv-axis").style("opacity",A.length?1:0).attr("transform","translate("+v+",0)"),d3.transition(_.select(".nv-y2.nv-axis")).call(s),o.dispatch.on("legendClick",function(t,n){t.disabled=!t.disabled,e.filter(function(e){return!e.disabled}).length||e.map(function(e){return e.disabled=!1,O.selectAll(".nv-series").classed("disabled",!1),e}),b.disabled=e.map(function(e){return!!e.disabled}),S.stateChange(b),T.update()}),o.dispatch.on("legendDblclick",function(t){e.forEach(function(e){e.disabled=!0}),t.disabled=!1,b.disabled=e.map(function(e){return!!e.disabled}),S.stateChange(b),T.update()}),S.on("tooltipShow",function(e){d&&x(e,c.parentNode)}),S.on("changeState",function(t){typeof t.di 
sabled!="undefined"&&(e.forEach(function(e,n){e.disabled=t.disabled[n]}),b.disabled=t.disabled),T.update()})}),T}var t=e.models.line(),n=e.models.historicalBar(),r=e.models.axis(),i=e.models.axis(),s=e.models.axis(),o=e.models.legend(),u={top:30,right:60,bottom:50,left:60},a=null,f=null,l=function(e){return e.x},c=function(e){return e.y},h=e.utils.defaultColor(),p=!0,d=!0,v=function(e,t,n,r,i){return"<h3>"+e+"</h3>"+"<p>"+n+" at "+t+"</p>"},m,g,y,b={},w=null,E="No Data Available.",S=d3.dispatch("tooltipShow","tooltipHide","stateChange","changeState");n.padData(!0),t.clipEdge(!1).padData(!0),r.orient("bottom").tickPadding(7).highlightZero(!1),i.orient("left"),s.orient("right");var x=function(n,o){var u=n.pos[0]+(o.offsetLeft||0),a=n.pos[1]+(o.offsetTop||0),f=r.tickFormat()(t.x()(n.point,n.pointIndex)),l=(n.series.bar?i:s).tickFormat()(t.y()(n.point,n.pointIndex)),c=v(n.series.key,f,l,n,T);e.tooltip.show([u,a],c,n.value<0?"n":"s",null,o)};return t.dispatch.on("elementMouseover .tooltip",function(e){e.pos=[e.pos[0]+u.left,e.pos[1]+u.top],S.tooltipShow(e)}),t.dispatch.on("elementMouseout.tooltip",function(e){ -S.tooltipHide(e)}),n.dispatch.on("elementMouseover.tooltip",function(e){e.pos=[e.pos[0]+u.left,e.pos[1]+u.top],S.tooltipShow(e)}),n.dispatch.on("elementMouseout.tooltip",function(e){S.tooltipHide(e)}),S.on("tooltipHide",function(){d&&e.tooltip.cleanup()}),T.dispatch=S,T.legend=o,T.lines=t,T.bars=n,T.xAxis=r,T.y1Axis=i,T.y2Axis=s,d3.rebind(T,t,"defined","size","clipVoronoi","interpolate"),T.x=function(e){return arguments.length?(l=e,t.x(e),n.x(e),T):l},T.y=function(e){return arguments.length?(c=e,t.y(e),n.y(e),T):c},T.margin=function(e){return arguments.length?(u.top=typeof e.top!="undefined"?e.top:u.top,u.right=typeof e.right!="undefined"?e.right:u.right,u.bottom=typeof e.bottom!="undefined"?e.bottom:u.bottom,u.left=typeof e.left!="undefined"?e.left:u.left,T):u},T.width=function(e){return arguments.length?(a=e,T):a},T.height=function(e){return 
arguments.length?(f=e,T):f},T.color=function(t){return arguments.length?(h=e.utils.getColor(t),o.color(h),T):h},T.showLegend=function (e){return arguments.length?(p=e,T):p},T.tooltips=function(e){return arguments.length?(d=e,T):d},T.tooltipContent=function(e){return arguments.length?(v=e,T):v},T.state=function(e){return arguments.length?(b=e,T):b},T.defaultState=function(e){return arguments.length?(w=e,T):w},T.noData=function(e){return arguments.length?(E=e,T):E},T},e.models.lineWithFocusChart=function(){function C(e){return e.each(function(e){function q(e){var t=+(e=="e"),n=t?1:-1,r=O/3;return"M"+.5*n+","+r+"A6,6 0 0 "+t+" "+6.5*n+","+(r+6)+"V"+(2*r-6)+"A6,6 0 0 "+t+" "+.5*n+","+2*r+"Z"+"M"+2.5*n+","+(r+8)+"V"+(2*r-8)+"M"+4.5*n+","+(r+8)+"V"+(2*r-8)}function R(){a.empty()||a.extent(w),F.data([a.empty()?g.domain():w]).each(function(e,t){var n=g(e[0])-v.range()[0],r=v.range()[1]-g(e[1]);d3.select(this).select(".left").attr("width",n<0?0:n),d3.select(this).select(".right").attr("x",g(e[1])).attr("width",r<0?0:r)})}function U(){w=a.empty()?null:a.extent(),extent=a.empty()?g.domain():a.extent(),T.brush({extent :extent,brush:a}),R();var n=P.select(".nv-focus .nv-linesWrap").datum(e.filter(function(e){return!e.disabled}).map(function(e,n){return{key:e.key,values:e.values.filter(function(e,n){return t.x()(e,n)>=extent[0]&&t.x()(e,n)<=extent[1]})}}));d3.transition(n).call(t),d3.transition(P.select(".nv-focus .nv-x.nv-axis")).call(r),d3.transition(P.select(".nv-focus .nv-y.nv-axis")).call(i)}var S=d3.select(this),k=this,L=(h||parseInt(S.style("width"))||960)-f.left-f.right,A=(p||parseInt(S.style("height"))||400)-f.top-f.bottom-d,O=d-l.top-l.bottom;C.update=function(){S.transition().call(C)},C.container=this;if(!e||!e.length||!e.filter(function(e){return e.values.length}).length){var M=S.selectAll(".nv-noData").data([x]);return M.enter().append("text").attr("class","nvd3 
nv-noData").attr("dy","-.7em").style("text-anchor","middle"),M.attr("x",f.left+L/2).attr("y",f.top+A/2).text(function(e){return e}),C}S.selectAll(".nv-noData").remove(),v=t.xScale(),m=t.yScale(),g=n.xScale(),y=n.yScale( );var _=S.selectAll("g.nv-wrap.nv-lineWithFocusChart").data([e]),D=_.enter().append("g").attr("class","nvd3 nv-wrap nv-lineWithFocusChart").append("g"),P=_.select("g");D.append("g").attr("class","nv-legendWrap");var H=D.append("g").attr("class","nv-focus");H.append("g").attr("class","nv-x nv-axis"),H.append("g").attr("class","nv-y nv-axis"),H.append("g").attr("class","nv-linesWrap");var B=D.append("g").attr("class","nv-context");B.append("g").attr("class","nv-x nv-axis"),B.append("g").attr("class","nv-y nv-axis"),B.append("g").attr("class","nv-linesWrap"),B.append("g").attr("class","nv-brushBackground"),B.append("g").attr("class","nv-x nv-brush"),b&&(u.width(L),P.select(".nv-legendWrap").datum(e).call(u),f.top!=u.height()&&(f.top=u.height(),A=(p||parseInt(S.style("height"))||400)-f.top-f.bottom-d),P.select(".nv-legendWrap").attr("transform","translate(0,"+ -f.top+")")),_.attr("transform","translate("+f.left+","+f.top+")"),t.width(L).height(A).color(e.map(function(e,t){return e.color||c(e,t)}).filter(function(t,n){return!e[n].disabled})),n.defined(t.defined()).width(L).height(O).color(e.map(function(e,t){return e.color||c(e,t)}).filter(function(t,n){return!e[n].disabled})),P.select(".nv-context").attr("transform","translate(0,"+(A+f.bottom+l.top)+")");var j=P.select(".nv-context .nv-linesWrap").datum(e.filter(function(e){return!e.disabled}));d3.transition(j).call(n),r.scale(v).ticks(L/100).tickSize(-A,0),i.scale(m).ticks(A/36).tickSize(-L,0),P.select(".nv-focus .nv-x.nv-axis").attr("transform","translate(0,"+A+")"),a.x(g).on("brush",U),w&&a.extent(w);var 
F=P.select(".nv-brushBackground").selectAll("g").data([w||a.extent()]),I=F.enter().append("g");I.append("rect").attr("class","left").attr("x",0).attr("y",0).attr("height",O),I.append("rect").attr("class","right").attr("x",0).attr("y",0).attr("height",O),gBrush=P.select(".nv-x.nv-brush").call(a),gBrush.selectAll("rect").attr("height",O),gBrush.selectAll(".resize").append("path").attr("d",q),U(),s .scale(g).ticks(L/100).tickSize(-O,0),P.select(".nv-context .nv-x.nv-axis").attr("transform","translate(0,"+y.range()[0]+")"),d3.transition(P.select(".nv-context .nv-x.nv-axis")).call(s),o.scale(y).ticks(O/36).tickSize(-L,0),d3.transition(P.select(".nv-context .nv-y.nv-axis")).call(o),P.select(".nv-context .nv-x.nv-axis").attr("transform","translate(0,"+y.range()[0]+")"),u.dispatch.on("legendClick",function(t,n){t.disabled=!t.disabled,e.filter(function(e){return!e.disabled}).length||e.map(function(e){return e.disabled=!1,_.selectAll(".nv-series").classed("disabled",!1),e}),S.transition().call(C)}),T.on("tooltipShow",function(e){E&&N(e,k.parentNode)})}),C}var t=e.models.line(),n=e.models.line(),r=e.models.axis(),i=e.models.axis(),s=e.models.axis(),o=e.models.axis(),u=e.models.legend(),a=d3.svg.brush(),f={top:30,right:30,bottom:30,left:60},l={top:0,right:30,bottom:20,left:60},c=e.utils.defaultColor(),h=null,p=null,d=100,v,m,g,y,b=!0,w=null,E=!0,S=function(e,t,n,r,i){return"<h3
"+e+"</h3>"+"<p>"+n+" at "+t+"</p>"},x="No Data Available.",T=d3.dispatch("tooltipShow","tooltipHide","brush");t.clipEdge(!0),n.interactive(!1),r.orient("bottom").tickPadding(5),i.orient("left"),s.orient("bottom").tickPadding(5),o.orient("left");var N=function(n,s){var o=n.pos[0]+(s.offsetLeft||0),u=n.pos[1]+(s.offsetTop||0),a=r.tickFormat()(t.x()(n.point,n.pointIndex)),f=i.tickFormat()(t.y()(n.point,n.pointIndex)),l=S(n.series.key,a,f,n,C);e.tooltip.show([o,u],l,null,null,s)};return t.dispatch.on("elementMouseover.tooltip",function(e){e.pos=[e.pos[0]+f.left,e.pos[1]+f.top],T.tooltipShow(e)}),t.dispatch.on("elementMouseout.tooltip",function(e){T.tooltipHide(e)}),T.on("tooltipHide",function(){E&&e.tooltip.cleanup()}),C.dispatch=T,C.legend=u,C.lines=t,C.lines2=n,C.xAxis=r,C.yAxis=i,C.x2Axis=s,C.y2Axis=o,d3.rebind(C,t,"defined","isArea","size","xDomain","yDomain","forceX","forceY","interactive","clipEdge","clipVoronoi","id"),C.x=function(e){return arguments.length?(t.x(e),n.x(
e),C):t.x},C.y=function(e){return arguments.length?(t.y(e),n.y(e),C):t.y},C.margin=function(e){return arguments.length?(f.top=typeof e.top!="undefined"?e.top:f.top,f.right=typeof e.right!="undefined"?e.right:f.right,f.bottom=typeof e.bottom!="undefined"?e.bottom:f.bottom,f.left=typeof e.left!="undefined"?e.left:f.left,C):f},C.margin2=function(e){return arguments.length?(l=e,C):l},C.width=function(e){return arguments.length?(h=e,C):h},C.height=function(e){return arguments.length?(p=e,C):p},C.height2=function(e){return arguments.length?(d=e,C):d},C.color=function(t){return arguments.length?(c=e.utils.getColor(t),u.color(c),C):c},C.showLegend=function(e){return arguments.length?(b=e,C):b},C.tooltips=function(e){return arguments.length?(E=e,C):E},C.tooltipContent=function(e){return arguments.length?(S=e,C):S},C.interpolate=function(e){return arguments.length?(t.interpolate(e),n.interpolate(e),C):t.interpolate()},C.noData=function(e){return arguments.length?(x=e,C):x},C.xTickForm at=function(e){return arguments.length?(r.tickFormat(e),s.tickFormat(e),C):r.tickFormat()},C.yTickFormat=function(e){return arguments.length?(i.tickFormat(e),o.tickFormat(e),C):i.tickFormat()},C},e.models.linePlusBarWithFocusChart=function(){function H(e){return e.each(function(e){function tt(e){var t=+(e=="e"),n=t?1:-1,r=I/3;return"M"+.5*n+","+r+"A6,6 0 0 "+t+" "+6.5*n+","+(r+6)+"V"+(2*r-6)+"A6,6 0 0 "+t+" "+.5*n+","+2*r+"Z"+"M"+2.5*n+","+(r+8)+"V"+(2*r-8)+"M"+4.5*n+","+(r+8)+"V"+(2*r-8)}function nt(){h.empty()||h.extent(x),Y.data([h.empty()?k.domain():x]).each(function(e,t){var n=k(e[0])-k.range()[0],r=k.range()[1]-k(e[1]);d3.select(this).select(".left").attr("width",n<0?0:n),d3.select(this).select(".right").attr("x",k(e[1])).attr("width",r<0?0:r)})}function rt(){x=h.empty()?null:h.extent(),S=h.empty()?k.domain():h.extent(),D.brush({extent:S,brush:h}),nt(),r.width(j).height(F).color(e.map(function(e,t){return e.color||w(e,t)}).filter(function(t,n){return!e[n].disabled&&e[n 
].bar})),t.width(j).height(F).color(e.map(function(e,t){return e.color||w(e,t)}).filter(function(t,n){return!e[n].disabled&&!e[n].bar}));var n=$.select(".nv-focus .nv-barsWrap").datum(R.length?R.map(function(e,t){return{key:e.key,values:e.values.filter(function(e,t){return r.x()(e,t)>=S[0]&&r.x()(e,t)<=S[1]})}}):[{values:[]}]),i=$.select(".nv-focus .nv-linesWrap").datum(U[0].disabled?[{values:[]}]:U.map(function(e,n){return{key:e.key,values:e.values.filter(function(e,n){return t.x()(e,n)>=S[0]&&t.x()(e,n)<=S[1]})}}));R.length?C=r.xScale():C=t.xScale(),s.scale(C).ticks(j/100).tickSize(-F,0),s.domain([Math.ceil(S[0]),Math.floor(S[1])]),d3.transition($.select(".nv-x.nv-axis")).call(s),d3.transition(n).call(r),d3.transition(i).call(t),$.select(".nv-focus .nv-x.nv-axis").attr("transform","translate(0,"+L.range()[0]+")"),u.scale(L).ticks(F/36).tickSize(-j,0),$.select(".nv-focus .nv-y1.nv-axis").style("opacity",R.length?1:0),a.scale(A).ticks(F/36).tickSize(R.length?0:-j,0),$.select (".nv-focus .nv-y2.nv-axis").style("opacity",U.length?1:0).attr("transform","translate("+C.range()[1]+",0)"),d3.transition($.select(".nv-focus .nv-y1.nv-axis")).call(u),d3.transition($.select(".nv-focus .nv-y2.nv-axis")).call(a)}var N=d3.select(this),B=this,j=(v||parseInt(N.style("width"))||960)-p.left-p.right,F=(m||parseInt(N.style("height"))||400)-p.top-p.bottom-g,I=g-d.top-d.bottom;H.update=function(){N.transition().call(H)},H.container=this;if(!e||!e.length||!e.filter(function(e){return e.values.length}).length){var q=N.selectAll(".nv-noData").data([_]);return q.enter().append("text").attr("class","nvd3 nv-noData").attr("dy","-.7em").style("text-anchor","middle"),q.attr("x",p.left+j/2).attr("y",p.top+F/2).text(function(e){return e}),H}N.selectAll(".nv-noData").remove();var R=e.filter(function(e){return!e.disabled&&e.bar}),U=e.filter(function(e){return!e.bar});C=r.xScale(),k=o.scale(),L=r.yScale(),A=t.yScale(),O=i.yScale(),M=n.yScale();var z=e.filter(function(e){return!e. 
disabled&&e.bar}).map(function(e){return e.values.map(function(e,t){return{x:y(e,t),y:b(e,t)}})}),W=e.filter(function(e){return!e.disabled&&!e.bar}).map(function(e){return e.values.map(function(e,t){return{x:y(e,t),y:b(e,t)}})});C.range([0,j]),k.domain(d3.extent(d3.merge(z.concat(W)),function(e){return e.x})).range([0,j]);var X=N.selectAll("g.nv-wrap.nv-linePlusBar").data([e]),V=X.enter().append("g").attr("class","nvd3 nv-wrap nv-linePlusBar").append("g"),$=X.select("g");V.append("g").attr("class","nv-legendWrap");var J=V.append("g").attr("class","nv-focus");J.append("g").attr("class","nv-x nv-axis"),J.append("g").attr("class","nv-y1 nv-axis"),J.append("g").attr("class","nv-y2 nv-axis"),J.append("g").attr("class","nv-barsWrap"),J.append("g").attr("class","nv-linesWrap");var K=V.append("g").attr("class","nv-context");K.append("g").attr("class","nv-x nv-axis"),K.append("g").attr("class","nv-y1 nv-axis"),K.append("g").attr("class","nv-y2 nv-axis"),K.append("g").attr("class","nv -barsWrap"),K.append("g").attr("class","nv-linesWrap"),K.append("g").attr("class","nv-brushBackground"),K.append("g").attr("class","nv-x nv-brush"),E&&(c.width(j/2),$.select(".nv-legendWrap").datum(e.map(function(e){return e.originalKey=e.originalKey===undefined?e.key:e.originalKey,e.key=e.originalKey+(e.bar?" 
(left axis)":" (right axis)"),e})).call(c),p.top!=c.height()&&(p.top=c.height(),F=(m||parseInt(N.style("height"))||400)-p.top-p.bottom-g),$.select(".nv-legendWrap").attr("transform","translate("+j/2+","+ -p.top+")")),X.attr("transform","translate("+p.left+","+p.top+")"),i.width(j).height(I).color(e.map(function(e,t){return e.color||w(e,t)}).filter(function(t,n){return!e[n].disabled&&e[n].bar})),n.width(j).height(I).color(e.map(function(e,t){return e.color||w(e,t)}).filter(function(t,n){return!e[n].disabled&&!e[n].bar}));var Q=$.select(".nv-context .nv-barsWrap").datum(R.length?R:[{values:[]}]),G=$.select(".nv-context .nv-linesWrap").datum(U[0].disabled?[{values:[]}]:U) ;$.select(".nv-context").attr("transform","translate(0,"+(F+p.bottom+d.top)+")"),d3.transition(Q).call(i),d3.transition(G).call(n),h.x(k).on("brush",rt),x&&h.extent(x);var Y=$.select(".nv-brushBackground").selectAll("g").data([x||h.extent()]),Z=Y.enter().append("g");Z.append("rect").attr("class","left").attr("x",0).attr("y",0).attr("height",I),Z.append("rect").attr("class","right").attr("x",0).attr("y",0).attr("height",I);var et=$.select(".nv-x.nv-brush").call(h);et.selectAll("rect").attr("height",I),et.selectAll(".resize").append("path").attr("d",tt),o.ticks(j/100).tickSize(-I,0),$.select(".nv-context .nv-x.nv-axis").attr("transform","translate(0,"+O.range()[0]+")"),d3.transition($.select(".nv-context .nv-x.nv-axis")).call(o),f.scale(O).ticks(I/36).tickSize(-j,0),$.select(".nv-context .nv-y1.nv-axis").style("opacity",R.length?1:0).attr("transform","translate(0,"+k.range()[0]+")"),d3.transition($.select(".nv-context .nv-y1.nv-axis")).call(f),l.scale(M).ticks(I/36).tickSize(R .length?0:-j,0),$.select(".nv-context .nv-y2.nv-axis").style("opacity",U.length?1:0).attr("transform","translate("+k.range()[1]+",0)"),d3.transition($.select(".nv-context 
.nv-y2.nv-axis")).call(l),c.dispatch.on("legendClick",function(t,n){t.disabled=!t.disabled,e.filter(function(e){return!e.disabled}).length||e.map(function(e){return e.disabled=!1,X.selectAll(".nv-series").classed("disabled",!1),e}),H.update()}),D.on("tooltipShow",function(e){T&&P(e,B.parentNode)}),rt()}),H}var t=e.models.line(),n=e.models.line(),r=e.models.historicalBar(),i=e.models.historicalBar(),s=e.models.axis(),o=e.models.axis(),u=e.models.axis(),a=e.models.axis(),f=e.models.axis(),l=e.models.axis(),c=e.models.legend(),h=d3.svg.brush(),p={top:30,right:30,bottom:30,left:60},d={top:0,right:30,bottom:20,left:60},v=null,m=null,g=100,y=function(e){return e.x},b=function(e){return e.y},w=e.utils.defaultColor(),E=!0,S,x=null,T=!0,N=function(e,t,n,r,i){return"<h3>"+e+"</h3>"+"<p>"+n+" at "+t+"</p>"},C,k,L,A,O, M,_="No Data Available.",D=d3.dispatch("tooltipShow","tooltipHide","brush");t.clipEdge(!0),n.interactive(!1),s.orient("bottom").tickPadding(5),u.orient("left"),a.orient("right"),o.orient("bottom").tickPadding(5),f.orient("left"),l.orient("right");var P=function(n,r){S&&(n.pointIndex+=Math.ceil(S[0]));var i=n.pos[0]+(r.offsetLeft||0),o=n.pos[1]+(r.offsetTop||0),f=s.tickFormat()(t.x()(n.point,n.pointIndex)),l=(n.series.bar?u:a).tickFormat()(t.y()(n.point,n.pointIndex)),c=N(n.series.key,f,l,n,H);e.tooltip.show([i,o],c,n.value<0?"n":"s",null,r)};return t.dispatch.on("elementMouseover.tooltip",function(e){e.pos=[e.pos[0]+p.left,e.pos[1]+p.top],D.tooltipShow(e)}),t.dispatch.on("elementMouseout.tooltip",function(e){D.tooltipHide(e)}),r.dispatch.on("elementMouseover.tooltip",function(e){e.pos=[e.pos[0]+p.left,e.pos[1]+p.top],D.tooltipShow(e)}),r.dispatch.on("elementMouseout.tooltip",function(e){D.tooltipHide(e)}),D.on("tooltipHide",function(){T&&e.tooltip.cleanup()}),H.dispatch=D,H. 
legend=c,H.lines=t,H.lines2=n,H.bars=r,H.bars2=i,H.xAxis=s,H.x2Axis=o,H.y1Axis=u,H.y2Axis=a,H.y3Axis=f,H.y4Axis=l,d3.rebind(H,t,"defined","size","clipVoronoi","interpolate"),H.x=function(e){return arguments.length?(y=e,t.x(e),r.x(e),H):y},H.y=function(e){return arguments.length?(b=e,t.y(e),r.y(e),H):b},H.margin=function(e){return arguments.length?(p.top=typeof e.top!="undefined"?e.top:p.top,p.right=typeof e.right!="undefined"?e.right:p.right,p.bottom=typeof e.bottom!="undefined"?e.bottom:p.bottom,p.left=typeof e.left!="undefined"?e.left:p.left,H):p},H.width=function(e){return arguments.length?(v=e,H):v},H.height=function(e){return arguments.length?(m=e,H):m},H.color=function(t){return arguments.length?(w=e.utils.getColor(t),c.color(w),H):w},H.showLegend=function(e){return arguments.length?(E=e,H):E},H.tooltips=function(e){return arguments.length?(T=e,H):T},H.tooltipContent=function(e){return arguments.length?(N=e,H):N},H.noData=function(e){return arguments.length?(_=e,H):_}, H.brushExtent=function(e){return arguments.length?(x=e,H):x},H},e.models.multiBar=function(){function x(e){return e.each(function(e){var x=n-t.left-t.right,T=r-t.top-t.bottom,N=d3.select(this);p&&e.length&&(p=[{values:e[0].values.map(function(e){return{x:e.x,y:0,series:e.series,size:.01}})}]),c&&(e=d3.layout.stack().offset("zero").values(function(e){return e.values}).y(a)(!e.length&&p?p:e)),e=e.map(function(e,t){return e.values=e.values.map(function(e){return e.series=t,e}),e}),c&&e[0].values.map(function(t,n){var r=0,i=0;e.map(function(e){var t=e.values[n];t.size=Math.abs(t.y),t.y<0?(t.y1=i,i-=t.size):(t.y1=t.size+r,r+=t.size)})});var C=y&&b?[]:e.map(function(e){return e.values.map(function(e,t){return{x:u(e,t),y:a(e,t),y0:e.y0,y1:e.y1}})});i.domain(d3.merge(C).map(function(e){return e.x})).rangeBands([0,x],.1),s.domain(b||d3.extent(d3.merge(C).map(function(e){return c?e.y>0?e.y1:e.y1+e.y:e.y}).concat(f))).range([T,0]);if(i.domain()[0]===i.domain()[1]||s.domain()[0]===s.dom 
ain()[1])singlePoint=!0;i.domain()[0]===i.domain()[1]&&(i.domain()[0]?i.domain([i.domain()[0]-i.domain()[0]*.01,i.domain()[1]+i.domain()[1]*.01]):i.domain([-1,1])),s.domain()[0]===s.domain()[1]&&(s.domain()[0]?s.domain([s.domain()[0]+s.domain()[0]*.01,s.domain()[1]-s.domain()[1]*.01]):s.domain([-1,1])),E=E||i,S=S||s;var k=N.selectAll("g.nv-wrap.nv-multibar").data([e]),L=k.enter().append("g").attr("class","nvd3 nv-wrap nv-multibar"),A=L.append("defs"),O=L.append("g"),M=k.select("g");O.append("g").attr("class","nv-groups"),k.attr("transform","translate("+t.left+","+t.top+")"),A.append("clipPath").attr("id","nv-edge-clip-"+o).append("rect"),k.select("#nv-edge-clip-"+o+" rect").attr("width",x).attr("height",T),M.attr("clip-path",l?"url(#nv-edge-clip-"+o+")":"");var _=k.select(".nv-groups").selectAll(".nv-group").data(function(e){return e},function(e){return e.key});_.enter().append("g").style("stroke-opacity",1e-6).style("fill-opacity",1e-6),_.exit().selectAll("rect.nv-bar").tra nsition().delay(function(t,n){return n*m/e[0].values.length}).attr("y",function(e){return c?S(e.y0):S(0)}).attr("height",0).remove(),_.attr("class",function(e,t){return"nv-group nv-series-"+t}).classed("hover",function(e){return e.hover}).style("fill",function(e,t){return h(e,t)}).style("stroke",function(e,t){return h(e,t)}),d3.transition(_).style("stroke-opacity",1).style("fill-opacity",.75);var D=_.selectAll("rect.nv-bar").data(function(t){return p&&!e.length?p.values:t.values});D.exit().remove();var P=D.enter().append("rect").attr("class",function(e,t){return a(e,t)<0?"nv-bar negative":"nv-bar positive"}).attr("x",function(t,n,r){return c?0:r*i.rangeBand()/e.length}).attr("y",function(e){return S(c?e.y0:0)}).attr("height",0).attr("width",i.rangeBand()/(c?1:e.length));D.style("fill",function(e,t,n){return h(e,n,t)}).style("stroke",function(e,t,n){return h(e,n,t)}).on("mouseover",function(t,n){d3.select(this).classed("hover",!0),w.elementMouseover({value:a(t,n),point:t,seri 
es:e[t.series],pos:[i(u(t,n))+i.rangeBand()*(c?e.length/2:t.series+.5)/e.length,s(a(t,n)+(c?t.y0:0))],pointIndex:n,seriesIndex:t.series,e:d3.event})}).on("mouseout",function(t,n){d3.select(this).classed("hover",!1),w.elementMouseout({value:a(t,n),point:t,series:e[t.series],pointIndex:n,seriesIndex:t.series,e:d3.event})}).on("click",function(t,n){w.elementClick({value:a(t,n),point:t,series:e[t.series],pos:[i(u(t,n))+i.rangeBand()*(c?e.length/2:t.series+.5)/e.length,s(a(t,n)+(c?t.y0:0))],pointIndex:n,seriesIndex:t.series,e:d3.event}),d3.event.stopPropagation()}).on("dblclick",function(t,n){w.elementDblClick({value:a(t,n),point:t,series:e[t.series],pos:[i(u(t,n))+i.rangeBand()*(c?e.length/2:t.series+.5)/e.length,s(a(t,n)+(c?t.y0:0))],pointIndex:n,seriesIndex:t.series,e:d3.event}),d3.event.stopPropagation()}),D.attr("class",function(e,t){return a(e,t)<0?"nv-bar negative":"nv-bar positive"}).attr("transform",function(e,t){return"translate("+i(u(e,t))+",0)"}),d&&(v||(v=e.map(funct ion(){return!0})),D.style("fill",function(e,t,n){return d3.rgb(d(e,t)).darker(v.map(function(e,t){return t}).filter(function(e,t){return!v[t]})[n]).toString()}).style("stroke",function(e,t,n){return d3.rgb(d(e,t)).darker(v.map(function(e,t){return t}).filter(function(e,t){return!v[t]})[n]).toString()})),c?D.transition().delay(function(t,n){return n*m/e[0].values.length}).attr("y",function(e,t){return s(c?e.y1:0)}).attr("height",function(e,t){return Math.max(Math.abs(s(e.y+(c?e.y0:0))-s(c?e.y0:0)),1)}).each("end",function(){d3.select(this).transition().duration(g).attr("x",function(t,n){return c?0:t.series*i.rangeBand()/e.length}).attr("width",i.rangeBand()/(c?1:e.length))}):d3.transition(D).duration(g).delay(function(t,n){return n*m/e[0].values.length}).attr("x",function(t,n){return t.series*i.rangeBand()/e.length}).attr("width",i.rangeBand()/e.length).each("end",function(){d3.select(this).transition().duration(g).attr("y",function(e,t){return a(e,t)<0?s(0):s(0)-s(a(e,t))<1? 
s(0)-1:s(a(e,t))||0}).attr("height",function(e,t){return Math.max(Math.abs(s(a(e,t))-s(0)),1)||0})}),E=i.copy(),S=s.copy()}),x}var t={top:0,right:0,bottom:0,left:0},n=960,r=500,i=d3.scale.ordinal(),s=d3.scale.linear(),o=Math.floor(Math.random()*1e4),u=function(e){return e.x},a=function(e){return e.y},f=[0],l=!0,c=!1,h=e.utils.defaultColor(),p=!1,d=null,v,m=1200,g=500,y,b,w=d3.dispatch("chartClick","elementClick","elementDblClick","elementMouseover","elementMouseout"),E,S;return x.dispatch=w,x.x=function(e){return arguments.length?(u=e,x):u},x.y=function(e){return arguments.length?(a=e,x):a},x.margin=function(e){return arguments.length?(t.top=typeof e.top!="undefined"?e.top:t.top,t.right=typeof e.right!="undefined"?e.right:t.right,t.bottom=typeof e.bottom!="undefined"?e.bottom:t.bottom,t.left=typeof e.left!="undefined"?e.left:t.left,x):t},x.width=function(e){return arguments.length?(n=e,x):n},x.height=function(e){return arguments.length?(r=e,x):r},x.xScale=function(e){return arguments.length?(i=e,x):i},x.yScale=function(e){return arguments.length?(s=e,x):s},x.xDomain=function(e){return arguments.length?(y=e,x):y},x.yDomain=function(e){return arguments.length?(b=e,x):b},x.forceY=function(e){return arguments.length?(f=e,x):f},x.stacked=function(e){return arguments.length?(c=e,x):c},x.clipEdge=function(e){return arguments.length?(l=e,x):l},x.color=function(t){return arguments.length?(h=e.utils.getColor(t),x):h},x.barColor=function(t){return arguments.length?(d=e.utils.getColor(t),x):d},x.disabled=function(e){return arguments.length?(v=e,x):v},x.id=function(e){return arguments.length?(o=e,x):o},x.hideable=function(e){return arguments.length?(p=e,x):p},x.delay=function(e){return arguments.length?(m=e,x):m},x.drawTime=function(e){return arguments.length?(g=e,x):g},x},e.models.multiBarChart=function(){function N(e){return e.each(function(e){var m=d3.select(this),C=this,k=(u||parseInt(m.style("width"))||960)-o.left-o.right,L=(a||parseInt(m.style("height 
"))||400)-o.top-o.bottom;N.update=function(){m.transition().call(N)},N.container=this,b.disabled=e.map(function(e){return!!e.disabled});if(!w){var A;w={};for(A in b)b[A]instanceof Array?w[A]=b[A].slice(0):w[A]=b[A]}if(!e||!e.length||!e.filter(function(e){return e.values.length}).length){var O=m.selectAll(".nv-noData").data([E]);return O.enter().append("text").attr("class","nvd3 nv-noData").attr("dy","-.7em").style("text-anchor","middle"),O.attr("x",o.left+k/2).attr("y",o.top+L/2).text(function(e){return e}),N}m.selectAll(".nv-noData").remove(),g=t.xScale(),y=t.yScale();var M=m.selectAll("g.nv-wrap.nv-multiBarWithLegend").data([e]),_=M.enter().append("g").attr("class","nvd3 nv-wrap nv-multiBarWithLegend").append("g"),D=M.select("g");_.append("g").attr("class","nv-x nv-axis"),_.append("g").attr("class","nv-y nv-axis"),_.append("g").attr("class","nv-barsWrap"),_.append("g").attr("class","nv-legendWrap"),_.append("g").attr("class","nv-controlsWrap"),c&&(i.width(k-x()),t.barColor ()&&e.forEach(function(e,t){e.color=d3.rgb("#ccc").darker(t*1.5).toString()}),D.select(".nv-legendWrap").datum(e).call(i),o.top!=i.height()&&(o.top=i.height(),L=(a||parseInt(m.style("height"))||400)-o.top-o.bottom),D.select(".nv-legendWrap").attr("transform","translate("+x()+","+ -o.top+")"));if(l){var P=[{key:"Grouped",disabled:t.stacked()},{key:"Stacked",disabled:!t.stacked()}];s.width(x()).color(["#444","#444","#444"]),D.select(".nv-controlsWrap").datum(P).attr("transform","translate(0,"+ -o.top+")").call(s)}M.attr("transform","translate("+o.left+","+o.top+")"),t.disabled(e.map(function(e){return e.disabled})).width(k).height(L).color(e.map(function(e,t){return e.color||f(e,t)}).filter(function(t,n){return!e[n].disabled}));var H=D.select(".nv-barsWrap").datum(e.filter(function(e){return!e.disabled}));d3.transition(H).call(t),n.scale(g).ticks(k/100).tickSize(-L,0),D.select(".nv-x.nv-axis").attr("transform","translate(0,"+y.range()[0]+")"),d3.transition(D.select(".nv-x.nv-a xis")).call(n);var 
B=D.select(".nv-x.nv-axis > g").selectAll("g");B.selectAll("line, text").style("opacity",1);if(p){var j=function(e,t){return"translate("+e+","+t+")"},F=5,I=17;B.selectAll("text").attr("transform",function(e,t,n){return j(0,n%2==0?F:I)});var q=d3.selectAll(".nv-x.nv-axis .nv-wrap g g text")[0].length;D.selectAll(".nv-x.nv-axis .nv-axisMaxMin text").attr("transform",function(e,t){return j(0,t===0||q%2!==0?I:F)})}h&&B.filter(function(t,n){return n%Math.ceil(e[0].values.length/(k/100))!==0}).selectAll("text, line").style("opacity",0),d&&B.selectAll("text").attr("transform","rotate("+d+" 0,0)").attr("text-anchor",d>0?"start":"end"),D.select(".nv-x.nv-axis").selectAll("g.nv-axisMaxMin text").style("opacity",1),r.scale(y).ticks(L/36).tickSize(-k,0),d3.transition(D.select(".nv-y.nv-axis")).call(r),i.dispatch.on("legendClick",function(t,n){t.disabled=!t.disabled,e.filter(function(e){return!e.disabled}).length||e.map(function(e){return e.disabled=!1,M.selectAll(".nv -series").classed("disabled",!1),e}),b.disabled=e.map(function(e){return!!e.disabled}),S.stateChange(b),N.update()}),i.dispatch.on("legendDblclick",function(t){e.forEach(function(e){e.disabled=!0}),t.disabled=!1,b.disabled=e.map(function(e){return!!e.disabled}),S.stateChange(b),N.update()}),s.dispatch.on("legendClick",function(e,n){if(!e.disabled)return;P=P.map(function(e){return e.disabled=!0,e}),e.disabled=!1;switch(e.key){case"Grouped":t.stacked(!1);break;case"Stacked":t.stacked(!0)}b.stacked=t.stacked(),S.stateChange(b),N.update()}),S.on("tooltipShow",function(e){v&&T(e,C.parentNode)}),S.on("changeState",function(n){typeof n.disabled!="undefined"&&(e.forEach(function(e,t){e.disabled=n.disabled[t]}),b.disabled=n.disabled),typeof n.stacked!="undefined"&&(t.stacked(n.stacked),b.stacked=n.stacked),N.update()})}),N}var t=e.models.multiBar(),n=e.models.axis(),r=e.models.axis(),i=e.models.legend(),s=e.models.legend(),o={top:30,right:20,bottom:50,left:60},u=null,a=null,f=e.utils 
.defaultColor(),l=!0,c=!0,h=!0,p=!1,d=0,v=!0,m=function(e,t,n,r,i){return"<h3>"+e+"</h3>"+"<p>"+n+" on "+t+"</p>"},g,y,b={stacked:!1},w=null,E="No Data Available.",S=d3.dispatch("tooltipShow","tooltipHide","stateChange","changeState"),x=function(){return l?180:0};t.stacked(!1),n.orient("bottom").tickPadding(7).highlightZero(!0).showMaxMin(!1).tickFormat(function(e){return e}),r.orient("left").tickFormat(d3.format(",.1f"));var T=function(i,s){var o=i.pos[0]+(s.offsetLeft||0),u=i.pos[1]+(s.offsetTop||0),a=n.tickFormat()(t.x()(i.point,i.pointIndex)),f=r.tickFormat()(t.y()(i.point,i.pointIndex)),l=m(i.series.key,a,f,i,N);e.tooltip.show([o,u],l,i.value<0?"n":"s",null,s)};return t.dispatch.on("elementMouseover.tooltip",function(e){e.pos=[e.pos[0]+o.left,e.pos[1]+o.top],S.tooltipShow(e)}),t.dispatch.on("elementMouseout.tooltip",function(e){S.tooltipHide(e)}),S.on("tooltipHide",function(){v&&e.tooltip.cleanup()}),N.dispatch=S,N.multibar=t,N.legend=i,N.xAxis=n,N.yAxis=r,d3.rebind(N,t ,"x","y","xDomain","yDomain","forceX","forceY","clipEdge","id","stacked","delay","barColor"),N.margin=function(e){return arguments.length?(o.top=typeof e.top!="undefined"?e.top:o.top,o.right=typeof e.right!="undefined"?e.right:o.right,o.bottom=typeof e.bottom!="undefined"?e.bottom:o.bottom,o.left=typeof e.left!="undefined"?e.left:o.left,N):o},N.width=function(e){return arguments.length?(u=e,N):u},N.height=function(e){return arguments.length?(a=e,N):a},N.color=function(t){return arguments.length?(f=e.utils.getColor(t),i.color(f),N):f},N.showControls=function(e){return arguments.length?(l=e,N):l},N.showLegend=function(e){return arguments.length?(c=e,N):c},N.reduceXTicks=function(e){return arguments.length?(h=e,N):h},N.rotateLabels=function(e){return arguments.length?(d=e,N):d},N.staggerLabels=function(e){return arguments.length?(p=e,N):p},N.tooltip=function(e){return arguments.length?(m=e,N):m},N.tooltips=function(e){return arguments.length?(v=e,N):v},N.tooltipContent=function (e){return 
arguments.length?(m=e,N):m},N.state=function(e){return arguments.length?(b=e,N):b},N.defaultState=function(e){return arguments.length?(w=e,N):w},N.noData=function(e){return arguments.length?(E=e,N):E},N},e.models.multiBarHorizontal=function(){function x(e){return e.each(function(e){var i=n-t.left-t.right,g=r-t.top-t.bottom,x=d3.select(this);p&&(e=d3.layout.stack().offset("zero").values(function(e){return e.values}).y(a)(e)),e=e.map(function(e,t){return e.values=e.values.map(function(e){return e.series=t,e}),e}),p&&e[0].values.map(function(t,n){var r=0,i=0;e.map(function(e){var t=e.values[n];t.size=Math.abs(t.y),t.y<0?(t.y1=i-t.size,i-=t.size):(t.y1=r,r+=t.size)})});var T=y&&b?[]:e.map(function(e){return e.values.map(function(e,t){return{x:u(e,t),y:a(e,t),y0:e.y0,y1:e.y1}})});s.domain(y||d3.merge(T).map(function(e){return e.x})).rangeBands([0,g],.1),o.domain(b||d3.extent(d3.merge(T).map(function(e){return p?e.y>0?e.y1+e.y:e.y1:e.y}).concat(f))),d&&!p?o.range([o.dom ain()[0]<0?v:0,i-(o.domain()[1]>0?v:0)]):o.range([0,i]),E=E||s,S=S||d3.scale.linear().domain(o.domain()).range([o(0),o(0)]);var N=d3.select(this).selectAll("g.nv-wrap.nv-multibarHorizontal").data([e]),C=N.enter().append("g").attr("class","nvd3 nv-wrap nv-multibarHorizontal"),k=C.append("defs"),L=C.append("g"),A=N.select("g");L.append("g").attr("class","nv-groups"),N.attr("transform","translate("+t.left+","+t.top+")");var O=N.select(".nv-groups").selectAll(".nv-group").data(function(e){return e},function(e){return e.key});O.enter().append("g").style("stroke-opacity",1e-6).style("fill-opacity",1e-6),d3.transition(O.exit()).style("stroke-opacity",1e-6).style("fill-opacity",1e-6).remove(),O.attr("class",function(e,t){return"nv-group nv-series-"+t}).classed("hover",function(e){return e.hover}).style("fill",function(e,t){return l(e,t)}).style("stroke",function(e,t){return l(e,t)}),d3.transition(O).style("stroke-opacity",1).style("fill-opacity",.75);var M=O.selectAll("g.nv-bar").da ta(function(e){return 
e.values});M.exit().remove();var _=M.enter().append("g").attr("transform",function(t,n,r){return"translate("+S(p?t.y0:0)+","+(p?0:r*s.rangeBand()/e.length+s(u(t,n)))+")"});_.append("rect").attr("width",0).attr("height",s.rangeBand()/(p?1:e.length)),M.on("mouseover",function(t,n){d3.select(this).classed("hover",!0),w.elementMouseover({value:a(t,n),point:t,series:e[t.series],pos:[o(a(t,n)+(p?t.y0:0)),s(u(t,n))+s.rangeBand()*(p?e.length/2:t.series+.5)/e.length],pointIndex:n,seriesIndex:t.series,e:d3.event})}).on("mouseout",function(t,n){d3.select(this).classed("hover",!1),w.elementMouseout({value:a(t,n),point:t,series:e[t.series],pointIndex:n,seriesIndex:t.series,e:d3.event})}).on("click",function(t,n){w.elementClick({value:a(t,n),point:t,series:e[t.series],pos:[s(u(t,n))+s.rangeBand()*(p?e.length/2:t.series+.5)/e.length,o(a(t,n)+(p?t.y0:0))],pointIndex:n,seriesIndex:t.series,e:d3.event}),d3.event.stopPropagation()}).on("dblclick",function(t,n){w.elementDb lClick({value:a(t,n),point:t,series:e[t.series],pos:[s(u(t,n))+s.rangeBand()*(p?e.length/2:t.series+.5)/e.length,o(a(t,n)+(p?t.y0:0) -)],pointIndex:n,seriesIndex:t.series,e:d3.event}),d3.event.stopPropagation()}),_.append("text"),d&&!p?(M.select("text").attr("text-anchor",function(e,t){return a(e,t)<0?"end":"start"}).attr("y",s.rangeBand()/(e.length*2)).attr("dy",".32em").text(function(e,t){return m(a(e,t))}),d3.transition(M).select("text").attr("x",function(e,t){return a(e,t)<0?-4:o(a(e,t))-o(0)+4})):M.selectAll("text").text(""),M.attr("class",function(e,t){return a(e,t)<0?"nv-bar negative":"nv-bar positive"}),c&&(h||(h=e.map(function(){return!0})),M.style("fill",function(e,t,n){return d3.rgb(c(e,t)).darker(h.map(function(e,t){return t}).filter(function(e,t){return!h[t]})[n]).toString()}).style("stroke",function(e,t,n){return d3.rgb(c(e,t)).darker(h.map(function(e,t){return 
t}).filter(function(e,t){return!h[t]})[n]).toString()})),p?d3.transition(M).attr("transform",function(e,t){return"translate("+o(e.y1)+","+s(u(e,t))+")"}).select("rect").attr("width",function(e,t){return Math.abs(o(a(e,t)+e.y0)-o(e.y0)) }).attr("height",s.rangeBand()):d3.transition(M).attr("transform",function(t,n){return"translate("+(a(t,n)<0?o(a(t,n)):o(0))+","+(t.series*s.rangeBand()/e.length+s(u(t,n)))+")"}).select("rect").attr("height",s.rangeBand()/e.length).attr("width",function(e,t){return Math.max(Math.abs(o(a(e,t))-o(0)),1)}),E=s.copy(),S=o.copy()}),x}var t={top:0,right:0,bottom:0,left:0},n=960,r=500,i=Math.floor(Math.random()*1e4),s=d3.scale.ordinal(),o=d3.scale.linear(),u=function(e){return e.x},a=function(e){return e.y},f=[0],l=e.utils.defaultColor(),c=null,h,p=!1,d=!1,v=60,m=d3.format(",.2f"),g=1200,y,b,w=d3.dispatch("chartClick","elementClick","elementDblClick","elementMouseover","elementMouseout"),E,S;return x.dispatch=w,x.x=function(e){return arguments.length?(u=e,x):u},x.y=function(e){return arguments.length?(a=e,x):a},x.margin=function(e){return arguments.length?(t.top=typeof e.top!="undefined"?e.top:t.top,t.right=typeof e.right!="undefined"?e.right:t.right,t.bottom=typeof e.bottom!="unde fined"?e.bottom:t.bottom,t.left=typeof e.left!="undefined"?e.left:t.left,x):t},x.width=function(e){return arguments.length?(n=e,x):n},x.height=function(e){return arguments.length?(r=e,x):r},x.xScale=function(e){return arguments.length?(s=e,x):s},x.yScale=function(e){return arguments.length?(o=e,x):o},x.xDomain=function(e){return arguments.length?(y=e,x):y},x.yDomain=function(e){return arguments.length?(b=e,x):b},x.forceY=function(e){return arguments.length?(f=e,x):f},x.stacked=function(e){return arguments.length?(p=e,x):p},x.color=function(t){return arguments.length?(l=e.utils.getColor(t),x):l},x.barColor=function(t){return arguments.length?(c=e.utils.getColor(t),x):c},x.disabled=function(e){return arguments.length?(h=e,x):h},x.id=function(e){return 
arguments.length?(i=e,x):i},x.delay=function(e){return arguments.length?(g=e,x):g},x.showValues=function(e){return arguments.length?(d=e,x):d},x.valueFormat=function(e){return arguments.length?(m=e,x):m},x.valuePadding=function(e ){return arguments.length?(v=e,x):v},x},e.models.multiBarHorizontalChart=function(){function x(e){return e.each(function(h){var d=d3.select(this),T=this,N=(u||parseInt(d.style("width"))||960)-o.left-o.right,C=(a||parseInt(d.style("height"))||400)-o.top-o.bottom;x.update=function(){d.transition().call(x)},x.container=this,g.disabled=h.map(function(e){return!!e.disabled});if(!y){var k;y={};for(k in g)g[k]instanceof Array?y[k]=g[k].slice(0):y[k]=g[k]}if(!h||!h.length||!h.filter(function(e){return e.values.length}).length){var L=d.selectAll(".nv-noData").data([b]);return L.enter().append("text").attr("class","nvd3 nv-noData").attr("dy","-.7em").style("text-anchor","middle"),L.attr("x",o.left+N/2).attr("y",o.top+C/2).text(function(e){return e}),x}d.selectAll(".nv-noData").remove(),v=t.xScale(),m=t.yScale();var A=d.selectAll("g.nv-wrap.nv-multiBarHorizontalChart").data([h]),O=A.enter().append("g").attr("class","nvd3 nv-wrap nv-multiBarHorizontalChart").append("g"),M=A.select("g"); O.append("g").attr("class","nv-x nv-axis"),O.append("g").attr("class","nv-y nv-axis"),O.append("g").attr("class","nv-barsWrap"),O.append("g").attr("class","nv-legendWrap"),O.append("g").attr("class","nv-controlsWrap"),c&&(i.width(N-E()),t.barColor()&&h.forEach(function(e,t){e.color=d3.rgb("#ccc").darker(t*1.5).toString()}),M.select(".nv-legendWrap").datum(h).call(i),o.top!=i.height()&&(o.top=i.height(),C=(a||parseInt(d.style("height"))||400)-o.top-o.bottom),M.select(".nv-legendWrap").attr("transform","translate("+E()+","+ -o.top+")"));if(l){var _=[{key:"Grouped",disabled:t.stacked()},{key:"Stacked",disabled:!t.stacked()}];s.width(E()).color(["#444","#444","#444"]),M.select(".nv-controlsWrap").datum(_).attr("transform","translate(0,"+ 
-o.top+")").call(s)}A.attr("transform","translate("+o.left+","+o.top+")"),t.disabled(h.map(function(e){return e.disabled})).width(N).height(C).color(h.map(function(e,t){return e.color||f(e,t)}).filter(function(e,t){return!h[t].disabled}));var D= M.select(".nv-barsWrap").datum(h.filter(function(e){return!e.disabled}));d3.transition(D).call(t),n.scale(v).ticks(C/24).tickSize(-N,0),d3.transition(M.select(".nv-x.nv-axis")).call(n);var P=M.select(".nv-x.nv-axis").selectAll("g");P.selectAll("line, text").style("opacity",1),r.scale(m).ticks(N/100).tickSize(-C,0),M.select(".nv-y.nv-axis").attr("transform","translate(0,"+C+")"),d3.transition(M.select(".nv-y.nv-axis")).call(r),i.dispatch.on("legendClick",function(e,t){e.disabled=!e.disabled,h.filter(function(e){return!e.disabled}).length||h.map(function(e){return e.disabled=!1,A.selectAll(".nv-series").classed("disabled",!1),e}),g.disabled=h.map(function(e){return!!e.disabled}),w.stateChange(g),x.update()}),i.dispatch.on("legendDblclick",function(e){h.forEach(function(e){e.disabled=!0}),e.disabled=!1,g.disabled=h.map(function(e){return!!e.disabled}),w.stateChange(g),x.update()}),s.dispatch.on("legendClick",function(e,n){if(!e.disabled)return;_=_.map(function(e){return e.disab led=!0,e}),e.disabled=!1;switch(e.key){case"Grouped":t.stacked(!1);break;case"Stacked":t.stacked(!0)}g.stacked=t.stacked(),w.stateChange(g),x.update()}),w.on("tooltipShow",function(e){p&&S(e,T.parentNode)}),w.on("changeState",function(n){typeof n.disabled!="undefined"&&(h.forEach(function(e,t){e.disabled=n.disabled[t]}),g.disabled=n.disabled),typeof n.stacked!="undefined"&&(t.stacked(n.stacked),g.stacked=n.stacked),e.call(x)})}),x}var t=e.models.multiBarHorizontal(),n=e.models.axis(),r=e.models.axis(),i=e.models.legend().height(30),s=e.models.legend().height(30),o={top:30,right:20,bottom:50,left:60},u=null,a=null,f=e.utils.defaultColor(),l=!0,c=!0,h=!1,p=!0,d=function(e,t,n,r,i){return"<h3>"+e+" - 
"+t+"</h3>"+"<p>"+n+"</p>"},v,m,g={stacked:h},y=null,b="No Data Available.",w=d3.dispatch("tooltipShow","tooltipHide","stateChange","changeState"),E=function(){return l?180:0};t.stacked(h),n.orient("left").tickPadding(5).highlightZero(!1).showMaxMin(!1).tickFormat(function(e){retur n e}),r.orient("bottom").tickFormat(d3.format(",.1f"));var S=function(i,s){var o=i.pos[0]+(s.offsetLeft||0),u=i.pos[1]+(s.offsetTop||0),a=n.tickFormat()(t.x()(i.point,i.pointIndex)),f=r.tickFormat()(t.y()(i.point,i.pointIndex)),l=d(i.series.key,a,f,i,x);e.tooltip.show([o,u],l,i.value<0?"e":"w",null,s)};return t.dispatch.on("elementMouseover.tooltip",function(e){e.pos=[e.pos[0]+o.left,e.pos[1]+o.top],w.tooltipShow(e)}),t.dispatch.on("elementMouseout.tooltip",function(e){w.tooltipHide(e)}),w.on("tooltipHide",function(){p&&e.tooltip.cleanup()}),x.dispatch=w,x.multibar=t,x.legend=i,x.xAxis=n,x.yAxis=r,d3.rebind(x,t,"x","y","xDomain","yDomain","forceX","forceY","clipEdge","id","delay","showValues","valueFormat","stacked","barColor"),x.margin=function(e){return arguments.length?(o.top=typeof e.top!="undefined"?e.top:o.top,o.right=typeof e.right!="undefined"?e.right:o.right,o.bottom=typeof e.bottom!="undefined"?e.bottom:o.bottom,o.left=typeof e.left!="undefined"?e.left:o.left,x):o} ,x.width=function(e){return arguments.length?(u=e,x):u},x.height=function(e){return arguments.length?(a=e,x):a},x.color=function(t){return arguments.length?(f=e.utils.getColor(t),i.color(f),x):f},x.showControls=function(e){return arguments.length?(l=e,x):l},x.showLegend=function(e){return arguments.length?(c=e,x):c},x.tooltip=function(e){return arguments.length?(d=e,x):d},x.tooltips=function(e){return arguments.length?(p=e,x):p},x.tooltipContent=function(e){return arguments.length?(d=e,x):d},x.state=function(e){return arguments.length?(g=e,x):g},x.defaultState=function(e){return arguments.length?(y=e,x):y},x.noData=function(e){return arguments.length?(b=e,x):b},x},e.models.multiChart=function(){function T(e){return 
e.each(function(e){var u=d3.select(this),f=this;T.update=function(){u.transition().call(T)},T.container=this;var N=(r||parseInt(u.style("width"))||960)-t.left-t.right,C=(i||parseInt(u.style("height"))||400)-t.top-t.bottom,k=e.filter(function(e){return!e.disabled&& e.type=="line"&&e.yAxis==1}),L=e.filter(function(e){return!e.disabled&&e.type=="line"&&e.yAxis==2}),A=e.filter(function(e){return!e.disabled&&e.type=="bar"&&e.yAxis==1}),O=e.filter(function(e){return!e.disabled&&e.type=="bar"&&e.yAxis==2}),M=e.filter(function(e){return!e.disabled&&e.type=="area"&&e.yAxis==1}),_=e.filter(function(e){return!e.disabled&&e.type=="area"&&e.yAxis==2}),D=e.filter(function(e){return!e.disabled&&e.yAxis==1}).map(function(e){return e.values.map(function(e,t){return{x:e.x,y:e.y}})}),P=e.filter(function(e){return!e.disabled&&e.yAxis==2}).map(function(e){return e.values.map(function(e,t){return{x:e.x,y:e.y}})});a.domain(d3.extent(d3.merge(D.concat(P)),function(e){return e.x})).range([0,N]);var H=u.selectAll("g.wrap.multiChart").data([e]),B=H.enter().append("g").attr("class","wrap nvd3 multiChart").append("g");B.append("g").attr("class","x axis"),B.append("g").attr("class","y1 axis"),B.append("g").attr("class","y2 axis"),B.append("g").attr("class","lines1 Wrap"),B.append("g").attr("class","lines2Wrap"),B.append("g").attr("class","bars1Wrap"),B.append("g").attr("class","bars2Wrap"),B.append("g").attr("class","stack1Wrap"),B.append("g").attr("class","stack2Wrap"),B.append("g").attr("class","legendWrap");var j=H.select("g");s&&(E.width(N/2),j.select(".legendWrap").datum(e.map(function(e){return e.originalKey=e.originalKey===undefined?e.key:e.originalKey,e.key=e.originalKey+(e.yAxis==1?"":" (right axis)"),e})).call(E),t.top!=E.height()&&(t.top=E.height(),C=(i||parseInt(u.style("height"))||400)-t.top-t.bottom),j.select(".legendWrap").attr("transform","translate("+N/2+","+ -t.top+")")),h.width(N).height(C).interpolate("monotone").color(e.map(function(e,t){return 
e.color||n[t%n.length]}).filter(function(t,n){return!e[n].disabled&&e[n].yAxis==1&&e[n].type=="line"})),p.width(N).height(C).interpolate("monotone").color(e.map(function(e,t){return e.color||n[t%n.length]}).filter(function(t,n){return!e[n].disabled&&e[n].yAxis==2&&e[n].type =="line"})),d.width(N).height(C).color(e.map(function(e,t){return e.color||n[t%n.length]}).filter(function(t,n){return!e[n].disabled&&e[n].yAxis==1&&e[n].type=="bar"})),v.width(N).height(C).color(e.map(function(e,t){return e.color||n[t%n.length]}).filter(function(t,n){return!e[n].disabled&&e[n].yAxis==2&&e[n].type=="bar"})),m.width(N).height(C).color(e.map(function(e,t){return e.color||n[t%n.length]}).filter(function(t,n){return!e[n].disabled&&e[n].yAxis==1&&e[n].type=="area"})),g.width(N).height(C).color(e.map(function(e,t){return e.color||n[t%n.length]}).filter(function(t,n){return!e[n].disabled&&e[n].yAxis==2&&e[n].type=="area"})),j.attr("transform","translate("+t.left+","+t.top+")");var F=j.select(".lines1Wrap").datum(k),I=j.select(".bars1Wrap").datum(A),q=j.select(".stack1Wrap").datum(M),R=j.select(".lines2Wrap").datum(L),U=j.select(".bars2Wrap").datum(O),z=j.select(".stack2Wrap").datum(_),W=M.length?M.map(function(e){return e.values}).reduce(function(e,t){return e.map( function(e,n){return{x:e.x,y:e.y+t[n].y}})}).concat([{x:0,y:0}]):[],X=_.length?_.map(function(e){return e.values}).reduce(function(e,t){return e.map(function(e,n){return{x:e.x,y:e.y+t[n].y}})}).concat([{x:0,y:0}]):[];l.domain(d3.extent(d3.merge(D).concat(W),function(e){return e.y})).range([0,C]),c.domain(d3.extent(d3.merge(P).concat(X),function(e){return 
e.y})).range([0,C]),h.yDomain(l.domain()),d.yDomain(l.domain()),m.yDomain(l.domain()),p.yDomain(c.domain()),v.yDomain(c.domain()),g.yDomain(c.domain()),M.length&&d3.transition(q).call(m),_.length&&d3.transition(z).call(g),A.length&&d3.transition(I).call(d),O.length&&d3.transition(U).call(v),k.length&&d3.transition(F).call(h),L.length&&d3.transition(R).call(p),y.ticks(N/100).tickSize(-C,0),j.select(".x.axis").attr("transform","translate(0,"+C+")"),d3.transition(j.select(".x.axis")).call(y),b.ticks(C/36).tickSize(-N,0),d3.transition(j.select(".y1.axis")).call(b),w.ticks(C/36).tickSize(-N,0),d3.transition(j.select(".y2.axis")). call(w),j.select(".y2.axis").style("opacity",P.length?1:0).attr("transform","translate("+a.range()[1]+",0)"),E.dispatch.on("legendClick",function(t,n){t.disabled=!t.disabled,e.filter(function(e){return!e.disabled}).length||e.map(function(e){return e.disabled=!1,H.selectAll(".series").classed("disabled",!1),e}),T.update()}),S.on("tooltipShow",function(e){o&&x(e,f.parentNode)})}),T}var t={top:30,right:20,bottom:50,left:60},n=d3.scale.category20().range(),r=null,i=null,s=!0,o=!0,u=function(e,t,n,r,i){return"<h3>"+e+"</h3>"+"<p>"+n+" at "+t+"</p>"},a,f,a=d3.scale.linear(),l=d3.scale.linear(),c=d3.scale.linear(),h=e.models.line().yScale(l),p=e.models.line().yScale(c),d=e.models.multiBar().stacked(!1).yScale(l),v=e.models.multiBar().stacked(!1).yScale(c),m=e.models.stackedArea().yScale(l),g=e.models.stackedArea().yScale(c),y=e.models.axis().scale(a).orient("bottom").tickPadding(5),b=e.models.axis().scale(l).orient("left"),w=e.models.axis().scale(c).orient("right"),E=e.models.legen d().height(30),S=d3.dispatch("tooltipShow","tooltipHide"),x=function(t,n){var r=t.pos[0]+(n.offsetLeft||0),i=t.pos[1]+(n.offsetTop||0),s=y.tickFormat()(h.x()(t.point,t.pointIndex)),o=(t.series.yAxis==2?w:b).tickFormat()(h.y()(t.point,t.pointIndex)),a=u(t.series.key,s,o,t,T);e.tooltip.show([r,i],a,undefined,undefined,n.offsetParent)};return 
h.dispatch.on("elementMouseover.tooltip",function(e){e.pos=[e.pos[0]+t.left,e.pos[1]+t.top],S.tooltipShow(e)}),h.dispatch.on("elementMouseout.tooltip",function(e){S.tooltipHide(e)}),p.dispatch.on("elementMouseover.tooltip",function(e){e.pos=[e.pos[0]+t.left,e.pos[1]+t.top],S.tooltipShow(e)}),p.dispatch.on("elementMouseout.tooltip",function(e){S.tooltipHide(e)}),d.dispatch.on("elementMouseover.tooltip",function(e){e.pos=[e.pos[0]+t.left,e.pos[1]+t.top],S.tooltipShow(e)}),d.dispatch.on("elementMouseout.tooltip",function(e){S.tooltipHide(e)}),v.dispatch.on("elementMouseover.tooltip",function(e){e.pos=[e.pos[0]+t.left,e.pos[1]+t.top],S.tooltip Show(e)}),v.dispatch.on("elementMouseout.tooltip",function(e){S.tooltipHide(e)}),m.dispatch.on("tooltipShow",function(e){if(!Math.round(m.y()(e.point)*100))return setTimeout(function(){d3.selectAll(".point.hover").classed("hover",!1)},0),!1;e.pos=[e.pos[0]+t.left,e.pos[1]+t.top],S.tooltipShow(e)}),m.dispatch.on("tooltipHide",function(e){S.tooltipHide(e)}),g.dispatch.on("tooltipShow",function(e){if(!Math.round(g.y()(e.point)*100))return setTimeout(function(){d3.selectAll(".point.hover").classed("hover",!1)},0),!1;e.pos=[e.pos[0]+t.left,e.pos[1]+t.top],S.tooltipShow(e)}),g.dispatch.on("tooltipHide",function(e){S.tooltipHide(e)}),h.dispatch.on("elementMouseover.tooltip",function(e){e.pos=[e.pos[0]+t.left,e.pos[1]+t.top],S.tooltipShow(e)}),h.dispatch.on("elementMouseout.tooltip",function(e){S.tooltipHide(e)}),p.dispatch.on("elementMouseover.tooltip",function(e){e.pos=[e.pos[0]+t.left,e.pos[1]+t.top],S.tooltipShow(e)}),p.dispatch.on("elementMouseout.tooltip",function(e){S.tooltip Hide(e)}),S.on("tooltipHide",function(){o&&e.tooltip.cleanup()}),T.dispatch=S,T.lines1=h,T.lines2=p,T.bars1=d,T.bars2=v,T.stack1=m,T.stack2=g,T.xAxis=y,T.yAxis1=b,T.yAxis2=w,T.x=function(e){return arguments.length?(getX=e,h.x(e),d.x(e),T):getX},T.y=function(e){return arguments.length?(getY=e,h.y(e),d.y(e),T):getY},T.margin=function(e){return 
arguments.length?(t=e,T):t},T.width=function(e){return arguments.length?(r=e,T):r},T.height=function(e){return arguments.length?(i=e,T):i},T.color=function(e){return arguments.length?(n=e,E.color(e),T):n},T.showLegend=function(e){return arguments.length?(s=e,T):s},T.tooltips=function(e){return arguments.length?(o=e,T):o},T.tooltipContent=function(e){return arguments.length?(u=e,T):u},T},e.models.ohlcBar=function(){function E(e){return e.each(function(e){var g=n-t.left-t.right,E=r-t.top-t.bottom,S=d3.select(this);s.domain(y||d3.extent(e[0].values.map(u).concat(p))),v?s.range([g*.5/e[0].values.length,g*(e[0].values.length-.5)/e[0].values.l ength]):s.range([0,g]),o.domain(b||[d3.min(e[0].values.map(h).concat(d)),d3.max(e[0].values.map(c).concat(d))]).range([E,0]);if(s.domain()[0]===s.domain()[1]||o.domain()[0]===o.domain()[1])singlePoint=!0;s.domain()[0]===s.domain()[1]&&(s.domain()[0]?s.domain([s.domain()[0]-s.domain()[0]*.01,s.domain()[1]+s.domain()[1]*.01]):s.domain([-1,1])),o.domain()[0]===o.domain()[1]&&(o.domain()[0]?o.domain([o.domain()[0]+o.domain()[0]*.01,o.domain()[1]-o.domain()[1]*.01]):o.domain([-1,1]));var T=d3.select(this).selectAll("g.nv-wrap.nv-ohlcBar").data([e[0].values]),N=T.enter().append("g").attr("class","nvd3 nv-wrap nv-ohlcBar"),C=N.append("defs"),k=N.append("g"),L=T.select("g");k.append("g").attr("class","nv-ticks"),T.attr("transform","translate("+t.left+","+t.top+")"),S.on("click",function(e,t){w.chartClick({data:e,index:t,pos:d3.event,id:i})}),C.append("clipPath").attr("id","nv-chart-clip-path-"+i).append("rect"),T.select("#nv-chart-clip-path-"+i+" rect").attr("width",g).attr("height" ,E),L.attr("clip-path",m?"url(#nv-chart-clip-path-"+i+")":"");var A=T.select(".nv-ticks").selectAll(".nv-tick").data(function(e){return e});A.exit().remove();var O=A.enter().append("path").attr("class",function(e,t,n){return(f(e,t)>l(e,t)?"nv-tick negative":"nv-tick positive")+" nv-tick-"+n+"-"+t}).attr("d",function(t,n){var 
r=g/e[0].values.length*.9;return"m0,0l0,"+(o(f(t,n))-o(c(t,n)))+"l"+ -r/2+",0l"+r/2+",0l0,"+(o(h(t,n))-o(f(t,n)))+"l0,"+(o(l(t,n))-o(h(t,n)))+"l"+r/2+",0l"+ -r/2+",0z"}).attr("transform",function(e,t){return"translate("+s(u(e,t))+","+o(c(e,t))+")"}).on("mouseover",function(t,n){d3.select(this).classed("hover",!0),w.elementMouseover({point:t,series:e[0],pos:[s(u(t,n)),o(a(t,n))],pointIndex:n,seriesIndex:0,e:d3.event})}).on("mouseout",function(t,n){d3.select(this).classed("hover",!1),w.elementMouseout({point:t,series:e[0],pointIndex:n,seriesIndex:0,e:d3.event})}).on("click",function(e,t){w.elementClick({value:a(e,t),data:e,index:t,pos:[s(u(e,t)),o(a(e,t))] ,e:d3.event,id:i}),d3.event.stopPropagation()}).on("dblclick",function(e,t){w.elementDblClick({value:a(e,t),data:e,index:t,pos:[s(u(e,t)),o(a(e,t))],e:d3.event,id:i}),d3.event.stopPropagation()});A.attr("class",function(e,t,n){return(f(e,t)>l(e,t)?"nv-tick negative":"nv-tick positive")+" nv-tick-"+n+"-"+t}),d3.transition(A).attr("transform",function(e,t){return"translate("+s(u(e,t))+","+o(c(e,t))+")"}).attr("d",function(t,n){var r=g/e[0].values.length*.9;return"m0,0l0,"+(o(f(t,n))-o(c(t,n)))+"l"+ -r/2+",0l"+r/2+",0l0,"+(o(h(t,n))-o(f(t,n)))+"l0,"+(o(l(t,n))-o(h(t,n)))+"l"+r/2+",0l"+ -r/2+",0z"})}),E}var t={top:0,right:0,bottom:0,left:0},n=960,r=500,i=Math.floor(Math.random()*1e4),s=d3.scale.linear(),o=d3.scale.linear(),u=function(e){return e.x},a=function(e){return e.y},f=function(e){return e.open},l=function(e){return e.close},c=function(e){return e.high},h=function(e){return e.low},p=[],d=[],v=!1,m=!0,g=e.utils.defaultColor(),y,b,w=d3.dispatch("chartClick","elementClick"," elementDblClick","elementMouseover","elementMouseout");return E.dispatch=w,E.x=function(e){return arguments.length?(u=e,E):u},E.y=function(e){return arguments.length?(a=e,E):a},E.open=function(e){return arguments.length?(f=e,E):f},E.close=function(e){return arguments.length?(l=e,E):l},E.high=function(e){return 
arguments.length?(c=e,E):c},E.low=function(e){return arguments.length?(h=e,E):h},E.margin=function(e){return arguments.length?(t.top=typeof e.top!="undefined"?e.top:t.top,t.right=typeof e.right!="undefined"?e.right:t.right,t.bottom=typeof e.bottom!="undefined"?e.bottom:t.bottom,t.left=typeof e.left!="undefined"?e.left:t.left,E):t},E.width=function(e){return arguments.length?(n=e,E):n},E.height=function(e){return arguments.length?(r=e,E):r},E.xScale=function(e){return arguments.length?(s=e,E):s},E.yScale=function(e){return arguments.length?(o=e,E):o},E.xDomain=function(e){return arguments.length?(y=e,E):y},E.yDomain=function(e){return arguments.length?(b=e,E):b},E.force X=function(e){return arguments.length?(p=e,E):p},E.forceY=function(e){return arguments.length?(d=e,E):d},E.padData=function(e){return arguments.length?(v=e,E):v},E.clipEdge=function(e){return arguments.length?(m=e,E):m},E.color=function(t){return arguments.length?(g=e.utils.getColor(t),E):g},E.id=function(e){return arguments.length?(i=e,E):i},E},e.models.pie=function(){function E(e){return e.each(function(e){function P(e){var t=(e.startAngle+e.endAngle)*90/Math.PI-90;return t>90?t-180:t}function H(e){e.endAngle=isNaN(e.endAngle)?0:e.endAngle,e.startAngle=isNaN(e.startAngle)?0:e.startAngle,v||(e.innerRadius=0);var t=d3.interpolate(this._current,e);return this._current=t(0),function(e){return L(t(e))}}function B(e){e.innerRadius=0;var t=d3.interpolate({startAngle:0,endAngle:0},e);return function(e){return L(t(e))}}var u=n-t.left-t.right,l=r-t.top-t.bottom,E=Math.min(u,l)/2,S=E-E/5,x=d3.select(this),T=x.selectAll(".nv-wrap.nv-pie").data([i(e[0])]),N=T.enter().append("g").attr(" class","nvd3 nv-wrap nv-pie nv-chart-"+a),C=N.append("g"),k=T.select("g");C.append("g").attr("class","nv-pie"),T.attr("transform","translate("+t.left+","+t.top+")"),k.select(".nv-pie").attr("transform","translate("+u/2+","+l/2+")"),x.on("click",function(e,t){w.chartClick({data:e,index:t,pos:d3.event,id:a})});var 
L=d3.svg.arc().outerRadius(S);g&&L.startAngle(g),y&&L.endAngle(y),v&&L.innerRadius(E*b);var A=d3.layout.pie().sort(null).value(function(e){return e.disabled?0:o(e)}),O=T.select(".nv-pie").selectAll(".nv-slice").data(A);O.exit().remove();var M=O.enter().append("g").attr("class","nv-slice").on("mouseover",function(e,t){d3.select(this).classed("hover",!0),w.elementMouseover({label:s(e.data),value:o(e.data),point:e.data,pointIndex:t,pos:[d3.event.pageX,d3.event.pageY],id:a})}).on("mouseout",function(e,t){d3.select(this).classed("hover",!1),w.elementMouseout({label:s(e.data),value:o(e.data),point:e.data,index:t,id:a})}).on("click",function(e,t){w.elementClick({label:s(e.d ata),value:o(e.data),point:e.data,index:t,pos:d3.event,id:a}),d3.event.stopPropagation()}).on("dblclick",function(e,t){w.elementDblClick({label:s(e.data),value:o(e.data),point:e.data,index:t,pos:d3.event,id:a}),d3.event.stopPropagation()});O.attr("fill",function(e,t){return f(e,t)}).attr("stroke",function(e,t){return f(e,t)});var _=M.append("path").each(function(e){this._current=e});d3.transition(O.select("path")).attr("d",L).attrTween("d",H);if(c){var D=d3.svg.arc().innerRadius(0);h&&(D=L),p&&(D=d3.svg.arc().outerRadius(L.outerRadius())),M.append("g").classed("nv-label",!0).each(function(e,t){var n=d3.select(this);n.attr("transform",function(e){if(m){e.outerRadius=S+10,e.innerRadius=S+15;var t=(e.startAngle+e.endAngle)/2*(180/Math.PI);return(e.startAngle+e.endAngle)/2<Math.PI?t-=90:t+=90,"translate("+D.centroid(e)+") rotate("+t+")"}return e.outerRadius=E+10,e.innerRadius=E+15,"translate("+D.centroid(e)+")"}),n.append("rect").style("stroke","#fff").style("fill","#fff").attr( "rx",3).attr("ry",3),n.append("text").style("text-anchor",m?(e.startAngle+e.endAngle)/2<Math.PI?"start":"end":"middle").style("fill","#000")}),O.select(".nv-label").transition().attr("transform",function(e){if(m){e.outerRadius=S+10,e.innerRadius=S+15;var 
t=(e.startAngle+e.endAngle)/2*(180/Math.PI);return(e.startAngle+e.endAngle)/2<Math.PI?t-=90:t+=90,"translate("+D.centroid(e)+") rotate("+t+")"}return e.outerRadius=E+10,e.innerRadius=E+15,"translate("+D.centroid(e)+")"}),O.each(function(e,t){var n=d3.select(this);n.select(".nv-label text").style("text-anchor",m?(e.startAngle+e.endAngle)/2<Math.PI?"start":"end":"middle").text(function(e,t){var n=(e.endAngle-e.startAngle)/(2*Math.PI);return e.value&&n>d?s(e.data):""});var r=n.select("text").node().getBBox();n.select(".nv-label rect").attr("width",r.width+10).attr("height",r.height+10).attr("transform",function(){return"translate("+[r.x-5,r.y-5]+")"})})}}),E}var t={top:0,right:0,bottom:0,left:0},n=500,r=500,i=function(e){return e.values},s=function(e){return e.x},o=function(e){return e.y},u=function(e){return e.description},a=Math.floor(Math.random()*1e4),f=e.utils.defaultColor(),l=d3.format(",.2f"),c=!0,h=!0,p=!1,d=.02,v=!1,m=!1,g=!1,y=!1,b=.5,w=d3.dispatch("chartClick","elementClick","elementDblClick","elementMouseover","elementMouseout");return E.dispatch=w,E.margin=function(e){return arguments.length?(t.top=typeof e.top!="undefined"?e.top:t.top,t.right=typeof e.right!="undefined"?e.right:t.right,t.bottom=typeof e.bottom!="undefined"?e.bottom:t.bottom,t.left=typeof e.left!="undefined"?e.left:t.left,E):t},E.width=function(e){return arguments.length?(n=e,E):n},E.height=function(e){return arguments.length?(r=e,E):r},E.values=function(e){return arguments.length?(i=e,E):i},E.x=function(e){return arguments.length?(s=e,E):s},E.y=function(e){return arguments.length?(o=d3.functor(e),E):o},E.description=function(e){return arguments.length?(u=e,E):u},E.showLabels=function(e){return arguments.length?(c=e,E ):c},E.labelSunbeamLayout=function(e){return arguments.length?(m=e,E):m},E.donutLabelsOutside=function(e){return arguments.length?(p=e,E):p},E.pieLabelsOutside=function(e){return arguments.length?(h=e,E):h},E.donut=function(e){return 
arguments.length?(v=e,E):v},E.donutRatio=function(e){return arguments.length?(b=e,E):b},E.startAngle=function(e){return arguments.length?(g=e,E):g},E.endAngle=function(e){return arguments.length?(y=e,E):y},E.id=function(e){return arguments.length?(a=e,E):a},E.color=function(t){return arguments.length?(f=e.utils.getColor(t),E):f},E.valueFormat=function(e){return arguments.length?(l=e,E):l},E.labelThreshold=function(e){return arguments.length?(d=e,E):d},E},e.models.pieChart=function(){function v(e){return e.each(function(e){var u=d3.select(this),a=this,f=(i||parseInt(u.style("width"))||960)-r.left-r.right,d=(s||parseInt(u.style("height"))||400)-r.top-r.bottom;v.update=function(){u.transition().call(v)},v.container=this,l.disabled=e[0].map(function (e){return!!e.disabled});if(!c){var m;c={};for(m in l)l[m]instanceof Array?c[m]=l[m].slice(0):c[m]=l[m]}if(!e[0]||!e[0].length){var g=u.selectAll(".nv-noData").data([h]);return g.enter().append("text").attr("class","nvd3 nv-noData").attr("dy","-.7em").style("text-anchor","middle"),g.attr("x",r.left+f/2).attr("y",r.top+d/2).text(function(e){return e}),v}u.selectAll(".nv-noData").remove();var y=u.selectAll("g.nv-wrap.nv-pieChart").data([e]),b=y.enter().append("g").attr("class","nvd3 nv-wrap nv-pieChart").append("g"),w=y.select("g");b.append("g").attr("class","nv-pieWrap"),b.append("g").attr("class","nv-legendWrap"),o&&(n.width(f).key(t.x()),y.select(".nv-legendWrap").datum(t.values()(e[0])).call(n),r.top!=n.height()&&(r.top=n.height(),d=(s||parseInt(u.style("height"))||400)-r.top-r.bottom),y.select(".nv-legendWrap").attr("transform","translate(0,"+ -r.top+")")),y.attr("transform","translate("+r.left+","+r.top+")"),t.width(f).height(d);var E=w.select(".nv-pieWrap").datum(e);d3. 
transition(E).call(t),n.dispatch.on("legendClick",function(n,r,i){n.disabled=!n.disabled,t.values()(e[0]).filter(function(e){return!e.disabled}).length||t.values()(e[0]).map(function(e){return e.disabled=!1,y.selectAll(".nv-series").classed("disabled",!1),e}),l.disabled=e[0].map(function(e){return!!e.disabled}),p.stateChange(l),v.update()}),t.dispatch.on("elementMouseout.tooltip",function(e){p.tooltipHide(e)}),p.on("changeState",function(t){typeof t.disabled!="undefined"&&(e[0].forEach(function(e,n){e.disabled=t.disabled[n]}),l.disabled=t.disabled),v.update()})}),v}var t=e.models.pie(),n=e.models.legend(),r={top:30,right:20,bottom:20,left:20},i=null,s=null,o=!0,u=e.utils.defaultColor(),a=!0,f=function(e,t,n,r){return"<h3>"+e+"</h3>"+"<p>"+t+"</p>"},l={},c=null,h="No Data Available.",p=d3.dispatch("tooltipShow","tooltipHide","stateChange","changeState"),d=function(n,r){var i=t.description()(n.point)||t.x()(n.point),s=n.pos[0]+(r&&r.offsetLeft||0),o=n.pos[1]+(r&&r.offsetTop||0 ),u=t.valueFormat()(t.y()(n.point)),a=f(i,u,n,v);e.tooltip.show([s,o],a,n.value<0?"n":"s",null,r)};return t.dispatch.on("elementMouseover.tooltip",function(e){e.pos=[e.pos[0]+r.left,e.pos[1]+r.top],p.tooltipShow(e)}),p.on("tooltipShow",function(e){a&&d(e)}),p.on("tooltipHide",function(){a&&e.tooltip.cleanup()}),v.legend=n,v.dispatch=p,v.pie=t,d3.rebind(v,t,"valueFormat","values","x","y","description","id","showLabels","donutLabelsOutside","pieLabelsOutside","donut","donutRatio","labelThreshold"),v.margin=function(e){return arguments.length?(r.top=typeof e.top!="undefined"?e.top:r.top,r.right=typeof e.right!="undefined"?e.right:r.right,r.bottom=typeof e.bottom!="undefined"?e.bottom:r.bottom,r.left=typeof e.left!="undefined"?e.left:r.left,v):r},v.width=function(e){return arguments.length?(i=e,v):i},v.height=function(e){return arguments.length?(s=e,v):s},v.color=function(r){return arguments.length?(u=e.utils.getColor(r),n.color(u),t.color(u),v):u},v.showLegend=function(e){retur n 
arguments.length?(o=e,v):o},v.tooltips=function(e){return arguments.length?(a=e,v):a},v.tooltipContent=function(e){return arguments.length?(f=e,v):f},v.state=function(e){return arguments.length?(l=e,v):l},v.defaultState=function(e){return arguments.length?(c=e,v):c},v.noData=function(e){return arguments.length?(h=e,v):h},v},e.models.scatter=function(){function j(e){return e.each(function(e){function $(){if(!g)return!1;var i,a=d3.merge(e.map(function(e,t){return e.values.map(function(e,n){var r=f(e,n)+Math.random()*1e-7,i=l(e,n)+Math.random()*1e-7;return[o(r),u(i),t,n,e]}).filter(function(e,t){return b(e[4],t)})}));if(M===!0){if(x){var c=R.select("defs").selectAll(".nv-point-clips").data([s]).enter();c.append("clipPath").attr("class","nv-point-clips").attr("id","nv-points-clip-"+s);var h=R.select("#nv-points-clip-"+s).selectAll("circle").data(a);h.enter().append("circle").attr("r",T),h.exit().remove(),h.attr("cx",function(e){return e[0]}).attr("cy",function(e){return e[1]}) ,R.select(".nv-point-paths").attr("clip-path","url(#nv-points-clip-"+s+")")}a.length&&(a.push([o.range()[0]-20,u.range()[0]-20,null,null]),a.push([o.range()[1]+20,u.range()[1]+20,null,null]),a.push([o.range()[0]-20,u.range()[0]+20,null,null]),a.push([o.range()[1]+20,u.range()[1]-20,null,null]));var p=d3.geom.polygon([[-10,-10],[-10,r+10],[n+10,r+10],[n+10,-10]]),d=d3.geom.voronoi(a).map(function(e,t){return{data:p.clip(e),series:a[t][2],point:a[t][3]}}),v=R.select(".nv-point-paths").selectAll("path").data(d);v.enter().append("path").attr("class",function(e,t){return"nv-path-"+t}),v.exit().remove(),v.attr("d",function(e){return e.data.length===0?"M 0 0":"M"+e.data.join("L")+"Z"}),v.on("click",function(n){if(B)return 0;var r=e[n.series],i=r.values[n.point];O.elementClick({point:i,series:r,pos:[o(f(i,n.point))+t.left,u(l(i,n.point))+t.top],seriesIndex:n.series,pointIndex:n.point})}).on("mouseover",function(n){if(B)return 0;var r=e[n.series],i=r.values[n.point];O.elementMouseove 
r({point:i,series:r,pos:[o(f(i,n.point))+t.left,u(l(i,n.point))+t.top],seriesIndex:n.series,pointIndex:n.point})}).on("mouseout",function(t,n){if(B)return 0;var r=e[t.series],i=r.values[t.point];O.elementMouseout({point:i,series:r,seriesIndex:t.series,pointIndex:t.point})})}else R.select(".nv-groups").selectAll(".nv-group").selectAll(".nv-point").on("click",function(n,r){if(B||!e[n.series])return 0;var i=e[n.series],s=i.values[r];O.elementClick({point:s,series:i,pos:[o(f(s,r))+t.left,u(l(s,r))+t.top],seriesIndex:n.series,pointIndex:r})}).on("mouseover",function(n,r){if(B||!e[n.series])return 0;var i=e[n.series],s=i.values[r];O.elementMouseover({point:s,series:i,pos:[o(f(s,r))+t.left,u(l(s,r))+t.top],seriesIndex:n.series,pointIndex:r})}).on("mouseout",function(t,n){if(B||!e[t.series])return 0;var r=e[t.series],i=r.values[n];O.elementMouseout({point:i,series:r,seriesIndex:t.series,pointIndex:n})});B=!1}var j=n-t.left-t.right,F=r-t.top-t.bottom,I=d3.select(this);e=e.map(functio n(e,t){return e.values=e.values.map(function(e){return e.series=t,e}),e});var q=N&&C&&k?[]:d3.merge(e.map(function(e){return e.values -.map(function(e,t){return{x:f(e,t),y:l(e,t),size:c(e,t)}})}));o.domain(N||d3.extent(q.map(function(e){return e.x}).concat(d))),w&&e[0]?o.range([(j*E+j)/(2*e[0].values.length),j-j*(1+E)/(2*e[0].values.length)]):o.range([0,j]),u.domain(C||d3.extent(q.map(function(e){return e.y}).concat(v))).range([F,0]),a.domain(k||d3.extent(q.map(function(e){return e.size}).concat(m))).range(L||[16,256]);if(o.domain()[0]===o.domain()[1]||u.domain()[0]===u.domain()[1])A=!0;o.domain()[0]===o.domain()[1]&&(o.domain()[0]?o.domain([o.domain()[0]-o.domain()[0]*.01,o.domain()[1]+o.domain()[1]*.01]):o.domain([-1,1])),u.domain()[0]===u.domain()[1]&&(u.domain()[0]?u.domain([u.domain()[0]+u.domain()[0]*.01,u.domain()[1]-u.domain()[1]*.01]):u.domain([-1,1])),isNaN(o.domain()[0])&&o.domain([-1,1]),isNaN(u.domain()[0])&&u.domain([-1,1]),_=_||o,D=D||u,P=P||a;var 
R=I.selectAll("g.nv-wrap.nv-scatter").data([e]),U=R.enter().append("g").attr("class","nvd3 nv-wrap nv-scatter nv-chart-"+s+(A?" nv-single-point":"" )),W=U.append("defs"),X=U.append("g"),V=R.select("g");X.append("g").attr("class","nv-groups"),X.append("g").attr("class","nv-point-paths"),R.attr("transform","translate("+t.left+","+t.top+")"),W.append("clipPath").attr("id","nv-edge-clip-"+s).append("rect"),R.select("#nv-edge-clip-"+s+" rect").attr("width",j).attr("height",F),V.attr("clip-path",S?"url(#nv-edge-clip-"+s+")":""),B=!0;var J=R.select(".nv-groups").selectAll(".nv-group").data(function(e){return e},function(e){return e.key});J.enter().append("g").style("stroke-opacity",1e-6).style("fill-opacity",1e-6),d3.transition(J.exit()).style("stroke-opacity",1e-6).style("fill-opacity",1e-6).remove(),J.attr("class",function(e,t){return"nv-group nv-series-"+t}).classed("hover",function(e){return e.hover}),d3.transition(J).style("fill",function(e,t){return i(e,t)}).style("stroke",function(e,t){return i(e,t)}).style("stroke-opacity",1).style("fill-opacity",.5);if(p){var K=J.selectAll("circle.nv-point").data(function(e){return e. 
values},y);K.enter().append("circle").attr("cx",function(e,t){return _(f(e,t))}).attr("cy",function(e,t){return D(l(e,t))}).attr("r",function(e,t){return Math.sqrt(a(c(e,t))/Math.PI)}),K.exit().remove(),J.exit().selectAll("path.nv-point").transition().attr("cx",function(e,t){return o(f(e,t))}).attr("cy",function(e,t){return u(l(e,t))}).remove(),K.each(function(e,t){d3.select(this).classed("nv-point",!0).classed("nv-point-"+t,!0)}),K.transition().attr("cx",function(e,t){return o(f(e,t))}).attr("cy",function(e,t){return u(l(e,t))}).attr("r",function(e,t){return Math.sqrt(a(c(e,t))/Math.PI)})}else{var K=J.selectAll("path.nv-point").data(function(e){return e.values});K.enter().append("path").attr("transform",function(e,t){return"translate("+_(f(e,t))+","+D(l(e,t))+")"}).attr("d",d3.svg.symbol().type(h).size(function(e,t){return a(c(e,t))})),K.exit().remove(),d3.transition(J.exit().selectAll("path.nv-point")).attr("transform",function(e,t){return"translate("+o(f(e,t))+","+u(l(e,t ))+")"}).remove(),K.each(function(e,t){d3.select(this).classed("nv-point",!0).classed("nv-point-"+t,!0)}),K.transition().attr("transform",function(e,t){return"translate("+o(f(e,t))+","+u(l(e,t))+")"}).attr("d",d3.svg.symbol().type(h).size(function(e,t){return a(c(e,t))}))}clearTimeout(H),H=setTimeout($,300),_=o.copy(),D=u.copy(),P=a.copy()}),j}var t={top:0,right:0,bottom:0,left:0},n=960,r=500,i=e.utils.defaultColor(),s=Math.floor(Math.random()*1e5),o=d3.scale.linear(),u=d3.scale.linear(),a=d3.scale.linear(),f=function(e){return e.x},l=function(e){return e.y},c=function(e){return e.size||1},h=function(e){return e.shape||"circle"},p=!0,d=[],v=[],m=[],g=!0,y=null,b=function(e){return!e.notActive},w=!1,E=.1,S=!1,x=!0,T=function(){return 25},N=null,C=null,k=null,L=null,A=!1,O=d3.dispatch("elementClick","elementMouseover","elementMouseout"),M=!0,_,D,P,H,B=!1;return O.on("elementMouseover.point",function(e){g&&d3.select(".nv-chart-"+s+" .nv-series-"+e.seriesIndex+" .nv-point-"+e.po 
intIndex).classed("hover",!0)}),O.on("elementMouseout.point",function(e){g&&d3.select(".nv-chart-"+s+" .nv-series-"+e.seriesIndex+" .nv-point-"+e.pointIndex).classed("hover",!1)}),j.dispatch=O,j.x=function(e){return arguments.length?(f=d3.functor(e),j):f},j.y=function(e){return arguments.length?(l=d3.functor(e),j):l},j.size=function(e){return arguments.length?(c=d3.functor(e),j):c},j.margin=function(e){return arguments.length?(t.top=typeof e.top!="undefined"?e.top:t.top,t.right=typeof e.right!="undefined"?e.right:t.right,t.bottom=typeof e.bottom!="undefined"?e.bottom:t.bottom,t.left=typeof e.left!="undefined"?e.left:t.left,j):t},j.width=function(e){return arguments.length?(n=e,j):n},j.height=function(e){return arguments.length?(r=e,j):r},j.xScale=function(e){return arguments.length?(o=e,j):o},j.yScale=function(e){return arguments.length?(u=e,j):u},j.zScale=function(e){return arguments.length?(a=e,j):a},j.xDomain=function(e){return arguments.length?(N=e,j):N},j.yDomain=functi on(e){return arguments.length?(C=e,j):C},j.sizeDomain=function(e){return arguments.length?(k=e,j):k},j.sizeRange=function(e){return arguments.length?(L=e,j):L},j.forceX=function(e){return arguments.length?(d=e,j):d},j.forceY=function(e){return arguments.length?(v=e,j):v},j.forceSize=function(e){return arguments.length?(m=e,j):m},j.interactive=function(e){return arguments.length?(g=e,j):g},j.pointKey=function(e){return arguments.length?(y=e,j):y},j.pointActive=function(e){return arguments.length?(b=e,j):b},j.padData=function(e){return arguments.length?(w=e,j):w},j.padDataOuter=function(e){return arguments.length?(E=e,j):E},j.clipEdge=function(e){return arguments.length?(S=e,j):S},j.clipVoronoi=function(e){return arguments.length?(x=e,j):x},j.useVoronoi=function(e){return arguments.length?(M=e,M===!1&&(x=!1),j):M},j.clipRadius=function(e){return arguments.length?(T=e,j):T},j.color=function(t){return arguments.length?(i=e.utils.getColor(t),j):i},j.shape=function(e){return argum 
ents.length?(h=e,j):h},j.onlyCircles=function(e){return arguments.length?(p=e,j):p},j.id=function(e){return arguments.length?(s=e,j):s},j.singlePoint=function(e){return arguments.length?(A=e,j):A},j},e.models.scatterChart=function(){function P(e){return e.each(function(e){function W(){if(E)return R.select(".nv-point-paths").style("pointer-events","all"),!1;R.select(".nv-point-paths").style("pointer-events","none");var i=d3.mouse(this);h.distortion(w).focus(i[0]),p.distortion(w).focus(i[1]),R.select(".nv-scatterWrap").call(t),R.select(".nv-x.nv-axis").call(n),R.select(".nv-y.nv-axis").call(r),R.select(".nv-distributionX").datum(e.filter(function(e){return!e.disabled})).call(o),R.select(".nv-distributionY").datum(e.filter(function(e){return!e.disabled})).call(u)}var x=d3.select(this),T=this,N=(f||parseInt(x.style("width"))||960)-a.left-a.right,H=(l||parseInt(x.style("height"))||400)-a.top-a.bottom;P.update=function(){x.transition().call(P)},C.disabled=e.map(function(e){return! !e.disabled});if(!k){var B;k={};for(B in C)C[B]instanceof Array?k[B]=C[B].slice(0):k[B]=C[B]}if(!e||!e.length||!e.filter(function(e){return e.values.length}).length){var j=x.selectAll(".nv-noData").data([A]);return j.enter().append("text").attr("class","nvd3 nv-noData").attr("dy","-.7em").style("text-anchor","middle"),j.attr("x",a.left+N/2).attr("y",a.top+H/2).text(function(e){return e}),P}x.selectAll(".nv-noData").remove(),O=O||h,M=M||p;var F=x.selectAll("g.nv-wrap.nv-scatterChart").data([e]),I=F.enter().append("g").attr("class","nvd3 nv-wrap nv-scatterChart nv-chart-"+t.id()),q=I.append("g"),R=F.select("g");q.append("rect").attr("class","nvd3 nv-background"),q.append("g").attr("class","nv-x nv-axis"),q.append("g").attr("class","nv-y nv-axis"),q.append("g").attr("class","nv-scatterWrap"),q.append("g").attr("class","nv-distWrap"),q.append("g").attr("class","nv-legendWrap"),q.append("g").attr("class","nv-controlsWrap"),y&&(i.width(N/2),F.select(".nv-legendWrap").datum(e).call 
(i),a.top!=i.height()&&(a.top=i.height(),H=(l||parseInt(x.style("height"))||400)-a.top-a.bottom),F.select(".nv-legendWrap").attr("transform","translate("+N/2+","+ -a.top+")")),b&&(s.width(180).color(["#444"]),R.select(".nv-controlsWrap").datum(D).attr("transform","translate(0,"+ -a.top+")").call(s)),F.attr("transform","translate("+a.left+","+a.top+")"),t.width(N).height(H).color(e.map(function(e,t){return e.color||c(e,t)}).filter(function(t,n){return!e[n].disabled})).xDomain(null).yDomain(null),F.select(".nv-scatterWrap").datum(e.filter(function(e){return!e.disabled})).call(t);if(d){var U=h.domain()[1]-h.domain()[0];t.xDomain([h.domain()[0]-d*U,h.domain()[1]+d*U])}if(v){var z=p.domain()[1]-p.domain()[0];t.yDomain([p.domain()[0]-v*z,p.domain()[1]+v*z])}F.select(".nv-scatterWrap").datum(e.filter(function(e){return!e.disabled})).call(t),n.scale(h).ticks(n.ticks()&&n.ticks().length?n.ticks():N/100).tickSize(-H,0),R.select(".nv-x.nv-axis").attr("transform","translate(0,"+p.range( )[0]+")").call(n),r.scale(p).ticks(r.ticks()&&r.ticks().length?r.ticks():H/36).tickSize(-N,0),R.select(".nv-y.nv-axis").call(r),m&&(o.getData(t.x()).scale(h).width(N).color(e.map(function(e,t){return e.color||c(e,t)}).filter(function(t,n){return!e[n].disabled})),q.select(".nv-distWrap").append("g").attr("class","nv-distributionX"),R.select(".nv-distributionX").attr("transform","translate(0,"+p.range()[0]+")").datum(e.filter(function(e){return!e.disabled})).call(o)),g&&(u.getData(t.y()).scale(p).width(H).color(e.map(function(e,t){return e.color||c(e,t)}).filter(function(t,n){return!e[n].disabled})),q.select(".nv-distWrap").append("g").attr("class","nv-distributionY"),R.select(".nv-distributionY").attr("transform","translate(-"+u.size()+",0)").datum(e.filter(function(e){return!e.disabled})).call(u)),d3.fisheye&&(R.select(".nv-background").attr("width",N).attr("height",H),R.select(".nv-background").on("mousemove",W),R.select(".nv-background").on("click",function(){E=!E}),t.disp 
atch.on("elementClick.freezeFisheye",function(){E=!E})),s.dispatch.on("legendClick",function(e,i){e.disabled=!e.disabled,w=e.disabled?0:2.5,R.select(".nv-background").style("pointer-events",e.disabled?"none":"all"),R.select(".nv-point-paths").style("pointer-events",e.disabled?"all":"none"),e.disabled?(h.distortion(w).focus(0),p.distortion(w).focus(0),R.select(".nv-scatterWrap").call(t),R.select(".nv-x.nv-axis").call(n),R.select(".nv-y.nv-axis").call(r)):E=!1,P.update()}),i.dispatch.on("legendClick",function(t,n,r){t.disabled=!t.disabled,e.filter(function(e){return!e.disabled}).length||e.map(function(e){return e.disabled=!1,F.selectAll(".nv-series").classed("disabled",!1),e}),C.disabled=e.map(function(e){return!!e.disabled}),L.stateChange(C),P.update()}),i.dispatch.on("legendDblclick",function(t){e.forEach(function(e){e.disabled=!0}),t.disabled=!1,C.disabled=e.map(function(e){return!!e.disabled}),L.stateChange(C),P.update()}),t.dispatch.on("elementMouseover.tooltip",function( e){d3.select(".nv-chart-"+t.id()+" .nv-series-"+e.seriesIndex+" .nv-distx-"+e.pointIndex).attr("y1",function(t,n){return e.pos[1]-H}),d3.select(".nv-chart-"+t.id()+" .nv-series-"+e.seriesIndex+" .nv-disty-"+e.pointIndex).attr("x2",e.pos[0]+o.size()),e.pos=[e.pos[0]+a.left,e.pos[1]+a.top],L.tooltipShow(e)}),L.on("tooltipShow",function(e){S&&_(e,T.parentNode)}),L.on("changeState",function(t){typeof t.disabled!="undefined"&&(e.forEach(function(e,n){e.disabled=t.disabled[n]}),C.disabled=t.disabled),P.update()}),O=h.copy(),M=p.copy()}),P}var t=e.models.scatter(),n=e.models.axis(),r=e.models.axis(),i=e.models.legend(),s=e.models.legend(),o=e.models.distribution(),u=e.models.distribution(),a={top:30,right:20,bottom:50,left:75},f=null,l=null,c=e.utils.defaultColor(),h=d3.fisheye?d3.fisheye.scale(d3.scale.linear).distortion(0):t.xScale(),p=d3.fisheye?d3.fisheye.scale(d3.scale.linear).distortion(0):t.yScale(),d=0,v=0,m=!1,g=!1,y=!0,b=!!d3.fisheye,w=0,E=!1,S=!0,x=function(e,t,n){return 
"<strong>"+t+"</strong>"},T=function(e,t,n){return"<strong>"+n+"</strong>"},N=null,C={},k=null,L=d3.dispatch("tooltipShow","tooltipHide","stateChange","changeState"),A="No Data Available.";t.xScale(h).yScale(p),n.orient("bottom").tickPadding(10),r.orient("left").tickPadding(10),o.axis("x"),u.axis("y");var O,M,_=function(i,s){var o=i.pos[0]+(s.offsetLeft||0),u=i.pos[1]+(s.offsetTop||0),f=i.pos[0]+(s.offsetLeft||0),l=p.range()[0]+a.top+(s.offsetTop||0),c=h.range()[0]+a.left+(s.offsetLeft||0),d=i.pos[1]+(s.offsetTop||0),v=n.tickFormat()(t.x()(i.point,i.pointIndex)),m=r.tickFormat()(t.y()(i.point,i.pointIndex));x!=null&&e.tooltip.show([f,l],x(i.series.key,v,m,i,P),"n",1,s,"x-nvtooltip"),T!=null&&e.tooltip.show([c,d],T(i.series.key,v,m,i,P),"e",1,s,"y-nvtooltip"),N!=null&&e.tooltip.show([o,u],N(i.series.key,v,m,i,P),i.value<0?"n":"s",null,s)},D=[{key:"Magnify",disabled:!0}];return t.dispatch.on("elementMouseout.tooltip",function(e){L.tooltipHide(e),d3.select(".nv-chart-"+t.id()+" .nv-series-"+e.seriesIndex+" .nv-distx-"+e.pointIndex).attr("y1",0),d3.select(".nv-chart-"+t.id()+" .nv-series-"+e.seriesIndex+" .nv-disty-"+e.pointIndex).attr("x2",u.size())}),L.on("tooltipHide",function(){S&&e.tooltip.cleanup()}),P.dispatch=L,P.scatter=t,P.legend=i,P.controls=s,P.xAxis=n,P.yAxis=r,P.distX=o,P.distY=u,d3.rebind(P,t,"id","interactive","pointActive","x","y","shape","size","xScale","yScale","zScale","xDomain","yDomain","sizeDomain","sizeRange","forceX","forceY","forceSize","clipVoronoi","clipRadius","useVoronoi"),P.margin=function(e){return arguments.length?(a.top=typeof e.top!="undefined"?e.top:a.top,a.right=typeof e.right!="undefined"?e.right:a.right,a.bottom=typeof e.bottom!="undefined"?e.bottom:a.bottom,a.left=typeof e.left!="undefined"?e.left:a.left,P):a},P.width=function(e){return arguments.length?(f=e,P):f},P.height=function(e){return arguments.length?(l=e,P):l},P.color=function(t){return arguments.length?(c=e.utils.getColor(t),i.color(c),o.color(c),u. 
color(c),P):c},P.showDistX=function(e){return arguments.length?(m=e,P):m},P.showDistY=function(e){return arguments.length?(g=e,P):g},P.showControls=function(e){return arguments.length?(b=e,P):b},P.showLegend=function(e){return arguments.length?(y=e,P):y},P.fisheye=function(e){return arguments.length?(w=e,P):w},P.xPadding=function(e){return arguments.length?(d=e,P):d},P.yPadding=function(e){return arguments.length?(v=e,P):v},P.tooltips=function(e){return arguments.length?(S=e,P):S},P.tooltipContent=function(e){return arguments.length?(N=e,P):N},P.tooltipXContent=function(e){return arguments.length?(x=e,P):x},P.tooltipYContent=function(e){return arguments.length?(T=e,P):T},P.state=function(e){return arguments.length?(C=e,P):C},P.defaultState=function(e){return arguments.length?(k=e,P):k},P.noData=function(e){return arguments.length?(A=e,P):A},P},e.models.scatterPlusLineChart=function(){function _(e){return e.each(function(e){function U(){if(b)return I.select(".nv-point-paths") .style("pointer-events","all"),!1;I.select(".nv-point-paths").style("pointer-events","none");var i=d3.mouse(this);h.distortion(y).focus(i[0]),p.distortion(y).focus(i[1]),I.select(".nv-scatterWrap").datum(e.filter(function(e){return!e.disabled})).call(t),I.select(".nv-x.nv-axis").call(n),I.select(".nv-y.nv-axis").call(r),I.select(".nv-distributionX").datum(e.filter(function(e){return!e.disabled})).call(o),I.select(".nv-distributionY").datum(e.filter(function(e){return!e.disabled})).call(u)}var E=d3.select(this),S=this,x=(f||parseInt(E.style("width"))||960)-a.left-a.right,D=(l||parseInt(E.style("height"))||400)-a.top-a.bottom;_.update=function(){E.transition().call(_)},_.container=this,T.disabled=e.map(function(e){return!!e.disabled});if(!N){var P;N={};for(P in T)T[P]instanceof Array?N[P]=T[P].slice(0):N[P]=T[P]}if(!e||!e.length||!e.filter(function(e){return e.values.length}).length){var H=E.selectAll(".nv-noData").data([k]);return H.enter().append("text").attr("class","nvd3 n 
v-noData").attr("dy","-.7em").style("text-anchor","middle"),H.attr("x",a.left+x/2).attr("y",a.top+D/2).text(function(e){return e}),_}E.selectAll(".nv-noData").remove(),h=t.xScale(),p=t.yScale(),L=L||h,A=A||p;var B=E.selectAll("g.nv-wrap.nv-scatterChart").data([e]),j=B.enter().append("g").attr("class","nvd3 nv-wrap nv-scatterChart nv-chart-"+t.id()),F=j.append("g"),I=B.select("g");F.append("rect").attr("class","nvd3 nv-background"),F.append("g").attr("class","nv-x nv-axis"),F.append("g").attr("class","nv-y nv-axis"),F.append("g").attr("class","nv-scatterWrap"),F.append("g").attr("class","nv-regressionLinesWrap"),F.append("g").attr("class","nv-distWrap"),F.append("g").attr("class","nv-legendWrap"),F.append("g").attr("class","nv-controlsWrap"),B.attr("transform","translate("+a.left+","+a.top+")"),m&&(i.width(x/2),B.select(".nv-legendWrap").datum(e).call(i),a.top!=i.height()&&(a.top=i.height(),D=(l||parseInt(E.style("height"))||400)-a.top-a.bottom),B.select(".nv-legendWrap").att r("transform","translate("+x/2+","+ -a.top+")")),g&&(s.width(180).color(["#444"]),I.select(".nv-controlsWrap").datum(M).attr("transform","translate(0,"+ -a.top+")").call(s)),t.width(x).height(D).color(e.map(function(e,t){return e.color||c(e,t)}).filter(function(t,n){return!e[n].disabled})),B.select(".nv-scatterWrap").datum(e.filter(function(e){return!e.disabled})).call(t),B.select(".nv-regressionLinesWrap").attr("clip-path","url(#nv-edge-clip-"+t.id()+")");var q=B.select(".nv-regressionLinesWrap").selectAll(".nv-regLines").data(function(e){return e}),R=q.enter().append("g").attr("class","nv-regLines").append("line").attr("class","nv-regLine").style("stroke-opacity",0);q.selectAll(".nv-regLines line").attr("x1",h.range()[0]).attr("x2",h.range()[1]).attr("y1",function(e,t){return p(h.domain()[0]*e.slope+e.intercept)}).attr("y2",function(e,t){return p(h.domain()[1]*e.slope+e.intercept)}).style("stroke",function(e,t,n){return c(e,n)}).style("stroke-opacity",function(e,t){return e.disabled||typeof 
e.slope=="undefined"||typeof e.intercept=="undefined"?0:1}),n.scale(h).ticks(n.ticks()?n.ticks():x/100).tickSize(-D,0),I.select(".nv-x.nv-axis").attr("transform","translate(0,"+p.range()[0]+")").call(n),r.scale(p).ticks(r.ticks()?r.ticks():D/36).tickSize(-x,0),I.select(".nv-y.nv-axis").call(r),d&&(o.getData(t.x()).scale(h).width(x).color(e.map(function(e,t){return e.color||c(e,t)}).filter(function(t,n){return!e[n].disabled})),F.select(".nv-distWrap").append("g").attr("class","nv-distributionX"),I.select(".nv-distributionX").attr("transform","translate(0,"+p.range()[0]+")").datum(e.filter(function(e){return!e.disabled})).call(o)),v&&(u.getData(t.y()).scale(p).width(D).color(e.map(function(e,t){return e.color||c(e,t)}).filter(function(t,n){return!e[n].disabled})),F.select(".nv-distWrap").append("g").attr("class","nv-distributionY"),I.select(".nv-distributionY").attr("transform","translate(-"+u.size()+",0)").datum(e.filter(function(e){return!e.disabled})).cal l(u)),d3.fisheye&&(I.select(".nv-background").attr("width",x).attr("height",D),I.select(".nv-background").on("mousemove",U),I.select(".nv-background").on("click",function(){b=!b}),t.dispatch.on("elementClick.freezeFisheye",function(){b=!b})),s.dispatch.on("legendClick",function(e,i){e.disabled=!e.disabled,y=e.disabled?0:2.5,I.select(".nv-background").style("pointer-events",e.disabled?"none":"all"),I.select(".nv-point-paths").style("pointer-events",e.disabled?"all":"none"),e.disabled?(h.distortion(y).focus(0),p.distortion(y).focus(0),I.select(".nv-scatterWrap").call(t),I.select(".nv-x.nv-axis").call(n),I.select(".nv-y.nv-axis").call(r)):b=!1,_.update()}),i.dispatch.on("legendClick",function(t,n,r){t.disabled=!t.disabled,e.filter(function(e){return!e.disabled}).length||e.map(function(e){return e.disabled=!1,B.selectAll(".nv-series").classed("disabled",!1),e}),T.disabled=e.map(function(e){return!!e.disabled}),C.stateChange(T),_.update()}),i.dispatch.on("legendDblclick",function 
(t){e.forEach(function(e){e.disabled=!0}),t.disabled=!1,T.disabled=e.map(function(e){return!!e.disabled}),C.stateChange(T),_.update()}),t.dispatch.on("elementMouseover.tooltip",function(e){d3.select(".nv-chart-"+t.id()+" .nv-series-"+e.seriesIndex+" .nv-distx-"+e.pointIndex).attr("y1",e.pos[1]-D),d3.select(".nv-chart-"+t.id()+" .nv-series-"+e.seriesIndex+" .nv-disty-"+e.pointIndex).attr("x2",e.pos[0]+o.size()),e.pos=[e.pos[0]+a.left,e.pos[1]+a.top],C.tooltipShow(e)}),C.on("tooltipShow",function(e){w&&O(e,S.parentNode)}),C.on("changeState",function(t){typeof t.disabled!="undefined"&&(e.forEach(function(e,n){e.disabled=t.disabled[n]}),T.disabled=t.disabled),_.update()}),L=h.copy(),A=p.copy()}),_}var t=e.models.scatter(),n=e.models.axis(),r=e.models.axis(),i=e.models.legend(),s=e.models.legend(),o=e.models.distribution(),u=e.models.distribution(),a={top:30,right:20,bottom:50,left:75},f=null,l=null,c=e.utils.defaultColor(),h=d3.fisheye?d3.fisheye.scale(d3.scale.linear).distortio n(0):t.xScale(),p=d3.fisheye?d3.fisheye.scale(d3.scale.linear).distortion(0):t.yScale(),d=!1,v=!1,m=!0,g=!!d3.fisheye,y=0,b=!1,w=!0,E=function(e,t,n){return"<strong>"+t+"</strong>"},S=function(e,t,n){return"<strong>"+n+"</strong>"},x=function(e,t,n,r){return"<h3>"+e+"</h3>"+"<p>"+r+"</p>"},T={},N=null,C=d3.dispatch("tooltipShow","tooltipHide","stateChange","changeState"),k="No Data Available.";t.xScale(h).yScale(p),n.orient("bottom").tickPadding(10),r.orient("left").tickPadding(10),o.axis("x"),u.axis("y");var L,A,O=function(i,s){var o=i.pos[0]+(s.offsetLeft||0),u=i.pos[1]+(s.offsetTop||0),f=i.pos[0]+(s.offsetLeft||0),l=p.range()[0]+a.top+(s.offsetTop||0),c=h.range()[0]+a.left+(s.offsetLeft||0),d=i.pos[1]+(s.offsetTop||0),v=n.tickFormat()(t.x()(i.point,i.pointIndex)),m=r.tickFormat()(t.y()(i.point,i.pointIndex));E!=null&&e.tooltip.show([f,l],E(i.series.key,v,m,i,_),"n",1,s,"x-nvtooltip"),S!=null&&e.tooltip.show([c,d],S(i.series.key,v,m,i,_),"e",1,s,"y-nvtooltip"),x!=null&&e.t 
ooltip.show([o,u],x(i.series.key,v,m,i.point.tooltip,i,_),i.value<0?"n":"s",null,s)},M=[{key:"Magnify",disabled:!0}];return t.dispatch.on("elementMouseout.tooltip",function(e){C.tooltipHide(e),d3.select(".nv-chart-"+t.id()+" .nv-series-"+e.seriesIndex+" .nv-distx-"+e.pointIndex).attr("y1",0),d3.select(".nv-chart-"+t.id()+" .nv-series-"+e.seriesIndex+" .nv-disty-"+e.pointIndex).attr("x2",u.size())}),C.on("tooltipHide",function(){w&&e.tooltip.cleanup()}),_.dispatch=C,_.scatter=t,_.legend=i,_.controls=s,_.xAxis=n,_.yAxis=r,_.distX=o,_.distY=u,d3.rebind(_,t,"id","interactive","pointActive","x","y","shape","size","xScale","yScale","zScale","xDomain","yDomain","sizeDomain","sizeRange","forceX","forceY","forceSize","clipVoronoi","clipRadius","useVoronoi"),_.margin=function(e){return arguments.length?(a.top=typeof e.top!="undefined"?e.top:a.top,a.right=typeof e.right!="undefined"?e.right:a.right,a.bottom=typeof e.bottom!="undefined"?e.bottom:a.bottom,a.left=typeof e.left!="undefined "?e.left:a.left,_):a},_.width=function(e){return arguments.length?(f=e,_):f},_.height=function(e){return arguments.length?(l=e,_):l},_.color=function(t){return arguments.length?(c=e.utils.getColor(t),i.color(c),o.color(c),u.color(c),_):c},_.showDistX=function(e){return arguments.length?(d=e,_):d},_.showDistY=function(e){return arguments.length?(v=e,_):v},_.showControls=function(e){return arguments.length?(g=e,_):g},_.showLegend=function(e){return arguments.length?(m=e,_):m},_.fisheye=function(e){return arguments.length?(y=e,_):y},_.tooltips=function(e){return arguments.length?(w=e,_):w},_.tooltipContent=function(e){return arguments.length?(x=e,_):x},_.tooltipXContent=function(e){return arguments.length?(E=e,_):E},_.tooltipYContent=function(e){return arguments.length?(S=e,_):S},_.state=function(e){return arguments.length?(T=e,_):T},_.defaultState=function(e){return arguments.length?(N=e,_):N},_.noData=function(e){return arguments.length?(k=e,_):k},_},e.models.sparkline=functi on(){function 
h(e){return e.each(function(e){var i=n-t.left-t.right,h=r-t.top-t.bottom,p=d3.select(this);s.domain(l||d3.extent(e,u)).range([0,i]),o.domain(c||d3.extent(e,a)).range([h,0]);var d=p.selectAll("g.nv-wrap.nv-sparkline").data([e]),v=d.enter().append("g").attr("class","nvd3 nv-wrap nv-sparkline"),m=v.append("g"),g=d.select("g");d.attr("transform","translate("+t.left+","+t.top+")");var b=d.selectAll("path").data(function(e){return[e]});b.enter().append("path"),b.exit().remove(),b.style("stroke",function(e,t){return e.color||f(e,t)}).attr("d",d3.svg.line().x(function(e,t){return s(u(e,t))}).y(function(e,t){return o(a(e,t))}));var w=d.selectAll("circle.nv-point").data(function(e){function n(t){if(t!=-1){var n=e[t];return n.pointIndex=t,n}return null}var t=e.map(function(e,t){return a(e,t)}),r=n(t.lastIndexOf(o.domain()[1])),i=n(t.indexOf(o.domain()[0])),s=n(t.length-1);return[i,r,s].filter(function(e){return e!=null})});w.enter().append("circle"),w.exit().remove(),w.at tr("cx",function(e,t){return s(u(e,e.pointIndex))}).attr("cy",function(e,t){return o(a(e,e.pointIndex))}).attr("r",2).attr("class",function(e,t){return u(e,e.pointIndex)==s.domain()[1]?"nv-point nv-currentValue":a(e,e.pointIndex)==o.domain()[0]?"nv-point nv-minValue":"nv-point nv-maxValue"})}),h}var t={top:2,right:0,bottom:2,left:0},n=400,r=32,i=!0,s=d3.scale.linear(),o=d3.scale.linear(),u=function(e){return e.x},a=function(e){return e.y},f=e.utils.getColor(["#000"]),l,c;return h.margin=function(e){return arguments.length?(t.top=typeof e.top!="undefined"?e.top:t.top,t.right=typeof e.right!="undefined"?e.right:t.right,t.bottom=typeof e.bottom!="undefined"?e.bottom:t.bottom,t.left=typeof e.left!="undefined"?e.left:t.left,h):t},h.width=function(e){return arguments.length?(n=e,h):n},h.height=function(e){return arguments.length?(r=e,h):r},h.x=function(e){return arguments.length?(u=d3.functor(e),h):u},h.y=function(e){return arguments.length?(a=d3.functor(e),h):a},h.xScale=function (e){return 
arguments.length?(s=e,h):s},h.yScale=function(e){return arguments.length?(o=e,h):o},h.xDomain=function(e){return arguments.length?(l=e,h):l},h.yDomain=function(e){return arguments.length?(c=e,h):c},h.animate=function(e){return arguments.length?(i=e,h):i},h.color=function(t){return arguments.length?(f=e.utils.getColor(t),h):f},h},e.models.sparklinePlus=function(){function v(e){return e.each(function(c){function O(){if(a)return;var e=C.selectAll(".nv-hoverValue").data(u),r=e.enter().append("g").attr("class","nv-hoverValue").style("stroke-opacity",0).style("fill-opacity",0);e.exit().transition().duration(250).style("stroke-opacity",0).style("fill-opacity",0).remove(),e.attr("transform",function(e){return"translate("+s(t.x()(c[e],e))+",0)"}).transition().duration(250).style("stroke-opacity",1).style("fill-opacity",1);if(!u.length)return;r.append("line").attr("x1",0).attr("y1",-n.top).attr("x2",0).attr("y2",b),r.append("text").attr("class","nv-xValue").attr("x",-6).att r("y",-n.top).attr("text-anchor","end").attr("dy",".9em"),C.select(".nv-hoverValue .nv-xValue").text(f(t.x()(c[u[0]],u[0]))),r.append("text").attr("class","nv-yValue").attr("x",6).attr("y",-n.top).attr("text-anchor","start").attr("dy",".9em"),C.select(".nv-hoverValue .nv-yValue").text(l(t.y()(c[u[0]],u[0])))}function M(){function r(e,n){var r=Math.abs(t.x()(e[0],0)-n),i=0;for(var s=0;s<e.length;s++)Math.abs(t.x()(e[s],s)-n)<r&&(r=Math.abs(t.x()(e[s],s)-n),i=s);return i}if(a)return;var e=d3.mouse(this)[0]-n.left;u=[r(c,Math.round(s.invert(e)))],O()}var m=d3.select(this),g=(r||parseInt(m.style("width"))||960)-n.left-n.right,b=(i||parseInt(m.style("height"))||400)-n.top-n.bottom;v.update=function(){v(e)},v.container=this;if(!c||!c.length){var w=m.selectAll(".nv-noData").data([d]);return w.enter().append("text").attr("class","nvd3 nv-noData").attr("dy","-.7em").style("text-anchor","middle"),w.attr("x",n.left+g/2).attr("y",n.top+b/2).text(function(e){return e}),v}m.selectAll(".nv -noData").remove();var 
E=t.y()(c[c.length-1],c.length-1);s=t.xScale(),o=t.yScale();var S=m.selectAll("g.nv-wrap.nv-sparklineplus").data([c]),T=S.enter().append("g").attr("class","nvd3 nv-wrap nv-sparklineplus"),N=T.append("g"),C=S.select("g");N.append("g").attr("class","nv-sparklineWrap"),N.append("g").attr("class","nv-valueWrap"),N.append("g").attr("class","nv-hoverArea"),S.attr("transform","translate("+n.left+","+n.top+")");var k=C.select(".nv-sparklineWrap");t.width(g).height(b),k.call(t);var L=C.select(".nv-valueWrap"),A=L.selectAll(".nv-currentValue").data([E]);A.enter().append("text").attr("class","nv-currentValue").attr("dx",p?-8:8).attr("dy",".9em").style("text-anchor",p?"end":"start"),A.attr("x",g+(p?n.right:0)).attr("y",h?function(e){return o(e)}:0).style("fill",t.color()(c[c.length-1],c.length-1)).text(l(E)),N.select(".nv-hoverArea").append("rect").on("mousemove",M).on("click",function(){a=!a}).on("mouseout",function(){u=[],O()}),C.select(".nv-hoverArea rect").att r("transform",function(e){return"translate("+ -n.left+","+ -n.top+")"}).attr("width",g+n.left+n.right).attr("height",b+n.top)}),v}var t=e.models.sparkline(),n={top:15,right:100,bottom:10,left:50},r=null,i=null,s,o,u=[],a=!1,f=d3.format(",r"),l=d3.format(",.2f"),c=!0,h=!0,p=!1,d="No Data Available.";return v.sparkline=t,d3.rebind(v,t,"x","y","xScale","yScale","color"),v.margin=function(e){return arguments.length?(n.top=typeof e.top!="undefined"?e.top:n.top,n.right=typeof e.right!="undefined"?e.right:n.right,n.bottom=typeof e.bottom!="undefined"?e.bottom:n.bottom,n.left=typeof e.left!="undefined"?e.left:n.left,v):n},v.width=function(e){return arguments.length?(r=e,v):r},v.height=function(e){return arguments.length?(i=e,v):i},v.xTickFormat=function(e){return arguments.length?(f=e,v):f},v.yTickFormat=function(e){return arguments.length?(l=e,v):l},v.showValue=function(e){return arguments.length?(c=e,v):c},v.alignValue=function(e){return arguments.length?(h=e,v):h},v.rightAlignVal ue=function(e){return 
arguments.length?(p=e,v):p},v.noData=function(e){return arguments.length?(d=e,v):d},v},e.models.stackedArea=function(){function g(e){return e.each(function(e){var a=n-t.left-t.right,g=r-t.top-t.bottom,b=d3.select(this);p=v.xScale(),d=v.yScale(),e=e.map(function(e,t){return e.values=e.values.map(function(t,n){return t.index=n,t.stackedY=e.disabled?0:u(t,n),t}),e}),e=d3.layout.stack().order(l).offset(f).values(function(e){return e.values}).x(o).y(function(e){return e.stackedY}).out(function(e,t,n){e.display={y:n,y0:t}})(e);var w=b.selectAll("g.nv-wrap.nv-stackedarea").data([e]),E=w.enter().append("g").attr("class","nvd3 nv-wrap nv-stackedarea"),S=E.append("defs"),T=E.append("g"),N=w.select("g");T.append("g").attr("class","nv-areaWrap"),T.append("g").attr("class","nv-scatterWrap"),w.attr("transform","translate("+t.left+","+t.top+")"),v.width(a).height(g).x(o).y(function(e){return e.display.y+e.display.y0}).forceY([0]).color(e.map(function(e,t){return e.col or||i(e,t)}).filter(function(t,n){return!e[n].disabled}));var C=N.select(".nv-scatterWrap").datum(e.filter(function(e){return!e.disabled}));C.call(v),S.append("clipPath").attr("id","nv-edge-clip-"+s).append("rect"),w.select("#nv-edge-clip-"+s+" rect").attr("width",a).attr("height",g),N.attr("clip-path",h?"url(#nv-edge-clip-"+s+")":"");var k=d3.svg.area().x(function(e,t){return p(o(e,t))}).y0(function(e){return d(e.display.y0)}).y1(function(e){return d(e.display.y+e.display.y0)}).interpolate(c),L=d3.svg.area().x(function(e,t){return p(o(e,t))}).y0(function(e){return d(e.display.y0)}).y1(function(e){return d(e.display.y0)}),A=N.select(".nv-areaWrap").selectAll("path.nv-area").data(function(e){return e});A.enter().append("path").attr("class",function(e,t){return"nv-area nv-area-"+t}).on("mouseover",function(e,t){d3.select(this).classed("hover",!0),m.areaMouseover({point:e,series:e.key,pos:[d3.event.pageX,d3.event.pageY],seriesIndex:t})}).on("mouseout",function(e,t){d3.select(th 
is).classed("hover",!1),m.areaMouseout({point:e,series:e.key,pos:[d3.event.pageX,d3.event.pageY],seriesIndex:t})}).on("click",function(e,t){d3.select(this).classed("hover",!1),m.areaClick({point:e,series:e.key,pos:[d3.event.pageX,d3.event.pageY],seriesIndex:t})}),A.exit().attr("d",function(e,t){return L(e.values,t)}).remove(),A.style("fill",function(e,t){return e.color||i(e,t)}).style("stroke",function(e,t){return e.color||i(e,t)}),A.attr("d",function(e,t){return k(e.values,t)}),v.dispatch.on("elementMouseover.area",function(e){N.select(".nv-chart-"+s+" .nv-area-"+e.seriesIndex).classed("hover",!0)}),v.dispatch.on("elementMouseout.area",function(e){N.select(".nv-chart-"+s+" .nv-area-"+e.seriesIndex).classed("hover",!1)})}),g}var t={top:0,right:0,bottom:0,left:0},n=960,r=500,i=e.utils.defaultColor(),s=Math.floor(Math.random()*1e5),o=function(e){return e.x},u=function(e){return e.y},a="stack",f="zero",l="default",c="linear",h=!1,p,d,v=e.models.scatter(),m=d3.dispatch("tooltipS how","tooltipHide","areaClick","areaMouseover","areaMouseout");return v.size(2.2).sizeDomain([2.2,2.2]),v.dispatch.on("elementClick.area" -,function(e){m.areaClick(e)}),v.dispatch.on("elementMouseover.tooltip",function(e){e.pos=[e.pos[0]+t.left,e.pos[1]+t.top],m.tooltipShow(e)}),v.dispatch.on("elementMouseout.tooltip",function(e){m.tooltipHide(e)}),g.dispatch=m,g.scatter=v,d3.rebind(g,v,"interactive","size","xScale","yScale","zScale","xDomain","yDomain","sizeDomain","forceX","forceY","forceSize","clipVoronoi","clipRadius"),g.x=function(e){return arguments.length?(o=d3.functor(e),g):o},g.y=function(e){return arguments.length?(u=d3.functor(e),g):u},g.margin=function(e){return arguments.length?(t.top=typeof e.top!="undefined"?e.top:t.top,t.right=typeof e.right!="undefined"?e.right:t.right,t.bottom=typeof e.bottom!="undefined"?e.bottom:t.bottom,t.left=typeof e.left!="undefined"?e.left:t.left,g):t},g.width=function(e){return arguments.length?(n=e,g):n},g.height=function(e){return 
arguments.length?(r=e,g):r},g.clipEdge=function(e){return arguments.length?(h=e,g):h},g.color=function(t){return arguments.length?(i=e.uti ls.getColor(t),g):i},g.offset=function(e){return arguments.length?(f=e,g):f},g.order=function(e){return arguments.length?(l=e,g):l},g.style=function(e){if(!arguments.length)return a;a=e;switch(a){case"stack":g.offset("zero"),g.order("default");break;case"stream":g.offset("wiggle"),g.order("inside-out");break;case"stream-center":g.offset("silhouette"),g.order("inside-out");break;case"expand":g.offset("expand"),g.order("default")}return g},g.interpolate=function(e){return arguments.length?(c=e,c):c},g},e.models.stackedAreaChart=function(){function x(e){return e.each(function(e){var f=d3.select(this),p=this,T=(u||parseInt(f.style("width"))||960)-o.left-o.right,N=(a||parseInt(f.style("height"))||400)-o.top-o.bottom;x.update=function(){f.transition().call(x)},x.container=this,g.disabled=e.map(function(e){return!!e.disabled});if(!y){var C;y={};for(C in g)g[C]instanceof Array?y[C]=g[C].slice(0):y[C]=g[C]}if(!e||!e.length||!e.filter(function(e){return e.values.length}).length){var k =f.selectAll(".nv-noData").data([b]);return k.enter().append("text").attr("class","nvd3 nv-noData").attr("dy","-.7em").style("text-anchor","middle"),k.attr("x",o.left+T/2).attr("y",o.top+N/2).text(function(e){return e}),x}f.selectAll(".nv-noData").remove(),d=t.xScale(),v=t.yScale();var L=f.selectAll("g.nv-wrap.nv-stackedAreaChart").data([e]),A=L.enter().append("g").attr("class","nvd3 nv-wrap nv-stackedAreaChart").append("g"),O=L.select("g");A.append("g").attr("class","nv-x nv-axis"),A.append("g").attr("class","nv-y 
nv-axis"),A.append("g").attr("class","nv-stackedWrap"),A.append("g").attr("class","nv-legendWrap"),A.append("g").attr("class","nv-controlsWrap"),c&&(i.width(T-E),O.select(".nv-legendWrap").datum(e).call(i),o.top!=i.height()&&(o.top=i.height(),N=(a||parseInt(f.style("height"))||400)-o.top-o.bottom),O.select(".nv-legendWrap").attr("transform","translate("+E+","+ -o.top+")"));if(l){var M=[{key:"Stacked",disabled:t.offset()!="zero"},{key:"Stream",disabled:t.offset()!= "wiggle"},{key:"Expanded",disabled:t.offset()!="expand"}];s.width(E).color(["#444","#444","#444"]),O.select(".nv-controlsWrap").datum(M).call(s),o.top!=Math.max(s.height(),i.height())&&(o.top=Math.max(s.height(),i.height()),N=(a||parseInt(f.style("height"))||400)-o.top-o.bottom),O.select(".nv-controlsWrap").attr("transform","translate(0,"+ -o.top+")")}L.attr("transform","translate("+o.left+","+o.top+")"),t.width(T).height(N);var _=O.select(".nv-stackedWrap").datum(e);_.call(t),n.scale(d).ticks(T/100).tickSize(-N,0),O.select(".nv-x.nv-axis").attr("transform","translate(0,"+N+")"),O.select(".nv-x.nv-axis").transition().duration(0).call(n),r.scale(v).ticks(t.offset()=="wiggle"?0:N/36).tickSize(-T,0).setTickFormat(t.offset()=="expand"?d3.format("%"):m),O.select(".nv-y.nv-axis").transition().duration(0).call(r),t.dispatch.on("areaClick.toggle",function(t){e.filter(function(e){return!e.disabled}).length===1?e=e.map(function(e){return e.disabled=!1,e}):e=e.map(function(e,n){return e.disabled=n!=t.seriesIndex,e}),g.disabled=e.map(function(e){return!!e.disabled}),w.stateChange(g),x.update()}),i.dispatch.on("legendClick",function(t,n){t.disabled=!t.disabled,e.filter(function(e){return!e.disabled}).length||e.map(function(e){return 
e.disabled=!1,e}),g.disabled=e.map(function(e){return!!e.disabled}),w.stateChange(g),x.update()}),i.dispatch.on("legendDblclick",function(t){e.forEach(function(e){e.disabled=!0}),t.disabled=!1,g.disabled=e.map(function(e){return!!e.disabled}),w.stateChange(g),x.update()}),s.dispatch.on("legendClick",function(e,n){if(!e.disabled)return;M=M.map(function(e){return e.disabled=!0,e}),e.disabled=!1;switch(e.key){case"Stacked":t.style("stack");break;case"Stream":t.style("stream");break;case"Expanded":t.style("expand")}g.style=t.style(),w.stateChange(g),x.update()}),w.on("tooltipShow",function(e){h&&S(e,p.parentNode)}),w.on("changeState",function(n){typeof n.disabled!="undefined"&&(e.forEach(function(e,t){e.disabled=n.disabled[t]}),g.di sabled=n.disabled),typeof n.style!="undefined"&&t.style(n.style),x.update()})}),x}var t=e.models.stackedArea(),n=e.models.axis(),r=e.models.axis(),i=e.models.legend(),s=e.models.legend(),o={top:30,right:25,bottom:50,left:60},u=null,a=null,f=e.utils.defaultColor(),l=!0,c=!0,h=!0,p=function(e,t,n,r,i){return"<h3>"+e+"</h3>"+"<p>"+n+" on "+t+"</p>"},d,v,m=d3.format(",.2f"),g={style:t.style()},y=null,b="No Data Available.",w=d3.dispatch("tooltipShow","tooltipHide","stateChange","changeState"),E=250;n.orient("bottom").tickPadding(7),r.orient("left"),t.scatter.pointActive(function(e){return!!Math.round(t.y()(e)*100)});var S=function(i,s){var o=i.pos[0]+(s.offsetLeft||0),u=i.pos[1]+(s.offsetTop||0),a=n.tickFormat()(t.x()(i.point,i.pointIndex)),f=r.tickFormat()(t.y()(i.point,i.pointIndex)),l=p(i.series.key,a,f,i,x);e.tooltip.show([o,u],l,i.value<0?"n":"s",null,s)};return t.dispatch.on("tooltipShow",function(e){e.pos=[e.pos[0]+o.left,e.pos[1]+o.top],w.tooltipShow(e)}),t.dispatch.on(" 
tooltipHide",function(e){w.tooltipHide(e)}),w.on("tooltipHide",function(){h&&e.tooltip.cleanup()}),x.dispatch=w,x.stacked=t,x.legend=i,x.controls=s,x.xAxis=n,x.yAxis=r,d3.rebind(x,t,"x","y","size","xScale","yScale","xDomain","yDomain","sizeDomain","interactive","offset","order","style","clipEdge","forceX","forceY","forceSize","interpolate"),x.margin=function(e){return arguments.length?(o.top=typeof e.top!="undefined"?e.top:o.top,o.right=typeof e.right!="undefined"?e.right:o.right,o.bottom=typeof e.bottom!="undefined"?e.bottom:o.bottom,o.left=typeof e.left!="undefined"?e.left:o.left,x):o},x.width=function(e){return arguments.length?(u=e,x):getWidth},x.height=function(e){return arguments.length?(a=e,x):getHeight},x.color=function(n){return arguments.length?(f=e.utils.getColor(n),i.color(f),t.color(f),x):f},x.showControls=function(e){return arguments.length?(l=e,x):l},x.showLegend=function(e){return arguments.length?(c=e,x):c},x.tooltip=function(e){return arguments.length?(p=e, x):p},x.tooltips=function(e){return arguments.length?(h=e,x):h},x.tooltipContent=function(e){return arguments.length?(p=e,x):p},x.state=function(e){return arguments.length?(g=e,x):g},x.defaultState=function(e){return arguments.length?(y=e,x):y},x.noData=function(e){return arguments.length?(b=e,x):b},r.setTickFormat=r.tickFormat,r.tickFormat=function(e){return arguments.length?(m=e,r):m},x}})(); \ No newline at end of file
commit 1a8d7e50edd4878adadce504c03f92a599dfaf1b Author: Mike Thompson mithomps@redhat.com Date: Tue Jul 9 12:36:39 2013 -0700
On multi-resource chart add sort to legend to show alphabetically.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3MultiLineGraph.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3MultiLineGraph.java index a62cb5c..b89cc71 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3MultiLineGraph.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3MultiLineGraph.java @@ -40,7 +40,9 @@ public class CompositeGroupD3MultiLineGraph extends CompositeGroupD3GraphListVie } this.chartId = chartId; this.chartHeight = chartHeight; - this.data = $wnd.jQuery.parseJSON(metricsData); // make into json + if(typeof metricsData !== 'undefined' && metricsData.length > 0){ + this.data = $wnd.jQuery.parseJSON(metricsData); + } this.xAxisLabel = xAxisLabel; this.chartTitle = chartTitle; this.yAxisUnits = yAxisUnits; @@ -57,14 +59,13 @@ public class CompositeGroupD3MultiLineGraph extends CompositeGroupD3GraphListVie this.chartXaxisTimeFormatHours = chartXaxisTimeFormatHours; this.chartXaxisTimeFormatHoursMinutes = chartXaxisTimeFormatHoursMinutes;
- }; - - var global = this, + }, + global = this;
// create a chartContext object (from rhq.js) with the data required to render to a chart // this same data could be passed to different chart types // This way, we are decoupled from the dependency on globals and JSNI and kept all the java interaction right here. - chartContext = new MultiLineChartContext(global.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getChartId()(), + chartContext = new MultiLineChartContext(global.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getChartId()(), global.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getChartHeight()(), global.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getJsonMetrics()(), global.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getXAxisTitle()(), @@ -102,14 +103,13 @@ public class CompositeGroupD3MultiLineGraph extends CompositeGroupD3GraphListVie
function determineScale() { - var xTicks, xTickSubDivide, minY, maxY; + var xTicks, xTickSubDivide; console.log("DetermineScale for # resources: "+ chartContext.data.length);
if (chartContext.data.length > 0) { xTicks = 8; xTickSubDivide = 5; var myExtent = getExtentFromNestedValues(chartContext.data); - console.info("minY, maxY: "+myExtent[0] + ", "+myExtent[1] );
yScale = $wnd.d3.scale.linear() .clamp(true) @@ -210,13 +210,14 @@ public class CompositeGroupD3MultiLineGraph extends CompositeGroupD3GraphListVie // add legend var legend = svg.append("g") .attr("class", "legend") - .attr("x", width + 30) + .attr("x", width + 100) .attr("y", 70) .attr("height", 240) .attr("width", 150);
legend.selectAll('g').data(chartContext.data) .enter() + .append('g') .each(function (d, i) { var g = $wnd.d3.select(this); @@ -277,6 +278,8 @@ public class CompositeGroupD3MultiLineGraph extends CompositeGroupD3GraphListVie return yScale(d.y); });
+ chartContext.data.sort(function(a,b){return ((a.key < b.key) ? -1 : ((a.key > b.key) ? 1 : 0));}); + svg.selectAll(".multiLine") .data(chartContext.data) .enter()
commit 48fed3e97aa03337bf8fd2461064b2fb07241d8b Author: Mike Thompson mithomps@redhat.com Date: Tue Jul 9 12:36:25 2013 -0700
Keep Global exception from occuring in multi resource graph when metrics return empty data.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3GraphListView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3GraphListView.java index 3ac3758..a5a0c9d 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3GraphListView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3GraphListView.java @@ -363,17 +363,20 @@ public abstract class CompositeGroupD3GraphListView extends EnhancedVLayout impl
@Override public String getJsonMetrics() { - StringBuilder sb = new StringBuilder("["); - for (MultiLineGraphData multiLineGraphData : measurementForEachResource) { - sb.append("{ "key": ""); - sb.append(multiLineGraphData.getResourceName()); - sb.append("","value" : "); - sb.append(produceInnerValuesArray(multiLineGraphData.getMeasurementData())); - sb.append("},"); + StringBuilder sb = new StringBuilder(); + if (null != measurementForEachResource && !measurementForEachResource.isEmpty()) { + sb = new StringBuilder("["); + for (MultiLineGraphData multiLineGraphData : measurementForEachResource) { + sb.append("{ "key": ""); + sb.append(multiLineGraphData.getResourceName()); + sb.append("","value" : "); + sb.append(produceInnerValuesArray(multiLineGraphData.getMeasurementData())); + sb.append("},"); + } + sb.setLength(sb.length() - 1); // delete the last ',' + sb.append("]"); } - sb.setLength(sb.length() - 1); // delete the last ',' - sb.append("]"); - Log.debug("Multi-resource Graph json: "+ sb.toString()); + Log.debug("Multi-resource Graph json: " + sb.toString()); return sb.toString(); }
commit a2d8542774237ebc6af878832567c93d10d71e52 Author: Mike Thompson mithomps@redhat.com Date: Tue Jul 9 08:16:49 2013 -0700
Add legend to new multi-line graph.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3MultiLineGraph.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3MultiLineGraph.java index 31f5b8b..a62cb5c 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3MultiLineGraph.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3MultiLineGraph.java @@ -205,6 +205,44 @@ public class CompositeGroupD3MultiLineGraph extends CompositeGroupD3GraphListVie
}
+ function createLegend() { + + // add legend + var legend = svg.append("g") + .attr("class", "legend") + .attr("x", width + 30) + .attr("y", 70) + .attr("height", 240) + .attr("width", 150); + + legend.selectAll('g').data(chartContext.data) + .enter() + .append('g') + .each(function (d, i) { + var g = $wnd.d3.select(this); + g.append("rect") + .attr("x", width + 10) + .attr("y", (i * 15) - 8) + .attr("width", 10) + .attr("height", 10) + .style("fill", function(){return colorScale(i);}); + + g.append("text") + .attr("x", width + 30) + .attr("y", i * 15) + .attr("height", 10) + .attr("width", 135) + .style("font-size", "10px") + .style("font-family", "Arial, Helvetica, sans-serif") + .style("fill", "#50505A") + .text(function (d) { + return d.key; + }); + + + }); + } +
function createHeader(titleName) { var title = chart.append("g").append("rect") @@ -266,6 +304,7 @@ public class CompositeGroupD3MultiLineGraph extends CompositeGroupD3GraphListVie createYAxisGridLines(); createMultiLines(chartContext); createXandYAxes(); + createLegend(); console.log("finished drawing multi-line graph"); } }
commit 568b5c8ffc36e41aaddfa99b29b351b8cd0b0ef2 Author: Mike Thompson mithomps@redhat.com Date: Mon Jul 8 13:48:42 2013 -0700
Fix new d3 multi-line graph scaling issues.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/detail/summary/AbstractActivityView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/detail/summary/AbstractActivityView.java index 31b1667..b22907e 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/detail/summary/AbstractActivityView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/detail/summary/AbstractActivityView.java @@ -634,7 +634,7 @@ public abstract class AbstractActivityView extends EnhancedVLayout implements Re setIsModal(true); setShowModalMask(true); setWidth(950); - setHeight(390); + setHeight(420); setShowResizer(true); setCanDragResize(true); centerInPage(); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/StackedBarMetricGraphImpl.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/StackedBarMetricGraphImpl.java index b8c63f4..9058ea5 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/StackedBarMetricGraphImpl.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/StackedBarMetricGraphImpl.java @@ -251,7 +251,7 @@ public class StackedBarMetricGraphImpl extends AbstractMetricGraph { .attr("y", yBase) .text(highLabel + " - ");
- if(highValue !== undefined){ + if(typeof highValue !== 'undefined'){ chart.append("text") .attr("class", "highText") .attr("x", xValue) @@ -267,7 +267,7 @@ public class StackedBarMetricGraphImpl extends AbstractMetricGraph { .attr("y", yBase + yInc) .text(avgLabel + " - ");
- if(avgValue !== undefined){ + if(typeof avgValue !== 'undefined'){ chart.append("text") .attr("class", "avgText") .attr("x", xValue) @@ -282,7 +282,7 @@ public class StackedBarMetricGraphImpl extends AbstractMetricGraph { .attr("y", yBase + 2 * yInc) .text(minLabel + " - ");
- if(minValue !== undefined){ + if(typeof minValue !== 'undefined'){ chart.append("text") .attr("class", "minText") .attr("x", xValue) @@ -606,7 +606,7 @@ public class StackedBarMetricGraphImpl extends AbstractMetricGraph {
function formatHovers(chartContext, d) { var hoverString, - xValue = (d.x == undefined) ? 0 : +d.x, + xValue = (typeof d.x === 'undefined') ? 0 : +d.x, date = new Date(+xValue), barDuration = d.barDuration, timeFormatter = $wnd.d3.time.format(chartContext.chartHoverTimeFormat), diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3GraphListView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3GraphListView.java index 7ade3f7..3ac3758 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3GraphListView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3GraphListView.java @@ -249,9 +249,8 @@ public abstract class CompositeGroupD3GraphListView extends EnhancedVLayout impl
@Override public void redrawGraphs() { - drawGraph(); populateData(); - markForRedraw(); + drawGraph(); }
private void drawGraph() { @@ -281,7 +280,6 @@ public abstract class CompositeGroupD3GraphListView extends EnhancedVLayout impl
@Override public void run() { - //markForRedraw(); drawJsniChart(); } }.schedule(200); @@ -375,7 +373,7 @@ public abstract class CompositeGroupD3GraphListView extends EnhancedVLayout impl } sb.setLength(sb.length() - 1); // delete the last ',' sb.append("]"); - Log.debug("*** Multi-resource Graph json: "+ sb.toString()); + Log.debug("Multi-resource Graph json: "+ sb.toString()); return sb.toString(); }
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3MultiLineGraph.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3MultiLineGraph.java index 42e5f0c..31f5b8b 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3MultiLineGraph.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3MultiLineGraph.java @@ -96,26 +96,25 @@ public class CompositeGroupD3MultiLineGraph extends CompositeGroupD3GraphListVie yAxis, timeScale, xAxis, + colorScale = $wnd.d3.scale.category20(), chart, svg;
function determineScale() { - var xTicks, xTickSubDivide; + var xTicks, xTickSubDivide, minY, maxY; console.log("DetermineScale for # resources: "+ chartContext.data.length);
if (chartContext.data.length > 0) { xTicks = 8; xTickSubDivide = 5; + var myExtent = getExtentFromNestedValues(chartContext.data); + console.info("minY, maxY: "+myExtent[0] + ", "+myExtent[1] );
yScale = $wnd.d3.scale.linear() .clamp(true) .rangeRound([height, 0]) - .domain([$wnd.d3.min(chartContext.data[0], function (d) { - return d.y; - }), $wnd.d3.max(chartContext.data[0], function (d) { - return d.y; - })]); + .domain([myExtent[0],myExtent[1]]);
yAxis = $wnd.d3.svg.axis() .scale(yScale) @@ -124,10 +123,10 @@ public class CompositeGroupD3MultiLineGraph extends CompositeGroupD3GraphListVie .tickSize(4, 4, 0) .orient("left");
- + var firstDataset = chartContext.data[0].value; timeScale = $wnd.d3.time.scale() .range([0, width]) - .domain($wnd.d3.extent(chartContext.data[0], function (d) { + .domain($wnd.d3.extent(firstDataset, function(d){ return d.x; }));
@@ -150,6 +149,24 @@ public class CompositeGroupD3MultiLineGraph extends CompositeGroupD3GraphListVie
}
+ function getExtentFromNestedValues(data){ + var tempArray = [], + mergedArray = [], + resultArray = [], + max = 0, + min = 0; + + for(var i=0; i< data.length;i++){ + tempArray.push(data[i].value); + } + mergedArray = $wnd.d3.merge(tempArray, function(d){ return d.y;}); + max = $wnd.d3.max(mergedArray, function(d){ return d.y}); + min = $wnd.d3.min(mergedArray, function(d){ return d.y}); + resultArray.push(min); + resultArray.push(max); + return resultArray; + } + function createYAxisGridLines() { // create the y axis grid lines svg.append("g").classed("grid y_grid", true) @@ -164,7 +181,7 @@ public class CompositeGroupD3MultiLineGraph extends CompositeGroupD3GraphListVie
function createXandYAxes() {
- xAxis.tickFormat($wnd.rhqCommon.getD3CustomTimeFormat(chartContext.chartXaxisTimeFormatHours, chartContext.chartXaxisTimeFormatHoursMinutes)); + //xAxis.tickFormat($wnd.rhqCommon.getD3CustomTimeFormat(chartContext.chartXaxisTimeFormatHours, chartContext.chartXaxisTimeFormatHoursMinutes));
// create x-axis svg.append("g") @@ -189,8 +206,6 @@ public class CompositeGroupD3MultiLineGraph extends CompositeGroupD3GraphListVie }
- - function createHeader(titleName) { var title = chart.append("g").append("rect") .attr("class", "title") @@ -224,14 +239,14 @@ public class CompositeGroupD3MultiLineGraph extends CompositeGroupD3GraphListVie return yScale(d.y); });
- chart.selectAll(".multiLine") + svg.selectAll(".multiLine") .data(chartContext.data) .enter() .append('path') .attr("class", "multiLine") .attr("fill", "none") - .attr("stroke", "#2e376a") - .attr("stroke-width", "1.5") + .attr("stroke", function(d,i){ return colorScale(i);}) + .attr("stroke-width", "2") .attr("stroke-opacity", ".9") .attr("d", function(d) { return graphLine(d.value);});
@@ -244,21 +259,20 @@ public class CompositeGroupD3MultiLineGraph extends CompositeGroupD3GraphListVie "use strict"; // Guard condition that can occur when a portlet has not been configured yet console.log("multi-resource chart handle:" + chartContext.chartHandle); - //console.dir(chartContext.data); if (chartContext.data.length > 0) { console.log("Creating MultiLine Chart: " + chartContext.chartSelection + " --> " + chartContext.chartTitle); determineScale(); createHeader(chartContext.chartTitle); - console.log("created multi-header"); createYAxisGridLines(); createMultiLines(chartContext); createXandYAxes(); + console.log("finished drawing multi-line graph"); } } }; // end public closure }();
- if (chartContext.data !== undefined && chartContext.data.length > 0) { + if (typeof chartContext.data !== 'undefined' && chartContext.data.length > 0) { multiLineGraph.draw(chartContext); }
commit 83f7d3d726d2c3ef78f3f104f204ed01fafe1b1a Author: Mike Thompson mithomps@redhat.com Date: Sun Jul 7 14:15:08 2013 -0700
Consolidate the d3 time format stuff into rhq.js so all graphs have access.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityLineGraphType.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityLineGraphType.java index 60b5e26..34ca60b 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityLineGraphType.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityLineGraphType.java @@ -32,9 +32,12 @@ import org.rhq.enterprise.gui.coregui.client.util.Log; import org.rhq.enterprise.gui.coregui.client.util.MeasurementConverterClient;
/** + * This is now old and for demonstration purposes only. * Contains the javascript chart definition for an implementation of the d3 availability chart. This implementation is * just a line that changes color based on availability type: up=green, down=red, orange=disabled, unknown=grey, * empty=grey, warn=yellow. This version of the availability graph shows continuous intervals. + * @deprecated + * @see AvailabilityOverUnderGraphType * * @author Mike Thompson */ diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java index 3822b59..7c5f80a 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilityOverUnderGraphType.java @@ -171,7 +171,6 @@ public class AvailabilityOverUnderGraphType implements AvailabilityGraphType { .tickSize(13, 0, 0) .orient("bottom"),
- customTimeFormat,
calcBarY = function (d) { var ABOVE = -6, @@ -253,17 +252,7 @@ public class AvailabilityOverUnderGraphType implements AvailabilityGraphType { return calcBarFill(d); });
- customTimeFormat = timeFormat([ - [$wnd.d3.time.format("%Y"), function() { return true; }], - [$wnd.d3.time.format("%B"), function(d) { return d.getMonth(); }], - [$wnd.d3.time.format("%b %d"), function(d) { return d.getDate() != 1; }], - [$wnd.d3.time.format("%a %d"), function(d) { return d.getDay() && d.getDate() != 1; }], - [$wnd.d3.time.format(availChartContext.chartXaxisTimeFormatHours), function(d) { return d.getHours(); }], - [$wnd.d3.time.format(availChartContext.chartXaxisTimeFormatHoursMinutes), function(d) { return d.getMinutes(); }], - [$wnd.d3.time.format(":%S"), function(d) { return d.getSeconds(); }], - [$wnd.d3.time.format(".%L"), function(d) { return d.getMilliseconds(); }] - ]); - xAxis.tickFormat(customTimeFormat); + xAxis.tickFormat($wnd.rhqCommon.getD3CustomTimeFormat(availChartContext.chartXaxisTimeFormatHours, availChartContext.chartXaxisTimeFormatHoursMinutes));
// create x-axis svg.append("g") diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/StackedBarMetricGraphImpl.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/StackedBarMetricGraphImpl.java index 0178f93..b8c63f4 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/StackedBarMetricGraphImpl.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/StackedBarMetricGraphImpl.java @@ -461,17 +461,8 @@ public class StackedBarMetricGraphImpl extends AbstractMetricGraph { }
function createXandYAxes() { - var customTimeFormat = timeFormat([ - [$wnd.d3.time.format("%Y"), function() { return true; }], - [$wnd.d3.time.format("%B"), function(d) { return d.getMonth(); }], - [$wnd.d3.time.format("%b %d"), function(d) { return d.getDate() != 1; }], - [$wnd.d3.time.format("%a %d"), function(d) { return d.getDay() && d.getDate() != 1; }], - [$wnd.d3.time.format(chartContext.chartXaxisTimeFormatHours), function(d) { return d.getHours(); }], - [$wnd.d3.time.format(chartContext.chartXaxisTimeFormatHoursMinutes), function(d) { return d.getMinutes(); }], - [$wnd.d3.time.format(":%S"), function(d) { return d.getSeconds(); }], - [$wnd.d3.time.format(".%L"), function(d) { return d.getMilliseconds(); }] - ]); - xAxis.tickFormat(customTimeFormat); + + xAxis.tickFormat($wnd.rhqCommon.getD3CustomTimeFormat(chartContext.chartXaxisTimeFormatHours, chartContext.chartXaxisTimeFormatHoursMinutes));
// create x-axis svg.append("g") @@ -495,15 +486,6 @@ public class StackedBarMetricGraphImpl extends AbstractMetricGraph {
}
- function timeFormat(formats) { - return function(date) { - var i = formats.length - 1, f = formats[i]; - while (!f[1](date)) f = formats[--i]; - return f[0](date); - } - } - - function createAvgLines() { var showBarAvgTrendline = global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.AbstractMetricGraph::showBarAvgTrendLine()(), diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3GraphListView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3GraphListView.java index 7a4232a..7ade3f7 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3GraphListView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3GraphListView.java @@ -73,7 +73,7 @@ public abstract class CompositeGroupD3GraphListView extends EnhancedVLayout impl private final String chartHoverDateFormat = MSG.chart_hover_date_format(); private int groupId; private int definitionId; - private boolean isAutogroup; + private boolean isAutoGroup; private MeasurementDefinition definition; private MeasurementUserPreferences measurementUserPreferences; private ButtonBarDateTimeRangeEditor buttonBarDateTimeRangeEditor; @@ -86,10 +86,10 @@ public abstract class CompositeGroupD3GraphListView extends EnhancedVLayout impl private String chartTitle; private Integer chartHeight;
- public CompositeGroupD3GraphListView(int groupId, int defId, boolean isAutogroup) { + public CompositeGroupD3GraphListView(int groupId, int defId, boolean isAutoGroup) { super(); this.groupId = groupId; - this.isAutogroup = isAutogroup; + this.isAutoGroup = isAutoGroup; setDefinitionId(defId); measurementForEachResource = new ArrayList<MultiLineGraphData>(); measurementUserPreferences = new MeasurementUserPreferences(UserSessionManager.getUserPreferences()); @@ -105,7 +105,7 @@ public abstract class CompositeGroupD3GraphListView extends EnhancedVLayout impl ResourceGroupCriteria criteria = new ResourceGroupCriteria(); criteria.addFilterId(groupId); criteria.fetchResourceType(true); - criteria.addFilterVisible(!isAutogroup); + criteria.addFilterVisible(!isAutoGroup); criteria.fetchExplicitResources(true);
measurementForEachResource.clear(); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3MultiLineGraph.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3MultiLineGraph.java index be6d110..42e5f0c 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3MultiLineGraph.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3MultiLineGraph.java @@ -163,33 +163,8 @@ public class CompositeGroupD3MultiLineGraph extends CompositeGroupD3GraphListVie }
function createXandYAxes() { - var customTimeFormat = timeFormat([ - [$wnd.d3.time.format("%Y"), function () { - return true; - }], - [$wnd.d3.time.format("%B"), function (d) { - return d.getMonth(); - }], - [$wnd.d3.time.format("%b %d"), function (d) { - return d.getDate() != 1; - }], - [$wnd.d3.time.format("%a %d"), function (d) { - return d.getDay() && d.getDate() != 1; - }], - [$wnd.d3.time.format(chartContext.chartXaxisTimeFormatHours), function (d) { - return d.getHours(); - }], - [$wnd.d3.time.format(chartContext.chartXaxisTimeFormatHoursMinutes), function (d) { - return d.getMinutes(); - }], - [$wnd.d3.time.format(":%S"), function (d) { - return d.getSeconds(); - }], - [$wnd.d3.time.format(".%L"), function (d) { - return d.getMilliseconds(); - }] - ]); - xAxis.tickFormat(customTimeFormat); + + xAxis.tickFormat($wnd.rhqCommon.getD3CustomTimeFormat(chartContext.chartXaxisTimeFormatHours, chartContext.chartXaxisTimeFormatHoursMinutes));
// create x-axis svg.append("g") @@ -213,13 +188,7 @@ public class CompositeGroupD3MultiLineGraph extends CompositeGroupD3GraphListVie
}
- function timeFormat(formats) { - return function (date) { - var i = formats.length - 1, f = formats[i]; - while (!f[1](date)) f = formats[--i]; - return f[0](date); - } - } +
function createHeader(titleName) { diff --git a/modules/enterprise/gui/coregui/src/main/webapp/js/rhq.js b/modules/enterprise/gui/coregui/src/main/webapp/js/rhq.js index 435fd67..40c7ca8 100644 --- a/modules/enterprise/gui/coregui/src/main/webapp/js/rhq.js +++ b/modules/enterprise/gui/coregui/src/main/webapp/js/rhq.js @@ -4,7 +4,8 @@
// Handle browsers not supporting console object if (!window.console) window.console = {}; -if (!window.console.log) window.console.log = function () { }; +if (!window.console.log) window.console.log = function () { +};
/** * ChartContext Constructor Object @@ -39,101 +40,146 @@ if (!window.console.log) window.console.log = function () { }; * @param chartXaxisTimeFormatHoursMinutes * @constructor */ -var ChartContext = function (chartId, chartHeight, metricsData, xAxisLabel, chartTitle, yAxisUnits, minChartTitle, avgChartTitle, peakChartTitle, dateLabel, timeLabel, downLabel, unknownLabel, noDataLabel, hoverStartLabel,hoverEndLabel, hoverPeriodLabel, hoverBarLabel, chartHoverTimeFormat, chartHoverDateFormat, isPortalGraph, portalId, buttonBarDateTimeFormat, singleValueLabel, chartXaxisTimeFormatHours, chartXaxisTimeFormatHoursMinutes ) -{ - "use strict"; - if(!(this instanceof ChartContext)){ - throw new Error("ChartContext function cannot be called as a function.") - } - this.chartId = chartId; - this.chartHeight = chartHeight; - this.data = jQuery.parseJSON(metricsData); // make into json - this.xAxisLabel = xAxisLabel; - this.chartTitle = chartTitle; - this.yAxisUnits = yAxisUnits; - this.minChartTitle = minChartTitle; - this.avgChartTitle = avgChartTitle; - this.peakChartTitle = peakChartTitle; - this.dateLabel = dateLabel; - this.timeLabel = timeLabel; - this.downLabel = downLabel; - this.unknownLabel = unknownLabel; - this.singleValueLabel = singleValueLabel; - this.noDataLabel = noDataLabel; - this.hoverStartLabel = hoverStartLabel; - this.hoverEndLabel = hoverEndLabel; - this.hoverPeriodLabel = hoverPeriodLabel; - this.hoverBarLabel = hoverBarLabel; - this.chartHoverTimeFormat = chartHoverTimeFormat; - this.chartHoverDateFormat = chartHoverDateFormat; - this.isPortalGraph = isPortalGraph; - this.portalId = portalId; - if(isPortalGraph){ - this.chartHandle = "rChart-"+chartId+"-"+portalId; - }else { - this.chartHandle = "rChart-"+chartId; - } - this.chartSelection = this.chartHandle + " svg"; - this.buttonBarDateTimeFormat = buttonBarDateTimeFormat; - this.chartXaxisTimeFormatHours = chartXaxisTimeFormatHours; - this.chartXaxisTimeFormatHoursMinutes = 
chartXaxisTimeFormatHoursMinutes; +var ChartContext = function (chartId, chartHeight, metricsData, xAxisLabel, chartTitle, yAxisUnits, minChartTitle, avgChartTitle, peakChartTitle, dateLabel, timeLabel, downLabel, unknownLabel, noDataLabel, hoverStartLabel, hoverEndLabel, hoverPeriodLabel, hoverBarLabel, chartHoverTimeFormat, chartHoverDateFormat, isPortalGraph, portalId, buttonBarDateTimeFormat, singleValueLabel, chartXaxisTimeFormatHours, chartXaxisTimeFormatHoursMinutes) { + "use strict"; + if (!(this instanceof ChartContext)) { + throw new Error("ChartContext function cannot be called as a function.") + } + this.chartId = chartId; + this.chartHeight = chartHeight; + this.data = jQuery.parseJSON(metricsData); // make into json + this.xAxisLabel = xAxisLabel; + this.chartTitle = chartTitle; + this.yAxisUnits = yAxisUnits; + this.minChartTitle = minChartTitle; + this.avgChartTitle = avgChartTitle; + this.peakChartTitle = peakChartTitle; + this.dateLabel = dateLabel; + this.timeLabel = timeLabel; + this.downLabel = downLabel; + this.unknownLabel = unknownLabel; + this.singleValueLabel = singleValueLabel; + this.noDataLabel = noDataLabel; + this.hoverStartLabel = hoverStartLabel; + this.hoverEndLabel = hoverEndLabel; + this.hoverPeriodLabel = hoverPeriodLabel; + this.hoverBarLabel = hoverBarLabel; + this.chartHoverTimeFormat = chartHoverTimeFormat; + this.chartHoverDateFormat = chartHoverDateFormat; + this.isPortalGraph = isPortalGraph; + this.portalId = portalId; + if (isPortalGraph) { + this.chartHandle = "rChart-" + chartId + "-" + portalId; + } + else { + this.chartHandle = "rChart-" + chartId; + } + this.chartSelection = this.chartHandle + " svg"; + this.buttonBarDateTimeFormat = buttonBarDateTimeFormat; + this.chartXaxisTimeFormatHours = chartXaxisTimeFormatHours; + this.chartXaxisTimeFormatHoursMinutes = chartXaxisTimeFormatHoursMinutes;
-}, -/** - * Availability Context object constructor - * @param chartId - * @param availData - * @param dateLabel - * @param timeLabel - * @param hoverStartLabel - * @param hoverBarLabel - * @param availabilityLabel - * @param chartHoverTimeFormat - * @param chartHoverDateFormat - * @param chartTitle - * @param chartUpLabel - * @param chartDownLabel - * @param chartXaxisTimeFormatHours - * @param chartXaxisTimeFormatHoursMinutes - * @constructor - */ -AvailChartContext = function (chartId, availData, dateLabel, timeLabel, hoverStartLabel, hoverBarLabel, availabilityLabel, chartHoverTimeFormat, chartHoverDateFormat, chartTitle, chartUpLabel, chartDownLabel, chartXaxisTimeFormatHours, chartXaxisTimeFormatHoursMinutes) { - "use strict"; - if (!(this instanceof AvailChartContext)) { - throw new Error("AvailChartContext function cannot be called as a function.") - } - this.chartId = chartId; - this.chartHandle = "#availChart-" + this.chartId; - this.chartSelection = this.chartHandle + " svg"; - this.data = jQuery.parseJSON(availData); // make into json - this.dateLabel = dateLabel; - this.timeLabel = timeLabel; - this.hoverStartLabel = hoverStartLabel; - this.hoverBarLabel = hoverBarLabel; - this.hoverBarAvailabilityLabel = availabilityLabel; - this.chartHoverTimeFormat = chartHoverTimeFormat; - this.chartHoverDateFormat = chartHoverDateFormat; - this.chartTitle = chartTitle; - this.chartDownLabel = chartDownLabel; - this.chartUpLabel = chartUpLabel; - this.chartXaxisTimeFormatHours = chartXaxisTimeFormatHours; - this.chartXaxisTimeFormatHoursMinutes = chartXaxisTimeFormatHoursMinutes; + }, + /** + * Availability Context object constructor + * @param chartId + * @param availData + * @param dateLabel + * @param timeLabel + * @param hoverStartLabel + * @param hoverBarLabel + * @param availabilityLabel + * @param chartHoverTimeFormat + * @param chartHoverDateFormat + * @param chartTitle + * @param chartUpLabel + * @param chartDownLabel + * @param chartXaxisTimeFormatHours 
+ * @param chartXaxisTimeFormatHoursMinutes + * @constructor + */ + AvailChartContext = function (chartId, availData, dateLabel, timeLabel, hoverStartLabel, hoverBarLabel, availabilityLabel, chartHoverTimeFormat, chartHoverDateFormat, chartTitle, chartUpLabel, chartDownLabel, chartXaxisTimeFormatHours, chartXaxisTimeFormatHoursMinutes) { + "use strict"; + if (!(this instanceof AvailChartContext)) { + throw new Error("AvailChartContext function cannot be called as a function.") + } + this.chartId = chartId; + this.chartHandle = "#availChart-" + this.chartId; + this.chartSelection = this.chartHandle + " svg"; + this.data = jQuery.parseJSON(availData); // make into json + this.dateLabel = dateLabel; + this.timeLabel = timeLabel; + this.hoverStartLabel = hoverStartLabel; + this.hoverBarLabel = hoverBarLabel; + this.hoverBarAvailabilityLabel = availabilityLabel; + this.chartHoverTimeFormat = chartHoverTimeFormat; + this.chartHoverDateFormat = chartHoverDateFormat; + this.chartTitle = chartTitle; + this.chartDownLabel = chartDownLabel; + this.chartUpLabel = chartUpLabel; + this.chartXaxisTimeFormatHours = chartXaxisTimeFormatHours; + this.chartXaxisTimeFormatHoursMinutes = chartXaxisTimeFormatHoursMinutes; + + }, + + /** + * GraphDateContext object constructor. 
+ * @param startDate moment object representing startDate range + * @param endDate moment object representing endDate range + * @constructor + */ + GraphDateContext = function (startDate, endDate) { + "use strict"; + if (!(this instanceof GraphDateContext)) { + throw new Error("GraphDateContext function cannot be called as a function.") + } + this.startDate = startDate; + this.endDate = endDate; + }, + rhqCommon = (function () { + + + var timeFormat = function (formats) { + return function(date) { + var i = formats.length - 1, f = formats[i]; + while (!f[1](date)) f = formats[--i]; + return f[0](date); + } + }; + + return { + getD3CustomTimeFormat: function (xAxisTimeFormatHours, xAxisTimeFormatHoursMinutes) { + return timeFormat([ + [d3.time.format("%Y"), function () { + return true; + }], + [d3.time.format("%B"), function (d) { + return d.getMonth(); + }], + [d3.time.format("%b %d"), function (d) { + return d.getDate() != 1; + }], + [d3.time.format("%a %d"), function (d) { + return d.getDay() && d.getDate() != 1; + }], + [d3.time.format(xAxisTimeFormatHours), function (d) { + return d.getHours(); + }], + [d3.time.format(xAxisTimeFormatHoursMinutes), function (d) { + return d.getMinutes(); + }], + [d3.time.format(":%S"), function (d) { + return d.getSeconds(); + }], + [d3.time.format(".%L"), function (d) { + return d.getMilliseconds(); + }] + ]); + } + + } + })();
-},
-/** - * GraphDateContext object constructor. - * @param startDate moment object representing startDate range - * @param endDate moment object representing endDate range - * @constructor - */ -GraphDateContext = function (startDate, endDate){ - "use strict"; - if (!(this instanceof GraphDateContext)) { - throw new Error("GraphDateContext function cannot be called as a function.") - } - this.startDate = startDate; - this.endDate = endDate; -};
commit 6dac7937e5cf8b9606d5e0e149e2228cf9ab423e Author: Mike Thompson mithomps@redhat.com Date: Wed Jul 3 15:33:11 2013 -0700
Second iteration at new Multi-resource graph using d3 instead of nvd3.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/StackedBarMetricGraphImpl.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/StackedBarMetricGraphImpl.java index f81fb4f..0178f93 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/StackedBarMetricGraphImpl.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/StackedBarMetricGraphImpl.java @@ -171,7 +171,7 @@ public class StackedBarMetricGraphImpl extends AbstractMetricGraph { lowBound = determineLowBound(min, peak); highBound = peak + ((peak - min) * 0.1); oobMax = $wnd.d3.max(chartContext.data.map(function (d) { - if (d.baselineMax == undefined) { + if (typeof d.baselineMax === 'undefined') { return 0; } else { @@ -216,7 +216,7 @@ public class StackedBarMetricGraphImpl extends AbstractMetricGraph { .attr("height", height + margin.top - titleHeight - titleSpace + margin.bottom) .attr("transform", "translate(" + margin.left + "," + (+titleHeight + titleSpace + margin.top) + ")");
- legendUnDefined = (typeof min === undefined) || (typeof avg === undefined) || (typeof peak === undefined); + legendUnDefined = (typeof min === 'undefined') || (typeof avg === 'undefined') || (typeof peak === 'undefined'); if (!useSmallCharts() && !legendUnDefined) { createMinAvgPeakSidePanel(chartContext.minChartTitle, min, chartContext.avgChartTitle, avg, chartContext.peakChartTitle, peak, chartContext.yAxisUnits); } @@ -711,7 +711,7 @@ public class StackedBarMetricGraphImpl extends AbstractMetricGraph { }; // end public closure }();
- if(chartContext.data !== undefined && chartContext.data.length > 0){ + if(typeof chartContext.data !== 'undefined' && chartContext.data.length > 0){ metricStackedBarGraph.draw(chartContext); }
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3GraphListView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3GraphListView.java index 1ca63ce..7a4232a 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3GraphListView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3GraphListView.java @@ -339,7 +339,7 @@ public abstract class CompositeGroupD3GraphListView extends EnhancedVLayout impl }
public int getChartHeight() { - return chartHeight != null ? chartHeight : 210; + return chartHeight != null ? chartHeight : 300; }
public void setChartHeight(Integer chartHeight) { @@ -369,7 +369,7 @@ public abstract class CompositeGroupD3GraphListView extends EnhancedVLayout impl for (MultiLineGraphData multiLineGraphData : measurementForEachResource) { sb.append("{ "key": ""); sb.append(multiLineGraphData.getResourceName()); - sb.append("","values" : "); + sb.append("","value" : "); sb.append(produceInnerValuesArray(multiLineGraphData.getMeasurementData())); sb.append("},"); } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3MultiLineGraph.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3MultiLineGraph.java index 9d595a7..be6d110 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3MultiLineGraph.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3MultiLineGraph.java @@ -92,11 +92,6 @@ public class CompositeGroupD3MultiLineGraph extends CompositeGroupD3GraphListVie adjustedChartHeight = chartContext.chartHeight - 50, height = adjustedChartHeight - margin.top - margin.bottom, titleHeight = 30, titleSpace = 10, - chartData, - lowBound, - min, high, - newLow = 0, - highBound, yScale, yAxis, timeScale, @@ -104,40 +99,21 @@ public class CompositeGroupD3MultiLineGraph extends CompositeGroupD3GraphListVie chart, svg;
- // adjust the min scale so blue low line is not in axis - function determineLowBound(min, peak) { - //var newLow = min - ((peak - min) * 0.1); - newLow = min; - if (newLow < 0) { - return 0; - } - else { - return newLow; - } - } -
function determineScale() { var xTicks, xTickSubDivide; - console.log("DetermineScale!"); + console.log("DetermineScale for # resources: "+ chartContext.data.length);
if (chartContext.data.length > 0) { xTicks = 8; xTickSubDivide = 5; - chartData = chartContext.data; - min = $wnd.d3.min(function (d) { - return d.y; - }); - high = $wnd.d3.max(function (d) { - return d.y; - });
yScale = $wnd.d3.scale.linear() .clamp(true) .rangeRound([height, 0]) - .domain([$wnd.d3.min(chartContext.data, function (d) { + .domain([$wnd.d3.min(chartContext.data[0], function (d) { return d.y; - }), $wnd.d3.max(chartContext.data, function (d) { + }), $wnd.d3.max(chartContext.data[0], function (d) { return d.y; })]);
@@ -151,7 +127,7 @@ public class CompositeGroupD3MultiLineGraph extends CompositeGroupD3GraphListVie
timeScale = $wnd.d3.time.scale() .range([0, width]) - .domain($wnd.d3.extent(chartData, function (d) { + .domain($wnd.d3.extent(chartContext.data[0], function (d) { return d.x; }));
@@ -187,33 +163,33 @@ public class CompositeGroupD3MultiLineGraph extends CompositeGroupD3GraphListVie }
function createXandYAxes() { -// var customTimeFormat = timeFormat([ -// [$wnd.d3.time.format("%Y"), function () { -// return true; -// }], -// [$wnd.d3.time.format("%B"), function (d) { -// return d.getMonth(); -// }], -// [$wnd.d3.time.format("%b %d"), function (d) { -// return d.getDate() != 1; -// }], -// [$wnd.d3.time.format("%a %d"), function (d) { -// return d.getDay() && d.getDate() != 1; -// }], -// [$wnd.d3.time.format(chartContext.chartXaxisTimeFormatHours), function (d) { -// return d.getHours(); -// }], -// [$wnd.d3.time.format(chartContext.chartXaxisTimeFormatHoursMinutes), function (d) { -// return d.getMinutes(); -// }], -// [$wnd.d3.time.format(":%S"), function (d) { -// return d.getSeconds(); -// }], -// [$wnd.d3.time.format(".%L"), function (d) { -// return d.getMilliseconds(); -// }] -// ]); -// xAxis.tickFormat(customTimeFormat); + var customTimeFormat = timeFormat([ + [$wnd.d3.time.format("%Y"), function () { + return true; + }], + [$wnd.d3.time.format("%B"), function (d) { + return d.getMonth(); + }], + [$wnd.d3.time.format("%b %d"), function (d) { + return d.getDate() != 1; + }], + [$wnd.d3.time.format("%a %d"), function (d) { + return d.getDay() && d.getDate() != 1; + }], + [$wnd.d3.time.format(chartContext.chartXaxisTimeFormatHours), function (d) { + return d.getHours(); + }], + [$wnd.d3.time.format(chartContext.chartXaxisTimeFormatHoursMinutes), function (d) { + return d.getMinutes(); + }], + [$wnd.d3.time.format(":%S"), function (d) { + return d.getSeconds(); + }], + [$wnd.d3.time.format(".%L"), function (d) { + return d.getMilliseconds(); + }] + ]); + xAxis.tickFormat(customTimeFormat);
// create x-axis svg.append("g") @@ -269,8 +245,8 @@ public class CompositeGroupD3MultiLineGraph extends CompositeGroupD3GraphListVie
}
- function createAvgLines() { - var barAvgLine = $wnd.d3.svg.line() + function createMultiLines(chartContext) { + var graphLine = $wnd.d3.svg.line() .interpolate("linear") .x(function (d) { return timeScale(d.x); @@ -279,15 +255,16 @@ public class CompositeGroupD3MultiLineGraph extends CompositeGroupD3GraphListVie return yScale(d.y); });
- // Bar avg line - svg.append("path") - .datum(chartData) - .attr("class", "barAvgLine") + chart.selectAll(".multiLine") + .data(chartContext.data) + .enter() + .append('path') + .attr("class", "multiLine") .attr("fill", "none") .attr("stroke", "#2e376a") .attr("stroke-width", "1.5") - .attr("stroke-opacity", ".7") - .attr("d", barAvgLine); + .attr("stroke-opacity", ".9") + .attr("d", function(d) { return graphLine(d.value);});
}
@@ -305,7 +282,7 @@ public class CompositeGroupD3MultiLineGraph extends CompositeGroupD3GraphListVie createHeader(chartContext.chartTitle); console.log("created multi-header"); createYAxisGridLines(); - createAvgLines(); + createMultiLines(chartContext); createXandYAxes(); } }
commit c2265e55bec42062329fdfdf30c586da381b99d4 Author: Mike Thompson mithomps@redhat.com Date: Tue Jul 2 14:33:21 2013 -0700
First pass at new Multi-resource graph using d3 instead of nvd3.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/GroupMetricsPortlet.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/GroupMetricsPortlet.java index d226cbe..5ac5e9e 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/GroupMetricsPortlet.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/dashboard/portlets/groups/GroupMetricsPortlet.java @@ -56,7 +56,6 @@ import org.rhq.core.domain.resource.group.ResourceGroup; import org.rhq.core.domain.resource.group.composite.ResourceGroupComposite; import org.rhq.core.domain.util.PageList; import org.rhq.enterprise.gui.coregui.client.LinkManager; -import org.rhq.enterprise.gui.coregui.client.components.FullHTMLPane; import org.rhq.enterprise.gui.coregui.client.components.measurement.CustomConfigMeasurementRangeEditor; import org.rhq.enterprise.gui.coregui.client.dashboard.AutoRefreshPortlet; import org.rhq.enterprise.gui.coregui.client.dashboard.AutoRefreshUtil; @@ -70,7 +69,7 @@ import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; import org.rhq.enterprise.gui.coregui.client.inventory.common.detail.summary.AbstractActivityView; import org.rhq.enterprise.gui.coregui.client.inventory.common.detail.summary.AbstractActivityView.ChartViewWindow; import org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView; -import org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupMultiLineGraphListView; +import org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3MultiLineGraph; import org.rhq.enterprise.gui.coregui.client.inventory.resource.type.ResourceTypeRepository; import org.rhq.enterprise.gui.coregui.client.util.BrowserUtility; import 
org.rhq.enterprise.gui.coregui.client.util.Log; @@ -407,7 +406,7 @@ public class GroupMetricsPortlet extends EnhancedVLayout implements CustomSettin @Override public void onClick(ClickEvent event) { ChartViewWindow window = new ChartViewWindow(title); - CompositeGroupD3GraphListView graph = new CompositeGroupMultiLineGraphListView( + CompositeGroupD3GraphListView graph = new CompositeGroupD3MultiLineGraph( groupId, md.getId(), isAutoGroup); window.addItem(graph); graph.populateData(); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/MetricGraphData.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/MetricGraphData.java index 16f3e4a..fca4e80 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/MetricGraphData.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/MetricGraphData.java @@ -428,20 +428,6 @@ public class MetricGraphData implements JsonMetricProducer { }
- - /** - * If there is more than 2 days time window then return true so we can show day of week - * in axis labels. Function to switch the timescale to whichever is more appropriate hours - * or hours with days of week. - * @return true if difference between startTime and endTime is >= x days - */ - public boolean shouldDisplayDayOfWeekInXAxisLabel() { - Long startTime = metricData.get(0).getTimestamp(); - Long endTime = metricData.get(metricData.size() - 1).getTimestamp(); - long timeThreshold = 24 * 60 * 60 * 1000; // 1 days - return startTime + timeThreshold < endTime; - } - @Override public String toString() { final StringBuilder sb = new StringBuilder(); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/MetricNvd3MultiLineGraph.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/MetricNvd3MultiLineGraph.java deleted file mode 100644 index 22b6feb..0000000 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/MetricNvd3MultiLineGraph.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * RHQ Management Platform - * Copyright (C) 2005-2012 Red Hat, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- */ -package org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype; - -import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.AbstractMetricGraph; -import org.rhq.enterprise.gui.coregui.client.inventory.common.graph.MetricGraphData; - -/** - * Contains the chart definition for a MultiLine Graph. - * - * @author Mike Thompson - */ -public final class MetricNvd3MultiLineGraph extends AbstractMetricGraph -{ - /** - * General constructor for stacked bar graph when you have all the data needed to - * produce the graph. (This is true for all cases but the dashboard portlet). - */ - public MetricNvd3MultiLineGraph(MetricGraphData metricGraphData){ - setMetricGraphData(metricGraphData); - } - - - /** - * The magic JSNI to draw the charts with d3. - */ - public native void drawJsniChart() /*-{ - console.log("Draw NVD3 Bar jsni chart"); - console.time("multiChart"); - var global = this, - chartId = global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.MetricGraphData::getChartId()(), - chartHandle = "#rChart-"+chartId, - chartSelection = chartHandle + " svg", - json = $wnd.jQuery.parseJSON(global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.MetricGraphData::getJsonMetrics()()), - yAxisLabel = global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.MetricGraphData::getChartTitle()(), - yAxisUnits = global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.MetricGraphData::getYAxisUnits()(), - xAxisLabel = global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.MetricGraphData::getXAxisTitle()(), - displayDayOfWeek = global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.MetricGraphData::shouldDisplayDayOfWeekInXAxisLabel()(), - xAxisTimeFormat = (displayDayOfWeek) ? 
"%a %I %p" : "%I : %M %p", - - // nvd3 defines their json models a standard way (same model for other graphs) - data = function() { - return [ - { - values: json, - key: yAxisLabel , - color: '#ff7f0e' - } - ]; - }; - $wnd.nv.addGraph(function() { - var chart = $wnd.nv.models.multiBarChart() - .showControls(true) - .tooltips(true); - - chart.xAxis.axisLabel(xAxisLabel) - .tickFormat(function(d) { return $wnd.d3.time.format(xAxisTimeFormat)(new Date(d)) }); - - chart.yAxis - .axisLabel(yAxisUnits) - .tickFormat($wnd.d3.format(',f')); - - $wnd.d3.select(chartSelection) - .datum(data()) - .transition().duration(300) - .call(chart); - - $wnd.nv.utils.windowResize(chart.update); - - return chart; - }); - console.timeEnd("multiChart") - - }-*/; - -} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3GraphListView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3GraphListView.java index faeb476..1ca63ce 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3GraphListView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3GraphListView.java @@ -23,6 +23,7 @@ import java.util.EnumSet; import java.util.List; import java.util.Set;
+import com.google.gwt.user.client.Timer; import com.google.gwt.user.client.rpc.AsyncCallback; import com.smartgwt.client.widgets.HTMLFlow; import com.smartgwt.client.widgets.layout.HLayout; @@ -38,6 +39,7 @@ import org.rhq.core.domain.resource.group.composite.ResourceGroupComposite; import org.rhq.core.domain.util.PageList; import org.rhq.enterprise.gui.coregui.client.CoreGUI; import org.rhq.enterprise.gui.coregui.client.JsonMetricProducer; +import org.rhq.enterprise.gui.coregui.client.Messages; import org.rhq.enterprise.gui.coregui.client.UserSessionManager; import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; import org.rhq.enterprise.gui.coregui.client.gwt.ResourceGroupGWTServiceAsync; @@ -60,22 +62,29 @@ import org.rhq.enterprise.gui.coregui.client.util.preferences.MeasurementUserPre */ public abstract class CompositeGroupD3GraphListView extends EnhancedVLayout implements JsonMetricProducer, RedrawGraphs {
+ static protected final Messages MSG = CoreGUI.getMessages(); + // string labels + private final String chartTitleMinLabel = MSG.chart_title_min_label(); + private final String chartTitleAvgLabel = MSG.chart_title_avg_label(); + private final String chartTitlePeakLabel = MSG.chart_title_peak_label(); + private final String chartDateLabel = MSG.chart_date_label(); + private final String chartTimeLabel = MSG.chart_time_label(); + private final String chartHoverTimeFormat = MSG.chart_hover_time_format(); + private final String chartHoverDateFormat = MSG.chart_hover_date_format(); private int groupId; private int definitionId; private boolean isAutogroup; - private MeasurementDefinition definition; - private MeasurementUserPreferences measurementUserPreferences; private ButtonBarDateTimeRangeEditor buttonBarDateTimeRangeEditor; - /** * measurementForEachResource is a list of a list of single Measurement data for multiple resources. */ private List<MultiLineGraphData> measurementForEachResource; private HLayout titleHLayout; - private HTMLFlow title; private HTMLFlow graph; + private String chartTitle; + private Integer chartHeight;
public CompositeGroupD3GraphListView(int groupId, int defId, boolean isAutogroup) { super(); @@ -114,8 +123,8 @@ public abstract class CompositeGroupD3GraphListView extends EnhancedVLayout impl }
final ResourceGroup parentGroup = result.get(0).getResourceGroup(); + chartTitle = parentGroup.getName(); Log.debug("group name: " + parentGroup.getName()); - Log.debug("# of child resources: " + parentGroup.getExplicitResources().size()); // setting up a deferred Command to execute after all resource queries have completed (successfully or unsuccessfully) final CountDownLatch countDownLatch = CountDownLatch.create(parentGroup.getExplicitResources() .size(), new Command() { @@ -124,6 +133,11 @@ public abstract class CompositeGroupD3GraphListView extends EnhancedVLayout impl * Do this only after ALL of the metric queries for each resource */ public void execute() { + if (parentGroup.getExplicitResources().size() != measurementForEachResource.size()) { + Log.warn("Number of graphs doesn't match number of resources"); + Log.warn("# of child resources: " + parentGroup.getExplicitResources().size()); + Log.warn("# of charted graphs: " + measurementForEachResource.size()); + } drawGraph(); redraw(); } @@ -167,9 +181,9 @@ public abstract class CompositeGroupD3GraphListView extends EnhancedVLayout impl
@Override public void onSuccess( - List<List<MeasurementDataNumericHighLowComposite>> result) { + List<List<MeasurementDataNumericHighLowComposite>> measurements) { addMeasurementForEachResource(childResource.getName(), - childResource.getId(), result.get(0)); + childResource.getId(), measurements.get(0)); countDownLatch.countDown(); } }); @@ -184,34 +198,6 @@ public abstract class CompositeGroupD3GraphListView extends EnhancedVLayout impl }
/** - * Immutable data for each graph line. - */ - private final class MultiLineGraphData { - private String resourceName; - private int resourceId; - private List<MeasurementDataNumericHighLowComposite> measurementData; - - private MultiLineGraphData(String resourceName, int resourceId, - List<MeasurementDataNumericHighLowComposite> measurmentData) { - this.resourceName = resourceName; - this.resourceId = resourceId; - this.measurementData = measurmentData; - } - - public String getResourceName() { - return resourceName; - } - - public int getResourceId() { - return resourceId; - } - - public List<MeasurementDataNumericHighLowComposite> getMeasurementData() { - return measurementData; - } - } - - /** * Adding is done asynchronously, so we must synchronize the add. * @param resourceMeasurementList */ @@ -238,19 +224,16 @@ public abstract class CompositeGroupD3GraphListView extends EnhancedVLayout impl return definitionId; }
- public String getChartId() { - return groupId + "-" + definition.getId(); - } - public void setDefinitionId(int definitionId) { this.definitionId = definitionId; this.definition = null; }
- public MeasurementDefinition getDefinition() { - return definition; + public String getChartId() { + return String.valueOf(definition.getId()); }
+ public void setDefinition(MeasurementDefinition definition) { this.definition = definition; } @@ -260,8 +243,6 @@ public abstract class CompositeGroupD3GraphListView extends EnhancedVLayout impl removeMember(buttonBarDateTimeRangeEditor); if (null != titleHLayout) removeMember(titleHLayout); - if (null != title) - removeMember(title); if (null != graph) removeMember(graph); } @@ -274,7 +255,7 @@ public abstract class CompositeGroupD3GraphListView extends EnhancedVLayout impl }
private void drawGraph() { - Log.debug("drawGraph in CompositeGroupD3GraphListView for: " + definition + "," + definitionId); + Log.debug("drawGraph in CompositeGroupD3GraphListView for: " + definition + " (" + definitionId+")");
if (null != titleHLayout) { removeMembers(); @@ -290,17 +271,20 @@ public abstract class CompositeGroupD3GraphListView extends EnhancedVLayout impl
addMember(titleHLayout);
- title = new HTMLFlow("<b>" + definition.getDisplayName() + "</b> " + definition.getDescription()); - title.setWidth100(); - addMember(title); graph = new HTMLFlow("<div id=\"mChart-" + getChartId() + "\" ><svg xmlns=\"http://www.w3.org/2000/svg\" version=\"1.1\" style=\"height:95%;\"></svg></div>"); graph.setWidth100(); graph.setHeight100(); addMember(graph);
- drawJsniChart(); - markForRedraw(); + new Timer(){ + + @Override + public void run() { + //markForRedraw(); + drawJsniChart(); + } + }.schedule(200);
}
@@ -318,6 +302,50 @@ public abstract class CompositeGroupD3GraphListView extends EnhancedVLayout impl return MSG.view_charts_time_axis_label(); }
+ public String getChartTitleMinLabel() { + return chartTitleMinLabel; + } + + public String getChartTitleAvgLabel() { + return chartTitleAvgLabel; + } + + public String getChartTitlePeakLabel() { + return chartTitlePeakLabel; + } + + public String getChartDateLabel() { + return chartDateLabel; + } + + public String getChartTimeLabel() { + return chartTimeLabel; + } + + public String getChartHoverTimeFormat() { + return chartHoverTimeFormat; + } + + public String getChartHoverDateFormat() { + return chartHoverDateFormat; + } + + public String getButtonBarDateTimeFormat() { + return MSG.common_buttonbar_datetime_format_moment_js(); + } + + public String getChartTitle() { + return chartTitle; + } + + public int getChartHeight() { + return chartHeight != null ? chartHeight : 210; + } + + public void setChartHeight(Integer chartHeight) { + this.chartHeight = chartHeight; + } + /** * Takes a measurementList for each resource and turn it into an array. * @return String @@ -326,8 +354,8 @@ public abstract class CompositeGroupD3GraphListView extends EnhancedVLayout impl StringBuilder sb = new StringBuilder("["); for (MeasurementDataNumericHighLowComposite measurement : measurementList) { if (!Double.isNaN(measurement.getValue())) { - sb.append("{ x:" + measurement.getTimestamp() + ","); - sb.append(" y:" + MeasurementUnits.scaleUp(measurement.getValue(), definition.getUnits()) + "},"); + sb.append("{ \"x\":" + measurement.getTimestamp() + ","); + sb.append(" \"y\":" + MeasurementUnits.scaleUp(measurement.getValue(), definition.getUnits()) + "},"); } } sb.setLength(sb.length() - 1); // delete the last ',' @@ -339,21 +367,57 @@ public abstract class CompositeGroupD3GraphListView extends EnhancedVLayout impl public String getJsonMetrics() { StringBuilder sb = new StringBuilder("["); for (MultiLineGraphData multiLineGraphData : measurementForEachResource) { - sb.append("{ values: "); - sb.append(produceInnerValuesArray(multiLineGraphData.getMeasurementData())); - sb.append(",key: 
'"); + sb.append("{ \"key\": \""); sb.append(multiLineGraphData.getResourceName()); - sb.append("'},"); + sb.append("\",\"values\" : "); + sb.append(produceInnerValuesArray(multiLineGraphData.getMeasurementData())); + sb.append("},"); } sb.setLength(sb.length() - 1); // delete the last ',' sb.append("]"); + Log.debug("*** Multi-resource Graph json: "+ sb.toString()); return sb.toString(); }
+ protected String getXAxisTimeFormatHoursMinutes() { + return MSG.chart_xaxis_time_format_hours_minutes(); + } + + protected String getXAxisTimeFormatHours() { + return MSG.chart_xaxis_time_format_hours(); + }
/** * Client can choose which graph types to render. */ public abstract void drawJsniChart();
+ /** + * Immutable data for each graph line. + */ + private final class MultiLineGraphData { + private String resourceName; + private int resourceId; + private List<MeasurementDataNumericHighLowComposite> measurementData; + + private MultiLineGraphData(String resourceName, int resourceId, + List<MeasurementDataNumericHighLowComposite> measurmentData) { + this.resourceName = resourceName; + this.resourceId = resourceId; + this.measurementData = measurmentData; + } + + public String getResourceName() { + return resourceName; + } + + public int getResourceId() { + return resourceId; + } + + public List<MeasurementDataNumericHighLowComposite> getMeasurementData() { + return measurementData; + } + } + } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3MultiLineGraph.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3MultiLineGraph.java new file mode 100644 index 0000000..9d595a7 --- /dev/null +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3MultiLineGraph.java @@ -0,0 +1,322 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ +package org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table; + +/** + * D3 rendition of group composite graphs for single metric multiple resources. + * + * @author Mike Thompson + */ +public class CompositeGroupD3MultiLineGraph extends CompositeGroupD3GraphListView { + + public CompositeGroupD3MultiLineGraph(int groupId, int defId, boolean isAutogroup) { + super(groupId, defId, isAutogroup); + } + + @Override + public native void drawJsniChart() /*-{ + console.log("Draw d3 MultiLine jsni chart"); + + var MultiLineChartContext = function (chartId, chartHeight, metricsData, xAxisLabel, chartTitle, yAxisUnits, minChartTitle, avgChartTitle, peakChartTitle, dateLabel, timeLabel, chartHoverTimeFormat, chartHoverDateFormat, isPortalGraph, portalId, buttonBarDateTimeFormat, chartXaxisTimeFormatHours, chartXaxisTimeFormatHoursMinutes) { + "use strict"; + if (!(this instanceof MultiLineChartContext)) { + throw new Error("MultiLineChartContext function cannot be called as a function.") + } + this.chartId = chartId; + this.chartHeight = chartHeight; + this.data = $wnd.jQuery.parseJSON(metricsData); // make into json + this.xAxisLabel = xAxisLabel; + this.chartTitle = chartTitle; + this.yAxisUnits = yAxisUnits; + this.minChartTitle = minChartTitle; + this.avgChartTitle = avgChartTitle; + this.peakChartTitle = peakChartTitle; + this.dateLabel = dateLabel; + this.timeLabel = timeLabel; + this.chartHoverTimeFormat = chartHoverTimeFormat; + this.chartHoverDateFormat = chartHoverDateFormat; + this.chartHandle = "mChart-" + chartId; + this.chartSelection = this.chartHandle + " svg"; + this.buttonBarDateTimeFormat = buttonBarDateTimeFormat; + this.chartXaxisTimeFormatHours = chartXaxisTimeFormatHours; + this.chartXaxisTimeFormatHoursMinutes = 
chartXaxisTimeFormatHoursMinutes; + + }; + + var global = this, + + // create a chartContext object (from rhq.js) with the data required to render to a chart + // this same data could be passed to different chart types + // This way, we are decoupled from the dependency on globals and JSNI and kept all the java interaction right here. + chartContext = new MultiLineChartContext(global.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getChartId()(), + global.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getChartHeight()(), + global.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getJsonMetrics()(), + global.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getXAxisTitle()(), + global.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getChartTitle()(), + global.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getYAxisUnits()(), + global.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getChartTitleMinLabel()(), + global.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getChartTitleAvgLabel()(), + global.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getChartTitlePeakLabel()(), + global.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getChartDateLabel()(), + global.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getChartTimeLabel()(), + 
global.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getChartHoverTimeFormat()(), + global.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getChartHoverDateFormat()(), + global.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getButtonBarDateTimeFormat()(), + global.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getXAxisTimeFormatHours()(), + global.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getXAxisTimeFormatHoursMinutes()() + ); + + + // Define the Stacked Bar Graph function using the module pattern + var multiLineGraph = function () { + "use strict"; + // privates + var margin = {top: 10, right: 5, bottom: 5, left: 40}, + width = 750 - margin.left - margin.right, + adjustedChartHeight = chartContext.chartHeight - 50, + height = adjustedChartHeight - margin.top - margin.bottom, + titleHeight = 30, titleSpace = 10, + chartData, + lowBound, + min, high, + newLow = 0, + highBound, + yScale, + yAxis, + timeScale, + xAxis, + chart, + svg; + + // adjust the min scale so blue low line is not in axis + function determineLowBound(min, peak) { + //var newLow = min - ((peak - min) * 0.1); + newLow = min; + if (newLow < 0) { + return 0; + } + else { + return newLow; + } + } + + + function determineScale() { + var xTicks, xTickSubDivide; + console.log("DetermineScale!"); + + if (chartContext.data.length > 0) { + xTicks = 8; + xTickSubDivide = 5; + chartData = chartContext.data; + min = $wnd.d3.min(function (d) { + return d.y; + }); + high = $wnd.d3.max(function (d) { + return d.y; + }); + + yScale = $wnd.d3.scale.linear() + .clamp(true) + .rangeRound([height, 0]) + .domain([$wnd.d3.min(chartContext.data, function (d) { + return d.y; + }), 
$wnd.d3.max(chartContext.data, function (d) { + return d.y; + })]); + + yAxis = $wnd.d3.svg.axis() + .scale(yScale) + .tickSubdivide(1) + .ticks(5) + .tickSize(4, 4, 0) + .orient("left"); + + + timeScale = $wnd.d3.time.scale() + .range([0, width]) + .domain($wnd.d3.extent(chartData, function (d) { + return d.x; + })); + + xAxis = $wnd.d3.svg.axis() + .scale(timeScale) + .ticks(xTicks) + .tickSubdivide(xTickSubDivide) + .tickSize(4, 4, 0) + .orient("bottom"); + + // create the actual chart group + chart = $wnd.d3.select("#" + chartContext.chartSelection); + + svg = chart.append("g") + .attr("width", width + margin.left + margin.right) + .attr("height", height + margin.top - titleHeight - titleSpace + margin.bottom) + .attr("transform", "translate(" + margin.left + "," + (+titleHeight + titleSpace + margin.top) + ")"); + + } + + } + + function createYAxisGridLines() { + // create the y axis grid lines + svg.append("g").classed("grid y_grid", true) + .call($wnd.d3.svg.axis() + .scale(yScale) + .orient("left") + .ticks(10) + .tickSize(-width, 0, 0) + .tickFormat("") + ); + } + + function createXandYAxes() { +// var customTimeFormat = timeFormat([ +// [$wnd.d3.time.format("%Y"), function () { +// return true; +// }], +// [$wnd.d3.time.format("%B"), function (d) { +// return d.getMonth(); +// }], +// [$wnd.d3.time.format("%b %d"), function (d) { +// return d.getDate() != 1; +// }], +// [$wnd.d3.time.format("%a %d"), function (d) { +// return d.getDay() && d.getDate() != 1; +// }], +// [$wnd.d3.time.format(chartContext.chartXaxisTimeFormatHours), function (d) { +// return d.getHours(); +// }], +// [$wnd.d3.time.format(chartContext.chartXaxisTimeFormatHoursMinutes), function (d) { +// return d.getMinutes(); +// }], +// [$wnd.d3.time.format(":%S"), function (d) { +// return d.getSeconds(); +// }], +// [$wnd.d3.time.format(".%L"), function (d) { +// return d.getMilliseconds(); +// }] +// ]); +// xAxis.tickFormat(customTimeFormat); + + // create x-axis + svg.append("g") + 
.attr("class", "x axis") + .attr("transform", "translate(0," + height + ")") + .attr("letter-spacing", "3") + .style("text-anchor", "end") + .call(xAxis); + + + // create y-axis + svg.append("g") + .attr("class", "y axis") + .call(yAxis) + .append("text") + .attr("transform", "rotate(-90),translate( -60,0)") + .attr("y", -30) + .attr("letter-spacing", "3") + .style("text-anchor", "end") + .text(chartContext.yAxisUnits === "NONE" ? "" : chartContext.yAxisUnits); + + } + + function timeFormat(formats) { + return function (date) { + var i = formats.length - 1, f = formats[i]; + while (!f[1](date)) f = formats[--i]; + return f[0](date); + } + } + + + function createHeader(titleName) { + var title = chart.append("g").append("rect") + .attr("class", "title") + .attr("x", 10) + .attr("y", margin.top) + .attr("height", titleHeight) + .attr("width", width + 30 + margin.left) + .attr("fill", "none"); + + chart.append("text") + .attr("class", "titleName") + .attr("x", 40) + .attr("y", 37) + .attr("font-size", "12") + .attr("font-weight", "bold") + .attr("text-anchor", "left") + .text(titleName) + .attr("fill", "#003168"); + + return title; + + } + + function createAvgLines() { + var barAvgLine = $wnd.d3.svg.line() + .interpolate("linear") + .x(function (d) { + return timeScale(d.x); + }) + .y(function (d) { + return yScale(d.y); + }); + + // Bar avg line + svg.append("path") + .datum(chartData) + .attr("class", "barAvgLine") + .attr("fill", "none") + .attr("stroke", "#2e376a") + .attr("stroke-width", "1.5") + .attr("stroke-opacity", ".7") + .attr("d", barAvgLine); + + } + + + return { + // Public API + draw: function (chartContext) { + "use strict"; + // Guard condition that can occur when a portlet has not been configured yet + console.log("multi-resource chart handle:" + chartContext.chartHandle); + //console.dir(chartContext.data); + if (chartContext.data.length > 0) { + console.log("Creating MultiLine Chart: " + chartContext.chartSelection + " --> " + 
chartContext.chartTitle); + determineScale(); + createHeader(chartContext.chartTitle); + console.log("created multi-header"); + createYAxisGridLines(); + createAvgLines(); + createXandYAxes(); + } + } + }; // end public closure + }(); + + if (chartContext.data !== undefined && chartContext.data.length > 0) { + multiLineGraph.draw(chartContext); + } + + }-*/; + + +} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupMultiLineGraphListView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupMultiLineGraphListView.java deleted file mode 100644 index 2804af0..0000000 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupMultiLineGraphListView.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * RHQ Management Platform - * Copyright (C) 2005-2012 Red Hat, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- */ -package org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table; - - -import org.rhq.enterprise.gui.coregui.client.CoreGUI; -import org.rhq.enterprise.gui.coregui.client.Messages; - -/** - * A MultiLine version of the Composite group single metric multiple resource charts. - * - * @author Mike Thompson - */ -public final class CompositeGroupMultiLineGraphListView extends CompositeGroupD3GraphListView -{ - private static final Messages MSG = CoreGUI.getMessages(); - - public CompositeGroupMultiLineGraphListView(int groupId, int defId, boolean isAutogroup) - { - super(groupId, defId, isAutogroup); - } - - - - @Override - public native void drawJsniChart() /*-{ - console.log("Draw nvd3 charts for composite multiline graph"); - var chartId = this.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getChartId()(), - chartHandle = "#mChart-"+chartId, - chartSelection = chartHandle + " svg", - yAxisUnits = this.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getYAxisUnits()(), - xAxisLabel = this.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getXAxisTitle()(), - xAxisTimeFormat = this.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupMultiLineGraphListView::getXAxisTimeFormatHoursMinutes()(); - json = eval(this.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getJsonMetrics()()); - - $wnd.nv.addGraph(function() { - var chart = $wnd.nv.models.lineChart(); - - chart.xAxis.axisLabel(xAxisLabel) - .tickFormat(function(d) { return $wnd.d3.time.format(xAxisTimeFormat)(new Date(d)) }); - - chart.yAxis - .axisLabel(yAxisUnits) - .tickFormat($wnd.d3.format('.02f')); - - $wnd.d3.select(chartSelection) - .datum(json) - .transition().duration(300) - .call(chart); - - 
$wnd.nv.utils.windowResize(chart.update); - - return chart; - }); - - }-*/; - - - - public String getXAxisTimeFormatHoursMinutes() { - return MSG.chart_xaxis_time_format_hours_minutes(); - } -} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupNvD3MultiLineGraph.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupNvD3MultiLineGraph.java new file mode 100644 index 0000000..d2ae7ef --- /dev/null +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupNvD3MultiLineGraph.java @@ -0,0 +1,74 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2012 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ +package org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table; + + + +/** + * A MultiLine version of the Composite group single metric multiple resource charts. 
+ * + * @author Mike Thompson + */ +@Deprecated +public final class CompositeGroupNvD3MultiLineGraph extends CompositeGroupD3GraphListView +{ + + public CompositeGroupNvD3MultiLineGraph(int groupId, int defId, boolean isAutogroup) + { + super(groupId, defId, isAutogroup); + } + + + + @Override + public native void drawJsniChart() /*-{ + console.log("Draw nvd3 charts for composite multiline graph"); + var chartId = this.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getChartId()(), + chartHandle = "#mChart-"+chartId, + chartSelection = chartHandle + " svg", + yAxisUnits = this.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getYAxisUnits()(), + xAxisLabel = this.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getXAxisTitle()(), + xAxisTimeFormat = this.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3MultiLineGraph::getXAxisTimeFormatHoursMinutes()(); + json = eval(this.@org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table.CompositeGroupD3GraphListView::getJsonMetrics()()); + + $wnd.nv.addGraph(function() { + var chart = $wnd.nv.models.lineChart(); + + chart.xAxis.axisLabel(xAxisLabel) + .tickFormat(function(d) { return $wnd.d3.time.format(xAxisTimeFormat)(new Date(d)) }); + + chart.yAxis + .axisLabel(yAxisUnits) + .tickFormat($wnd.d3.format('.02f')); + + $wnd.d3.select(chartSelection) + .datum(json) + .transition().duration(300) + .call(chart); + + $wnd.nv.utils.windowResize(chart.update); + + return chart; + }); + + }-*/; + + + +} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/GroupMeasurementTableView.java 
b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/GroupMeasurementTableView.java index 1e73f0d..efb7b72 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/GroupMeasurementTableView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/GroupMeasurementTableView.java @@ -65,7 +65,7 @@ public class GroupMeasurementTableView extends Table<GroupMetricsTableDataSource ChartViewWindow window = new ChartViewWindow("MeasurementTableFrame", title); int defId = record.getAttributeAsInt(GroupMetricsTableDataSource.FIELD_METRIC_DEF_ID);
- CompositeGroupD3GraphListView graph = new CompositeGroupMultiLineGraphListView(groupId, defId, isAutogroup); + CompositeGroupD3GraphListView graph = new CompositeGroupD3MultiLineGraph(groupId, defId, isAutogroup); window.addItem(graph); graph.populateData(); window.show();
commit 0b7e56cbe371944587db03a200fd98075d30a95d Author: John Sanda jsanda@redhat.com Date: Tue Jul 9 22:11:21 2013 -0400
initial commit for RHQ 4.8 storage node patch to disable all table compression
diff --git a/etc/scripts/rhq48-storage-patch/apache-cassandra-1.2.4-patch-1.jar b/etc/scripts/rhq48-storage-patch/apache-cassandra-1.2.4-patch-1.jar new file mode 100644 index 0000000..9bc5e4a Binary files /dev/null and b/etc/scripts/rhq48-storage-patch/apache-cassandra-1.2.4-patch-1.jar differ diff --git a/etc/scripts/rhq48-storage-patch/disable_compression.cql b/etc/scripts/rhq48-storage-patch/disable_compression.cql new file mode 100644 index 0000000..eccb962 --- /dev/null +++ b/etc/scripts/rhq48-storage-patch/disable_compression.cql @@ -0,0 +1,31 @@ +alter table rhq.raw_metrics with compression = {'sstable_compression': ''}; +alter table rhq.one_hour_metrics with compression = {'sstable_compression': ''}; +alter table rhq.six_hour_metrics with compression = {'sstable_compression': ''}; +alter table rhq.twenty_four_hour_metrics with compression = {'sstable_compression': ''}; +alter table rhq.metrics_index with compression = {'sstable_compression': ''}; +alter table rhq.schema_version with compression = {'sstable_compression': ''}; + +alter table system_auth.credentials with compression = {'sstable_compression': ''}; +alter table system_auth.permissions with compression = {'sstable_compression': ''}; +alter table system_auth.users with compression = {'sstable_compression': ''}; +alter table system_auth.users with compression = {'sstable_compression': ''}; + +alter table system_traces.events with compression = {'sstable_compression': ''}; +alter table system_traces.sessions with compression = {'sstable_compression': ''}; +alter table system_traces.sessions with compression = {'sstable_compression': ''}; + +alter table system."HintsColumnFamily" with compression = {'sstable_compression': ''}; +alter table system."IndexInfo" with compression = {'sstable_compression': ''}; +alter table system."LocationInfo" with compression = {'sstable_compression': ''}; +alter table system."Migrations" with compression = {'sstable_compression': ''}; +alter table system."NodeIdInfo" 
with compression = {'sstable_compression': ''}; +alter table system."Schema" with compression = {'sstable_compression': ''}; +alter table system.batchlog with compression = {'sstable_compression': ''}; +alter table system.hints with compression = {'sstable_compression': ''}; +alter table system.local with compression = {'sstable_compression': ''}; +alter table system.peer_events with compression = {'sstable_compression': ''}; +alter table system.peers with compression = {'sstable_compression': ''}; +alter table system.range_xfers with compression = {'sstable_compression': ''}; +alter table system.schema_columnfamilies with compression = {'sstable_compression': ''}; +alter table system.schema_columns with compression = {'sstable_compression': ''}; +alter table system.schema_keyspaces with compression = {'sstable_compression': ''}; diff --git a/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.sh b/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.sh new file mode 100755 index 0000000..756ba20 --- /dev/null +++ b/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.sh @@ -0,0 +1,50 @@ +#!/bin/bash + +if [ "x$1" = "x" ]; then + echo "Usage: $0 <rhq-server-dir> <cql-hostname>" + exit 1 +fi + +RHQ_SERVER_DIR=$1 +CQL_HOSTNAME=$2 +CQL_PORT=9142 +JMX_PORT=7299 + +PATCH="apache-cassandra-1.2.4-patch-1.jar" + +# swap out the Cassandra jar file with the patched version +echo "Copying patch file to $RHQ_SERVER_DIR/rhq-storage/lib" +mv $RHQ_SERVER_DIR/rhq-storage/lib/apache-cassandra-1.2.4.jar $TMPDIR +cp $PATCH $RHQ_SERVER_DIR/rhq-storage/lib + +# restart the storage node +echo "Starting RHQ Storage node" +$RHQ_SERVER_DIR/bin/rhqctl start --storage + +# run the CQL script +echo "Running CQL script to disable table compression" +export CQLSH_HOST=$CQL_HOSTNAME +export CQLSH_PORT=$CQL_PORT +$RHQ_SERVER_DIR/rhq-storage/bin/cqlsh -u rhqadmin -p rhqadmin -f ./disable_compression.cql + +# scrub all keyspaces +echo "Rebuilding data files for system keyspace" +$RHQ_SERVER_DIR/rhq-storage/bin/nodetool 
-u rhqadmin -pw rhqadmin -p $JMX_PORT scrub system + +echo "Rebuilding data files for system_traces keyspace" +$RHQ_SERVER_DIR/rhq-storage/bin/nodetool -u rhqadmin -pw rhqadmin -p $JMX_PORT scrub system_traces + +echo "Rebuilding data files for system_auth keyspace" +$RHQ_SERVER_DIR/rhq-storage/bin/nodetool -u rhqadmin -pw rhqadmin -p $JMX_PORT scrub system_auth + +echo "Rebuilding data files for rhq keyspace" +$RHQ_SERVER_DIR/rhq-storage/bin/nodetool -u rhqadmin -pw rhqadmin -p $JMX_PORT scrub rhq + +echo "Shutting down the RHQ Storage node" +$RHQ_SERVER_DIR/bin/rhqctl stop + +echo "Removing patch file" +rm $RHQ_SERVER_DIR/rhq-storage/lib/$PATCH +mv $TMPDIR/apache-cassandra-1.2.4.jar $RHQ_SERVER_DIR/rhq-storage/lib + +echo "Table compression has been disabled for all keyspaces. You are now ready to upgrade your RHQ installation."
commit 9f02dd2fa407518ba64ecad2409f368364d017f4 Author: John Sanda jsanda@redhat.com Date: Tue Jul 9 15:26:04 2013 -0400
Fixing broken tests caused by the change to persist seed nodes during install
Previously seed storage nodes were persisted at start up. That has been changed so that seed nodes are now persisted by the installer. This caused the storage client subsystem to fail to properly initialize. Seed nodes are now persisted at deployment time (for tests) by StrippedDownStartupBeanPreparation.
diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBeanTest.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBeanTest.java index 4e04447..78c0cff 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBeanTest.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBeanTest.java @@ -148,26 +148,6 @@ public class StorageNodeManagerBeanTest extends AbstractEJB3Test { // this method is still needed, because tests calls SLSB methods that are executed in their own transaction // and the rollback performed once the TransactionCallback is finished just wont clean everything
- // We can only filter on the group name because the resource type info might not exist in the test - // database. - ResourceGroupCriteria criteria = new ResourceGroupCriteria(); - criteria.addFilterName(STORAGE_NODE_GROUP_NAME); - - List<ResourceGroup> groups = resourceGroupManager.findResourceGroupsByCriteria(subjectManager.getOverlord(), - criteria); - - if (!groups.isEmpty()) { - resourceGroupManager.deleteResourceGroup(subjectManager.getOverlord(), groups.get(0).getId()); - } - -// for (ResourceGroup group : groups) { -// if (group.getName().equals(STORAGE_NODE_GROUP_NAME)) { -// resourceGroupManager.deleteResourceGroup(subjectManager.getOverlord(), group.getId()); -// break; -// } -// } - - // pause the currently running TX Transaction runningTransaction = getTransactionManager().suspend(); getTransactionManager().begin(); diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java index ea7ac6b..9ddba97 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java @@ -37,6 +37,7 @@ import java.util.List; import java.util.Properties; import java.util.regex.Pattern;
+import javax.ejb.EJB; import javax.management.MBeanServer; import javax.management.MalformedObjectNameException; import javax.management.ObjectName; @@ -87,6 +88,7 @@ import org.rhq.enterprise.server.plugin.pc.ServerPluginService; import org.rhq.enterprise.server.plugin.pc.ServerPluginServiceMBean; import org.rhq.enterprise.server.scheduler.SchedulerService; import org.rhq.enterprise.server.scheduler.SchedulerServiceMBean; +import org.rhq.enterprise.server.storage.StorageClientManagerBean; import org.rhq.enterprise.server.util.LookupUtil; import org.rhq.test.AssertUtils; import org.rhq.test.MatchResult; @@ -114,6 +116,9 @@ public abstract class AbstractEJB3Test extends Arquillian { @ArquillianResource protected InitialContext initialContext;
+ @EJB + private StorageClientManagerBean storageClientManager; + // We originally (in 4.2.3 days) ran these tests as "unit" tests in the server/jar module using // the embedded container. With Arquillian it makes sense to actually deploy an EAR because // we need a way to deploy dependent ears needed to support the server/jar classes. But @@ -518,7 +523,7 @@ public abstract class AbstractEJB3Test extends Arquillian { } } } - + storageClientManager.init(); beforeMethod(); beforeMethod(method);
diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/StrippedDownStartupBean.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/StrippedDownStartupBean.java index 2e654ab..58273d8 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/StrippedDownStartupBean.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/StrippedDownStartupBean.java @@ -20,21 +20,26 @@
package org.rhq.enterprise.server.test;
-import java.io.IOException; -import java.io.InputStream; -import java.util.Properties; +import static org.rhq.enterprise.server.cloud.StorageNodeManagerLocal.STORAGE_NODE_GROUP_NAME; + +import java.util.List;
import javax.ejb.EJB; import javax.ejb.Singleton; +import javax.ejb.TransactionAttribute; +import javax.ejb.TransactionAttributeType; import javax.persistence.EntityManager; import javax.persistence.PersistenceContext;
import org.rhq.core.domain.cloud.Server; import org.rhq.core.domain.cloud.StorageNode; +import org.rhq.core.domain.criteria.ResourceGroupCriteria; +import org.rhq.core.domain.resource.group.ResourceGroup; import org.rhq.enterprise.server.RHQConstants; -import org.rhq.enterprise.server.storage.StorageClientManagerBean; +import org.rhq.enterprise.server.auth.SubjectManagerLocal; import org.rhq.enterprise.server.core.StartupBean; import org.rhq.enterprise.server.naming.NamingHack; +import org.rhq.enterprise.server.resource.group.ResourceGroupManagerLocal;
/** * This is a replacement for the fullblown {@link StartupBean} of the actual RHQ server. @@ -45,9 +50,6 @@ public class StrippedDownStartupBean {
public static final String RHQ_SERVER_NAME_PROPERTY = "rhq.server.high-availability.name";
- @EJB - StorageClientManagerBean storageClientManager; - @PersistenceContext(unitName = RHQConstants.PERSISTENCE_UNIT_NAME) private EntityManager entityManager;
@@ -55,46 +57,30 @@ public class StrippedDownStartupBean { NamingHack.bruteForceInitialContextFactoryBuilder(); }
+ @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) public void init() { secureNaming(); - // TODO Find a better way to load system properties - // Cassandra connection info is currently obtained from system properties. I have - // yet to find a good way to set system properties for the deployment under test. - // https://github.com/arquillian/arquillian-showcase/tree/master/extensions/sys... - // might be worth looking at. - // - // jsanda - loadCassandraConnectionProps(); - storageClientManager.init(); }
/** - * Purges the test server and any storage nodes created during server initialization + * <p> + * Purges the storage node resource group, test server, and any storage nodes created during server initialization * from a prior test run. + * </p> + * <p> + * Note that the storage node group deletion simply removes the entity from the rhq_resource_group table. At this + * point in the deployment, {@link ResourceGroupManagerLocal#deleteResourceGroup(org.rhq.core.domain.auth.Subject, int)} + * cannot be used; therefore, any test that added storage node resources to the group should take care of removing + * them as well. + * </p> */ public void purgeTestServerAndStorageNodes() { + entityManager.createQuery("DELETE FROM " + ResourceGroup.class.getName() + " WHERE name = :storageNodeGroup") + .setParameter("storageNodeGroup", STORAGE_NODE_GROUP_NAME) + .executeUpdate(); entityManager.createQuery("DELETE FROM " + StorageNode.class.getName()).executeUpdate(); entityManager.createQuery("DELETE FROM " + Server.class.getName() + " WHERE name = :serverName") .setParameter("serverName", TestConstants.RHQ_TEST_SERVER_NAME) .executeUpdate(); } - - public void loadCassandraConnectionProps() { - InputStream stream = null; - try { - stream = getClass().getResourceAsStream("/cassandra-test.properties"); - Properties props = new Properties(); - props.load(stream); - - // DO NOT use System.setProperties(Properties). I previously tried that and it - // caused some arquillian deployment exception. 
- // - // jsanda - System.setProperty("rhq.cassandra.username", props.getProperty("rhq.cassandra.username")); - System.setProperty("rhq.cassandra.password", props.getProperty("rhq.cassandra.password")); - System.setProperty("rhq.cassandra.seeds", props.getProperty("rhq.cassandra.seeds")); - } catch (IOException e) { - throw new RuntimeException(("Failed to load cassandra-test.properties")); - } - } } diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/StrippedDownStartupBeanPreparation.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/StrippedDownStartupBeanPreparation.java index c272afe..5f67888 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/StrippedDownStartupBeanPreparation.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/StrippedDownStartupBeanPreparation.java @@ -20,6 +20,10 @@
package org.rhq.enterprise.server.test;
+import java.io.IOException; +import java.io.InputStream; +import java.util.Properties; + import javax.annotation.PostConstruct; import javax.annotation.Resource; import javax.ejb.EJB; @@ -28,11 +32,15 @@ import javax.ejb.Startup; import javax.ejb.Timeout; import javax.ejb.TimerConfig; import javax.ejb.TimerService; +import javax.persistence.EntityManager; +import javax.persistence.PersistenceContext;
import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory;
import org.rhq.core.domain.cloud.Server; +import org.rhq.core.domain.cloud.StorageNode; +import org.rhq.enterprise.server.RHQConstants; import org.rhq.enterprise.server.cloud.instance.ServerManagerLocal;
/** @@ -56,6 +64,9 @@ public class StrippedDownStartupBeanPreparation { @EJB private ServerManagerLocal serverManager;
+ @PersistenceContext(unitName = RHQConstants.PERSISTENCE_UNIT_NAME) + private EntityManager entityManager; + @Resource private TimerService timerService; // needed to schedule our startup bean init call
@@ -66,6 +77,8 @@ public class StrippedDownStartupBeanPreparation {
startupBean.purgeTestServerAndStorageNodes(); createTestServer(); + loadCassandraConnectionProps(); + createStorageNodes(); }
/** @@ -86,11 +99,41 @@ public class StrippedDownStartupBeanPreparation { System.setProperty(TestConstants.RHQ_SERVER_NAME_PROPERTY, TestConstants.RHQ_TEST_SERVER_NAME); }
+ private void createStorageNodes() { + String[] seedsInfo = System.getProperty("rhq.cassandra.seeds").split(","); + for (String seedInfo : seedsInfo) { + StorageNode storageNode = new StorageNode(); + storageNode.parseNodeInformation(seedInfo); + storageNode.setOperationMode(StorageNode.OperationMode.INSTALLED); + entityManager.persist(storageNode); + } + } + + public void loadCassandraConnectionProps() { + InputStream stream = null; + try { + stream = getClass().getResourceAsStream("/cassandra-test.properties"); + Properties props = new Properties(); + props.load(stream); + + // DO NOT use System.setProperties(Properties). I previously tried that and it + // caused some arquillian deployment exception. + // + // jsanda + System.setProperty("rhq.cassandra.username", props.getProperty("rhq.cassandra.username")); + System.setProperty("rhq.cassandra.password", props.getProperty("rhq.cassandra.password")); + System.setProperty("rhq.cassandra.seeds", props.getProperty("rhq.cassandra.seeds")); + } catch (IOException e) { + throw new RuntimeException(("Failed to load cassandra-test.properties")); + } + } + @Timeout public void initializeServer() throws RuntimeException { try { log.info("Initializing the testing RHQ deployment"); this.startupBean.init(); + log.info("Initialization complete"); } catch (Throwable t) { // do NOT allow exceptions to bubble out of our method because then // the EJB container would simply re-trigger the timer and call us again
commit 4dc0ec58446ea00d82a13e3fd797f4ff05580cb6 Author: Jirka Kremser jkremser@redhat.com Date: Tue Jul 9 19:03:57 2013 +0200
Unit test for StorageNode entity.
diff --git a/modules/core/domain/src/test/java/org/rhq/core/domain/cloud/StorageNodeTest.java b/modules/core/domain/src/test/java/org/rhq/core/domain/cloud/StorageNodeTest.java new file mode 100644 index 0000000..ce4dbbd --- /dev/null +++ b/modules/core/domain/src/test/java/org/rhq/core/domain/cloud/StorageNodeTest.java @@ -0,0 +1,111 @@ +/* + * RHQ Management Platform + * Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation, and/or the GNU Lesser + * General Public License, version 2.1, also as published by the Free + * Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License and the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * and the GNU Lesser General Public License along with this program; + * if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ +package org.rhq.core.domain.cloud; + +import org.testng.Assert; +import org.testng.annotations.Test; + +import org.rhq.core.domain.cloud.StorageNode.OperationMode; + +@Test +public class StorageNodeTest { + public void testEquals() { + StorageNode localhost1 = new StorageNode(); + assert localhost1 != null; + assert !localhost1.equals(null); + + StorageNode localhost2 = new StorageNode(); + assert localhost2 != null; + assert localhost1.equals(localhost2); + assert localhost2.equals(localhost1); + + localhost1.setAddress("127.0.0.1"); + assert !localhost1.equals(localhost2); + assert !localhost2.equals(localhost1); + + localhost2.setAddress("127.0.0.1"); + assert localhost1.equals(localhost2); + assert localhost2.equals(localhost1); + + StorageNode localhost3 = new StorageNode(42); + localhost3.setAddress("sn.com"); + assert !localhost3.equals(null); + assert !localhost3.equals(localhost1); + assert localhost3.hashCode() != localhost1.hashCode(); + assert localhost2.hashCode() == localhost1.hashCode(); + + localhost3.setAddress("127.0.0.1"); + assert localhost3.equals(localhost1); + assert localhost3.hashCode() == localhost1.hashCode(); + } + + public void testParseNodeInformation1() { + StorageNode localhost1 = new StorageNode(); + localhost1.parseNodeInformation("127.0.0.1|1234|4321"); + assert "127.0.0.1".equals(localhost1.getAddress()); + assert localhost1.getJmxPort() == 1234; + assert localhost1.getCqlPort() == 4321; + assert "service:jmx:rmi:///jndi/rmi://127.0.0.1:1234/jmxrmi".equals(localhost1.getJMXConnectionURL()); + + localhost1.setOperationMode(OperationMode.INSTALLED); + assert localhost1.getOperationMode() == OperationMode.INSTALLED; + assert localhost1.getOperationMode().getMessage() != null; + assert localhost1.getOperationMode().getMessage() != null; + localhost1.setMtime(42); + assert localhost1.getMtime() == 42; + + StorageNode localhost2 = new StorageNode(); + localhost2.parseNodeInformation("127.0.0.1|1235|5321"); + assert 
localhost1.equals(localhost2); + assert !localhost1.getJMXConnectionURL().equals(localhost2.getJMXConnectionURL()); + } + + public void testParseNodeInformation2() { + StorageNode localhost1 = new StorageNode(); + try { + localhost1.parseNodeInformation("127.0.0.1|1234|4321|foo"); + Assert.fail("The exception (IllegalArgumentException) should be thrown!"); + } catch (IllegalArgumentException e) { + } + try { + localhost1.parseNodeInformation("127.0.0.1|1234"); + Assert.fail("The exception (IllegalArgumentException) should be thrown!"); + } catch (IllegalArgumentException e) { + } + try { + localhost1.parseNodeInformation("127.0.0.1|aaaa|4321"); + Assert.fail("The exception (NumberFormatException) should be thrown!"); + } catch (NumberFormatException e) { + } + try { + localhost1.parseNodeInformation("127.0.0.1|1234|bbbb"); + Assert.fail("The exception (NumberFormatException) should be thrown!"); + } catch (NumberFormatException e) { + } + try { + localhost1.parseNodeInformation(null); + Assert.fail("The exception (NullPointerException) should be thrown!"); + } catch (NullPointerException e) { + } + } +}
commit a10a8a6c63cf8f980cf0e657d5349ed42547c2c2 Author: Jirka Kremser jkremser@redhat.com Date: Tue Jul 9 18:55:45 2013 +0200
StorageNode: hashCode() should be consistent with equals().
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java index 1d39bcd..14043db 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java @@ -238,7 +238,6 @@ public class StorageNode implements Serializable { public int hashCode() { final int prime = 31; int result = 1; - result = prime * result + (int) (ctime ^ (ctime >>> 32)); result = prime * result + ((address == null) ? 0 : address.hashCode()); return result; } @@ -255,10 +254,6 @@ public class StorageNode implements Serializable {
final StorageNode other = (StorageNode) obj;
- //if (ctime != other.ctime) { - // return false; - //} - if (address == null) { if (other.address != null) { return false;
commit 19facfd1591445b491f155303e083d731dd06f61 Author: Jirka Kremser jkremser@redhat.com Date: Tue Jul 9 18:25:09 2013 +0200
Fixing the api-checks jenkins build. Adding the setDiskSpacePercentageUsed() and getDiskSpacePercentageUsed() methods back to StorageNodeLoadComposite, and marking them as deprecated.
diff --git a/modules/core/domain/intentional-api-changes-since-4.8.0.xml b/modules/core/domain/intentional-api-changes-since-4.8.0.xml new file mode 100644 index 0000000..f21a45f --- /dev/null +++ b/modules/core/domain/intentional-api-changes-since-4.8.0.xml @@ -0,0 +1,3 @@ +<?xml version="1.0"?> +<differences> +</differences> diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeLoadComposite.java b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeLoadComposite.java index 0913f1d..80bfdd6 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeLoadComposite.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeLoadComposite.java @@ -113,6 +113,24 @@ public class StorageNodeLoadComposite implements Serializable { public void setHeapPercentageUsed(MeasurementAggregateWithUnits heapPercentageUsed) { this.heapPercentageUsed = heapPercentageUsed; } + + /** + * @deprecated use {@link #getPartitionDiskUsedPercentage() getPartitionDiskUsedPercentage()} instead + * + * @return partitionDiskUsedPercentage + */ + public MeasurementAggregateWithUnits getDiskSpacePercentageUsed() { + return getPartitionDiskUsedPercentage(); + } + + /** + * @deprecated use {@link #setPartitionDiskUsedPercentage() setPartitionDiskUsedPercentage()} instead + * + * @param partitionDiskUsedPercentage + */ + public void setDiskSpacePercentageUsed(MeasurementAggregateWithUnits partitionDiskUsedPercentage) { + setPartitionDiskUsedPercentage(partitionDiskUsedPercentage); + }
/** * @return A computed metric for the percentage of disk space used on the partition that contains the SSTables.
commit 4381d307b87bb8ca5e7cba265633aa585adcf27f Author: Heiko W. Rupp hwr@redhat.com Date: Tue Jul 9 17:13:00 2013 +0200
BZ 966294 Fix mib file and related code. Also fix some other smaller issues.
diff --git a/modules/enterprise/server/appserver/src/main/resources/etc/RHQ-mib.txt b/modules/enterprise/server/appserver/src/main/resources/etc/RHQ-mib.txt index ced6071..5586ac0 100644 --- a/modules/enterprise/server/appserver/src/main/resources/etc/RHQ-mib.txt +++ b/modules/enterprise/server/appserver/src/main/resources/etc/RHQ-mib.txt @@ -3,39 +3,51 @@ RHQ-MIB DEFINITIONS ::= BEGIN IMPORTS MODULE-IDENTITY, OBJECT-TYPE, NOTIFICATION-TYPE, snmpModules, enterprises FROM SNMPv2-SMI - coldStart - FROM SNMPv2-MIB OBJECT-GROUP, NOTIFICATION-GROUP, MODULE-COMPLIANCE FROM SNMPv2-CONF DisplayString FROM SNMPv2-TC;
rhqMIB MODULE-IDENTITY - LAST-UPDATED "201112200000Z" + LAST-UPDATED "201307020000Z" ORGANIZATION "RHQ-Project" CONTACT-INFO "http://www.jboss.org/rhq" DESCRIPTION "The MIB module for RHQ alerts.
This file is part of the RHQ management platform - Copyright (C) 2005-2012 Red Hat, Inc. + Copyright (C) 2005-2013 Red Hat, Inc. All rights reserved. "
- REVISION "200807110000Z" - DESCRIPTION "Initial version" - REVISION "201010180000Z" - DESCRIPTION "Better trap support" + REVISION "201307020000Z" + DESCRIPTION "Bug fixes" REVISION "201112200000Z" DESCRIPTION "Also emit resource lineage" + REVISION "201010180000Z" + DESCRIPTION "Better trap support" + REVISION "200807110000Z" + DESCRIPTION "Initial version" ::= { snmpModules 1 }
+-- 1.3.6.1.4.1.18016 jboss OBJECT IDENTIFIER ::= {enterprises 18016 }
+-- 1.3.6.1.4.1.18016.2 rhq OBJECT IDENTIFIER ::= {jboss 2 }
+-- 1.3.6.1.4.1.18016.2.1 alert OBJECT IDENTIFIER ::= {rhq 1 }
+-- 1.3.6.1.4.1.18016.2.1.2 +alertNotifications OBJECT IDENTIFIER ::= {rhq 2} + +-- 1.3.6.1.4.1.18016.2.3 +rhqServer OBJECT IDENTIFIER ::= {rhq 3} + + +-- 1.3.6.1.4.1.18016.2.1.2.0 +alertNotifPrefix OBJECT IDENTIFIER ::= {alertNotifications 0 }
alertName OBJECT-TYPE SYNTAX DisplayString (SIZE (0..255)) @@ -93,28 +105,10 @@ alertHierarchy OBJECT-TYPE STATUS current DESCRIPTION "The hierarchy of the resource that triggered the alert" - ::= { alert 6 } - --- conformance information - -snmpMIBConformance - OBJECT IDENTIFIER ::= { rhqMIB 2 } - -snmpMIBCompliances - OBJECT IDENTIFIER ::= { snmpMIBConformance 1 } -snmpMIBGroups OBJECT IDENTIFIER ::= { snmpMIBConformance 2 } - --- compliance statements - -snmpBasicCompliance MODULE-COMPLIANCE - STATUS current - DESCRIPTION "TODO" - MODULE - MANDATORY-GROUPS { alertGroup, trapGroup } - - ::= { snmpMIBCompliances 2 } + ::= { alert 7 }
-alertGroup OBJECT-GROUP +-- 1.3.6.1.4.1.18016.2.1.2.0.1 +alertNotification NOTIFICATION-TYPE OBJECTS { alertName, alertResourceName, alertPlatformName, @@ -124,12 +118,46 @@ alertGroup OBJECT-GROUP alertHierarchy } STATUS current DESCRIPTION "A collection of objects providing information about an alert" - ::= { snmpMIBGroups 1 } + ::= { alertNotifPrefix 1 } + + +-- conformance information + +rhqMIBConformance OBJECT IDENTIFIER ::= { rhqMIB 2 } +rhqTraps OBJECT IDENTIFIER ::= { rhqMIB 3 } +rhqTrapPrefix OBJECT IDENTIFIER ::= { rhqTraps 0 }
-trapGroup NOTIFICATION-GROUP - NOTIFICATIONS { coldStart } +rhqMIBCompliances OBJECT IDENTIFIER ::= { rhqMIBConformance 1 } +rhqMIBGroups OBJECT IDENTIFIER ::= { rhqMIBConformance 2 } + +-- compliance statements + +rhqMibBasicCompliance MODULE-COMPLIANCE + STATUS current + DESCRIPTION "Module compliance definition for the RHQ-MIB extension module" + MODULE + MANDATORY-GROUPS { rhqAlertGroup, rhqNotificationGroup } + + ::= { rhqMIBCompliances 2 } + +rhqAlertGroup OBJECT-GROUP + OBJECTS { alertName, + alertResourceName, + alertPlatformName, + alertCondition, + alertSeverity, + alertUrl, + alertHierarchy + } + STATUS current + DESCRIPTION "A collection of entries for a notifications for alerts" + ::= { rhqMIBGroups 2 } + +rhqNotificationGroup NOTIFICATION-GROUP + NOTIFICATIONS { alertNotification } STATUS current DESCRIPTION "A collection of notifications for alerts" - ::= { snmpMIBGroups 2 } + ::= { rhqMIBGroups 3 } +
END diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/plugin/pc/alert/AlertSender.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/plugin/pc/alert/AlertSender.java index 88c140f..bda7f99 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/plugin/pc/alert/AlertSender.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/plugin/pc/alert/AlertSender.java @@ -1,6 +1,6 @@ /* * RHQ Management Platform - * Copyright (C) 2005-2009 Red Hat, Inc. + * Copyright (C) 2005-2013 Red Hat, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify @@ -13,8 +13,8 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ package org.rhq.enterprise.server.plugin.pc.alert;
@@ -30,7 +30,6 @@ import org.rhq.core.domain.configuration.Configuration; import org.rhq.core.domain.configuration.Property; import org.rhq.core.domain.configuration.PropertyMap; import org.rhq.core.domain.configuration.PropertySimple; -import org.rhq.enterprise.server.plugin.pc.ControlResults; import org.rhq.enterprise.server.plugin.pc.ServerPluginComponent; import org.rhq.enterprise.server.plugin.pc.ServerPluginEnvironment;
@@ -103,7 +102,7 @@ public abstract class AlertSender<T extends ServerPluginComponent> {
/** * Validates the alert and extra parameters. The results should be initialized with the current - * parameters of this alert sender and the erroneous properties should have their + * parameters of this alert sender and the erroneous properties should have their * {@link Property#getErrorMessage() error messages} set. * <p> * The implementation is free to change (add/update/delete) properties in either of the configurations @@ -111,14 +110,14 @@ public abstract class AlertSender<T extends ServerPluginComponent> { * further processed in an alert sender specific way before they get stored into the database. * <p> * The default implementation makes no changes to the configurations. - * + * * @param subject the subject requesting the changes in the configuration * @return the validation results */ public AlertSenderValidationResults validateAndFinalizeConfiguration(Subject subject) { return new AlertSenderValidationResults(alertParameters, extraParameters); } - + private String printProperty(Property property) { if (property instanceof PropertySimple) { return ((PropertySimple) property).getStringValue(); diff --git a/modules/enterprise/server/plugins/alert-snmp/src/main/java/org/rhq/enterprise/server/plugins/alertSnmp/SnmpInfo.java b/modules/enterprise/server/plugins/alert-snmp/src/main/java/org/rhq/enterprise/server/plugins/alertSnmp/SnmpInfo.java index c92daae..7b174cc 100644 --- a/modules/enterprise/server/plugins/alert-snmp/src/main/java/org/rhq/enterprise/server/plugins/alertSnmp/SnmpInfo.java +++ b/modules/enterprise/server/plugins/alert-snmp/src/main/java/org/rhq/enterprise/server/plugins/alertSnmp/SnmpInfo.java @@ -79,6 +79,18 @@ public class SnmpInfo { return new SnmpInfo(host, port, oid, trapOid); }
+ protected static SnmpInfo load(Configuration configuration, Configuration preferences) { + String host = configuration.getSimpleValue(PARAM_HOST, null); // optional + if (host==null || host.isEmpty()) { + host = preferences.getSimpleValue("defaultTargetHost",null); + } + String port = configuration.getSimpleValue(PARAM_PORT, DEFAULT_PORT); + String oid = configuration.getSimpleValue(PARAM_VARIABLE_BINDING_PREFIX, null); // required + String trapOid = configuration.getSimpleValue(PARAM_TRAP_OID, null); + return new SnmpInfo(host, port, oid, trapOid); + + } + @Override public String toString() { String hostString = (host == null ? "UnknownHost" : host); diff --git a/modules/enterprise/server/plugins/alert-snmp/src/main/java/org/rhq/enterprise/server/plugins/alertSnmp/SnmpSender.java b/modules/enterprise/server/plugins/alert-snmp/src/main/java/org/rhq/enterprise/server/plugins/alertSnmp/SnmpSender.java index 5c6d1e7..521589d 100644 --- a/modules/enterprise/server/plugins/alert-snmp/src/main/java/org/rhq/enterprise/server/plugins/alertSnmp/SnmpSender.java +++ b/modules/enterprise/server/plugins/alert-snmp/src/main/java/org/rhq/enterprise/server/plugins/alertSnmp/SnmpSender.java @@ -45,7 +45,7 @@ public class SnmpSender extends AlertSender { private AlertManagerLocal alertManager;
/** - * Default constructor needed for instanciation by server plugin container + * Default constructor needed for instantiation by server plugin container */ public SnmpSender() { this(LookupUtil.getResourceManager(), LookupUtil.getAlertManager()); @@ -59,7 +59,7 @@ public class SnmpSender extends AlertSender { @Override public SenderResult send(Alert alert) {
- SnmpInfo info = SnmpInfo.load(alertParameters); + SnmpInfo info = SnmpInfo.load(alertParameters, preferences); if (info.error != null) { return SenderResult.getSimpleFailure(info.error); } @@ -75,8 +75,8 @@ public class SnmpSender extends AlertSender { String platformName = lineage.get(0).getName(); String conditions = alertManager.prettyPrintAlertConditions(alert, false); String alertUrl = alertManager.prettyPrintAlertURL(alert); - - + + String hierarchy = getResourceHierarchyAsString(lineage);
Date bootTime = new Date(); // TODO: want to use LookupUtil.getCoreServer().getBootTime() but ServiceMBean is not visible @@ -102,7 +102,7 @@ public class SnmpSender extends AlertSender {
@Override public String previewConfiguration() { - SnmpInfo info = SnmpInfo.load(alertParameters); + SnmpInfo info = SnmpInfo.load(alertParameters, preferences); return info.toString(); } } diff --git a/modules/enterprise/server/plugins/alert-snmp/src/main/java/org/rhq/enterprise/server/plugins/alertSnmp/SnmpTrapSender.java b/modules/enterprise/server/plugins/alert-snmp/src/main/java/org/rhq/enterprise/server/plugins/alertSnmp/SnmpTrapSender.java index 2556b38..ea3d985 100644 --- a/modules/enterprise/server/plugins/alert-snmp/src/main/java/org/rhq/enterprise/server/plugins/alertSnmp/SnmpTrapSender.java +++ b/modules/enterprise/server/plugins/alert-snmp/src/main/java/org/rhq/enterprise/server/plugins/alertSnmp/SnmpTrapSender.java @@ -76,7 +76,9 @@ import org.rhq.core.util.StringUtil;
/** * @author Ian Springer + * @author Heiko W. Rupp */ +@SuppressWarnings("unused") public class SnmpTrapSender implements PDUFactory { public static final int DEFAULT = 0; private static final String UDP_TRANSPORT = "udp"; @@ -104,7 +106,10 @@ public class SnmpTrapSender implements PDUFactory {
private TimeTicks sysUpTime = new TimeTicks(0);
- private OID trapOID = SnmpConstants.coldStart; + public static final OID enterpriseSpecificTrap = + new OID(new int[] { 1,3,6,1,6,3,1,1,5,6 }); + + private OID trapOID = enterpriseSpecificTrap;
private PDUv1 v1TrapPDU = new PDUv1();
@@ -338,14 +343,33 @@ public class SnmpTrapSender implements PDUFactory { return octetString; }
- private static Address createAddress(Configuration properties) { - // TODO: Make transport configurable (ips, 09/12/07). + private Address createAddress(Configuration properties) {
String host = properties.getSimpleValue("host",null); - String portS = properties.getSimpleValue("port","162"); + String portS = properties.getSimpleValue("port",null); + + + if (host==null) { + String tmp = systemConfig.getSimpleValue("defaultTargetHost",null); + if ((tmp != null) && (tmp.length() > 0)) { + host=tmp; + } + } + + if (portS==null) { + String tmp = systemConfig.getSimpleValue("defaultPort","162"); + if ((tmp != null) && (tmp.length() > 0)) { + portS = tmp; + } + } Integer port = Integer.valueOf(portS); + if (port==0) { + port = 162; // just to make sure + } + + String transport = systemConfig.getSimpleValue("transport","UDP"); +
- final String transport = UDP_TRANSPORT; String address = host + "/" + port; if (transport.equalsIgnoreCase(UDP_TRANSPORT)) { return new UdpAddress(address); @@ -357,7 +381,7 @@ public class SnmpTrapSender implements PDUFactory { }
protected String getVariableBindings(PDU response) { - StringBuffer strBuf = new StringBuffer(); + StringBuilder strBuf = new StringBuilder(); for (int i = 0; i < response.size(); i++) { VariableBinding vb = response.get(i); strBuf.append(vb.toString()); @@ -473,7 +497,7 @@ public class SnmpTrapSender implements PDUFactory {
String variableBindingPrefix = alertParameters.getSimpleValue(SnmpInfo.PARAM_VARIABLE_BINDING_PREFIX, null);
- // TODO add a request id and a timestamp + // request id and a timestamp are added below in setSysUpTime..
this.address = createAddress(alertParameters); // bind the alert definitions name on the oid set in the alert @@ -532,7 +556,7 @@ public class SnmpTrapSender implements PDUFactory { delta = now - bootTime.getTime(); } else delta = 0; - setSysUpTime(new TimeTicks(delta / 1000)); // TT is 100th of a second TODO : fix this !!! + setSysUpTime(new TimeTicks(delta / 100)); // TT is 100th of a second
}
@@ -571,6 +595,8 @@ public class SnmpTrapSender implements PDUFactory { this.authProtocol = AuthMD5.ID; } else if (tmp.equals("SHA")) { this.authProtocol = AuthSHA.ID; + } else if (tmp.equals("none")) { + this.authProtocol=null; } else { throw new IllegalStateException("SNMP authentication protocol unsupported: " + tmp); } diff --git a/modules/enterprise/server/plugins/alert-snmp/src/main/resources/META-INF/rhq-serverplugin.xml b/modules/enterprise/server/plugins/alert-snmp/src/main/resources/META-INF/rhq-serverplugin.xml index 83a55e9..3f7a35c 100644 --- a/modules/enterprise/server/plugins/alert-snmp/src/main/resources/META-INF/rhq-serverplugin.xml +++ b/modules/enterprise/server/plugins/alert-snmp/src/main/resources/META-INF/rhq-serverplugin.xml @@ -25,15 +25,29 @@ </c:property-options> </c:simple-property>
- <c:simple-property name="trapOid" displayName="Trap OID" description="OID for the trap sent" type="string" /> + <c:simple-property name="defaultTargetHost" displayName="Default trap target host" required="false"/> + <c:simple-property name="defaultPort" displayName="Default trap target port" required="false" type="integer" + default="162" defaultValue="162"/> + <c:simple-property name="transport" defaultValue="UDP"> + <c:property-options allowCustomValue="false"> + <c:option value="UDP" name="UDP"/> + <c:option value="TCP" name="TCP"/> + </c:property-options> + </c:simple-property> + + <c:simple-property name="trapOid" displayName="Trap OID" description="OID for the trap sent" type="string" + default="1.3.6.1.4.1.18016.2.1.2.0.1"/> <c:simple-property name="community" type="string" default="public" description="Community - v1 and v2c only" required="false"/>
<c:group name="1" displayName="SNMP version 1 properties" hiddenByDefault="true" > - <c:simple-property name="engineId" required="false"/> - <c:simple-property name="genericId" required="false"/> - <c:simple-property name="enterpriseOid" required="false"/> - <c:simple-property name="specificId" required="false"/> - <c:simple-property name="agentAddress" description="Address of our SNMP agent" required="false"/> + + <c:simple-property name="genericId" required="false" default="6" type="integer" + description="Set the generic trap type. Default is 6 (=Enterprise specific)."/> + <c:simple-property name="enterpriseOid" required="false" default="1.3.6.1.4.1.18016.2.3" + description="OID of the sender, identifies the type of managed object generating the trap. Default is enterprise.jboss.rhq.rhqServer"/> + <c:simple-property name="specificId" required="false" default="0" type="integer" + description="Enterprise-specific ID of the trap. If this is set, the generic ID must be set to 6."/> + <c:simple-property name="agentAddress" description="Address of our SNMP agent (=the RHQ server)" required="false"/> </c:group> <!-- <c:group name="2c" displayName="SNMP version 2c properties" hiddenByDefault="true"> @@ -41,14 +55,15 @@ </c:group> --> <c:group name="3" displayName="SNMP version 3 properties" hiddenByDefault="true"> - <c:simple-property name="authProtocol" type="string" default="MD5"> + <c:simple-property name="authProtocol" type="string" default="MD5" + description="Authorization protocol to use. 
If no Auth Passphrase is given, this must be set to 'none'."> <c:property-options> <c:option value="none"/> <c:option value="MD5"/> <c:option value="SHA"/> </c:property-options> </c:simple-property> - <c:simple-property name="privacyProtocol" default="AES"> + <c:simple-property name="privacyProtocol" default="AES" description="The privacy protocol to use in conjunction when the auth protocol is set"> <c:property-options> <c:option value="DES"/> <c:option value="AES"/> @@ -56,8 +71,9 @@ <c:option value="AES256"/> </c:property-options> </c:simple-property> + <c:simple-property name="engineId" required="false"/> <!-- TODO this was not here before, but in v1, where it is wrong --> <c:simple-property name="targetContext" displayName="Target Context Name" type="string" required="false"/> - <c:simple-property name="authPassphrase" description="Auto Passphrase is required with autorization enabled" type="password" required="false"/> + <c:simple-property name="authPassphrase" description="Auto Passphrase is required with authorization enabled. If this is not set, authProtocol must be not set as well. Length must be > 8." type="password" required="false"/> <c:simple-property name="privacyPassphrase" description="Privacy Passphrase is required with privacy enabled" type="password" required="false"/> <c:simple-property name="securityName" type="string" required="false"/>
@@ -71,13 +87,15 @@ <a:plugin-class>SnmpSender</a:plugin-class>
<a:alert-configuration> - <c:simple-property name="host" type="string" required="true" description="Trap target host"/> + <c:simple-property name="host" type="string" required="false" description="Trap target host. Required if not yet set in the + sender-wide preferences"/> <c:simple-property name="port" type="integer" required="false" default="162" description="Trap target port"/> - <c:simple-property name="oid" displayName="Variable bindings prefix" type="string" required="true"> + <c:simple-property name="oid" displayName="Variable bindings prefix" type="string" required="true" + defaultValue="1.3.6.1.4.1.18016.2.1"> <c:description> <![CDATA[ <p>RHQ will send alert notification details as a list of variable bindings in the - SNMP trap PDU.</p> + SNMP trap PDU. This OID is the common prefix of those bindings.</p> <p><strong>Do not confuse this paramater with 'Trap Oid'.</strong></p> ]]> </c:description> diff --git a/modules/enterprise/server/plugins/alert-snmp/src/test/java/org/rhq/enterprise/server/plugins/alertSnmp/SnmpInfoTest.java b/modules/enterprise/server/plugins/alert-snmp/src/test/java/org/rhq/enterprise/server/plugins/alertSnmp/SnmpInfoTest.java index c46c8b6..97b9a05 100644 --- a/modules/enterprise/server/plugins/alert-snmp/src/test/java/org/rhq/enterprise/server/plugins/alertSnmp/SnmpInfoTest.java +++ b/modules/enterprise/server/plugins/alert-snmp/src/test/java/org/rhq/enterprise/server/plugins/alertSnmp/SnmpInfoTest.java @@ -29,6 +29,7 @@ import static org.testng.Assert.*; import org.testng.annotations.Test;
import org.rhq.core.domain.configuration.Configuration; +import org.rhq.core.domain.configuration.PropertySimple;
/** * @author Thomas Segismont @@ -69,6 +70,29 @@ public class SnmpInfoTest { }
@Test + public void fallBackToGlobalValues() throws Exception { + Configuration configuration = new Configuration(); + configuration.setSimpleValue(PARAM_VARIABLE_BINDING_PREFIX, "molo"); + + Configuration preferences = new Configuration(); + preferences.put(new PropertySimple("defaultTargetHost","hugo")); + SnmpInfo snmpInfo = SnmpInfo.load(configuration,preferences); + assertNull(snmpInfo.error, "SnmpInfo#load should not have detected an error"); + assertEquals(snmpInfo.host, "hugo"); + assertEquals(snmpInfo.oid, "molo"); + assertEquals(snmpInfo.port, SnmpInfo.DEFAULT_PORT); + + + configuration.setSimpleValue(PARAM_HOST, "pipo"); + + snmpInfo = SnmpInfo.load(configuration,preferences); + assertNull(snmpInfo.error, "SnmpInfo#load should not have detected an error"); + assertEquals(snmpInfo.host, "pipo"); + assertEquals(snmpInfo.oid, "molo"); + assertEquals(snmpInfo.port, SnmpInfo.DEFAULT_PORT); + } + + @Test public void shouldExposeAllParams() throws Exception { Configuration configuration = new Configuration(); configuration.setSimpleValue(PARAM_HOST, "pipo");
commit 44e927da01ac1d6fb0252c71615c675815787408 Author: Heiko W. Rupp hwr@redhat.com Date: Mon Jul 8 10:01:47 2013 +0200
Bump rest-assured version.
diff --git a/modules/integration-tests/rest-api/pom.xml b/modules/integration-tests/rest-api/pom.xml index 7b42b45..47bfe18 100644 --- a/modules/integration-tests/rest-api/pom.xml +++ b/modules/integration-tests/rest-api/pom.xml @@ -19,7 +19,7 @@ <properties> <surefire-plugin.version>2.10</surefire-plugin.version> <jackson.version>1.9.5</jackson.version> - <rest-assured.version>1.7.2</rest-assured.version> + <rest-assured.version>1.8.1</rest-assured.version> </properties>
<build>
commit 686716224eeaeed7179e1edc9a4532702b7bd494 Author: John Sanda jsanda@redhat.com Date: Mon Jul 8 09:52:51 2013 -0400
revert snashot call in avail check
also log the execution time for avail checks
diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java index 93d758c..5e7692b 100644 --- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java +++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java @@ -128,18 +128,28 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent
@Override public AvailabilityType getAvailability() { - ResourceContext<?> context = getResourceContext(); - ProcessInfo processInfo = context.getNativeProcess(); + long start = System.currentTimeMillis(); + try { + ResourceContext<?> context = getResourceContext(); + ProcessInfo processInfo = context.getNativeProcess();
- if (processInfo == null) { - return UNKNOWN; - } else { - // It is safe to read prior snapshot as getNativeProcess always return a fresh instance - ProcessInfoSnapshot processInfoSnaphot = processInfo.freshSnapshot(); - if (processInfoSnaphot.isRunning()) { - return UP; + if (processInfo == null) { + return UNKNOWN; } else { - return DOWN; + // It is safe to read prior snapshot as getNativeProcess always return a fresh instance + // ProcessInfoSnapshot processInfoSnaphot = processInfo.freshSnapshot(); + if (processInfo.priorSnaphot().isRunning()) { + return UP; + } else { + return DOWN; + } + } + } finally { + long end = System.currentTimeMillis(); + long totalTime = end - start; + log.debug("Finished availability check in " + totalTime + " ms"); + if (totalTime > (1000 * 5)) { + log.warn("Availability check exceeded five seconds. Total time was " + totalTime + " ms"); } } }
commit 2d36e94937dd694ebfb6bab50e57db3717687363 Author: John Sanda jsanda@redhat.com Date: Mon Jul 8 09:46:20 2013 -0400
Create storage nodes from rhq.cassandra.seeds prop during installation
Previously we were reading rhq.cassandra.seeds at server start up and had logic in place to see if there were any changes in the property. If changes were detected (i.e., new node added), cluster maintenance would be scheduled. We really only want to create/persist seeds nodes at installation time though. Once the server has been installed, rhqctl install should be used to install new storage nodes. This simplifies server start up logic and also reduces the chances for screwing up the deployment.
diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java index 8f67ab3..2836964 100644 --- a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java +++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java @@ -98,6 +98,10 @@ public class SchemaManager { return topology.updateTopology(isNewSchema); }
+ public List<StorageNode> getStorageNodes() { + return nodes; + } + private static List<StorageNode> parseNodeInformation(String... nodes) { List<StorageNode> parsedNodes = new ArrayList<StorageNode>(); for (String node : nodes) { diff --git a/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml b/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml index 32824e2..cfe13be 100644 --- a/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml +++ b/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml @@ -675,6 +675,8 @@ rhq.cassandra.password=${rhq.cassandra.password} # # hostname|jmxPort|nativeTransportPort| # +# Note that this is actually an installer setting. Changing the value after +# installation will have no effect. rhq.cassandra.seeds=
# If enabled data sent to and from storage nodes will be compressed using diff --git a/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java b/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java index 4c87f70..284ddbd 100644 --- a/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java +++ b/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java @@ -499,6 +499,9 @@ public class InstallerServiceImpl implements InstallerService {
// ensure the server info is up to date and stored in the DB ServerInstallUtil.storeServerDetails(serverProperties, clearTextDbPassword, serverDetails); + + ServerInstallUtil.persistStorageNodesIfNecessary(serverProperties, clearTextDbPassword, + storageNodeSchemaManager.getStorageNodes()); }
@Override diff --git a/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/ServerInstallUtil.java b/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/ServerInstallUtil.java index 0318e0e..d0f6f3f 100644 --- a/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/ServerInstallUtil.java +++ b/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/ServerInstallUtil.java @@ -68,6 +68,7 @@ import org.rhq.core.db.DbUtil; import org.rhq.core.db.OracleDatabaseType; import org.rhq.core.db.PostgresqlDatabaseType; import org.rhq.core.db.setup.DBSetup; +import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.util.PropertiesFileUpdate; import org.rhq.core.util.exception.ThrowableUtil; import org.rhq.core.util.file.FileUtil; @@ -950,6 +951,89 @@ public class ServerInstallUtil { }
/** + * Persists the storage nodes to the database only if no storage node entities already exist. This method is used + * to persist storage nodes created from the rhq.cassandra.seeds server configuration property. The only time those + * seed nodes should be created is during an initial server installation. After the initial installation storage + * nodes should be created using <code>rhqctl install</code>. This ensures that any necessary cluster maintenance + * tasks will be performed. + * + * @param serverProperties the server properties + * @param password clear text password to connect to the database + * @param storageNodes the {@link StorageNode storage nodes} to persist + * @throws Exception + */ + public static void persistStorageNodesIfNecessary(HashMap<String, String> serverProperties, String password, + List<StorageNode> storageNodes) throws Exception { + DatabaseType db = null; + Connection connection = null; + Statement queryStatement = null; + ResultSet resultSet = null; + PreparedStatement insertStatement = null; + + try { + String dbUrl = serverProperties.get(ServerProperties.PROP_DATABASE_CONNECTION_URL); + String userName = serverProperties.get(ServerProperties.PROP_DATABASE_USERNAME); + connection = getDatabaseConnection(dbUrl, userName, password); + db = DatabaseTypeFactory.getDatabaseType(connection); + + if (!(db instanceof PostgresqlDatabaseType || db instanceof OracleDatabaseType)) { + throw new IllegalArgumentException("Unknown database type, can't continue: " + db); + } + + connection = getDatabaseConnection(dbUrl, userName, password); + queryStatement = connection.createStatement(); + resultSet = queryStatement.executeQuery("SELECT count(id) FROM rhq_storage_node"); + resultSet.next(); + + if (resultSet.getInt(1) == 0) { + connection.setAutoCommit(false); + + try { + LOG.info("Persisting to database new storage nodes for values specified in server configuration " + + "property [rhq.cassandra.seeds]"); + + insertStatement = 
connection.prepareStatement( + "INSERT INTO rhq_storage_node (id, address, jmx_port, cql_port, operation_mode, ctime, mtime) " + + "VALUES (?, ?, ?, ?, ?, ?, ?)" + ); + + int id = 1001; + for (StorageNode storageNode : storageNodes) { + insertStatement.setInt(1, id); + insertStatement.setString(2, storageNode.getAddress()); + insertStatement.setInt(3, storageNode.getJmxPort()); + insertStatement.setInt(4, storageNode.getCqlPort()); + insertStatement.setString(5, StorageNode.OperationMode.INSTALLED.toString()); + insertStatement.setLong(6, System.currentTimeMillis()); + insertStatement.setLong(7, System.currentTimeMillis()); + + insertStatement.executeUpdate(); + id += 1; + } + + connection.commit(); + } catch (SQLException e) { + LOG.error("Failed to persist to database the storage nodes specified by server configuration " + + "property [rhq.cassandra.seeds]. Transaction will be rolled back.", e); + connection.rollback(); + throw e; + } + } else { + LOG.info("Storage nodes already exist in database. Server configuration property " + + "[rhq.cassandra.seeds] will be ignored."); + } + + } finally { + if (db != null) { + db.closeResultSet(resultSet); + db.closeStatement(queryStatement); + db.closeStatement(insertStatement); + db.closeConnection(connection); + } + } + } + + /** * Stores the server details (such as the public endpoint) in the database. If the server definition already * exists, it will be updated; otherwise, a new server will be added to the HA cloud. * @@ -982,6 +1066,7 @@ public class ServerInstallUtil { } }
+ private static void updateOrInsertServer(DatabaseType db, Connection conn, ServerDetails serverDetails) { PreparedStatement stm = null; ResultSet rs = null; diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBeanTest.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBeanTest.java index d44d637..4e04447 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBeanTest.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBeanTest.java @@ -26,8 +26,6 @@ package org.rhq.enterprise.server.cloud;
import static org.rhq.enterprise.server.cloud.StorageNodeManagerBean.STORAGE_NODE_GROUP_NAME; -import static org.rhq.enterprise.server.cloud.StorageNodeManagerBean.STORAGE_NODE_PLUGIN_NAME; -import static org.rhq.enterprise.server.cloud.StorageNodeManagerBean.STORAGE_NODE_RESOURCE_TYPE_NAME;
import java.util.ArrayList; import java.util.Arrays; @@ -81,70 +79,6 @@ public class StorageNodeManagerBeanTest extends AbstractEJB3Test {
private static final String TEST_PREFIX = "test-";
- @Test - public void testInit() throws Exception { - final String cassandraSeedsProperty = "rhq.cassandra.seeds"; - final String originalSeedValue = System.getProperty(cassandraSeedsProperty); - - try { - prepareScheduler(); - cleanDatabase(); - executeInTransaction(new TransactionCallback() { - - @Override - public void execute() throws Exception { - String testHostName = TEST_PREFIX + "hostname"; - List<String> addresses = Arrays.asList(testHostName, TEST_PREFIX + "hostWithNoFoundResource", - TEST_PREFIX + "secondHostWithNoFoundResource"); - System.setProperty(cassandraSeedsProperty, addresses.get(0) + "|123|123," + addresses.get(1) - + "|987|987," + addresses.get(2) + "|123|123"); - - // create the resource type if it doesn't exist - ResourceType testResourceType = typeManager.getResourceTypeByNameAndPlugin("RHQ Storage Node", - "RHQStorage"); - if (testResourceType == null) { - testResourceType = createResourceType(); - } - Resource testResource = createResource(testResourceType, testHostName); - - // finds the storage nodes and pairs them w/ the associated resources - nodeManager.scanForStorageNodes(); - - // get the storage nodes and checks some properties on them - List<StorageNode> storageNodes = nodeManager.getStorageNodes(); - Assert.assertNotNull(storageNodes, "The list of storage nodes shouldn't be null."); - Assert.assertFalse(storageNodes.isEmpty(), "The list of storage nodes shouldn't be empty."); - Assert.assertTrue(storageNodes.size() >= addresses.size(), - "The size of the list of storage nodes should be at least " + addresses.size()); - - List<String> obtainedAddresses = new ArrayList<String>(storageNodes.size()); - for (StorageNode storageNode : storageNodes) { - Assert.assertNotNull(storageNode.getAddress(), "Address of storage node cannot be null."); - obtainedAddresses.add(storageNode.getAddress()); - if (storageNode.getAddress().equals(testHostName)) { - Assert.assertEquals(storageNode.getResource().getId(), testResource.getId()); - 
Assert.assertNotNull(storageNode.getResource(), "Associated resource cannot be null."); - } else { - Assert.assertNull(storageNode.getResource(), - "The resource field should be null at this point."); - } - } - - Assert.assertTrue(obtainedAddresses.containsAll(addresses), - "There are some storage nodes that should be created but were not discovered and returned." - + " The storage nodes that should be returned: " + addresses - + " The storage nodes that were returned: " + obtainedAddresses - + " (the second should be a super-set (not necessarily strict) of the first.)"); - - } - }); - } finally { - unprepareScheduler(); - cleanDatabase(); - System.setProperty(cassandraSeedsProperty, originalSeedValue); - } - } - @Test(groups = "integration.ejb3") public void testStorageNodeCriteriaFinder() throws Exception {
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index b76d054..9d073ee 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -53,16 +53,13 @@ import org.rhq.core.domain.common.JobTrigger; import org.rhq.core.domain.configuration.Configuration; import org.rhq.core.domain.criteria.ResourceGroupCriteria; import org.rhq.core.domain.criteria.StorageNodeCriteria; -import org.rhq.core.domain.measurement.AvailabilityType; import org.rhq.core.domain.measurement.MeasurementAggregate; import org.rhq.core.domain.measurement.MeasurementUnits; import org.rhq.core.domain.operation.bean.GroupOperationSchedule; -import org.rhq.core.domain.resource.InventoryStatus; import org.rhq.core.domain.resource.Resource; import org.rhq.core.domain.resource.ResourceType; import org.rhq.core.domain.resource.group.ResourceGroup; import org.rhq.core.domain.util.PageList; -import org.rhq.core.util.StringUtil; import org.rhq.enterprise.server.RHQConstants; import org.rhq.enterprise.server.auth.SubjectManagerLocal; import org.rhq.enterprise.server.authz.RequiredPermission; @@ -88,22 +85,10 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
private final Log log = LogFactory.getLog(StorageNodeManagerBean.class);
- private static final String RHQ_STORAGE_RESOURCE_TYPE = "RHQ Storage Node"; - private static final String RHQ_STORAGE_PLUGIN = "RHQStorage"; - private static final String RHQ_STORAGE_CQL_PORT_PROPERTY = "nativeTransportPort"; private static final String RHQ_STORAGE_JMX_PORT_PROPERTY = "jmxPort"; private static final String RHQ_STORAGE_ADDRESS_PROPERTY = "host";
- private static final String SEEDS_PROP = "rhq.cassandra.seeds"; - - // The following have package visibility to make accessible to StorageNodeManagerBeanTest - static final String STORAGE_NODE_GROUP_NAME = "RHQ Storage Nodes"; - - static final String STORAGE_NODE_RESOURCE_TYPE_NAME = "RHQ Storage Node"; - - static final String STORAGE_NODE_PLUGIN_NAME = "RHQStorage"; - @PersistenceContext(unitName = RHQConstants.PERSISTENCE_UNIT_NAME) private EntityManager entityManager;
@@ -126,89 +111,6 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN private OperationManagerLocal operationManager;
@Override - public synchronized List<StorageNode> scanForStorageNodes() { - List<StorageNode> existingStorageNodes = getStorageNodes(); - if (log.isDebugEnabled()) { - log.debug("Found existing storage nodes [" + StringUtil.listToString(existingStorageNodes) - + "] in the database"); - } - - String seeds = System.getProperty(SEEDS_PROP); - - if (StringUtil.isEmpty(seeds) && existingStorageNodes.isEmpty()) { - // We need to find storage node connection info from one or the other but not - // necessarily both. If this is a single server deployment where the storage - // node(s) is running on a separate machine, then SEEDS_PROP will have to be set - // manually. And in this scenario during the initial deployment, there will not - // be any storage nodes in the db. In a HA deployment, where there are already - // storage nodes in the db, an RHQ server does not have to have SEEDS_PROP set - // since it can obtain connection info from the storage node table. - throw new IllegalStateException("There are no existing storage nodes defined in the RHQ database and " - + "the system property [" + SEEDS_PROP + "] is not set. The RHQ server will not be able to connect " - + "to the RHQ storage node(s). The [" + SEEDS_PROP + "] property should be defined in " - + "rhq-server.properties."); - } - - List<StorageNode> seedNodes = parseSeedsProperty(seeds); - boolean clusterMaintenanceNeeded = false; - List<StorageNode> newNodes = null; - - if (existingStorageNodes.isEmpty()) { - // This should only happen on the very first server start upon installation. - if (log.isDebugEnabled()) { - log.debug("No storage node entities exist in the database"); - log.debug("Persisting seed nodes [" + StringUtil.listToString(seedNodes) + "]"); - } - createStorageNodeGroup(); - } else { - // There are existing storage nodes but we need to check if the storage node - // group exists. In the case of an upgrade, the group would not yet exist so it - // has to be created now. 
- if (!storageNodeGroupExists()) { - createStorageNodeGroup(); - addExistingStorageNodesToGroup(); - } - - newNodes = findNewStorageNodes(existingStorageNodes, seedNodes); - if (!newNodes.isEmpty()) { - log.info("Detected topology change. New seed nodes will be persisted."); - if (log.isDebugEnabled()) { - log.debug("Persisting new seed nodes [" + StringUtil.listToString(newNodes)); - } - - clusterMaintenanceNeeded = true; - } - } - - Map<String, StorageNode> storageNodeMap = new HashMap<String, StorageNode>(existingStorageNodes.size() - + seedNodes.size()); - for (StorageNode existingStorageNode : existingStorageNodes) { - storageNodeMap.put(existingStorageNode.getAddress(), existingStorageNode); - } - // possibly overide the existing storage nodes with up to date data - for (StorageNode seedNode : seedNodes) { - StorageNode existing = storageNodeMap.get(seedNode.getAddress()); - if (existing != null) { - if (existing.getJmxPort() != seedNode.getJmxPort() || existing.getCqlPort() != seedNode.getCqlPort() - || existing.getResource() != seedNode.getResource()) { - existing.setMtime(new Date().getTime()); - } - seedNode.setResource(existing.getResource()); - } - storageNodeMap.put(seedNode.getAddress(), seedNode); - } - - this.discoverResourceInformation(storageNodeMap); - this.updateStorageNodes(storageNodeMap); - - if (clusterMaintenanceNeeded) { - this.scheduleQuartzJob(existingStorageNodes.size()); - } - - return new ArrayList<StorageNode>(storageNodeMap.values()); - } - - @Override public void linkResource(Resource resource) { List<StorageNode> storageNodes = this.getStorageNodes();
@@ -250,7 +152,8 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN } }
- private void createStorageNodeGroup() { + @Override + public void createStorageNodeGroup() { log.info("Creating resource group [" + STORAGE_NODE_GROUP_NAME + "]");
ResourceGroup group = new ResourceGroup(STORAGE_NODE_GROUP_NAME); @@ -261,6 +164,8 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN group.setRecursive(false);
resourceGroupManager.createResourceGroup(subjectManager.getOverlord(), group); + + addExistingStorageNodesToGroup(); }
private void addExistingStorageNodesToGroup() { @@ -283,13 +188,8 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN new int[] {resource.getId()}); }
- /** - * This method is very similar to {@link #getStorageNodeGroup()} but may be called - * prior to the group being created. - * - * @return true if the storage node resource group exists, false otherwise. - */ - private boolean storageNodeGroupExists() { + @Override + public boolean storageNodeGroupExists() { Subject overlord = subjectManager.getOverlord();
ResourceGroupCriteria criteria = new ResourceGroupCriteria(); @@ -302,14 +202,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN return !groups.isEmpty(); }
- /** - * Note that this method assumes the storage node resource group already exists; as - * such, it should only be called from places in the code that are after the point(s) - * where the group has been created. - * - * @return The storage node resource group. - * @throws IllegalStateException if the group is not found or does not exist. - */ + @Override public ResourceGroup getStorageNodeGroup() { Subject overlord = subjectManager.getOverlord();
@@ -499,24 +392,6 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN return storageNodes; }
- private List<StorageNode> findNewStorageNodes(List<StorageNode> nodes, List<StorageNode> seedNodes) { - if (log.isDebugEnabled()) { - log.debug("Checking system property [" + SEEDS_PROP + "] for any new nodes to be persisted"); - } - List<StorageNode> newNodes = new ArrayList<StorageNode>(); - for (StorageNode seedNode : seedNodes) { - // The contains call should be ok even though it is an O(N) operation because - // the number of storage nodes will be small and this is only done at start up. - if (!nodes.contains(seedNode)) { - if (log.isDebugEnabled()) { - log.debug("Detected new storage node [" + seedNode + "]"); - } - newNodes.add(seedNode); - } - } - return newNodes; - } - private void scheduleQuartzJob(int clusterSize) { String jobName = StorageNodeMaintenanceJob.class.getName(); String jobGroupName = StorageNodeMaintenanceJob.class.getName(); @@ -552,38 +427,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN } entityManager.flush(); } - - private void discoverResourceInformation(Map<String, StorageNode> storageNodeMap) { - TypedQuery<ResourceType> query = entityManager.<ResourceType>createNamedQuery(ResourceType.QUERY_FIND_BY_NAME_AND_PLUGIN, ResourceType.class) - .setParameter("name", RHQ_STORAGE_RESOURCE_TYPE).setParameter("plugin", RHQ_STORAGE_PLUGIN); - List<ResourceType> resourceTypes = query.getResultList(); - - if (resourceTypes.isEmpty()) { - return; - } - - TypedQuery<Resource> resourceQuery = entityManager.<Resource>createNamedQuery(Resource.QUERY_FIND_BY_TYPE_ADMIN, Resource.class).setParameter("type", - resourceTypes.get(0)); - List<Resource> cassandraResources = resourceQuery.getResultList(); - - for (Resource resource : cassandraResources) { - Configuration resourceConfiguration = resource.getPluginConfiguration(); - String host = resourceConfiguration.getSimpleValue(RHQ_STORAGE_ADDRESS_PROPERTY); - - if (host != null && storageNodeMap.containsKey(host)) { - StorageNode storageNode = 
storageNodeMap.get(host); - - storageNode.setResource(resource); - if (resource.getInventoryStatus() == InventoryStatus.NEW) { - storageNode.setOperationMode(OperationMode.INSTALLED); - } else if (resource.getInventoryStatus() == InventoryStatus.COMMITTED - && resource.getCurrentAvailability().getAvailabilityType() == AvailabilityType.UP) { - storageNode.setOperationMode(OperationMode.NORMAL); - } - } - } - } - + private StorageNodeLoadComposite.MeasurementAggregateWithUnits getMeasurementAggregateWithUnits(Subject subject, int schedId, MeasurementUnits units, long beginTime, long endTime) { MeasurementAggregate measurementAggregate = measurementManager.getAggregate(subject, schedId, beginTime, diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java index 52e2424..5c3f092 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java @@ -33,7 +33,10 @@ import org.rhq.core.domain.util.PageList; @Local public interface StorageNodeManagerLocal {
- List<StorageNode> scanForStorageNodes(); + // The following have package visibility to make accessible to StorageNodeManagerBeanTest + String STORAGE_NODE_GROUP_NAME = "RHQ Storage Nodes"; + String STORAGE_NODE_RESOURCE_TYPE_NAME = "RHQ Storage Node"; + String STORAGE_NODE_PLUGIN_NAME = "RHQStorage";
List<StorageNode> getStorageNodes();
@@ -88,6 +91,21 @@ public interface StorageNodeManagerLocal { void runReadRepair();
/** + * Creates the storage node resource group which will be named {@link #STORAGE_NODE_GROUP_NAME}. This method should + * only be called at start up by {@link org.rhq.enterprise.server.storage.StorageClientManagerBean StorageClientManagerBean}. + * Storage node entities created during installation will be added to the group. + */ + void createStorageNodeGroup(); + + /** + * Checks whether or not the storage node resource group exists. This method is very similar to + * {@link #getStorageNodeGroup()} but may be called prior to the group being created. + * + * @return true if the storage node resource group exists, false otherwise. + */ + boolean storageNodeGroupExists(); + + /** * This method assumes the storage node resource group already exists; as such, it should only be called from places * in the code that are after the point(s) where the group has been created. * diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java index 0c76dee..2d48092 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java @@ -86,10 +86,10 @@ public class StorageClientManagerBean {
log.info("Initializing storage client subsystem");
- //decide if there are no storage nodes persisted before doing anything - boolean isNewServerInstall = storageNodeManager.getStorageNodes().isEmpty(); - - storageNodeManager.scanForStorageNodes(); + boolean isNewServerInstall = !storageNodeManager.storageNodeGroupExists(); + if (isNewServerInstall) { + storageNodeManager.createStorageNodeGroup(); + }
String username = getRequiredStorageProperty(USERNAME_PROP); String password = getRequiredStorageProperty(PASSWORD_PROP);
commit d5b6714edf6eacbed95fbf2253de9f4ab5c1b08b Author: Thomas Segismont tsegismo@redhat.com Date: Fri Jul 5 15:09:44 2013 +0200
Bug 910646 - Unable to add more than 100 resources to a Compatible Group
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/ResourceGWTService.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/ResourceGWTService.java index bd6209a..3248186 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/ResourceGWTService.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/ResourceGWTService.java @@ -1,6 +1,6 @@ /* * RHQ Management Platform - * Copyright (C) 2005-2010 Red Hat, Inc. + * Copyright (C) 2005-2013 Red Hat, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify @@ -13,8 +13,8 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ package org.rhq.enterprise.gui.coregui.client.gwt;
@@ -113,4 +113,7 @@ public interface ResourceGWTService extends RemoteService {
List<Integer> uninventoryResources(int[] resourceIds) throws RuntimeException;
+ PageList<Resource> findGroupMemberCandidateResources(ResourceCriteria criteria, int[] alreadySelectedResourceIds) + throws RuntimeException; + } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/inventory/ResourceGroupResourceSelector.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/inventory/ResourceGroupResourceSelector.java index 0882ce2..2bf0883 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/inventory/ResourceGroupResourceSelector.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/inventory/ResourceGroupResourceSelector.java @@ -1,24 +1,20 @@ /* * RHQ Management Platform - * Copyright (C) 2005-2010 Red Hat, Inc. + * Copyright (C) 2005-2013 Red Hat, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation, and/or the GNU Lesser - * General Public License, version 2.1, also as published by the Free - * Software Foundation. + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License and the GNU Lesser General Public License - * for more details. + * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License - * and the GNU Lesser General Public License along with this program; - * if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ package org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.inventory;
@@ -26,24 +22,33 @@ import java.util.Collection; import java.util.HashSet; import java.util.Map;
+import com.google.gwt.user.client.rpc.AsyncCallback; +import com.smartgwt.client.data.DSRequest; +import com.smartgwt.client.data.DSResponse; import com.smartgwt.client.data.Record; +import com.smartgwt.client.rpc.RPCResponse; import com.smartgwt.client.types.Overflow; import com.smartgwt.client.widgets.grid.ListGridRecord;
+import org.rhq.core.domain.criteria.ResourceCriteria; import org.rhq.core.domain.resource.Resource; import org.rhq.core.domain.resource.ResourceType; +import org.rhq.core.domain.util.PageList; +import org.rhq.enterprise.gui.coregui.client.CoreGUI; import org.rhq.enterprise.gui.coregui.client.inventory.resource.AncestryUtil; import org.rhq.enterprise.gui.coregui.client.inventory.resource.ResourceDatasource; import org.rhq.enterprise.gui.coregui.client.inventory.resource.selection.ResourceSelector; import org.rhq.enterprise.gui.coregui.client.inventory.resource.type.ResourceTypeRepository; import org.rhq.enterprise.gui.coregui.client.inventory.resource.type.ResourceTypeRepository.TypesLoadedCallback; +import org.rhq.enterprise.gui.coregui.client.util.RPCDataSource;
/** * @author Jay Shaughnessy */ public class ResourceGroupResourceSelector extends ResourceSelector { + private static final int MAX_AVAILABLE_RECORDS = 300;
- Collection<Resource> resources; + private Collection<Resource> resources;
public ResourceGroupResourceSelector(Collection<Resource> resources, ResourceType resourceTypeFilter, boolean forceResourceTypeFilter) { @@ -96,4 +101,44 @@ public class ResourceGroupResourceSelector extends ResourceSelector { } }
+ @Override + protected int getMaxAvailableRecords() { + return MAX_AVAILABLE_RECORDS; + } + + @Override + protected RPCDataSource<Resource, ResourceCriteria> getDataSource() { + return new SelectedResourcesAwareDataSource(); + } + + private class SelectedResourcesAwareDataSource extends SelectedResourceDataSource { + + @Override + public void executeFetch(final DSRequest request, final DSResponse response, final ResourceCriteria criteria) { + getResourceService().findGroupMemberCandidateResources(criteria, getSelectedResourceIds(), + new AsyncCallback<PageList<Resource>>() { + @Override + public void onFailure(Throwable caught) { + CoreGUI.getErrorHandler().handleError(MSG.view_inventory_resources_loadFailed(), caught); + response.setStatus(RPCResponse.STATUS_FAILURE); + processResponse(request.getRequestId(), response); + } + + @Override + public void onSuccess(PageList<Resource> result) { + dataRetrieved(result, response, request); + } + }); + } + + private int[] getSelectedResourceIds() { + ListGridRecord[] assignedRecords = assignedGrid.getRecords(); + int[] selectedResourceIds = new int[assignedRecords.length]; + for (int i = 0; i < assignedRecords.length; i++) { + ListGridRecord assignedRecord = assignedRecords[i]; + selectedResourceIds[i] = assignedRecord.getAttributeAsInt(getSelectorKey()); + } + return selectedResourceIds; + } + } } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/selection/ResourceSelector.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/selection/ResourceSelector.java index 90511d7..03b70b3 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/selection/ResourceSelector.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/selection/ResourceSelector.java @@ -1,6 +1,6 @@ /* * RHQ Management 
Platform - * Copyright (C) 2005-2010 Red Hat, Inc. + * Copyright (C) 2005-2013 Red Hat, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify @@ -13,8 +13,8 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ package org.rhq.enterprise.gui.coregui.client.inventory.resource.selection;
@@ -185,7 +185,7 @@ public class ResourceSelector extends AbstractSelector<Resource, ResourceCriteri return true; }
- private class SelectedResourceDataSource extends ResourceDatasource { + protected class SelectedResourceDataSource extends ResourceDatasource {
@Override protected ResourceCriteria getFetchCriteria(final DSRequest request) { diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/ResourceGWTServiceImpl.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/ResourceGWTServiceImpl.java index 456a69a..dfa1b9f 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/ResourceGWTServiceImpl.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/ResourceGWTServiceImpl.java @@ -1,6 +1,6 @@ /* * RHQ Management Platform - * Copyright (C) 2005-2011 Red Hat, Inc. + * Copyright (C) 2005-2013 Red Hat, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify @@ -13,8 +13,8 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ package org.rhq.enterprise.gui.coregui.server.gwt;
@@ -463,4 +463,17 @@ public class ResourceGWTServiceImpl extends AbstractGWTServiceImpl implements Re throw getExceptionToThrowToClient(t); } } + + @Override + public PageList<Resource> findGroupMemberCandidateResources(ResourceCriteria criteria, + int[] alreadySelectedResourceIds) throws RuntimeException { + try { + PageList<Resource> result = resourceManager.findGroupMemberCandidateResources(getSessionSubject(), + criteria, alreadySelectedResourceIds); + ObjectFilter.filterFieldsInCollection(result, importantFieldsSet); + return SerialUtility.prepare(result, "ResourceService.findResourcesByCriteria"); + } catch (Throwable t) { + throw getExceptionToThrowToClient(t); + } + } } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/ResourceManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/ResourceManagerBean.java index e3a89a4..0c8594d 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/ResourceManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/ResourceManagerBean.java @@ -1,6 +1,6 @@ /* * RHQ Management Platform - * Copyright (C) 2005-2010 Red Hat, Inc. + * Copyright (C) 2005-2013 Red Hat, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify @@ -13,11 +13,15 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ package org.rhq.enterprise.server.resource;
+import static org.rhq.core.domain.criteria.Criteria.Restriction.COLLECTION_ONLY; +import static org.rhq.core.domain.criteria.Criteria.Restriction.COUNT_ONLY; +import static org.rhq.enterprise.server.util.CriteriaQueryGenerator.getPageControl; + import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -30,6 +34,7 @@ import java.util.LinkedList; import java.util.List; import java.util.ListIterator; import java.util.Map; +import java.util.Set;
import javax.ejb.EJB; import javax.ejb.Stateless; @@ -2921,4 +2926,47 @@ public class ResourceManagerBean implements ResourceManagerLocal, ResourceManage return reports; }
+ @Override + public PageList<Resource> findGroupMemberCandidateResources(Subject subject, ResourceCriteria criteria, + int[] alreadySelectedResourceIds) { + + Set<Integer> alreadySelectedResourceIdSet = new HashSet<Integer>( + ArrayUtils.wrapInList(alreadySelectedResourceIds)); + + PageControl originalPageControl = getPageControl(criteria); + PageControl pageControl = (PageControl) originalPageControl.clone(); + criteria.setPageControl(pageControl); + + int requiredPageSize = pageControl.getPageSize(); + criteria.setRestriction(COUNT_ONLY); + int totalSize = findResourcesByCriteria(subject, criteria).getTotalSize(); + int totalPages = (totalSize / requiredPageSize) + (((totalSize % requiredPageSize) > 0) ? 1 : 0); + + criteria.setRestriction(COLLECTION_ONLY); + List<Resource> candidates = new LinkedList<Resource>(); + for (int pageNumber = 0; candidates.size() < requiredPageSize && pageNumber < totalPages; pageNumber++) { + pageControl.setPageNumber(pageNumber); + PageList<Resource> foundResources = findResourcesByCriteria(subject, criteria); + Collection<Resource> filteredResources = filterOutAlreadySelectedResources(foundResources, + alreadySelectedResourceIdSet); + + candidates.addAll(filteredResources); + } + if (candidates.size() > requiredPageSize) { + candidates = candidates.subList(0, requiredPageSize); + } + + return new PageList<Resource>(candidates, totalSize, originalPageControl); + } + + private Collection<Resource> filterOutAlreadySelectedResources(Collection<Resource> foundResources, + Collection<Integer> alreadySelectedResourceIds) { + List<Resource> result = new LinkedList<Resource>(); + for (Resource foundResource : foundResources) { + if (!alreadySelectedResourceIds.contains(foundResource.getId())) { + result.add(foundResource); + } + } + return result; + } } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/ResourceManagerLocal.java 
b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/ResourceManagerLocal.java index 0d4dd15..b6db0fd 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/ResourceManagerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/ResourceManagerLocal.java @@ -1,6 +1,6 @@ /* * RHQ Management Platform - * Copyright (C) 2005-2010 Red Hat, Inc. + * Copyright (C) 2005-2013 Red Hat, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify @@ -13,8 +13,8 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * along with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ package org.rhq.enterprise.server.resource;
@@ -54,7 +54,6 @@ import org.rhq.core.domain.util.PageControl; import org.rhq.core.domain.util.PageList; import org.rhq.core.util.IntExtractor; import org.rhq.enterprise.server.resource.disambiguation.DisambiguationUpdateStrategy; -import org.rhq.enterprise.server.resource.disambiguation.Disambiguator; import org.rhq.enterprise.server.resource.group.ResourceGroupNotFoundException;
/** @@ -220,16 +219,9 @@ public interface ResourceManagerLocal { PageList<Resource> findChildResourcesByCategoryAndInventoryStatus(Subject user, Resource parent, ResourceCategory category, InventoryStatus status, PageControl pageControl);
- /** - * - * @see ResourceManagerRemote#findResourcesByCategory(Subject, ResourceCategory, InventoryStatus, PageControl) - */ PageList<Resource> findResourcesByCategory(Subject user, ResourceCategory category, InventoryStatus inventoryStatus, PageControl pageControl);
- /** - * @see ResourceManagerRemote#findResourceComposites(Subject, ResourceCategory, String, int, String, PageControl) - */ PageList<ResourceComposite> findResourceComposites(Subject user, ResourceCategory category, String typeName, String pluginName, Resource parentResource, String searchString, boolean attachParentResource, PageControl pageControl); @@ -542,7 +534,6 @@ public interface ResourceManagerLocal { Resource getParentResource(Subject subject, int resourceId);
/** - * @see Disambiguator#disambiguate(List, boolean, IntExtractor, javax.persistence.EntityManager) * @return the disambiguation result or null on error */ <T> List<DisambiguationReport<T>> disambiguate(List<T> results, IntExtractor<? super T> resourceIdExtractor, @@ -557,4 +548,7 @@ public interface ResourceManagerLocal { List<Integer> disableResources(Subject subject, int[] resourceIds);
List<Integer> enableResources(Subject subject, int[] resourceIds); -} \ No newline at end of file + + PageList<Resource> findGroupMemberCandidateResources(Subject subject, ResourceCriteria criteria, + int[] alreadySelectedResourceIds); +}
commit c19b6ea19c342474b0cc43733fc8eec5ae1c955f Author: Stefan Negrea snegrea@redhat.com Date: Thu Jul 4 02:47:49 2013 -0500
Update the way the percentage of disk space used is calculated. It is now based on the aggregate usage of all the partitions that have data files. Also, this is based on the total disk use, not just Cassandra. Added an alert based on this metric.
Added a new metric to the storage node composite that aggregates all the metrics that refer to storage related to Cassandra: data files, commit logs, row cache, and key cache. This will be used for CLI and web only; it will not be a plugin metric.
Updated the plugin descriptor to collect metrics related to disk space usage.
Minor other code refactorings and updates.
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeLoadComposite.java b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeLoadComposite.java index 379eba6..0913f1d 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeLoadComposite.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeLoadComposite.java @@ -36,19 +36,20 @@ public class StorageNodeLoadComposite implements Serializable { private StorageNode storageNode; private long beginTime; private long endTime; - + private MeasurementAggregateWithUnits heapCommitted; private MeasurementAggregateWithUnits heapUsed; private MeasurementAggregateWithUnits heapPercentageUsed; private MeasurementAggregateWithUnits load; - private MeasurementAggregateWithUnits diskSpacePercentageUsed; - private MeasurementAggregate tokens; + private MeasurementAggregateWithUnits partitionDiskUsedPercentage; + private MeasurementAggregateWithUnits dataDiskUsed; + private MeasurementAggregate tokens; private MeasurementAggregateWithUnits actuallyOwns;
public StorageNodeLoadComposite() { // GWT needs this } - + public StorageNodeLoadComposite(StorageNode storageNode, long beginTime, long endTime) { this.storageNode = storageNode; this.beginTime = beginTime; @@ -114,15 +115,26 @@ public class StorageNodeLoadComposite implements Serializable { }
/** - * @return A computed metric for the space used on disk by all SSTables of all column families expressed as a - * percentage. + * @return A computed metric for the percentage of disk space used on the partition that contains the SSTables. + * If multiple data locations are configured then the partition with the highest utilization will be reported. */ - public MeasurementAggregateWithUnits getDiskSpacePercentageUsed() { - return diskSpacePercentageUsed; + public MeasurementAggregateWithUnits getPartitionDiskUsedPercentage() { + return partitionDiskUsedPercentage; }
- public void setDiskSpacePercentageUsed(MeasurementAggregateWithUnits diskSpacePercentageUsed) { - this.diskSpacePercentageUsed = diskSpacePercentageUsed; + public void setPartitionDiskUsedPercentage(MeasurementAggregateWithUnits partitionDiskUsedPercentage) { + this.partitionDiskUsedPercentage = partitionDiskUsedPercentage; + } + + /** + * @return A computed metric for the space used on disk by all data files, commit logs, and saved caches. + */ + public MeasurementAggregateWithUnits getDataDiskUsed() { + return dataDiskUsed; + } + + public void setDataDiskUsed(MeasurementAggregateWithUnits dataDiskUsed) { + this.dataDiskUsed = dataDiskUsed; }
/** @@ -161,38 +173,32 @@ public class StorageNodeLoadComposite implements Serializable { this.actuallyOwns = actuallyOwns; }
+ /* (non-Javadoc) + * @see java.lang.Object#toString() + */ public String toString() { StringBuilder builder = new StringBuilder(); - // gwt doesn't support String.format -// builder.append("average values for last "); -// builder.append((endtime - begintime) / (1000 * 60 * 60)); -// builder.append(" hours"); -// builder.append("\naddress load tokens owns (effective)\n"); -// builder.append(string.format("%15s", storagenode.getaddress())); -// builder.append(string.format("%11s", load.getaggregate().getavg())).append(" ").append(load.getunits().getname()); -// builder.append(string.format("%8s", tokens.getavg())); -// builder.append(string.format("%16s", actuallyowns.getavg())); - builder.append("storageNode.addresss=").append(storageNode.getAddress()).append(", "); builder.append("beginTime=").append(beginTime).append(", "); builder.append("heapCommitted=").append(heapCommitted).append(", "); builder.append("heapUsed=").append(heapUsed).append(", "); builder.append("heapPercentageUsed=").append(heapPercentageUsed).append(", "); builder.append("load=").append(load).append(", "); - builder.append("diskSpacePercentageUsed=").append(diskSpacePercentageUsed).append(", "); + builder.append("partitionDiskUsedPercentage=").append(partitionDiskUsedPercentage).append(", "); + builder.append("dataDiskUsed=").append(dataDiskUsed).append(", "); builder.append("tokens=").append(tokens).append(", "); builder.append("actuallyOwns=").append(actuallyOwns); return builder.toString(); }
- + public static class MeasurementAggregateWithUnits implements Serializable { private static final long serialVersionUID = 1L; - + private MeasurementAggregate aggregate; private MeasurementUnits units; private String formattedValue; - + public MeasurementAggregateWithUnits() { // GWT needs this } @@ -209,7 +215,7 @@ public class StorageNodeLoadComposite implements Serializable { public MeasurementUnits getUnits() { return units; } - + public void setFormattedValue(String formattedValue) { this.formattedValue = formattedValue; } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java index f267602..7d413fd 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java @@ -61,7 +61,7 @@ import org.rhq.enterprise.server.measurement.util.MeasurementUtils;
/** * Datasource for @see StorageNode. - * + * * @author Jirka Kremser */ public class StorageNodeDatasource extends RPCDataSource<StorageNode, StorageNodeCriteria> { @@ -104,7 +104,7 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNode, StorageNod ListGridField createdTimeField = FIELD_CTIME.getListGridField("120"); TimestampCellFormatter.prepareDateField(createdTimeField); fields.add(createdTimeField); - + ListGridField lastUpdateTimeField = FIELD_MTIME.getListGridField("120"); TimestampCellFormatter.prepareDateField(lastUpdateTimeField); fields.add(lastUpdateTimeField); @@ -142,7 +142,7 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNode, StorageNod */ @Override protected PageControl getPageControl(DSRequest request) { - // Initialize paging. + // Initialize paging. PageControl pageControl = new PageControl(0, getDataPageSize());
// Initialize sorting. @@ -197,7 +197,7 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNode, StorageNod
return criteria; } - + public static class StorageNodeLoadCompositeDatasource extends RPCDataSource<StorageNodeLoadComposite, StorageNodeCriteria> { public static final String HEAP_PERCENTAGE_KEY = "heapPercentage"; public static final String DISK_SPACE_PERCENTAGE_KEY = "diskSpacePercentage"; @@ -205,12 +205,12 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNode, StorageNod
public static StorageNodeLoadCompositeDatasource getInstance(int id) { // if (instance == null) { -// instance = + // instance = return new StorageNodeLoadCompositeDatasource(id); // } // return instance; } - + public StorageNodeLoadCompositeDatasource(int id) { super(); this.id = id; @@ -235,7 +235,7 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNode, StorageNod fields.add(idField); return fields; } - + public List<ListGridField> getListGridFields() { List<ListGridField> fields = new ArrayList<ListGridField>(); ListGridField idField = FIELD_ID.getListGridField(); @@ -259,8 +259,7 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNode, StorageNod fields.add(hoverField); return fields; } - - + @Override protected void executeFetch(final DSRequest request, final DSResponse response, StorageNodeCriteria criteria) { // Integer id = getFilter(request, FIELD_ID.propertyName(), Integer.class); @@ -286,7 +285,6 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNode, StorageNod
private ListGridRecord[] makeListGridRecords(StorageNodeLoadComposite loadComposite) { List<ListGridRecord> recordsList = new ArrayList<ListGridRecord>(6); - @SuppressWarnings("unchecked") List<List<Object>> loadFields = Arrays .<List<Object>> asList( Arrays.<Object> asList(loadComposite.getHeapCommitted(), "Heap Maximum", @@ -298,10 +296,16 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNode, StorageNod "This value is calculated by dividing Heap Used by Heap Maximum.", HEAP_PERCENTAGE_KEY), Arrays.<Object> asList(loadComposite.getLoad(), "Load", "Data stored on the node", "load"), Arrays.<Object> asList( - loadComposite.getDiskSpacePercentageUsed(), + loadComposite.getPartitionDiskUsedPercentage(), "Disk Space Percent Used", - "How much of diskspace is already used. This takes into account the installation path, where Cassandra was installed.", - DISK_SPACE_PERCENTAGE_KEY), Arrays.<Object> asList(loadComposite.getActuallyOwns(), + "Percentage of total disk space used for the partition that contains the data files.If multiple data locations are specified then this will report the average utilization accross all the partitions.", + DISK_SPACE_PERCENTAGE_KEY), + Arrays.<Object> asList( + loadComposite.getDataDiskUsed(), + "Total Disk Space Used", + "Total space used on disk by all data files, commit logs, and saved caches.", + "totaldisk"), + Arrays.<Object> asList(loadComposite.getActuallyOwns(), "Ownership", "Refers to the percentage of keys that a node owns.", "ownership")); for (List<Object> aggregateWithUnitsList : loadFields) { if (aggregateWithUnitsList.get(0) != null) { @@ -346,14 +350,13 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNode, StorageNod record.setAttribute("hover", hover); return record; } - - + @Override protected StorageNodeCriteria getFetchCriteria(DSRequest request) { return new StorageNodeCriteria(); // throw new 
UnsupportedOperationException("StorageNodeDatasource.StorageNodeLoadCompositeDatasource.getFetchCriteria()"); } - + @Override public StorageNodeLoadComposite copyValues(Record from) { throw new UnsupportedOperationException("StorageNodeDatasource.StorageNodeLoadCompositeDatasource.copyValues(Record from)"); @@ -363,6 +366,6 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNode, StorageNod public ListGridRecord copyValues(StorageNodeLoadComposite from) { throw new UnsupportedOperationException("StorageNodeDatasource.StorageNodeLoadCompositeDatasource.copyValues(StorageNodeLoadComposite from)"); } - + } } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index b21946a..b76d054 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -332,71 +332,96 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN @Override @RequiredPermission(Permission.MANAGE_SETTINGS) public StorageNodeLoadComposite getLoad(Subject subject, StorageNode node, long beginTime, long endTime) { - StorageNodeLoadComposite result = new StorageNodeLoadComposite(node, beginTime, endTime); - final String tokensMetric = "Tokens", ownershipMetric = "Ownership", loadMetric = "Load", diskUsedPercentageMetric = "Calculated.DiskSpaceUsedPercentage"; - final String heapCommittedMetric = "{HeapMemoryUsage.committed}", heapUsedMetric = "{HeapMemoryUsage.used}", heapUsedPercentageMetric = "Calculated.HeapUsagePercentage"; - int resourceId = getResourceIdFromStorageNode(node); + Map<String, Integer> scheduleIdsMap = new HashMap<String, Integer>();
// get the schedule ids for Storage Service resource + final String tokensMetric = "Tokens", ownershipMetric = "Ownership", diskUsedPercentageMetric = "Calculated.PartitionDiskUsedPercentage"; + final String loadMetric = "Load", keyCacheSize = "KeyCacheSize", rowCacheSize = "RowCacheSize", totalCommitLogSize = "TotalCommitlogSize"; TypedQuery<Object[]> query = entityManager.<Object[]> createNamedQuery( StorageNode.QUERY_FIND_SCHEDULE_IDS_BY_PARENT_RESOURCE_ID_AND_MEASUREMENT_DEFINITION_NAMES, Object[].class); query.setParameter("parrentId", resourceId).setParameter("metricNames", - Arrays.asList(tokensMetric, ownershipMetric, loadMetric, diskUsedPercentageMetric)); - List<Object[]> scheduleIds = query.getResultList(); - Map<String, Integer> scheduleIdsMap = new HashMap<String, Integer>(4); - for (Object[] pair : scheduleIds) { + Arrays.asList(tokensMetric, ownershipMetric, diskUsedPercentageMetric, loadMetric, keyCacheSize, + rowCacheSize, totalCommitLogSize)); + for (Object[] pair : query.getResultList()) { scheduleIdsMap.put((String) pair[0], (Integer) pair[1]); }
// get the schedule ids for Memory Subsystem resource + final String heapCommittedMetric = "{HeapMemoryUsage.committed}", heapUsedMetric = "{HeapMemoryUsage.used}", heapUsedPercentageMetric = "Calculated.HeapUsagePercentage"; query = entityManager.<Object[]> createNamedQuery( StorageNode.QUERY_FIND_SCHEDULE_IDS_BY_GRANDPARENT_RESOURCE_ID_AND_MEASUREMENT_DEFINITION_NAMES, Object[].class); query.setParameter("grandparrentId", resourceId).setParameter("metricNames", Arrays.asList(heapCommittedMetric, heapUsedMetric, heapUsedPercentageMetric)); - scheduleIds = query.getResultList(); - for (Object[] pair : scheduleIds) { + for (Object[] pair : query.getResultList()) { scheduleIdsMap.put((String) pair[0], (Integer) pair[1]); }
+ + StorageNodeLoadComposite result = new StorageNodeLoadComposite(node, beginTime, endTime); + MeasurementAggregate totalDiskUsedaggregate = new MeasurementAggregate(0d, 0d, 0d); + Integer scheduleId = null; + // find the aggregates and enrich the result instance if (!scheduleIdsMap.isEmpty()) { - if (scheduleIdsMap.get(tokensMetric) != null) { - MeasurementAggregate tokensAggregate = measurementManager.getAggregate(subject, - scheduleIdsMap.get(tokensMetric), beginTime, endTime); + if ((scheduleId = scheduleIdsMap.get(tokensMetric)) != null) { + MeasurementAggregate tokensAggregate = measurementManager.getAggregate(subject, scheduleId, beginTime, + endTime); result.setTokens(tokensAggregate); } - if (scheduleIdsMap.get(ownershipMetric) != null) { + if ((scheduleId = scheduleIdsMap.get(ownershipMetric)) != null) { StorageNodeLoadComposite.MeasurementAggregateWithUnits ownershipAggregateWithUnits = getMeasurementAggregateWithUnits( - subject, scheduleIdsMap.get(ownershipMetric), MeasurementUnits.PERCENTAGE, beginTime, endTime); + subject, scheduleId, MeasurementUnits.PERCENTAGE, beginTime, endTime); result.setActuallyOwns(ownershipAggregateWithUnits); } - if (scheduleIdsMap.get(loadMetric) != null) { + if ((scheduleId = scheduleIdsMap.get(diskUsedPercentageMetric)) != null) { + StorageNodeLoadComposite.MeasurementAggregateWithUnits diskUsedPercentageAggregateWithUnits = getMeasurementAggregateWithUnits( + subject, scheduleId, MeasurementUnits.PERCENTAGE, beginTime, endTime); + result.setPartitionDiskUsedPercentage(diskUsedPercentageAggregateWithUnits); + } + + if ((scheduleId = scheduleIdsMap.get(loadMetric)) != null) { StorageNodeLoadComposite.MeasurementAggregateWithUnits loadAggregateWithUnits = getMeasurementAggregateWithUnits( - subject, scheduleIdsMap.get(loadMetric), MeasurementUnits.BYTES, beginTime, endTime); + subject, scheduleId, MeasurementUnits.BYTES, beginTime, endTime); result.setLoad(loadAggregateWithUnits); + + 
updateAggregateTotal(totalDiskUsedaggregate, loadAggregateWithUnits.getAggregate()); } - if (scheduleIdsMap.get(diskUsedPercentageMetric) != null) { - StorageNodeLoadComposite.MeasurementAggregateWithUnits diskUsedPercentageAggregateWithUnits = getMeasurementAggregateWithUnits( - subject, scheduleIdsMap.get(diskUsedPercentageMetric), MeasurementUnits.PERCENTAGE, beginTime, - endTime); - result.setDiskSpacePercentageUsed(diskUsedPercentageAggregateWithUnits); + if ((scheduleId = scheduleIdsMap.get(keyCacheSize)) != null) { + updateAggregateTotal(totalDiskUsedaggregate, + measurementManager.getAggregate(subject, scheduleId, beginTime, endTime)); + } + if ((scheduleId = scheduleIdsMap.get(rowCacheSize)) != null) { + updateAggregateTotal(totalDiskUsedaggregate, + measurementManager.getAggregate(subject, scheduleId, beginTime, endTime)); + } + if ((scheduleId = scheduleIdsMap.get(totalCommitLogSize)) != null) { + updateAggregateTotal(totalDiskUsedaggregate, + measurementManager.getAggregate(subject, scheduleId, beginTime, endTime)); + } + + if (totalDiskUsedaggregate.getMax() > 0) { + StorageNodeLoadComposite.MeasurementAggregateWithUnits totalDiskUsedAggregateWithUnits = new StorageNodeLoadComposite.MeasurementAggregateWithUnits( + totalDiskUsedaggregate, MeasurementUnits.BYTES); + totalDiskUsedAggregateWithUnits.setFormattedValue(getSummaryString(totalDiskUsedaggregate, + MeasurementUnits.BYTES)); + result.setDataDiskUsed(totalDiskUsedAggregateWithUnits); }
- if (scheduleIdsMap.get(heapCommittedMetric) != null) { + if ((scheduleId = scheduleIdsMap.get(heapCommittedMetric)) != null) { StorageNodeLoadComposite.MeasurementAggregateWithUnits heapCommittedAggregateWithUnits = getMeasurementAggregateWithUnits( - subject, scheduleIdsMap.get(heapCommittedMetric), MeasurementUnits.BYTES, beginTime, endTime); + subject, scheduleId, MeasurementUnits.BYTES, beginTime, endTime); result.setHeapCommitted(heapCommittedAggregateWithUnits); } - if (scheduleIdsMap.get(heapUsedMetric) != null) { + if ((scheduleId = scheduleIdsMap.get(heapUsedMetric)) != null) { StorageNodeLoadComposite.MeasurementAggregateWithUnits heapUsedAggregateWithUnits = getMeasurementAggregateWithUnits( - subject, scheduleIdsMap.get(heapUsedMetric), MeasurementUnits.BYTES, beginTime, endTime); + subject, scheduleId, MeasurementUnits.BYTES, beginTime, endTime); result.setHeapUsed(heapUsedAggregateWithUnits); } - if (scheduleIdsMap.get(heapUsedPercentageMetric) != null) { + if ((scheduleId = scheduleIdsMap.get(heapUsedPercentageMetric)) != null) { StorageNodeLoadComposite.MeasurementAggregateWithUnits heapUsedPercentageAggregateWithUnits = getMeasurementAggregateWithUnits( - subject, scheduleIdsMap.get(heapUsedPercentageMetric), MeasurementUnits.PERCENTAGE, beginTime, + subject, scheduleId, MeasurementUnits.PERCENTAGE, beginTime, endTime); result.setHeapPercentageUsed(heapUsedPercentageAggregateWithUnits); } @@ -405,13 +430,28 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN return result; }
+ /** + * @param accumulator + * @param input + */ + private void updateAggregateTotal(MeasurementAggregate accumulator, MeasurementAggregate input) { + if (accumulator != null && input != null + && input.getMax() != null && !Double.isNaN(input.getMax()) + && input.getMin() != null && !Double.isNaN(input.getMin()) + && input.getAvg() != null && !Double.isNaN(input.getAvg())) { + accumulator.setAvg(accumulator.getAvg() + input.getAvg()); + accumulator.setMax(accumulator.getMax() + input.getMax()); + accumulator.setMin(accumulator.getMin() + input.getMin()); + } + } + @Override public List<StorageNode> getStorageNodes() { TypedQuery<StorageNode> query = entityManager.<StorageNode> createNamedQuery(StorageNode.QUERY_FIND_ALL, StorageNode.class); return query.getResultList(); } - + @Override @RequiredPermission(Permission.MANAGE_SETTINGS) public PageList<StorageNode> findStorageNodesByCriteria(Subject subject, StorageNodeCriteria criteria) { @@ -432,7 +472,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN Server server = serverManager.getServer(); // setting the server mode to maintenance topologyManager.updateServerMode(subject, new Integer[] { server.getId() }, Server.OperationMode.MAINTENANCE); - + Configuration parameters = new Configuration(); parameters.setSimpleValue("snapshotName", String.valueOf(System.currentTimeMillis())); // scheduling the operation @@ -543,7 +583,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN } } } - + private StorageNodeLoadComposite.MeasurementAggregateWithUnits getMeasurementAggregateWithUnits(Subject subject, int schedId, MeasurementUnits units, long beginTime, long endTime) { MeasurementAggregate measurementAggregate = measurementManager.getAggregate(subject, schedId, beginTime, @@ -553,7 +593,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN 
measurementAggregateWithUnits.setFormattedValue(getSummaryString(measurementAggregate, units)); return measurementAggregateWithUnits; } - + private int getResourceIdFromStorageNode(StorageNode storageNode) { int resourceId; if (storageNode.getResource() == null) { diff --git a/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java b/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java index 566dfe2..1cff654 100644 --- a/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java +++ b/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java @@ -54,15 +54,19 @@ import org.rhq.enterprise.server.util.LookupUtil;
/** * An alert definition server-side plugin component that the server uses to inject "factory-installed" alert definitions. - * + * * @author Jay Shaughnessy */ public class AlertDefinitionServerPluginComponent implements ServerPluginComponent, ControlFacet {
private final Log log = LogFactory.getLog(AlertDefinitionServerPluginComponent.class);
+ private static final String PARTITION_DISK_USED_PERCENTAGE_METRIC_NAME = "Calculated.PartitionDiskUsedPercentage"; + private static final String DATA_FILE_LOCATIONS_NAME = "AllDataFileLocations"; + static private final List<InjectedTemplate> injectedTemplates; static private final InjectedTemplate storageNodeHighHeapTemplate; + static private final InjectedTemplate storageNodeHighDiskUsageTemplate;
static { storageNodeHighHeapTemplate = new InjectedTemplate( @@ -71,8 +75,15 @@ public class AlertDefinitionServerPluginComponent implements ServerPluginCompone "StorageNodeHighHeapTemplate", // "An alert template to notify users of excessive heap use by an RHQ Storage Node. When fired please see documentation for the proper corrective action.");
+ storageNodeHighDiskUsageTemplate = new InjectedTemplate( + "RHQStorage", // + "StorageService", // + "StorageNodeHighDiskUsageTemplate", // + "An alert template to notify users of excessive heap use by an RHQ Storage Node. When fired please see documentation for the proper corrective action."); + injectedTemplates = new ArrayList<InjectedTemplate>(); injectedTemplates.add(storageNodeHighHeapTemplate); + injectedTemplates.add(storageNodeHighDiskUsageTemplate); }
private ServerPluginContext context; @@ -213,6 +224,8 @@ public class AlertDefinitionServerPluginComponent implements ServerPluginCompone
if (storageNodeHighHeapTemplate.equals(injectedAlertDef)) { newAlertDefId = injectStorageNodeHighHeapTemplate(resourceType); + } else if (storageNodeHighDiskUsageTemplate.equals(injectedAlertDef)) { + newAlertDefId = injectStorageNodeHighDiskUsageTemplate(resourceType); }
adc.addFilterId(newAlertDefId); @@ -273,6 +286,57 @@ public class AlertDefinitionServerPluginComponent implements ServerPluginCompone return newTemplateId; }
+ private int injectStorageNodeHighDiskUsageTemplate(ResourceType resourceType) { + AlertTemplateManagerLocal alertTemplateManager = LookupUtil.getAlertTemplateManager(); + SubjectManagerLocal subjectManager = LookupUtil.getSubjectManager(); + + AlertDefinition newTemplate = new AlertDefinition(); + newTemplate.setName(storageNodeHighDiskUsageTemplate.getName()); + newTemplate.setResourceType(resourceType); + newTemplate.setPriority(AlertPriority.MEDIUM); + newTemplate.setConditionExpression(BooleanExpression.ANY); + newTemplate.setRecoveryId(0); + newTemplate.setEnabled(true); + + AlertCondition ac = new AlertCondition(); + ac.setCategory(AlertConditionCategory.THRESHOLD); + ac.setComparator(">"); + ac.setThreshold(0.75D); + + List<Integer> measurementDefinitionIds = new ArrayList<Integer>(1); + for (MeasurementDefinition d : resourceType.getMetricDefinitions()) { + if (PARTITION_DISK_USED_PERCENTAGE_METRIC_NAME.equals(d.getName())) { + measurementDefinitionIds.add(d.getId()); + ac.setMeasurementDefinition(d); + ac.setName(d.getDisplayName()); + } else if (DATA_FILE_LOCATIONS_NAME.equals(d.getName())) { + measurementDefinitionIds.add(d.getId()); + } + } + assert null != ac.getMeasurementDefinition() : "Did not find expected measurement definition " + + PARTITION_DISK_USED_PERCENTAGE_METRIC_NAME + " for " + + resourceType; + newTemplate.addCondition(ac); + + AlertDampening dampener = new AlertDampening(AlertDampening.Category.PARTIAL_COUNT); + dampener.setPeriod(15); + dampener.setPeriodUnits(TimeUnits.MINUTES); + dampener.setValue(10); + newTemplate.setAlertDampening(dampener); + + int newTemplateId = alertTemplateManager.createAlertTemplate(subjectManager.getOverlord(), newTemplate, + resourceType.getId()); + + // additionally, we want to ensure that the metric is enabled and collecting at a more frequent interval than + // is set by default. 
+ MeasurementScheduleManagerLocal measurementManager = LookupUtil.getMeasurementScheduleManager(); + measurementManager.updateDefaultCollectionIntervalAndEnablementForMeasurementDefinitions( + subjectManager.getOverlord(), ArrayUtils.toPrimitive(measurementDefinitionIds.toArray(new Integer[2])), + 60000L, true, true); + + return newTemplateId; + } + private static class InjectedTemplate { static public final String FIELD_PLUGIN_NAME = "plugin"; static public final String FIELD_RESOURCE_TYPE_NAME = "type"; diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/StorageServiceComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/StorageServiceComponent.java index b2987be..7281f9b 100644 --- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/StorageServiceComponent.java +++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/StorageServiceComponent.java @@ -29,9 +29,10 @@ import static org.rhq.core.domain.measurement.AvailabilityType.DOWN; import static org.rhq.core.domain.measurement.AvailabilityType.UNKNOWN; import static org.rhq.core.domain.measurement.AvailabilityType.UP;
-import java.io.File; import java.net.InetAddress; import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.List; import java.util.Map; import java.util.Set;
@@ -48,19 +49,20 @@ import org.rhq.core.domain.measurement.MeasurementReport; import org.rhq.core.domain.measurement.MeasurementScheduleRequest; import org.rhq.core.pluginapi.inventory.ResourceContext; import org.rhq.core.pluginapi.operation.OperationResult; +import org.rhq.core.system.FileSystemInfo; import org.rhq.plugins.jmx.JMXComponent;
/** * @author John Sanda */ public class StorageServiceComponent extends ComplexConfigurationResourceComponent { - + private static final String OWNERSHIP_METRIC_NAME = "Ownership"; - private static final String DISK_USED_METRIC_NAME = "Calculated.DiskSpaceUsedPercentage"; + private static final String PARTITION_DISK_USED_PERCENTAGE_METRIC_NAME = "Calculated.PartitionDiskUsedPercentage"; private static final String DATA_FILE_LOCATIONS_NAME = "AllDataFileLocations"; private Log log = LogFactory.getLog(StorageServiceComponent.class); private InetAddress host; - + @Override public void start(ResourceContext<JMXComponent<?>> context) { super.start(context); @@ -73,7 +75,7 @@ public class StorageServiceComponent extends ComplexConfigurationResourceCompone e); } } - + @Override public AvailabilityType getAvailability() { ResourceContext<?> context = getResourceContext(); @@ -148,7 +150,7 @@ public class StorageServiceComponent extends ComplexConfigurationResourceCompone
return new OperationResult(); } - + @Override protected void getValues(MeasurementReport report, Set<MeasurementScheduleRequest> requests, EmsBean bean) { super.getValues(report, requests, bean); @@ -175,32 +177,43 @@ public class StorageServiceComponent extends ComplexConfigurationResourceCompone report.addData(new MeasurementDataNumeric(request, value.doubleValue())); } break; - } else if (DISK_USED_METRIC_NAME.equals(request.getName())) { + } else if (PARTITION_DISK_USED_PERCENTAGE_METRIC_NAME.equals(request.getName())) { EmsAttribute attribute = bean.getAttribute(DATA_FILE_LOCATIONS_NAME); Object valueObject = attribute.refresh(); if (valueObject instanceof String[]) { - String[] paths = (String[]) valueObject; - double max = 0; - for (String path : paths) { - double taken = getUsage(path); - if (taken > max) { - max = taken; - } - } - if (max > 0.0001d) { - report.addData(new MeasurementDataNumeric(request, max)); - } + //Please visit for details: https://issues.apache.org/jira/browse/CASSANDRA-2749 + //The average usage of all partitions with the data will be reported. + //Cassandra selects the partition with most free space for SStable flush and compaction. 
+ report.addData(new MeasurementDataNumeric(request, + getPartitionDiskUsedPercentage((String[]) valueObject))); } } } } - - private double getUsage(String path) { - File f = new File(path); - return ((double)f.getTotalSpace() - f.getUsableSpace()) / f.getTotalSpace(); - } - - public static boolean kindOfIP(final String addr) { - return addr.matches("^\d{1,3}\.\d{1,3\.\d{1,3\.\d{1,3$"); // || addr.indexOf(":") >= 0) or IPv6 + + private double getPartitionDiskUsedPercentage(String[] paths) { + List<String> visitedMountPoints = new ArrayList<String>(); + long totalDiskSpace = 0; + long totalUsedDiskSpace = 0; + + for (String path : paths) { + try { + FileSystemInfo fileSystemInfo = this.getResourceContext().getSystemInformation().getFileSystem(path); + if (!visitedMountPoints.contains(fileSystemInfo.getMountPoint())) { + visitedMountPoints.add(fileSystemInfo.getMountPoint()); + totalDiskSpace += fileSystemInfo.getFileSystemUsage().getTotal(); + totalUsedDiskSpace += fileSystemInfo.getFileSystemUsage().getUsed(); + } + } catch (Exception e) { + log.error("Unable to determine file system usage information for data file location " + path, e); + } + } + + if (totalDiskSpace != 0) { + double rawPercentage = ((double) totalUsedDiskSpace) / ((double) totalDiskSpace); + return Math.round(rawPercentage * 100.0) / 100.0; + } + + return 0; } } diff --git a/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml index ebaef94..cac0a8d 100644 --- a/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml +++ b/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml @@ -183,7 +183,8 @@ </operation>
<metric property="CurrentGenerationNumber" dataType="trait" displayType="summary" description="Current generation number"/> - <metric property="Calculated.DiskSpaceUsedPercentage" dataType="measurement" units="percentage" displayType="summary" description="How much of diskspace is already used. This takes into account the installation path, where Cassandra was installed."/> + <metric property="Calculated.PartitionDiskUsedPercentage" dataType="measurement" units="percentage" displayType="summary" description="Percentage of total disk space used for the partition that contains the data files. + If multiple data locations are specified then this will report the average utilization accross all the partitions."/> <metric property="ExceptionCount" measurementType="trendsup" dataType="measurement" displayType="summary" description="Exception Count"/> <metric property="Initialized" dataType="trait" displayType="summary" description="Initialized"/> <metric property="Joined" dataType="trait" displayType="summary" description="Joined"/> @@ -272,7 +273,7 @@ <c:simple-property name="nameTemplate" readOnly="true" type="string" default="Commit Log" /> </plugin-configuration>
- <metric property="TotalCommitlogSize" measurementType="trendsup" dataType="measurement" displayType="summary" description="Size of all commit log segments"/> + <metric property="TotalCommitlogSize" measurementType="dynamic" displayType="summary" description="Size of all commit log segments"/> <metric property="PendingTasks" measurementType="dynamic" displayType="summary" description="Number of tasks waiting to be executed"/> <metric property="CompletedTasks" measurementType="trendsup" displayType="summary" description="Number of completed tasks"/> </service> @@ -393,8 +394,8 @@ <metric property="KeyCacheCapacityInBytes" measurementType="dynamic" displayType="summary" description="Key cache capacity in bytes"/> <metric property="RowCacheCapacityInBytes" measurementType="dynamic" displayType="summary" description="Row cache capacity in bytes"/>
- <metric property="KeyCacheSize" measurementType="dynamic" displayType="detail" description="Key cache size"/> - <metric property="RowCacheSize" measurementType="dynamic" displayType="detail" description="Row cache size"/> + <metric property="KeyCacheSize" measurementType="dynamic" displayType="summary" description="Key cache size"/> + <metric property="RowCacheSize" measurementType="dynamic" displayType="summary" description="Row cache size"/>
<resource-configuration> <c:simple-property name="KeyCacheCapacityInMB" type="long" required="true" displayName="Key Cache Capacity In MB" description="Key cache capacity in MB"/>
commit dc028de9772aa70f64b7e817c38fd2f9a64d5135 Author: John Sanda jsanda@redhat.com Date: Thu Jul 4 07:29:32 2013 -0400
fix failing test
Now that StorageNodeManagerBean creates the storage node resource group, the group has to be deleted during test runs.
diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBeanTest.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBeanTest.java index d45ae90..d44d637 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBeanTest.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBeanTest.java @@ -25,6 +25,10 @@
package org.rhq.enterprise.server.cloud;
+import static org.rhq.enterprise.server.cloud.StorageNodeManagerBean.STORAGE_NODE_GROUP_NAME; +import static org.rhq.enterprise.server.cloud.StorageNodeManagerBean.STORAGE_NODE_PLUGIN_NAME; +import static org.rhq.enterprise.server.cloud.StorageNodeManagerBean.STORAGE_NODE_RESOURCE_TYPE_NAME; + import java.util.ArrayList; import java.util.Arrays; import java.util.HashSet; @@ -32,27 +36,30 @@ import java.util.List; import java.util.Set; import java.util.UUID;
+import javax.ejb.EJB; import javax.persistence.Query; import javax.transaction.Transaction;
import org.testng.Assert; import org.testng.annotations.Test;
-import org.rhq.core.domain.auth.Subject; import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.configuration.definition.ConfigurationDefinition; import org.rhq.core.domain.configuration.definition.PropertyDefinitionSimple; import org.rhq.core.domain.configuration.definition.PropertySimpleType; +import org.rhq.core.domain.criteria.ResourceGroupCriteria; import org.rhq.core.domain.criteria.StorageNodeCriteria; import org.rhq.core.domain.resource.Resource; import org.rhq.core.domain.resource.ResourceCategory; import org.rhq.core.domain.resource.ResourceType; +import org.rhq.core.domain.resource.group.ResourceGroup; import org.rhq.core.domain.util.PageList; import org.rhq.core.domain.util.PageOrdering; +import org.rhq.enterprise.server.auth.SubjectManagerLocal; import org.rhq.enterprise.server.resource.ResourceTypeManagerLocal; +import org.rhq.enterprise.server.resource.group.ResourceGroupManagerLocal; import org.rhq.enterprise.server.test.AbstractEJB3Test; import org.rhq.enterprise.server.test.TransactionCallback; -import org.rhq.enterprise.server.util.LookupUtil;
/** * @author Jirka Kremser @@ -60,17 +67,19 @@ import org.rhq.enterprise.server.util.LookupUtil; @Test public class StorageNodeManagerBeanTest extends AbstractEJB3Test {
+ @EJB private StorageNodeManagerLocal nodeManager; + + @EJB private ResourceTypeManagerLocal typeManager; - private Subject overlord; - private static final String TEST_PREFIX = "test-";
- @Override - protected void beforeMethod() throws Exception { - nodeManager = LookupUtil.getStorageNodeManager(); - typeManager = LookupUtil.getResourceTypeManager(); - overlord = LookupUtil.getSubjectManager().getOverlord(); - } + @EJB + private ResourceGroupManagerLocal resourceGroupManager; + + @EJB + private SubjectManagerLocal subjectManager; + + private static final String TEST_PREFIX = "test-";
@Test public void testInit() throws Exception { @@ -79,6 +88,7 @@ public class StorageNodeManagerBeanTest extends AbstractEJB3Test {
try { prepareScheduler(); + cleanDatabase(); executeInTransaction(new TransactionCallback() {
@Override @@ -89,8 +99,6 @@ public class StorageNodeManagerBeanTest extends AbstractEJB3Test { System.setProperty(cassandraSeedsProperty, addresses.get(0) + "|123|123," + addresses.get(1) + "|987|987," + addresses.get(2) + "|123|123");
- cleanDatabase(); - // create the resource type if it doesn't exist ResourceType testResourceType = typeManager.getResourceTypeByNameAndPlugin("RHQ Storage Node", "RHQStorage"); @@ -175,7 +183,8 @@ public class StorageNodeManagerBeanTest extends AbstractEJB3Test { criteria.addFilterAddress(prefix); // use DESC just to make sure sorting on name is different than insert order criteria.addSortAddress(PageOrdering.DESC); - PageList<StorageNode> list = nodeManager.findStorageNodesByCriteria(overlord, criteria); + PageList<StorageNode> list = nodeManager.findStorageNodesByCriteria(subjectManager.getOverlord(), + criteria);
assertTrue("The number of found storage nodes should be " + storageNodeCount + ". Was: " + list.size(), storageNodeCount == list.size()); @@ -205,6 +214,26 @@ public class StorageNodeManagerBeanTest extends AbstractEJB3Test { // this method is still needed, because tests calls SLSB methods that are executed in their own transaction // and the rollback performed once the TransactionCallback is finished just wont clean everything
+ // We can only filter on the group name because the resource type info might not exist in the test + // database. + ResourceGroupCriteria criteria = new ResourceGroupCriteria(); + criteria.addFilterName(STORAGE_NODE_GROUP_NAME); + + List<ResourceGroup> groups = resourceGroupManager.findResourceGroupsByCriteria(subjectManager.getOverlord(), + criteria); + + if (!groups.isEmpty()) { + resourceGroupManager.deleteResourceGroup(subjectManager.getOverlord(), groups.get(0).getId()); + } + +// for (ResourceGroup group : groups) { +// if (group.getName().equals(STORAGE_NODE_GROUP_NAME)) { +// resourceGroupManager.deleteResourceGroup(subjectManager.getOverlord(), group.getId()); +// break; +// } +// } + + // pause the currently running TX Transaction runningTransaction = getTransactionManager().suspend(); getTransactionManager().begin(); diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 874be1e..b21946a 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -97,11 +97,12 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
private static final String SEEDS_PROP = "rhq.cassandra.seeds";
- private static final String STORAGE_NODE_GROUP_NAME = "RHQ Storage Nodes"; + // The following have package visibility to make accessible to StorageNodeManagerBeanTest + static final String STORAGE_NODE_GROUP_NAME = "RHQ Storage Nodes";
- private static final String STORAGE_NODE_RESOURCE_TYPE_NAME = "RHQ Storage Node"; + static final String STORAGE_NODE_RESOURCE_TYPE_NAME = "RHQ Storage Node";
- private static final String STORAGE_NODE_PLUGIN_NAME = "RHQStorage"; + static final String STORAGE_NODE_PLUGIN_NAME = "RHQStorage";
@PersistenceContext(unitName = RHQConstants.PERSISTENCE_UNIT_NAME) private EntityManager entityManager;
commit 95a9f222e3d047fe335fee7c2d305c7ffe18518b Author: John Sanda jsanda@redhat.com Date: Wed Jul 3 14:54:03 2013 -0400
big refactoring of the work that is done when a new storage node is committed to inventory
Here is a run down of the changes.
* fix endpoint comparisons There was a bug in StorageNodeMaintenanceJob.waitForClustering where it was comparing the string form of an IP address against a PropertySimple.toString. It should be comparing the value of the property.
* schedule the addNodeMaintenance op as a group operation StorageNodeMaintenanceJob now schedules the operations for each storage node as a group operation instead of as individual operations. This allows us to remove a lot of code around waiting for operations to complete before scheduling the next one. More importantly, the previous implementation was not blocking until each operation completed which resulted in repair operations running across multiple nodes simultaneously. We definitely want to run repair on the nodes serially. Scheduling the work as a group operation handles that for us.
Scheduling the work as a group operation required a slight change to StorageNodeManagerBean.linkResource. The resource has to be added to the group before the quartz job is scheduled. Logic needs to be added to verify that the node has actually joined the cluster before adding it to the group.
* add logic to detect when repair needs to run There is logic in place now to determine whether or not repair needs to run. Previously, we would run repair against each node whenever StorageNodeMaintenanceJob would run. We only want to run repair if and when we have to since it is a very resource intensive operation.
* Add logic back to update replication_factor of system_auth keyspace I had previously changed the logic in TopologyManager to *not* update the RF of the system_auth keyspace. For a multi-node installation, we would increase the RF of system_auth because the change was made after the rhqadmin user was created. Without running repair this results in inconsistent reads which in turn leads to failed authentication. When StorageNodeMaintenanceJob runs we do want to update the RF of both the system_auth and rhq keyspaces. I have refactored TopologyManager so that system_auth gets updated.
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-arquillian/src/main/java/org/rhq/cassandra/ccm/arquillian/CCMSuiteDeploymentExtension.java b/modules/common/cassandra-ccm/cassandra-ccm-arquillian/src/main/java/org/rhq/cassandra/ccm/arquillian/CCMSuiteDeploymentExtension.java index d307f0b..a86c49e 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-arquillian/src/main/java/org/rhq/cassandra/ccm/arquillian/CCMSuiteDeploymentExtension.java +++ b/modules/common/cassandra-ccm/cassandra-ccm-arquillian/src/main/java/org/rhq/cassandra/ccm/arquillian/CCMSuiteDeploymentExtension.java @@ -159,7 +159,7 @@ public class CCMSuiteDeploymentExtension implements LoadableExtension { try { schemaManager.install(); clusterInitService.waitForSchemaAgreement(nodes); - schemaManager.updateTopology(); + schemaManager.updateTopology(true); } catch (Exception e) { if (null != ccm) { ccm.shutdownCluster(); diff --git a/modules/common/cassandra-ccm/cassandra-ccm-maven-plugin/src/main/java/org/rhq/cassandra/ccm/maven/DeployMojo.java b/modules/common/cassandra-ccm/cassandra-ccm-maven-plugin/src/main/java/org/rhq/cassandra/ccm/maven/DeployMojo.java index f50535c..b84018f 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-maven-plugin/src/main/java/org/rhq/cassandra/ccm/maven/DeployMojo.java +++ b/modules/common/cassandra-ccm/cassandra-ccm-maven-plugin/src/main/java/org/rhq/cassandra/ccm/maven/DeployMojo.java @@ -74,7 +74,7 @@ public class DeployMojo extends AbstractMojo {
try { schemaManager.install(); - schemaManager.updateTopology(); + schemaManager.updateTopology(true); } catch (Exception e) { throw new MojoExecutionException("Schema installation failed.", e); } diff --git a/modules/common/cassandra-ccm/cassandra-ccm-testng/src/main/java/org/rhq/cassandra/CCMTestNGListener.java b/modules/common/cassandra-ccm/cassandra-ccm-testng/src/main/java/org/rhq/cassandra/CCMTestNGListener.java index a9292f7..38d5337 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-testng/src/main/java/org/rhq/cassandra/CCMTestNGListener.java +++ b/modules/common/cassandra-ccm/cassandra-ccm-testng/src/main/java/org/rhq/cassandra/CCMTestNGListener.java @@ -129,7 +129,7 @@ public class CCMTestNGListener implements IInvokedMethodListener { if (annotation.waitForSchemaAgreement()) { clusterInitService.waitForSchemaAgreement(nodes); } - schemaManager.updateTopology(); + schemaManager.updateTopology(true); }
private void shutdownCluster() throws Exception { diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java index 8f8c47e..8f67ab3 100644 --- a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java +++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java @@ -93,9 +93,9 @@ public class SchemaManager { version.drop(); }
- public boolean updateTopology() throws Exception { + public boolean updateTopology(boolean isNewSchema) throws Exception { TopologyManager topology = new TopologyManager(username, password, nodes); - return topology.updateTopology(); + return topology.updateTopology(isNewSchema); }
private static List<StorageNode> parseNodeInformation(String... nodes) { @@ -139,7 +139,7 @@ public class SchemaManager { } else if ("drop".equalsIgnoreCase(command)) { schemaManager.drop(); } else if ("topology".equalsIgnoreCase(command)) { - schemaManager.updateTopology(); + schemaManager.updateTopology(true); } else { throw new IllegalArgumentException(command + " not available."); } diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/TopologyManager.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/TopologyManager.java index 850c383..fd987a1 100644 --- a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/TopologyManager.java +++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/TopologyManager.java @@ -55,8 +55,12 @@ public class TopologyManager extends AbstractManager { this.file = file; }
- protected String getFile() { - return TOPOLOGY_BASE_FOLDER + "/" + this.file; + protected String getFile(boolean isNewSchema) { + if (isNewSchema) { + return TOPOLOGY_BASE_FOLDER + "/create/" + this.file; + } + + return TOPOLOGY_BASE_FOLDER + "/update/" + this.file; } }
@@ -64,14 +68,14 @@ public class TopologyManager extends AbstractManager { super(username, password, nodes); }
- public boolean updateTopology() throws Exception { + public boolean updateTopology(boolean isNewSchema) throws Exception { boolean result = false;
initCluster(); if (schemaExists()) { log.info("Applying topology updates..."); - result = this.updateReplicationFactor(nodes.size()); - this.updateGCGrace(nodes.size()); + result = this.updateReplicationFactor(isNewSchema, nodes.size()); + this.updateGCGrace(isNewSchema, nodes.size()); } else { log.info("Topology updates cannot be applied because the schema is not installed."); } @@ -80,7 +84,7 @@ public class TopologyManager extends AbstractManager { return result; }
- private boolean updateReplicationFactor(int numberOfNodes) throws Exception { + private boolean updateReplicationFactor(boolean isNewSchema, int numberOfNodes) throws Exception { log.info("Starting to execute " + Task.UpdateReplicationFactor + " task.");
int replicationFactor = 1; @@ -97,19 +101,19 @@ public class TopologyManager extends AbstractManager { return false; }
- log.info("Applying file " + Task.UpdateReplicationFactor.getFile() + " for " + Task.UpdateReplicationFactor - + " task."); - for (String query : this.getSteps(Task.UpdateReplicationFactor.getFile())) { + log.info("Applying file " + Task.UpdateReplicationFactor.getFile(isNewSchema) + " for " + + Task.UpdateReplicationFactor + " task."); + for (String query : this.getSteps(Task.UpdateReplicationFactor.getFile(isNewSchema))) { executedPreparedStatement(query, replicationFactor); } - log.info("File " + Task.UpdateReplicationFactor.getFile() + " applied for " + Task.UpdateReplicationFactor - + " task."); + log.info("File " + Task.UpdateReplicationFactor.getFile(isNewSchema) + " applied for " + + Task.UpdateReplicationFactor + " task.");
log.info("Successfully executed " + Task.UpdateReplicationFactor + " task."); return true; }
- private boolean updateGCGrace(int numberOfNodes) throws Exception { + private boolean updateGCGrace(boolean isNewSchema, int numberOfNodes) throws Exception { log.info("Starting to execute " + Task.UpdateGCGrace + " task.");
int gcGraceSeconds = 864000; @@ -120,11 +124,11 @@ public class TopologyManager extends AbstractManager { }
- log.info("Applying file " + Task.UpdateGCGrace.getFile() + " for " + Task.UpdateGCGrace + " task."); - for (String query : this.getSteps(Task.UpdateGCGrace.getFile())) { + log.info("Applying file " + Task.UpdateGCGrace.getFile(isNewSchema) + " for " + Task.UpdateGCGrace + " task."); + for (String query : this.getSteps(Task.UpdateGCGrace.getFile(isNewSchema))) { executedPreparedStatement(query, gcGraceSeconds); } - log.info("File " + Task.UpdateGCGrace.getFile() + " applied for " + Task.UpdateGCGrace + " task."); + log.info("File " + Task.UpdateGCGrace.getFile(isNewSchema) + " applied for " + Task.UpdateGCGrace + " task.");
log.info("Successfully executed " + Task.UpdateGCGrace + " task."); return true; diff --git a/modules/common/cassandra-schema/src/main/resources/topology/0001.xml b/modules/common/cassandra-schema/src/main/resources/topology/0001.xml deleted file mode 100644 index 5cbd7eb..0000000 --- a/modules/common/cassandra-schema/src/main/resources/topology/0001.xml +++ /dev/null @@ -1,5 +0,0 @@ -<updatePlan> - <step> - ALTER KEYSPACE rhq WITH replication = {'class': 'SimpleStrategy', 'replication_factor': %s}; - </step> -</updatePlan> \ No newline at end of file diff --git a/modules/common/cassandra-schema/src/main/resources/topology/0002.xml b/modules/common/cassandra-schema/src/main/resources/topology/0002.xml deleted file mode 100644 index d631030..0000000 --- a/modules/common/cassandra-schema/src/main/resources/topology/0002.xml +++ /dev/null @@ -1,26 +0,0 @@ -<updatePlan> - <step> - ALTER COLUMNFAMILY rhq.metrics_index WITH gc_grace_seconds = %s; - </step> - - <step> - ALTER COLUMNFAMILY rhq.raw_metrics WITH gc_grace_seconds = %s; - </step> - - <step> - ALTER COLUMNFAMILY rhq.one_hour_metrics WITH gc_grace_seconds = %s; - </step> - - <step> - ALTER COLUMNFAMILY rhq.six_hour_metrics WITH gc_grace_seconds = %s; - </step> - - <step> - ALTER COLUMNFAMILY rhq.twenty_four_hour_metrics WITH gc_grace_seconds = %s; - </step> - - <step> - ALTER COLUMNFAMILY rhq.schema_version WITH gc_grace_seconds = %s; - </step> - -</updatePlan> \ No newline at end of file diff --git a/modules/common/cassandra-schema/src/main/resources/topology/create/0001.xml b/modules/common/cassandra-schema/src/main/resources/topology/create/0001.xml new file mode 100644 index 0000000..5cbd7eb --- /dev/null +++ b/modules/common/cassandra-schema/src/main/resources/topology/create/0001.xml @@ -0,0 +1,5 @@ +<updatePlan> + <step> + ALTER KEYSPACE rhq WITH replication = {'class': 'SimpleStrategy', 'replication_factor': %s}; + </step> +</updatePlan> \ No newline at end of file diff --git 
a/modules/common/cassandra-schema/src/main/resources/topology/create/0002.xml b/modules/common/cassandra-schema/src/main/resources/topology/create/0002.xml new file mode 100644 index 0000000..d631030 --- /dev/null +++ b/modules/common/cassandra-schema/src/main/resources/topology/create/0002.xml @@ -0,0 +1,26 @@ +<updatePlan> + <step> + ALTER COLUMNFAMILY rhq.metrics_index WITH gc_grace_seconds = %s; + </step> + + <step> + ALTER COLUMNFAMILY rhq.raw_metrics WITH gc_grace_seconds = %s; + </step> + + <step> + ALTER COLUMNFAMILY rhq.one_hour_metrics WITH gc_grace_seconds = %s; + </step> + + <step> + ALTER COLUMNFAMILY rhq.six_hour_metrics WITH gc_grace_seconds = %s; + </step> + + <step> + ALTER COLUMNFAMILY rhq.twenty_four_hour_metrics WITH gc_grace_seconds = %s; + </step> + + <step> + ALTER COLUMNFAMILY rhq.schema_version WITH gc_grace_seconds = %s; + </step> + +</updatePlan> \ No newline at end of file diff --git a/modules/common/cassandra-schema/src/main/resources/topology/update/0001.xml b/modules/common/cassandra-schema/src/main/resources/topology/update/0001.xml new file mode 100644 index 0000000..f2c0e57 --- /dev/null +++ b/modules/common/cassandra-schema/src/main/resources/topology/update/0001.xml @@ -0,0 +1,9 @@ +<updatePlan> + <step> + ALTER KEYSPACE system_auth WITH replication = {'class': 'SimpleStrategy', 'replication_factor': %s}; + </step> + + <step> + ALTER KEYSPACE rhq WITH replication = {'class': 'SimpleStrategy', 'replication_factor': %s}; + </step> +</updatePlan> \ No newline at end of file diff --git a/modules/common/cassandra-schema/src/main/resources/topology/update/0002.xml b/modules/common/cassandra-schema/src/main/resources/topology/update/0002.xml new file mode 100644 index 0000000..d631030 --- /dev/null +++ b/modules/common/cassandra-schema/src/main/resources/topology/update/0002.xml @@ -0,0 +1,26 @@ +<updatePlan> + <step> + ALTER COLUMNFAMILY rhq.metrics_index WITH gc_grace_seconds = %s; + </step> + + <step> + ALTER COLUMNFAMILY 
rhq.raw_metrics WITH gc_grace_seconds = %s; + </step> + + <step> + ALTER COLUMNFAMILY rhq.one_hour_metrics WITH gc_grace_seconds = %s; + </step> + + <step> + ALTER COLUMNFAMILY rhq.six_hour_metrics WITH gc_grace_seconds = %s; + </step> + + <step> + ALTER COLUMNFAMILY rhq.twenty_four_hour_metrics WITH gc_grace_seconds = %s; + </step> + + <step> + ALTER COLUMNFAMILY rhq.schema_version WITH gc_grace_seconds = %s; + </step> + +</updatePlan> \ No newline at end of file diff --git a/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java b/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java index fbd869b..4c87f70 100644 --- a/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java +++ b/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java @@ -487,7 +487,7 @@ public class InstallerServiceImpl implements InstallerService { } log("Install RHQ schema along with updates to Cassandra."); storageNodeSchemaManager.install(); - storageNodeSchemaManager.updateTopology(); + storageNodeSchemaManager.updateTopology(true); } else { log("Ignoring Cassandra schema - installer will assume it exists and is already up-to-date."); } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 90d9497..874be1e 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -39,6 +39,7 @@ import javax.persistence.TypedQuery;
import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.quartz.JobDataMap; import org.quartz.SimpleTrigger; import org.quartz.Trigger;
@@ -200,7 +201,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN this.updateStorageNodes(storageNodeMap);
if (clusterMaintenanceNeeded) { - this.scheduleQuartzJob(); + this.scheduleQuartzJob(existingStorageNodes.size()); }
return new ArrayList<StorageNode>(storageNodeMap.values()); @@ -214,6 +215,10 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN String configAddress = resourceConfig.getSimpleValue(RHQ_STORAGE_ADDRESS_PROPERTY);
if (configAddress != null) { + // TODO Do not add the node to the group until we have verified it has joined the cluster + // StorageNodeMaintenanceJob currently determines if a new node has successfully joined the cluster. + addStorageNodeToGroup(resource); + boolean storageNodeFound = false; if (storageNodes != null) { for (StorageNode storageNode : storageNodes) { @@ -239,10 +244,8 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
entityManager.persist(storageNode);
- scheduleQuartzJob(); + scheduleQuartzJob(storageNodes.size()); } - - addStorageNodeToGroup(resource); } }
@@ -306,13 +309,14 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN * @return The storage node resource group. * @throws IllegalStateException if the group is not found or does not exist. */ - private ResourceGroup getStorageNodeGroup() { + public ResourceGroup getStorageNodeGroup() { Subject overlord = subjectManager.getOverlord();
ResourceGroupCriteria criteria = new ResourceGroupCriteria(); criteria.addFilterResourceTypeName(STORAGE_NODE_RESOURCE_TYPE_NAME); criteria.addFilterPluginName(STORAGE_NODE_PLUGIN_NAME); criteria.addFilterName(STORAGE_NODE_GROUP_NAME); + criteria.fetchExplicitResources(true);
List<ResourceGroup> groups = resourceGroupManager.findResourceGroupsByCriteria(overlord, criteria);
@@ -472,7 +476,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN return newNodes; }
- private void scheduleQuartzJob() { + private void scheduleQuartzJob(int clusterSize) { String jobName = StorageNodeMaintenanceJob.class.getName(); String jobGroupName = StorageNodeMaintenanceJob.class.getName(); String triggerName = StorageNodeMaintenanceJob.class.getName(); @@ -482,6 +486,10 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN trigger.setJobName(jobName); trigger.setJobGroup(jobGroupName); try { + JobDataMap jobDataMap = new JobDataMap(); + jobDataMap.put(StorageNodeMaintenanceJob.JOB_DATA_PROPERTY_CLUSTER_SIZE, Integer.toString(clusterSize)); + trigger.setJobDataMap(jobDataMap); + quartzScheduler.scheduleJob(trigger); } catch (Throwable t) { log.warn("Unable to schedule storage node maintenance job", t); diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java index 54646ec..52e2424 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java @@ -27,6 +27,7 @@ import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.cloud.StorageNodeLoadComposite; import org.rhq.core.domain.criteria.StorageNodeCriteria; import org.rhq.core.domain.resource.Resource; +import org.rhq.core.domain.resource.group.ResourceGroup; import org.rhq.core.domain.util.PageList;
@Local @@ -86,4 +87,13 @@ public interface StorageNodeManagerLocal { */ void runReadRepair();
+ /** + * This method assumes the storage node resource group already exists; as such, it should only be called from places + * in the code that are after the point(s) where the group has been created. + * + * @return The storage node resource group. + * @throws IllegalStateException if the group is not found or does not exist. + */ + ResourceGroup getStorageNodeGroup(); + } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/scheduler/jobs/StorageNodeMaintenanceJob.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/scheduler/jobs/StorageNodeMaintenanceJob.java index f492fa0..6b1940d 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/scheduler/jobs/StorageNodeMaintenanceJob.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/scheduler/jobs/StorageNodeMaintenanceJob.java @@ -24,6 +24,7 @@ import java.util.List;
import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.quartz.JobDataMap; import org.quartz.JobExecutionContext; import org.quartz.JobExecutionException;
@@ -36,13 +37,11 @@ import org.rhq.core.domain.configuration.PropertyList; import org.rhq.core.domain.configuration.PropertyMap; import org.rhq.core.domain.configuration.PropertySimple; import org.rhq.core.domain.criteria.ResourceCriteria; -import org.rhq.core.domain.operation.OperationRequestStatus; -import org.rhq.core.domain.operation.ResourceOperationHistory; -import org.rhq.core.domain.operation.bean.ResourceOperationSchedule; +import org.rhq.core.domain.operation.bean.GroupOperationSchedule; import org.rhq.core.domain.resource.Resource; -import org.rhq.core.domain.util.PageControl; -import org.rhq.core.domain.util.PageList; +import org.rhq.core.domain.resource.group.ResourceGroup; import org.rhq.core.util.StringUtil; +import org.rhq.enterprise.server.auth.SubjectManagerLocal; import org.rhq.enterprise.server.cloud.StorageNodeManagerLocal; import org.rhq.enterprise.server.operation.OperationManagerLocal; import org.rhq.enterprise.server.util.LookupUtil; @@ -58,6 +57,10 @@ public class StorageNodeMaintenanceJob extends AbstractStatefulJob {
private final Log log = LogFactory.getLog(StorageNodeMaintenanceJob.class);
+ public static final String JOB_DATA_PROPERTY_CLUSTER_SIZE = "clusterSize"; + + public static final String JOB_DATA_PROPERTY_TOPOLOGY_CHANGED = "topologyChanged"; + private final static int MAX_ITERATIONS = 5; private final static int TIMEOUT = 10000; private final static String STORAGE_SERVICE = "Storage Service"; @@ -73,7 +76,10 @@ public class StorageNodeMaintenanceJob extends AbstractStatefulJob { private static final String PASSWORD_PROP = "rhq.cassandra.password";
@Override - public void executeJobCode(JobExecutionContext arg0) throws JobExecutionException { + public void executeJobCode(JobExecutionContext context) throws JobExecutionException { + JobDataMap jobDataMap = context.getMergedJobDataMap(); + int clusterSize = Integer.parseInt(jobDataMap.getString(JOB_DATA_PROPERTY_CLUSTER_SIZE)); + //1. Wait for resouces to be linked to node storage nodes waitForResouceLinks();
@@ -84,21 +90,50 @@ public class StorageNodeMaintenanceJob extends AbstractStatefulJob { //3. Wait for the all storage nodes to be part of the same cluster storageNodes = waitForClustering(storageNodes);
- //4. Update topology - boolean topologyUpdated = updateTopology(storageNodes); - - //5. Run repair operation on all the storage nodes if topology(replication factor was updated) - if (topologyUpdated) { - List<String> seedList = new ArrayList<String>(); - for (StorageNode storageNode : storageNodes) { - seedList.add(storageNode.getAddress()); + boolean isReadRepairNeeded; + + if (clusterSize >= 4) { + // At 4 nodes we increase the RF to 3. We are not increasing the RF beyond + // that for additional nodes; so, there is no need to run repair if we are + // expanding from a 4 node cluster since the RF remains the same. + isReadRepairNeeded = false; + } else if (clusterSize == 1) { + // The RF will increase since we are going from a single to a multi-node + // cluster; therefore, we want to run repair. + isReadRepairNeeded = true; + } else if (clusterSize == 2) { + if (storageNodes.size() > 3) { + // If we go from 2 to > 3 nodes we will increase the RF to 3; therefore + // we want to run repair. + isReadRepairNeeded = true; + } else { + // If we go from 2 to 3 nodes, we keep the RF at 2 so there is no need + // to run repair. + isReadRepairNeeded = false; } + } else if (clusterSize == 3) { + // We are increasing the cluster size > 3 which means the RF will be + // updated to 3; therefore, we want to run repair. + isReadRepairNeeded = true; + } else { + // If we cluster size of zero, then something is really screwed up. It + // should always be > 0. + log.error("The job data property [" + JOB_DATA_PROPERTY_CLUSTER_SIZE + "] should always be greater " + + "than zero. This may be a bug in the code that scheduled this job."); + isReadRepairNeeded = storageNodes.size() > 1; + }
- for (StorageNode storageNode : storageNodes) { - Resource resource = storageNode.getResource(); - runNodeMaintenance(resource, seedList); - } + if (isReadRepairNeeded) { + updateTopology(storageNodes); + } + + //5. run maintenance on each node + List<String> seedList = new ArrayList<String>(); + for (StorageNode storageNode : storageNodes) { + seedList.add(storageNode.getAddress()); } + + runNodeMaintenance(seedList, isReadRepairNeeded); }
private boolean updateTopology(List<StorageNode> storageNodes) throws JobExecutionException { @@ -106,9 +141,9 @@ public class StorageNodeMaintenanceJob extends AbstractStatefulJob { String password = getRequiredStorageProperty(PASSWORD_PROP); SchemaManager schemaManager = new SchemaManager(username, password, storageNodes); try{ - return schemaManager.updateTopology(); + return schemaManager.updateTopology(false); } catch (Exception e) { - log.error(e); + log.error("An error occurred while applying schema topology changes", e); }
return false; @@ -147,7 +182,7 @@ public class StorageNodeMaintenanceJob extends AbstractStatefulJob { List<Property> actualList = propertyList.getList(); for (Property property : actualList) { PropertyMap map = (PropertyMap) property; - endpoints.add(map.get(ENDPOINT_PROPERTY).toString()); + endpoints.add(map.getSimpleValue(ENDPOINT_PROPERTY, null)); } } catch (Exception e) { log.error("Error fetching live configuration for resource " + resource.getId()); @@ -157,7 +192,7 @@ public class StorageNodeMaintenanceJob extends AbstractStatefulJob { } } } catch (Exception e) { - log.error(e); + log.error("An exception occurred while waiting for nodes to cluster", e); }
Collections.sort(endpoints); @@ -184,67 +219,38 @@ public class StorageNodeMaintenanceJob extends AbstractStatefulJob { return storageNodes; }
- private void runNodeMaintenance(Resource resource, List<String> seedList) { + private void runNodeMaintenance(List<String> seedList, boolean runRepair) { OperationManagerLocal operationManager = LookupUtil.getOperationManager(); + StorageNodeManagerLocal storageNodeManager = LookupUtil.getStorageNodeManager(); + SubjectManagerLocal subjectManager = LookupUtil.getSubjectManager(); + + ResourceGroup storageNodeGroup = storageNodeManager.getStorageNodeGroup(); + + GroupOperationSchedule schedule = new GroupOperationSchedule(); + schedule.setGroup(storageNodeGroup); + schedule.setHaltOnFailure(false); + schedule.setExecutionOrder(new ArrayList<Resource>(storageNodeGroup.getExplicitResources())); + schedule.setJobTrigger(JobTrigger.createNowTrigger()); + schedule.setSubject(subjectManager.getOverlord()); + schedule.setOperationName(MAINTENANCE_OPERATION); + schedule.setDescription(MAINTENANCE_OPERATION_NOTE); + + List<Property> properties = new ArrayList<Property>(); + properties.add(new PropertySimple(RUN_REPAIR_PROPERTY, runRepair)); + properties.add(new PropertySimple(UPDATE_SEEDS_LIST, Boolean.TRUE)); + + PropertyList seedListProperty = new PropertyList(SEEDS_LIST); + for (String seed : seedList) { + seedListProperty.add(new PropertySimple("seed", seed)); + } + properties.add(seedListProperty);
- try { - ResourceOperationSchedule newSchedule = new ResourceOperationSchedule(); - newSchedule.setJobTrigger(JobTrigger.createNowTrigger()); - newSchedule.setResource(resource); - newSchedule.setOperationName(MAINTENANCE_OPERATION); - newSchedule.setDescription(MAINTENANCE_OPERATION_NOTE); - - List<Property> properties = new ArrayList<Property>(); - properties.add(new PropertySimple(RUN_REPAIR_PROPERTY, Boolean.TRUE)); - properties.add(new PropertySimple(UPDATE_SEEDS_LIST, Boolean.TRUE)); - - PropertyList seedListProperty = new PropertyList(SEEDS_LIST); - for (String seed : seedList) { - seedListProperty.add(new PropertySimple("seed", seed)); - } - properties.add(seedListProperty); - - Configuration config = new Configuration(); - config.setProperties(properties); - newSchedule.setParameters(config); - - long operationStartTime = System.currentTimeMillis(); - operationManager.scheduleResourceOperation(LookupUtil.getSubjectManager().getOverlord(), newSchedule); - - int iteration = 0; - boolean resultFound = false; - while (iteration < MAX_ITERATIONS && !resultFound) { - PageList<ResourceOperationHistory> results = operationManager.findCompletedResourceOperationHistories( - LookupUtil.getSubjectManager().getOverlord(), resource.getId(), operationStartTime, null, - PageControl.getUnlimitedInstance()); - - for (ResourceOperationHistory operationHistory : results) { - if (MAINTENANCE_OPERATION.equals(operationHistory.getOperationDefinition().getName())) { - if (OperationRequestStatus.SUCCESS.equals(operationHistory.getStatus())) { - Configuration operationResults = operationHistory.getResults(); - if ("true".equals(operationResults.getSimpleValue(SUCCEED_PROPERTY))) { - resultFound = true; - } - } - } - } - - if (resultFound) { - break; - } else { - try { - Thread.sleep(TIMEOUT); - } catch (Exception e) { - log.error(e); - } - } + Configuration config = new Configuration(); + config.setProperties(properties);
- iteration++; - } + schedule.setParameters(config);
- } catch (Exception e) { - log.error(e); - } + operationManager.scheduleGroupOperation(subjectManager.getOverlord(), schedule); }
private List<StorageNode> getOnlyResourceLinkedStorageNodes() {
commit a39e88e38e73d07f7ee7ac6374c78b566ea9c5b4 Author: John Sanda jsanda@redhat.com Date: Tue Jul 2 17:01:15 2013 -0400
use ProcessInfo.freshSnapshot during avail checks to avoid stale data
diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java index 7d06cb2..93d758c 100644 --- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java +++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java @@ -135,7 +135,7 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent return UNKNOWN; } else { // It is safe to read prior snapshot as getNativeProcess always return a fresh instance - ProcessInfoSnapshot processInfoSnaphot = processInfo.priorSnaphot(); + ProcessInfoSnapshot processInfoSnaphot = processInfo.freshSnapshot(); if (processInfoSnaphot.isRunning()) { return UP; } else {
rhq-commits@lists.fedorahosted.org