-rw-r--r--  ambari-admin/pom.xml  34
-rw-r--r--  ambari-agent/conf/windows/ambari-agent.cmd  18
-rw-r--r--  ambari-agent/conf/windows/ambari-agent.ini  54
-rw-r--r--  ambari-agent/conf/windows/ambari-agent.ps1  245
-rw-r--r--  ambari-agent/conf/windows/ambari-env.cmd  22
-rw-r--r--  ambari-agent/conf/windows/createservice.ps1  195
-rw-r--r--  ambari-agent/conf/windows/service_wrapper.py  227
-rw-r--r--  ambari-agent/pom.xml  173
-rw-r--r--  ambari-agent/src/main/package/msi/ambari-agent.wxs  38
-rw-r--r--  ambari-agent/src/main/python/ambari_agent/ActionQueue.py  35
-rw-r--r--  ambari-agent/src/main/python/ambari_agent/AgentConfig_linux.py  229
-rw-r--r--  ambari-agent/src/main/python/ambari_agent/AgentConfig_windows.py  232
-rw-r--r--  ambari-agent/src/main/python/ambari_agent/AmbariAgent.py  10
-rw-r--r--  ambari-agent/src/main/python/ambari_agent/AmbariConfig.py  240
-rw-r--r--  ambari-agent/src/main/python/ambari_agent/Controller.py  44
-rw-r--r--  ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py  14
-rw-r--r--  ambari-agent/src/main/python/ambari_agent/Facter.py  329
-rw-r--r--  ambari-agent/src/main/python/ambari_agent/FileCache.py  3
-rw-r--r--  ambari-agent/src/main/python/ambari_agent/Hardware.py  33
-rw-r--r--  ambari-agent/src/main/python/ambari_agent/HeartbeatHandlers_windows.py  58
-rw-r--r--  ambari-agent/src/main/python/ambari_agent/HeartbeatStopHandler_linux.py  91
-rw-r--r--  ambari-agent/src/main/python/ambari_agent/HostCheckReportFileHandler.py  5
-rw-r--r--  ambari-agent/src/main/python/ambari_agent/HostInfo.py  395
-rw-r--r--  ambari-agent/src/main/python/ambari_agent/HostInfo_linux.py  411
-rw-r--r--  ambari-agent/src/main/python/ambari_agent/HostInfo_win.py  231
-rw-r--r--  ambari-agent/src/main/python/ambari_agent/LiveStatus.py  2
-rw-r--r--  ambari-agent/src/main/python/ambari_agent/NetUtil.py  30
-rw-r--r--  ambari-agent/src/main/python/ambari_agent/PackagesAnalyzer.py  14
-rw-r--r--  ambari-agent/src/main/python/ambari_agent/PythonExecutor.py  37
-rw-r--r--  ambari-agent/src/main/python/ambari_agent/StatusCheck.py  6
-rw-r--r--  ambari-agent/src/main/python/ambari_agent/hostname.py  8
-rw-r--r--  ambari-agent/src/main/python/ambari_agent/main.py  107
-rw-r--r--  ambari-agent/src/main/python/ambari_agent/security.py  28
-rw-r--r--  ambari-agent/src/main/python/ambari_agent/shell.py  113
-rw-r--r--  ambari-agent/src/packages/windows.xml  82
-rw-r--r--  ambari-agent/src/test/python/ambari_agent/TestAlerts.py  34
-rw-r--r--  ambari-agent/src/test/python/ambari_agent/TestCertGeneration.py  1
-rw-r--r--  ambari-agent/src/test/python/ambari_agent/TestController.py  6
-rw-r--r--  ambari-agent/src/test/python/ambari_agent/TestCustomServiceOrchestrator.py  41
-rw-r--r--  ambari-agent/src/test/python/ambari_agent/TestHostname.py  2
-rw-r--r--  ambari-agent/src/test/python/ambari_agent/TestMain.py  23
-rw-r--r--  ambari-agent/src/test/python/ambari_agent/TestNetUtil.py  13
-rw-r--r--  ambari-agent/src/test/python/ambari_agent/TestStatusCheck.py  16
-rw-r--r--  ambari-agent/src/test/python/resource_management/TestContentSources.py  2
-rw-r--r--  ambari-agent/src/test/python/resource_management/TestDirectoryResource.py  2
-rw-r--r--  ambari-agent/src/test/python/resource_management/TestExecuteHadoopResource.py  2
-rw-r--r--  ambari-agent/src/test/python/resource_management/TestExecuteResource.py  1
-rw-r--r--  ambari-agent/src/test/python/resource_management/TestMonitorWebserverResource.py  1
-rw-r--r--  ambari-agent/src/test/python/resource_management/TestSubstituteVars.py  2
-rw-r--r--  ambari-agent/src/test/python/unitTests.py  6
-rw-r--r--  ambari-client/groovy-client/pom.xml  1
-rw-r--r--  ambari-client/python-client/pom.xml  6
-rw-r--r--  ambari-common/src/main/python/ambari_commons/ambari_service.py  79
-rw-r--r--  ambari-common/src/main/python/ambari_commons/exceptions.py  35
-rw-r--r--  ambari-common/src/main/python/ambari_commons/inet_utils.py  148
-rw-r--r--  ambari-common/src/main/python/ambari_commons/logging_utils.py  49
-rw-r--r--  ambari-common/src/main/python/ambari_commons/os_check.py  126
-rw-r--r--  ambari-common/src/main/python/ambari_commons/os_linux.py  81
-rw-r--r--  ambari-common/src/main/python/ambari_commons/os_utils.py  102
-rw-r--r--  ambari-common/src/main/python/ambari_commons/os_windows.py  563
-rw-r--r--  ambari-common/src/main/python/ambari_commons/resources/os_family.json  11
-rw-r--r--  ambari-common/src/main/python/ambari_commons/str_utils.py  30
-rw-r--r--  ambari-common/src/main/python/resource_management/core/logger.py  41
-rw-r--r--  ambari-common/src/main/python/resource_management/core/providers/__init__.py  6
-rw-r--r--  ambari-common/src/main/python/resource_management/core/providers/windows/__init__.py  20
-rw-r--r--  ambari-common/src/main/python/resource_management/core/providers/windows/service.py  65
-rw-r--r--  ambari-common/src/main/python/resource_management/core/providers/windows/system.py  382
-rw-r--r--  ambari-common/src/main/python/resource_management/libraries/functions/__init__.py  9
-rw-r--r--  ambari-common/src/main/python/resource_management/libraries/functions/default.py  8
-rw-r--r--  ambari-common/src/main/python/resource_management/libraries/functions/get_unique_id_and_date.py  15
-rw-r--r--  ambari-common/src/main/python/resource_management/libraries/functions/install_hdp_msi.py  182
-rw-r--r--  ambari-common/src/main/python/resource_management/libraries/functions/reload_windows_env.py  48
-rw-r--r--  ambari-common/src/main/python/resource_management/libraries/functions/tar_archive.py  30
-rw-r--r--  ambari-common/src/main/python/resource_management/libraries/functions/windows_service_utils.py  42
-rw-r--r--  ambari-common/src/main/python/resource_management/libraries/functions/zip_archive.py  40
-rw-r--r--  ambari-common/src/main/python/resource_management/libraries/providers/__init__.py  3
-rw-r--r--  ambari-common/src/main/python/resource_management/libraries/providers/xml_config.py  13
-rw-r--r--  ambari-common/src/main/python/resource_management/libraries/script/script.py  183
-rw-r--r--  ambari-server/conf/unix/ambari.properties  1
-rw-r--r--  ambari-server/conf/windows/ambari-env.cmd  19
-rw-r--r--  ambari-server/conf/windows/ambari.properties  82
-rw-r--r--  ambari-server/conf/windows/ca.config  29
-rw-r--r--  ambari-server/conf/windows/install-helper.cmd  61
-rw-r--r--  ambari-server/conf/windows/krb5JAASLogin.conf  12
-rw-r--r--  ambari-server/conf/windows/log4j.properties  68
-rw-r--r--  ambari-server/docs/api/v1/clusters-cluster.md  372
-rw-r--r--  ambari-server/pom.xml  194
-rw-r--r--  ambari-server/src/main/assemblies/server-windows.xml  183
-rw-r--r--  ambari-server/src/main/java/org/apache/ambari/server/DBConnectionVerification.java  8
-rw-r--r--  ambari-server/src/main/java/org/apache/ambari/server/api/services/ComponentService.java  6
-rw-r--r--  ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorRunner.java  13
-rw-r--r--  ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java  53
-rw-r--r--  ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractPropertyProvider.java  12
-rw-r--r--  ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java  349
-rw-r--r--  ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java  5
-rw-r--r--  ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java  2
-rw-r--r--  ambari-server/src/main/java/org/apache/ambari/server/controller/sql/HostInfoProvider.java  63
-rw-r--r--  ambari-server/src/main/java/org/apache/ambari/server/controller/sql/SQLPropertyProvider.java  572
-rw-r--r--  ambari-server/src/main/java/org/apache/ambari/server/controller/sql/SinkConnectionFactory.java  132
-rw-r--r--  ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/PropertyHelper.java  6
-rw-r--r--  ambari-server/src/main/java/org/apache/ambari/server/resources/ResourceManager.java  2
-rw-r--r--  ambari-server/src/main/java/org/apache/ambari/server/security/CertificateManager.java  37
-rw-r--r--  ambari-server/src/main/package/msi/ambari-server.wxs  60
-rw-r--r--  ambari-server/src/main/python/ambari-server-windows.py  601
-rwxr-xr-x  ambari-server/src/main/python/ambari-server.py  12
-rw-r--r--  ambari-server/src/main/python/ambari_server/dbConfiguration.py  213
-rw-r--r--  ambari-server/src/main/python/ambari_server/dbConfiguration_linux.py  740
-rw-r--r--  ambari-server/src/main/python/ambari_server/dbConfiguration_windows.py  461
-rw-r--r--  ambari-server/src/main/python/ambari_server/properties.py  223
-rw-r--r--  ambari-server/src/main/python/ambari_server/serverConfiguration.py  589
-rw-r--r--  ambari-server/src/main/python/ambari_server/serverConfiguration_linux.py  67
-rw-r--r--  ambari-server/src/main/python/ambari_server/serverConfiguration_windows.py  98
-rw-r--r--  ambari-server/src/main/python/ambari_server/serverSetup.py  533
-rw-r--r--  ambari-server/src/main/python/ambari_server/serverSetup_linux.py  795
-rw-r--r--  ambari-server/src/main/python/ambari_server/serverSetup_windows.py  313
-rw-r--r--  ambari-server/src/main/python/ambari_server/setupActions.py  39
-rw-r--r--  ambari-server/src/main/python/ambari_server/setupSecurity.py  1216
-rw-r--r--  ambari-server/src/main/python/ambari_server/userInput.py  110
-rw-r--r--  ambari-server/src/main/python/ambari_server/utils.py  40
-rw-r--r--  ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql  694
-rw-r--r--  ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATELOCAL.sql  128
-rw-r--r--  ambari-server/src/main/resources/Ambari-DDL-SQLServer-DROP.sql  203
-rw-r--r--  ambari-server/src/main/resources/custom_actions/check_host.py  81
-rw-r--r--  ambari-server/src/main/resources/sqlserver_properties.json  23463
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/configuration/cluster-env.xml  111
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/after-INSTALL/scripts/hook.py  66
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/after-INSTALL/scripts/params.py  101
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/after-INSTALL/templates/cluster.properties.j2  38
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-ANY/scripts/hook.py  30
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-ANY/scripts/params.py  27
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-ANY/scripts/setup_jdk.py  49
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-INSTALL/scripts/hook.py  32
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-INSTALL/scripts/params.py  22
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-RESTART/scripts/hook.py  28
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-START/scripts/hook.py  33
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-START/scripts/params.py  22
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/metainfo.xml  22
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/repos/repoinfo.xml  26
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/role_command_order.json  101
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/falcon-env.xml  109
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/falcon-runtime.properties.xml  47
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/falcon-startup.properties.xml  207
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/oozie-site.xml  167
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/metainfo.xml  106
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/package/scripts/falcon.py  41
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/package/scripts/falcon_client.py  37
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/package/scripts/falcon_server.py  44
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/package/scripts/params.py  45
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/package/scripts/service_check.py  33
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/package/scripts/service_mapping.py  20
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/package/templates/client.properties.j2  42
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-env.xml  127
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-policy.xml  53
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-site.xml  318
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/metainfo.xml  128
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/metrics.json  4659
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/files/draining_servers.rb  164
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase.py  29
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_client.py  36
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_decommission.py  66
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_master.py  52
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_regionserver.py  48
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/params.py  36
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/service_check.py  34
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/service_mapping.py  21
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/core-site.xml  202
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-env.xml  41
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-policy.xml  219
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hdfs-site.xml  272
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/metainfo.xml  162
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/metrics.json  2126
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/datanode.py  49
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/hdfs.py  60
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/hdfs_client.py  41
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/hdfs_rebalance.py  130
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/journalnode.py  48
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/namenode.py  128
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/params.py  65
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/service_check.py  55
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/service_mapping.py  24
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/snamenode.py  48
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/zkfc_slave.py  51
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/templates/exclude_hosts_list.j2  21
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/templates/hadoop-metrics2.properties.j2  53
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/hive-env.xml  105
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/hive-site.xml  291
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/webhcat-site.xml  109
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/etc/hive-schema-0.12.0.mysql.sql  777
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/etc/hive-schema-0.12.0.oracle.sql  717
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/etc/hive-schema-0.12.0.postgres.sql  1405
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/metainfo.xml  210
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/files/templetonSmoke.sh  96
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/hcat_client.py  40
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/hcat_service_check.py  25
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/hive.py  33
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/hive_client.py  40
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/hive_metastore.py  53
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/hive_server.py  52
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/mysql_server.py  46
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/params.py  39
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/service_check.py  39
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/service_mapping.py  23
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/webhcat.py  30
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/webhcat_server.py  48
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/webhcat_service_check.py  27
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/templates/webhcat-env.sh.j2  62
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-env.xml  140
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-log4j.xml  96
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-site.xml  502
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/metainfo.xml  126
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/package/scripts/oozie.py  36
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/package/scripts/oozie_client.py  40
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/package/scripts/oozie_server.py  66
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/package/scripts/params.py  33
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/package/scripts/service_check.py  32
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/package/scripts/service_mapping.py  21
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/configuration/pig-log4j.xml  61
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/configuration/pig-properties.xml  262
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/metainfo.xml  75
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/package/scripts/params.py  37
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/package/scripts/pig.py  49
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/package/scripts/pig_client.py  41
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/package/scripts/service_check.py  34
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/configuration/sqoop-site.xml  156
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/metainfo.xml  80
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/package/scripts/params.py  32
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/package/scripts/service_check.py  34
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/package/scripts/sqoop.py  30
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/package/scripts/sqoop_client.py  41
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/configuration/storm-site.xml  651
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/metainfo.xml  92
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/metrics.json  99
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/package/scripts/nimbus.py  50
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/package/scripts/params.py  30
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/package/scripts/service_check.py  32
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/package/scripts/service_mapping.py  22
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/package/scripts/status_params.py  22
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/package/scripts/storm.py  33
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/package/scripts/supervisor.py  50
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/package/scripts/ui_server.py  51
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/package/scripts/yaml_config.py  67
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/TEZ/configuration/tez-site.xml  218
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/TEZ/metainfo.xml  63
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/TEZ/package/scripts/params.py  30
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/TEZ/package/scripts/tez.py  30
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/TEZ/package/scripts/tez_client.py  41
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/configuration-mapred/mapred-site.xml  239
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/configuration/capacity-scheduler.xml  114
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/configuration/yarn-site.xml  214
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/metainfo.xml  224
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/metrics.json  3138
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/files/validateYarnComponentStatus.py  161
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/application_timeline_server.py  54
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/historyserver.py  53
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/mapred_service_check.py  105
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/mapreduce2_client.py  43
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/nodemanager.py  53
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/params.py  57
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/resourcemanager.py  77
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/service_check.py  68
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/service_mapping.py  26
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/yarn.py  44
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/yarn_client.py  44
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/templates/container-executor.cfg.j2  40
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/templates/exclude_hosts_list.j2  21
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/templates/mapreduce.conf.j2  35
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/templates/taskcontroller.cfg.j2  38
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/templates/yarn.conf.j2  35
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/configuration/zookeeper-env.xml  73
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/configuration/zookeeper-log4j.xml  100
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/metainfo.xml  78
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/package/scripts/params.py  59
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/package/scripts/service_check.py  34
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/package/scripts/service_mapping.py  22
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/package/scripts/zookeeper.py  63
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/package/scripts/zookeeper_client.py  42
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/package/scripts/zookeeper_server.py  51
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/package/templates/configuration.xsl.j2  42
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/package/templates/zoo.cfg.j2  69
-rw-r--r--  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/stack_advisor.py  414
-rw-r--r--  ambari-server/src/main/windows/ambari-server.cmd  2
-rw-r--r--  ambari-server/src/main/windows/ambari-server.ps1  303
-rw-r--r--  ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java  5
-rw-r--r--  ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProviderTest.java  3
-rw-r--r--  ambari-server/src/test/python/TestAmbariServer.py  2
-rw-r--r--  ambari-server/src/test/python/TestBootstrap.py  1
-rw-r--r--  ambari-server/src/test/python/TestCheckHost.py  53
-rw-r--r--  ambari-server/src/test/python/TestOSCheck.py  22
-rw-r--r--  ambari-server/src/test/python/stacks/1.3.2/HDFS/test_datanode.py  2
-rw-r--r--  ambari-server/src/test/python/stacks/1.3.2/hooks/before-ANY/test_before_any.py  4
-rw-r--r--  ambari-server/src/test/python/stacks/1.3.2/hooks/before-INSTALL/test_before_install.py  2
-rw-r--r--  ambari-server/src/test/python/stacks/1.3.2/hooks/before-START/test_before_start.py  4
-rw-r--r--  ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py  2
-rw-r--r--  ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py  8
-rw-r--r--  ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py  2
-rw-r--r--  ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py  4
-rw-r--r--  ambari-server/src/test/python/stacks/2.0.6/hooks/before-INSTALL/test_before_install.py  2
-rw-r--r--  ambari-server/src/test/python/stacks/2.0.6/hooks/before-START/test_before_start.py  4
-rw-r--r--  ambari-server/src/test/python/stacks/2.1/configs/default-storm-start.json  516
-rw-r--r--  ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json.orig  374
-rw-r--r--  ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json.rej  527
-rw-r--r--  ambari-server/src/test/python/unitTests.py  6
-rw-r--r--  ambari-shell/ambari-python-shell/pom.xml  28
-rw-r--r--  ambari-shell/pom.xml  17
-rw-r--r--  ambari-views/pom.xml  3
-rw-r--r--  ambari-web/app/app.js  20
-rw-r--r--  ambari-web/app/assets/data/configuration/cluster_env_site.json  14
-rw-r--r--  ambari-web/app/config.js  2
-rw-r--r--  ambari-web/app/controllers/global/cluster_controller.js  1
-rw-r--r--  ambari-web/app/controllers/installer.js  4
-rw-r--r--  ambari-web/app/controllers/main/admin/serviceAccounts_controller.js  4
-rw-r--r--  ambari-web/app/controllers/main/charts/heatmap.js  33
-rw-r--r--  ambari-web/app/controllers/main/host/add_controller.js  8
-rw-r--r--  ambari-web/app/controllers/main/service/info/configs.js  276
-rw-r--r--  ambari-web/app/controllers/wizard.js  17
-rw-r--r--  ambari-web/app/controllers/wizard/step2_controller.js  6
-rw-r--r--  ambari-web/app/controllers/wizard/step4_controller.js  12
-rw-r--r--  ambari-web/app/controllers/wizard/step5_controller.js  4
-rw-r--r--  ambari-web/app/controllers/wizard/step7_controller.js  24
-rw-r--r--  ambari-web/app/controllers/wizard/step8_controller.js  164
-rw-r--r--  ambari-web/app/controllers/wizard/step9_controller.js  2
-rw-r--r--  ambari-web/app/data/HDP2/site_properties.js  442
-rw-r--r--  ambari-web/app/messages.js  2
-rw-r--r--  ambari-web/app/models/quick_links.js  5
-rw-r--r--  ambari-web/app/models/service_config.js  53
-rw-r--r--  ambari-web/app/models/stack_service.js  42
-rw-r--r--  ambari-web/app/templates/main/dashboard/widgets.hbs  12
-rw-r--r--  ambari-web/app/templates/main/host/summary.hbs  36
-rw-r--r--  ambari-web/app/templates/main/service/info/summary.hbs  21
-rw-r--r--  ambari-web/app/templates/wizard/step2.hbs  70
-rw-r--r--  ambari-web/app/utils/ajax/ajax.js  12
-rw-r--r--  ambari-web/app/utils/config.js  5
-rw-r--r--  ambari-web/app/utils/validator.js  6
-rw-r--r--  ambari-web/app/views/common/quick_view_link_view.js  2
-rw-r--r--  ambari-web/app/views/main/admin.js  12
-rw-r--r--  ambari-web/app/views/main/dashboard/widgets.js  36
-rw-r--r--  ambari-web/app/views/main/host/summary.js  7
-rw-r--r--  ambari-web/app/views/main/menu.js  12
-rw-r--r--  ambari-web/app/views/main/service/info/summary.js  17
-rw-r--r--  ambari-web/app/views/wizard/controls_view.js  102
-rw-r--r--  ambari-web/app/views/wizard/step1_view.js  12
-rw-r--r--  ambari-web/pom.xml  88
-rw-r--r--  ambari-web/test/controllers/main/host/add_controller_test.js  2
-rw-r--r--  ambari-web/test/views/main/dashboard/widgets_test.js  23
-rw-r--r--  contrib/ambari-scom/metrics-sink/db/Hadoop-Metrics-SQLServer-CREATE.sql (renamed from contrib/ambari-scom/metrics-sink/db/Hadoop-Metrics-SQLServer-CREATE.ddl)  123
-rw-r--r--  contrib/ambari-scom/metrics-sink/db/Hadoop-Metrics-SQLServer-CREATELOCAL.sql  140
-rw-r--r--  contrib/ambari-scom/metrics-sink/db/Hadoop-Metrics-SQLServer-DROP.sql  161
-rw-r--r--  contrib/views/jobs/pom.xml  34
-rw-r--r--  contrib/views/slider/pom.xml  91
-rw-r--r--  pom.xml  48
350 files changed, 66191 insertions, 2109 deletions
diff --git a/ambari-admin/pom.xml b/ambari-admin/pom.xml
index 3228c2595b..8251860dda 100644
--- a/ambari-admin/pom.xml
+++ b/ambari-admin/pom.xml
@@ -99,9 +99,9 @@
</goals>
<configuration>
<workingDirectory>${basedir}/src/main/resources/ui/admin-web</workingDirectory>
- <executable>node/node</executable>
+ <executable>${basedir}/src/main/resources/ui/admin-web/node/${executable.node}</executable>
<arguments>
- <argument>node_modules/bower/bin/bower</argument>
+ <argument>${basedir}/src/main/resources/ui/admin-web/node_modules/bower/bin/bower</argument>
<argument>install</argument>
<argument>--allow-root</argument>
</arguments>
@@ -115,9 +115,9 @@
</goals>
<configuration>
<workingDirectory>${basedir}/src/main/resources/ui/admin-web</workingDirectory>
- <executable>node/node</executable>
+ <executable>${basedir}/src/main/resources/ui/admin-web/node/${executable.node}</executable>
<arguments>
- <argument>node_modules/gulp/bin/gulp</argument>
+ <argument>${basedir}/src/main/resources/ui/admin-web/node_modules/gulp/bin/gulp</argument>
<argument>build</argument>
</arguments>
</configuration>
@@ -215,4 +215,30 @@
</resource>
</resources>
</build>
+ <profiles>
+ <profile>
+ <id>windows</id>
+ <activation>
+ <os>
+ <family>win</family>
+ </os>
+ </activation>
+ <properties>
+ <envClassifier>win</envClassifier>
+ <executable.node>node.exe</executable.node>
+ </properties>
+ </profile>
+ <profile>
+ <id>linux</id>
+ <activation>
+ <os>
+ <family>unix</family>
+ </os>
+ </activation>
+ <properties>
+ <envClassifier>linux</envClassifier>
+ <executable.node>node</executable.node>
+ </properties>
+ </profile>
+ </profiles>
</project>
\ No newline at end of file
diff --git a/ambari-agent/conf/windows/ambari-agent.cmd b/ambari-agent/conf/windows/ambari-agent.cmd
new file mode 100644
index 0000000000..e159be4a23
--- /dev/null
+++ b/ambari-agent/conf/windows/ambari-agent.cmd
@@ -0,0 +1,18 @@
+@echo off
+REM Licensed to the Apache Software Foundation (ASF) under one or more
+REM contributor license agreements. See the NOTICE file distributed with
+REM this work for additional information regarding copyright ownership.
+REM The ASF licenses this file to You under the Apache License, Version 2.0
+REM (the "License"); you may not use this file except in compliance with
+REM the License. You may obtain a copy of the License at
+REM
+REM http://www.apache.org/licenses/LICENSE-2.0
+REM
+REM Unless required by applicable law or agreed to in writing, software
+REM distributed under the License is distributed on an "AS IS" BASIS,
+REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+REM See the License for the specific language governing permissions and
+REM limitations under the License.
+
+call ambari-env.cmd
+powershell -ExecutionPolicy unrestricted -File ambari-agent.ps1 %*
diff --git a/ambari-agent/conf/windows/ambari-agent.ini b/ambari-agent/conf/windows/ambari-agent.ini
new file mode 100644
index 0000000000..377dbf4f93
--- /dev/null
+++ b/ambari-agent/conf/windows/ambari-agent.ini
@@ -0,0 +1,54 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under the License.
+
+[server]
+hostname=localhost
+url_port=8440
+secured_url_port=8441
+
+[agent]
+prefix=data
+tmp_dir=\\var\\lib\\ambari-agent\\data\\tmp
+;loglevel=(DEBUG/INFO)
+loglevel=INFO
+data_cleanup_interval=86400
+data_cleanup_max_age=2592000
+data_cleanup_max_size_MB = 100
+ping_port=8670
+cache_dir=cache
+tolerate_download_failures=true
+
+[command]
+maxretries=2
+sleepBetweenRetries=1
+
+[security]
+keysdir=keys
+server_crt=ca.crt
+passphrase_env_var_name=AMBARI_PASSPHRASE
+
+[services]
+pidLookupPath=\\var\\run\\ambari-agent
+
+[heartbeat]
+state_interval=6
+dirs=/etc/hadoop,/etc/hadoop/conf,/etc/hbase,/etc/hcatalog,/etc/hive,/etc/oozie,
+ /etc/sqoop,/etc/ganglia,/etc/nagios,
+ /var/run/hadoop,/var/run/zookeeper,/var/run/hbase,/var/run/templeton,/var/run/oozie,
+ /var/log/hadoop,/var/log/zookeeper,/var/log/hbase,/var/log/templeton,/var/log/hive,
+ /var/log/nagios
+rpms=nagios,ganglia,
+ hadoop,hadoop-lzo,hbase,oozie,sqoop,pig,zookeeper,hive,libconfuse,ambari-log4j
+; 0 - unlimited
+log_lines_count=300
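
The sections above are consumed on the agent side by the AmbariConfig module. As a minimal sketch of how such a file is read — assuming Python 2's ConfigParser, which the rest of this codebase targets, and an illustrative relative path rather than the agent's actual lookup logic:

  # Minimal sketch: reading ambari-agent.ini with Python 2's ConfigParser.
  import ConfigParser

  config = ConfigParser.RawConfigParser()
  config.read("ambari-agent.ini")  # illustrative path

  hostname = config.get("server", "hostname")     # "localhost"
  url_port = config.getint("server", "url_port")  # 8440
  loglevel = config.get("agent", "loglevel")      # "INFO"

RawConfigParser treats the indented continuation lines under dirs= and rpms= as part of a single multi-line value, and lines starting with ; as comments.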
diff --git a/ambari-agent/conf/windows/ambari-agent.ps1 b/ambari-agent/conf/windows/ambari-agent.ps1
new file mode 100644
index 0000000000..69585232bd
--- /dev/null
+++ b/ambari-agent/conf/windows/ambari-agent.ps1
@@ -0,0 +1,245 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+# description: ambari-agent service
+# processname: ambari-agent
+
+$VERSION="1.3.0-SNAPSHOT"
+$HASH="testhash"
+
+switch ($($args[0])){
+ "--version" {
+ echo "$VERSION"
+ exit 0
+ }
+ "--hash" {
+ echo "$HASH"
+ exit 0
+ }
+}
+
+$AMBARI_AGENT="ambari-agent"
+$AMBARI_SVC_NAME = "Ambari Agent"
+$current_directory = (Get-Item -Path ".\" -Verbose).FullName
+#environment variables used in python; check if they exist, otherwise set them to $current_directory
+#and pass to child python process
+$Env:PYTHONPATH="$current_directory\sbin;$($Env:PYTHONPATH)"
+$Env:PYTHON = "python.exe"
+
+$AMBARI_LOG_DIR="\var\log\ambari-agent"
+$OUTFILE_STDOUT=Join-Path -path $AMBARI_LOG_DIR -childpath "ambari-agent.stdout"
+$OUTFILE_STDERR=Join-Path -path $AMBARI_LOG_DIR -childpath "ambari-agent.stderr"
+$LOGFILE=Join-Path -path $AMBARI_LOG_DIR -childpath "ambari-agent.log"
+$AMBARI_AGENT_PY_SCRIPT=Join-Path -path $PSScriptRoot -childpath "sbin\service_wrapper.py"
+if($AMBARI_AGENT_PY_SCRIPT.Contains(' '))
+{
+ $AMBARI_AGENT_PY_SCRIPT = """" + $AMBARI_AGENT_PY_SCRIPT + """"
+}
+
+$OK=1
+$NOTOK=0
+
+$retcode=0
+
+function _exit($code)
+{
+ $host.SetShouldExit($code)
+ exit $code
+}
+
+function _detect_python()
+{
+ if(![boolean]$(Get-Command $Env:PYTHON -ErrorAction SilentlyContinue))
+ {
+ echo "ERROR: Can not find python.exe in PATH. Add python executable to PATH and try again."
+ _exit(1)
+ }
+}
+
+function _echo([switch]$off)
+{
+ if($off)
+ {
+ try
+ {
+ stop-transcript|out-null
+ }
+ catch [System.InvalidOperationException]
+ {}
+ }
+ else
+ {
+ try
+ {
+ start-transcript|out-null
+ }
+ catch [System.InvalidOperationException]
+ {}
+ }
+}
+
+Function _pstart_brief($cmd_args)
+{
+ #start python with -u to make stdout and stderr unbuffered
+ $arguments = @("-u",$AMBARI_AGENT_PY_SCRIPT) + $cmd_args
+
+ $psi = New-Object System.Diagnostics.ProcessStartInfo
+
+ $psi.RedirectStandardError = $True
+ $psi.RedirectStandardOutput = $True
+
+ $psi.UseShellExecute = $False
+
+ $psi.FileName = $Env:PYTHON
+ $psi.Arguments = $arguments
+ #$psi.WindowStyle = WindowStyle.Hidden
+
+ $process = [Diagnostics.Process]::Start($psi)
+
+ $process.WaitForExit()
+
+ Write-Output $process.StandardOutput.ReadToEnd()
+}
+
+Function _start($cmd_args)
+{
+ echo "Starting $AMBARI_SVC_NAME..."
+ _echo -off
+
+ _pstart_brief($cmd_args)
+
+ $cnt = 0
+ do
+ {
+ Start-Sleep -Milliseconds 250
+ $svc = Get-Service -Name $AMBARI_SVC_NAME
+ $cnt += 1
+ if ($cnt -eq 120)
+ {
+ echo "$AMBARI_SVC_NAME still starting...".
+ return
+ }
+ }
+ until($svc.status -eq "Running")
+
+ echo "$AMBARI_SVC_NAME is running"
+}
+
+Function _pstart($cmd_args)
+{
+ New-Item -ItemType Directory -Force -Path $AMBARI_LOG_DIR | Out-Null
+ $arguments = @($AMBARI_AGENT_PY_SCRIPT) + $cmd_args
+ $p = New-Object System.Diagnostics.Process
+ $p.StartInfo.UseShellExecute = $false
+ $p.StartInfo.FileName = $Env:PYTHON
+ $p.StartInfo.Arguments = $arguments
+ [void]$p.Start();
+
+ echo "Verifying $AMBARI_AGENT process status..."
+ if (!$p){
+ echo "ERROR: $AMBARI_AGENT start failed"
+ $host.SetShouldExit(-1)
+ exit
+ }
+ echo "Agent log at: $LOGFILE"
+
+ $p.WaitForExit()
+}
+
+Function _pstart_ioredir($cmd_args)
+{
+ New-Item -ItemType Directory -Force -Path $AMBARI_LOG_DIR | Out-Null
+
+ #start python with -u to make stdout and stderr unbuffered
+ $arguments = @("-u",$AMBARI_AGENT_PY_SCRIPT) + $cmd_args
+ $process = Start-Process -FilePath $Env:PYTHON -ArgumentList $arguments -WindowStyle Hidden -RedirectStandardError $OUTFILE_STDERR -RedirectStandardOutput $OUTFILE_STDOUT -PassThru
+ echo "Verifying $AMBARI_AGENT process status..."
+ if (!$process){
+ echo "ERROR: $AMBARI_AGENT start failed"
+ $host.SetShouldExit(-1)
+ exit
+ }
+ echo "Agent stdout at: $OUTFILE_STDOUT"
+ echo "Agent stderr at: $OUTFILE_STDERR"
+ echo "Agent log at: $LOGFILE"
+
+ $process.WaitForExit()
+}
+
+Function _stop($cmd_args){
+ echo "Stopping $AMBARI_SVC_NAME..."
+
+ _pstart_brief($cmd_args)
+
+ $cnt = 0
+ do
+ {
+ Start-Sleep -Milliseconds 250
+ $svc = Get-Service -Name $AMBARI_SVC_NAME
+ $cnt += 1
+ if ($cnt -eq 40)
+ {
+ echo "$AMBARI_SVC_NAME still stopping...".
+ return
+ }
+ }
+ until($svc.status -eq "Stopped")
+
+ echo "$AMBARI_SVC_NAME is stopped"
+}
+
+Function _status($cmd_args){
+ echo "Getting $AMBARI_SVC_NAME status..."
+
+ _pstart_brief($cmd_args)
+}
+
+# check for python before any action
+_detect_python
+switch ($($args[0])){
+ "start"
+ {
+ _start $args
+ }
+ "debug"
+ {
+ echo "Starting ambari-agent"
+ _pstart_ioredir $args
+ echo "Ambari Agent finished"
+ }
+ "stop" {_stop $args}
+ "restart"
+ {
+ _stop @("stop")
+ _start @("start")
+ }
+ "status" {_status $args}
+ "setup"
+ {
+ echo "Installing ambari-agent"
+ _pstart $args
+ echo "Ambari Agent installation finished"
+ }
+ default
+ {
+ echo "Usage: ambari-agent {start|stop|restart|setup|status} [options]"
+ echo "Use ambari-agent <action> --help to get details on options available."
+ echo "Or, simply invoke ambari-agent.py --help to print the options."
+ $retcode=1
+ }
+}
+
+$host.SetShouldExit($retcode)
+exit
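
Both _start and _stop above poll Get-Service every 250 ms and bail out after 120 and 40 iterations respectively (roughly 30 and 10 seconds). For comparison, the same wait can be expressed from Python with pywin32, which the service wrapper below already depends on — a sketch only, with a placeholder service name:

  # Rough Python equivalent of the _start polling loop, using pywin32.
  import time
  import win32service
  import win32serviceutil

  def wait_until_running(svc_name, attempts=120, interval=0.25):
      for _ in range(attempts):
          time.sleep(interval)
          # QueryServiceStatus returns a status tuple; index 1 is the current state
          if win32serviceutil.QueryServiceStatus(svc_name)[1] == win32service.SERVICE_RUNNING:
              return True
      return False

  # wait_until_running("Ambari Agent")  # placeholder service name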
diff --git a/ambari-agent/conf/windows/ambari-env.cmd b/ambari-agent/conf/windows/ambari-env.cmd
new file mode 100644
index 0000000000..6e0c317d93
--- /dev/null
+++ b/ambari-agent/conf/windows/ambari-env.cmd
@@ -0,0 +1,22 @@
+@echo off
+REM Licensed to the Apache Software Foundation (ASF) under one or more
+REM contributor license agreements. See the NOTICE file distributed with
+REM this work for additional information regarding copyright ownership.
+REM The ASF licenses this file to You under the Apache License, Version 2.0
+REM (the "License"); you may not use this file except in compliance with
+REM the License. You may obtain a copy of the License at
+REM
+REM http://www.apache.org/licenses/LICENSE-2.0
+REM
+REM Unless required by applicable law or agreed to in writing, software
+REM distributed under the License is distributed on an "AS IS" BASIS,
+REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+REM See the License for the specific language governing permissions and
+REM limitations under the License.
+
+set SERVICE_NAME=Ambari Agent
+REM REM set AMBARI_AGENT_CONF_DIR=C:\work\test\agent
+REM set AMBARI_AGENT_LOG_DIR=logs
+set AGENT_SERVICE_WRAPPER=sbin\service_wrapper.py
+REM python exe that will be used for command execution (must have access to pywin32 and agent python code)
+set PYTHON_EXE=C:\Python27\python.exe
diff --git a/ambari-agent/conf/windows/createservice.ps1 b/ambari-agent/conf/windows/createservice.ps1
new file mode 100644
index 0000000000..f3936e2851
--- /dev/null
+++ b/ambari-agent/conf/windows/createservice.ps1
@@ -0,0 +1,195 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+param(
+ [String]
+ [Parameter(Mandatory=$true )]
+ $username,
+ [String]
+ [Parameter(Mandatory=$true )]
+ $password,
+ [String]
+ [Parameter(Mandatory=$true )]
+ $servicename,
+ [String]
+ [Parameter(Mandatory=$true )]
+ $hdpResourcesDir,
+ [String]
+ [Parameter(Mandatory=$true )]
+ $servicecmdpath
+ )
+
+function Invoke-Cmd ($command)
+{
+ Write-Output $command
+ $out = cmd.exe /C "$command" 2>&1
+ Write-Output $out
+ return $out
+}
+
+function Invoke-CmdChk ($command)
+{
+ Write-Output $command
+ $out = cmd.exe /C "$command" 2>&1
+ Write-Output $out
+ if (-not ($LastExitCode -eq 0))
+ {
+ throw "Command `"$out`" failed with exit code $LastExitCode "
+ }
+ return $out
+}
+
+### Stops and deletes the Hadoop service.
+function StopAndDeleteHadoopService(
+ [String]
+ [Parameter( Position=0, Mandatory=$true )]
+ $service
+)
+{
+ Write-Output "Stopping $service"
+ $s = Get-Service $service -ErrorAction SilentlyContinue
+
+ if( $s -ne $null )
+ {
+ Stop-Service $service
+ $cmd = "sc.exe delete $service"
+ Invoke-Cmd $cmd
+ }
+}
+
+# Convenience method for processing command-line credential objects
+# Assumes $credentialsHash is a hash with one of the following being true:
+# - keys "username" and "password"/"passwordBase64" are set to strings
+# - key "credentialFilePath" is set to the path of a serialized PSCredential object
+function Get-HadoopUserCredentials($credentialsHash)
+{
+ Write-Output "Using provided credentials for username $($credentialsHash["username"])" | Out-Null
+ $username = $credentialsHash["username"]
+ if($username -notlike "*\*")
+ {
+ $username = "$ENV:COMPUTERNAME\$username"
+ }
+ $securePassword = $credentialsHash["password"] | ConvertTo-SecureString -AsPlainText -Force
+ $creds = New-Object System.Management.Automation.PSCredential $username, $securePassword
+ return $creds
+}
+
+### Creates and configures the service.
+function CreateAndConfigureHadoopService(
+ [String]
+ [Parameter( Position=0, Mandatory=$true )]
+ $service,
+ [String]
+ [Parameter( Position=1, Mandatory=$true )]
+ $hdpResourcesDir,
+ [String]
+ [Parameter( Position=2, Mandatory=$true )]
+ $serviceBinDir,
+ [String]
+ [Parameter( Position=3, Mandatory=$true )]
+ $servicecmdpath,
+ [System.Management.Automation.PSCredential]
+ [Parameter( Position=4, Mandatory=$true )]
+ $serviceCredential
+)
+{
+ if ( -not ( Get-Service "$service" -ErrorAction SilentlyContinue ) )
+ {
+ Write-Output "Creating service `"$service`" as $serviceBinDir\$service.exe"
+ $xcopyServiceHost_cmd = "copy /Y `"$hdpResourcesDir\namenode.exe`" `"$serviceBinDir\$service.exe`""
+ Invoke-CmdChk $xcopyServiceHost_cmd
+
+ #HadoopServiceHost.exe will write to this log but does not create it
+ #Creating the event log needs to be done from an elevated process, so we do it here
+ if( -not ([Diagnostics.EventLog]::SourceExists( "$service" )))
+ {
+ [Diagnostics.EventLog]::CreateEventSource( "$service", "" )
+ }
+ Write-Output "Adding service $service"
+ if ($serviceCredential.Password.get_Length() -ne 0)
+ {
+ $s = New-Service -Name "$service" -BinaryPathName "$serviceBinDir\$service.exe" -Credential $serviceCredential -DisplayName "Apache Hadoop $service"
+ if ( $s -eq $null )
+ {
+ throw "CreateAndConfigureHadoopService: Service `"$service`" creation failed"
+ }
+ }
+ else
+ {
+ # Separately handle case when password is not provided
+ # this path is used for creating services that run under (AD) Managed Service Account
+ # for them password is not provided and in that case service cannot be created using New-Service commandlet
+ $serviceUserName = $serviceCredential.UserName
+ $cred = $serviceCredential.UserName.Split("\")
+
+ # Throw exception if domain is not specified
+ if (($cred.Length -lt 2) -or ($cred[0] -eq "."))
+ {
+ throw "Environment is not AD or domain is not specified"
+ }
+
+ $cmd="$ENV:WINDIR\system32\sc.exe create `"$service`" binPath= `"$serviceBinDir\$service.exe`" obj= $serviceUserName DisplayName= `"Apache Hadoop $service`" "
+ try
+ {
+ Invoke-CmdChk $cmd
+ }
+ catch
+ {
+ throw "CreateAndConfigureHadoopService: Service `"$service`" creation failed"
+ }
+ }
+
+ $cmd="$ENV:WINDIR\system32\sc.exe failure $service reset= 30 actions= restart/5000"
+ Invoke-CmdChk $cmd
+
+ $cmd="$ENV:WINDIR\system32\sc.exe config $service start= demand"
+ Invoke-CmdChk $cmd
+
+
+ Write-Output "Creating service config ${serviceBinDir}\$service.xml"
+ $cmd = "$servicecmdpath --service $service > `"$serviceBinDir\$service.xml`""
+ Invoke-CmdChk $cmd
+ }
+ else
+ {
+ Write-Output "Service `"$service`" already exists, Removing `"$service`""
+ StopAndDeleteHadoopService $service
+ CreateAndConfigureHadoopService $service $hdpResourcesDir $serviceBinDir $servicecmdpath $serviceCredential
+ }
+}
+
+
+try
+{
+ Write-Output "Creating credential object"
+ ###
+ ### Create the Credential object from the given username and password or the provided credentials file
+ ###
+ $serviceCredential = Get-HadoopUserCredentials -credentialsHash @{"username" = $username; "password" = $password}
+ $username = $serviceCredential.UserName
+ Write-Output "Username: $username"
+
+ Write-Output "Creating service $service"
+ ###
+ ### Create Service
+ ###
+ CreateAndConfigureHadoopService $servicename $hdpResourcesDir $hdpResourcesDir $servicecmdpath $serviceCredential
+ Write-Output "Done"
+}
+catch
+{
+ Write-Output "Failure"
+ exit 1
+}
\ No newline at end of file
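
Besides creating the service, the two sc.exe calls above configure recovery (restart 5000 ms after a failure, reset the failure counter after 30 seconds) and switch the service to manual start. For illustration, the same calls driven from Python — a sketch that reuses only the sc.exe arguments appearing in the script, with a placeholder service name:

  # Illustrative sketch: issuing the script's sc.exe calls via subprocess.
  import subprocess

  def configure_service(service):
      # restart 5000 ms after a failure; reset the failure count after 30 s
      subprocess.check_call(
          ["sc.exe", "failure", service, "reset=", "30", "actions=", "restart/5000"])
      # start on demand (manual) rather than automatically at boot
      subprocess.check_call(["sc.exe", "config", service, "start=", "demand"])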
diff --git a/ambari-agent/conf/windows/service_wrapper.py b/ambari-agent/conf/windows/service_wrapper.py
new file mode 100644
index 0000000000..aaf32caf9f
--- /dev/null
+++ b/ambari-agent/conf/windows/service_wrapper.py
@@ -0,0 +1,227 @@
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+import os
+import optparse
+import sys
+
+import win32serviceutil
+import win32api
+import win32event
+import win32service
+
+from ambari_commons.ambari_service import AmbariService
+from ambari_commons.exceptions import *
+from ambari_commons.logging_utils import *
+from ambari_commons.os_windows import WinServiceController
+from ambari_agent.AmbariConfig import *
+from ambari_agent.HeartbeatHandlers_windows import HeartbeatStopHandler
+
+AMBARI_VERSION_VAR = "AMBARI_VERSION_VAR"
+
+ENV_PYTHONPATH = "PYTHONPATH"
+
+
+def parse_options():
+ # parse env cmd
+ with open(os.path.join(os.getcwd(), "ambari-env.cmd"), "r") as env_cmd:
+ content = env_cmd.readlines()
+ for line in content:
+ if line.startswith("set"):
+ name, value = line[4:].split("=")
+ os.environ[name] = value.rstrip()
+  # check env variables, and fall back to the working dir if no env var was found
+ if not os.environ.has_key("AMBARI_AGENT_CONF_DIR"):
+ os.environ["AMBARI_AGENT_CONF_DIR"] = os.getcwd()
+ if not os.environ.has_key("AMBARI_AGENT_LOG_DIR"):
+ os.environ["AMBARI_AGENT_LOG_DIR"] = os.path.join("\\", "var", "log", "ambari-agent")
+ if not os.path.exists(os.environ["AMBARI_AGENT_LOG_DIR"]):
+ os.makedirs(os.environ["AMBARI_AGENT_LOG_DIR"])
+
+
+class AmbariAgentService(AmbariService):
+ AmbariService._svc_name_ = "Ambari Agent"
+ AmbariService._svc_display_name_ = "Ambari Agent"
+ AmbariService._svc_description_ = "Ambari Agent"
+
+ AmbariService._AdjustServiceVersion()
+
+ heartbeat_stop_handler = None
+
+ def SvcDoRun(self):
+ parse_options()
+ self.redirect_output_streams()
+
+ # Soft dependency on the Windows Time service
+ ensure_time_service_is_started()
+
+ self.heartbeat_stop_handler = HeartbeatStopHandler(self._heventSvcStop)
+
+ self.ReportServiceStatus(win32service.SERVICE_RUNNING)
+
+ from ambari_agent import main
+
+ main.main(self.heartbeat_stop_handler)
+
+ def _InitOptionsParser(self):
+ return init_options_parser()
+
+ def redirect_output_streams(self):
+ self._RedirectOutputStreamsToFile(AmbariConfig.getOutFile())
+ pass
+
+
+def ensure_time_service_is_started():
+ ret = WinServiceController.EnsureServiceIsStarted("W32Time")
+ if 0 != ret:
+    raise FatalException(-1, "Error starting Windows Time service: " + str(ret))
+ pass
+
+
+def ctrlHandler(ctrlType):
+ AmbariAgentService.DefCtrlCHandler()
+ return True
+
+
+def svcsetup():
+ AmbariAgentService.set_ctrl_c_handler(ctrlHandler)
+ AmbariAgentService.Install()
+ pass
+
+
+#
+# Starts the Ambari Agent as a service.
+# Runs the Agent in normal mode, as a Windows service. If the Ambari Agent is
+# not registered as a service, the function fails. By default, only one
+# instance of the service can run.
+#
+def svcstart(options):
+ if 0 != AmbariAgentService.Start(15):
+ options.exit_message = None
+ pass
+
+
+#
+# Stops the Ambari Agent.
+#
+def svcstop(options):
+ if 0 != AmbariAgentService.Stop():
+ options.exit_message = None
+
+
+#
+# The Ambari Agent status.
+#
+def svcstatus(options):
+ options.exit_message = None
+
+ statusStr = AmbariAgentService.QueryStatus()
+ print "Ambari Agent is " + statusStr
+
+
+def svcdebug(options):
+ sys.frozen = 'windows_exe' # Fake py2exe so we can debug
+
+ AmbariAgentService.set_ctrl_c_handler(ctrlHandler)
+ win32serviceutil.HandleCommandLine(AmbariAgentService, options)
+
+
+def init_options_parser():
+ parser = optparse.OptionParser(usage="usage: %prog action [options]", )
+ parser.add_option('-r', '--hostname', dest="host_name", default="localhost",
+ help="Use specified Ambari server host for registration.")
+ parser.add_option('-j', '--java-home', dest="java_home", default=None,
+ help="Use specified java_home. Must be valid on all hosts")
+ parser.add_option("-v", "--verbose",
+ action="store_true", dest="verbose", default=False,
+ help="Print verbose status messages")
+ parser.add_option("-s", "--silent",
+ action="store_true", dest="silent", default=False,
+ help="Silently accepts default prompt values")
+ parser.add_option('--jdbc-driver', default=None,
+ help="Specifies the path to the JDBC driver JAR file for the " \
+ "database type specified with the --jdbc-db option. Used only with --jdbc-db option.",
+ dest="jdbc_driver")
+ return parser
+
+
+#
+# Main.
+#
+def agent_main():
+ parser = init_options_parser()
+ (options, args) = parser.parse_args()
+
+ options.warnings = []
+
+ if len(args) == 0:
+    parser.print_help()
+ parser.error("No action entered")
+
+ action = args[0]
+ possible_args_numbers = [1]
+
+ matches = 0
+ for args_number_required in possible_args_numbers:
+ matches += int(len(args) == args_number_required)
+
+ if matches == 0:
+    parser.print_help()
+ possible_args = ' or '.join(str(x) for x in possible_args_numbers)
+ parser.error("Invalid number of arguments. Entered: " + str(len(args)) + ", required: " + possible_args)
+
+ options.exit_message = "Ambari Agent '%s' completed successfully." % action
+ try:
+ if action == SETUP_ACTION:
+ #TODO Insert setup(options) here upon need
+ svcsetup()
+ elif action == START_ACTION:
+ svcstart(options)
+ elif action == DEBUG_ACTION:
+ svcdebug(options)
+ elif action == STOP_ACTION:
+ svcstop(options)
+ elif action == STATUS_ACTION:
+ svcstatus(options)
+ else:
+ parser.error("Invalid action")
+
+ if options.warnings:
+ for warning in options.warnings:
+ print_warning_msg(warning)
+ pass
+ options.exit_message = "Ambari Agent '%s' completed with warnings." % action
+ pass
+ except FatalException as e:
+ if e.reason is not None:
+ print_error_msg("Exiting with exit code {0}. \nREASON: {1}".format(e.code, e.reason))
+ sys.exit(e.code)
+ except NonFatalException as e:
+ options.exit_message = "Ambari Agent '%s' completed with warnings." % action
+ if e.reason is not None:
+ print_warning_msg(e.reason)
+
+ if options.exit_message is not None:
+ print options.exit_message
+
+
+if __name__ == '__main__':
+ try:
+ agent_main()
+ except (KeyboardInterrupt, EOFError):
+ print("\nAborting ... Keyboard Interrupt.")
+ sys.exit(1)
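
AmbariAgentService extends AmbariService from ambari_commons, which wraps pywin32's ServiceFramework; the Install/Start/Stop/QueryStatus helpers used by svcsetup, svcstart, svcstop and svcstatus come from that layer. For orientation, this is the bare pywin32 pattern underneath — a self-contained sketch with a placeholder name, not the AmbariService implementation:

  # Bare-bones pywin32 service skeleton; AmbariService layers install/start/
  # stop helpers and output redirection on top of this pattern.
  import win32event
  import win32service
  import win32serviceutil

  class MinimalService(win32serviceutil.ServiceFramework):
      _svc_name_ = "MinimalService"          # placeholder
      _svc_display_name_ = "Minimal Service"

      def __init__(self, args):
          win32serviceutil.ServiceFramework.__init__(self, args)
          self.stop_event = win32event.CreateEvent(None, 0, 0, None)

      def SvcStop(self):
          self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
          win32event.SetEvent(self.stop_event)

      def SvcDoRun(self):
          self.ReportServiceStatus(win32service.SERVICE_RUNNING)
          # real work goes here; block until SvcStop signals the event
          win32event.WaitForSingleObject(self.stop_event, win32event.INFINITE)

  if __name__ == '__main__':
      win32serviceutil.HandleCommandLine(MinimalService)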
diff --git a/ambari-agent/pom.xml b/ambari-agent/pom.xml
index 9db343be3a..c7e0b9d5d4 100644
--- a/ambari-agent/pom.xml
+++ b/ambari-agent/pom.xml
@@ -103,19 +103,26 @@
</executions>
</plugin>
<plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <configuration>
+ <skip>${skipSurefireTests}</skip>
+ </configuration>
+ </plugin>
+ <plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>exec-maven-plugin</artifactId>
<version>1.2</version>
<executions>
<execution>
<configuration>
- <executable>${project.basedir}/../ambari-common/src/main/unix/ambari-python-wrap</executable>
+ <executable>${executable.python}</executable>
<workingDirectory>src/test/python</workingDirectory>
<arguments>
<argument>unitTests.py</argument>
</arguments>
<environmentVariables>
- <PYTHONPATH>${project.basedir}/../ambari-common/src/main/python/ambari_jinja2:${project.basedir}/../ambari-common/src/main/python/ambari_commons:${project.basedir}/../ambari-common/src/main/python/resource_management:${project.basedir}/../ambari-common/src/test/python:${project.basedir}/../ambari-common/src/main/python:${project.basedir}/src/main/python/ambari_agent:${project.basedir}/src/test/python/ambari_agent:${project.basedir}/src/test/python/resource_management:${project.basedir}/src/main/python:${project.basedir}/../ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files:${project.basedir}/../ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/files:$PYTHONPATH</PYTHONPATH>
+ <PYTHONPATH>${path.python.1}${pathsep}$PYTHONPATH</PYTHONPATH>
</environmentVariables>
<skip>${skipTests}</skip>
</configuration>
@@ -128,15 +135,15 @@
<execution>
<!-- TODO: Looks like section is unused, maybe remove? -->
<configuration>
- <executable>${project.basedir}/../ambari-common/src/main/unix/ambari-python-wrap</executable>
- <workingDirectory>target/ambari-agent-${project.version}</workingDirectory>
+ <executable>${executable.python}</executable>
+ <workingDirectory>target${dirsep}ambari-agent-${project.version}</workingDirectory>
<arguments>
- <argument>${project.basedir}/src/main/python/setup.py</argument>
+ <argument>${project.basedir}${dirsep}src${dirsep}main${dirsep}python${dirsep}setup.py</argument>
<argument>clean</argument>
<argument>bdist_dumb</argument>
</arguments>
<environmentVariables>
- <PYTHONPATH>target/ambari-agent-${project.version}:$PYTHONPATH</PYTHONPATH>
+ <PYTHONPATH>target${dirsep}ambari-agent-${project.version}${pathsep}$PYTHONPATH</PYTHONPATH>
</environmentVariables>
</configuration>
<id>python-package</id>
@@ -147,14 +154,14 @@
</execution>
<execution>
<configuration>
- <executable>${project.basedir}/../ambari-common/src/main/unix/ambari-python-wrap</executable>
+ <executable>${executable.python}</executable>
<workingDirectory>${basedir}</workingDirectory>
<arguments>
<argument>${resource.keeper.script}</argument>
<argument>${target.cache.dir}</argument>
</arguments>
<environmentVariables>
- <PYTHONPATH>target/ambari-agent-${project.version}:$PYTHONPATH</PYTHONPATH>
+ <PYTHONPATH>target${dirsep}ambari-agent-${project.version}${pathsep}$PYTHONPATH</PYTHONPATH>
</environmentVariables>
</configuration>
<id>generate-hash-files</id>
@@ -213,7 +220,7 @@
<groupname>root</groupname>
<sources>
<source>
- <location>${project.build.directory}/${project.artifactId}-${project.version}/ambari_agent</location>
+ <location>${project.build.directory}${dirsep}${project.artifactId}-${project.version}${dirsep}ambari_agent</location>
</source>
</sources>
</mapping>
@@ -637,6 +644,7 @@
<exclude>**/*.erb</exclude>
<exclude>**/*.json</exclude>
<exclude>**/*.pydevproject</exclude>
+ <exclude>**/*.wxs</exclude>
</excludes>
</configuration>
<executions>
@@ -674,6 +682,153 @@
</build>
<profiles>
<profile>
+ <id>windows</id>
+ <activation>
+ <os>
+ <family>win</family>
+ </os>
+ </activation>
+ <properties>
+ <envClassifier>win</envClassifier>
+ <dirsep>\</dirsep>
+ <pathsep>;</pathsep>
+ <stack.distribution>HDPWIN</stack.distribution>
+ <executable.python>python</executable.python>
+ <executable.shell>cmd</executable.shell>
+ <fileextension.shell>cmd</fileextension.shell>
+ <fileextension.dot.shell-default>.cmd</fileextension.dot.shell-default>
+ <path.python.1>${project.basedir}\..\ambari-common\src\main\python;${project.basedir}\..\ambari-agent\src\main\python;${project.basedir}\..\ambari-common\src\main\python\ambari_jinja2;${project.basedir}\..\ambari-common\src\main\python\ambari_commons;${project.basedir}\..\ambari-common\src\test\python;${project.basedir}\src\main\python;${project.basedir}\src\main\python\ambari_agent;${project.basedir}\src\main\python\resource_management;${project.basedir}\src\test\python;${project.basedir}\src\test\python\ambari_agent;${project.basedir}\src\test\python\resource_management;${project.basedir}\..\ambari-server\src\test\python;${project.basedir}\..\ambari-server\src\main\resources\stacks\HDP\2.0.6\services\HDFS\package\files;${project.basedir}\..\ambari-server\src\main\resources\stacks\HDP\1.3.2\services\HDFS\package\files</path.python.1>
+ </properties>
+ </profile>
+ <profile>
+ <id>linux</id>
+ <activation>
+ <os>
+ <family>unix</family>
+ </os>
+ </activation>
+ <properties>
+ <envClassifier>linux</envClassifier>
+ <dirsep>/</dirsep>
+ <pathsep>:</pathsep>
+ <stack.distribution>HDP</stack.distribution>
+ <executable.python>${project.basedir}/../ambari-common/src/main/unix/ambari-python-wrap</executable.python>
+ <executable.shell>sh</executable.shell>
+ <fileextension.shell>sh</fileextension.shell>
+ <fileextension.dot.shell-default></fileextension.dot.shell-default>
+ <path.python.1>${project.basedir}/../ambari-common/src/main/python:${project.basedir}/../ambari-agent/src/main/python:${project.basedir}/../ambari-common/src/main/python/ambari_jinja2:${project.basedir}/../ambari-common/src/main/python/ambari_commons:${project.basedir}/../ambari-common/src/test/python:${project.basedir}/src/main/python:${project.basedir}/src/main/python/ambari_agent:${project.basedir}/src/main/python/resource_management:${project.basedir}/src/test/python:${project.basedir}/src/test/python/ambari_agent:${project.basedir}/src/test/python/resource_management:${project.basedir}/../ambari-server/src/test/python:${project.basedir}/../ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files:${project.basedir}/../ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/files</path.python.1>
+ </properties>
+ </profile>
+ <profile>
+ <id>windows-distro</id>
+ <activation>
+ <os>
+ <family>Windows</family>
+ </os>
+ </activation>
+ <build>
+ <plugins>
+ <plugin>
+ <artifactId>maven-assembly-plugin</artifactId>
+ <configuration>
+ <tarLongFileMode>gnu</tarLongFileMode>
+ <descriptors>
+ <descriptor>src/packages/tarball/all.xml</descriptor>
+ <descriptor>src/packages/windows.xml</descriptor>
+ </descriptors>
+ </configuration>
+ <executions>
+ <execution>
+ <id>build-windows-zip</id>
+ <phase>prepare-package</phase>
+ <goals>
+ <goal>single</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <!-- msi creation -->
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>exec-maven-plugin</artifactId>
+ <version>1.2</version>
+ <executions>
+ <execution>
+ <id>run-heat</id>
+ <phase>package</phase>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ <configuration>
+ <executable>heat.exe</executable>
+ <arguments>
+ <argument>dir</argument>
+ <argument>"."</argument>
+ <argument>-dr</argument>
+ <argument>"AMBARI_AGENT_1.3.0_SNAPSHOT"</argument>
+ <argument>-platform</argument>
+ <argument>Win64</argument>
+ <argument>-cg</argument>
+ <argument>"AmbariAgentGroup"</argument>
+ <argument>-gg</argument>
+ <argument>-ke</argument>
+ <argument>-srd</argument>
+ <argument>-o</argument>
+ <argument>".\..\ambari-agent-files.wxs"</argument>
+ </arguments>
+ <workingDirectory>${basedir}/target/${final.name}-windows-dist</workingDirectory>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.npanday.plugins</groupId>
+ <artifactId>wix-maven-plugin</artifactId>
+ <version>1.4.0-incubating</version>
+ <extensions>true</extensions>
+ <configuration>
+ <sourceFiles>
+ <sourceFile>src/main/package/msi/ambari-agent.wxs</sourceFile>
+ <sourceFile>target/ambari-agent-files.wxs</sourceFile>
+ </sourceFiles>
+ <outputDirectory>target</outputDirectory>
+ <objectFiles>
+ <objectFile>target/ambari-agent.wixobj</objectFile>
+ <objectFile>target/ambari-agent-files.wixobj</objectFile>
+ </objectFiles>
+ <outputFile>target/ambari-agent-${project.version}.msi</outputFile>
+ <extensions>
+ <extension>WixUIExtension</extension>
+ </extensions>
+ </configuration>
+ <executions>
+ <execution>
+ <id>wix-candle</id>
+ <phase>package</phase>
+ <goals>
+ <goal>candle</goal>
+ </goals>
+ <configuration>
+ <arguments>-arch x64</arguments>
+ </configuration>
+ </execution>
+ <execution>
+ <id>wix-light</id>
+ <phase>package</phase>
+ <goals>
+ <goal>light</goal>
+ </goals>
+ <configuration>
+ <arguments>-b ${basedir}/target/${final.name}-windows-dist</arguments>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <!-- end msi creation -->
+ </plugins>
+ </build>
+ </profile>
+ <profile>
<id>suse11</id>
<properties>
<init.d.dir>/etc/init.d</init.d.dir>
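The paired windows/linux profiles above swap dirsep, pathsep, and the Python executable by OS family; a minimal Python sketch of the same selection (the property names come from the POM, everything else is standard library):

import os
import platform

# Mirrors the Maven profile properties dirsep/pathsep chosen per OS family.
IS_WINDOWS = platform.system() == "Windows"
dirsep = "\\" if IS_WINDOWS else "/"
pathsep = ";" if IS_WINDOWS else ":"

# The standard library already exposes the same values.
assert dirsep == os.sep and pathsep == os.pathsep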
diff --git a/ambari-agent/src/main/package/msi/ambari-agent.wxs b/ambari-agent/src/main/package/msi/ambari-agent.wxs
new file mode 100644
index 0000000000..cc2b8d879c
--- /dev/null
+++ b/ambari-agent/src/main/package/msi/ambari-agent.wxs
@@ -0,0 +1,38 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<Wix xmlns="http://schemas.microsoft.com/wix/2006/wi">
+ <Product Id="BA555A14-081D-4521-9B35-BC37C50CF5A9" Name="Ambari Agent 1.3.0-SNAPSHOT" Language="1033"
+ Version="1.3.0.0" Manufacturer="Apache Software Foundation"
+ UpgradeCode="6A5C01ED-C9B3-45C0-8A69-4512AC9F65F7">
+ <Package Description="Ambari Agent for Windows" Comments="Ambari Agent for Windows" InstallerVersion="200"
+ Compressed="yes" Platform="x64"/>
+ <Media Id="1" Cabinet="simple.cab" EmbedCab="yes"/>
+ <Directory Id="TARGETDIR" Name="SourceDir">
+ <Directory Id="ProgramFiles64Folder">
+ <Directory Id="AMBARI_AGENT_1.3.0_SNAPSHOT" Name="ambari-agent-1.3.0-SNAPSHOT">
+ </Directory>
+ </Directory>
+ </Directory>
+ <Feature Id="DefaultFeature" Title="Main Feature" Level="1">
+ <ComponentGroupRef Id="AmbariAgentGroup"/>
+ </Feature>
+ <Property Id="WIXUI_INSTALLDIR" Value="AMBARI_AGENT_1.3.0_SNAPSHOT"/>
+ <UI/>
+ <UIRef Id="WixUI_InstallDir"/>
+ </Product>
+</Wix> \ No newline at end of file
diff --git a/ambari-agent/src/main/python/ambari_agent/ActionQueue.py b/ambari-agent/src/main/python/ambari_agent/ActionQueue.py
index 094975dbba..4ecb8226a8 100644
--- a/ambari-agent/src/main/python/ambari_agent/ActionQueue.py
+++ b/ambari-agent/src/main/python/ambari_agent/ActionQueue.py
@@ -153,11 +153,11 @@ class ActionQueue(threading.Thread):
while not self.backgroundCommandQueue.empty():
try:
command = self.backgroundCommandQueue.get(False)
- if(command.has_key('__handle') and command['__handle'].status == None):
+ if(command.has_key('__handle') and command['__handle'].status == None):
self.process_command(command)
except (Queue.Empty):
pass
-
+
def processStatusCommandQueueSafeEmpty(self):
while not self.statusCommandQueue.empty():
try:
@@ -215,17 +215,17 @@ class ActionQueue(threading.Thread):
'status': self.IN_PROGRESS_STATUS
})
self.commandStatuses.put_command_status(command, in_progress_status)
-
+
# running command
commandresult = self.customServiceOrchestrator.runCommand(command,
in_progress_status['tmpout'], in_progress_status['tmperr'])
-
-
+
+
# dumping results
if isCommandBackground:
return
else:
- status = self.COMPLETED_STATUS if commandresult['exitcode'] == 0 else self.FAILED_STATUS
+ status = self.COMPLETED_STATUS if commandresult['exitcode'] == 0 else self.FAILED_STATUS
roleResult = self.commandStatuses.generate_report_template(command)
roleResult.update({
'stdout': commandresult['stdout'],
@@ -250,18 +250,18 @@ class ActionQueue(threading.Thread):
# let ambari know that configuration tags were applied
if status == self.COMPLETED_STATUS:
configHandler = ActualConfigHandler(self.config, self.configTags)
- #update
+ #update
if command.has_key('forceRefreshConfigTags') and len(command['forceRefreshConfigTags']) > 0 :
-
+
forceRefreshConfigTags = command['forceRefreshConfigTags']
logger.info("Got refresh additional component tags command")
-
+
for configTag in forceRefreshConfigTags :
configHandler.update_component_tag(command['role'], configTag, command['configurationTags'][configTag])
-
+
roleResult['customCommand'] = self.CUSTOM_COMMAND_RESTART # force restart for component to evict stale_config on server side
command['configurationTags'] = configHandler.read_actual_component(command['role'])
-
+
if command.has_key('configurationTags'):
configHandler.write_actual(command['configurationTags'])
roleResult['configurationTags'] = command['configurationTags']
@@ -288,17 +288,17 @@ class ActionQueue(threading.Thread):
logger.debug('Start callback: %s' % process_condenced_result)
logger.debug('The handle is: %s' % handle)
status = self.COMPLETED_STATUS if handle.exitCode == 0 else self.FAILED_STATUS
-
+
aborted_postfix = self.customServiceOrchestrator.command_canceled_reason(handle.command['taskId'])
if aborted_postfix:
status = self.FAILED_STATUS
logger.debug('Set status to: %s , reason = %s' % (status, aborted_postfix))
else:
aborted_postfix = ''
-
-
+
+
roleResult = self.commandStatuses.generate_report_template(handle.command)
-
+
roleResult.update({
'stdout': process_condenced_result['stdout'] + aborted_postfix,
'stderr': process_condenced_result['stderr'] + aborted_postfix,
@@ -306,7 +306,7 @@ class ActionQueue(threading.Thread):
'structuredOut': str(json.dumps(process_condenced_result['structuredOut'])) if 'structuredOut' in process_condenced_result else '',
'status': status,
})
-
+
self.commandStatuses.put_command_status(handle.command, roleResult)
def execute_status_command(self, command):
@@ -371,11 +371,10 @@ class ActionQueue(threading.Thread):
"""
Actions that are executed every time when command status changes
"""
- self.controller.heartbeat_wait_event.set()
+ self.controller.trigger_heartbeat()
# Removes all commands from the queue
def reset(self):
queue = self.commandQueue
with queue.mutex:
queue.queue.clear()
-
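status_update_callback now goes through Controller.trigger_heartbeat() instead of setting the wait event directly, which keeps ActionQueue unaware of the OS-specific stop handler. Below is a minimal sketch of that indirection with a stand-in handler; the real HeartbeatStopHandler classes live in the *_windows/*_linux modules added by this patch:

import threading

class StubStopHandler(object):
    # Stand-in for HeartbeatStopHandler: wraps the event the old code set directly.
    def __init__(self):
        self.heartbeat_event = threading.Event()

    def set_heartbeat(self):
        self.heartbeat_event.set()

class StubController(object):
    def __init__(self):
        self.heartbeat_stop_callback = StubStopHandler()

    def trigger_heartbeat(self):
        self.heartbeat_stop_callback.set_heartbeat()

StubController().trigger_heartbeat()  # what status_update_callback now invokes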
diff --git a/ambari-agent/src/main/python/ambari_agent/AgentConfig_linux.py b/ambari-agent/src/main/python/ambari_agent/AgentConfig_linux.py
new file mode 100644
index 0000000000..a90b8ba536
--- /dev/null
+++ b/ambari-agent/src/main/python/ambari_agent/AgentConfig_linux.py
@@ -0,0 +1,229 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import os
+
+content = """
+
+[server]
+hostname=localhost
+url_port=8440
+secured_url_port=8441
+
+[agent]
+prefix=/tmp/ambari-agent
+tmp_dir=/tmp/ambari-agent/tmp
+data_cleanup_interval=86400
+data_cleanup_max_age=2592000
+data_cleanup_max_size_MB = 100
+ping_port=8670
+cache_dir=/var/lib/ambari-agent/cache
+
+[services]
+
+[python]
+custom_actions_dir = /var/lib/ambari-agent/resources/custom_actions
+
+[command]
+maxretries=2
+sleepBetweenRetries=1
+
+[security]
+keysdir=/tmp/ambari-agent
+server_crt=ca.crt
+passphrase_env_var_name=AMBARI_PASSPHRASE
+
+[heartbeat]
+state_interval = 6
+dirs=/etc/hadoop,/etc/hadoop/conf,/var/run/hadoop,/var/log/hadoop
+log_lines_count=300
+
+"""
+
+imports = [
+ "hdp/manifests/*.pp",
+ "hdp-hadoop/manifests/*.pp",
+ "hdp-hbase/manifests/*.pp",
+ "hdp-zookeeper/manifests/*.pp",
+ "hdp-oozie/manifests/*.pp",
+ "hdp-pig/manifests/*.pp",
+ "hdp-sqoop/manifests/*.pp",
+ "hdp-templeton/manifests/*.pp",
+ "hdp-hive/manifests/*.pp",
+ "hdp-hcat/manifests/*.pp",
+ "hdp-mysql/manifests/*.pp",
+ "hdp-monitor-webserver/manifests/*.pp",
+ "hdp-repos/manifests/*.pp"
+]
+
+rolesToClass = {
+ 'GLUSTERFS': 'hdp-hadoop::glusterfs',
+ 'GLUSTERFS_CLIENT': 'hdp-hadoop::glusterfs_client',
+ 'GLUSTERFS_SERVICE_CHECK': 'hdp-hadoop::glusterfs_service_check',
+ 'NAMENODE': 'hdp-hadoop::namenode',
+ 'DATANODE': 'hdp-hadoop::datanode',
+ 'SECONDARY_NAMENODE': 'hdp-hadoop::snamenode',
+ 'JOBTRACKER': 'hdp-hadoop::jobtracker',
+ 'TASKTRACKER': 'hdp-hadoop::tasktracker',
+ 'RESOURCEMANAGER': 'hdp-yarn::resourcemanager',
+ 'NODEMANAGER': 'hdp-yarn::nodemanager',
+ 'HISTORYSERVER': 'hdp-yarn::historyserver',
+ 'YARN_CLIENT': 'hdp-yarn::yarn_client',
+ 'HDFS_CLIENT': 'hdp-hadoop::client',
+ 'MAPREDUCE_CLIENT': 'hdp-hadoop::client',
+ 'MAPREDUCE2_CLIENT': 'hdp-yarn::mapreducev2_client',
+ 'ZOOKEEPER_SERVER': 'hdp-zookeeper',
+ 'ZOOKEEPER_CLIENT': 'hdp-zookeeper::client',
+ 'HBASE_MASTER': 'hdp-hbase::master',
+ 'HBASE_REGIONSERVER': 'hdp-hbase::regionserver',
+ 'HBASE_CLIENT': 'hdp-hbase::client',
+ 'PIG': 'hdp-pig',
+ 'SQOOP': 'hdp-sqoop',
+ 'OOZIE_SERVER': 'hdp-oozie::server',
+ 'OOZIE_CLIENT': 'hdp-oozie::client',
+ 'HIVE_CLIENT': 'hdp-hive::client',
+ 'HCAT': 'hdp-hcat',
+ 'HIVE_SERVER': 'hdp-hive::server',
+ 'HIVE_METASTORE': 'hdp-hive::metastore',
+ 'MYSQL_SERVER': 'hdp-mysql::server',
+ 'WEBHCAT_SERVER': 'hdp-templeton::server',
+ 'DASHBOARD': 'hdp-dashboard',
+ 'GANGLIA_SERVER': 'hdp-ganglia::server',
+ 'GANGLIA_MONITOR': 'hdp-ganglia::monitor',
+ 'HTTPD': 'hdp-monitor-webserver',
+ 'HUE_SERVER': 'hdp-hue::server',
+ 'HDFS_SERVICE_CHECK': 'hdp-hadoop::hdfs::service_check',
+ 'MAPREDUCE_SERVICE_CHECK': 'hdp-hadoop::mapred::service_check',
+ 'MAPREDUCE2_SERVICE_CHECK': 'hdp-yarn::mapred2::service_check',
+ 'ZOOKEEPER_SERVICE_CHECK': 'hdp-zookeeper::zookeeper::service_check',
+ 'ZOOKEEPER_QUORUM_SERVICE_CHECK': 'hdp-zookeeper::quorum::service_check',
+ 'HBASE_SERVICE_CHECK': 'hdp-hbase::hbase::service_check',
+ 'HIVE_SERVICE_CHECK': 'hdp-hive::hive::service_check',
+ 'HCAT_SERVICE_CHECK': 'hdp-hcat::hcat::service_check',
+ 'OOZIE_SERVICE_CHECK': 'hdp-oozie::oozie::service_check',
+ 'PIG_SERVICE_CHECK': 'hdp-pig::pig::service_check',
+ 'SQOOP_SERVICE_CHECK': 'hdp-sqoop::sqoop::service_check',
+ 'WEBHCAT_SERVICE_CHECK': 'hdp-templeton::templeton::service_check',
+ 'DASHBOARD_SERVICE_CHECK': 'hdp-dashboard::dashboard::service_check',
+ 'DECOMMISSION_DATANODE': 'hdp-hadoop::hdfs::decommission',
+ 'HUE_SERVICE_CHECK': 'hdp-hue::service_check',
+ 'RESOURCEMANAGER_SERVICE_CHECK': 'hdp-yarn::resourcemanager::service_check',
+ 'HISTORYSERVER_SERVICE_CHECK': 'hdp-yarn::historyserver::service_check',
+ 'TEZ_CLIENT': 'hdp-tez::tez_client',
+ 'YARN_SERVICE_CHECK': 'hdp-yarn::yarn::service_check',
+ 'FLUME_SERVER': 'hdp-flume',
+ 'JOURNALNODE': 'hdp-hadoop::journalnode',
+ 'ZKFC': 'hdp-hadoop::zkfc'
+}
+
+serviceStates = {
+ 'START': 'running',
+ 'INSTALL': 'installed_and_configured',
+ 'STOP': 'stopped'
+}
+
+servicesToPidNames = {
+ 'GLUSTERFS' : 'glusterd.pid$',
+ 'NAMENODE': 'hadoop-{USER}-namenode.pid$',
+ 'SECONDARY_NAMENODE': 'hadoop-{USER}-secondarynamenode.pid$',
+ 'DATANODE': 'hadoop-{USER}-datanode.pid$',
+ 'JOBTRACKER': 'hadoop-{USER}-jobtracker.pid$',
+ 'TASKTRACKER': 'hadoop-{USER}-tasktracker.pid$',
+ 'RESOURCEMANAGER': 'yarn-{USER}-resourcemanager.pid$',
+ 'NODEMANAGER': 'yarn-{USER}-nodemanager.pid$',
+ 'HISTORYSERVER': 'mapred-{USER}-historyserver.pid$',
+ 'JOURNALNODE': 'hadoop-{USER}-journalnode.pid$',
+ 'ZKFC': 'hadoop-{USER}-zkfc.pid$',
+ 'OOZIE_SERVER': 'oozie.pid',
+ 'ZOOKEEPER_SERVER': 'zookeeper_server.pid',
+ 'FLUME_SERVER': 'flume-node.pid',
+ 'TEMPLETON_SERVER': 'templeton.pid',
+ 'GANGLIA_SERVER': 'gmetad.pid',
+ 'GANGLIA_MONITOR': 'gmond.pid',
+ 'HBASE_MASTER': 'hbase-{USER}-master.pid',
+ 'HBASE_REGIONSERVER': 'hbase-{USER}-regionserver.pid',
+ 'HCATALOG_SERVER': 'webhcat.pid',
+ 'KERBEROS_SERVER': 'kadmind.pid',
+ 'HIVE_SERVER': 'hive-server.pid',
+ 'HIVE_METASTORE': 'hive.pid',
+ 'MYSQL_SERVER': 'mysqld.pid',
+ 'HUE_SERVER': '/var/run/hue/supervisor.pid',
+ 'WEBHCAT_SERVER': 'webhcat.pid',
+}
+
+# Each service whose pid file name depends on the user must provide a user mapping
+servicesToLinuxUser = {
+ 'NAMENODE': 'hdfs_user',
+ 'SECONDARY_NAMENODE': 'hdfs_user',
+ 'DATANODE': 'hdfs_user',
+ 'JOURNALNODE': 'hdfs_user',
+ 'ZKFC': 'hdfs_user',
+ 'JOBTRACKER': 'mapred_user',
+ 'TASKTRACKER': 'mapred_user',
+ 'RESOURCEMANAGER': 'yarn_user',
+ 'NODEMANAGER': 'yarn_user',
+ 'HISTORYSERVER': 'mapred_user',
+ 'HBASE_MASTER': 'hbase_user',
+ 'HBASE_REGIONSERVER': 'hbase_user',
+}
+
+pidPathVars = [
+ {'var' : 'glusterfs_pid_dir_prefix',
+ 'defaultValue' : '/var/run'},
+ {'var' : 'hadoop_pid_dir_prefix',
+ 'defaultValue' : '/var/run/hadoop'},
+ {'var' : 'hadoop_pid_dir_prefix',
+ 'defaultValue' : '/var/run/hadoop'},
+ {'var' : 'ganglia_runtime_dir',
+ 'defaultValue' : '/var/run/ganglia/hdp'},
+ {'var' : 'hbase_pid_dir',
+ 'defaultValue' : '/var/run/hbase'},
+ {'var' : 'zk_pid_dir',
+ 'defaultValue' : '/var/run/zookeeper'},
+ {'var' : 'oozie_pid_dir',
+ 'defaultValue' : '/var/run/oozie'},
+ {'var' : 'hcat_pid_dir',
+ 'defaultValue' : '/var/run/webhcat'},
+ {'var' : 'hive_pid_dir',
+ 'defaultValue' : '/var/run/hive'},
+ {'var' : 'mysqld_pid_dir',
+ 'defaultValue' : '/var/run/mysqld'},
+ {'var' : 'hcat_pid_dir',
+ 'defaultValue' : '/var/run/webhcat'},
+ {'var' : 'yarn_pid_dir_prefix',
+ 'defaultValue' : '/var/run/hadoop-yarn'},
+ {'var' : 'mapred_pid_dir_prefix',
+ 'defaultValue' : '/var/run/hadoop-mapreduce'},
+]
+
+if 'AMBARI_AGENT_CONF_DIR' in os.environ:
+ configFile = os.path.join(os.environ['AMBARI_AGENT_CONF_DIR'], "ambari-agent.ini")
+else:
+ configFile = "/etc/ambari-agent/conf/ambari-agent.ini"
+
+if 'AMBARI_AGENT_LOG_DIR' in os.environ:
+ logfile = os.path.join(os.environ['AMBARI_AGENT_LOG_DIR'], "ambari-agent.log")
+else:
+ logfile = "/var/log/ambari-agent/ambari-agent.log"
+
+if 'AMBARI_AGENT_OUT_DIR' in os.environ:
+ outfile = os.path.join(os.environ['AMBARI_AGENT_OUT_DIR'], "ambari-agent.out")
+else:
+ outfile = "/var/log/ambari-agent/ambari-agent.out"
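Because configFile, logfile, and outfile are resolved at import time, the environment overrides must be exported before the module is first imported; for example (directories illustrative):

import os

# Must be set before AgentConfig_linux is imported anywhere in the process.
os.environ["AMBARI_AGENT_CONF_DIR"] = "/opt/ambari/conf"  # illustrative path
os.environ["AMBARI_AGENT_LOG_DIR"] = "/opt/ambari/log"    # illustrative path

import AgentConfig_linux
print AgentConfig_linux.configFile  # /opt/ambari/conf/ambari-agent.ini
print AgentConfig_linux.logfile     # /opt/ambari/log/ambari-agent.log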
diff --git a/ambari-agent/src/main/python/ambari_agent/AgentConfig_windows.py b/ambari-agent/src/main/python/ambari_agent/AgentConfig_windows.py
new file mode 100644
index 0000000000..e5e1b22992
--- /dev/null
+++ b/ambari-agent/src/main/python/ambari_agent/AgentConfig_windows.py
@@ -0,0 +1,232 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import os
+
+content = """
+
+[server]
+hostname=localhost
+url_port=8440
+secured_url_port=8441
+
+[agent]
+prefix=\\tmp\\ambari-agent
+data_cleanup_interval=86400
+data_cleanup_max_age=2592000
+ping_port=8670
+cache_dir=\\var\\lib\\ambari-agent\\cache
+
+[services]
+
+[python]
+custom_actions_dir = \\var\\lib\\ambari-agent\\resources\\custom_actions
+
+[command]
+maxretries=2
+sleepBetweenRetries=1
+
+[security]
+keysdir=\\tmp\\ambari-agent
+server_crt=ca.crt
+passphrase_env_var_name=AMBARI_PASSPHRASE
+
+[heartbeat]
+state_interval = 6
+dirs=\\etc\\hadoop,\\etc\\hadoop\\conf,\\var\\run\\hadoop,\\var\\log\\hadoop
+rpms=glusterfs,openssl,wget,net-snmp,ntpd,ganglia,nagios,glusterfs
+log_lines_count=300
+
+"""
+
+imports = [
+ "hdp\\manifests\\*.pp",
+ "hdp-hadoop\\manifests\\*.pp",
+ "hdp-hbase\\manifests\\*.pp",
+ "hdp-zookeeper\\manifests\\*.pp",
+ "hdp-oozie\\manifests\\*.pp",
+ "hdp-pig\\manifests\\*.pp",
+ "hdp-sqoop\\manifests\\*.pp",
+ "hdp-templeton\\manifests\\*.pp",
+ "hdp-hive\\manifests\\*.pp",
+ "hdp-hcat\\manifests\\*.pp",
+ "hdp-mysql\\manifests\\*.pp",
+ "hdp-monitor-webserver\\manifests\\*.pp",
+ "hdp-repos\\manifests\\*.pp"
+]
+
+rolesToClass = {
+ 'GLUSTERFS': 'hdp-hadoop::glusterfs',
+ 'GLUSTERFS_CLIENT': 'hdp-hadoop::glusterfs_client',
+ 'GLUSTERFS_SERVICE_CHECK': 'hdp-hadoop::glusterfs_service_check',
+ 'NAMENODE': 'hdp-hadoop::namenode',
+ 'DATANODE': 'hdp-hadoop::datanode',
+ 'SECONDARY_NAMENODE': 'hdp-hadoop::snamenode',
+ 'JOBTRACKER': 'hdp-hadoop::jobtracker',
+ 'TASKTRACKER': 'hdp-hadoop::tasktracker',
+ 'RESOURCEMANAGER': 'hdp-yarn::resourcemanager',
+ 'NODEMANAGER': 'hdp-yarn::nodemanager',
+ 'HISTORYSERVER': 'hdp-yarn::historyserver',
+ 'YARN_CLIENT': 'hdp-yarn::yarn_client',
+ 'HDFS_CLIENT': 'hdp-hadoop::client',
+ 'MAPREDUCE_CLIENT': 'hdp-hadoop::client',
+ 'MAPREDUCE2_CLIENT': 'hdp-yarn::mapreducev2_client',
+ 'ZOOKEEPER_SERVER': 'hdp-zookeeper',
+ 'ZOOKEEPER_CLIENT': 'hdp-zookeeper::client',
+ 'HBASE_MASTER': 'hdp-hbase::master',
+ 'HBASE_REGIONSERVER': 'hdp-hbase::regionserver',
+ 'HBASE_CLIENT': 'hdp-hbase::client',
+ 'PIG': 'hdp-pig',
+ 'SQOOP': 'hdp-sqoop',
+ 'OOZIE_SERVER': 'hdp-oozie::server',
+ 'OOZIE_CLIENT': 'hdp-oozie::client',
+ 'HIVE_CLIENT': 'hdp-hive::client',
+ 'HCAT': 'hdp-hcat',
+ 'HIVE_SERVER': 'hdp-hive::server',
+ 'HIVE_METASTORE': 'hdp-hive::metastore',
+ 'MYSQL_SERVER': 'hdp-mysql::server',
+ 'WEBHCAT_SERVER': 'hdp-templeton::server',
+ 'DASHBOARD': 'hdp-dashboard',
+ 'NAGIOS_SERVER': 'hdp-nagios::server',
+ 'GANGLIA_SERVER': 'hdp-ganglia::server',
+ 'GANGLIA_MONITOR': 'hdp-ganglia::monitor',
+ 'HTTPD': 'hdp-monitor-webserver',
+ 'HUE_SERVER': 'hdp-hue::server',
+ 'HDFS_SERVICE_CHECK': 'hdp-hadoop::hdfs::service_check',
+ 'MAPREDUCE_SERVICE_CHECK': 'hdp-hadoop::mapred::service_check',
+ 'MAPREDUCE2_SERVICE_CHECK': 'hdp-yarn::mapred2::service_check',
+ 'ZOOKEEPER_SERVICE_CHECK': 'hdp-zookeeper::zookeeper::service_check',
+ 'ZOOKEEPER_QUORUM_SERVICE_CHECK': 'hdp-zookeeper::quorum::service_check',
+ 'HBASE_SERVICE_CHECK': 'hdp-hbase::hbase::service_check',
+ 'HIVE_SERVICE_CHECK': 'hdp-hive::hive::service_check',
+ 'HCAT_SERVICE_CHECK': 'hdp-hcat::hcat::service_check',
+ 'OOZIE_SERVICE_CHECK': 'hdp-oozie::oozie::service_check',
+ 'PIG_SERVICE_CHECK': 'hdp-pig::pig::service_check',
+ 'SQOOP_SERVICE_CHECK': 'hdp-sqoop::sqoop::service_check',
+ 'WEBHCAT_SERVICE_CHECK': 'hdp-templeton::templeton::service_check',
+ 'DASHBOARD_SERVICE_CHECK': 'hdp-dashboard::dashboard::service_check',
+ 'DECOMMISSION_DATANODE': 'hdp-hadoop::hdfs::decommission',
+ 'HUE_SERVICE_CHECK': 'hdp-hue::service_check',
+ 'RESOURCEMANAGER_SERVICE_CHECK': 'hdp-yarn::resourcemanager::service_check',
+ 'HISTORYSERVER_SERVICE_CHECK': 'hdp-yarn::historyserver::service_check',
+ 'TEZ_CLIENT': 'hdp-tez::tez_client',
+ 'YARN_SERVICE_CHECK': 'hdp-yarn::yarn::service_check',
+ 'FLUME_SERVER': 'hdp-flume',
+ 'JOURNALNODE': 'hdp-hadoop::journalnode',
+ 'ZKFC': 'hdp-hadoop::zkfc'
+}
+
+serviceStates = {
+ 'START': 'running',
+ 'INSTALL': 'installed_and_configured',
+ 'STOP': 'stopped'
+}
+
+servicesToPidNames = {
+ 'GLUSTERFS' : 'glusterd.pid$',
+ 'NAMENODE': 'hadoop-{USER}-namenode.pid$',
+ 'SECONDARY_NAMENODE': 'hadoop-{USER}-secondarynamenode.pid$',
+ 'DATANODE': 'hadoop-{USER}-datanode.pid$',
+ 'JOBTRACKER': 'hadoop-{USER}-jobtracker.pid$',
+ 'TASKTRACKER': 'hadoop-{USER}-tasktracker.pid$',
+ 'RESOURCEMANAGER': 'yarn-{USER}-resourcemanager.pid$',
+ 'NODEMANAGER': 'yarn-{USER}-nodemanager.pid$',
+ 'HISTORYSERVER': 'mapred-{USER}-historyserver.pid$',
+ 'JOURNALNODE': 'hadoop-{USER}-journalnode.pid$',
+ 'ZKFC': 'hadoop-{USER}-zkfc.pid$',
+ 'OOZIE_SERVER': 'oozie.pid',
+ 'ZOOKEEPER_SERVER': 'zookeeper_server.pid',
+ 'FLUME_SERVER': 'flume-node.pid',
+ 'TEMPLETON_SERVER': 'templeton.pid',
+ 'NAGIOS_SERVER': 'nagios.pid',
+ 'GANGLIA_SERVER': 'gmetad.pid',
+ 'GANGLIA_MONITOR': 'gmond.pid',
+ 'HBASE_MASTER': 'hbase-{USER}-master.pid',
+ 'HBASE_REGIONSERVER': 'hbase-{USER}-regionserver.pid',
+ 'HCATALOG_SERVER': 'webhcat.pid',
+ 'KERBEROS_SERVER': 'kadmind.pid',
+ 'HIVE_SERVER': 'hive-server.pid',
+ 'HIVE_METASTORE': 'hive.pid',
+ 'MYSQL_SERVER': 'mysqld.pid',
+ 'HUE_SERVER': '\\var\\run\\hue\\supervisor.pid',
+ 'WEBHCAT_SERVER': 'webhcat.pid',
+}
+
+# Each service whose pid file name depends on the user must provide a user mapping
+servicesToLinuxUser = {
+ 'NAMENODE': 'hdfs_user',
+ 'SECONDARY_NAMENODE': 'hdfs_user',
+ 'DATANODE': 'hdfs_user',
+ 'JOURNALNODE': 'hdfs_user',
+ 'ZKFC': 'hdfs_user',
+ 'JOBTRACKER': 'mapred_user',
+ 'TASKTRACKER': 'mapred_user',
+ 'RESOURCEMANAGER': 'yarn_user',
+ 'NODEMANAGER': 'yarn_user',
+ 'HISTORYSERVER': 'mapred_user',
+ 'HBASE_MASTER': 'hbase_user',
+ 'HBASE_REGIONSERVER': 'hbase_user',
+}
+
+pidPathVars = [
+ {'var' : 'glusterfs_pid_dir_prefix',
+ 'defaultValue' : '\\var\\run'},
+ {'var' : 'hadoop_pid_dir_prefix',
+ 'defaultValue' : '\\var\\run\\hadoop'},
+ {'var' : 'hadoop_pid_dir_prefix',
+ 'defaultValue' : '\\var\\run\\hadoop'},
+ {'var' : 'ganglia_runtime_dir',
+ 'defaultValue' : '\\var\\run\\ganglia\\hdp'},
+ {'var' : 'hbase_pid_dir',
+ 'defaultValue' : '\\var\\run\\hbase'},
+ {'var' : '',
+ 'defaultValue' : '\\var\\run\\nagios'},
+ {'var' : 'zk_pid_dir',
+ 'defaultValue' : '\\var\\run\\zookeeper'},
+ {'var' : 'oozie_pid_dir',
+ 'defaultValue' : '\\var\\run\\oozie'},
+ {'var' : 'hcat_pid_dir',
+ 'defaultValue' : '\\var\\run\\webhcat'},
+ {'var' : 'hive_pid_dir',
+ 'defaultValue' : '\\var\\run\\hive'},
+ {'var' : 'mysqld_pid_dir',
+ 'defaultValue' : '\\var\\run\\mysqld'},
+ {'var' : 'hcat_pid_dir',
+ 'defaultValue' : '\\var\\run\\webhcat'},
+ {'var' : 'yarn_pid_dir_prefix',
+ 'defaultValue' : '\\var\\run\\hadoop-yarn'},
+ {'var' : 'mapred_pid_dir_prefix',
+ 'defaultValue' : '\\var\\run\\hadoop-mapreduce'},
+]
+
+if 'AMBARI_AGENT_CONF_DIR' in os.environ:
+ configFile = os.path.join(os.environ['AMBARI_AGENT_CONF_DIR'], "ambari-agent.ini")
+else:
+ configFile = "ambari-agent.ini"
+
+if 'AMBARI_AGENT_LOG_DIR' in os.environ:
+ logfile = os.path.join(os.environ['AMBARI_AGENT_LOG_DIR'], "ambari-agent.log")
+else:
+ logfile = "\\var\\log\\ambari-agent-1.3.0-SNAPSHOT\\ambari-agent.log"
+
+if 'AMBARI_AGENT_OUT_DIR' in os.environ:
+ outfile = os.path.join(os.environ['AMBARI_AGENT_OUT_DIR'], "ambari-agent.out")
+else:
+ outfile = "\\var\\log\\ambari-agent-1.3.0-SNAPSHOT\\ambari-agent.out"
diff --git a/ambari-agent/src/main/python/ambari_agent/AmbariAgent.py b/ambari-agent/src/main/python/ambari_agent/AmbariAgent.py
index 79433b722f..e029620deb 100644
--- a/ambari-agent/src/main/python/ambari_agent/AmbariAgent.py
+++ b/ambari-agent/src/main/python/ambari_agent/AmbariAgent.py
@@ -23,8 +23,14 @@ import sys
import subprocess
from Controller import AGENT_AUTO_RESTART_EXIT_CODE
-AGENT_SCRIPT = "/usr/lib/python2.6/site-packages/ambari_agent/main.py"
-AGENT_PID_FILE = "/var/run/ambari-agent/ambari-agent.pid"
+if os.environ.has_key("PYTHON_BIN"):
+ AGENT_SCRIPT = os.path.join(os.environ["PYTHON_BIN"],"site-packages/ambari_agent/main.py")
+else:
+ AGENT_SCRIPT = "/usr/lib/python2.6/site-packages/ambari_agent/main.py"
+if os.environ.has_key("AMBARI_PID_DIR"):
+ AGENT_SCRIPT = os.path.join(os.environ["AMBARI_PID_DIR"],"ambari-agent.pid")
+else:
+ AGENT_PID_FILE = "/var/run/ambari-agent/ambari-agent.pid"
# AGENT_AUTO_RESTART_EXIT_CODE = 77 is exit code which we return when restart_agent() is called
status = AGENT_AUTO_RESTART_EXIT_CODE
diff --git a/ambari-agent/src/main/python/ambari_agent/AmbariConfig.py b/ambari-agent/src/main/python/ambari_agent/AmbariConfig.py
index 6c79b6b0d0..4bae50baef 100644
--- a/ambari-agent/src/main/python/ambari_agent/AmbariConfig.py
+++ b/ambari-agent/src/main/python/ambari_agent/AmbariConfig.py
@@ -18,204 +18,31 @@ See the License for the specific language governing permissions and
limitations under the License.
'''
+import platform
+
import ConfigParser
import StringIO
import json
from NetUtil import NetUtil
-content = """
-
-[server]
-hostname=localhost
-url_port=8440
-secured_url_port=8441
-
-[agent]
-prefix=/tmp/ambari-agent
-tmp_dir=/tmp/ambari-agent/tmp
-data_cleanup_interval=86400
-data_cleanup_max_age=2592000
-data_cleanup_max_size_MB = 100
-ping_port=8670
-cache_dir=/var/lib/ambari-agent/cache
-
-[services]
-
-[python]
-custom_actions_dir = /var/lib/ambari-agent/resources/custom_actions
-
-[command]
-maxretries=2
-sleepBetweenRetries=1
-
-[security]
-keysdir=/tmp/ambari-agent
-server_crt=ca.crt
-passphrase_env_var_name=AMBARI_PASSPHRASE
-
-[heartbeat]
-state_interval = 6
-dirs=/etc/hadoop,/etc/hadoop/conf,/var/run/hadoop,/var/log/hadoop
-log_lines_count=300
-
-"""
-
-imports = [
- "hdp/manifests/*.pp",
- "hdp-hadoop/manifests/*.pp",
- "hdp-hbase/manifests/*.pp",
- "hdp-zookeeper/manifests/*.pp",
- "hdp-oozie/manifests/*.pp",
- "hdp-pig/manifests/*.pp",
- "hdp-sqoop/manifests/*.pp",
- "hdp-templeton/manifests/*.pp",
- "hdp-hive/manifests/*.pp",
- "hdp-hcat/manifests/*.pp",
- "hdp-mysql/manifests/*.pp",
- "hdp-monitor-webserver/manifests/*.pp",
- "hdp-repos/manifests/*.pp"
-]
-
-rolesToClass = {
- 'GLUSTERFS': 'hdp-hadoop::glusterfs',
- 'GLUSTERFS_CLIENT': 'hdp-hadoop::glusterfs_client',
- 'GLUSTERFS_SERVICE_CHECK': 'hdp-hadoop::glusterfs_service_check',
- 'NAMENODE': 'hdp-hadoop::namenode',
- 'DATANODE': 'hdp-hadoop::datanode',
- 'SECONDARY_NAMENODE': 'hdp-hadoop::snamenode',
- 'JOBTRACKER': 'hdp-hadoop::jobtracker',
- 'TASKTRACKER': 'hdp-hadoop::tasktracker',
- 'RESOURCEMANAGER': 'hdp-yarn::resourcemanager',
- 'NODEMANAGER': 'hdp-yarn::nodemanager',
- 'HISTORYSERVER': 'hdp-yarn::historyserver',
- 'YARN_CLIENT': 'hdp-yarn::yarn_client',
- 'HDFS_CLIENT': 'hdp-hadoop::client',
- 'MAPREDUCE_CLIENT': 'hdp-hadoop::client',
- 'MAPREDUCE2_CLIENT': 'hdp-yarn::mapreducev2_client',
- 'ZOOKEEPER_SERVER': 'hdp-zookeeper',
- 'ZOOKEEPER_CLIENT': 'hdp-zookeeper::client',
- 'HBASE_MASTER': 'hdp-hbase::master',
- 'HBASE_REGIONSERVER': 'hdp-hbase::regionserver',
- 'HBASE_CLIENT': 'hdp-hbase::client',
- 'PIG': 'hdp-pig',
- 'SQOOP': 'hdp-sqoop',
- 'OOZIE_SERVER': 'hdp-oozie::server',
- 'OOZIE_CLIENT': 'hdp-oozie::client',
- 'HIVE_CLIENT': 'hdp-hive::client',
- 'HCAT': 'hdp-hcat',
- 'HIVE_SERVER': 'hdp-hive::server',
- 'HIVE_METASTORE': 'hdp-hive::metastore',
- 'MYSQL_SERVER': 'hdp-mysql::server',
- 'WEBHCAT_SERVER': 'hdp-templeton::server',
- 'DASHBOARD': 'hdp-dashboard',
- 'GANGLIA_SERVER': 'hdp-ganglia::server',
- 'GANGLIA_MONITOR': 'hdp-ganglia::monitor',
- 'HTTPD': 'hdp-monitor-webserver',
- 'HUE_SERVER': 'hdp-hue::server',
- 'HDFS_SERVICE_CHECK': 'hdp-hadoop::hdfs::service_check',
- 'MAPREDUCE_SERVICE_CHECK': 'hdp-hadoop::mapred::service_check',
- 'MAPREDUCE2_SERVICE_CHECK': 'hdp-yarn::mapred2::service_check',
- 'ZOOKEEPER_SERVICE_CHECK': 'hdp-zookeeper::zookeeper::service_check',
- 'ZOOKEEPER_QUORUM_SERVICE_CHECK': 'hdp-zookeeper::quorum::service_check',
- 'HBASE_SERVICE_CHECK': 'hdp-hbase::hbase::service_check',
- 'HIVE_SERVICE_CHECK': 'hdp-hive::hive::service_check',
- 'HCAT_SERVICE_CHECK': 'hdp-hcat::hcat::service_check',
- 'OOZIE_SERVICE_CHECK': 'hdp-oozie::oozie::service_check',
- 'PIG_SERVICE_CHECK': 'hdp-pig::pig::service_check',
- 'SQOOP_SERVICE_CHECK': 'hdp-sqoop::sqoop::service_check',
- 'WEBHCAT_SERVICE_CHECK': 'hdp-templeton::templeton::service_check',
- 'DASHBOARD_SERVICE_CHECK': 'hdp-dashboard::dashboard::service_check',
- 'DECOMMISSION_DATANODE': 'hdp-hadoop::hdfs::decommission',
- 'HUE_SERVICE_CHECK': 'hdp-hue::service_check',
- 'RESOURCEMANAGER_SERVICE_CHECK': 'hdp-yarn::resourcemanager::service_check',
- 'HISTORYSERVER_SERVICE_CHECK': 'hdp-yarn::historyserver::service_check',
- 'TEZ_CLIENT': 'hdp-tez::tez_client',
- 'YARN_SERVICE_CHECK': 'hdp-yarn::yarn::service_check',
- 'FLUME_SERVER': 'hdp-flume',
- 'JOURNALNODE': 'hdp-hadoop::journalnode',
- 'ZKFC': 'hdp-hadoop::zkfc'
-}
-
-serviceStates = {
- 'START': 'running',
- 'INSTALL': 'installed_and_configured',
- 'STOP': 'stopped'
-}
-
-servicesToPidNames = {
- 'GLUSTERFS' : 'glusterd.pid$',
- 'NAMENODE': 'hadoop-{USER}-namenode.pid$',
- 'SECONDARY_NAMENODE': 'hadoop-{USER}-secondarynamenode.pid$',
- 'DATANODE': 'hadoop-{USER}-datanode.pid$',
- 'JOBTRACKER': 'hadoop-{USER}-jobtracker.pid$',
- 'TASKTRACKER': 'hadoop-{USER}-tasktracker.pid$',
- 'RESOURCEMANAGER': 'yarn-{USER}-resourcemanager.pid$',
- 'NODEMANAGER': 'yarn-{USER}-nodemanager.pid$',
- 'HISTORYSERVER': 'mapred-{USER}-historyserver.pid$',
- 'JOURNALNODE': 'hadoop-{USER}-journalnode.pid$',
- 'ZKFC': 'hadoop-{USER}-zkfc.pid$',
- 'OOZIE_SERVER': 'oozie.pid',
- 'ZOOKEEPER_SERVER': 'zookeeper_server.pid',
- 'FLUME_SERVER': 'flume-node.pid',
- 'TEMPLETON_SERVER': 'templeton.pid',
- 'GANGLIA_SERVER': 'gmetad.pid',
- 'GANGLIA_MONITOR': 'gmond.pid',
- 'HBASE_MASTER': 'hbase-{USER}-master.pid',
- 'HBASE_REGIONSERVER': 'hbase-{USER}-regionserver.pid',
- 'HCATALOG_SERVER': 'webhcat.pid',
- 'KERBEROS_SERVER': 'kadmind.pid',
- 'HIVE_SERVER': 'hive-server.pid',
- 'HIVE_METASTORE': 'hive.pid',
- 'MYSQL_SERVER': 'mysqld.pid',
- 'HUE_SERVER': '/var/run/hue/supervisor.pid',
- 'WEBHCAT_SERVER': 'webhcat.pid',
-}
-
-#Each service, which's pid depends on user should provide user mapping
-servicesToLinuxUser = {
- 'NAMENODE': 'hdfs_user',
- 'SECONDARY_NAMENODE': 'hdfs_user',
- 'DATANODE': 'hdfs_user',
- 'JOURNALNODE': 'hdfs_user',
- 'ZKFC': 'hdfs_user',
- 'JOBTRACKER': 'mapred_user',
- 'TASKTRACKER': 'mapred_user',
- 'RESOURCEMANAGER': 'yarn_user',
- 'NODEMANAGER': 'yarn_user',
- 'HISTORYSERVER': 'mapred_user',
- 'HBASE_MASTER': 'hbase_user',
- 'HBASE_REGIONSERVER': 'hbase_user',
-}
-
-pidPathesVars = [
- {'var' : 'glusterfs_pid_dir_prefix',
- 'defaultValue' : '/var/run'},
- {'var' : 'hadoop_pid_dir_prefix',
- 'defaultValue' : '/var/run/hadoop'},
- {'var' : 'hadoop_pid_dir_prefix',
- 'defaultValue' : '/var/run/hadoop'},
- {'var' : 'ganglia_runtime_dir',
- 'defaultValue' : '/var/run/ganglia/hdp'},
- {'var' : 'hbase_pid_dir',
- 'defaultValue' : '/var/run/hbase'},
- {'var' : 'zk_pid_dir',
- 'defaultValue' : '/var/run/zookeeper'},
- {'var' : 'oozie_pid_dir',
- 'defaultValue' : '/var/run/oozie'},
- {'var' : 'hcat_pid_dir',
- 'defaultValue' : '/var/run/webhcat'},
- {'var' : 'hive_pid_dir',
- 'defaultValue' : '/var/run/hive'},
- {'var' : 'mysqld_pid_dir',
- 'defaultValue' : '/var/run/mysqld'},
- {'var' : 'hcat_pid_dir',
- 'defaultValue' : '/var/run/webhcat'},
- {'var' : 'yarn_pid_dir_prefix',
- 'defaultValue' : '/var/run/hadoop-yarn'},
- {'var' : 'mapred_pid_dir_prefix',
- 'defaultValue' : '/var/run/hadoop-mapreduce'},
-]
+SETUP_ACTION = "setup"
+START_ACTION = "start"
+STOP_ACTION = "stop"
+RESET_ACTION = "reset"
+STATUS_ACTION = "status"
+DEBUG_ACTION = "debug"
+
+IS_WINDOWS = platform.system() == "Windows"
+
+if not IS_WINDOWS:
+ from AgentConfig_linux import *
+else:
+ from AgentConfig_windows import *
+
+config = ConfigParser.RawConfigParser()
+s = StringIO.StringIO(content)
+config.readfp(s)
class AmbariConfig:
TWO_WAY_SSL_PROPERTY = "security.server.two_way_ssl"
@@ -246,12 +73,31 @@ class AmbariConfig:
def add_section(self, section):
self.config.add_section(section)
+ @staticmethod
+ def getConfigFile():
+ global configFile
+ return configFile
+
+ @staticmethod
+ def getLogFile():
+ global logfile
+ return logfile
+
+ @staticmethod
+ def getOutFile():
+ global outfile
+ return outfile
+
def setConfig(self, customConfig):
self.config = customConfig
def getConfig(self):
return self.config
+ def getImports(self):
+ global imports
+ return imports
+
def getRolesToClass(self):
global rolesToClass
return rolesToClass
@@ -264,13 +110,9 @@ class AmbariConfig:
global servicesToPidNames
return servicesToPidNames
- def getImports(self):
- global imports
- return imports
-
- def getPidPathesVars(self):
- global pidPathesVars
- return pidPathesVars
+ def getPidPathVars(self):
+ global pidPathVars
+ return pidPathVars
def has_option(self, section, option):
return self.config.has_option(section, option)
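Since the module-level RawConfigParser is built from the embedded content string at import time, the defaults are readable without any ini file on disk; a quick check (assumes the module's imports, e.g. NetUtil, resolve on the path):

from AmbariConfig import config  # parser pre-loaded from the embedded defaults above

print config.get("server", "hostname")     # localhost
print config.getint("server", "url_port")  # 8440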
diff --git a/ambari-agent/src/main/python/ambari_agent/Controller.py b/ambari-agent/src/main/python/ambari_agent/Controller.py
index dc3a1cfd37..d985b91b5a 100644
--- a/ambari-agent/src/main/python/ambari_agent/Controller.py
+++ b/ambari-agent/src/main/python/ambari_agent/Controller.py
@@ -22,6 +22,7 @@ import logging
import signal
import json
import sys
+import platform
import os
import socket
import time
@@ -46,11 +47,21 @@ logger = logging.getLogger()
AGENT_AUTO_RESTART_EXIT_CODE = 77
+IS_WINDOWS = platform.system() == "Windows"
+
class Controller(threading.Thread):
- def __init__(self, config, range=30):
+ def __init__(self, config, heartbeat_stop_callback = None, range=30):
threading.Thread.__init__(self)
logger.debug('Initializing Controller RPC thread.')
+
+ if heartbeat_stop_callback is None:
+ if IS_WINDOWS:
+ from HeartbeatHandlers_windows import HeartbeatStopHandler
+ else:
+ from HeartbeatStopHandler_linux import HeartbeatStopHandler
+ heartbeat_stop_callback = HeartbeatStopHandler()
+
self.lock = threading.Lock()
self.safeMode = True
self.credential = None
@@ -62,7 +73,7 @@ class Controller(threading.Thread):
self.registerUrl = server_secured_url + '/agent/v1/register/' + self.hostname
self.heartbeatUrl = server_secured_url + '/agent/v1/heartbeat/' + self.hostname
self.componentsUrl = server_secured_url + '/agent/v1/components/'
- self.netutil = NetUtil()
+ self.netutil = NetUtil(heartbeat_stop_callback)
self.responseId = -1
self.repeatRegistration = False
self.isRegistered = False
@@ -71,10 +82,10 @@ class Controller(threading.Thread):
self.hasMappedComponents = True
# Event is used for synchronizing heartbeat iterations (to make possible
# manual wait() interruption between heartbeats )
- self.heartbeat_wait_event = threading.Event()
+ self.heartbeat_stop_callback = heartbeat_stop_callback
# List of callbacks that are called at agent registration
self.registration_listeners = []
-
+
# pull config directory out of config
cache_dir = config.get('agent', 'cache_dir')
if cache_dir is None:
@@ -197,6 +208,9 @@ class Controller(threading.Thread):
DEBUG_SUCCESSFULL_HEARTBEATS = 0
DEBUG_STOP_HEARTBEATING = False
+ def trigger_heartbeat(self):
+ self.heartbeat_stop_callback.set_heartbeat()
+
def heartbeatWithServer(self):
self.DEBUG_HEARTBEAT_RETRIES = 0
self.DEBUG_SUCCESSFULL_HEARTBEATS = 0
@@ -261,14 +275,14 @@ class Controller(threading.Thread):
if 'statusCommands' in response.keys():
self.addToStatusQueue(response['statusCommands'])
pass
-
+
if 'alertDefinitionCommands' in response.keys():
self.alert_scheduler_handler.update_definitions(response['alertDefinitionCommands'], True)
pass
if 'alertExecutionCommands' in response.keys():
self.alert_scheduler_handler.execute_alert(response['alertExecutionCommands'])
- pass
+ pass
if "true" == response['restartAgent']:
logger.error("Received the restartAgent command")
@@ -284,7 +298,7 @@ class Controller(threading.Thread):
certVerifFailed = False
self.DEBUG_SUCCESSFULL_HEARTBEATS += 1
self.DEBUG_HEARTBEAT_RETRIES = 0
- self.heartbeat_wait_event.clear()
+ self.heartbeat_stop_callback.reset_heartbeat()
except ssl.SSLError:
self.repeatRegistration=False
self.isRegistered = False
@@ -319,10 +333,10 @@ class Controller(threading.Thread):
# Sleep for some time
timeout = self.netutil.HEARTBEAT_IDDLE_INTERVAL_SEC \
- self.netutil.MINIMUM_INTERVAL_BETWEEN_HEARTBEATS
- self.heartbeat_wait_event.wait(timeout=timeout)
- # Sleep a bit more to allow STATUS_COMMAND results to be collected
- # and sent in one heartbeat. Also avoid server overload with heartbeats
- time.sleep(self.netutil.MINIMUM_INTERVAL_BETWEEN_HEARTBEATS)
+ if 0 == self.heartbeat_stop_callback.wait(timeout, self.netutil.MINIMUM_INTERVAL_BETWEEN_HEARTBEATS):
+ # Stop loop when stop event received
+ logger.info("Stop event received")
+ self.DEBUG_STOP_HEARTBEATING=True
pass
def run(self):
@@ -405,7 +419,10 @@ class Controller(threading.Thread):
def main(argv=None):
# Allow Ctrl-C
- signal.signal(signal.SIGINT, signal.SIG_DFL)
+ if IS_WINDOWS:
+ from HeartbeatHandlers_windows import bind_signal_handlers
+ else:
+ from HeartbeatStopHandler_linux import bind_signal_handlers
logger.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s %(filename)s:%(lineno)d - \
@@ -417,7 +434,8 @@ def main(argv=None):
logger.info('Starting Server RPC Thread: %s' % ' '.join(sys.argv))
config = AmbariConfig.config
- collector = Controller(config)
+ heartbeat_stop_callback = bind_signal_handlers()
+ collector = Controller(config, heartbeat_stop_callback)
collector.start()
collector.run()
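The HeartbeatStopHandler implementations are not part of this hunk; the sketch below is inferred from the call sites above (wait() returning 0 when a stop was signalled, set_heartbeat()/reset_heartbeat() replacing the old heartbeat_wait_event) and is an assumption, not the shipped code:

import threading
import time

class SketchStopHandler(object):
    def __init__(self):
        self.heartbeat_event = threading.Event()  # set by trigger_heartbeat()
        self.stop_event = threading.Event()       # set by a signal/service handler

    def set_heartbeat(self):
        self.heartbeat_event.set()

    def reset_heartbeat(self):
        self.heartbeat_event.clear()

    def wait(self, heartbeat_timeout, grace_period=0):
        # Wake early when a heartbeat is triggered, then linger briefly so
        # STATUS_COMMAND results can be bundled into the same heartbeat.
        self.heartbeat_event.wait(heartbeat_timeout)
        time.sleep(grace_period)
        return 0 if self.stop_event.is_set() else 1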
diff --git a/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py b/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py
index e0c5a283cb..7b5889cf81 100644
--- a/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py
+++ b/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py
@@ -64,7 +64,7 @@ class CustomServiceOrchestrator():
self.public_fqdn = hostname.public_hostname(config)
# cache reset will be called on every agent registration
controller.registration_listeners.append(self.file_cache.reset)
-
+
# Clean up old status command files if any
try:
os.unlink(self.status_commands_stdout)
@@ -88,7 +88,7 @@ class CustomServiceOrchestrator():
"reason - {reason} . Killing process {pid}"
.format(tid = str(task_id), reason = reason, pid = pid))
shell.kill_process_with_children(pid)
- else:
+ else:
logger.warn("Unable to find pid by taskId = %s"%task_id)
def runCommand(self, command, tmpoutfile, tmperrfile, forced_command_name = None,
@@ -101,7 +101,7 @@ class CustomServiceOrchestrator():
script_type = command['commandParams']['script_type']
script = command['commandParams']['script']
timeout = int(command['commandParams']['command_timeout'])
-
+
if 'hostLevelParams' in command and 'jdk_location' in command['hostLevelParams']:
server_url_prefix = command['hostLevelParams']['jdk_location']
else:
@@ -149,7 +149,7 @@ class CustomServiceOrchestrator():
handle = command['__handle']
handle.on_background_command_started = self.map_task_to_process
del command['__handle']
-
+
json_path = self.dump_command_to_json(command)
pre_hook_tuple = self.resolve_hook_script_path(hook_dir,
self.PRE_HOOK_PREFIX, command_name, script_type)
@@ -187,7 +187,7 @@ class CustomServiceOrchestrator():
if cancel_reason:
ret['stdout'] += cancel_reason
ret['stderr'] += cancel_reason
-
+
with open(tmpoutfile, "a") as f:
f.write(cancel_reason)
with open(tmperrfile, "a") as f:
@@ -213,7 +213,7 @@ class CustomServiceOrchestrator():
if not isinstance(pid, int):
return '\nCommand aborted. ' + pid
return None
-
+
def requestComponentStatus(self, command):
"""
Component status is determined by exit code, returned by runCommand().
@@ -262,6 +262,8 @@ class CustomServiceOrchestrator():
# Perform few modifications to stay compatible with the way in which
public_fqdn = self.public_fqdn
command['public_hostname'] = public_fqdn
+ # Add cache dir to make it visible for commands
+ command["hostLevelParams"]["agentCacheDir"] = self.config.get('agent', 'cache_dir')
# Now, dump the json file
command_type = command['commandType']
from ActionQueue import ActionQueue # To avoid cyclic dependency
diff --git a/ambari-agent/src/main/python/ambari_agent/Facter.py b/ambari-agent/src/main/python/ambari_agent/Facter.py
index 947f38021b..aabc77dd86 100644
--- a/ambari-agent/src/main/python/ambari_agent/Facter.py
+++ b/ambari-agent/src/main/python/ambari_agent/Facter.py
@@ -27,55 +27,29 @@ import shlex
import socket
import multiprocessing
import subprocess
-
+from shell import shellRunner
import time
import uuid
from ambari_commons import OSCheck
log = logging.getLogger()
-# selinux command
-GET_SE_LINUX_ST_CMD = "/usr/sbin/sestatus"
-GET_IFCONFIG_CMD = "ifconfig"
-GET_UPTIME_CMD = "cat /proc/uptime"
-GET_MEMINFO_CMD = "cat /proc/meminfo"
-
-class Facter():
- def __init__(self):
-
- self.DATA_IFCONFIG_OUTPUT = Facter.setDataIfConfigOutput()
- self.DATA_UPTIME_OUTPUT = Facter.setDataUpTimeOutput()
- self.DATA_MEMINFO_OUTPUT = Facter.setMemInfoOutput()
-
- @staticmethod
- def setDataIfConfigOutput():
-
- try:
- result = os.popen(GET_IFCONFIG_CMD).read()
- return result
- except OSError:
- log.warn("Can't execute {0}".format(GET_IFCONFIG_CMD))
- return ""
-
- @staticmethod
- def setDataUpTimeOutput():
- try:
- result = os.popen(GET_UPTIME_CMD).read()
- return result
- except OSError:
- log.warn("Can't execute {0}".format(GET_UPTIME_CMD))
- return ""
+def run_os_command(cmd):
+ if type(cmd) == str:
+ cmd = shlex.split(cmd)
+ process = subprocess.Popen(cmd,
+ stdout=subprocess.PIPE,
+ stdin=subprocess.PIPE,
+ stderr=subprocess.PIPE
+ )
+ (stdoutdata, stderrdata) = process.communicate()
+ return process.returncode, stdoutdata, stderrdata
- @staticmethod
- def setMemInfoOutput():
- try:
- result = os.popen(GET_MEMINFO_CMD).read()
- return result
- except OSError:
- log.warn("Can't execute {0}".format(GET_MEMINFO_CMD))
- return ""
+class FacterBase():
+ def __init__(self):
+ pass
# Returns the currently running user id
def getId(self):
@@ -87,7 +61,7 @@ class Facter():
# Returns the FQDN of the host
def getFqdn(self):
- return socket.getfqdn()
+ return socket.getfqdn().lower()
# Returns the host's primary DNS domain name
def getDomain(self):
@@ -153,15 +127,211 @@ class Facter():
def getOsFamily(self):
return OSCheck.get_os_family()
+ # Return uptime hours
+ def getUptimeHours(self):
+ return self.getUptimeSeconds() / (60 * 60)
+
+ # Return uptime days
+ def getUptimeDays(self):
+ return self.getUptimeSeconds() / (60 * 60 * 24)
+
+ def facterInfo(self):
+ facterInfo = {}
+ facterInfo['id'] = self.getId()
+ facterInfo['kernel'] = self.getKernel()
+ facterInfo['domain'] = self.getDomain()
+ facterInfo['fqdn'] = self.getFqdn()
+ facterInfo['hostname'] = self.getHostname()
+ facterInfo['macaddress'] = self.getMacAddress()
+ facterInfo['architecture'] = self.getArchitecture()
+ facterInfo['operatingsystem'] = self.getOperatingSystem()
+ facterInfo['operatingsystemrelease'] = self.getOperatingSystemRelease()
+ facterInfo['physicalprocessorcount'] = self.getProcessorcount()
+ facterInfo['processorcount'] = self.getProcessorcount()
+ facterInfo['timezone'] = self.getTimeZone()
+ facterInfo['hardwareisa'] = self.getArchitecture()
+ facterInfo['hardwaremodel'] = self.getArchitecture()
+ facterInfo['kernelrelease'] = self.getKernelRelease()
+ facterInfo['kernelversion'] = self.getKernelVersion()
+ facterInfo['osfamily'] = self.getOsFamily()
+ facterInfo['kernelmajversion'] = self.getKernelMajVersion()
+
+ facterInfo['ipaddress'] = self.getIpAddress()
+ facterInfo['netmask'] = self.getNetmask()
+ facterInfo['interfaces'] = self.getInterfaces()
+
+ facterInfo['uptime_seconds'] = str(self.getUptimeSeconds())
+ facterInfo['uptime_hours'] = str(self.getUptimeHours())
+ facterInfo['uptime_days'] = str(self.getUptimeDays())
+
+ facterInfo['memorysize'] = self.getMemorySize()
+ facterInfo['memoryfree'] = self.getMemoryFree()
+ facterInfo['memorytotal'] = self.getMemoryTotal()
+
+ return facterInfo
+
+ #Convert kB to GB
+ @staticmethod
+ def convertSizeKbToGb(size):
+ return "%0.2f GB" % round(float(size) / (1024.0 * 1024.0), 2)
+
+ #Convert MB to GB
+ @staticmethod
+ def convertSizeMbToGb(size):
+ return "%0.2f GB" % round(float(size) / (1024.0), 2)
+
+
+class FacterWindows(FacterBase):
+ GET_SYSTEM_INFO_CMD = "systeminfo"
+ GET_MEMORY_CMD = '$mem =(Get-WMIObject Win32_OperatingSystem -ComputerName "LocalHost" ); echo "$($mem.FreePhysicalMemory) $($mem.TotalVisibleMemorySize)"'
+ GET_PAGE_FILE_INFO = '$pgo=(Get-WmiObject Win32_PageFileUsage); echo "$($pgo.AllocatedBaseSize) $($pgo.AllocatedBaseSize-$pgo.CurrentUsage)"'
+ GET_UPTIME_CMD = 'echo $([int]((get-date)-[system.management.managementdatetimeconverter]::todatetime((get-wmiobject -class win32_operatingsystem).Lastbootuptime)).TotalSeconds)'
+
+ # Return the first IP address
+ def getIpAddress(self):
+ #TODO check if we need ipconfig
+ return socket.gethostbyname(socket.gethostname().lower())
+
+ # Return netmask
+ def getNetmask(self):
+ #TODO return correct netmask
+ return 'OS NOT SUPPORTED'
+
+ # Return interfaces
+ def getInterfaces(self):
+ #TODO return correct interfaces
+ return 'OS NOT SUPPORTED'
+
+ # Return uptime seconds
+ def getUptimeSeconds(self):
+ try:
+ runner = shellRunner()
+ result = runner.runPowershell(script_block=FacterWindows.GET_UPTIME_CMD).output.replace('\n', '').replace('\r',
+ '')
+ return int(result)
+ except:
+ log.warn("Can not get SwapFree")
+ return 0
+
+ # Return memoryfree
+ def getMemoryFree(self):
+ try:
+ runner = shellRunner()
+ result = runner.runPowershell(script_block=FacterWindows.GET_MEMORY_CMD).output.split(" ")[0].replace('\n',
+ '').replace(
+ '\r', '')
+ return result
+ except:
+ log.warn("Can not get MemoryFree")
+ return 0
+
+ # Return memorytotal
+ def getMemoryTotal(self):
+ try:
+ runner = shellRunner()
+ result = runner.runPowershell(script_block=FacterWindows.GET_MEMORY_CMD).output.split(" ")[-1].replace('\n',
+ '').replace(
+ '\r', '')
+ return result
+ except:
+ log.warn("Can not get MemoryTotal")
+ return 0
+
+ # Return swapfree
+ def getSwapFree(self):
+ try:
+ runner = shellRunner()
+ result = runner.runPowershell(script_block=FacterWindows.GET_PAGE_FILE_INFO).output.split(" ")[-1].replace('\n',
+ '').replace(
+ '\r', '')
+ return result
+ except:
+ log.warn("Can not get SwapFree")
+ return 0
+
+ # Return swapsize
+ def getSwapSize(self):
+ try:
+ runner = shellRunner()
+ result = runner.runPowershell(script_block=FacterWindows.GET_PAGE_FILE_INFO).output.split(" ")[0].replace('\n',
+ '').replace(
+ '\r', '')
+ return result
+ except:
+ log.warn("Can not get SwapFree")
+ return 0
+
+ # Return memorysize
+ def getMemorySize(self):
+ try:
+ runner = shellRunner()
+ result = runner.runPowershell(script_block=FacterWindows.GET_MEMORY_CMD).output.split(" ")[-1].replace('\n',
+ '').replace(
+ '\r', '')
+ return result
+ except:
+ log.warn("Can not get MemorySize")
+ return 0
+
+ def facterInfo(self):
+ facterInfo = FacterBase.facterInfo(self)
+ facterInfo['swapsize'] = FacterBase.convertSizeMbToGb(self.getSwapSize())
+ facterInfo['swapfree'] = FacterBase.convertSizeMbToGb(self.getSwapFree())
+ return facterInfo
+
+
+class FacterLinux(FacterBase):
+ # selinux command
+ GET_SE_LINUX_ST_CMD = "/usr/sbin/sestatus"
+ GET_IFCONFIG_CMD = "ifconfig"
+ GET_UPTIME_CMD = "cat /proc/uptime"
+ GET_MEMINFO_CMD = "cat /proc/meminfo"
+
+ def __init__(self):
+
+ self.DATA_IFCONFIG_OUTPUT = FacterLinux.setDataIfConfigOutput()
+ self.DATA_UPTIME_OUTPUT = FacterLinux.setDataUpTimeOutput()
+ self.DATA_MEMINFO_OUTPUT = FacterLinux.setMemInfoOutput()
+
+ @staticmethod
+ def setDataIfConfigOutput():
+
+ try:
+ result = os.popen(FacterLinux.GET_IFCONFIG_CMD).read()
+ return result
+ except OSError:
+ log.warn("Can't execute {0}".format(FacterLinux.GET_IFCONFIG_CMD))
+ return ""
+
+ @staticmethod
+ def setDataUpTimeOutput():
+
+ try:
+ result = os.popen(FacterLinux.GET_UPTIME_CMD).read()
+ return result
+ except OSError:
+ log.warn("Can't execute {0}".format(FacterLinux.GET_UPTIME_CMD))
+ return ""
+
+ @staticmethod
+ def setMemInfoOutput():
+
+ try:
+ result = os.popen(FacterLinux.GET_MEMINFO_CMD).read()
+ return result
+ except OSError:
+ log.warn("Can't execute {0}".format(FacterLinux.GET_MEMINFO_CMD))
+ return ""
+
def isSeLinux(self):
try:
- retcode, out, err = run_os_command(GET_SE_LINUX_ST_CMD)
+ retcode, out, err = run_os_command(FacterLinux.GET_SE_LINUX_ST_CMD)
se_status = re.search('(enforcing|permissive|enabled)', out)
if se_status:
return True
except OSError:
- log.warn("Could not run {0}: OK".format(GET_SE_LINUX_ST_CMD))
+ log.warn("Could not run {0}: OK".format(FacterLinux.GET_SE_LINUX_ST_CMD))
return False
# Function that returns list of values that matches
@@ -183,10 +353,6 @@ class Facter():
return result
- #Convert kB to GB
- def convertSizeKbToGb(self, size):
- return "%0.2f GB" % round(float(size) / (1024.0 * 1024.0), 2)
-
 # Return the first IP address
def getIpAddress(self):
ip_pattern="(?: inet addr:)(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})"
@@ -195,7 +361,7 @@ class Facter():
result = self.data_return_first(ip_pattern,self.DATA_IFCONFIG_OUTPUT)
if result == '':
log.warn("Can't get an ip address from {0}".format(self.DATA_IFCONFIG_OUTPUT))
- return socket.gethostbyname(socket.gethostname())
+ return socket.gethostbyname(socket.gethostname().lower())
else:
return result
@@ -231,15 +397,6 @@ class Facter():
log.warn("Can't get an uptime value from {0}".format(self.DATA_UPTIME_OUTPUT))
return 0
-
- # Return uptime hours
- def getUptimeHours(self):
- return self.getUptimeSeconds() / (60 * 60)
-
- # Return uptime days
- def getUptimeDays(self):
- return self.getUptimeSeconds() / (60 * 60 * 24)
-
# Return memoryfree
def getMemoryFree(self):
#:memoryfree_mb => "MemFree",
@@ -284,55 +441,18 @@ class Facter():
log.warn("Can't get memory size from {0}".format(self.DATA_MEMINFO_OUTPUT))
return 0
-
def facterInfo(self):
- facterInfo = {}
- facterInfo['id'] = self.getId()
- facterInfo['kernel'] = self.getKernel()
- facterInfo['domain'] = self.getDomain()
- facterInfo['fqdn'] = self.getFqdn()
- facterInfo['hostname'] = self.getHostname()
- facterInfo['macaddress'] = self.getMacAddress()
- facterInfo['architecture'] = self.getArchitecture()
- facterInfo['operatingsystem'] = self.getOperatingSystem()
- facterInfo['operatingsystemrelease'] = self.getOperatingSystemRelease()
- facterInfo['physicalprocessorcount'] = self.getProcessorcount()
- facterInfo['processorcount'] = self.getProcessorcount()
- facterInfo['timezone'] = self.getTimeZone()
- facterInfo['hardwareisa'] = self.getArchitecture()
- facterInfo['hardwaremodel'] = self.getArchitecture()
- facterInfo['kernelrelease'] = self.getKernelRelease()
- facterInfo['kernelversion'] = self.getKernelVersion()
- facterInfo['osfamily'] = self.getOsFamily()
+ facterInfo = FacterBase.facterInfo(self)
facterInfo['selinux'] = self.isSeLinux()
- facterInfo['kernelmajversion'] = self.getKernelMajVersion()
-
- facterInfo['ipaddress'] = self.getIpAddress()
- facterInfo['netmask'] = self.getNetmask()
- facterInfo['interfaces'] = self.getInterfaces()
-
- facterInfo['uptime_seconds'] = str(self.getUptimeSeconds())
- facterInfo['uptime_hours'] = str(self.getUptimeHours())
- facterInfo['uptime_days'] = str(self.getUptimeDays())
-
- facterInfo['memorysize'] = self.getMemorySize()
- facterInfo['memoryfree'] = self.getMemoryFree()
- facterInfo['swapsize'] = self.convertSizeKbToGb(self.getSwapSize())
- facterInfo['swapfree'] = self.convertSizeKbToGb(self.getSwapFree())
- facterInfo['memorytotal'] = self.getMemoryTotal()
-
+ facterInfo['swapsize'] = FacterBase.convertSizeKbToGb(self.getSwapSize())
+ facterInfo['swapfree'] = FacterBase.convertSizeKbToGb(self.getSwapFree())
return facterInfo
-def run_os_command(cmd):
- if type(cmd) == str:
- cmd = shlex.split(cmd)
- process = subprocess.Popen(cmd,
- stdout=subprocess.PIPE,
- stdin=subprocess.PIPE,
- stderr=subprocess.PIPE
- )
- (stdoutdata, stderrdata) = process.communicate()
- return process.returncode, stdoutdata, stderrdata
+
+if platform.system() == "Windows":
+ Facter = FacterWindows
+else:
+ Facter = FacterLinux
def main(argv=None):
@@ -341,8 +461,3 @@ def main(argv=None):
if __name__ == '__main__':
main()
-
-
-
-
-
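
The module-level alias above is the whole platform-selection mechanism: callers keep importing Facter, and the appropriate class is bound once at import time. A minimal, self-contained sketch of the same pattern (the stub classes are illustrative stand-ins, not the real implementations):

import platform

class _StubFacterLinux(object):
  def facterInfo(self):
    return {'kernel': 'Linux'}

class _StubFacterWindows(object):
  def facterInfo(self):
    return {'kernel': 'windows'}

# Bind the name once at import time, exactly as Facter.py does above.
if platform.system() == "Windows":
  Facter = _StubFacterWindows
else:
  Facter = _StubFacterLinux

print Facter().facterInfo()
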
diff --git a/ambari-agent/src/main/python/ambari_agent/FileCache.py b/ambari-agent/src/main/python/ambari_agent/FileCache.py
index 67b14b21e3..6820db7ed6 100644
--- a/ambari-agent/src/main/python/ambari_agent/FileCache.py
+++ b/ambari-agent/src/main/python/ambari_agent/FileCache.py
@@ -24,6 +24,7 @@ import os
import shutil
import zipfile
import urllib2
+import urllib
logger = logging.getLogger()
@@ -155,7 +156,7 @@ class FileCache():
filename - file inside directory we are trying to fetch
"""
return "{0}/{1}/{2}".format(server_url_prefix,
- directory, filename)
+ urllib.pathname2url(directory), filename)
def fetch_url(self, url):
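
The change to the URL builder above exists because the directory component received from the server may contain characters that are not legal in a URL path; urllib.pathname2url percent-encodes them before the path is assembled. A quick stdlib-only illustration:

import urllib

print urllib.pathname2url("stacks/HDP/2.1/services/HDFS")
# -> stacks/HDP/2.1/services/HDFS (already URL-safe, passes through)
print urllib.pathname2url("custom services/MY SERVICE")
# -> custom%20services/MY%20SERVICE (spaces percent-encoded)
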
diff --git a/ambari-agent/src/main/python/ambari_agent/Hardware.py b/ambari-agent/src/main/python/ambari_agent/Hardware.py
index 801fe3fd18..8b933556f9 100644
--- a/ambari-agent/src/main/python/ambari_agent/Hardware.py
+++ b/ambari-agent/src/main/python/ambari_agent/Hardware.py
@@ -21,12 +21,15 @@ limitations under the License.
import os.path
import logging
import subprocess
+import platform
+from shell import shellRunner
from Facter import Facter
logger = logging.getLogger()
class Hardware:
SSH_KEY_PATTERN = 'ssh.*key'
+ WINDOWS_GET_DRIVES_CMD ="foreach ($drive in [System.IO.DriveInfo]::getdrives()){$available = $drive.TotalFreeSpace;$used = $drive.TotalSize-$drive.TotalFreeSpace;$percent = ($used*100)/$drive.TotalSize;$size = $drive.TotalSize;$type = $drive.DriveFormat;$mountpoint = $drive.RootDirectory.FullName;echo \"$available $used $percent% $size $type $mountpoint\"}"
def __init__(self):
self.hardware = {}
@@ -59,8 +62,15 @@ class Hardware:
@staticmethod
def osdisks():
- """ Run df to find out the disks on the host. Only works on linux
- platforms. Note that this parser ignores any filesystems with spaces
+ if platform.system() == "Windows":
+ return Hardware._osdisks_win()
+ else:
+ return Hardware._osdisks_linux()
+
+ @staticmethod
+ def _osdisks_linux():
+ """ Run df to find out the disks on the host. Only works on linux
+ platforms. Note that this parser ignores any filesystems with spaces
and any mounts with spaces. """
mounts = []
df = subprocess.Popen(["df", "-kPT"], stdout=subprocess.PIPE)
@@ -74,6 +84,25 @@ class Hardware:
pass
return mounts
+ @staticmethod
+ def _osdisks_win():
+ mounts = []
+ runner = shellRunner()
+ command_result = runner.runPowershell(script_block=Hardware.WINDOWS_GET_DRIVES_CMD)
+ if command_result.exitCode != 0:
+ return mounts
+ else:
+ for drive in [line for line in command_result.output.split(os.linesep) if line != '']:
+ available, used, percent, size, type, mountpoint = drive.split(" ")
+ mounts.append({"available": available,
+ "used": used,
+ "percent": percent,
+ "size": size,
+ "type": type,
+ "mountpoint": mountpoint})
+
+ return mounts
+
def get(self):
return self.hardware
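
_osdisks_win() relies on the PowerShell block printing exactly six space-separated fields per drive, in the order available/used/percent/size/type/mountpoint. A hedged sketch of just the parsing half, fed canned output instead of a live shellRunner (the drive values are made up):

import os

# One line per drive, same field order the PowerShell block echoes.
sample_output = "104857600 52428800 50% 157286400 NTFS C:\\" + os.linesep

mounts = []
for drive in [line for line in sample_output.split(os.linesep) if line != '']:
  available, used, percent, size, type, mountpoint = drive.split(" ")
  mounts.append({"available": available, "used": used, "percent": percent,
                 "size": size, "type": type, "mountpoint": mountpoint})
print mounts
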
diff --git a/ambari-agent/src/main/python/ambari_agent/HeartbeatHandlers_windows.py b/ambari-agent/src/main/python/ambari_agent/HeartbeatHandlers_windows.py
new file mode 100644
index 0000000000..1b6c7de952
--- /dev/null
+++ b/ambari-agent/src/main/python/ambari_agent/HeartbeatHandlers_windows.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import win32event
+
+from ambari_commons.exceptions import FatalException
+
+
+def bind_signal_handlers(agentPid):
+ return HeartbeatStopHandler()
+
+
+class HeartbeatStopHandler:
+ def __init__(self, stopEvent = None):
+ # Event used for synchronizing heartbeat iterations (makes manual wait()
+ # interruption between heartbeats possible)
+ self._heventHeartbeat = win32event.CreateEvent(None, 0, 0, None)
+
+ # Event is used to stop the Agent process
+ if stopEvent is None:
+ #Allow standalone testing
+ self._heventStop = win32event.CreateEvent(None, 0, 0, None)
+ else:
+ #Allow one unique event per process
+ self._heventStop = stopEvent
+
+ def set_heartbeat(self):
+ win32event.SetEvent(self._heventHeartbeat)
+
+ def reset_heartbeat(self):
+ win32event.ResetEvent(self._heventHeartbeat)
+
+ def wait(self, timeout1, timeout2 = 0):
+ timeout = int(timeout1 + timeout2) * 1000
+
+ result = win32event.WaitForMultipleObjects([self._heventStop, self._heventHeartbeat], False, timeout)
+ if(win32event.WAIT_OBJECT_0 != result and win32event.WAIT_OBJECT_0 + 1 != result and win32event.WAIT_TIMEOUT != result):
+ raise FatalException(-1, "Error waiting for stop/heartbeat events: " + str(result))
+ if(win32event.WAIT_TIMEOUT == result):
+ return -1
+ return result - win32event.WAIT_OBJECT_0
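
wait() multiplexes two auto-reset events through WaitForMultipleObjects; the stop event sits at index 0 and the heartbeat event at index 1, so the method returns 0 for stop, 1 for a heartbeat nudge, and -1 on timeout. A hedged usage sketch of a loop built on that contract (the loop body is illustrative, not the agent's actual Controller logic; requires pywin32 and runs on Windows only):

from HeartbeatHandlers_windows import HeartbeatStopHandler

handler = HeartbeatStopHandler()
while True:
  # ... send one heartbeat to the server here ...
  result = handler.wait(10)       # block up to 10 seconds
  if result == 0:                 # stop event fired: shut down
    break
  # result == 1 (heartbeat nudge) or -1 (timeout): iterate again
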
diff --git a/ambari-agent/src/main/python/ambari_agent/HeartbeatStopHandler_linux.py b/ambari-agent/src/main/python/ambari_agent/HeartbeatStopHandler_linux.py
new file mode 100644
index 0000000000..2ef8c7f7c0
--- /dev/null
+++ b/ambari-agent/src/main/python/ambari_agent/HeartbeatStopHandler_linux.py
@@ -0,0 +1,91 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import os
+import logging
+import signal
+import threading
+import traceback
+
+
+logger = logging.getLogger()
+
+_handler = None
+
+def signal_handler(signum, frame):
+ _handler.set_stop()
+
+def bind_signal_handlers(agentPid):
+ if os.getpid() == agentPid:
+ signal.signal(signal.SIGINT, signal_handler)
+ signal.signal(signal.SIGTERM, signal_handler)
+ signal.signal(signal.SIGUSR1, debug)
+
+ global _handler
+ _handler = HeartbeatStopHandler()
+
+ return _handler
+
+def debug(sig, frame):
+ """Interrupt running process, and provide a python prompt for
+ interactive debugging."""
+ d={'_frame':frame} # Allow access to frame object.
+ d.update(frame.f_globals) # Unless shadowed by global
+ d.update(frame.f_locals)
+
+ message = "Signal received : entering python shell.\nTraceback:\n"
+ message += ''.join(traceback.format_stack(frame))
+ logger.info(message)
+
+class HeartbeatStopHandler:
+ def __init__(self, stopEvent = None):
+ # Event used for synchronizing heartbeat iterations (makes manual wait()
+ # interruption between heartbeats possible)
+ self.heartbeat_wait_event = threading.Event()
+
+ # Event is used to stop the Agent process
+ if stopEvent is None:
+ #Allow standalone testing
+ self.stop_event = threading.Event()
+ else:
+ #Allow one unique event per process
+ self.stop_event = stopEvent
+
+ def set_heartbeat(self):
+ self.heartbeat_wait_event.set()
+
+ def reset_heartbeat(self):
+ self.heartbeat_wait_event.clear()
+
+ def set_stop(self):
+ self.stop_event.set()
+
+ def wait(self, timeout1, timeout2 = 0):
+ if self.heartbeat_wait_event.wait(timeout = timeout1):
+ #Event signaled, exit
+ return 0
+ # Stop loop when stop event received
+ # Otherwise sleep a bit more to allow STATUS_COMMAND results to be collected
+ # and sent in one heartbeat. Also avoid server overload with heartbeats
+ if self.stop_event.wait(timeout = timeout2):
+ logger.info("Stop event received")
+ return 1
+ #Timeout
+ return -1
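
The Linux counterpart builds the same API from two threading.Event objects, but note the return codes are swapped relative to the Windows handler: here 0 means the heartbeat event fired during timeout1 and 1 means the stop event fired during timeout2, with -1 for a plain timeout. A self-contained sketch (stdlib only, assumes the module is importable):

import threading
from HeartbeatStopHandler_linux import HeartbeatStopHandler

handler = HeartbeatStopHandler()

# Nudge the handler from another thread after two seconds.
threading.Timer(2.0, handler.set_heartbeat).start()

print handler.wait(5, 5)   # -> 0, the heartbeat event fired inside timeout1
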
diff --git a/ambari-agent/src/main/python/ambari_agent/HostCheckReportFileHandler.py b/ambari-agent/src/main/python/ambari_agent/HostCheckReportFileHandler.py
index c671584b6b..bf24730828 100644
--- a/ambari-agent/src/main/python/ambari_agent/HostCheckReportFileHandler.py
+++ b/ambari-agent/src/main/python/ambari_agent/HostCheckReportFileHandler.py
@@ -31,12 +31,13 @@ class HostCheckReportFileHandler:
HOST_CHECK_FILE = "hostcheck.result"
def __init__(self, config):
- if config != None:
+ self.hostCheckFilePath = None
+ if config is not None:
hostCheckFileDir = config.get('agent', 'prefix')
self.hostCheckFilePath = os.path.join(hostCheckFileDir, self.HOST_CHECK_FILE)
def writeHostCheckFile(self, hostInfo):
- if self.hostCheckFilePath == None:
+ if self.hostCheckFilePath is None:
return
try:
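
Switching from == None to is None is more than style: == dispatches to a class's __eq__, which can be overridden to compare equal to anything, while is checks object identity and cannot be fooled. A tiny illustration:

class AlwaysEqual(object):
  def __eq__(self, other):
    return True   # claims equality with everything, including None

obj = AlwaysEqual()
print obj == None   # True, misleading
print obj is None   # False, correct
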
diff --git a/ambari-agent/src/main/python/ambari_agent/HostInfo.py b/ambari-agent/src/main/python/ambari_agent/HostInfo.py
index 89e22b13f2..7a1e50af78 100644
--- a/ambari-agent/src/main/python/ambari_agent/HostInfo.py
+++ b/ambari-agent/src/main/python/ambari_agent/HostInfo.py
@@ -18,394 +18,11 @@ See the License for the specific language governing permissions and
limitations under the License.
'''
-import os
-import glob
-import logging
-import pwd
-import re
-import time
-import subprocess
-import threading
-import shlex
import platform
-import hostname
-from PackagesAnalyzer import PackagesAnalyzer
-from HostCheckReportFileHandler import HostCheckReportFileHandler
-from Hardware import Hardware
-from ambari_commons import OSCheck, OSConst, Firewall
-import socket
-logger = logging.getLogger()
-
-# service cmd
-SERVICE_CMD = "service"
-
-
-class HostInfo:
- # List of project names to be used to find alternatives folders etc.
- DEFAULT_PROJECT_NAMES = [
- "hadoop*", "hadoop", "hbase", "hcatalog", "hive", "ganglia",
- "oozie", "sqoop", "hue", "zookeeper", "mapred", "hdfs", "flume",
- "storm", "hive-hcatalog", "tez", "falcon", "ambari_qa", "hadoop_deploy",
- "rrdcached", "hcat", "ambari-qa", "sqoop-ambari-qa", "sqoop-ambari_qa",
- "webhcat", "hadoop-hdfs", "hadoop-yarn", "hadoop-mapreduce"
- ]
-
- # List of live services checked for on the host, takes a map of plan strings
- DEFAULT_LIVE_SERVICES = [
- {OSConst.REDHAT_FAMILY: "ntpd", OSConst.SUSE_FAMILY: "ntp", OSConst.UBUNTU_FAMILY: "ntp"}
- ]
-
- # Set of default users (need to be replaced with the configured user names)
- DEFAULT_USERS = [
- "hive", "ambari-qa", "oozie", "hbase", "hcat", "mapred",
- "hdfs", "rrdcached", "zookeeper", "flume", "sqoop", "sqoop2",
- "hue", "yarn", "tez", "storm", "falcon", "kafka","knox"
- ]
-
- # Filters used to identify processed
- PROC_FILTER = [
- "hadoop", "zookeeper"
- ]
-
- # Additional path patterns to find existing directory
- DIRNAME_PATTERNS = [
- "/tmp/hadoop-", "/tmp/hsperfdata_"
- ]
-
- # Default set of directories that are checked for existence of files and folders
- DEFAULT_DIRS = [
- "/etc", "/var/run", "/var/log", "/usr/lib", "/var/lib", "/var/tmp", "/tmp", "/var", "/hadoop"
- ]
-
- # Packages that are used to find repos (then repos are used to find other packages)
- PACKAGES = [
- "hadoop_2_2_*","hadoop-2-2-.*","zookeeper_2_2_*","zookeeper-2-2-.*",
- "hadoop", "zookeeper", "webhcat", "*-manager-server-db", "*-manager-daemons"
- ]
-
- # Additional packages to look for (search packages that start with these)
- ADDITIONAL_PACKAGES = [
- "rrdtool", "rrdtool-python", "ganglia", "gmond", "gweb", "libconfuse",
- "ambari-log4j", "hadoop", "zookeeper", "oozie", "webhcat"
- ]
-
- # ignore packages from repos whose names start with these strings
- IGNORE_PACKAGES_FROM_REPOS = [
- "ambari", "installed"
- ]
-
- # ignore required packages
- IGNORE_PACKAGES = [
- "epel-release"
- ]
-
- # ignore repos from the list of repos to be cleaned
- IGNORE_REPOS = [
- "ambari", "HDP-UTILS"
- ]
-
- # default timeout for async invoked processes
- TIMEOUT_SECONDS = 60
- RESULT_UNAVAILABLE = "unable_to_determine"
-
- DEFAULT_SERVICE_NAME = "ntpd"
- SERVICE_STATUS_CMD = "%s %s status" % (SERVICE_CMD, DEFAULT_SERVICE_NAME)
-
- THP_FILE = "/sys/kernel/mm/redhat_transparent_hugepage/enabled"
-
- event = threading.Event()
-
- current_umask = -1
-
- def __init__(self, config=None):
- self.packages = PackagesAnalyzer()
- self.config = config
- self.reportFileHandler = HostCheckReportFileHandler(config)
-
- def dirType(self, path):
- if not os.path.exists(path):
- return 'not_exist'
- elif os.path.islink(path):
- return 'sym_link'
- elif os.path.isdir(path):
- return 'directory'
- elif os.path.isfile(path):
- return 'file'
- return 'unknown'
-
- def hadoopVarRunCount(self):
- if not os.path.exists('/var/run/hadoop'):
- return 0
- pids = glob.glob('/var/run/hadoop/*/*.pid')
- return len(pids)
-
- def hadoopVarLogCount(self):
- if not os.path.exists('/var/log/hadoop'):
- return 0
- logs = glob.glob('/var/log/hadoop/*/*.log')
- return len(logs)
-
- def etcAlternativesConf(self, projects, etcResults):
- if not os.path.exists('/etc/alternatives'):
- return []
- projectRegex = "'" + '|'.join(projects) + "'"
- files = [f for f in os.listdir('/etc/alternatives') if re.match(projectRegex, f)]
- for conf in files:
- result = {}
- filePath = os.path.join('/etc/alternatives', conf)
- if os.path.islink(filePath):
- realConf = os.path.realpath(filePath)
- result['name'] = conf
- result['target'] = realConf
- etcResults.append(result)
-
- def checkLiveServices(self, services, result):
- osType = OSCheck.get_os_family()
- for service in services:
- svcCheckResult = {}
- if isinstance(service, dict):
- serviceName = service[osType]
- else:
- serviceName = service
-
- service_check_live = shlex.split(self.SERVICE_STATUS_CMD)
- service_check_live[1] = serviceName
-
- svcCheckResult['name'] = serviceName
- svcCheckResult['status'] = "UNKNOWN"
- svcCheckResult['desc'] = ""
- try:
- osStat = subprocess.Popen(service_check_live, stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- out, err = osStat.communicate()
- if 0 != osStat.returncode:
- svcCheckResult['status'] = "Unhealthy"
- svcCheckResult['desc'] = out
- if len(out) == 0:
- svcCheckResult['desc'] = err
- else:
- svcCheckResult['status'] = "Healthy"
- except Exception, e:
- svcCheckResult['status'] = "Unhealthy"
- svcCheckResult['desc'] = repr(e)
- result.append(svcCheckResult)
-
- def checkUsers(self, users, results):
- f = open('/etc/passwd', 'r')
- for userLine in f:
- fields = userLine.split(":")
- if fields[0] in users:
- result = {}
- homeDir = fields[5]
- result['name'] = fields[0]
- result['homeDir'] = fields[5]
- result['status'] = "Available"
- if not os.path.exists(homeDir):
- result['status'] = "Invalid home directory"
- results.append(result)
-
- def osdiskAvailableSpace(self, path):
- diskInfo = {}
- try:
- df = subprocess.Popen(["df", "-kPT", path], stdout=subprocess.PIPE)
- dfdata = df.communicate()[0]
- return Hardware.extractMountInfo(dfdata.splitlines()[-1])
- except:
- pass
- return diskInfo
-
- def createAlerts(self, alerts):
- existingUsers = []
- self.checkUsers(self.DEFAULT_USERS, existingUsers)
- dirs = []
- self.checkFolders(self.DEFAULT_DIRS, self.DEFAULT_PROJECT_NAMES, existingUsers, dirs)
- alert = {
- 'name': 'host_alert',
- 'instance': None,
- 'service': 'AMBARI',
- 'component': 'host',
- 'host': hostname.hostname(self.config),
- 'state': 'OK',
- 'label': 'Disk space',
- 'text': 'Used disk space less than 80%'}
- message = ""
- mountinfoSet = []
- for dir in dirs:
- if dir["type"] == 'directory':
- mountinfo = self.osdiskAvailableSpace(dir['name'])
- if int(mountinfo["percent"].strip('%')) >= 80:
- if not mountinfo in mountinfoSet:
- mountinfoSet.append(mountinfo)
- message += str(dir['name']) + ";\n"
-
- if message != "":
- message = "These discs have low space:\n" + str(mountinfoSet) + "\n They include following critical directories:\n" + message
- alert['state'] = 'WARNING'
- alert['text'] = message
- alerts.append(alert)
- return alerts
-
- def checkFolders(self, basePaths, projectNames, existingUsers, dirs):
- foldersToIgnore = []
- for user in existingUsers:
- foldersToIgnore.append(user['homeDir'])
- try:
- for dirName in basePaths:
- for project in projectNames:
- path = os.path.join(dirName.strip(), project.strip())
- if not path in foldersToIgnore and os.path.exists(path):
- obj = {}
- obj['type'] = self.dirType(path)
- obj['name'] = path
- dirs.append(obj)
- except:
- pass
-
- def javaProcs(self, list):
- try:
- pids = [pid for pid in os.listdir('/proc') if pid.isdigit()]
- for pid in pids:
- cmd = open(os.path.join('/proc', pid, 'cmdline'), 'rb').read()
- cmd = cmd.replace('\0', ' ')
- if not 'AmbariServer' in cmd:
- if 'java' in cmd:
- dict = {}
- dict['pid'] = int(pid)
- dict['hadoop'] = False
- for filter in self.PROC_FILTER:
- if filter in cmd:
- dict['hadoop'] = True
- dict['command'] = cmd.strip()
- for line in open(os.path.join('/proc', pid, 'status')):
- if line.startswith('Uid:'):
- uid = int(line.split()[1])
- dict['user'] = pwd.getpwuid(uid).pw_name
- list.append(dict)
- except:
- pass
- pass
-
- def getReposToRemove(self, repos, ignoreList):
- reposToRemove = []
- for repo in repos:
- addToRemoveList = True
- for ignoreRepo in ignoreList:
- if self.packages.nameMatch(ignoreRepo, repo):
- addToRemoveList = False
- continue
- if addToRemoveList:
- reposToRemove.append(repo)
- return reposToRemove
-
- def getUMask(self):
- if (self.current_umask == -1):
- self.current_umask = os.umask(self.current_umask)
- os.umask(self.current_umask)
- return self.current_umask
- else:
- return self.current_umask
-
- def getTransparentHugePage(self):
- # This file exist only on redhat 6
- thp_regex = "\[(.+)\]"
- if os.path.isfile(self.THP_FILE):
- with open(self.THP_FILE) as f:
- file_content = f.read()
- return re.search(thp_regex, file_content).groups()[0]
- else:
- return ""
-
- def checkIptables(self):
- return Firewall().getFirewallObject().check_iptables()
-
- """ Return various details about the host
- componentsMapped: indicates if any components are mapped to this host
- commandsInProgress: indicates if any commands are in progress
- """
- def register(self, dict, componentsMapped=True, commandsInProgress=True):
- dict['hostHealth'] = {}
-
- java = []
- self.javaProcs(java)
- dict['hostHealth']['activeJavaProcs'] = java
-
- liveSvcs = []
- self.checkLiveServices(self.DEFAULT_LIVE_SERVICES, liveSvcs)
- dict['hostHealth']['liveServices'] = liveSvcs
-
- dict['umask'] = str(self.getUMask())
-
- dict['transparentHugePage'] = self.getTransparentHugePage()
- dict['iptablesIsRunning'] = self.checkIptables()
- dict['reverseLookup'] = self.checkReverseLookup()
- # If commands are in progress or components are already mapped to this host
- # Then do not perform certain expensive host checks
- if componentsMapped or commandsInProgress:
- dict['existingRepos'] = [self.RESULT_UNAVAILABLE]
- dict['installedPackages'] = []
- dict['alternatives'] = []
- dict['stackFoldersAndFiles'] = []
- dict['existingUsers'] = []
-
- else:
- etcs = []
- self.etcAlternativesConf(self.DEFAULT_PROJECT_NAMES, etcs)
- dict['alternatives'] = etcs
-
- existingUsers = []
- self.checkUsers(self.DEFAULT_USERS, existingUsers)
- dict['existingUsers'] = existingUsers
-
- dirs = []
- self.checkFolders(self.DEFAULT_DIRS, self.DEFAULT_PROJECT_NAMES, existingUsers, dirs)
- dict['stackFoldersAndFiles'] = dirs
-
- installedPackages = []
- availablePackages = []
- self.packages.allInstalledPackages(installedPackages)
- self.packages.allAvailablePackages(availablePackages)
-
- repos = []
- self.packages.getInstalledRepos(self.PACKAGES, installedPackages + availablePackages,
- self.IGNORE_PACKAGES_FROM_REPOS, repos)
- packagesInstalled = self.packages.getInstalledPkgsByRepo(repos, self.IGNORE_PACKAGES, installedPackages)
- additionalPkgsInstalled = self.packages.getInstalledPkgsByNames(
- self.ADDITIONAL_PACKAGES, installedPackages)
- allPackages = list(set(packagesInstalled + additionalPkgsInstalled))
- dict['installedPackages'] = self.packages.getPackageDetails(installedPackages, allPackages)
-
- repos = self.getReposToRemove(repos, self.IGNORE_REPOS)
- dict['existingRepos'] = repos
-
- self.reportFileHandler.writeHostCheckFile(dict)
- pass
-
- # The time stamp must be recorded at the end
- dict['hostHealth']['agentTimeStampAtReporting'] = int(time.time() * 1000)
-
- pass
-
- def checkReverseLookup(self):
- """
- Check if host fqdn resolves to current host ip
- """
- try:
- host_name = socket.gethostname()
- host_ip = socket.gethostbyname(host_name)
- host_fqdn = socket.getfqdn()
- fqdn_ip = socket.gethostbyname(host_fqdn)
- return host_ip == fqdn_ip
- except socket.error:
- pass
- return False
-
-def main(argv=None):
- h = HostInfo()
- struct = {}
- h.register(struct)
- print struct
-
-
-if __name__ == '__main__':
- main()
+if platform.system() == "Windows":
+ import HostInfo_win
+ HostInfo = HostInfo_win.HostInfo
+else:
+ import HostInfo_linux
+ HostInfo = HostInfo_linux.HostInfo
diff --git a/ambari-agent/src/main/python/ambari_agent/HostInfo_linux.py b/ambari-agent/src/main/python/ambari_agent/HostInfo_linux.py
new file mode 100644
index 0000000000..d172443bd4
--- /dev/null
+++ b/ambari-agent/src/main/python/ambari_agent/HostInfo_linux.py
@@ -0,0 +1,411 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import os
+import glob
+import logging
+import pwd
+import re
+import time
+import subprocess
+import threading
+import shlex
+import platform
+import hostname
+from PackagesAnalyzer import PackagesAnalyzer
+from HostCheckReportFileHandler import HostCheckReportFileHandler
+from Hardware import Hardware
+from ambari_commons import OSCheck, OSConst, Firewall
+import socket
+
+logger = logging.getLogger()
+
+# service cmd
+SERVICE_CMD = "service"
+
+
+class HostInfo:
+ # List of project names to be used to find alternatives folders etc.
+ DEFAULT_PROJECT_NAMES = [
+ "hadoop*", "hadoop", "hbase", "hcatalog", "hive", "ganglia",
+ "oozie", "sqoop", "hue", "zookeeper", "mapred", "hdfs", "flume",
+ "storm", "hive-hcatalog", "tez", "falcon", "ambari_qa", "hadoop_deploy",
+ "rrdcached", "hcat", "ambari-qa", "sqoop-ambari-qa", "sqoop-ambari_qa",
+ "webhcat", "hadoop-hdfs", "hadoop-yarn", "hadoop-mapreduce"
+ ]
+
+ # List of live services checked for on the host; each entry is either a plain service name or a map keyed by OS family
+ DEFAULT_LIVE_SERVICES = [
+ {OSConst.REDHAT_FAMILY: "ntpd", OSConst.SUSE_FAMILY: "ntp", OSConst.UBUNTU_FAMILY: "ntp"}
+ ]
+
+ # Set of default users (need to be replaced with the configured user names)
+ DEFAULT_USERS = [
+ "hive", "ambari-qa", "oozie", "hbase", "hcat", "mapred",
+ "hdfs", "rrdcached", "zookeeper", "flume", "sqoop", "sqoop2",
+ "hue", "yarn", "tez", "storm", "falcon", "kafka","knox"
+ ]
+
+ # Filters used to identify processes
+ PROC_FILTER = [
+ "hadoop", "zookeeper"
+ ]
+
+ # Additional path patterns to find existing directory
+ DIRNAME_PATTERNS = [
+ "/tmp/hadoop-", "/tmp/hsperfdata_"
+ ]
+
+ # Default set of directories that are checked for existence of files and folders
+ DEFAULT_DIRS = [
+ "/etc", "/var/run", "/var/log", "/usr/lib", "/var/lib", "/var/tmp", "/tmp", "/var", "/hadoop"
+ ]
+
+ # Packages that are used to find repos (then repos are used to find other packages)
+ PACKAGES = [
+ "hadoop_2_2_*","hadoop-2-2-.*","zookeeper_2_2_*","zookeeper-2-2-.*",
+ "hadoop", "zookeeper", "webhcat", "*-manager-server-db", "*-manager-daemons"
+ ]
+
+ # Additional packages to look for (search packages that start with these)
+ ADDITIONAL_PACKAGES = [
+ "rrdtool", "rrdtool-python", "ganglia", "gmond", "gweb", "libconfuse",
+ "ambari-log4j", "hadoop", "zookeeper", "oozie", "webhcat"
+ ]
+
+ # ignore packages from repos whose names start with these strings
+ IGNORE_PACKAGES_FROM_REPOS = [
+ "ambari", "installed"
+ ]
+
+ # ignore required packages
+ IGNORE_PACKAGES = [
+ "epel-release"
+ ]
+
+ # ignore repos from the list of repos to be cleaned
+ IGNORE_REPOS = [
+ "ambari", "HDP-UTILS"
+ ]
+
+ # default timeout for async invoked processes
+ TIMEOUT_SECONDS = 60
+ RESULT_UNAVAILABLE = "unable_to_determine"
+
+ DEFAULT_SERVICE_NAME = "ntpd"
+ SERVICE_STATUS_CMD = "%s %s status" % (SERVICE_CMD, DEFAULT_SERVICE_NAME)
+
+ THP_FILE = "/sys/kernel/mm/redhat_transparent_hugepage/enabled"
+
+ event = threading.Event()
+
+ current_umask = -1
+
+ def __init__(self, config=None):
+ self.packages = PackagesAnalyzer()
+ self.config = config
+ self.reportFileHandler = HostCheckReportFileHandler(config)
+
+ def dirType(self, path):
+ if not os.path.exists(path):
+ return 'not_exist'
+ elif os.path.islink(path):
+ return 'sym_link'
+ elif os.path.isdir(path):
+ return 'directory'
+ elif os.path.isfile(path):
+ return 'file'
+ return 'unknown'
+
+ def hadoopVarRunCount(self):
+ if not os.path.exists('/var/run/hadoop'):
+ return 0
+ pids = glob.glob('/var/run/hadoop/*/*.pid')
+ return len(pids)
+
+ def hadoopVarLogCount(self):
+ if not os.path.exists('/var/log/hadoop'):
+ return 0
+ logs = glob.glob('/var/log/hadoop/*/*.log')
+ return len(logs)
+
+ def etcAlternativesConf(self, projects, etcResults):
+ if not os.path.exists('/etc/alternatives'):
+ return []
+ projectRegex = "'" + '|'.join(projects) + "'"
+ files = [f for f in os.listdir('/etc/alternatives') if re.match(projectRegex, f)]
+ for conf in files:
+ result = {}
+ filePath = os.path.join('/etc/alternatives', conf)
+ if os.path.islink(filePath):
+ realConf = os.path.realpath(filePath)
+ result['name'] = conf
+ result['target'] = realConf
+ etcResults.append(result)
+
+ def checkLiveServices(self, services, result):
+ osType = OSCheck.get_os_family()
+ for service in services:
+ svcCheckResult = {}
+ if isinstance(service, dict):
+ serviceName = service[osType]
+ else:
+ serviceName = service
+
+ service_check_live = shlex.split(self.SERVICE_STATUS_CMD)
+ service_check_live[1] = serviceName
+
+ svcCheckResult['name'] = serviceName
+ svcCheckResult['status'] = "UNKNOWN"
+ svcCheckResult['desc'] = ""
+ try:
+ osStat = subprocess.Popen(service_check_live, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ out, err = osStat.communicate()
+ if 0 != osStat.returncode:
+ svcCheckResult['status'] = "Unhealthy"
+ svcCheckResult['desc'] = out
+ if len(out) == 0:
+ svcCheckResult['desc'] = err
+ else:
+ svcCheckResult['status'] = "Healthy"
+ except Exception, e:
+ svcCheckResult['status'] = "Unhealthy"
+ svcCheckResult['desc'] = repr(e)
+ result.append(svcCheckResult)
+
+ def checkUsers(self, users, results):
+ f = open('/etc/passwd', 'r')
+ for userLine in f:
+ fields = userLine.split(":")
+ if fields[0] in users:
+ result = {}
+ homeDir = fields[5]
+ result['name'] = fields[0]
+ result['homeDir'] = fields[5]
+ result['status'] = "Available"
+ if not os.path.exists(homeDir):
+ result['status'] = "Invalid home directory"
+ results.append(result)
+
+ def osdiskAvailableSpace(self, path):
+ diskInfo = {}
+ try:
+ df = subprocess.Popen(["df", "-kPT", path], stdout=subprocess.PIPE)
+ dfdata = df.communicate()[0]
+ return Hardware.extractMountInfo(dfdata.splitlines()[-1])
+ except:
+ pass
+ return diskInfo
+
+ def createAlerts(self, alerts):
+ existingUsers = []
+ self.checkUsers(self.DEFAULT_USERS, existingUsers)
+ dirs = []
+ self.checkFolders(self.DEFAULT_DIRS, self.DEFAULT_PROJECT_NAMES, existingUsers, dirs)
+ alert = {
+ 'name': 'host_alert',
+ 'instance': None,
+ 'service': 'AMBARI',
+ 'component': 'host',
+ 'host': hostname.hostname(self.config),
+ 'state': 'OK',
+ 'label': 'Disk space',
+ 'text': 'Used disk space less than 80%'}
+ message = ""
+ mountinfoSet = []
+ for dir in dirs:
+ if dir["type"] == 'directory':
+ mountinfo = self.osdiskAvailableSpace(dir['name'])
+ if int(mountinfo["percent"].strip('%')) >= 80:
+ if not mountinfo in mountinfoSet:
+ mountinfoSet.append(mountinfo)
+ message += str(dir['name']) + ";\n"
+
+ if message != "":
+ message = "These discs have low space:\n" + str(mountinfoSet) + "\n They include following critical directories:\n" + message
+ alert['state'] = 'WARNING'
+ alert['text'] = message
+ alerts.append(alert)
+ return alerts
+
+ def checkFolders(self, basePaths, projectNames, existingUsers, dirs):
+ foldersToIgnore = []
+ for user in existingUsers:
+ foldersToIgnore.append(user['homeDir'])
+ try:
+ for dirName in basePaths:
+ for project in projectNames:
+ path = os.path.join(dirName.strip(), project.strip())
+ if not path in foldersToIgnore and os.path.exists(path):
+ obj = {}
+ obj['type'] = self.dirType(path)
+ obj['name'] = path
+ dirs.append(obj)
+ except:
+ pass
+
+ def javaProcs(self, list):
+ try:
+ pids = [pid for pid in os.listdir('/proc') if pid.isdigit()]
+ for pid in pids:
+ cmd = open(os.path.join('/proc', pid, 'cmdline'), 'rb').read()
+ cmd = cmd.replace('\0', ' ')
+ if not 'AmbariServer' in cmd:
+ if 'java' in cmd:
+ dict = {}
+ dict['pid'] = int(pid)
+ dict['hadoop'] = False
+ for filter in self.PROC_FILTER:
+ if filter in cmd:
+ dict['hadoop'] = True
+ dict['command'] = cmd.strip()
+ for line in open(os.path.join('/proc', pid, 'status')):
+ if line.startswith('Uid:'):
+ uid = int(line.split()[1])
+ dict['user'] = pwd.getpwuid(uid).pw_name
+ list.append(dict)
+ except:
+ pass
+ pass
+
+ def getReposToRemove(self, repos, ignoreList):
+ reposToRemove = []
+ for repo in repos:
+ addToRemoveList = True
+ for ignoreRepo in ignoreList:
+ if self.packages.nameMatch(ignoreRepo, repo):
+ addToRemoveList = False
+ continue
+ if addToRemoveList:
+ reposToRemove.append(repo)
+ return reposToRemove
+
+ def getUMask(self):
+ if (self.current_umask == -1):
+ self.current_umask = os.umask(self.current_umask)
+ os.umask(self.current_umask)
+ return self.current_umask
+ else:
+ return self.current_umask
+
+ def getTransparentHugePage(self):
+ # This file exists only on RedHat 6
+ thp_regex = "\[(.+)\]"
+ if os.path.isfile(self.THP_FILE):
+ with open(self.THP_FILE) as f:
+ file_content = f.read()
+ return re.search(thp_regex, file_content).groups()[0]
+ else:
+ return ""
+
+ def checkIptables(self):
+ return Firewall().getFirewallObject().check_iptables()
+
+ """ Return various details about the host
+ componentsMapped: indicates if any components are mapped to this host
+ commandsInProgress: indicates if any commands are in progress
+ """
+ def register(self, dict, componentsMapped=True, commandsInProgress=True):
+ dict['hostHealth'] = {}
+
+ java = []
+ self.javaProcs(java)
+ dict['hostHealth']['activeJavaProcs'] = java
+
+ liveSvcs = []
+ self.checkLiveServices(self.DEFAULT_LIVE_SERVICES, liveSvcs)
+ dict['hostHealth']['liveServices'] = liveSvcs
+
+ dict['umask'] = str(self.getUMask())
+
+ dict['transparentHugePage'] = self.getTransparentHugePage()
+ dict['iptablesIsRunning'] = self.checkIptables()
+ dict['reverseLookup'] = self.checkReverseLookup()
+ # If commands are in progress or components are already mapped to this host
+ # Then do not perform certain expensive host checks
+ if componentsMapped or commandsInProgress:
+ dict['existingRepos'] = [self.RESULT_UNAVAILABLE]
+ dict['installedPackages'] = []
+ dict['alternatives'] = []
+ dict['stackFoldersAndFiles'] = []
+ dict['existingUsers'] = []
+
+ else:
+ etcs = []
+ self.etcAlternativesConf(self.DEFAULT_PROJECT_NAMES, etcs)
+ dict['alternatives'] = etcs
+
+ existingUsers = []
+ self.checkUsers(self.DEFAULT_USERS, existingUsers)
+ dict['existingUsers'] = existingUsers
+
+ dirs = []
+ self.checkFolders(self.DEFAULT_DIRS, self.DEFAULT_PROJECT_NAMES, existingUsers, dirs)
+ dict['stackFoldersAndFiles'] = dirs
+
+ installedPackages = []
+ availablePackages = []
+ self.packages.allInstalledPackages(installedPackages)
+ self.packages.allAvailablePackages(availablePackages)
+
+ repos = []
+ self.packages.getInstalledRepos(self.PACKAGES, installedPackages + availablePackages,
+ self.IGNORE_PACKAGES_FROM_REPOS, repos)
+ packagesInstalled = self.packages.getInstalledPkgsByRepo(repos, self.IGNORE_PACKAGES, installedPackages)
+ additionalPkgsInstalled = self.packages.getInstalledPkgsByNames(
+ self.ADDITIONAL_PACKAGES, installedPackages)
+ allPackages = list(set(packagesInstalled + additionalPkgsInstalled))
+ dict['installedPackages'] = self.packages.getPackageDetails(installedPackages, allPackages)
+
+ repos = self.getReposToRemove(repos, self.IGNORE_REPOS)
+ dict['existingRepos'] = repos
+
+ self.reportFileHandler.writeHostCheckFile(dict)
+ pass
+
+ # The time stamp must be recorded at the end
+ dict['hostHealth']['agentTimeStampAtReporting'] = int(time.time() * 1000)
+
+ pass
+
+ def checkReverseLookup(self):
+ """
+ Check if host fqdn resolves to current host ip
+ """
+ try:
+ host_name = socket.gethostname()
+ host_ip = socket.gethostbyname(host_name)
+ host_fqdn = socket.getfqdn()
+ fqdn_ip = socket.gethostbyname(host_fqdn)
+ return host_ip == fqdn_ip
+ except socket.error:
+ pass
+ return False
+
+def main(argv=None):
+ h = HostInfo()
+ struct = {}
+ h.register(struct)
+ print struct
+
+
+if __name__ == '__main__':
+ main() \ No newline at end of file
diff --git a/ambari-agent/src/main/python/ambari_agent/HostInfo_win.py b/ambari-agent/src/main/python/ambari_agent/HostInfo_win.py
new file mode 100644
index 0000000000..6ac987f8f8
--- /dev/null
+++ b/ambari-agent/src/main/python/ambari_agent/HostInfo_win.py
@@ -0,0 +1,231 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import os
+import logging
+import time
+import subprocess
+from HostCheckReportFileHandler import HostCheckReportFileHandler
+from shell import shellRunner
+from ambari_commons.os_check import OSCheck, OSConst
+from ambari_commons.os_windows import run_powershell_script, CHECK_FIREWALL_SCRIPT
+import socket
+
+logger = logging.getLogger()
+
+# OS info
+OS_VERSION = OSCheck().get_os_major_version()
+OS_TYPE = OSCheck.get_os_type()
+OS_FAMILY = OSCheck.get_os_family()
+
+class HostInfo:
+ # List of live services checked for on the host; each entry is either a plain service name or a map keyed by OS family
+ DEFAULT_LIVE_SERVICES = [
+ {OSConst.WINSRV_FAMILY: "W32Time"}
+ ]
+
+ # Set of default users (need to be replaced with the configured user names)
+ DEFAULT_USERS = [
+ "hive", "ambari-qa", "oozie", "hbase", "hcat", "mapred",
+ "hdfs", "rrdcached", "zookeeper", "flume", "sqoop", "sqoop2",
+ "hue", "yarn"
+ ]
+
+ # Filters used to identify processes
+ PROC_FILTER = [
+ "hadoop", "zookeeper"
+ ]
+
+ RESULT_UNAVAILABLE = "unable_to_determine"
+
+ SERVICE_STATUS_CMD = 'If ((Get-Service | Where-Object {{$_.Name -eq \'{0}\'}}).Status -eq \'Running\') {{echo "Running"; $host.SetShouldExit(0)}} Else {{echo "Stopped"; $host.SetShouldExit(1)}}'
+ GET_USERS_CMD = '$accounts=(Get-WmiObject -Class Win32_UserAccount -Namespace "root\cimv2" -Filter "LocalAccount=\'$True\'" -ComputerName "LocalHost" -ErrorAction Stop); foreach ($acc in $accounts) {echo $acc.Name}'
+ GET_JAVA_PROC_CMD = 'foreach ($process in (gwmi Win32_Process -Filter "name = \'java.exe\'")){echo $process.ProcessId;echo $process.CommandLine; echo $process.GetOwner().User}'
+
+ current_umask = -1
+
+ def __init__(self, config=None):
+ self.reportFileHandler = HostCheckReportFileHandler(config)
+
+ def dirType(self, path):
+ if not os.path.exists(path):
+ return 'not_exist'
+ elif os.path.islink(path):
+ return 'sym_link'
+ elif os.path.isdir(path):
+ return 'directory'
+ elif os.path.isfile(path):
+ return 'file'
+ return 'unknown'
+
+ def checkLiveServices(self, services, result):
+ osType = OSCheck.get_os_family()
+ for service in services:
+ svcCheckResult = {}
+ if isinstance(service, dict):
+ serviceName = service[osType]
+ else:
+ serviceName = service
+
+ service_check_live = ["powershell",'-noProfile', '-NonInteractive', '-nologo', "-Command", self.SERVICE_STATUS_CMD.format(serviceName)]
+ svcCheckResult['name'] = serviceName
+ svcCheckResult['status'] = "UNKNOWN"
+ svcCheckResult['desc'] = ""
+ try:
+ osStat = subprocess.Popen(service_check_live, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ out, err = osStat.communicate()
+ if 0 != osStat.returncode:
+ svcCheckResult['status'] = "Unhealthy"
+ svcCheckResult['desc'] = out
+ if len(out) == 0:
+ svcCheckResult['desc'] = err
+ else:
+ svcCheckResult['status'] = "Healthy"
+ except Exception, e:
+ svcCheckResult['status'] = "Unhealthy"
+ svcCheckResult['desc'] = repr(e)
+ result.append(svcCheckResult)
+
+ #TODO get user directory
+ def checkUsers(self, users, results):
+ get_users_cmd = ["powershell",'-noProfile', '-NonInteractive', '-nologo', "-Command", self.GET_USERS_CMD]
+ try:
+ osStat = subprocess.Popen(get_users_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ out, err = osStat.communicate()
+ except:
+ raise Exception("Failed to get users.")
+ for user in out.split(os.linesep):
+ if user in users:
+ result = {}
+ result['name'] = user
+ result['status'] = "Available"
+ results.append(result)
+
+ def createAlerts(self, alerts):
+ #TODO AMBARI-7849 Implement createAlerts for Windows
+ return alerts
+
+ def javaProcs(self, list):
+ try:
+ runner = shellRunner()
+ command_result = runner.run(["powershell",'-noProfile', '-NonInteractive', '-nologo', "-Command", self.GET_JAVA_PROC_CMD])
+ if command_result["exitCode"] == 0:
+ splitted_output = command_result["output"].split(os.linesep)
+ for i in range(0, len(splitted_output), 3):
+ pid = splitted_output[i]
+ cmd = splitted_output[i+1]
+ user = splitted_output[i+2]
+ if not 'AmbariServer' in cmd:
+ if 'java' in cmd:
+ dict = {}
+ dict['pid'] = int(pid)
+ dict['hadoop'] = False
+ for filter in self.PROC_FILTER:
+ if filter in cmd:
+ dict['hadoop'] = True
+ dict['command'] = cmd.strip()
+ dict['user'] = user
+ list.append(dict)
+ except Exception as e:
+ pass
+ pass
+
+ def getUMask(self):
+ if (self.current_umask == -1):
+ self.current_umask = os.umask(self.current_umask)
+ os.umask(self.current_umask)
+ return self.current_umask
+ else:
+ return self.current_umask
+
+ def checkIptables(self):
+ out = run_powershell_script(CHECK_FIREWALL_SCRIPT)
+ if out[0] != 0:
+ logger.warn("Unable to check firewall status:{0}".format(out[2]))
+ return False
+ profiles_status = [i for i in out[1].split("\n") if i != ""]
+ if "1" in profiles_status:
+ return True
+ return False
+
+ """ Return various details about the host
+ componentsMapped: indicates if any components are mapped to this host
+ commandsInProgress: indicates if any commands are in progress
+ """
+ def register(self, dict, componentsMapped=True, commandsInProgress=True):
+ dict['hostHealth'] = {}
+
+ java = []
+ self.javaProcs(java)
+ dict['hostHealth']['activeJavaProcs'] = java
+
+ liveSvcs = []
+ self.checkLiveServices(self.DEFAULT_LIVE_SERVICES, liveSvcs)
+ dict['hostHealth']['liveServices'] = liveSvcs
+
+ dict['umask'] = str(self.getUMask())
+
+ dict['iptablesIsRunning'] = self.checkIptables()
+ dict['reverseLookup'] = self.checkReverseLookup()
+ # If commands are in progress or components are already mapped to this host
+ # Then do not perform certain expensive host checks
+ if componentsMapped or commandsInProgress:
+ dict['existingRepos'] = [self.RESULT_UNAVAILABLE]
+ dict['installedPackages'] = []
+ dict['alternatives'] = []
+ dict['stackFoldersAndFiles'] = []
+ dict['existingUsers'] = []
+ else:
+ existingUsers = []
+ self.checkUsers(self.DEFAULT_USERS, existingUsers)
+ dict['existingUsers'] = existingUsers
+ #TODO check HDP stack and folders here
+ self.reportFileHandler.writeHostCheckFile(dict)
+ pass
+
+ # The time stamp must be recorded at the end
+ dict['hostHealth']['agentTimeStampAtReporting'] = int(time.time() * 1000)
+
+ pass
+
+ def checkReverseLookup(self):
+ """
+ Check if host fqdn resolves to current host ip
+ """
+ try:
+ host_name = socket.gethostname().lower()
+ host_ip = socket.gethostbyname(host_name)
+ host_fqdn = socket.getfqdn().lower()
+ fqdn_ip = socket.gethostbyname(host_fqdn)
+ return host_ip == fqdn_ip
+ except socket.error:
+ pass
+ return False
+
+def main(argv=None):
+ h = HostInfo()
+ struct = {}
+ h.register(struct)
+ print struct
+
+
+if __name__ == '__main__':
+ main()
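
GET_JAVA_PROC_CMD echoes three lines per java.exe process (pid, command line, owning user), and javaProcs() consumes the output in strides of three. A sketch of that grouping with canned output (the process data is made up):

import os

out = os.linesep.join([
    "4242",
    "java -cp ... org.apache.hadoop.hdfs.server.namenode.NameNode",
    "hadoop",
])

lines = out.split(os.linesep)
procs = []
for i in range(0, len(lines), 3):
  procs.append({'pid': int(lines[i]),
                'command': lines[i + 1].strip(),
                'user': lines[i + 2]})
print procs
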
diff --git a/ambari-agent/src/main/python/ambari_agent/LiveStatus.py b/ambari-agent/src/main/python/ambari_agent/LiveStatus.py
index 49189c832b..ec01ee7419 100644
--- a/ambari-agent/src/main/python/ambari_agent/LiveStatus.py
+++ b/ambari-agent/src/main/python/ambari_agent/LiveStatus.py
@@ -65,7 +65,7 @@ class LiveStatus:
status = self.DEAD_STATUS # CLIENT components can't have status STARTED
elif component in self.COMPONENTS:
statusCheck = StatusCheck(AmbariConfig.servicesToPidNames,
- AmbariConfig.pidPathesVars, self.globalConfig,
+ AmbariConfig.pidPathVars, self.globalConfig,
AmbariConfig.servicesToLinuxUser)
serviceStatus = statusCheck.getStatus(self.component)
if serviceStatus is None:
diff --git a/ambari-agent/src/main/python/ambari_agent/NetUtil.py b/ambari-agent/src/main/python/ambari_agent/NetUtil.py
index 3ce981a29e..79d5343b74 100644
--- a/ambari-agent/src/main/python/ambari_agent/NetUtil.py
+++ b/ambari-agent/src/main/python/ambari_agent/NetUtil.py
@@ -15,10 +15,10 @@
from urlparse import urlparse
-import time
import logging
import httplib
from ssl import SSLError
+import platform
ERROR_SSL_WRONG_VERSION = "SSLError: Failed to connect. Please check openssl library versions. \n" +\
"Refer to: https://bugzilla.redhat.com/show_bug.cgi?id=1022468 for more details."
@@ -39,6 +39,23 @@ class NetUtil:
# For testing purposes
DEBUG_STOP_RETRIES_FLAG = False
+ # Stop callback implementation.
+ # Typically it waits a certain time for the daemon/service to receive the stop signal.
+ # Receives the number of seconds to wait as an argument and returns true if
+ # the application is stopping, false if execution should continue.
+ stopCallback = None
+
+ def __init__(self, stop_callback=None):
+ if stop_callback is None:
+ IS_WINDOWS = platform.system() == "Windows"
+ if IS_WINDOWS:
+ from HeartbeatHandlers_windows import HeartbeatStopHandler
+ else:
+ from HeartbeatStopHandler_linux import HeartbeatStopHandler
+ stop_callback = HeartbeatStopHandler()
+
+ self.stopCallback = stop_callback
+
def checkURL(self, url):
"""Try to connect to a given url. Result is True if url returns HTTP code 200, in any other case
(like unreachable server or wrong HTTP code) result will be False.
@@ -78,6 +95,7 @@ class NetUtil:
Returns count of retries
"""
+ connected = False
if logger is not None:
logger.debug("Trying to connect to %s", server_url)
@@ -85,11 +103,17 @@ class NetUtil:
while (max_retries == -1 or retries < max_retries) and not self.DEBUG_STOP_RETRIES_FLAG:
server_is_up, responseBody = self.checkURL(self.SERVER_STATUS_REQUEST.format(server_url))
if server_is_up:
+ connected = True
break
else:
if logger is not None:
logger.warn('Server at {0} is not reachable, sleeping for {1} seconds...'.format(server_url,
self.CONNECT_SERVER_RETRY_INTERVAL_SEC))
retries += 1
- time.sleep(self.CONNECT_SERVER_RETRY_INTERVAL_SEC)
- return retries
+
+ if 0 == self.stopCallback.wait(self.CONNECT_SERVER_RETRY_INTERVAL_SEC):
+ #stop waiting
+ if logger is not None:
+ logger.info("Stop event received")
+ self.DEBUG_STOP_RETRIES_FLAG = True
+ return retries, connected
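
The retry loop now reports both the retry count and whether a connection was actually established, so call sites must unpack a pair instead of a bare integer. A hedged sketch of the new call-site shape (the method name try_to_connect, its argument order, and the URL are assumptions for illustration):

from NetUtil import NetUtil

netutil = NetUtil()   # default stop handler is chosen per platform
retries, connected = netutil.try_to_connect("https://ambari.example.com:8440", 3)
if not connected:
  print "Server unreachable after %d retries" % retries
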
diff --git a/ambari-agent/src/main/python/ambari_agent/PackagesAnalyzer.py b/ambari-agent/src/main/python/ambari_agent/PackagesAnalyzer.py
index 7dabe7cd6d..062a1e71d7 100644
--- a/ambari-agent/src/main/python/ambari_agent/PackagesAnalyzer.py
+++ b/ambari-agent/src/main/python/ambari_agent/PackagesAnalyzer.py
@@ -77,7 +77,7 @@ class PackagesAnalyzer:
# All installed packages in systems supporting yum
def allInstalledPackages(self, allInstalledPackages):
osType = OSCheck.get_os_family()
-
+
if osType == OSConst.SUSE_FAMILY:
return self.lookUpZypperPackages(
["zypper", "search", "--installed-only", "--details"],
@@ -90,11 +90,11 @@ class PackagesAnalyzer:
elif osType == OSConst.UBUNTU_FAMILY:
return self.lookUpAptPackages(
LIST_INSTALLED_PACKAGES_UBUNTU,
- allInstalledPackages)
+ allInstalledPackages)
def allAvailablePackages(self, allAvailablePackages):
osType = OSCheck.get_os_family()
-
+
if osType == OSConst.SUSE_FAMILY:
return self.lookUpZypperPackages(
["zypper", "search", "--uninstalled-only", "--details"],
@@ -107,16 +107,16 @@ class PackagesAnalyzer:
elif osType == OSConst.UBUNTU_FAMILY:
return self.lookUpAptPackages(
LIST_AVAILABLE_PACKAGES_UBUNTU,
- allAvailablePackages)
-
- def lookUpAptPackages(self, command, allPackages):
+ allAvailablePackages)
+
+ def lookUpAptPackages(self, command, allPackages):
try:
result = self.subprocessWithTimeout(command)
if 0 == result['retCode']:
for x in result['out'].split('\n'):
if x.strip():
allPackages.append(x.split(' '))
-
+
except:
pass
diff --git a/ambari-agent/src/main/python/ambari_agent/PythonExecutor.py b/ambari-agent/src/main/python/ambari_agent/PythonExecutor.py
index d504183903..2a8fa5a75d 100644
--- a/ambari-agent/src/main/python/ambari_agent/PythonExecutor.py
+++ b/ambari-agent/src/main/python/ambari_agent/PythonExecutor.py
@@ -23,9 +23,10 @@ import os
import subprocess
import pprint
import threading
+import platform
from threading import Thread
import time
-from BackgroundCommandExecutionHandle import BackgroundCommandExecutionHandle
+from BackgroundCommandExecutionHandle import BackgroundCommandExecutionHandle
from Grep import Grep
import shell, sys
@@ -58,7 +59,7 @@ class PythonExecutor:
tmpout = open(tmpoutfile, 'a')
tmperr = open(tmperrfile, 'a')
return tmpout, tmperr
-
+
def run_file(self, script, script_params, tmp_dir, tmpoutfile, tmperrfile,
timeout, tmpstructedoutfile, logger_level, callback, task_id,
override_output_files = True, handle = None):
@@ -84,7 +85,7 @@ class PythonExecutor:
logger.info("Running command " + pprint.pformat(pythonCommand))
if(handle == None) :
tmpout, tmperr = self.open_subporcess_files(tmpoutfile, tmperrfile, override_output_files)
-
+
process = self.launch_python_subprocess(pythonCommand, tmpout, tmperr)
# map task_id to pid
callback(task_id, process.pid)
@@ -100,7 +101,7 @@ class PythonExecutor:
return self.prepare_process_result(process, tmpoutfile, tmperrfile, tmpstructedoutfile, timeout=timeout)
else:
holder = Holder(pythonCommand, tmpoutfile, tmperrfile, tmpstructedoutfile, handle)
-
+
background = BackgroundThread(holder, self)
background.start()
return {"exitcode": 777}
@@ -117,7 +118,7 @@ class PythonExecutor:
result = self.condenseOutput(out, error, returncode, structured_out)
logger.info("Result: %s" % result)
return result
-
+
def read_result_from_files(self, out_path, err_path, structured_out_path):
out = open(out_path, 'r').read()
error = open(err_path, 'r').read()
@@ -134,21 +135,23 @@ class PythonExecutor:
else:
structured_out = {}
return out, error, structured_out
-
+
def launch_python_subprocess(self, command, tmpout, tmperr):
"""
Creates subprocess with given parameters. This functionality was moved to separate method
to make possible unit testing
"""
+ close_fds = None if platform.system() == "Windows" else True
return subprocess.Popen(command,
stdout=tmpout,
- stderr=tmperr, close_fds=True)
-
+ stderr=tmperr, close_fds=close_fds)
+
def isSuccessfull(self, returncode):
return not self.python_process_has_been_killed and returncode == 0
def python_command(self, script, script_params):
- python_binary = sys.executable
+ # On Windows we must pass the Python executable explicitly, because sys.executable returns the service wrapper
+ python_binary = os.environ['PYTHON_EXE'] if 'PYTHON_EXE' in os.environ else sys.executable
python_command = [python_binary, script] + script_params
return python_command
@@ -180,31 +183,29 @@ class Holder:
self.err_file = err_file
self.structured_out_file = structured_out_file
self.handle = handle
-
+
class BackgroundThread(threading.Thread):
def __init__(self, holder, pythonExecutor):
threading.Thread.__init__(self)
self.holder = holder
self.pythonExecutor = pythonExecutor
-
+
def run(self):
process_out, process_err = self.pythonExecutor.open_subporcess_files(self.holder.out_file, self.holder.err_file, True)
-
+
logger.info("Starting process command %s" % self.holder.command)
process = self.pythonExecutor.launch_python_subprocess(self.holder.command, process_out, process_err)
-
+
logger.info("Process has been started. Pid = %s" % process.pid)
-
+
self.holder.handle.pid = process.pid
self.holder.handle.status = BackgroundCommandExecutionHandle.RUNNING_STATUS
self.holder.handle.on_background_command_started(self.holder.handle.command['taskId'], process.pid)
-
+
process.communicate()
-
+
self.holder.handle.exitCode = process.returncode
process_condenced_result = self.pythonExecutor.prepare_process_result(process, self.holder.out_file, self.holder.err_file, self.holder.structured_out_file)
logger.info("Calling callback with args %s" % process_condenced_result)
self.holder.handle.on_background_command_complete_callback(process_condenced_result, self.holder.handle)
logger.info("Exiting from thread for holder pid %s" % self.holder.handle.pid)
-
-
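
The close_fds tweak in launch_python_subprocess works around a Python 2 restriction: on Windows, subprocess.Popen raises ValueError if close_fds is true while stdout/stderr are redirected, whereas on POSIX close_fds=True is desirable so children do not inherit the agent's file descriptors. A minimal sketch of the same guard:

import platform
import subprocess

close_fds = None if platform.system() == "Windows" else True
process = subprocess.Popen(["python", "-c", "print 'hello'"],
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE,
                           close_fds=close_fds)
out, err = process.communicate()
print out
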
diff --git a/ambari-agent/src/main/python/ambari_agent/StatusCheck.py b/ambari-agent/src/main/python/ambari_agent/StatusCheck.py
index 2b64989557..7feadb6f91 100644
--- a/ambari-agent/src/main/python/ambari_agent/StatusCheck.py
+++ b/ambari-agent/src/main/python/ambari_agent/StatusCheck.py
@@ -54,7 +54,7 @@ class StatusCheck:
def fillDirValues(self):
try:
- for pidVar in self.pidPathesVars:
+ for pidVar in self.pidPathVars:
pidVarName = pidVar['var']
pidDefaultvalue = pidVar['defaultValue']
if self.globalConfig.has_key(pidVarName):
@@ -64,11 +64,11 @@ class StatusCheck:
except Exception as e:
logger.error("Error while filling directories values " + str(e))
- def __init__(self, serviceToPidDict, pidPathesVars, globalConfig,
+ def __init__(self, serviceToPidDict, pidPathVars, globalConfig,
servicesToLinuxUser):
self.serToPidDict = serviceToPidDict.copy()
- self.pidPathesVars = pidPathesVars
+ self.pidPathVars = pidPathVars
self.pidPathes = []
self.sh = shellRunner()
self.pidFilesDict = {}
diff --git a/ambari-agent/src/main/python/ambari_agent/hostname.py b/ambari-agent/src/main/python/ambari_agent/hostname.py
index 9fbe145653..caf7600431 100644
--- a/ambari-agent/src/main/python/ambari_agent/hostname.py
+++ b/ambari-agent/src/main/python/ambari_agent/hostname.py
@@ -44,11 +44,11 @@ def hostname(config):
if (0 == osStat.returncode and 0 != len(out.strip())):
cached_hostname = out.strip()
else:
- cached_hostname = socket.getfqdn()
+ cached_hostname = socket.getfqdn().lower()
except:
- cached_hostname = socket.getfqdn()
+ cached_hostname = socket.getfqdn().lower()
except:
- cached_hostname = socket.getfqdn()
+ cached_hostname = socket.getfqdn().lower()
return cached_hostname
@@ -81,7 +81,7 @@ def public_hostname(config):
handle.close()
cached_public_hostname = str
except Exception, e:
- cached_public_hostname = socket.getfqdn()
+ cached_public_hostname = socket.getfqdn().lower()
return cached_public_hostname
def main(argv=None):
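
Every fqdn fallback now lowercases the result; Windows machines commonly report mixed-case names, and normalizing at the source keeps the name the agent reports consistent across calls. For example:

import socket

# On a Windows host getfqdn() may return e.g. "WIN-AGENT01.example.com";
# lowercasing keeps the reported hostname stable.
print socket.getfqdn().lower()
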
diff --git a/ambari-agent/src/main/python/ambari_agent/main.py b/ambari-agent/src/main/python/ambari_agent/main.py
index 622e86f276..e43d9016c3 100644
--- a/ambari-agent/src/main/python/ambari_agent/main.py
+++ b/ambari-agent/src/main/python/ambari_agent/main.py
@@ -25,60 +25,45 @@ import sys
import traceback
import os
import time
+import platform
import ConfigParser
import ProcessHelper
from Controller import Controller
-from AmbariConfig import AmbariConfig
+import AmbariConfig
from NetUtil import NetUtil
from PingPortListener import PingPortListener
import hostname
from DataCleaner import DataCleaner
import socket
-
logger = logging.getLogger()
+
formatstr = "%(levelname)s %(asctime)s %(filename)s:%(lineno)d - %(message)s"
agentPid = os.getpid()
-config = AmbariConfig()
+config = AmbariConfig.AmbariConfig()
configFile = config.CONFIG_FILE
two_way_ssl_property = config.TWO_WAY_SSL_PROPERTY
-if 'AMBARI_LOG_DIR' in os.environ:
- logfile = os.environ['AMBARI_LOG_DIR'] + "/ambari-agent.log"
-else:
- logfile = "/var/log/ambari-agent/ambari-agent.log"
-
-def signal_handler(signum, frame):
- #we want the handler to run only for the agent process and not
- #for the children (e.g. namenode, etc.)
- if os.getpid() != agentPid:
- os._exit(0)
- logger.info('signal received, exiting.')
- ProcessHelper.stopAgent()
-
-def debug(sig, frame):
- """Interrupt running process, and provide a python prompt for
- interactive debugging."""
- d={'_frame':frame} # Allow access to frame object.
- d.update(frame.f_globals) # Unless shadowed by global
- d.update(frame.f_locals)
-
- message = "Signal received : entering python shell.\nTraceback:\n"
- message += ''.join(traceback.format_stack(frame))
- logger.info(message)
+IS_WINDOWS = platform.system() == "Windows"
+if IS_WINDOWS:
+ from HeartbeatHandlers_windows import bind_signal_handlers
+else:
+ from HeartbeatStopHandler_linux import bind_signal_handlers
+ from HeartbeatStopHandler_linux import signal_handler
+ from HeartbeatStopHandler_linux import debug
def setup_logging(verbose):
formatter = logging.Formatter(formatstr)
- rotateLog = logging.handlers.RotatingFileHandler(logfile, "a", 10000000, 25)
+ rotateLog = logging.handlers.RotatingFileHandler(AmbariConfig.AmbariConfig.getLogFile(), "a", 10000000, 25)
rotateLog.setFormatter(formatter)
logger.addHandler(rotateLog)
if verbose:
- logging.basicConfig(format=formatstr, level=logging.DEBUG, filename=logfile)
+ logging.basicConfig(format=formatstr, level=logging.DEBUG, filename=AmbariConfig.AmbariConfig.getLogFile())
logger.setLevel(logging.DEBUG)
logger.info("loglevel=logging.DEBUG")
else:
- logging.basicConfig(format=formatstr, level=logging.INFO, filename=logfile)
+ logging.basicConfig(format=formatstr, level=logging.INFO, filename=AmbariConfig.AmbariConfig.getLogFile())
logger.setLevel(logging.INFO)
logger.info("loglevel=logging.INFO")
@@ -89,35 +74,30 @@ def update_log_level(config):
loglevel = config.get('agent', 'loglevel')
if loglevel is not None:
if loglevel == 'DEBUG':
- logging.basicConfig(format=formatstr, level=logging.DEBUG, filename=logfile)
+ logging.basicConfig(format=formatstr, level=logging.DEBUG, filename=AmbariConfig.AmbariConfig.getLogFile())
logger.setLevel(logging.DEBUG)
logger.info("Newloglevel=logging.DEBUG")
else:
- logging.basicConfig(format=formatstr, level=logging.INFO, filename=logfile)
+ logging.basicConfig(format=formatstr, level=logging.INFO, filename=AmbariConfig.AmbariConfig.getLogFile())
logger.setLevel(logging.INFO)
logger.debug("Newloglevel=logging.INFO")
except Exception, err:
logger.info("Default loglevel=DEBUG")
-def bind_signal_handlers():
- signal.signal(signal.SIGINT, signal_handler)
- signal.signal(signal.SIGTERM, signal_handler)
- signal.signal(signal.SIGUSR1, debug)
-
-
# ToDo: move that function inside AmbariConfig
def resolve_ambari_config():
global config
+ configPath = os.path.abspath(AmbariConfig.AmbariConfig.getConfigFile())
+
try:
- if os.path.exists(configFile):
- config.read(configFile)
+ if os.path.exists(configPath):
+ config.read(configPath)
else:
- raise Exception("No config found, use default")
+ raise Exception("No config found at {0}, use default".format(configPath))
except Exception, err:
logger.warn(err)
- return config
def perform_prestart_checks(expected_hostname):
@@ -137,16 +117,21 @@ def perform_prestart_checks(expected_hostname):
logger.error(msg)
sys.exit(1)
# Check if there is another instance running
- if os.path.isfile(ProcessHelper.pidfile):
+ if os.path.isfile(ProcessHelper.pidfile) and not IS_WINDOWS:
print("%s already exists, exiting" % ProcessHelper.pidfile)
sys.exit(1)
# check if ambari prefix exists
- elif not os.path.isdir(config.get("agent", "prefix")):
+ elif config.has_option('agent', 'prefix') and not os.path.isdir(os.path.abspath(config.get('agent', 'prefix'))):
msg = "Ambari prefix dir %s does not exists, can't continue" \
% config.get("agent", "prefix")
logger.error(msg)
print(msg)
sys.exit(1)
+ elif not config.has_option('agent', 'prefix'):
+ msg = "Ambari prefix dir not configured, can't continue"
+ logger.error(msg)
+ print(msg)
+ sys.exit(1)
def daemonize():
@@ -207,7 +192,9 @@ def reset_agent(options):
os._exit(0)
-def main():
+# event - an event passed to Controller and NetUtil so their loops can be interrupted from an outside process;
+# we need this on Windows, where no SIGTERM is available
+def main(heartbeat_stop_callback=None):
global config
parser = OptionParser()
parser.add_option("-v", "--verbose", dest="verbose", action="store_true", help="verbose log output", default=False)
@@ -222,7 +209,7 @@ def main():
default_cfg = {'agent': {'prefix': '/home/ambari'}}
config.load(default_cfg)
- bind_signal_handlers()
+ bind_signal_handlers(agentPid)
if (len(sys.argv) > 1) and sys.argv[1] == 'stop':
stop_agent()
@@ -231,16 +218,18 @@ def main():
reset_agent(sys.argv)
# Check for ambari configuration file.
- config = resolve_ambari_config()
+ resolve_ambari_config()
# Starting data cleanup daemon
data_cleaner = None
- if int(config.get('agent', 'data_cleanup_interval')) > 0:
+ if config.has_option('agent', 'data_cleanup_interval') and int(config.get('agent','data_cleanup_interval')) > 0:
data_cleaner = DataCleaner(config)
data_cleaner.start()
perform_prestart_checks(expected_hostname)
- daemonize()
+
+ if not IS_WINDOWS:
+ daemonize()
# Starting ping port listener
try:
@@ -264,15 +253,19 @@ def main():
logger.warn("Unable to determine the IP address of the Ambari server '%s'", server_hostname)
# Wait until server is reachable
- netutil = NetUtil()
- netutil.try_to_connect(server_url, -1, logger)
-
- # Launch Controller communication
- controller = Controller(config)
- controller.start()
- controller.join()
- stop_agent()
+ netutil = NetUtil(heartbeat_stop_callback)
+ retries, connected = netutil.try_to_connect(server_url, -1, logger)
+ # Ambari Agent was stopped using stop event
+ if connected:
+ # Launch Controller communication
+ controller = Controller(config, heartbeat_stop_callback)
+ controller.start()
+ controller.join()
+ if not IS_WINDOWS:
+ stop_agent()
logger.info("finished")
if __name__ == "__main__":
- main()
+ heartbeat_stop_callback = bind_signal_handlers(agentPid)
+
+ main(heartbeat_stop_callback)
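
The heartbeat_stop_callback threads a stop event through NetUtil and Controller so their loops can be interrupted without POSIX signals, which Windows lacks. A hedged sketch of the idea, assuming the callback wraps a threading.Event (the real classes are HeartbeatHandlers_windows and HeartbeatStopHandler_linux):

    import threading

    # Hypothetical simplification of the per-platform stop handlers; the real
    # classes live in HeartbeatHandlers_windows / HeartbeatStopHandler_linux.
    class StopHandler:
        def __init__(self):
            self._event = threading.Event()

        def set_stop(self):
            self._event.set()

        def should_stop(self, timeout):
            # Block up to `timeout` seconds; replaces time.sleep() so the
            # heartbeat loop stays interruptible without POSIX signals.
            self._event.wait(timeout)
            return self._event.is_set()

    stop = StopHandler()
    stop.set_stop()
    print(stop.should_stop(timeout=1))  # True once stop was requested
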
diff --git a/ambari-agent/src/main/python/ambari_agent/security.py b/ambari-agent/src/main/python/ambari_agent/security.py
index bc101b8f1e..46eddca8ce 100644
--- a/ambari-agent/src/main/python/ambari_agent/security.py
+++ b/ambari-agent/src/main/python/ambari_agent/security.py
@@ -27,12 +27,12 @@ import json
import pprint
import traceback
import hostname
+import platform
logger = logging.getLogger()
-GEN_AGENT_KEY = "openssl req -new -newkey rsa:1024 -nodes -keyout %(keysdir)s/%(hostname)s.key\
- -subj /OU=%(hostname)s/\
- -out %(keysdir)s/%(hostname)s.csr"
+GEN_AGENT_KEY = 'openssl req -new -newkey rsa:1024 -nodes -keyout "%(keysdir)s'+os.sep+'%(hostname)s.key" '\
+ '-subj /OU=%(hostname)s/ -out "%(keysdir)s'+os.sep+'%(hostname)s.csr"'
class VerifiedHTTPSConnection(httplib.HTTPSConnection):
@@ -141,30 +141,30 @@ class CachedHTTPSConnection:
class CertificateManager():
def __init__(self, config):
self.config = config
- self.keysdir = self.config.get('security', 'keysdir')
+ self.keysdir = os.path.abspath(self.config.get('security', 'keysdir'))
self.server_crt = self.config.get('security', 'server_crt')
self.server_url = 'https://' + self.config.get('server', 'hostname') + ':' \
+ self.config.get('server', 'url_port')
def getAgentKeyName(self):
- keysdir = self.config.get('security', 'keysdir')
+ keysdir = os.path.abspath(self.config.get('security', 'keysdir'))
return keysdir + os.sep + hostname.hostname(self.config) + ".key"
def getAgentCrtName(self):
- keysdir = self.config.get('security', 'keysdir')
+ keysdir = os.path.abspath(self.config.get('security', 'keysdir'))
return keysdir + os.sep + hostname.hostname(self.config) + ".crt"
def getAgentCrtReqName(self):
- keysdir = self.config.get('security', 'keysdir')
+ keysdir = os.path.abspath(self.config.get('security', 'keysdir'))
return keysdir + os.sep + hostname.hostname(self.config) + ".csr"
def getSrvrCrtName(self):
- keysdir = self.config.get('security', 'keysdir')
+ keysdir = os.path.abspath(self.config.get('security', 'keysdir'))
return keysdir + os.sep + "ca.crt"
def checkCertExists(self):
- s = self.config.get('security', 'keysdir') + os.sep + "ca.crt"
+ s = os.path.abspath(self.config.get('security', 'keysdir')) + os.sep + "ca.crt"
server_crt_exists = os.path.exists(s)
@@ -240,10 +240,14 @@ class CertificateManager():
def genAgentCrtReq(self):
generate_script = GEN_AGENT_KEY % {'hostname': hostname.hostname(self.config),
- 'keysdir': self.config.get('security', 'keysdir')}
+ 'keysdir' : os.path.abspath(self.config.get('security', 'keysdir'))}
logger.info(generate_script)
- p = subprocess.Popen([generate_script], shell=True, stdout=subprocess.PIPE)
- p.communicate()
+ if platform.system() == 'Windows':
+ p = subprocess.Popen(generate_script, stdout=subprocess.PIPE)
+ p.communicate()
+ else:
+ p = subprocess.Popen([generate_script], shell=True, stdout=subprocess.PIPE)
+ p.communicate()
def initSecurity(self):
self.checkCertExists()
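
genAgentCrtReq() now branches on the platform because Windows has no POSIX shell: the openssl command string goes straight to CreateProcess, while on Linux it is still interpreted by sh. The quoted key paths in GEN_AGENT_KEY guard against spaces in keysdir. A minimal sketch of the dispatch, not the exact production code:

    import platform
    import subprocess

    def run_openssl(generate_script):
        if platform.system() == 'Windows':
            # No shell on Windows: the command string is parsed by CreateProcess.
            p = subprocess.Popen(generate_script, stdout=subprocess.PIPE)
        else:
            # POSIX: let the shell interpret the command string.
            p = subprocess.Popen([generate_script], shell=True,
                                 stdout=subprocess.PIPE)
        return p.communicate()
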
diff --git a/ambari-agent/src/main/python/ambari_agent/shell.py b/ambari-agent/src/main/python/ambari_agent/shell.py
index 4081bb012a..df6f0ca5ab 100644
--- a/ambari-agent/src/main/python/ambari_agent/shell.py
+++ b/ambari-agent/src/main/python/ambari_agent/shell.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+# !/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
@@ -29,26 +29,79 @@ import time
import traceback
import AmbariConfig
import pprint
+import platform
-try:
+if platform.system() != "Windows":
+ try:
import pwd
-except ImportError:
+ except ImportError:
import winpwd as pwd
-global serverTracker
-serverTracker = {}
logger = logging.getLogger()
+shellRunner = None
threadLocal = threading.local()
-gracefull_kill_delay = 5 # seconds between SIGTERM and SIGKILL
-tempFiles = []
+gracefull_kill_delay = 5 # seconds between SIGTERM and SIGKILL
+
+tempFiles = []
+
+
def noteTempFile(filename):
tempFiles.append(filename)
+
def getTempFiles():
return tempFiles
-def kill_process_with_children(parent_pid):
+
+class _dict_to_object:
+ def __init__(self, entries):
+ self.__dict__.update(entries)
+
+ def __getitem__(self, item):
+ return self.__dict__[item]
+
+
+# windows specific code
+def _kill_process_with_children_windows(parent_pid):
+ shellRunner().run(["taskkill", "/T", "/PID", "{0}".format(parent_pid)])
+
+
+class shellRunnerWindows:
+ # Run any command
+ def run(self, script, user=None):
+ if user is not None:
+ logger.warn("user argument ignored on windows")
+ code = 0
+ if not isinstance(script, list):
+ cmd = " "
+ cmd = cmd.join(script)
+ else:
+ cmd = script
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE, shell=False)
+ out, err = p.communicate()
+ code = p.wait()
+ logger.debug("Exitcode for %s is %d" % (cmd, code))
+ return {'exitCode': code, 'output': out, 'error': err}
+
+ def runPowershell(self, file=None, script_block=None, args=[]):
+ code = 0
+ cmd = None
+ if file:
+ cmd = ['powershell', '-WindowStyle', 'Hidden', '-File', file] + args
+ elif script_block:
+ cmd = ['powershell', '-WindowStyle', 'Hidden', '-Command', script_block] + args
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE, shell=False)
+ out, err = p.communicate()
+ code = p.wait()
+ logger.debug("Exitcode for %s is %d" % (cmd, code))
+ return _dict_to_object({'exitCode': code, 'output': out, 'error': err})
+
+
+#linux specific code
+def _kill_process_with_children_linux(parent_pid):
def kill_tree_function(pid, signal):
'''
Kills process tree starting from a given pid.
@@ -58,15 +111,17 @@ def kill_process_with_children(parent_pid):
# a given PID and then passes list of "kill -<SIGNAL> PID" commands to 'sh'
# shell.
CMD = """ps xf | awk -v PID=""" + str(pid) + \
- """ ' $1 == PID { P = $1; next } P && /_/ { P = P " " $1;""" + \
- """K=P } P && !/_/ { P="" } END { print "kill -""" \
- + str(signal) + """ "K }' | sh """
+ """ ' $1 == PID { P = $1; next } P && /_/ { P = P " " $1;""" + \
+ """K=P } P && !/_/ { P="" } END { print "kill -""" \
+ + str(signal) + """ "K }' | sh """
process = subprocess.Popen(CMD, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
process.communicate()
- run_kill_function(kill_tree_function, parent_pid)
-def run_kill_function(kill_function, pid):
+ _run_kill_function(kill_tree_function, parent_pid)
+
+
+def _run_kill_function(kill_function, pid):
try:
kill_function(pid, signal.SIGTERM)
except Exception, e:
@@ -81,17 +136,19 @@ def run_kill_function(kill_function, pid):
logger.error("Failed to send SIGKILL to PID %d. Process exited?" % (pid))
logger.error("Reported error: " + repr(e))
-def changeUid():
+
+def _changeUid():
try:
os.setuid(threadLocal.uid)
except Exception:
logger.warn("can not switch user for running command.")
-class shellRunner:
+
+class shellRunnerLinux:
# Run any command
def run(self, script, user=None):
try:
- if user!=None:
+ if user != None:
user = pwd.getpwnam(user)[2]
else:
user = os.getuid()
@@ -101,12 +158,28 @@ class shellRunner:
code = 0
cmd = " "
cmd = cmd.join(script)
- p = subprocess.Popen(cmd, preexec_fn=changeUid, stdout=subprocess.PIPE,
+ p = subprocess.Popen(cmd, preexec_fn=_changeUid, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True, close_fds=True)
out, err = p.communicate()
code = p.wait()
- logger.debug("Exitcode for %s is %d" % (cmd,code))
+ logger.debug("Exitcode for %s is %d" % (cmd, code))
return {'exitCode': code, 'output': out, 'error': err}
- def getServerTracker(self):
- return serverTracker
\ No newline at end of file
+
+def kill_process_with_children(parent_pid):
+ if platform.system() == "Windows":
+ _kill_process_with_children_windows(parent_pid)
+ else:
+ _kill_process_with_children_linux(parent_pid)
+
+def changeUid():
+ if not platform.system() == "Windows":
+ try:
+ os.setuid(threadLocal.uid)
+ except Exception:
+ logger.warn("can not switch user for running command.")
+
+if platform.system() == "Windows":
+ shellRunner = shellRunnerWindows
+else:
+ shellRunner = shellRunnerLinux
\ No newline at end of file
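
Resolving shellRunner to a platform-specific class once at import time keeps every existing call site (shellRunner().run(...)) source-compatible. A minimal sketch of the dispatch pattern, with stub runners standing in for the real classes:

    import platform

    class _RunnerLinux(object):
        def run(self, script, user=None):
            return {'exitCode': 0, 'output': 'linux', 'error': ''}

    class _RunnerWindows(object):
        def run(self, script, user=None):
            return {'exitCode': 0, 'output': 'windows', 'error': ''}

    # One name, resolved once at import time; callers instantiate as before.
    shellRunner = _RunnerWindows if platform.system() == "Windows" else _RunnerLinux
    print(shellRunner().run(["echo", "hi"])['output'])
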
diff --git a/ambari-agent/src/packages/windows.xml b/ambari-agent/src/packages/windows.xml
new file mode 100644
index 0000000000..1abca20375
--- /dev/null
+++ b/ambari-agent/src/packages/windows.xml
@@ -0,0 +1,82 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.1"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.1 http://maven.apache.org/xsd/assembly-1.1.1.xsd">
+ <!--This 'all' id is not appended to the produced bundle because we do this:
+ http://maven.apache.org/plugins/maven-assembly-plugin/faq.html#required-classifiers
+ -->
+ <id>windows-dist</id>
+ <formats>
+ <format>dir</format>
+ </formats>
+ <includeBaseDirectory>false</includeBaseDirectory>
+ <fileSets>
+ <fileSet>
+ <directory>src/main/python/ambari_agent</directory>
+ <outputDirectory>/sbin/ambari_agent</outputDirectory>
+ </fileSet>
+ <fileSet>
+ <directory>${project.basedir}/../ambari-common/src/main/python/resource_management</directory>
+ <outputDirectory>/sbin/resource_management</outputDirectory>
+ </fileSet>
+ <fileSet>
+ <directory>${project.basedir}/../ambari-common/src/main/python/ambari_commons</directory>
+ <outputDirectory>/sbin/ambari_commons</outputDirectory>
+ </fileSet>
+ <fileSet>
+ <directory>${project.basedir}/../ambari-common/src/main/python/ambari_jinja2/ambari_jinja2</directory>
+ <outputDirectory>/sbin/ambari_jinja2</outputDirectory>
+ </fileSet>
+ <fileSet>
+ <directory>${project.basedir}/conf/windows</directory>
+ <outputDirectory>/</outputDirectory>
+ <excludes>
+ <exclude>service_wrapper.py</exclude>
+ <exclude>createservice.ps1</exclude>
+ </excludes>
+ </fileSet>
+ <fileSet>
+ <directory>${project.basedir}/conf/windows</directory>
+ <outputDirectory>/sbin</outputDirectory>
+ <includes>
+ <include>service_wrapper.py</include>
+ <include>createservice.ps1</include>
+ </includes>
+ </fileSet>
+ <fileSet>
+ <directory>${target.cache.dir}</directory>
+ <outputDirectory>/cache</outputDirectory>
+ </fileSet>
+ <!--empty directory-->
+ <fileSet>
+ <directory>./</directory>
+ <outputDirectory>/keys</outputDirectory>
+ <excludes>
+ <exclude>*/**</exclude>
+ </excludes>
+ </fileSet>
+ </fileSets>
+ <files>
+ <file>
+ <source>${project.basedir}/../version</source>
+ <outputDirectory>data</outputDirectory>
+ <filtered>true</filtered>
+ </file>
+ </files>
+</assembly>
diff --git a/ambari-agent/src/test/python/ambari_agent/TestAlerts.py b/ambari-agent/src/test/python/ambari_agent/TestAlerts.py
index 9bbcf3bae5..d3e4583dd0 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestAlerts.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestAlerts.py
@@ -18,9 +18,10 @@ See the License for the specific language governing permissions and
limitations under the License.
'''
+from stacks.utils.RMFTestCase import *
+import os
import socket
import sys
-import os
from ambari_agent.AlertSchedulerHandler import AlertSchedulerHandler
from ambari_agent.alerts.collector import AlertCollector
@@ -429,12 +430,12 @@ class TestAlerts(TestCase):
ash = AlertSchedulerHandler(test_file_path, test_stack_path, test_host_scripts_path)
ash.start()
-
+
self.assertEquals(1, ash.get_job_count())
ash.reschedule()
self.assertEquals(1, ash.get_job_count())
-
-
+
+
def test_alert_collector_purge(self):
json = { "name": "namenode_process",
"service": "HDFS",
@@ -466,13 +467,13 @@ class TestAlerts(TestCase):
self.assertEquals(6, pa.interval())
res = pa.collect()
-
+
self.assertTrue(collector.alerts()[0] is not None)
self.assertEquals('CRITICAL', collector.alerts()[0]['state'])
-
+
collector.remove_by_uuid('c1f73191-4481-4435-8dae-fd380e4c0be1')
self.assertEquals(0,len(collector.alerts()))
-
+
def test_disabled_definitions(self):
test_file_path = os.path.join('ambari_agent', 'dummy_files')
@@ -509,24 +510,23 @@ class TestAlerts(TestCase):
pa = PortAlert(json, json['source'])
ash.schedule_definition(pa)
-
+
self.assertEquals(2, ash.get_job_count())
-
+
json['enabled'] = False
pa = PortAlert(json, json['source'])
ash.schedule_definition(pa)
-
+
# verify disabled alert not scheduled
self.assertEquals(2, ash.get_job_count())
-
+
json['enabled'] = True
pa = PortAlert(json, json['source'])
ash.schedule_definition(pa)
-
+
# verify enabled alert was scheduled
self.assertEquals(3, ash.get_job_count())
-
def test_immediate_alert(self):
test_file_path = os.path.join('ambari_agent', 'dummy_files')
test_stack_path = os.path.join('ambari_agent', 'dummy_files')
@@ -538,10 +538,10 @@ class TestAlerts(TestCase):
self.assertEquals(1, ash.get_job_count())
self.assertEquals(0, len(ash._collector.alerts()))
- execution_commands = [ {
+ execution_commands = [ {
"clusterName": "c1",
- "hostName": "c6401.ambari.apache.org",
- "alertDefinition": {
+ "hostName": "c6401.ambari.apache.org",
+ "alertDefinition": {
"name": "namenode_process",
"service": "HDFS",
"component": "NAMENODE",
@@ -565,7 +565,7 @@ class TestAlerts(TestCase):
}
}
} ]
-
+
# execute the alert immediately and verify that the collector has the result
ash.execute_alert(execution_commands)
self.assertEquals(1, len(ash._collector.alerts()))
diff --git a/ambari-agent/src/test/python/ambari_agent/TestCertGeneration.py b/ambari-agent/src/test/python/ambari_agent/TestCertGeneration.py
index 8e01707cc1..c724c31d6b 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestCertGeneration.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestCertGeneration.py
@@ -45,4 +45,3 @@ class TestCertGeneration(TestCase):
def tearDown(self):
shutil.rmtree(self.tmpdir)
-
diff --git a/ambari-agent/src/test/python/ambari_agent/TestController.py b/ambari-agent/src/test/python/ambari_agent/TestController.py
index 72b0cea264..240d80884d 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestController.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestController.py
@@ -385,7 +385,7 @@ class TestController(unittest.TestCase):
hearbeat = MagicMock()
self.controller.heartbeat = hearbeat
-
+ event_mock.return_value = False
dumpsMock.return_value = "data"
sendRequest = MagicMock(name="sendRequest")
@@ -512,7 +512,7 @@ class TestController(unittest.TestCase):
response["restartAgent"] = "false"
self.controller.heartbeatWithServer()
- sleepMock.assert_called_with(
+ event_mock.assert_any_call(timeout=
self.controller.netutil.MINIMUM_INTERVAL_BETWEEN_HEARTBEATS)
# Check that server continues to heartbeat after connection errors
@@ -533,7 +533,7 @@ class TestController(unittest.TestCase):
self.controller.heartbeatWithServer()
self.assertTrue(sendRequest.call_count > 5)
- sleepMock.assert_called_with(
+ event_mock.assert_called_with(timeout=
self.controller.netutil.MINIMUM_INTERVAL_BETWEEN_HEARTBEATS)
sys.stdout = sys.__stdout__
diff --git a/ambari-agent/src/test/python/ambari_agent/TestCustomServiceOrchestrator.py b/ambari-agent/src/test/python/ambari_agent/TestCustomServiceOrchestrator.py
index 5f426e652d..24ee259690 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestCustomServiceOrchestrator.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestCustomServiceOrchestrator.py
@@ -95,7 +95,8 @@ class TestCustomServiceOrchestrator(TestCase):
'clusterHostInfo':{'namenode_host' : ['1'],
'slave_hosts' : ['0', '1'],
'all_hosts' : ['h1.hortonworks.com', 'h2.hortonworks.com'],
- 'all_ping_ports': ['8670:0,1']}
+ 'all_ping_ports': ['8670:0,1']},
+ 'hostLevelParams':{}
}
decompress_cluster_host_info_mock.return_value = {'namenode_host' : ['h2.hortonworks.com'],
@@ -315,15 +316,15 @@ class TestCustomServiceOrchestrator(TestCase):
self.assertTrue(os.path.exists(err))
os.remove(out)
os.remove(err)
-
+
from ambari_agent.StackVersionsFileHandler import StackVersionsFileHandler
-
+
@patch("shell.kill_process_with_children")
@patch.object(FileCache, "__init__")
@patch.object(CustomServiceOrchestrator, "resolve_script_path")
@patch.object(CustomServiceOrchestrator, "resolve_hook_script_path")
@patch.object(StackVersionsFileHandler, "read_stack_version")
- def test_cancel_backgound_command(self, read_stack_version_mock, resolve_hook_script_path_mock, resolve_script_path_mock, FileCache_mock,
+ def test_cancel_backgound_command(self, read_stack_version_mock, resolve_hook_script_path_mock, resolve_script_path_mock, FileCache_mock,
kill_process_with_children_mock):
FileCache_mock.return_value = None
FileCache_mock.cache_dir = MagicMock()
@@ -334,9 +335,9 @@ class TestCustomServiceOrchestrator(TestCase):
cfg.set('agent', 'tolerate_download_failures', 'true')
cfg.set('agent', 'prefix', '.')
cfg.set('agent', 'cache_dir', 'background_tasks')
-
+
actionQueue = ActionQueue(cfg, dummy_controller)
-
+
dummy_controller.actionQueue = actionQueue
orchestrator = CustomServiceOrchestrator(cfg, dummy_controller)
orchestrator.file_cache = MagicMock()
@@ -344,42 +345,42 @@ class TestCustomServiceOrchestrator(TestCase):
return ""
orchestrator.file_cache.get_service_base_dir = f
actionQueue.customServiceOrchestrator = orchestrator
-
+
import TestActionQueue
import copy
-
+
TestActionQueue.patch_output_file(orchestrator.python_executor)
orchestrator.python_executor.prepare_process_result = MagicMock()
orchestrator.dump_command_to_json = MagicMock()
-
+
lock = threading.RLock()
complete_done = threading.Condition(lock)
-
+
complete_was_called = {}
def command_complete_w(process_condenced_result, handle):
with lock:
complete_was_called['visited']= ''
complete_done.wait(3)
-
- actionQueue.on_background_command_complete_callback = TestActionQueue.wraped(actionQueue.on_background_command_complete_callback, command_complete_w, None)
+
+ actionQueue.on_background_command_complete_callback = TestActionQueue.wraped(actionQueue.on_background_command_complete_callback, command_complete_w, None)
execute_command = copy.deepcopy(TestActionQueue.TestActionQueue.background_command)
actionQueue.put([execute_command])
actionQueue.processBackgroundQueueSafeEmpty()
-
- time.sleep(.1)
-
+
+ time.sleep(.1)
+
orchestrator.cancel_command(19,'')
self.assertTrue(kill_process_with_children_mock.called)
kill_process_with_children_mock.assert_called_with(33)
-
+
with lock:
complete_done.notifyAll()
with lock:
self.assertTrue(complete_was_called.has_key('visited'))
-
+
time.sleep(.1)
-
+
runningCommand = actionQueue.commandStatuses.get_command_status(19)
self.assertTrue(runningCommand is not None)
self.assertEqual(runningCommand['status'], ActionQueue.FAILED_STATUS)
@@ -501,12 +502,12 @@ class TestCustomServiceOrchestrator(TestCase):
}
dummy_controller = MagicMock()
orchestrator = CustomServiceOrchestrator(self.config, dummy_controller)
-
+
import TestActionQueue
TestActionQueue.patch_output_file(orchestrator.python_executor)
orchestrator.python_executor.condenseOutput = MagicMock()
orchestrator.dump_command_to_json = MagicMock()
-
+
ret = orchestrator.runCommand(command, "out.txt", "err.txt")
self.assertEqual(ret['exitcode'], 777)
diff --git a/ambari-agent/src/test/python/ambari_agent/TestHostname.py b/ambari-agent/src/test/python/ambari_agent/TestHostname.py
index 7d1f3c6874..993a9d1bd9 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestHostname.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestHostname.py
@@ -34,7 +34,7 @@ class TestHostname(TestCase):
hostname.cached_hostname = None
hostname.cached_public_hostname = None
config = AmbariConfig()
- self.assertEquals(hostname.hostname(config), socket.getfqdn(),
+ self.assertEquals(hostname.hostname(config), socket.getfqdn().lower(),
"hostname should equal the socket-based hostname")
pass
diff --git a/ambari-agent/src/test/python/ambari_agent/TestMain.py b/ambari-agent/src/test/python/ambari_agent/TestMain.py
index f930c4a001..bb75bacda2 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestMain.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestMain.py
@@ -52,23 +52,22 @@ class TestMain(unittest.TestCase):
sys.stdout = sys.__stdout__
+ @patch("ambari_agent.HeartbeatStopHandler_linux")
@patch("os._exit")
@patch("os.getpid")
@patch.object(ProcessHelper, "stopAgent")
- def test_signal_handler(self, stopAgent_mock, os_getpid_mock, os_exit_mock):
+ def test_signal_handler(self, stopAgent_mock, os_getpid_mock, os_exit_mock, heartbeat_handler_mock):
# testing exit of children
main.agentPid = 4444
os_getpid_mock.return_value = 5555
main.signal_handler("signum", "frame")
- self.assertTrue(os_exit_mock.called)
-
+ heartbeat_handler_mock.set_stop.assert_called()
os_exit_mock.reset_mock()
# testing exit of main process
os_getpid_mock.return_value = main.agentPid
main.signal_handler("signum", "frame")
- self.assertFalse(os_exit_mock.called)
- self.assertTrue(stopAgent_mock.called)
+ heartbeat_handler_mock.set_stop.assert_called()
@patch.object(main.logger, "addHandler")
@@ -122,7 +121,7 @@ class TestMain(unittest.TestCase):
@patch("signal.signal")
def test_bind_signal_handlers(self, signal_mock):
- main.bind_signal_handlers()
+ main.bind_signal_handlers(os.getpid())
# Check if on SIGINT/SIGTERM agent is configured to terminate
signal_mock.assert_any_call(signal.SIGINT, main.signal_handler)
signal_mock.assert_any_call(signal.SIGTERM, main.signal_handler)
@@ -269,7 +268,7 @@ class TestMain(unittest.TestCase):
@patch.object(main, "setup_logging")
@patch.object(main, "bind_signal_handlers")
@patch.object(main, "stop_agent")
- @patch.object(main, "resolve_ambari_config")
+ @patch.object(AmbariConfig, "getConfigFile")
@patch.object(main, "perform_prestart_checks")
@patch.object(main, "daemonize")
@patch.object(main, "update_log_level")
@@ -285,21 +284,25 @@ class TestMain(unittest.TestCase):
def test_main(self, ping_port_init_mock, ping_port_start_mock, data_clean_init_mock,data_clean_start_mock,
parse_args_mock, join_mock, start_mock, Controller_init_mock, try_to_connect_mock,
update_log_level_mock, daemonize_mock, perform_prestart_checks_mock,
- resolve_ambari_config_mock, stop_mock, bind_signal_handlers_mock,
+ ambari_config_mock,
+ stop_mock, bind_signal_handlers_mock,
setup_logging_mock, socket_mock):
data_clean_init_mock.return_value = None
Controller_init_mock.return_value = None
ping_port_init_mock.return_value = None
options = MagicMock()
parse_args_mock.return_value = (options, MagicMock)
-
+ try_to_connect_mock.return_value = (0, True)
+ # use default unix config
+ ambari_config_mock.return_value = os.path.abspath("../../../conf/unix/ambari-agent.ini")
#testing call without command-line arguments
+
main.main()
self.assertTrue(setup_logging_mock.called)
self.assertTrue(bind_signal_handlers_mock.called)
self.assertTrue(stop_mock.called)
- self.assertTrue(resolve_ambari_config_mock.called)
+ #self.assertTrue(resolve_ambari_config_mock.called)
self.assertTrue(perform_prestart_checks_mock.called)
self.assertTrue(daemonize_mock.called)
self.assertTrue(update_log_level_mock.called)
diff --git a/ambari-agent/src/test/python/ambari_agent/TestNetUtil.py b/ambari-agent/src/test/python/ambari_agent/TestNetUtil.py
index 474548fbc0..255da88256 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestNetUtil.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestNetUtil.py
@@ -21,6 +21,7 @@ limitations under the License.
from ambari_agent import NetUtil
from mock.mock import MagicMock, patch
import unittest
+import threading
class TestNetUtil(unittest.TestCase):
@@ -51,15 +52,17 @@ class TestNetUtil(unittest.TestCase):
@patch("time.sleep")
- def test_try_to_connect(self, sleepMock):
-
+ @patch.object(threading._Event, "wait")
+ def test_try_to_connect(self, event_mock,
+ sleepMock):
+ event_mock.return_value = False
netutil = NetUtil.NetUtil()
checkURL = MagicMock(name="checkURL")
checkURL.return_value = True, "test"
netutil.checkURL = checkURL
# one successful get
- self.assertEqual(0, netutil.try_to_connect("url", 10))
+ self.assertEqual((0, True), netutil.try_to_connect("url", 10))
# got successful after N retries
gets = [[True, ""], [False, ""], [False, ""]]
@@ -67,9 +70,9 @@ class TestNetUtil(unittest.TestCase):
def side_effect(*args):
return gets.pop()
checkURL.side_effect = side_effect
- self.assertEqual(2, netutil.try_to_connect("url", 10))
+ self.assertEqual((2, True), netutil.try_to_connect("url", 10))
# max retries
checkURL.side_effect = None
checkURL.return_value = False, "test"
- self.assertEqual(5, netutil.try_to_connect("url", 5))
+ self.assertEqual((5, False), netutil.try_to_connect("url", 5))
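
Because NetUtil now blocks on an event instead of sleeping, the tests patch threading._Event.wait so every retry returns immediately and the new (retries, connected) result can be asserted without real delays. A hedged sketch of that patching style (threading._Event is the Python 2 class behind threading.Event(); mock is imported the same way the tests do):

    import threading
    from mock.mock import patch

    @patch.object(threading._Event, "wait")
    def demo(event_mock):
        # Every Event.wait() now returns immediately, so retry
        # loops run at full speed under test.
        event_mock.return_value = False
        print(threading.Event().wait(1))  # False, without blocking

    demo()
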
diff --git a/ambari-agent/src/test/python/ambari_agent/TestStatusCheck.py b/ambari-agent/src/test/python/ambari_agent/TestStatusCheck.py
index 50657d883b..a872e7f192 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestStatusCheck.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestStatusCheck.py
@@ -47,7 +47,7 @@ class TestStatusCheck(TestCase):
def setUp(self):
- self.pidPathesVars = [
+ self.pidPathVars = [
{'var' : '',
'defaultValue' : PID_DIR}
]
@@ -84,7 +84,7 @@ class TestStatusCheck(TestCase):
@patch.object(StatusCheck, 'getIsLive')
def test_live(self, get_is_live_mock):
- statusCheck = StatusCheck(self.serviceToPidDict, self.pidPathesVars,
+ statusCheck = StatusCheck(self.serviceToPidDict, self.pidPathVars,
self.globalConfig, self.servicesToLinuxUser)
self.assertTrue(StatusCheck.USER_PATTERN in self.serviceToPidDict[COMPONENT_LIVE])
@@ -108,11 +108,11 @@ class TestStatusCheck(TestCase):
logger_info_mock.side_effect = my_side_effect
# call this three times
- statusCheck = StatusCheck(self.serviceToPidDict, self.pidPathesVars,
+ statusCheck = StatusCheck(self.serviceToPidDict, self.pidPathVars,
self.globalConfig, self.servicesToLinuxUser)
- statusCheck = StatusCheck(self.serviceToPidDict, self.pidPathesVars,
+ statusCheck = StatusCheck(self.serviceToPidDict, self.pidPathVars,
self.globalConfig, self.servicesToLinuxUser)
- statusCheck = StatusCheck(self.serviceToPidDict, self.pidPathesVars,
+ statusCheck = StatusCheck(self.serviceToPidDict, self.pidPathVars,
self.globalConfig, self.servicesToLinuxUser)
# logged not more then once
self.assert_(TestStatusCheck.timesLogged <= 1, "test_dont_relog_serToPidDict logged more then once")
@@ -129,7 +129,7 @@ class TestStatusCheck(TestCase):
self.pidFilesDict[one_more_pid_file_name] = one_more_pid_full_path
self.is_live_values[one_more_pid_full_path] = False
- statusCheck = StatusCheck(self.serviceToPidDict, self.pidPathesVars,
+ statusCheck = StatusCheck(self.serviceToPidDict, self.pidPathVars,
self.globalConfig, self.servicesToLinuxUser)
statusCheck.pidFilesDict = self.pidFilesDict
@@ -149,7 +149,7 @@ class TestStatusCheck(TestCase):
badServiceToPidDict = self.serviceToPidDict.copy()
badServiceToPidDict['BAD_COMPONENT'] = 'prefix' + StatusCheck.USER_PATTERN
- statusCheck = StatusCheck(badServiceToPidDict, self.pidPathesVars,
+ statusCheck = StatusCheck(badServiceToPidDict, self.pidPathVars,
self.globalConfig, self.servicesToLinuxUser)
statusCheck.pidFilesDict = self.pidFilesDict
@@ -162,7 +162,7 @@ class TestStatusCheck(TestCase):
# Ensure that status checker return False for dead process
@patch.object(StatusCheck, 'getIsLive')
def test_dead(self, get_is_live_mock):
- statusCheck = StatusCheck(self.serviceToPidDict, self.pidPathesVars,
+ statusCheck = StatusCheck(self.serviceToPidDict, self.pidPathVars,
self.globalConfig, self.servicesToLinuxUser)
statusCheck.pidFilesDict = self.pidFilesDict
diff --git a/ambari-agent/src/test/python/resource_management/TestContentSources.py b/ambari-agent/src/test/python/resource_management/TestContentSources.py
index 1c5e8a80f4..d09df44081 100644
--- a/ambari-agent/src/test/python/resource_management/TestContentSources.py
+++ b/ambari-agent/src/test/python/resource_management/TestContentSources.py
@@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
'''
-
+from stacks.utils.RMFTestCase import *
from unittest import TestCase
from mock.mock import patch, MagicMock
diff --git a/ambari-agent/src/test/python/resource_management/TestDirectoryResource.py b/ambari-agent/src/test/python/resource_management/TestDirectoryResource.py
index 866486eb72..5a63891854 100644
--- a/ambari-agent/src/test/python/resource_management/TestDirectoryResource.py
+++ b/ambari-agent/src/test/python/resource_management/TestDirectoryResource.py
@@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
'''
-
+from stacks.utils.RMFTestCase import *
from unittest import TestCase
from mock.mock import patch, MagicMock
import os
diff --git a/ambari-agent/src/test/python/resource_management/TestExecuteHadoopResource.py b/ambari-agent/src/test/python/resource_management/TestExecuteHadoopResource.py
index d2ef71cec4..f5308d1ebd 100644
--- a/ambari-agent/src/test/python/resource_management/TestExecuteHadoopResource.py
+++ b/ambari-agent/src/test/python/resource_management/TestExecuteHadoopResource.py
@@ -15,6 +15,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
+
+from stacks.utils.RMFTestCase import *
import os
from unittest import TestCase
diff --git a/ambari-agent/src/test/python/resource_management/TestExecuteResource.py b/ambari-agent/src/test/python/resource_management/TestExecuteResource.py
index f974b9202c..8423eec30d 100644
--- a/ambari-agent/src/test/python/resource_management/TestExecuteResource.py
+++ b/ambari-agent/src/test/python/resource_management/TestExecuteResource.py
@@ -16,6 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
'''
+from stacks.utils.RMFTestCase import *
from unittest import TestCase
from mock.mock import patch, MagicMock, call
diff --git a/ambari-agent/src/test/python/resource_management/TestMonitorWebserverResource.py b/ambari-agent/src/test/python/resource_management/TestMonitorWebserverResource.py
index 533ecaa3ab..d5b2c420ad 100644
--- a/ambari-agent/src/test/python/resource_management/TestMonitorWebserverResource.py
+++ b/ambari-agent/src/test/python/resource_management/TestMonitorWebserverResource.py
@@ -16,6 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
'''
+from stacks.utils.RMFTestCase import *
from unittest import TestCase
from mock.mock import patch, MagicMock
from resource_management import *
diff --git a/ambari-agent/src/test/python/resource_management/TestSubstituteVars.py b/ambari-agent/src/test/python/resource_management/TestSubstituteVars.py
index b3623cde0d..9e42f92170 100644
--- a/ambari-agent/src/test/python/resource_management/TestSubstituteVars.py
+++ b/ambari-agent/src/test/python/resource_management/TestSubstituteVars.py
@@ -17,6 +17,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
+
+from stacks.utils.RMFTestCase import *
from unittest import TestCase, main
from resource_management.libraries.functions.substitute_vars import substitute_vars
diff --git a/ambari-agent/src/test/python/unitTests.py b/ambari-agent/src/test/python/unitTests.py
index 082e1a11a1..83b6bdfa4f 100644
--- a/ambari-agent/src/test/python/unitTests.py
+++ b/ambari-agent/src/test/python/unitTests.py
@@ -23,7 +23,8 @@ import doctest
from os.path import dirname, split, isdir
import logging.handlers
import logging
-from random import shuffle
+#TODO Add an option to randomize the tests' execution
+#from random import shuffle
LOG_FILE_NAME='tests.log'
SELECTED_PREFIX = "_"
@@ -55,7 +56,8 @@ def all_tests_suite():
for directory in os.listdir(src_dir):
if os.path.isdir(directory):
files_list += os.listdir(src_dir + os.sep + directory)
- shuffle(files_list)
+ #TODO Add an option to randomize the tests' execution
+ #shuffle(files_list)
tests_list = []
logger.info('------------------------TESTS LIST:-------------------------------------')
diff --git a/ambari-client/groovy-client/pom.xml b/ambari-client/groovy-client/pom.xml
index fbedbd139e..cc823def37 100644
--- a/ambari-client/groovy-client/pom.xml
+++ b/ambari-client/groovy-client/pom.xml
@@ -87,6 +87,7 @@
<artifactId>apache-rat-plugin</artifactId>
<configuration>
<excludes>
+ <exclude>**/*.iml</exclude>
<exclude>src/main/resources/blueprints/**</exclude>
<exclude>src/test/resources/**</exclude>
</excludes>
diff --git a/ambari-client/python-client/pom.xml b/ambari-client/python-client/pom.xml
index 74fc0f5d4d..e16bbc28bc 100644
--- a/ambari-client/python-client/pom.xml
+++ b/ambari-client/python-client/pom.xml
@@ -75,7 +75,7 @@
<executions>
<execution>
<configuration>
- <executable>${project.basedir}/../../ambari-common/src/main/unix/ambari-python-wrap</executable>
+ <executable>python</executable>
<workingDirectory>src/test/python</workingDirectory>
<arguments>
<argument>unitTests.py</argument>
@@ -93,7 +93,7 @@
</execution>
<execution>
<configuration>
- <executable>${project.basedir}/../../ambari-common/src/main/unix/ambari-python-wrap</executable>
+ <executable>python</executable>
<workingDirectory>target/python-client-${project.version}</workingDirectory>
<arguments>
<argument>${project.basedir}/src/main/python/setup.py</argument>
@@ -115,7 +115,7 @@
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>rpm-maven-plugin</artifactId>
- <version>2.0.1</version>
+ <version>2.1-alpha-2</version>
<executions>
<execution>
<phase>none</phase>
diff --git a/ambari-common/src/main/python/ambari_commons/ambari_service.py b/ambari-common/src/main/python/ambari_commons/ambari_service.py
new file mode 100644
index 0000000000..8418e74c53
--- /dev/null
+++ b/ambari-common/src/main/python/ambari_commons/ambari_service.py
@@ -0,0 +1,79 @@
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import os
+
+import win32service
+
+from ambari_commons.os_windows import WinService
+
+
+AMBARI_VERSION_VAR = "AMBARI_VERSION_VAR"
+
+ENV_PYTHON_PATH = "PYTHONPATH"
+
+
+class AmbariService(WinService):
+ _svc_name_ = "Ambari Service"
+ _svc_display_name_ = "Ambari Service"
+ _svc_description_ = "Ambari Service"
+
+ # Sets the current dir and adjusts the PYTHONPATH env variable before calling SvcDoRun()
+ def SvcRun(self):
+ self.ReportServiceStatus(win32service.SERVICE_START_PENDING)
+
+ import servicemanager
+
+ parser = self._InitOptionsParser()
+ (self.options, args) = parser.parse_args()
+
+ try:
+ is_debugging = servicemanager.Debugging()
+ except:
+ is_debugging = False
+
+ if not is_debugging:
+ # Save the current dir, or the script dir if none set (typical for services)
+ script_path = os.path.dirname(__file__.replace('/', os.sep))
+ # the script resides in the sbin/ambari_commons subdir
+ self.options.current_dir = os.path.normpath(script_path + "\\..\\..")
+ os.chdir(self.options.current_dir)
+
+ python_path = os.path.normpath(script_path + "\\..")
+
+ #update the environment vars: set PYTHONPATH = $script_dir\sbin;%PYTHONPATH%
+ if os.environ.has_key(ENV_PYTHON_PATH):
+ python_path += os.pathsep + os.environ[ENV_PYTHON_PATH]
+ os.environ[ENV_PYTHON_PATH] = python_path
+
+ self.SvcDoRun()
+ pass
+
+ # Call during initialization to implement standard service versioning
+ @classmethod
+ def _AdjustServiceVersion(cls):
+ if os.environ.has_key(AMBARI_VERSION_VAR):
+ ambariVer = os.environ[AMBARI_VERSION_VAR]
+ else:
+ ambariVer = "1.3.0-SNAPSHOT"
+ AmbariService._svc_display_name_ += "-" + ambariVer
+ AmbariService._svc_description_ += " v" + ambariVer
+
+ # Override to customize the command-line arguments
+ def _InitOptionsParser(self):
+ pass
diff --git a/ambari-common/src/main/python/ambari_commons/exceptions.py b/ambari-common/src/main/python/ambari_commons/exceptions.py
new file mode 100644
index 0000000000..c5ed85d804
--- /dev/null
+++ b/ambari-common/src/main/python/ambari_commons/exceptions.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+class FatalException(Exception):
+ def __init__(self, code, reason):
+ self.code = code
+ self.reason = reason
+
+ def __str__(self):
+ return repr("Fatal exception: %s, exit code %s" % (self.reason, self.code))
+
+
+class NonFatalException(Exception):
+ def __init__(self, reason):
+ self.reason = reason
+
+ def __str__(self):
+ return repr("NonFatal exception: %s" % self.reason)
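
FatalException carries an exit code alongside the reason so callers can map failures straight onto process exit codes. A short usage sketch, assuming ambari_commons is on PYTHONPATH:

    from ambari_commons.exceptions import FatalException

    try:
        raise FatalException(12, "Malformed Content-Range response header")
    except FatalException, e:  # Python 2 syntax, matching the codebase
        # A caller would log the reason and exit with the code.
        print("fatal: %s (exit %s)" % (e.reason, e.code))
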
diff --git a/ambari-common/src/main/python/ambari_commons/inet_utils.py b/ambari-common/src/main/python/ambari_commons/inet_utils.py
new file mode 100644
index 0000000000..2a54cb6867
--- /dev/null
+++ b/ambari-common/src/main/python/ambari_commons/inet_utils.py
@@ -0,0 +1,148 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import os
+import sys
+import urllib2
+
+from exceptions import *
+from logging_utils import *
+
+def download_file(link, destination, chunk_size=16 * 1024):
+ print_info_msg("Downloading {0} to {1}".format(link, destination))
+ if os.path.exists(destination):
+ print_warning_msg("File {0} already exists, assuming it was downloaded before".format(destination))
+ return
+
+ force_download_file(link, destination, chunk_size)
+
+
+def download_progress(file_name, downloaded_size, blockSize, totalSize):
+ percent = int(downloaded_size * 100 / totalSize)
+ status = "\r" + file_name
+
+ if totalSize < blockSize:
+ status += "... %d%%" % (100)
+ else:
+ status += "... %d%% (%.1f MB of %.1f MB)" % (
+ percent, downloaded_size / 1024 / 1024.0, totalSize / 1024 / 1024.0)
+ sys.stdout.write(status)
+ sys.stdout.flush()
+
+
+def find_range_components(meta):
+ file_size = 0
+ seek_pos = 0
+ hdr_range = meta.getheaders("Content-Range")
+ if len(hdr_range) > 0:
+ range_comp1 = hdr_range[0].split('/')
+ if len(range_comp1) > 1:
+ range_comp2 = range_comp1[0].split(' ') #split away the "bytes" prefix
+ if len(range_comp2) == 0:
+ raise FatalException(12, 'Malformed Content-Range response header: "{0}".'.format(hdr_range))
+ range_comp3 = range_comp2[1].split('-')
+ seek_pos = int(range_comp3[0])
+ if range_comp1[1] != '*': #'*' == unknown length
+ file_size = int(range_comp1[1])
+
+ if file_size == 0:
+ #Try the old-fashioned way
+ hdrLen = meta.getheaders("Content-Length")
+ if len(hdrLen) == 0:
+ raise FatalException(12, "Response header doesn't contain Content-Length. Chunked Transfer-Encoding is not supported for now.")
+ file_size = int(hdrLen[0])
+
+ return (file_size, seek_pos)
+
+
+def force_download_file(link, destination, chunk_size = 16 * 1024, progress_func = download_progress):
+ request = urllib2.Request(link)
+
+ if os.path.exists(destination) and not os.path.isfile(destination):
+ #Directory specified as target? Must be a mistake. Bail out, don't assume anything.
+ err = 'Download target {0} is a directory.'.format(destination)
+ raise FatalException(1, err)
+
+ (dest_path, file_name) = os.path.split(destination)
+
+ temp_dest = destination + ".tmpdownload"
+ partial_size = 0
+
+ if os.path.exists(temp_dest):
+ #Support for resuming downloads, in case the process is killed while downloading a file
+ # set resume range
+ # See http://stackoverflow.com/questions/6963283/python-urllib2-resume-download-doesnt-work-when-network-reconnects
+ partial_size = os.stat(temp_dest).st_size
+ if partial_size > chunk_size:
+ #Re-download the last chunk, to minimize the possibilities of file corruption
+ resume_pos = partial_size - chunk_size
+ request.add_header("Range", "bytes=%s-" % resume_pos)
+ else:
+ #Make sure the full dir structure is in place, otherwise file open will fail
+ if not os.path.exists(dest_path):
+ os.makedirs(dest_path)
+
+ response = urllib2.urlopen(request)
+ (file_size, seek_pos) = find_range_components(response.info())
+
+ print_info_msg("Downloading to: %s Bytes: %s" % (destination, file_size))
+
+ if partial_size < file_size:
+ if seek_pos == 0:
+ #New file, create it
+ open_mode = 'wb'
+ else:
+ #Resuming download of an existing file
+ open_mode = 'rb+' #rb+ doesn't create the file, using wb to create it
+ f = open(temp_dest, open_mode)
+
+ try:
+ #Resume the download from where it left off
+ if seek_pos > 0:
+ f.seek(seek_pos)
+
+ file_size_dl = seek_pos
+ while True:
+ buffer = response.read(chunk_size)
+ if not buffer:
+ break
+
+ file_size_dl += len(buffer)
+ f.write(buffer)
+
+ progress_func(file_name, file_size_dl, chunk_size, file_size)
+ finally:
+ f.close()
+
+ sys.stdout.write('\n')
+ sys.stdout.flush()
+
+ print_info_msg("Finished downloading {0} to {1}".format(link, destination))
+
+ downloaded_size = os.stat(temp_dest).st_size
+ if downloaded_size != file_size:
+ err = 'Size of downloaded file {0} is {1} bytes, it is probably damaged or incomplete'.format(destination, downloaded_size)
+ raise FatalException(1, err)
+
+ # when download is complete -> mv temp_dest destination
+ if os.path.exists(destination):
+ #Windows behavior: rename fails if the destination file exists
+ os.unlink(destination)
+ os.rename(temp_dest, destination)
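
The resume path re-requests only the missing byte range, and deliberately re-downloads the last chunk in case the previous write was torn. A sketch of the Range header arithmetic, with illustrative values:

    chunk_size = 16 * 1024
    partial_size = 100000  # bytes already present in the .tmpdownload file

    if partial_size > chunk_size:
        # Step back one chunk so a possibly-corrupt tail gets overwritten.
        resume_pos = partial_size - chunk_size
        print("Range: bytes=%s-" % resume_pos)  # Range: bytes=83616-
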
diff --git a/ambari-common/src/main/python/ambari_commons/logging_utils.py b/ambari-common/src/main/python/ambari_commons/logging_utils.py
new file mode 100644
index 0000000000..9d45fdb73d
--- /dev/null
+++ b/ambari-common/src/main/python/ambari_commons/logging_utils.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+VERBOSE = False
+SILENT = False
+DEBUG_MODE = False
+
+# terminal styles
+BOLD_ON = '\033[1m'
+BOLD_OFF = '\033[0m'
+
+#
+# Prints an "info" messsage.
+#
+def print_info_msg(msg):
+ if VERBOSE:
+ print("INFO: " + msg)
+
+#
+# Prints an "error" messsage.
+#
+def print_error_msg(msg):
+ print("ERROR: " + msg)
+
+#
+# Prints a "warning" messsage.
+#
+def print_warning_msg(msg, bold=False):
+ if bold:
+ print(BOLD_ON + "WARNING: " + msg + BOLD_OFF)
+ else:
+ print("WARNING: " + msg)
diff --git a/ambari-common/src/main/python/ambari_commons/os_check.py b/ambari-common/src/main/python/ambari_commons/os_check.py
index 78cf7844bd..03e94c4f91 100644
--- a/ambari-common/src/main/python/ambari_commons/os_check.py
+++ b/ambari-common/src/main/python/ambari_commons/os_check.py
@@ -36,16 +36,40 @@ def linux_distribution():
PYTHON_VER = sys.version_info[0] * 10 + sys.version_info[1]
if PYTHON_VER < 26:
- linux_distribution = platform.dist()
+ (distname, version, id) = platform.dist()
elif os.path.exists('/etc/redhat-release'):
- linux_distribution = platform.dist()
+ (distname, version, id) = platform.dist()
else:
- linux_distribution = platform.linux_distribution()
+ (distname, version, id) = platform.linux_distribution()
- return linux_distribution
+ return (platform.system(), os.name, distname, version, id)
+def windows_distribution():
+ from os_windows import get_windows_version
+
+ # Only support Windows Server 64 bit
+ (win_release, win_version, win_csd, win_ptype) = platform.win32_ver()
+
+ if win_version.startswith("6.2."):
+ # win32_ver() doesn't work correctly for Windows Server 2012 R2 and Windows 8.1
+ (win_ver_major, win_ver_minor, win_ver_build) = get_windows_version()
+ if win_ver_major == 6 and win_ver_minor == 3:
+ win_release = "2012ServerR2"
+ win_version = "%d.%d.%d" % (win_ver_major, win_ver_minor, win_ver_build)
+
+ #if win_version
+ return (platform.system(), os.name, "win" + win_release, win_version, win_ptype)
class OS_CONST_TYPE(type):
+ # os platforms
+ LINUX_OS = 'linux'
+ WINDOWS_OS = 'windows'
+
+ # os families
+ REDHAT_FAMILY = 'redhat'
+ DEBIAN_FAMILY = 'debian'
+ SUSE_FAMILY = 'suse'
+ WINSRV_FAMILY = 'winsrv'
# Declare here os type mapping
OS_FAMILY_COLLECTION = []
@@ -58,7 +82,8 @@ class OS_CONST_TYPE(type):
Initialize internal data structures from file
"""
try:
- f = open(os.path.join(RESOURCES_DIR, OSFAMILY_JSON_RESOURCE))
+ fpath = os.path.join(RESOURCES_DIR, OSFAMILY_JSON_RESOURCE)
+ f = open(fpath)
json_data = json.load(f)
f.close()
for family in json_data:
@@ -69,7 +94,7 @@ class OS_CONST_TYPE(type):
'os_list': json_data[family][JSON_OS_TYPE]
}]
except:
- raise Exception("Couldn't load '%s' file" % OSFAMILY_JSON_RESOURCE)
+ raise Exception("Couldn't load '%s' file" % fpath)
def __init__(cls, name, bases, dct):
cls.initialize_data()
@@ -89,17 +114,45 @@ class OS_CONST_TYPE(type):
return name[:-7]
raise Exception("Unknown class property '%s'" % name)
+def get_os_distribution():
+ if platform.system() == 'Windows':
+ dist = windows_distribution()
+ else:
+ if platform.system() == 'Mac':
+ raise Exception("MacOS not supported. Exiting...")
+ else:
+ # Linux
+ # Read content from /etc/*-release file
+ # Full release name
+ dist = linux_distribution()
+ return dist
class OSConst:
__metaclass__ = OS_CONST_TYPE
class OSCheck:
+ _dist = get_os_distribution()
+
+ @staticmethod
+ def get_os_os():
+ """
+ Return values:
+ windows, linux
+
+ In case cannot detect - exit.
+ """
+ # Read content from /etc/*-release file
+ # Full release name
+ os_os = OSCheck._dist[0].lower()
+
+ return os_os
@staticmethod
def get_os_type():
"""
Return values:
+ win2008server, win2012server,
redhat, fedora, centos, oraclelinux, ascendos,
amazon, xenserver, oel, ovs, cloudlinux, slc, scientific, psbm,
ubuntu, debian, sles, sled, opensuse, suse ... and others
@@ -108,8 +161,7 @@ class OSCheck:
"""
# Read content from /etc/*-release file
# Full release name
- dist = linux_distribution()
- operatingSystem = dist[0].lower()
+ operatingSystem = OSCheck._dist[2].lower()
# special cases
if os.path.exists('/etc/oracle-release'):
@@ -147,10 +199,7 @@ class OSCheck:
In case cannot detect raises exception.
"""
- # Read content from /etc/*-release file
- # Full release name
- dist = linux_distribution()
- dist = dist[1]
+ dist = OSCheck._dist[3]
if dist:
return dist
@@ -173,8 +222,7 @@ class OSCheck:
In case cannot detect raises exception.
"""
- dist = linux_distribution()
- dist = dist[2].lower()
+ dist = OSCheck._dist[4].lower()
if dist:
return dist
@@ -226,6 +274,48 @@ class OSCheck:
return False
@staticmethod
+ def is_windows_family():
+ """
+ Return true if it is so or false if not
+
+ This is safe check for windows family, doesn't generate exception
+ """
+ try:
+ if OSCheck.get_os_family() == OSConst.WINSRV_FAMILY:
+ return True
+ except Exception:
+ pass
+ return False
+
+ @staticmethod
+ def is_linux_os():
+ """
+ Return true if it is so or false if not
+
+ This is safe check for linux os, doesn't generate exception
+ """
+ try:
+ if OSCheck.get_os_os() == OSConst.LINUX_OS:
+ return True
+ except Exception:
+ pass
+ return False
+
+ @staticmethod
+ def is_windows_os():
+ """
+ Return true if it is so or false if not
+
+ This is safe check for windows os, doesn't generate exception
+ """
+ try:
+ if OSCheck.get_os_os() == OSConst.WINDOWS_OS:
+ return True
+ except Exception:
+ pass
+ return False
+
+ @staticmethod
def is_redhat7():
"""
Return true if it is so or false if not
@@ -238,4 +328,10 @@ class OSCheck:
return True
except Exception:
pass
- return False
\ No newline at end of file
+ return False
+
+# OS info
+OS_VERSION = OSCheck().get_os_major_version()
+OS_TYPE = OSCheck.get_os_type()
+OS_FAMILY = OSCheck.get_os_family()
+OS_OS = OSCheck.get_os_os()
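
Caching the distribution tuple in OSCheck._dist means platform detection runs once per process, and the new is_windows_os()/is_linux_os() helpers read it without raising. A usage sketch, assuming ambari_commons is importable:

    from ambari_commons.os_check import OSCheck

    if OSCheck.is_windows_os():
        print("windows, type: " + OSCheck.get_os_type())    # e.g. win2012serverr2
    elif OSCheck.is_linux_os():
        print("linux, family: " + OSCheck.get_os_family())  # e.g. redhat
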
diff --git a/ambari-common/src/main/python/ambari_commons/os_linux.py b/ambari-common/src/main/python/ambari_commons/os_linux.py
new file mode 100644
index 0000000000..38f3fb9999
--- /dev/null
+++ b/ambari-common/src/main/python/ambari_commons/os_linux.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+import getpass
+
+import os
+import pwd
+import shlex
+import subprocess
+
+from logging_utils import *
+
+
+NR_CHMOD_CMD = 'chmod {0} {1} {2}'
+NR_CHOWN_CMD = 'chown {0} {1} {2}'
+
+ULIMIT_CMD = "ulimit -n"
+
+
+def run_os_command(cmd):
+ print_info_msg('about to run command: ' + str(cmd))
+ if type(cmd) == str:
+ cmd = shlex.split(cmd)
+ process = subprocess.Popen(cmd,
+ stdout=subprocess.PIPE,
+ stdin=subprocess.PIPE,
+ stderr=subprocess.PIPE
+ )
+ (stdoutdata, stderrdata) = process.communicate()
+ return process.returncode, stdoutdata, stderrdata
+
+def os_change_owner(filePath, user):
+ uid = pwd.getpwnam(user).pw_uid
+ gid = pwd.getpwnam(user).pw_gid
+ os.chown(filePath, uid, gid)
+
+def os_is_root():
+ '''
+  Checks the effective UID.
+ Returns True if a program is running under root-level privileges.
+ '''
+ return os.geteuid() == 0
+
+def os_set_file_permissions(file, mod, recursive, user):
+ WARN_MSG = "Command {0} returned exit code {1} with message: {2}"
+ if recursive:
+ params = " -R "
+ else:
+ params = ""
+ command = NR_CHMOD_CMD.format(params, mod, file)
+ retcode, out, err = run_os_command(command)
+ if retcode != 0:
+    print_warning_msg(WARN_MSG.format(command, retcode, err))
+  command = NR_CHOWN_CMD.format(params, user, file)
+  retcode, out, err = run_os_command(command)
+  if retcode != 0:
+    print_warning_msg(WARN_MSG.format(command, retcode, err))
+
+def os_set_open_files_limit(maxOpenFiles):
+ command = "%s %s" % (ULIMIT_CMD, str(maxOpenFiles))
+ run_os_command(command)
+
+
+def os_getpass(prompt):
+ return getpass.unix_getpass(prompt)
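
A short usage sketch for the helpers above (the log path and the 'ambari' user are illustrative assumptions; print_warning_msg comes from logging_utils):

  from ambari_commons.os_linux import run_os_command, os_set_file_permissions

  retcode, out, err = run_os_command("ls -l /var/log/ambari-server")
  if retcode != 0:
    print_warning_msg("Command failed: " + err)
  # chmod 644 and chown to the 'ambari' user, non-recursively
  os_set_file_permissions("/var/log/ambari-server/ambari-server.log", "644", False, "ambari")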
diff --git a/ambari-common/src/main/python/ambari_commons/os_utils.py b/ambari-common/src/main/python/ambari_commons/os_utils.py
new file mode 100644
index 0000000000..9ea423f731
--- /dev/null
+++ b/ambari-common/src/main/python/ambari_commons/os_utils.py
@@ -0,0 +1,102 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import shutil
+import string
+
+from os_check import *
+
+if OSCheck.is_windows_os():
+ from os_windows import *
+else:
+ # MacOS not supported
+ from os_linux import *
+
+from logging_utils import *
+from exceptions import FatalException
+
+
+def is_valid_filepath(filepath):
+ if not filepath or not os.path.exists(filepath) or os.path.isdir(filepath):
+ print 'Invalid path, please provide the absolute file path.'
+ return False
+ else:
+ return True
+
+def quote_path(filepath):
+  if ' ' in filepath:
+ filepath_ret = '"' + filepath + '"'
+ else:
+ filepath_ret = filepath
+ return filepath_ret
+
+def search_file(filename, search_path, pathsep=os.pathsep):
+ """ Given a search path, find file with requested name """
+ for path in string.split(search_path, pathsep):
+ candidate = os.path.join(path, filename)
+ if os.path.exists(candidate):
+ return os.path.abspath(candidate)
+ return None
+
+def copy_file(src, dest_file):
+ try:
+ shutil.copyfile(src, dest_file)
+ except Exception, e:
+ err = "Can not copy file {0} to {1} due to: {2} . Please check file " \
+ "permissions and free disk space.".format(src, dest_file, e.message)
+ raise FatalException(1, err)
+
+def copy_files(files, dest_dir):
+ if os.path.isdir(dest_dir):
+ for filepath in files:
+ shutil.copy(filepath, dest_dir)
+ return 0
+ else:
+ return -1
+
+def remove_file(filePath):
+ if os.path.exists(filePath):
+ try:
+ os.remove(filePath)
+ except Exception, e:
+ print_warning_msg('Unable to remove file: ' + str(e))
+ return 1
+ pass
+ return 0
+
+def set_file_permissions(file, mod, user, recursive):
+ if os.path.exists(file):
+ os_set_file_permissions(file, mod, recursive, user)
+ else:
+ print_info_msg("File %s does not exist" % file)
+
+def is_root():
+ return os_is_root()
+
+# Proxy to the os implementation
+def change_owner(filePath, user):
+ os_change_owner(filePath, user)
+
+# Proxy to the os implementation
+def set_open_files_limit(maxOpenFiles):
+ os_set_open_files_limit(maxOpenFiles)
+
+def get_password(prompt):
+ return os_getpass(prompt) \ No newline at end of file
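
The module resolves to os_windows or os_linux at import time, so callers stay platform-agnostic; a sketch (file and directory names are illustrative):

  import os
  from ambari_commons.os_utils import search_file, quote_path

  conf = search_file("ambari.properties", "/etc/ambari-server/conf" + os.pathsep + "/tmp")
  cmd_path = quote_path("C:\\Program Files\\ambari")  # quoted because of the embedded space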
diff --git a/ambari-common/src/main/python/ambari_commons/os_windows.py b/ambari-common/src/main/python/ambari_commons/os_windows.py
new file mode 100644
index 0000000000..2fb98e4947
--- /dev/null
+++ b/ambari-common/src/main/python/ambari_commons/os_windows.py
@@ -0,0 +1,563 @@
#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+import os
+import getpass
+import shlex
+import subprocess
+import sys
+import time
+import win32api
+import win32event
+import win32service
+import win32con
+import win32serviceutil
+import wmi
+import random
+import string
+
+import ctypes
+
+from win32security import *
+from win32api import *
+from winerror import ERROR_INVALID_HANDLE
+from win32process import GetExitCodeProcess, STARTF_USESTDHANDLES, STARTUPINFO, CreateProcessAsUser
+from win32event import WaitForSingleObject, INFINITE
+import msvcrt
+import tempfile
+from win32event import *
+from win32api import CloseHandle
+
+from ambari_commons.exceptions import *
+from logging_utils import *
+
+from win32security import LsaOpenPolicy, POLICY_CREATE_ACCOUNT, POLICY_LOOKUP_NAMES, LookupAccountName, \
+ LsaAddAccountRights, LsaRemoveAccountRights, SE_SERVICE_LOGON_NAME
+from win32net import NetUserAdd
+from win32netcon import USER_PRIV_USER, UF_NORMAL_ACCOUNT, UF_SCRIPT
+import pywintypes
+
+SERVICE_STATUS_UNKNOWN = "unknown"
+SERVICE_STATUS_STARTING = "starting"
+SERVICE_STATUS_RUNNING = "running"
+SERVICE_STATUS_STOPPING = "stopping"
+SERVICE_STATUS_STOPPED = "stopped"
+SERVICE_STATUS_NOT_INSTALLED = "not installed"
+
+WHOAMI_GROUPS = "whoami /groups"
+ADMIN_ACCOUNT = "BUILTIN\\Administrators"
+
+class OSVERSIONINFOEXW(ctypes.Structure):
+ _fields_ = [('dwOSVersionInfoSize', ctypes.c_ulong),
+ ('dwMajorVersion', ctypes.c_ulong),
+ ('dwMinorVersion', ctypes.c_ulong),
+ ('dwBuildNumber', ctypes.c_ulong),
+ ('dwPlatformId', ctypes.c_ulong),
+ ('szCSDVersion', ctypes.c_wchar*128),
+ ('wServicePackMajor', ctypes.c_ushort),
+ ('wServicePackMinor', ctypes.c_ushort),
+ ('wSuiteMask', ctypes.c_ushort),
+ ('wProductType', ctypes.c_byte),
+ ('wReserved', ctypes.c_byte)]
+
+def get_windows_version():
+ """
+  Gets the OS version. Returns a tuple of
+  (OS_MAJOR, OS_MINOR, OS_BUILD).
+ """
+ os_version = OSVERSIONINFOEXW()
+ os_version.dwOSVersionInfoSize = ctypes.sizeof(os_version)
+ retcode = ctypes.windll.Ntdll.RtlGetVersion(ctypes.byref(os_version))
+ if retcode != 0:
+ raise Exception("Failed to get OS version")
+
+ return os_version.dwMajorVersion, os_version.dwMinorVersion, os_version.dwBuildNumber
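
# Editorial sketch of typical use (not part of the patch); the version pairs
# are the documented NT kernel versions for the supported server SKUs:
#   major, minor, build = get_windows_version()
#   (6, 0) -> Server 2008, (6, 1) -> 2008 R2, (6, 2) -> 2012, (6, 3) -> 2012 R2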
+
+CHECK_FIREWALL_SCRIPT = """[string]$CName = $env:computername
+$reg = [Microsoft.Win32.RegistryKey]::OpenRemoteBaseKey("LocalMachine",$CName)
+$domain = $reg.OpenSubKey("System\CurrentControlSet\Services\SharedAccess\Parameters\FirewallPolicy\DomainProfile").GetValue("EnableFirewall")
+$standard = $reg.OpenSubKey("System\CurrentControlSet\Services\SharedAccess\Parameters\FirewallPolicy\StandardProfile").GetValue("EnableFirewall")
+$public = $reg.OpenSubKey("System\CurrentControlSet\Services\SharedAccess\Parameters\FirewallPolicy\PublicProfile").GetValue("EnableFirewall")
+Write-Host $domain
+Write-Host $standard
+Write-Host $public
+"""
+
+def _create_tmp_files():
+ out_file = tempfile.TemporaryFile(mode="r+b")
+ err_file = tempfile.TemporaryFile(mode="r+b")
+ return (msvcrt.get_osfhandle(out_file.fileno()),
+ msvcrt.get_osfhandle(err_file.fileno()),
+ out_file,
+ err_file)
+
+
+def _get_files_output(out, err):
+ out.seek(0)
+ err.seek(0)
+ return out.read().strip(), err.read().strip()
+
+
+def _safe_duplicate_handle(h):
+ try:
+ h = DuplicateHandle(GetCurrentProcess(),
+ h,
+ GetCurrentProcess(),
+ 0,
+ True,
+ win32con.DUPLICATE_SAME_ACCESS)
+ return True, h
+ except Exception as exc:
+ if exc.winerror == ERROR_INVALID_HANDLE:
+ return True, None
+ return False, None
+
+
+def run_os_command_impersonated(cmd, user, password, domain='.'):
+ si = STARTUPINFO()
+
+ out_handle, err_handle, out_file, err_file = _create_tmp_files()
+
+ ok, si.hStdInput = _safe_duplicate_handle(GetStdHandle(STD_INPUT_HANDLE))
+
+ if not ok:
+ raise Exception("Unable to create StdInput for child process")
+ ok, si.hStdOutput = _safe_duplicate_handle(out_handle)
+ if not ok:
+ raise Exception("Unable to create StdOut for child process")
+ ok, si.hStdError = _safe_duplicate_handle(err_handle)
+ if not ok:
+ raise Exception("Unable to create StdErr for child process")
+
+ si.dwFlags = STARTF_USESTDHANDLES
+ si.lpDesktop = ""
+
+ user_token = LogonUser(user, domain, password, win32con.LOGON32_LOGON_SERVICE, win32con.LOGON32_PROVIDER_DEFAULT)
+ primary_token = DuplicateTokenEx(user_token, SecurityImpersonation, 0, TokenPrimary)
+ info = CreateProcessAsUser(primary_token, None, cmd, None, None, 1, 0, None, None, si)
+
+ hProcess, hThread, dwProcessId, dwThreadId = info
+ hThread.Close()
+
+ try:
+ WaitForSingleObject(hProcess, INFINITE)
+ except KeyboardInterrupt:
+ pass
+
+ out, err = _get_files_output(out_file, err_file)
+ exitcode = GetExitCodeProcess(hProcess)
+
+ return exitcode, out, err
+
+def run_os_command(cmd, env=None):
+ if isinstance(cmd,basestring):
+ cmd = cmd.replace("\\", "\\\\")
+ cmd = shlex.split(cmd)
+ process = subprocess.Popen(cmd,
+ stdout=subprocess.PIPE,
+ stdin=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=env
+ )
+ (stdoutdata, stderrdata) = process.communicate()
+ return process.returncode, stdoutdata, stderrdata
+
+# Execute the powershell script passed in script_content. The script is written to a temporary
+# file to avoid escaping and formatting problems.
+def run_powershell_script(script_content):
+ tmp_dir = tempfile.gettempdir()
+ random_filename = ''.join(random.choice(string.lowercase) for i in range(10))
+ script_file = open(os.path.join(tmp_dir,random_filename+".ps1"),"w")
+ script_file.write(script_content)
+ script_file.close()
+ result = run_os_command("powershell -ExecutionPolicy unrestricted -File {0}".format(script_file.name))
+ os.remove(script_file.name)
+ return result
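
# Editorial usage sketch (not part of the patch): the firewall-check script
# defined above is a natural caller of this helper. Write-Host prints one
# EnableFirewall flag per profile, so any "1" means a firewall is active:
#   retcode, out, err = run_powershell_script(CHECK_FIREWALL_SCRIPT)
#   firewall_enabled = any(line.strip() == "1" for line in out.splitlines())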
+
+def os_change_owner(filePath, user):
+ cmd = ['icacls', filePath, '/setowner', user]
+ retcode, outdata, errdata = run_os_command(cmd)
+ return retcode
+
+def os_is_root():
+ '''
+ Checks whether the current user is a member of the Administrators group
+ Returns True if yes, otherwise False
+ '''
+ retcode, out, err = run_os_command(WHOAMI_GROUPS)
+ if retcode != 0:
+ err_msg = "Unable to check the current user's group memberships. Command {0} returned exit code {1} with message: {2}".format(WHOAMI_GROUPS, retcode, err)
+ print_warning_msg(err_msg)
+ raise FatalException(retcode, err_msg)
+
+ #Check for Administrators group membership
+ if -1 != out.find('\n' + ADMIN_ACCOUNT):
+ return True
+
+ return False
+
+def os_set_file_permissions(file, mod, recursive, user):
+ retcode = 0
+
+ #WARN_MSG = "Command {0} returned exit code {1} with message: {2}"
+ #if recursive:
+ # params = " -R "
+ #else:
+ # params = ""
+ #command = NR_CHMOD_CMD.format(params, mod, file)
+ #retcode, out, err = run_os_command(command)
+ #if retcode != 0:
+ # print_warning_msg(WARN_MSG.format(command, file, err))
+ #command = NR_CHOWN_CMD.format(params, user, file)
+ #retcode, out, err = run_os_command(command)
+ #if retcode != 0:
+ # print_warning_msg(WARN_MSG.format(command, file, err))
+
+ # rights = mod
+ # acls_remove_cmd = "icacls {0} /remove {1}".format(file, user)
+ # retcode, out, err = run_os_command(acls_remove_cmd)
+ # if retcode == 0:
+ # acls_modify_cmd = "icacls {0} /grant {1}:{2}".format(file, user, rights)
+ # retcode, out, err = run_os_command(acls_modify_cmd)
+ return retcode
+
+
+def os_set_open_files_limit(maxOpenFiles):
+ # No open files limit in Windows. Not messing around with the System Resource Manager, at least for now.
+ pass
+
+
+def os_getpass(prompt, stream=None):
+ """Prompt for password with echo off, using Windows getch()."""
+ if sys.stdin is not sys.__stdin__:
+ return getpass.fallback_getpass(prompt, stream)
+
+ import msvcrt
+
+ for c in prompt:
+ msvcrt.putch(c)
+
+ pw = ""
+ while True:
+ c = msvcrt.getch()
+ if c == '\r' or c == '\n':
+ break
+ if c == '\003':
+ raise KeyboardInterrupt
+ if c == '\b':
+ if pw == '':
+ pass
+ else:
+ pw = pw[:-1]
+ msvcrt.putch('\b')
+ msvcrt.putch(" ")
+ msvcrt.putch('\b')
+ else:
+ pw = pw + c
+ msvcrt.putch("*")
+
+ msvcrt.putch('\r')
+ msvcrt.putch('\n')
+ return pw
+
+#[fbarca] Not used for now, keep it around just in case
+def wait_for_pid_wmi(processName, parentPid, pattern, timeout):
+ """
+ Check pid for existence during timeout
+ """
+ tstart = time.time()
+ pid_live = 0
+
+ c = wmi.WMI(find_classes=False)
+ qry = "select * from Win32_Process where Name=\"%s\" and ParentProcessId=%d" % (processName, parentPid)
+
+ while int(time.time() - tstart) <= timeout:
+ for proc in c.query(qry):
+ cmdLine = proc.CommandLine
+ if cmdLine is not None and pattern in cmdLine:
+        return proc.ProcessId
+ time.sleep(1)
+ return 0
+
+
+#need this for redirecting output from the python process to a file
+class SyncStreamWriter(object):
+ def __init__(self, stream, hMutexWrite):
+ self.stream = stream
+ self.hMutexWrite = hMutexWrite
+
+ def write(self, data):
+ #Ensure that the output is thread-safe when writing from 2 separate streams into the same file
+ # (typical when redirecting both stdout and stderr to the same file).
+ win32event.WaitForSingleObject(self.hMutexWrite, win32event.INFINITE)
+ try:
+ self.stream.write(data)
+ self.stream.flush()
+ finally:
+ win32event.ReleaseMutex(self.hMutexWrite)
+
+ def __getattr__(self, attr):
+ return getattr(self.stream, attr)
+
+
+class SvcStatusCallback(object):
+ def __init__(self, svc):
+ self.svc = svc
+
+ def reportStartPending(self):
+ self.svc.ReportServiceStatus(win32service.SERVICE_START_PENDING)
+
+ def reportStarted(self):
+ self.svc.ReportServiceStatus(win32service.SERVICE_RUNNING)
+
+ def reportStopPending(self):
+ self.svc.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
+
+ def reportStopped(self):
+ self.svc.ReportServiceStatus(win32service.SERVICE_STOPPED)
+
+
+class WinServiceController:
+ @staticmethod
+ def Start(serviceName, waitSecs=30):
+ err = 0
+ try:
+ win32serviceutil.StartService(serviceName)
+ if waitSecs:
+ win32serviceutil.WaitForServiceStatus(serviceName, win32service.SERVICE_RUNNING, waitSecs)
+ except win32service.error, exc:
+ print "Error starting service: %s" % exc.strerror
+ err = exc.winerror
+ return err
+
+ @staticmethod
+ def Stop(serviceName, waitSecs=30):
+ err = 0
+ try:
+ if waitSecs:
+ win32serviceutil.StopServiceWithDeps(serviceName, waitSecs=waitSecs)
+ else:
+ win32serviceutil.StopService(serviceName)
+ if waitSecs:
+ win32serviceutil.WaitForServiceStatus(serviceName, win32service.SERVICE_STOPPED, waitSecs)
+ except win32service.error, exc:
+ print "Error stopping service: %s (%d)" % (exc.strerror, exc.winerror)
+ err = exc.winerror
+ return err
+
+ @staticmethod
+ def QueryStatus(serviceName):
+ statusString = SERVICE_STATUS_UNKNOWN
+
+ try:
+ status = win32serviceutil.QueryServiceStatus(serviceName)[1]
+
+ if status == win32service.SERVICE_STOPPED:
+ statusString = SERVICE_STATUS_STOPPED
+ elif status == win32service.SERVICE_START_PENDING:
+ statusString = SERVICE_STATUS_STARTING
+ elif status == win32service.SERVICE_RUNNING:
+ statusString = SERVICE_STATUS_RUNNING
+ elif status == win32service.SERVICE_STOP_PENDING:
+ statusString = SERVICE_STATUS_STOPPING
+ except win32api.error:
+ statusString = SERVICE_STATUS_NOT_INSTALLED
+ pass
+
+ return statusString
+
+ @staticmethod
+ def EnsureServiceIsStarted(serviceName, waitSecs=30):
+ err = 0
+ try:
+ status = win32serviceutil.QueryServiceStatus(serviceName)[1]
+ if win32service.SERVICE_RUNNING != status:
+ if win32service.SERVICE_START_PENDING != status:
+ win32serviceutil.StartService(serviceName)
+ if waitSecs:
+ win32serviceutil.WaitForServiceStatus(serviceName, win32service.SERVICE_RUNNING, waitSecs)
+ except win32service.error, exc:
+ err = exc.winerror
+ return err
+
+
+class WinService(win32serviceutil.ServiceFramework):
+ # _svc_name_ = The service name
+ # _svc_display_name_ = The service display name
+ # _svc_description_ = The service description
+
+ _heventSvcStop = win32event.CreateEvent(None, 0, 0, None)
+ _hmtxOut = win32event.CreateMutex(None, False, None) #[fbarca] Python doesn't support critical sections
+
+ def __init__(self, *args):
+ win32serviceutil.ServiceFramework.__init__(self, *args)
+
+ def SvcDoRun(self):
+ try:
+ self.ReportServiceStatus(win32service.SERVICE_RUNNING)
+ self.ServiceMain()
+ except Exception, x:
+ #TODO: Log exception
+ self.SvcStop()
+
+ def SvcStop(self):
+ self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
+ win32event.SetEvent(self._heventSvcStop)
+
+ # Service code entry point. Override it to implement the intended functionality.
+ def ServiceMain(self):
+ #Default implementation, does nothing.
+ win32event.WaitForSingleObject(self._heventSvcStop, win32event.INFINITE)
+ pass
+
+ def DefCtrlCHandler(self):
+ print_info_msg("Ctrl+C handler invoked. Stopping.")
+ win32event.SetEvent(self._heventSvcStop)
+ pass
+
+ #username domain\\username : The Username the service is to run under
+ #password password : The password for the username
+ #startup [manual|auto|disabled|delayed] : How the service starts, default = auto
+ #interactive : Allow the service to interact with the desktop.
+ #perfmonini file: .ini file to use for registering performance monitor data
+ #perfmondll file: .dll file to use when querying the service for performance data, default = perfmondata.dll
+ @classmethod
+ def Install(cls, startupMode = "auto", username = None, password = None, interactive = False,
+ perfMonIni = None, perfMonDll = None):
+ installArgs = [sys.argv[0], "--startup=" + startupMode]
+ if username is not None and username:
+ installArgs.append("--username=" + username)
+ if password is not None and password:
+ installArgs.append("--password=" + password)
+ if interactive:
+ installArgs.append("--interactive")
+ if perfMonIni is not None and perfMonIni:
+ installArgs.append("--perfmonini=" + perfMonIni)
+ if perfMonDll is not None and perfMonDll:
+ installArgs.append("--perfmondll=" + perfMonDll)
+ installArgs.append("install")
+ win32serviceutil.HandleCommandLine(cls, None, installArgs)
+
+ @classmethod
+ def Start(cls, waitSecs = 30):
+ return WinServiceController.Start(cls._svc_name_, waitSecs)
+
+ @classmethod
+ def Stop(cls, waitSecs = 30):
+ return WinServiceController.Stop(cls._svc_name_, waitSecs)
+
+ @classmethod
+ def QueryStatus(cls):
+ return WinServiceController.QueryStatus(cls._svc_name_)
+
+ @classmethod
+ def set_ctrl_c_handler(cls, ctrlHandler):
+ win32api.SetConsoleCtrlHandler(ctrlHandler, True)
+ pass
+
+ def _RedirectOutputStreamsToFile(self, outFilePath):
+ outFileDir = os.path.dirname(outFilePath)
+ if not os.path.exists(outFileDir):
+ os.makedirs(outFileDir)
+
+ out_writer = SyncStreamWriter(file(outFilePath, "w"), self._hmtxOut)
+ sys.stderr = out_writer
+ sys.stdout = out_writer
+ pass
+
+ def CheckForStop(self):
+ #Check for stop event to be signaled
+ return win32event.WAIT_OBJECT_0 == win32event.WaitForSingleObject(self._heventSvcStop, 1)
+
+ def _StopOrWaitForChildProcessToFinish(self, childProcess):
+ #Wait for the child process to finish or for the stop event to be signaled
+ if(win32event.WAIT_OBJECT_0 == win32event.WaitForMultipleObjects([self._heventSvcStop, childProcess._handle], False, win32event.INFINITE)):
+ # The OS only detaches the child process when the master process exits.
+ # We must kill it manually.
+ try:
+ #Sending signal.CTRL_BREAK_EVENT doesn't work. It only detaches the child process from the master.
+ # Must brutally terminate the child process. Sorry Java.
+ childProcess.terminate()
+ except OSError, e:
+ print_info_msg("Unable to stop Ambari Server - " + str(e))
+ return False
+
+ return True
+
+class SystemWideLock(object):
+
+ def __init__(self, name):
+ self._mutex = CreateMutex(None, 0, name)
+
+ def lock(self, timeout=0):
+ result = WaitForSingleObject(self._mutex, timeout)
+ if result in [WAIT_TIMEOUT, WAIT_ABANDONED, WAIT_FAILED]:
+ return False
+ elif result == WAIT_OBJECT_0:
+ return True
+
+ def unlock(self):
+ try:
+ ReleaseMutex(self._mutex)
+ return True
+ except:
+ return False
+
+ def __del__(self):
+ CloseHandle(self._mutex)
+
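# Editorial sketch of the intended locking pattern (mirrors the use in
# install_hdp_msi.py further below in this patch):
#   lock = SystemWideLock("hdp_msi_lock")
#   if lock.lock(600000):   # wait up to 10 minutes
#     try:
#       pass                # critical section
#     finally:
#       lock.unlock()
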
+class UserHelper(object):
+ ACTION_OK = 0
+ USER_EXISTS = 1
+ ACTION_FAILED = -1
+
+ def __init__(self):
+ self._policy = LsaOpenPolicy(None, POLICY_CREATE_ACCOUNT | POLICY_LOOKUP_NAMES)
+
+ def create_user(self, name, password, comment="Ambari user"):
+ user_info = {}
+ user_info['name'] = name
+ user_info['password'] = password
+ user_info['priv'] = USER_PRIV_USER
+ user_info['comment'] = comment
+ user_info['flags'] = UF_NORMAL_ACCOUNT | UF_SCRIPT
+ try:
+ NetUserAdd(None, 1, user_info)
+ except pywintypes.error as e:
+ if e.winerror == 2224:
+ return UserHelper.USER_EXISTS, e.strerror
+ else:
+ return UserHelper.ACTION_FAILED, e.strerror
+ return UserHelper.ACTION_OK, "User created."
+
+ def add_user_privilege(self, name, privilege):
+ try:
+ acc_sid = LookupAccountName(None, name)[0]
+ LsaAddAccountRights(self._policy, acc_sid, (privilege,))
+ except pywintypes.error as e:
+ return UserHelper.ACTION_FAILED, e.strerror
+ return UserHelper.ACTION_OK, "Privilege added."
+
+ def remove_user_privilege(self, name, privilege):
+ try:
+ acc_sid = LookupAccountName(None, name)[0]
+ LsaRemoveAccountRights(self._policy, acc_sid, 0, (privilege,))
+ except pywintypes.error as e:
+ return UserHelper.ACTION_FAILED, e.strerror
+ return UserHelper.ACTION_OK, "Privilege removed."
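
A short sketch of the UserHelper flow (the account name and password are illustrative):

  from win32security import SE_SERVICE_LOGON_NAME

  helper = UserHelper()
  status, message = helper.create_user("hadoop", "Passw0rd!")
  if status == UserHelper.ACTION_FAILED:
    raise Exception(message)
  # grant the freshly created account the right to log on as a service
  helper.add_user_privilege("hadoop", SE_SERVICE_LOGON_NAME)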
diff --git a/ambari-common/src/main/python/ambari_commons/resources/os_family.json b/ambari-common/src/main/python/ambari_commons/resources/os_family.json
index 2f2abcca6e..ac8b759d7a 100644
--- a/ambari-common/src/main/python/ambari_commons/resources/os_family.json
+++ b/ambari-common/src/main/python/ambari_commons/resources/os_family.json
@@ -41,5 +41,16 @@
"versions": [
11
]
+ },
+ "winsrv": {
+ "distro": [
+ "win2008server",
+ "win2008serverr2",
+ "win2012server",
+ "win2012serverr2"
+ ],
+ "versions": [
+ 6
+ ]
}
}
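
All four server SKUs report NT kernel major version 6 (2008 = 6.0, 2008 R2 = 6.1, 2012 = 6.2, 2012 R2 = 6.3), so the single "6" entry covers the whole winsrv distro list. A hypothetical mapping, for illustration only:

  WIN_DISTRO_BY_VERSION = {(6, 0): "win2008server", (6, 1): "win2008serverr2",
                           (6, 2): "win2012server", (6, 3): "win2012serverr2"}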
diff --git a/ambari-common/src/main/python/ambari_commons/str_utils.py b/ambari-common/src/main/python/ambari_commons/str_utils.py
new file mode 100644
index 0000000000..9a9e954492
--- /dev/null
+++ b/ambari-common/src/main/python/ambari_commons/str_utils.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+def compress_backslashes(s):
+ s1 = s
+  while '\\\\' in s1:
+ s1 = s1.replace('\\\\', '\\')
+ return s1
+
+def ensure_double_backslashes(s):
+ s1 = compress_backslashes(s)
+ s2 = s1.replace('\\', '\\\\')
+ return s2
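
Expected behaviour, for illustration (the arguments are Python source literals, so each '\\' is a single backslash in the actual string):

  print compress_backslashes('C:\\\\Hadoop\\\\logs')    # C:\Hadoop\logs
  print ensure_double_backslashes('C:\\Hadoop\\logs')   # C:\\Hadoop\\logs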
diff --git a/ambari-common/src/main/python/resource_management/core/logger.py b/ambari-common/src/main/python/resource_management/core/logger.py
index 5e45e94800..3550247fb6 100644
--- a/ambari-common/src/main/python/resource_management/core/logger.py
+++ b/ambari-common/src/main/python/resource_management/core/logger.py
@@ -26,7 +26,7 @@ from resource_management.libraries.script.config_dictionary import UnknownConfig
class Logger:
logger = logging.getLogger("resource_management")
-
+
# unprotected_strings : protected_strings map
sensitive_strings = {}
@@ -41,8 +41,8 @@ class Logger:
@staticmethod
def info(text):
Logger.logger.info(Logger.get_protected_text(text))
-
- @staticmethod
+
+ @staticmethod
def debug(text):
Logger.logger.debug(Logger.get_protected_text(text))
@@ -57,11 +57,11 @@ class Logger:
@staticmethod
def info_resource(resource):
Logger.info(Logger.get_protected_text(Logger._get_resource_repr(resource)))
-
- @staticmethod
+
+ @staticmethod
def debug_resource(resource):
Logger.debug(Logger.get_protected_text(Logger._get_resource_repr(resource)))
-
+
@staticmethod
def get_protected_text(text):
"""
@@ -69,17 +69,17 @@ class Logger:
"""
for unprotected_string, protected_string in Logger.sensitive_strings.iteritems():
text = text.replace(unprotected_string, protected_string)
-
+
return text
-
- @staticmethod
+
+ @staticmethod
def _get_resource_repr(resource):
MESSAGE_MAX_LEN = 256
logger_level = logging._levelNames[Logger.logger.level]
-
+
arguments_str = ""
for x,y in resource.arguments.iteritems():
-
+
# strip unicode 'u' sign
if isinstance(y, unicode):
# don't show long messages
@@ -87,7 +87,7 @@ class Logger:
y = '...'
val = repr(y).lstrip('u')
# don't show dicts of configurations
- # usually too long
+ # usually too long
elif logger_level != 'DEBUG' and isinstance(y, dict):
val = "..."
# for configs which didn't come
@@ -95,14 +95,17 @@ class Logger:
val = "[EMPTY]"
# correctly output 'mode' (as they are octal values like 0755)
elif y and x == 'mode':
- val = oct(y)
+ try:
+ val = oct(y)
+ except:
+ val = repr(y)
else:
val = repr(y)
-
-
+
+
arguments_str += "'{0}': {1}, ".format(x, val)
-
- if arguments_str:
+
+ if arguments_str:
arguments_str = arguments_str[:-2]
-
- return unicode("{0} {{{1}}}").format(resource, arguments_str) \ No newline at end of file
+
+ return unicode("{0} {{{1}}}").format(resource, arguments_str)
diff --git a/ambari-common/src/main/python/resource_management/core/providers/__init__.py b/ambari-common/src/main/python/resource_management/core/providers/__init__.py
index 67ca483d00..7f9733681a 100644
--- a/ambari-common/src/main/python/resource_management/core/providers/__init__.py
+++ b/ambari-common/src/main/python/resource_management/core/providers/__init__.py
@@ -50,6 +50,12 @@ PROVIDERS = dict(
ubuntu=dict(
Package="resource_management.core.providers.package.apt.AptProvider",
),
+ winsrv=dict(
+ Service="resource_management.core.providers.windows.service.ServiceProvider",
+ Execute="resource_management.core.providers.windows.system.ExecuteProvider",
+ File="resource_management.core.providers.windows.system.FileProvider",
+ Directory="resource_management.core.providers.windows.system.DirectoryProvider"
+ ),
default=dict(
File="resource_management.core.providers.system.FileProvider",
Directory="resource_management.core.providers.system.DirectoryProvider",
diff --git a/ambari-common/src/main/python/resource_management/core/providers/windows/__init__.py b/ambari-common/src/main/python/resource_management/core/providers/windows/__init__.py
new file mode 100644
index 0000000000..b0b988b18b
--- /dev/null
+++ b/ambari-common/src/main/python/resource_management/core/providers/windows/__init__.py
@@ -0,0 +1,20 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+""" \ No newline at end of file
diff --git a/ambari-common/src/main/python/resource_management/core/providers/windows/service.py b/ambari-common/src/main/python/resource_management/core/providers/windows/service.py
new file mode 100644
index 0000000000..cdf31379a9
--- /dev/null
+++ b/ambari-common/src/main/python/resource_management/core/providers/windows/service.py
@@ -0,0 +1,65 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from resource_management.core.providers import Provider
+from resource_management.core.base import Fail
+import win32service
+import time
+
+
+_schSCManager = win32service.OpenSCManager(None, None, win32service.SC_MANAGER_ALL_ACCESS)
+
+
+class ServiceProvider(Provider):
+ def action_start(self):
+ self._service_handle = self._service_handle if hasattr(self, "_service_handle") else \
+ win32service.OpenService(_schSCManager, self.resource.service_name, win32service.SERVICE_ALL_ACCESS)
+ if not self.status():
+ win32service.StartService(self._service_handle, None)
+ self.wait_status(win32service.SERVICE_RUNNING)
+
+ def action_stop(self):
+ self._service_handle = self._service_handle if hasattr(self, "_service_handle") else \
+ win32service.OpenService(_schSCManager, self.resource.service_name, win32service.SERVICE_ALL_ACCESS)
+ if self.status():
+ win32service.ControlService(self._service_handle, win32service.SERVICE_CONTROL_STOP)
+ self.wait_status(win32service.SERVICE_STOPPED)
+
+ def action_restart(self):
+ self._service_handle = win32service.OpenService(_schSCManager, self.resource.service_name,
+ win32service.SERVICE_ALL_ACCESS)
+ self.action_stop()
+ self.action_start()
+
+ def action_reload(self):
+ raise Fail("Reload for Service resource not supported on windows")
+
+ def status(self):
+ if win32service.QueryServiceStatusEx(self._service_handle)["CurrentState"] == win32service.SERVICE_RUNNING:
+ return True
+ return False
+
+ def get_current_status(self):
+ return win32service.QueryServiceStatusEx(self._service_handle)["CurrentState"]
+
+ def wait_status(self, status, timeout=5):
+ begin = time.time()
+ while self.get_current_status() != status and (timeout == 0 or time.time() - begin < timeout):
+      time.sleep(1)
+    if self.get_current_status() != status:
+      raise Fail("Service %s did not reach the expected state within %d seconds" % (self.resource.service_name, timeout))
diff --git a/ambari-common/src/main/python/resource_management/core/providers/windows/system.py b/ambari-common/src/main/python/resource_management/core/providers/windows/system.py
new file mode 100644
index 0000000000..e7a98fc432
--- /dev/null
+++ b/ambari-common/src/main/python/resource_management/core/providers/windows/system.py
@@ -0,0 +1,382 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management.core.providers import Provider
+from resource_management.core.logger import Logger
+from resource_management.core.base import Fail
+from resource_management.core import ExecuteTimeoutException
+import time
+import os
+import subprocess
+import shutil
+from resource_management.libraries.script import Script
+import win32con
+from win32security import *
+from win32api import *
+from winerror import ERROR_INVALID_HANDLE
+from win32profile import CreateEnvironmentBlock
+from win32process import GetExitCodeProcess, STARTF_USESTDHANDLES, STARTUPINFO, CreateProcessAsUser
+from win32event import WaitForSingleObject, INFINITE
+from win32security import *
+import msvcrt
+import tempfile
+
+def _create_tmp_files(env=None):
+ dirname = None
+ if env is None:
+ env = os.environ
+
+ for env_var_name in 'TMPDIR', 'TEMP', 'TMP':
+ if env.has_key(env_var_name):
+ dirname = env[env_var_name]
+ if dirname and os.path.exists(dirname):
+ break
+
+ if dirname is None:
+ for dirname2 in r'c:\temp', r'c:\tmp', r'\temp', r'\tmp':
+ try:
+ os.makedirs(dirname2)
+ dirname = dirname2
+ break
+ except:
+ pass
+
+ if dirname is None:
+ raise Exception('Unable to create temp dir. Insufficient access rights.')
+
+ out_file = tempfile.TemporaryFile(mode="r+b", dir=dirname)
+ err_file = tempfile.TemporaryFile(mode="r+b", dir=dirname)
+ return (msvcrt.get_osfhandle(out_file.fileno()),
+ msvcrt.get_osfhandle(err_file.fileno()),
+ out_file,
+ err_file)
+
+
+def _get_files_output(out, err):
+ out.seek(0)
+ err.seek(0)
+ return out.read().strip(), err.read().strip()
+
+
+def _safe_duplicate_handle(h):
+ try:
+ h = DuplicateHandle(GetCurrentProcess(),
+ h,
+ GetCurrentProcess(),
+ 0,
+ True,
+ win32con.DUPLICATE_SAME_ACCESS)
+ return True, h
+ except Exception as exc:
+ if exc.winerror == ERROR_INVALID_HANDLE:
+ return True, None
+ return False, None
+
+
+def _merge_env(env1, env2, merge_keys=['PYTHONPATH']):
+ """
+  Merge env2 into env1. Variables named in merge_keys are additionally combined with the
+  current process environment, joining the values from env1, env2 and os.environ with the
+  system path separator.
+ :param env1: first environment, usually returned by CreateEnvironmentBlock
+ :param env2: custom environment
+ :param merge_keys: env variables to merge as PATH
+ :return: merged environment
+ """
+ env1 = dict(env1) # copy to new dict in case env1 is os.environ
+ if env2:
+ for key, value in env2.iteritems():
+ if not key in merge_keys:
+ env1[key] = value
+  # transform keys and values to str (windows cannot accept unicode)
+ result_env = {}
+ for key, value in env1.iteritems():
+ if not key in merge_keys:
+ result_env[str(key)] = str(value)
+ #merge keys from merge_keys
+ def put_values(key, env, result):
+ if env and key in env:
+ result.extend(env[key].split(os.pathsep))
+
+ for key in merge_keys:
+ all_values = []
+ for env in [env1, env2, os.environ]:
+ put_values(key, env, all_values)
+ result_env[str(key)] = str(os.pathsep.join(set(all_values)))
+ return result_env
+
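# Editorial sketch of the merge semantics (not part of the patch):
#   base  = {'PATH': r'C:\Windows', 'PYTHONPATH': r'C:\base'}
#   extra = {'JAVA_HOME': r'C:\java', 'PYTHONPATH': r'C:\extra'}
#   merged = _merge_env(base, extra)
# JAVA_HOME is copied over verbatim, while PYTHONPATH joins C:\base, C:\extra
# and the current process value with os.pathsep.
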
+def AdjustPrivilege(htoken, priv, enable = 1):
+ # Get the ID for the privilege.
+ privId = LookupPrivilegeValue(None, priv)
+ # Now obtain the privilege for this token.
+ # Create a list of the privileges to be added.
+ privState = SE_PRIVILEGE_ENABLED if enable else 0
+ newPrivileges = [(privId, privState)]
+ # and make the adjustment.
+ AdjustTokenPrivileges(htoken, 0, newPrivileges)
+
+def QueryPrivilegeState(hToken, priv):
+ # Get the ID for the privilege.
+ privId = LookupPrivilegeValue(None, priv)
+ privList = GetTokenInformation(hToken, TokenPrivileges)
+ privState = 0
+ for (id, attr) in privList:
+ if id == privId:
+ privState = attr
+ Logger.debug('Privilege state: {}={} ({}) Enabled={}'.format(privId, priv, LookupPrivilegeDisplayName(None, priv), privState))
+ return privState
+
+# Execute command. As the windows hdp stack heavily relies on a proper environment, it is better to reload a
+# fresh environment on every execution. The env argument will be merged with the fresh environment for the user.
+def _call_command(command, logoutput=False, cwd=None, env=None, wait_for_finish=True, timeout=None, user=None):
+ # TODO implement timeout, wait_for_finish
+ Logger.info("Executing %s" % (command))
+ if user:
+ proc_token = OpenProcessToken(GetCurrentProcess(), TOKEN_QUERY | TOKEN_ADJUST_PRIVILEGES)
+
+ old_states = []
+
+ privileges = [
+ SE_ASSIGNPRIMARYTOKEN_NAME,
+ SE_INCREASE_QUOTA_NAME,
+ ]
+
+ for priv in privileges:
+ old_states.append(QueryPrivilegeState(proc_token, priv))
+ AdjustPrivilege(proc_token, priv)
+ QueryPrivilegeState(proc_token, priv)
+
+ user_token = LogonUser(user, ".", Script.get_password(user), win32con.LOGON32_LOGON_SERVICE,
+ win32con.LOGON32_PROVIDER_DEFAULT)
+ env_token = DuplicateTokenEx(user_token, SecurityIdentification, TOKEN_QUERY, TokenPrimary)
+ # getting updated environment for impersonated user and merge it with custom env
+ current_env = CreateEnvironmentBlock(env_token, False)
+ current_env = _merge_env(current_env, env)
+
+ si = STARTUPINFO()
+ out_handle, err_handle, out_file, err_file = _create_tmp_files(current_env)
+ ok, si.hStdInput = _safe_duplicate_handle(GetStdHandle(STD_INPUT_HANDLE))
+ if not ok:
+ raise Exception("Unable to create StdInput for child process")
+ ok, si.hStdOutput = _safe_duplicate_handle(out_handle)
+ if not ok:
+ raise Exception("Unable to create StdOut for child process")
+ ok, si.hStdError = _safe_duplicate_handle(err_handle)
+ if not ok:
+ raise Exception("Unable to create StdErr for child process")
+
+ Logger.debug("Redirecting stdout to '{}', stderr to '{}'".format(out_file.name, err_file.name))
+
+ si.dwFlags = win32con.STARTF_USESTDHANDLES
+ si.lpDesktop = ""
+
+ try:
+ info = CreateProcessAsUser(user_token, None, command, None, None, 1, win32con.CREATE_NO_WINDOW, current_env, cwd, si)
+ hProcess, hThread, dwProcessId, dwThreadId = info
+ hThread.Close()
+
+ try:
+ WaitForSingleObject(hProcess, INFINITE)
+ except KeyboardInterrupt:
+ pass
+ out, err = _get_files_output(out_file, err_file)
+ code = GetExitCodeProcess(hProcess)
+ finally:
+ for priv in privileges:
+ old_state = old_states.pop(0)
+ AdjustPrivilege(proc_token, priv, old_state)
+ else:
+ # getting updated environment for current process and merge it with custom env
+ cur_token = OpenProcessToken(GetCurrentProcess(), TOKEN_QUERY)
+ current_env = CreateEnvironmentBlock(cur_token, False)
+ current_env = _merge_env(current_env, env)
+ proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
+ cwd=cwd, env=current_env, shell=False)
+ out, err = proc.communicate()
+ code = proc.returncode
+
+ if logoutput and out:
+ Logger.info(out)
+ if logoutput and err:
+ Logger.info(err)
+ return code, out, err
+
+
+# see msdn Icacls doc for rights
+def _set_file_acl(file, user, rights):
+ acls_modify_cmd = "icacls {0} /grant {1}:{2}".format(file, user, rights)
+ acls_remove_cmd = "icacls {0} /remove {1}".format(file, user)
+ code, out, err = _call_command(acls_remove_cmd)
+ if code != 0:
+ raise Fail("Can not remove rights for path {0} and user {1}".format(file, user))
+ code, out, err = _call_command(acls_modify_cmd)
+ if code != 0:
+ raise Fail("Can not set rights {0} for path {1} and user {2}".format(file, user))
+ else:
+ return
+
+
+class FileProvider(Provider):
+ def action_create(self):
+ path = self.resource.path
+
+ if os.path.isdir(path):
+ raise Fail("Applying %s failed, directory with name %s exists" % (self.resource, path))
+
+ dirname = os.path.dirname(path)
+ if not os.path.isdir(dirname):
+ raise Fail("Applying %s failed, parent directory %s doesn't exist" % (self.resource, dirname))
+
+ write = False
+ content = self._get_content()
+ if not os.path.exists(path):
+ write = True
+ reason = "it doesn't exist"
+ elif self.resource.replace:
+ if content is not None:
+ with open(path, "rb") as fp:
+ old_content = fp.read()
+ if content != old_content:
+ write = True
+ reason = "contents don't match"
+ if self.resource.backup:
+ self.resource.env.backup_file(path)
+
+ if write:
+ Logger.info("Writing %s because %s" % (self.resource, reason))
+ with open(path, "wb") as fp:
+ if content:
+ fp.write(content)
+
+ if self.resource.owner and self.resource.mode:
+ _set_file_acl(self.resource.path, self.resource.owner, self.resource.mode)
+
+ def action_delete(self):
+ path = self.resource.path
+
+ if os.path.isdir(path):
+ raise Fail("Applying %s failed, %s is directory not file!" % (self.resource, path))
+
+ if os.path.exists(path):
+ Logger.info("Deleting %s" % self.resource)
+ os.unlink(path)
+
+ def _get_content(self):
+ content = self.resource.content
+ if content is None:
+ return None
+ elif isinstance(content, basestring):
+ return content
+ elif hasattr(content, "__call__"):
+ return content()
+ raise Fail("Unknown source type for %s: %r" % (self, content))
+
+
+class ExecuteProvider(Provider):
+ def action_run(self):
+ if self.resource.creates:
+ if os.path.exists(self.resource.creates):
+ return
+
+ Logger.debug("Executing %s" % self.resource)
+
+ if self.resource.path != []:
+ if not self.resource.environment:
+ self.resource.environment = {}
+
+ self.resource.environment['PATH'] = os.pathsep.join(self.resource.path)
+
+ for i in range(0, self.resource.tries):
+ try:
+ code, _, _ = _call_command(self.resource.command, logoutput=self.resource.logoutput,
+ cwd=self.resource.cwd, env=self.resource.environment,
+ wait_for_finish=self.resource.wait_for_finish,
+ timeout=self.resource.timeout, user=self.resource.user)
+ if code != 0 and not self.resource.ignore_failures:
+ raise Fail("Failed to execute " + self.resource.command)
+ break
+ except Fail as ex:
+ if i == self.resource.tries - 1: # last try
+ raise ex
+ else:
+ Logger.info("Retrying after %d seconds. Reason: %s" % (self.resource.try_sleep, str(ex)))
+ time.sleep(self.resource.try_sleep)
+ except ExecuteTimeoutException:
+ err_msg = ("Execution of '%s' was killed due timeout after %d seconds") % (
+ self.resource.command, self.resource.timeout)
+
+ if self.resource.on_timeout:
+ Logger.info("Executing '%s'. Reason: %s" % (self.resource.on_timeout, err_msg))
+ _call_command(self.resource.on_timeout)
+ else:
+ raise Fail(err_msg)
+
+
+class DirectoryProvider(Provider):
+ def action_create(self):
+ path = DirectoryProvider._trim_uri(self.resource.path)
+ if not os.path.exists(path):
+ Logger.info("Creating directory %s" % self.resource)
+ if self.resource.recursive:
+ os.makedirs(path)
+ else:
+ dirname = os.path.dirname(path)
+ if not os.path.isdir(dirname):
+ raise Fail("Applying %s failed, parent directory %s doesn't exist" % (self.resource, dirname))
+
+ os.mkdir(path)
+
+ if not os.path.isdir(path):
+ raise Fail("Applying %s failed, file %s already exists" % (self.resource, path))
+
+ if self.resource.owner and self.resource.mode:
+ _set_file_acl(path, self.resource.owner, self.resource.mode)
+
+ def action_delete(self):
+ path = self.resource.path
+ if os.path.exists(path):
+ if not os.path.isdir(path):
+ raise Fail("Applying %s failed, %s is not a directory" % (self.resource, path))
+
+ Logger.info("Removing directory %s and all its content" % self.resource)
+ shutil.rmtree(path)
+
+ @staticmethod
+ def _trim_uri(file_uri):
+ if file_uri.startswith("file:///"):
+ return file_uri[8:]
+ return file_uri
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/__init__.py b/ambari-common/src/main/python/resource_management/libraries/functions/__init__.py
index 36b8e510ac..9b32b92cc8 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/__init__.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/__init__.py
@@ -20,6 +20,8 @@ Ambari Agent
"""
+import platform
+
from resource_management.libraries.functions.default import *
from resource_management.libraries.functions.format import *
from resource_management.libraries.functions.get_kinit_path import *
@@ -31,3 +33,10 @@ from resource_management.libraries.functions.get_port_from_url import *
from resource_management.libraries.functions.hive_check import *
from resource_management.libraries.functions.version import *
from resource_management.libraries.functions.format_jvm_option import *
+
+IS_WINDOWS = platform.system() == "Windows"
+
+if IS_WINDOWS:
+ from resource_management.libraries.functions.windows_service_utils import *
+ from resource_management.libraries.functions.install_hdp_msi import *
+ from resource_management.libraries.functions.reload_windows_env import *
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/default.py b/ambari-common/src/main/python/resource_management/libraries/functions/default.py
index 733c03a555..16782de8a6 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/default.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/default.py
@@ -20,7 +20,7 @@ Ambari Agent
"""
-__all__ = ["default"]
+__all__ = ['default', 'default_string']
from resource_management.libraries.script import Script
from resource_management.libraries.script.config_dictionary import UnknownConfiguration
from resource_management.core.logger import Logger
@@ -37,4 +37,8 @@ def default(name, default_value):
Logger.debug("Cannot find configuration: '%s'. Using '%s' value as default" % (name, default_value))
return default_value
- return curr_dict \ No newline at end of file
+ return curr_dict
+
+def default_string(name, default_value, delimiter):
+ default_list = default(name, default_value)
+ return delimiter.join(default_list)
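+# Editorial example (not part of the patch); the config key below is hypothetical:
+#   default_string("/configurations/foo/hosts", ["h1", "h2"], ",")  ->  "h1,h2"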
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/get_unique_id_and_date.py b/ambari-common/src/main/python/resource_management/libraries/functions/get_unique_id_and_date.py
index a79a1e5ce9..bb68270a51 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/get_unique_id_and_date.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/get_unique_id_and_date.py
@@ -23,12 +23,19 @@ Ambari Agent
__all__ = ["get_unique_id_and_date"]
import datetime
from resource_management.core import shell
-
+from ambari_commons import os_check
def get_unique_id_and_date():
+ if os_check.OSCheck.is_windows_os():
+ from ambari_commons.os_windows import run_os_command
+ code, out, err = run_os_command("cmd /c vol C:")
+ for line in out.splitlines():
+ if line.startswith(" Volume Serial Number is"):
+ id = line[25:]
+  else:
+    out = shell.checked_call("hostid")[1].split('\n')[-1] # bugfix: take the last line (stdin is not tty part cut)
+    id = out.strip()
- now = datetime.datetime.now()
- date = now.strftime("%M%d%y")
+ now = datetime.datetime.now()
+ date = now.strftime("%M%d%y")
- return "id{id}_date{date}".format(id=id, date=date)
+ return "id{id}_date{date}".format(id=id, date=date)
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/install_hdp_msi.py b/ambari-common/src/main/python/resource_management/libraries/functions/install_hdp_msi.py
new file mode 100644
index 0000000000..a7c2fe2432
--- /dev/null
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/install_hdp_msi.py
@@ -0,0 +1,182 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from ambari_commons import os_utils
+from ambari_commons.inet_utils import download_file
+from ambari_commons.os_windows import SystemWideLock
+
+from resource_management.core.resources.system import Execute
+from resource_management.core.resources.system import File
+from resource_management.core.logger import Logger
+from resource_management.core.exceptions import Fail
+from resource_management.libraries.functions.reload_windows_env import reload_windows_env
+from resource_management.libraries.functions.windows_service_utils import check_windows_service_exists
+import socket
+import os
+import glob
+
+
+__all__ = ['install_windows_msi']
+
+msi_save_dir = None
+hdp_log_dir = "c:\\hadoop\\logs"
+hdp_data_dir = "c:\\hadoopDefaultData"
+local_host = socket.getfqdn()
+db_flavor = "DERBY"
+cluster_properties = """#Log directory
+HDP_LOG_DIR={hdp_log_dir}
+
+#Data directory
+HDP_DATA_DIR={hdp_data_dir}
+
+#hosts
+NAMENODE_HOST={local_host}
+SECONDARY_NAMENODE_HOST={local_host}
+RESOURCEMANAGER_HOST={local_host}
+HIVE_SERVER_HOST={local_host}
+OOZIE_SERVER_HOST={local_host}
+WEBHCAT_HOST={local_host}
+SLAVE_HOSTS={local_host}
+ZOOKEEPER_HOSTS={local_host}
+CLIENT_HOSTS={local_host}
+HBASE_MASTER={local_host}
+HBASE_REGIONSERVERS={local_host}
+FLUME_HOSTS={local_host}
+FALCON_HOST={local_host}
+KNOX_HOST={local_host}
+STORM_NIMBUS={local_host}
+STORM_SUPERVISORS={local_host}
+
+#Database host
+DB_FLAVOR={db_flavor}
+DB_HOSTNAME={local_host}
+DB_PORT=1527
+
+#Hive properties
+HIVE_DB_NAME=hive
+HIVE_DB_USERNAME=hive
+HIVE_DB_PASSWORD=hive
+
+#Oozie properties
+OOZIE_DB_NAME=oozie
+OOZIE_DB_USERNAME=oozie
+OOZIE_DB_PASSWORD=oozie
+"""
+
+INSTALL_MSI_CMD = 'cmd /C start /wait msiexec /qn /i {hdp_msi_path} /lv {hdp_log_path} MSIUSEREALADMINDETECTION=1 ' \
+ 'HDP_LAYOUT={hdp_layout_path} DESTROY_DATA=yes HDP_USER_PASSWORD={hadoop_password_arg} HDP=yes ' \
+ 'KNOX=yes KNOX_MASTER_SECRET="AmbariHDP2Windows" FALCON=yes STORM=yes HBase=yes STORM=yes FLUME=yes'
+CREATE_SERVICE_SCRIPT = os.path.abspath("sbin\createservice.ps1")
+CREATE_SERVICE_CMD = 'cmd /C powershell -File "{script}" -username hadoop -password "{password}" -servicename ' \
+ '{servicename} -hdpresourcesdir "{resourcedir}" -servicecmdpath "{servicecmd}"'
+INSTALL_MARKER_OK = "msi.installed"
+INSTALL_MARKER_FAILED = "msi.failed"
+_working_dir = None
+
+
+def _ensure_services_created(hadoop_password):
+ resource_dir_hdfs = os.path.join(os.environ["HADOOP_HDFS_HOME"], "bin")
+ service_cmd_hdfs = os.path.join(os.environ["HADOOP_HDFS_HOME"], "bin", "hdfs.cmd")
+ if not check_windows_service_exists("journalnode"):
+ Execute(CREATE_SERVICE_CMD.format(script=CREATE_SERVICE_SCRIPT, password=hadoop_password, servicename="journalnode",
+ resourcedir=resource_dir_hdfs, servicecmd=service_cmd_hdfs), logoutput=True)
+ if not check_windows_service_exists("zkfc"):
+ Execute(CREATE_SERVICE_CMD.format(script=CREATE_SERVICE_SCRIPT, password=hadoop_password, servicename="zkfc",
+ resourcedir=resource_dir_hdfs, servicecmd=service_cmd_hdfs), logoutput=True)
+
+
+# creating symlinks to services folders to avoid using stack-dependent paths
+def _create_symlinks():
+ # folders
+ Execute("cmd /c mklink /d %HADOOP_NODE%\\hadoop %HADOOP_HOME%")
+ Execute("cmd /c mklink /d %HADOOP_NODE%\\hive %HIVE_HOME%")
+ # files pairs (symlink_path, path_template_to_target_file), use * to replace file version
+ links_pairs = [
+ ("%HADOOP_HOME%\\share\\hadoop\\tools\\lib\\hadoop-streaming.jar",
+ "%HADOOP_HOME%\\share\\hadoop\\tools\\lib\\hadoop-streaming-*.jar"),
+ ("%HIVE_HOME%\\hcatalog\\share\\webhcat\\svr\\lib\\hive-webhcat.jar",
+ "%HIVE_HOME%\\hcatalog\\share\\webhcat\\svr\\lib\\hive-webhcat-*.jar"),
+ ("%HIVE_HOME%\\lib\\zookeeper.jar", "%HIVE_HOME%\\lib\\zookeeper-*.jar")
+ ]
+ for link_pair in links_pairs:
+ link, target = link_pair
+ target = glob.glob(os.path.expandvars(target))[0].replace("\\\\", "\\")
+ Execute('cmd /c mklink "{0}" "{1}"'.format(link, target))
+
+
+# check if services exists and marker file present
+def _is_msi_installed():
+ return os.path.exists(os.path.join(_working_dir, INSTALL_MARKER_OK)) and check_windows_service_exists("namenode")
+
+
+# check if msi was installed correctly and raise Fail in case of broken install
+def _validate_msi_install():
+ if not _is_msi_installed() and os.path.exists(os.path.join(_working_dir, INSTALL_MARKER_FAILED)):
+ Fail("Current or previous hdp.msi install failed. Check hdp.msi install logs")
+ return _is_msi_installed()
+
+
+def _write_marker():
+ if check_windows_service_exists("namenode"):
+ open(os.path.join(_working_dir, INSTALL_MARKER_OK), "w").close()
+ else:
+ open(os.path.join(_working_dir, INSTALL_MARKER_FAILED), "w").close()
+
+
+def install_windows_msi(msi_url, save_dir, save_file, hadoop_password):
+ global _working_dir
+ _working_dir = save_dir
+ save_dir = os.path.abspath(save_dir)
+ msi_save_dir = save_dir
+  # system-wide lock to prevent simultaneous installations (e.g. when the first task failed on timeout)
+ install_lock = SystemWideLock("hdp_msi_lock")
+ try:
+ # try to acquire lock
+ if not install_lock.lock():
+ Logger.info("Some other task currently installing hdp.msi, waiting for 10 min for finish")
+ if not install_lock.lock(600000):
+ raise Fail("Timeout on acquiring lock")
+ if _validate_msi_install():
+ Logger.info("hdp.msi already installed")
+ return
+
+ # install msi
+ download_file(msi_url, os.path.join(msi_save_dir, save_file))
+ File(os.path.join(msi_save_dir, "properties.txt"), content=cluster_properties.format(hdp_log_dir=hdp_log_dir,
+ hdp_data_dir=hdp_data_dir,
+ local_host=local_host,
+ db_flavor=db_flavor))
+ hdp_msi_path = os_utils.quote_path(os.path.join(save_dir, "hdp.msi"))
+ hdp_log_path = os_utils.quote_path(os.path.join(save_dir, "hdp.log"))
+ hdp_layout_path = os_utils.quote_path(os.path.join(save_dir, "properties.txt"))
+ hadoop_password_arg = os_utils.quote_path(hadoop_password)
+
+ Execute(
+ INSTALL_MSI_CMD.format(hdp_msi_path=hdp_msi_path, hdp_log_path=hdp_log_path, hdp_layout_path=hdp_layout_path,
+ hadoop_password_arg=hadoop_password_arg))
+ reload_windows_env()
+    # create additional services manually due to hdp.msi limitations
+ _ensure_services_created(hadoop_password)
+ _create_symlinks()
+ # finalizing install
+ _write_marker()
+ _validate_msi_install()
+ finally:
+ install_lock.unlock()
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/reload_windows_env.py b/ambari-common/src/main/python/resource_management/libraries/functions/reload_windows_env.py
new file mode 100644
index 0000000000..f6f3626f51
--- /dev/null
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/reload_windows_env.py
@@ -0,0 +1,48 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from _winreg import (OpenKey, EnumValue, HKEY_LOCAL_MACHINE, KEY_READ, CloseKey)
+import os
+
+default_whitelist = ["FALCON_CONF_DIR", "FALCON_DATA_DIR", "FALCON_HOME", "FALCON_LOG_DIR", "FLUME_HOME",
+ "HADOOP_COMMON_HOME", "HADOOP_CONF_DIR", "HADOOP_HDFS_HOME", "HADOOP_HOME", "HADOOP_LOG_DIR",
+ "HADOOP_MAPRED_HOME", "HADOOP_NODE", "HADOOP_NODE_INSTALL_ROOT", "HADOOP_PACKAGES",
+ "HADOOP_SETUP_TOOLS", "HADOOP_YARN_HOME", "HBASE_CONF_DIR", "HBASE_HOME", "HCAT_HOME",
+ "HDFS_AUDIT_LOGGER", "HDFS_DATA_DIR", "HIVE_CONF_DIR", "HIVE_HOME", "HIVE_LIB_DIR", "HIVE_LOG_DIR",
+ "HIVE_OPTS", "KNOX_CONF_DIR", "KNOX_HOME", "KNOX_LOG_DIR", "MAHOUT_HOME", "OOZIE_DATA",
+ "OOZIE_HOME", "OOZIE_LOG", "OOZIE_ROOT", "PIG_HOME", "SQOOP_HOME", "STORM_CONF_DIR", "STORM_HOME",
+ "STORM_LOG_DIR", "WEBHCAT_CONF_DIR", "YARN_LOG_DIR", "ZOOKEEPER_CONF_DIR", "ZOOKEEPER_HOME",
+ "ZOOKEEPER_LIB_DIR", "ZOO_LOG_DIR"]
+def reload_windows_env(keys_white_list=default_whitelist):
+ root = HKEY_LOCAL_MACHINE
+ subkey = r'SYSTEM\CurrentControlSet\Control\Session Manager\Environment'
+ key = OpenKey(root, subkey, 0, KEY_READ)
+ finish = False
+ index = 0
+ while not finish:
+ try:
+ _key, _value, _ = EnumValue(key, index)
+      if _key in keys_white_list:
+ os.environ[_key] = _value
+ except WindowsError:
+ finish = True
+ index += 1
+ CloseKey(key)
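+
+# Example: after hdp.msi sets HADOOP_HOME machine-wide, calling reload_windows_env()
+# makes os.environ["HADOOP_HOME"] reflect the new registry value in this process.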
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/tar_archive.py b/ambari-common/src/main/python/resource_management/libraries/functions/tar_archive.py
new file mode 100644
index 0000000000..efbf933ccc
--- /dev/null
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/tar_archive.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import os
+import tarfile
+from contextlib import closing
+
+def archive_dir(output_filename, input_dir):
+ with closing(tarfile.open(output_filename, "w:gz")) as tar:
+    tar.add(input_dir, arcname=os.path.basename("."))
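+
+# Example (hypothetical paths): archive_dir("/tmp/configs.tar.gz", "/tmp/conf_dir")
+# packs the contents of /tmp/conf_dir into a gzip-compressed tarball; closing()
+# guarantees the archive is flushed and closed even if tar.add() fails.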
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/windows_service_utils.py b/ambari-common/src/main/python/resource_management/libraries/functions/windows_service_utils.py
new file mode 100644
index 0000000000..7d994b761c
--- /dev/null
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/windows_service_utils.py
@@ -0,0 +1,42 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management.core.exceptions import ComponentIsNotRunning
+from resource_management.core.logger import Logger
+__all__ = ['check_windows_service_status', 'check_windows_service_exists']
+
+import win32service
+
+_schSCManager = win32service.OpenSCManager(None, None, win32service.SC_MANAGER_ALL_ACCESS)
+
+def check_windows_service_status(service_name):
+ _service_handle = win32service.OpenService(_schSCManager, service_name, win32service.SERVICE_ALL_ACCESS)
+ if win32service.QueryServiceStatusEx(_service_handle)["CurrentState"] == win32service.SERVICE_STOPPED:
+ raise ComponentIsNotRunning()
+
+def check_windows_service_exists(service_name):
+ typeFilter = win32service.SERVICE_WIN32
+ stateFilter = win32service.SERVICE_STATE_ALL
+ statuses = win32service.EnumServicesStatus(_schSCManager, typeFilter, stateFilter)
+ for (short_name, desc, status) in statuses:
+ if short_name == service_name:
+ return True
+ return False
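+
+# Example: guard a status query with an existence check, since OpenService fails
+# for unknown services:
+#   if check_windows_service_exists("namenode"):
+#     check_windows_service_status("namenode")  # raises ComponentIsNotRunning when stopped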
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/zip_archive.py b/ambari-common/src/main/python/resource_management/libraries/functions/zip_archive.py
new file mode 100644
index 0000000000..cab3627178
--- /dev/null
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/zip_archive.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import os
+import zipfile
+
+def _zip_dir(zip_file, root):
+  for dirname, dirnames, filenames in os.walk(root):
+    for filename in filenames:
+      if len(dirname) > len(root):
+        rel_path = os.path.relpath(dirname, root)
+        arch_name = rel_path + os.sep + filename
+      else:
+        arch_name = filename
+      zip_file.write(os.path.join(dirname, filename), arch_name)
+
+
+def archive_dir(output_filename, input_dir):
+ zipf = zipfile.ZipFile(output_filename, 'w')
+ try:
+ _zip_dir(zipf, input_dir)
+ finally:
+ zipf.close()
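+
+# Example (hypothetical paths): archive_dir("C:\\tmp\\configs.zip", "C:\\tmp\\conf_dir")
+# stores every file under conf_dir with paths relative to conf_dir; the signature
+# mirrors tar_archive.archive_dir, so Script can pick either implementation per OS.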
diff --git a/ambari-common/src/main/python/resource_management/libraries/providers/__init__.py b/ambari-common/src/main/python/resource_management/libraries/providers/__init__.py
index 5ca7bd9eb1..80e0a14ced 100644
--- a/ambari-common/src/main/python/resource_management/libraries/providers/__init__.py
+++ b/ambari-common/src/main/python/resource_management/libraries/providers/__init__.py
@@ -30,6 +30,9 @@ PROVIDERS = dict(
ubuntu=dict(
Repository="resource_management.libraries.providers.repository.UbuntuRepositoryProvider",
),
+  winsrv=dict(),
default=dict(
ExecuteHadoop="resource_management.libraries.providers.execute_hadoop.ExecuteHadoopProvider",
TemplateConfig="resource_management.libraries.providers.template_config.TemplateConfigProvider",
diff --git a/ambari-common/src/main/python/resource_management/libraries/providers/xml_config.py b/ambari-common/src/main/python/resource_management/libraries/providers/xml_config.py
index 87fc657a3e..b5c2b5423d 100644
--- a/ambari-common/src/main/python/resource_management/libraries/providers/xml_config.py
+++ b/ambari-common/src/main/python/resource_management/libraries/providers/xml_config.py
@@ -21,13 +21,14 @@ Ambari Agent
"""
import time
+import os
from resource_management import *
class XmlConfigProvider(Provider):
def action_create(self):
filename = self.resource.filename
xml_config_provider_config_dir = self.resource.conf_dir
-
+
# |e - for html-like escaping of <,>,',"
config_content = InlineTemplate('''<!--{{time.asctime(time.localtime())}}-->
<configuration>
@@ -48,12 +49,12 @@ class XmlConfigProvider(Provider):
{% endfor %}
</configuration>''', extra_imports=[time], configurations_dict=self.resource.configurations,
configuration_attrs=self.resource.configuration_attributes)
-
-
- Logger.info(format("Generating config: {xml_config_provider_config_dir}/{filename}"))
-
+
+ xml_config_dest_file_path = os.path.join(xml_config_provider_config_dir, filename)
+ Logger.info("Generating config: {0}".format(xml_config_dest_file_path))
+
with Environment.get_instance_copy() as env:
- File (format("{xml_config_provider_config_dir}/{filename}"),
+ File (xml_config_dest_file_path,
content = config_content,
owner = self.resource.owner,
group = self.resource.group,
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/script.py b/ambari-common/src/main/python/resource_management/libraries/script/script.py
index 001922d392..39511bc62c 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/script.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/script.py
@@ -17,7 +17,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
-import tarfile
import tempfile
__all__ = ["Script"]
@@ -26,19 +25,24 @@ import os
import sys
import json
import logging
-from contextlib import closing
-
+import platform
from resource_management.libraries.resources import XmlConfig
from resource_management.libraries.resources import PropertiesFile
from resource_management.core.resources import File, Directory
from resource_management.core.source import InlineTemplate
-
from resource_management.core.environment import Environment
from resource_management.core.exceptions import Fail, ClientComponentHasNoStatus, ComponentIsNotRunning
from resource_management.core.resources.packaging import Package
-from resource_management.libraries.script.config_dictionary import ConfigDictionary
+from resource_management.libraries.script.config_dictionary import ConfigDictionary, UnknownConfiguration
+IS_WINDOWS = platform.system() == "Windows"
+if IS_WINDOWS:
+ from resource_management.libraries.functions.install_hdp_msi import install_windows_msi
+ from resource_management.libraries.functions.reload_windows_env import reload_windows_env
+ from resource_management.libraries.functions.zip_archive import archive_dir
+else:
+ from resource_management.libraries.functions.tar_archive import archive_dir
USAGE = """Usage: {0} <COMMAND> <JSON_CONFIG> <BASEDIR> <STROUTPUT> <LOGGING_LEVEL> <TMP_DIR>
@@ -50,6 +54,19 @@ USAGE = """Usage: {0} <COMMAND> <JSON_CONFIG> <BASEDIR> <STROUTPUT> <LOGGING_LEV
<TMP_DIR> temporary directory for executable scripts. Ex: /var/lib/ambari-agent/data/tmp
"""
+_PASSWORD_MAP = {"/configurations/cluster-env/hadoop.user.name": "/configurations/cluster-env/hadoop.user.password"}
+
+def get_path_from_configuration(name, configuration):
+ subdicts = filter(None, name.split('/'))
+
+ for x in subdicts:
+ if x in configuration:
+ configuration = configuration[x]
+ else:
+ return None
+
+ return configuration
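+
+# Example (hypothetical path): get_path_from_configuration("/configurations/cluster-env/hadoop.user.name", config)
+# walks config["configurations"]["cluster-env"]["hadoop.user.name"] and returns None
+# if any segment is missing.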
+
class Script(object):
"""
Executes a command for custom service. stdout and stderr are written to
@@ -91,13 +108,13 @@ class Script(object):
cherr.setFormatter(formatter)
logger.addHandler(cherr)
logger.addHandler(chout)
-
+
# parse arguments
- if len(sys.argv) < 7:
+ if len(sys.argv) < 7:
logger.error("Script expects at least 6 arguments")
print USAGE.format(os.path.basename(sys.argv[0])) # print to stdout
sys.exit(1)
-
+
command_name = str.lower(sys.argv[1])
command_data_file = sys.argv[2]
basedir = sys.argv[3]
@@ -108,11 +125,23 @@ class Script(object):
logging_level_str = logging._levelNames[logging_level]
chout.setLevel(logging_level_str)
logger.setLevel(logging_level_str)
-
+
+    # On Windows we must reload some environment variables manually, because there are
+    # no fixed config paths (like /etc/something/conf on Linux). Env variables created
+    # during one Script execution are not picked up by the agent process, so later
+    # Script executions would not see the new values without an explicit reload.
+    if IS_WINDOWS:
+      reload_windows_env()
+
try:
with open(command_data_file, "r") as f:
pass
Script.config = ConfigDictionary(json.load(f))
+      # load passwords here (used on Windows to impersonate different users)
+      Script.passwords = {}
+      for k, v in _PASSWORD_MAP.iteritems():
+        if get_path_from_configuration(k, Script.config) and get_path_from_configuration(v, Script.config):
+          Script.passwords[get_path_from_configuration(k, Script.config)] = get_path_from_configuration(v, Script.config)
+
except IOError:
logger.exception("Can not read json file with command parameters: ")
sys.exit(1)
@@ -150,6 +179,9 @@ class Script(object):
"""
return Script.config
+ @staticmethod
+ def get_password(user):
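+    # returns the password collected into Script.passwords at startup via
+    # _PASSWORD_MAP (used on Windows to run commands as the given user)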
+ return Script.passwords[user]
@staticmethod
def get_tmp_dir():
@@ -170,28 +202,39 @@ class Script(object):
self.install_packages(env)
- def install_packages(self, env, exclude_packages=[]):
- """
- List of packages that are required< by service is received from the server
- as a command parameter. The method installs all packages
- from this list
- """
- config = self.get_config()
-
- try:
- package_list_str = config['hostLevelParams']['package_list']
- if isinstance(package_list_str,basestring) and len(package_list_str) > 0:
- package_list = json.loads(package_list_str)
- for package in package_list:
- if not package['name'] in exclude_packages:
- name = package['name']
- Package(name)
- except KeyError:
- pass # No reason to worry
-
- #RepoInstaller.remove_repos(config)
-
-
+ if not IS_WINDOWS:
+ def install_packages(self, env, exclude_packages=[]):
+ """
+      The list of packages required by the service is received from the server
+      as a command parameter. The method installs every package from that list.
+ """
+ config = self.get_config()
+ try:
+ package_list_str = config['hostLevelParams']['package_list']
+ if isinstance(package_list_str, basestring) and len(package_list_str) > 0:
+ package_list = json.loads(package_list_str)
+ for package in package_list:
+ if not package['name'] in exclude_packages:
+ name = package['name']
+ Package(name)
+ except KeyError:
+ pass # No reason to worry
+
+ # RepoInstaller.remove_repos(config)
+ else:
+ def install_packages(self, env, exclude_packages=[]):
+ """
+      On Windows the required components ship inside hdp.msi, so instead of
+      installing individual packages this method downloads and installs hdp.msi.
+ """
+ config = self.get_config()
+
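+      # hdp.msi is served by the Ambari server alongside the JDK artifacts; it is
+      # downloaded into the agent cache dir and installed with the hadoop user's password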
+ install_windows_msi(os.path.join(config['hostLevelParams']['jdk_location'], "hdp.msi"),
+ config["hostLevelParams"]["agentCacheDir"], "hdp.msi", self.get_password("hadoop"))
def fail_with_error(self, message):
"""
@@ -239,56 +282,60 @@ class Script(object):
self.fail_with_error('configure method isn\'t implemented')
def generate_configs_get_template_file_content(self, filename, dicts):
- import params
+ config = self.get_config()
content = ''
for dict in dicts.split(','):
- if dict.strip() in params.config['configurations']:
- content += params.config['configurations'][dict.strip()]['content']
+ if dict.strip() in config['configurations']:
+ try:
+ content += config['configurations'][dict.strip()]['content']
+ except Fail:
+ # 'content' section not available in the component client configuration
+ pass
return content
def generate_configs_get_xml_file_content(self, filename, dict):
- import params
- return {'configurations':params.config['configurations'][dict],
- 'configuration_attributes':params.config['configuration_attributes'][dict]}
+ config = self.get_config()
+ return {'configurations':config['configurations'][dict],
+ 'configuration_attributes':config['configuration_attributes'][dict]}
def generate_configs_get_xml_file_dict(self, filename, dict):
- import params
- return params.config['configurations'][dict]
+ config = self.get_config()
+ return config['configurations'][dict]
def generate_configs(self, env):
"""
Generates config files and stores them as an archive in tmp_dir
based on xml_configs_list and env_configs_list from commandParams
"""
- import params
- env.set_params(params)
- xml_configs_list = params.config['commandParams']['xml_configs_list']
- env_configs_list = params.config['commandParams']['env_configs_list']
- properties_configs_list = params.config['commandParams']['properties_configs_list']
-
- conf_tmp_dir = tempfile.mkdtemp()
- output_filename = os.path.join(self.get_tmp_dir(),params.config['commandParams']['output_file'])
+ config = self.get_config()
+
+ xml_configs_list = config['commandParams']['xml_configs_list']
+ env_configs_list = config['commandParams']['env_configs_list']
+ properties_configs_list = config['commandParams']['properties_configs_list']
Directory(self.get_tmp_dir(), recursive=True)
- for file_dict in xml_configs_list:
- for filename, dict in file_dict.iteritems():
- XmlConfig(filename,
- conf_dir=conf_tmp_dir,
- **self.generate_configs_get_xml_file_content(filename, dict)
- )
- for file_dict in env_configs_list:
- for filename,dicts in file_dict.iteritems():
- File(os.path.join(conf_tmp_dir, filename),
- content=InlineTemplate(self.generate_configs_get_template_file_content(filename, dicts)))
-
- for file_dict in properties_configs_list:
- for filename, dict in file_dict.iteritems():
- PropertiesFile(os.path.join(conf_tmp_dir, filename),
- properties=self.generate_configs_get_xml_file_dict(filename, dict)
- )
-
- with closing(tarfile.open(output_filename, "w:gz")) as tar:
- tar.add(conf_tmp_dir, arcname=os.path.basename("."))
- tar.close()
- Directory(conf_tmp_dir, action="delete")
+
+ conf_tmp_dir = tempfile.mkdtemp(dir=self.get_tmp_dir())
+ output_filename = os.path.join(self.get_tmp_dir(), config['commandParams']['output_file'])
+
+ try:
+ for file_dict in xml_configs_list:
+ for filename, dict in file_dict.iteritems():
+ XmlConfig(filename,
+ conf_dir=conf_tmp_dir,
+ **self.generate_configs_get_xml_file_content(filename, dict)
+ )
+ for file_dict in env_configs_list:
+ for filename,dicts in file_dict.iteritems():
+ File(os.path.join(conf_tmp_dir, filename),
+ content=InlineTemplate(self.generate_configs_get_template_file_content(filename, dicts)))
+
+ for file_dict in properties_configs_list:
+ for filename, dict in file_dict.iteritems():
+ PropertiesFile(os.path.join(conf_tmp_dir, filename),
+ properties=self.generate_configs_get_xml_file_dict(filename, dict)
+ )
+ archive_dir(output_filename, conf_tmp_dir)
+ finally:
+ Directory(conf_tmp_dir, action="delete")
diff --git a/ambari-server/conf/unix/ambari.properties b/ambari-server/conf/unix/ambari.properties
index ed1994c559..e5b9e7b9ad 100644
--- a/ambari-server/conf/unix/ambari.properties
+++ b/ambari-server/conf/unix/ambari.properties
@@ -35,6 +35,7 @@ bootstrap.setup_agent.script=/usr/lib/python2.6/site-packages/ambari_server/setu
recommendations.dir=/var/run/ambari-server/stack-recommendations
stackadvisor.script=/var/lib/ambari-server/resources/scripts/stack_advisor.py
server.tmp.dir=/var/lib/ambari-server/tmp
+ambari.python.wrap=ambari-python-wrap
api.authenticate=true
server.connection.max.idle.millis=900000
diff --git a/ambari-server/conf/windows/ambari-env.cmd b/ambari-server/conf/windows/ambari-env.cmd
new file mode 100644
index 0000000000..23600d4330
--- /dev/null
+++ b/ambari-server/conf/windows/ambari-env.cmd
@@ -0,0 +1,19 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements. See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License. You may obtain a copy of the License at
+rem
+rem http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+
+set AMBARI_PASSHPHRASE=DEV
+set AMBARI_JVM_ARGS=%AMBARI_JVM_ARGS% -Xms512m -Xmx2048m -Djava.security.auth.login.config=conf\krb5JAASLogin.conf -Djava.security.krb5.conf=conf\krb5.conf -Djavax.security.auth.useSubjectCredsOnly=false
diff --git a/ambari-server/conf/windows/ambari.properties b/ambari-server/conf/windows/ambari.properties
new file mode 100644
index 0000000000..fd3a7baf30
--- /dev/null
+++ b/ambari-server/conf/windows/ambari.properties
@@ -0,0 +1,82 @@
+# Copyright 2011 The Apache Software Foundation
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+security.server.keys_dir=keystore
+resources.dir=resources
+shared.resources.dir=sbin\\ambari_commons\\resources
+custom.action.definitions=resources\\custom_action_definitions
+
+#Comma-separated list of JDK versions
+#java.releases=jdk1.8.20,jdk1.6.31
+java.releases=jdk1.7.67
+jdk1.7.67.desc=Oracle JDK 1.7.67
+jdk1.7.67.url=http://public-repo-1.hortonworks.com/ARTIFACTS/jdk-7u67-windows-x64.exe
+jdk1.7.67.dest-file=jdk-7u67-windows-x64.exe
+jdk1.7.67.jcpol-url=http://public-repo-1.hortonworks.com/ARTIFACTS/UnlimitedJCEPolicyJDK7.zip
+jdk1.7.67.jcpol-file=UnlimitedJCEPolicyJDK7.zip
+jdk1.7.67.home=C:\\jdk1.7.0_67
+
+metadata.path=resources\\stacks
+server.version.file=version
+webapp.dir=web
+bootstrap.dir=bootstrap
+bootstrap.script=bootstrap\\bootstrap.py
+bootstrap.setup_agent.script=bootstrap\\setupAgent.py
+api.authenticate=true
+server.connection.max.idle.millis=900000
+server.fqdn.service.url=http://127.0.0.1/latest/meta-data/public-hostname
+server.stages.parallel=true
+
+# Scheduler settings
+server.execution.scheduler.isClustered=false
+server.execution.scheduler.maxThreads=5
+server.execution.scheduler.maxDbConnections=5
+server.execution.scheduler.misfire.toleration.minutes=480
+
+recommendations.dir=\\var\\run\\ambari-server\\stack-recommendations
+stackadvisor.script=resources\\scripts\\stack_advisor.py
+server.tmp.dir=\\var\\run\\ambari-server\\tmp
+views.dir=resources\\views
+ambari.python.wrap=python.exe
+
+# Default timeout in seconds before task is killed
+agent.task.timeout=600
+
+# thread pool maximums
+client.threadpool.size.max=25
+agent.threadpool.size.max=25
+
+# linux open-file limit
+ulimit.open.files=10000
+
+#java.home=C:\j2se1.8.0_05
+
+#server.jdbc.rca.driver=com.microsoft.sqlserver.jdbc.SQLServerDriver
+#server.jdbc.rca.url=jdbc:sqlserver://localhost\\SQLEXPRESS;databaseName=ambari;integratedSecurity=true
+##server.jdbc.rca.user.name=ambari
+##server.jdbc.rca.user.passwd=etc\\ambari-server\\conf\\password.dat
+
+#server.jdbc.driver=com.microsoft.sqlserver.jdbc.SQLServerDriver
+#server.jdbc.driver.path=C:\\Program Files\\Microsoft JDBC DRIVER 4.0 for SQL Server\\sqljdbc_4.0\\enu\\sqljdbc4.jar
+#server.jdbc.url=jdbc:sqlserver://localhost\\SQLEXPRESS;databaseName=ambari;integratedSecurity=true
+#server.jdbc.schema=ambari
+##server.jdbc.user.passwd=etc\\ambari-server\\conf\\password.dat
+##server.jdbc.user.name=ambari
+#scom.sink.db.driver=com.microsoft.sqlserver.jdbc.SQLServerDriver
+##scom.sink.db.url=jdbc:sqlserver://[server]:[port];databaseName=[databaseName];user=[user];password=[password]
+#scom.sink.db.url=jdbc:sqlserver://localhost\\SQLEXPRESS;databaseName=HadoopMetrics;integratedSecurity=true
diff --git a/ambari-server/conf/windows/ca.config b/ambari-server/conf/windows/ca.config
new file mode 100644
index 0000000000..b4dd1c54a7
--- /dev/null
+++ b/ambari-server/conf/windows/ca.config
@@ -0,0 +1,29 @@
+[ ca ]
+default_ca = CA_CLIENT
+[ CA_CLIENT ]
+dir = keystore\\db
+certs = $dir\\certs
+new_certs_dir = $dir\\newcerts
+
+database = $dir\\index.txt
+serial = $dir\\serial
+default_days = 365
+
+default_crl_days = 7
+default_md = md5
+
+policy = policy_anything
+
+[ policy_anything ]
+countryName = optional
+stateOrProvinceName = optional
+localityName = optional
+organizationName = optional
+organizationalUnitName = optional
+commonName = optional
+emailAddress = optional
+
+[ jdk7_ca ]
+subjectKeyIdentifier = hash
+authorityKeyIdentifier = keyid:always,issuer:always
+basicConstraints = CA:true
\ No newline at end of file
diff --git a/ambari-server/conf/windows/install-helper.cmd b/ambari-server/conf/windows/install-helper.cmd
new file mode 100644
index 0000000000..3d4d688965
--- /dev/null
+++ b/ambari-server/conf/windows/install-helper.cmd
@@ -0,0 +1,61 @@
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements. See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License. You may obtain a copy of the License at
+rem
+rem http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+rem ##################################################################
+rem # SERVER INSTALL HELPER #
+rem ##################################################################
+
+set COMMON_DIR="/usr/lib/python2.6/site-packages/common_functions"
+set INSTALL_HELPER_AGENT="/var/lib/ambari-agent/install-helper.sh"
+set COMMON_DIR_SERVER="/usr/lib/ambari-server/lib/common_functions"
+
+set PYTHON_WRAPER_TARGET="/usr/bin/ambari-python-wrap"
+set PYTHON_WRAPER_SOURCE="/var/lib/ambari-server/ambari-python-wrap"
+
+do_install(){
+ # setting common_functions shared resource
+ if [ ! -d "$COMMON_DIR" ]; then
+ ln -s "$COMMON_DIR_SERVER" "$COMMON_DIR"
+ fi
+ # setting python-wrapper script
+ if [ ! -f "$PYTHON_WRAPER_TARGET" ]; then
+ ln -s "$PYTHON_WRAPER_SOURCE" "$PYTHON_WRAPER_TARGET"
+ fi
+}
+
+do_remove(){
+ if [ -d "$COMMON_DIR" ]; then # common dir exists
+ rm -f "$COMMON_DIR"
+ fi
+
+ if [ -f "$PYTHON_WRAPER_TARGET" ]; then
+ rm -f "$PYTHON_WRAPER_TARGET"
+ fi
+
+ # if server package exists, restore their settings
+ if [ -f "$INSTALL_HELPER_AGENT" ]; then # call agent shared files installer
+ $INSTALL_HELPER_AGENT install
+ fi
+}
+
+
+case "$1" in
+install)
+ do_install
+ ;;
+remove)
+ do_remove
+ ;;
+esac
diff --git a/ambari-server/conf/windows/krb5JAASLogin.conf b/ambari-server/conf/windows/krb5JAASLogin.conf
new file mode 100644
index 0000000000..2db99595a4
--- /dev/null
+++ b/ambari-server/conf/windows/krb5JAASLogin.conf
@@ -0,0 +1,12 @@
+com.sun.security.jgss.initiate {
+ com.sun.security.auth.module.Krb5LoginModule required
+ renewTGT=true
+ doNotPrompt=true
+ useKeyTab=true
+ keyTab="etc\\security\\keytabs\\ambari.keytab"
+ principal="ambari@EXAMPLE.COM"
+ isInitiator=true
+ storeKey=true
+ useTicketCache=true
+ client=true;
+};
diff --git a/ambari-server/conf/windows/log4j.properties b/ambari-server/conf/windows/log4j.properties
new file mode 100644
index 0000000000..e0c602d6b9
--- /dev/null
+++ b/ambari-server/conf/windows/log4j.properties
@@ -0,0 +1,68 @@
+# Copyright 2011 The Apache Software Foundation
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Define some default values that can be overridden by system properties
+ambari.root.logger=INFO,DRFA
+ambari.log.dir=\\var\\log\\ambari-server-1.3.0-SNAPSHOT\\
+ambari.log.file=ambari-server.log
+ambari.config-changes.file=ambari-config-changes.log
+
+
+# Define the root logger to the system property "ambari.root.logger".
+log4j.rootLogger=${ambari.root.logger}
+
+# Logging Threshold
+log4j.threshhold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${ambari.log.dir}\${ambari.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+# Log config changes
+log4j.logger.configchange=INFO,configchange
+log4j.additivity.configchange=false
+log4j.appender.configchange=org.apache.log4j.FileAppender
+log4j.appender.configchange.File=${ambari.log.dir}\${ambari.config-changes.file}
+log4j.appender.configchange.layout=org.apache.log4j.PatternLayout
+log4j.appender.configchange.layout.ConversionPattern=%d{ISO8601} %5p - %m%n
diff --git a/ambari-server/docs/api/v1/clusters-cluster.md b/ambari-server/docs/api/v1/clusters-cluster.md
index 8da5b29b9e..0c84c43f66 100644
--- a/ambari-server/docs/api/v1/clusters-cluster.md
+++ b/ambari-server/docs/api/v1/clusters-cluster.md
@@ -66,191 +66,191 @@ Get information for the cluster "cluster001".
200 OK
{
- "href" : "http://your.ambari.server/api/v1/clusters/cluster001",
- "Clusters" : {
- "cluster_id" : 9,
- "cluster_name" : "cluster001",
- "health_report" : {
- "Host/stale_config" : 1,
- "Host/maintenance_state" : 0,
- "Host/host_state/HEALTHY" : 3,
- "Host/host_state/UNHEALTHY" : 0,
- "Host/host_state/HEARTBEAT_LOST" : 0,
- "Host/host_state/INIT" : 0,
- "Host/host_status/HEALTHY" : 3,
- "Host/host_status/UNHEALTHY" : 0,
- "Host/host_status/UNKNOWN" : 0,
- "Host/host_status/ALERT" : 0
- },
- "provisioning_state" : "INIT",
- "total_hosts" : 3,
- "version" : "HDP-2.0",
- "desired_configs" : {
- "capacity-scheduler" : {
- "user" : "admin",
- "tag" : "version1408514705943"
- },
- "core-site" : {
- "user" : "admin",
- "tag" : "version1409806913314"
- },
- "global" : {
- "user" : "admin",
- "tag" : "version1409806913314"
- },
- "hdfs-log4j" : {
- "user" : "admin",
- "tag" : "version1"
- },
- "hdfs-site" : {
- "user" : "admin",
- "tag" : "version1407908591996"
- },
- "mapred-site" : {
- "user" : "admin",
- "tag" : "version1408514705943"
- },
- "mapreduce2-log4j" : {
- "user" : "admin",
- "tag" : "version1408514705943"
- },
- "yarn-log4j" : {
- "user" : "admin",
- "tag" : "version1408514705943"
- },
- "yarn-site" : {
- "user" : "admin",
- "tag" : "version1408514705943"
- },
- "zoo.cfg" : {
- "user" : "admin",
- "tag" : "version1"
- },
- "zookeeper-log4j" : {
- "user" : "admin",
- "tag" : "version1"
- }
- }
- },
- "alerts" : {
- "summary" : {
- "CRITICAL" : 1,
- "OK" : 2,
- "PASSIVE" : 0,
- "WARNING" : 0
- }
- },
- "requests" : [
- {
- "href" : "http://your.ambari.server/api/v1/clusters/cluster001/requests/304",
- "Requests" : {
- "cluster_name" : "cluster001",
- "id" : 304
- }
- },
- {
- "href" : "http://your.ambari.server/api/v1/clusters/cluster001/requests/305",
- "Requests" : {
- "cluster_name" : "cluster001",
- "id" : 305
- }
- }
- ],
- "services" : [
- {
- "href" : "http://your.ambari.server/api/v1/clusters/cluster001/services/GANGLIA",
- "ServiceInfo" : {
- "cluster_name" : "cluster001",
- "service_name" : "GANGLIA"
- }
- },
- {
- "href" : "http://your.ambari.server/api/v1/clusters/cluster001/services/HDFS",
- "ServiceInfo" : {
- "cluster_name" : "cluster001",
- "service_name" : "HDFS"
- }
- },
- {
- "href" : "http://your.ambari.server/api/v1/clusters/cluster001/services/MAPREDUCE2",
- "ServiceInfo" : {
- "cluster_name" : "cluster001",
- "service_name" : "MAPREDUCE2"
- }
- },
- {
- "href" : "http://your.ambari.server/api/v1/clusters/cluster001/services/ZOOKEEPER",
- "ServiceInfo" : {
- "cluster_name" : "cluster001",
- "service_name" : "ZOOKEEPER"
- }
- }
+ "href" : "http://your.ambari.server/api/v1/clusters/cluster001",
+ "Clusters" : {
+ "cluster_id" : 9,
+ "cluster_name" : "cluster001",
+ "health_report" : {
+ "Host/stale_config" : 1,
+ "Host/maintenance_state" : 0,
+ "Host/host_state/HEALTHY" : 3,
+ "Host/host_state/UNHEALTHY" : 0,
+ "Host/host_state/HEARTBEAT_LOST" : 0,
+ "Host/host_state/INIT" : 0,
+ "Host/host_status/HEALTHY" : 3,
+ "Host/host_status/UNHEALTHY" : 0,
+ "Host/host_status/UNKNOWN" : 0,
+ "Host/host_status/ALERT" : 0
+ },
+ "provisioning_state" : "INIT",
+ "total_hosts" : 3,
+ "version" : "HDP-2.0",
+ "desired_configs" : {
+ "capacity-scheduler" : {
+ "user" : "admin",
+ "tag" : "version1408514705943"
+ },
+ "core-site" : {
+ "user" : "admin",
+ "tag" : "version1409806913314"
+ },
+ "global" : {
+ "user" : "admin",
+ "tag" : "version1409806913314"
+ },
+ "hdfs-log4j" : {
+ "user" : "admin",
+ "tag" : "version1"
+ },
+ "hdfs-site" : {
+ "user" : "admin",
+ "tag" : "version1407908591996"
+ },
+ "mapred-site" : {
+ "user" : "admin",
+ "tag" : "version1408514705943"
+ },
+ "mapreduce2-log4j" : {
+ "user" : "admin",
+ "tag" : "version1408514705943"
+ },
+ "yarn-log4j" : {
+ "user" : "admin",
+ "tag" : "version1408514705943"
+ },
+ "yarn-site" : {
+ "user" : "admin",
+ "tag" : "version1408514705943"
+ },
+ "zoo.cfg" : {
+ "user" : "admin",
+ "tag" : "version1"
+ },
+ "zookeeper-log4j" : {
+ "user" : "admin",
+ "tag" : "version1"
+ }
+ }
+ },
+ "alerts" : {
+ "summary" : {
+ "CRITICAL" : 1,
+ "OK" : 2,
+ "PASSIVE" : 0,
+ "WARNING" : 0
+ }
+ },
+ "requests" : [
+ {
+ "href" : "http://your.ambari.server/api/v1/clusters/cluster001/requests/304",
+ "Requests" : {
+ "cluster_name" : "cluster001",
+ "id" : 304
+ }
+ },
+ {
+ "href" : "http://your.ambari.server/api/v1/clusters/cluster001/requests/305",
+ "Requests" : {
+ "cluster_name" : "cluster001",
+ "id" : 305
+ }
+ }
+ ],
+ "services" : [
+ {
+ "href" : "http://your.ambari.server/api/v1/clusters/cluster001/services/GANGLIA",
+ "ServiceInfo" : {
+ "cluster_name" : "cluster001",
+ "service_name" : "GANGLIA"
+ }
+ },
+ {
+ "href" : "http://your.ambari.server/api/v1/clusters/cluster001/services/HDFS",
+ "ServiceInfo" : {
+ "cluster_name" : "cluster001",
+ "service_name" : "HDFS"
+ }
+ },
+ {
+ "href" : "http://your.ambari.server/api/v1/clusters/cluster001/services/MAPREDUCE2",
+ "ServiceInfo" : {
+ "cluster_name" : "cluster001",
+ "service_name" : "MAPREDUCE2"
+ }
+ },
+ {
+ "href" : "http://your.ambari.server/api/v1/clusters/cluster001/services/ZOOKEEPER",
+ "ServiceInfo" : {
+ "cluster_name" : "cluster001",
+ "service_name" : "ZOOKEEPER"
+ }
+ }
],
- "config_groups" : [
- {
- "href" : "http://your.ambari.server/api/v1/clusters/cluster001/config_groups/2",
- "ConfigGroup" : {
- "cluster_name" : "cluster001",
- "id" : 2
- }
- }
- ],
- "workflows" : [ ],
- "hosts" : [
- {
- "href" : "http://your.ambari.server/api/v1/clusters/cluster001/hosts/host1.domain.com",
- "Hosts" : {
- "cluster_name" : "cluster001",
- "host_name" : "host1.domain.com"
- }
- },
- {
- "href" : "http://your.ambari.server/api/v1/clusters/cluster001/hosts/host2.domain.com",
- "Hosts" : {
- "cluster_name" : "cluster001",
- "host_name" : "host2.domain.com"
- }
- },
- {
- "href" : "http://your.ambari.server/api/v1/clusters/cluster001/hosts/host3.domain.com",
- "Hosts" : {
- "cluster_name" : "cluster001",
- "host_name" : "host3.domain.com"
- }
- }
- ],
- "configurations" : [
- {
- "href" : "http://your.ambari.server/api/v1/clusters/cluster001/configurations?type=core-site&tag=version1",
- "tag" : "version1",
- "type" : "core-site",
- "Config" : {
- "cluster_name" : "cluster001"
- }
- },
- {
- "href" : "http://your.ambari.server/api/v1/clusters/cluster001/configurations?type=global&tag=version1",
- "tag" : "version1",
- "type" : "global",
- "Config" : {
- "cluster_name" : "cluster001"
- }
- },
- {
- "href" : "http://your.ambari.server/api/v1/clusters/cluster001/configurations?type=hdfs-site&tag=version1",
- "tag" : "version1",
- "type" : "hdfs-site",
- "Config" : {
- "cluster_name" : "cluster001"
- }
- },
- {
- "href" : "http://your.ambari.server/api/v1/clusters/cluster001/configurations?type=zoo.cfg&tag=version1",
- "tag" : "version1",
- "type" : "zoo.cfg",
- "Config" : {
- "cluster_name" : "cluster001"
- }
- },
- ]
+ "config_groups" : [
+ {
+ "href" : "http://your.ambari.server/api/v1/clusters/cluster001/config_groups/2",
+ "ConfigGroup" : {
+ "cluster_name" : "cluster001",
+ "id" : 2
+ }
+ }
+ ],
+ "workflows" : [ ],
+ "hosts" : [
+ {
+ "href" : "http://your.ambari.server/api/v1/clusters/cluster001/hosts/host1.domain.com",
+ "Hosts" : {
+ "cluster_name" : "cluster001",
+ "host_name" : "host1.domain.com"
+ }
+ },
+ {
+ "href" : "http://your.ambari.server/api/v1/clusters/cluster001/hosts/host2.domain.com",
+ "Hosts" : {
+ "cluster_name" : "cluster001",
+ "host_name" : "host2.domain.com"
+ }
+ },
+ {
+ "href" : "http://your.ambari.server/api/v1/clusters/cluster001/hosts/host3.domain.com",
+ "Hosts" : {
+ "cluster_name" : "cluster001",
+ "host_name" : "host3.domain.com"
+ }
+ }
+ ],
+ "configurations" : [
+ {
+ "href" : "http://your.ambari.server/api/v1/clusters/cluster001/configurations?type=core-site&tag=version1",
+ "tag" : "version1",
+ "type" : "core-site",
+ "Config" : {
+ "cluster_name" : "cluster001"
+ }
+ },
+ {
+ "href" : "http://your.ambari.server/api/v1/clusters/cluster001/configurations?type=global&tag=version1",
+ "tag" : "version1",
+ "type" : "global",
+ "Config" : {
+ "cluster_name" : "cluster001"
+ }
+ },
+ {
+ "href" : "http://your.ambari.server/api/v1/clusters/cluster001/configurations?type=hdfs-site&tag=version1",
+ "tag" : "version1",
+ "type" : "hdfs-site",
+ "Config" : {
+ "cluster_name" : "cluster001"
+ }
+ },
+ {
+ "href" : "http://your.ambari.server/api/v1/clusters/cluster001/configurations?type=zoo.cfg&tag=version1",
+ "tag" : "version1",
+ "type" : "zoo.cfg",
+ "Config" : {
+ "cluster_name" : "cluster001"
+ }
+    }
+ ]
}
diff --git a/ambari-server/pom.xml b/ambari-server/pom.xml
index 4e7477e9f0..196e9e186f 100644
--- a/ambari-server/pom.xml
+++ b/ambari-server/pom.xml
@@ -18,7 +18,7 @@
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-server</artifactId>
- <packaging>jar</packaging>
+ <packaging>${packagingFormat}</packaging>
<name>Ambari Server</name>
<version>1.3.0-SNAPSHOT</version>
<description>Ambari Server</description>
@@ -106,10 +106,35 @@
<version>3.0</version>
</plugin>
<plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-antrun-plugin</artifactId>
+ <version>1.7</version>
+ <executions>
+ <execution>
+ <phase>package</phase>
+ <configuration>
+ <tasks>
+ <jar destfile="target/DBConnectionVerification.jar">
+ <fileset dir="${basedir}/target/classes/"
+ includes="**/DBConnectionVerification.class" />
+ <manifest>
+ <attribute name="Main-Class"
+ value="org.apache.ambari.server.DBConnectionVerification" />
+ </manifest>
+ </jar>
+ </tasks>
+ </configuration>
+ <goals>
+ <goal>run</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
<artifactId>maven-assembly-plugin</artifactId>
<configuration>
<descriptors>
- <descriptor>src/main/assemblies/server.xml</descriptor>
+ <descriptor>${assemblydescriptor}</descriptor>
</descriptors>
<tarLongFileMode>gnu</tarLongFileMode>
</configuration>
@@ -139,15 +164,23 @@
<exclude>src/main/resources/db/serial</exclude>
<exclude>src/main/resources/db/index.txt</exclude>
<exclude>src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/templates/exclude_hosts_list.j2</exclude>
+ <exclude>src/main/windows/ambari-server.cmd</exclude>
+ <exclude>src/main/windows/ambari-server.ps1</exclude>
<exclude>src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/balancer-err.log</exclude>
<exclude>src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/balancer.log</exclude>
<exclude>src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/balancer-emulator/balancer.log</exclude>
<exclude>src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/balancer-emulator/balancer-err.log</exclude>
<exclude>conf/unix/ca.config</exclude>
<exclude>conf/unix/krb5JAASLogin.conf</exclude>
+ <exclude>conf/windows/ca.config</exclude>
+ <exclude>conf/windows/krb5JAASLogin.conf</exclude>
+ <exclude>**/*.iml</exclude>
<exclude>**/*.json</exclude>
<exclude>**/*.sql</exclude>
+ <exclude>**/*.wxs</exclude>
<exclude>**/repo_suse_rhel.j2</exclude>
+ <exclude>**/repo_debian.j2</exclude>
+ <exclude>**/cluster.properties.j2</exclude>
<exclude>**/repo_ubuntu.j2</exclude>
<exclude>**/.pydev*</exclude>
@@ -156,6 +189,7 @@
<!-- Stack definitions -->
<exclude>src/main/resources/stacks/HDP/2.0._/services/HBASE/package/templates/regionservers.j2</exclude>
+ <exclude>src/main/resources/stacks/HDPWIN/2.1/services/*/configuration*/*</exclude>
<!--test samples -->
<exclude>src/test/resources/TestAmbaryServer.samples/**</exclude>
@@ -199,7 +233,6 @@
</dependency>
</dependencies>
</plugin>
-
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>rpm-maven-plugin</artifactId>
@@ -424,6 +457,15 @@
<location>src/main/resources/Ambari-DDL-MySQL-DROP.sql</location>
</source>
<source>
+ <location>target/classes/Ambari-DDL-SQLServer-CREATE.sql</location>
+ </source>
+ <source>
+ <location>target/classes/Ambari-DDL-SQLServer-CREATELOCAL.sql</location>
+ </source>
+ <source>
+ <location>src/main/resources/Ambari-DDL-SQLServer-DROP.sql</location>
+ </source>
+ <source>
<location>${project.build.directory}/DBConnectionVerification.jar</location>
</source>
<source>
@@ -1025,20 +1067,27 @@
</executions>
</plugin>
<plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <configuration>
+ <skip>${skipSurefireTests}</skip>
+ </configuration>
+ </plugin>
+ <plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>exec-maven-plugin</artifactId>
<version>1.2</version>
<executions>
<execution>
<configuration>
- <executable>${project.basedir}/../ambari-common/src/main/unix/ambari-python-wrap</executable>
+ <executable>${executable.python}</executable>
<workingDirectory>src/test/python</workingDirectory>
<arguments>
<argument>unitTests.py</argument>
<argument>${custom.tests}</argument>
</arguments>
<environmentVariables>
- <PYTHONPATH>${project.basedir}/../ambari-common/src/main/python:${project.basedir}/../ambari-agent/src/main/python:${project.basedir}/../ambari-common/src/main/python/ambari_jinja2:${project.basedir}/../ambari-common/src/main/python/ambari_commons:${project.basedir}/../ambari-common/src/test/python:${project.basedir}/src/main/python:${project.basedir}/src/main/python/ambari-server-state:${project.basedir}/src/test/python:$PYTHONPATH</PYTHONPATH>
+ <PYTHONPATH>${path.python.1}${pathsep}$PYTHONPATH</PYTHONPATH>
</environmentVariables>
<skip>${skipTests}</skip>
</configuration>
@@ -1185,6 +1234,139 @@
</build>
</profile>
<profile>
+ <id>windows</id>
+ <activation>
+ <os>
+ <family>win</family>
+ </os>
+ </activation>
+ <properties>
+ <envClassifier>win</envClassifier>
+ <dirsep>\</dirsep>
+ <pathsep>;</pathsep>
+ <executable.python>python</executable.python>
+ <executable.shell>cmd</executable.shell>
+ <fileextension.shell>cmd</fileextension.shell>
+ <fileextension.dot.shell-default>.cmd</fileextension.dot.shell-default>
+ <path.python.1>${project.basedir}\..\ambari-common\src\main\python;${project.basedir}\..\ambari-agent\src\main\python;${project.basedir}\..\ambari-common\src\main\python\ambari_jinja2;${project.basedir}\..\ambari-common\src\main\python\ambari_commons;${project.basedir}\..\ambari-common\src\test\python;${project.basedir}\src\main\python;${project.basedir}\src\main\python\ambari-server-state;${project.basedir}\src\main\resources\custom_actions;${project.basedir}\src\main\resources\scripts;${project.basedir}\src\test\python</path.python.1>
+ <assemblydescriptor>src/main/assemblies/server-windows.xml</assemblydescriptor>
+ <packagingFormat>jar</packagingFormat>
+ </properties>
+ <build>
+ <plugins>
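+        <!-- WiX packaging pipeline: heat.exe harvests the assembled dist directory into
+             ambari-server-files.wxs; the wix-maven-plugin below compiles (candle) and
+             links (light) the .wxs sources into the ambari-server MSI. -->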
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>exec-maven-plugin</artifactId>
+ <version>1.2</version>
+ <executions>
+ <execution>
+ <id>run-heat</id>
+ <phase>package</phase>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ <configuration>
+ <executable>heat.exe</executable>
+ <arguments>
+ <argument>dir</argument>
+ <argument>"."</argument>
+ <argument>-dr</argument>
+ <argument>"AMBARI_SERVER_MSI"</argument>
+ <argument>-platform</argument>
+ <argument>Win64</argument>
+ <argument>-cg</argument>
+ <argument>"AmbariServerGroup"</argument>
+ <argument>-gg</argument>
+ <argument>-ke</argument>
+ <argument>-srd</argument>
+ <argument>-o</argument>
+ <argument>".\..\..\ambari-server-files.wxs"</argument>
+ </arguments>
+ <workingDirectory>target/ambari-server-${project.version}-dist/ambari-server-${project.version}</workingDirectory>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.npanday.plugins</groupId>
+ <artifactId>wix-maven-plugin</artifactId>
+ <version>1.4.0-incubating</version>
+ <extensions>true</extensions>
+ <configuration>
+ <sourceFiles>
+ <sourceFile>target/ambari-server.wxs</sourceFile>
+ <sourceFile>target/ambari-server-files.wxs</sourceFile>
+ </sourceFiles>
+ <outputDirectory>target</outputDirectory>
+ <objectFiles>
+ <objectFile>target/ambari-server.wixobj</objectFile>
+ <objectFile>target/ambari-server-files.wixobj</objectFile>
+ </objectFiles>
+ <outputFile>target/ambari-server-${ambariVersion}.msi</outputFile>
+ <extensions>
+ <extension>WixUIExtension</extension>
+ </extensions>
+ </configuration>
+ <executions>
+ <execution>
+ <id>wix-candle</id>
+ <phase>package</phase>
+ <goals>
+ <goal>candle</goal>
+ </goals>
+ <configuration>
+ <arguments>-arch x64</arguments>
+ </configuration>
+ </execution>
+ <execution>
+ <id>wix-light</id>
+ <phase>package</phase>
+ <goals>
+ <goal>light</goal>
+ </goals>
+ <configuration>
+ <arguments>-b ${basedir}/target/ambari-server-${project.version}-dist/ambari-server-${project.version}</arguments>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+ <dependencies>
+ <dependency>
+ <groupId>org.apache.npanday.plugins</groupId>
+ <artifactId>wix-maven-plugin</artifactId>
+ <version>1.4.0-incubating</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>${pom.groupId}</groupId>
+ <artifactId>metrics-sink</artifactId>
+ <version>1.0.0</version>
+ </dependency>
+ </dependencies>
+ </profile>
+ <profile>
+ <id>linux</id>
+ <activation>
+ <os>
+ <family>unix</family>
+ </os>
+ </activation>
+ <properties>
+ <envClassifier>linux</envClassifier>
+ <dirsep>/</dirsep>
+ <pathsep>:</pathsep>
+ <executable.python>${project.basedir}/../ambari-common/src/main/unix/ambari-python-wrap</executable.python>
+ <executable.shell>sh</executable.shell>
+ <fileextension.shell>sh</fileextension.shell>
+ <fileextension.dot.shell-default></fileextension.dot.shell-default>
+ <path.python.1>${project.basedir}/../ambari-common/src/main/python:${project.basedir}/../ambari-agent/src/main/python:${project.basedir}/../ambari-common/src/main/python/ambari_jinja2:${project.basedir}/../ambari-common/src/main/python/ambari_commons:${project.basedir}/../ambari-common/src/test/python:${project.basedir}/src/main/python:${project.basedir}/src/main/python/ambari-server-state:${project.basedir}/src/main/resources/custom_actions:${project.basedir}/src/main/resources/scripts:${project.basedir}/src/test/python</path.python.1>
+ <assemblydescriptor>src/main/assemblies/server.xml</assemblydescriptor>
+ <packagingFormat>jar</packagingFormat>
+ </properties>
+ </profile>
+ <profile>
<id>suse11</id>
<properties>
<python.xml.package>python-xml</python.xml.package>
@@ -1493,7 +1675,7 @@
<version>1.5.2</version>
</dependency>
</dependencies>
-
+
<pluginRepositories>
<pluginRepository>
<id>oss.sonatype.org</id>
diff --git a/ambari-server/src/main/assemblies/server-windows.xml b/ambari-server/src/main/assemblies/server-windows.xml
new file mode 100644
index 0000000000..e48d3d8d50
--- /dev/null
+++ b/ambari-server/src/main/assemblies/server-windows.xml
@@ -0,0 +1,183 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements. See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership. The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<assembly>
+ <id>dist</id>
+ <formats>
+ <format>dir</format>
+ </formats>
+ <includeBaseDirectory>false</includeBaseDirectory>
+ <files>
+ <file>
+ <source>${project.build.directory}/${artifact.artifactId}-${artifact.version}.jar</source>
+ <outputDirectory>ambari-server-${project.version}/lib</outputDirectory>
+ </file>
+ <file>
+ <source>${project.build.directory}/DBConnectionVerification.jar</source>
+ <outputDirectory>ambari-server-${project.version}/resources</outputDirectory>
+ </file>
+ <file>
+ <source>${basedir}/conf/windows/ambari.properties</source>
+ <outputDirectory>/ambari-server-${project.version}/conf</outputDirectory>
+ </file>
+ <file>
+ <source>${basedir}/conf/windows/log4j.properties</source>
+ <outputDirectory>/ambari-server-${project.version}/conf</outputDirectory>
+ </file>
+ <file>
+ <source>${basedir}/conf/windows/ca.config</source>
+ <outputDirectory>/ambari-server-${project.version}/keystore</outputDirectory>
+ </file>
+ <file>
+ <source>${basedir}/src/main/python/ambari-server-windows.py</source>
+ <outputDirectory>/ambari-server-${project.version}/sbin</outputDirectory>
+ </file>
+ <file>
+ <source>${basedir}/src/main/python/bootstrap.py</source>
+ <outputDirectory>/ambari-server-${project.version}/bootstrap</outputDirectory>
+ </file>
+ <file>
+ <source>${basedir}/src/main/python/setupAgent.py</source>
+ <outputDirectory>/ambari-server-${project.version}/bootstrap</outputDirectory>
+ </file>
+ <file>
+ <source>${basedir}/src/main/windows/ambari-server.cmd</source>
+ <outputDirectory>/ambari-server-${project.version}</outputDirectory>
+ </file>
+ <file>
+ <source>${basedir}/src/main/windows/ambari-server.ps1</source>
+ <outputDirectory>/ambari-server-${project.version}</outputDirectory>
+ </file>
+ <file>
+ <source>${project.build.directory}/version</source>
+ <outputDirectory>ambari-server-${project.version}/</outputDirectory>
+ </file>
+ <file>
+ <source>${basedir}/../contrib/ambari-scom/metrics-sink/db/Hadoop-Metrics-SQLServer-CREATE.sql</source>
+ <outputDirectory>/ambari-server-${project.version}/resources</outputDirectory>
+ </file>
+ <file>
+ <source>${basedir}/../contrib/ambari-scom/metrics-sink/db/Hadoop-Metrics-SQLServer-CREATELOCAL.sql</source>
+ <outputDirectory>/ambari-server-${project.version}/resources</outputDirectory>
+ </file>
+ <file>
+ <source>${basedir}/../contrib/ambari-scom/metrics-sink/db/Hadoop-Metrics-SQLServer-DROP.sql</source>
+ <outputDirectory>/ambari-server-${project.version}/resources</outputDirectory>
+ </file>
+ <file>
+ <source>${basedir}/../contrib/ambari-scom/metrics-sink/target/metrics-sink-1.0.0.jar</source>
+ <outputDirectory>/ambari-server-${project.version}/resources</outputDirectory>
+ </file>
+ <file>
+ <source>${basedir}/src/main/package/msi/ambari-server.wxs</source>
+ <outputDirectory>../../</outputDirectory>
+ <filtered>true</filtered>
+ </file>
+ </files>
+ <fileSets>
+ <!-- Distro files, readme, licenses, etc -->
+ <fileSet>
+ <directory>${basedir}/../ambari-admin/target</directory>
+ <outputDirectory>/ambari-server-${project.version}/resources/views</outputDirectory>
+ <includes>
+ <include>*.jar</include>
+ </includes>
+ </fileSet>
+ <fileSet>
+ <directory>${basedir}/../</directory>
+ <outputDirectory>ambari-server-${project.version}/</outputDirectory>
+ <includes>
+ <include>*.txt</include>
+ </includes>
+ </fileSet>
+ <fileSet>
+ <directory>${basedir}/src/main/python/ambari_server</directory>
+ <outputDirectory>ambari-server-${project.version}/sbin/ambari_server</outputDirectory>
+ <includes>
+ <include>*.py</include>
+ <include>*.pyc</include>
+ </includes>
+ </fileSet>
+ <fileSet>
+ <directory>${basedir}/../ambari-common/src/main/python/ambari_commons</directory>
+ <outputDirectory>ambari-server-${project.version}/sbin/ambari_commons</outputDirectory>
+ </fileSet>
+ <fileSet>
+ <directory>${basedir}/../ambari-common/src/main/python/ambari_jinja2/ambari_jinja2</directory>
+ <outputDirectory>ambari-server-${project.version}/sbin/ambari_jinja2</outputDirectory>
+ </fileSet>
+ <fileSet>
+ <directory>${basedir}/../ambari-common/src/main/python/resource_management</directory>
+ <outputDirectory>ambari-server-${project.version}/sbin/resource_management</outputDirectory>
+ </fileSet>
+ <!--
+ <fileSet>
+ <directory>${project.build.directory}/web/</directory>
+ <outputDirectory>ambari-server-${project.version}/web/</outputDirectory>
+ <includes>
+ <include>*</include>
+ </includes>
+ </fileSet>
+ -->
+ <!--
+ <fileSet>
+ <directory>${basedir}/src/main/bin</directory>
+ <outputDirectory>ambari-server-${project.version}/bin</outputDirectory>
+ <includes>
+ <include>*</include>
+ </includes>
+ <fileMode>0755</fileMode>
+ </fileSet>
+ -->
+ <fileSet>
+ <directory>${basedir}/src/main/resources/</directory>
+ <outputDirectory>/ambari-server-${project.version}/keystore</outputDirectory>
+ <includes>
+ <include>db/*</include>
+ <include>pass.txt</include>
+ </includes>
+ </fileSet>
+ <fileSet>
+ <directory>${basedir}/../ambari-web/public</directory>
+ <outputDirectory>ambari-server-${project.version}/web</outputDirectory>
+ <includes>
+ <include>**</include>
+ </includes>
+ </fileSet>
+ <fileSet>
+ <directory>${basedir}/src/main/resources</directory>
+ <outputDirectory>/ambari-server-${project.version}/resources/</outputDirectory>
+ <includes>
+ <include>Ambari-DDL-SQLServer-*.sql</include>
+ <include>custom_action_definitions/**</include>
+ <include>custom_actions/**</include>
+ <include>stacks/stack_advisor.py</include>
+ <include>scripts/**</include>
+ <include>stacks/HDPWIN/**</include>
+ <include>upgrade/**</include>
+ </includes>
+ </fileSet>
+ </fileSets>
+ <dependencySets>
+ <dependencySet>
+ <outputDirectory>ambari-server-${project.version}/lib</outputDirectory>
+ <unpack>false</unpack>
+ <scope>compile</scope>
+ </dependencySet>
+ </dependencySets>
+</assembly>
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/DBConnectionVerification.java b/ambari-server/src/main/java/org/apache/ambari/server/DBConnectionVerification.java
index 7c053f3b91..9ff04fb46b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/DBConnectionVerification.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/DBConnectionVerification.java
@@ -18,6 +18,8 @@
package org.apache.ambari.server;
+import org.apache.commons.lang.StringUtils;
+
import java.sql.*;
public class DBConnectionVerification {
@@ -30,7 +32,11 @@ public class DBConnectionVerification {
Connection conn = null;
try {
Class.forName(driver);
- conn = DriverManager.getConnection(url, username, password);
+ if(url.contains("integratedSecurity=true")) {
+ conn = DriverManager.getConnection(url);
+ } else {
+ conn = DriverManager.getConnection(url, username, password);
+ }
System.out.println("Connected to DB Successfully!");
} catch (Exception e) {
System.out.println("ERROR: Unable to connect to the DB. Please check DB connection properties.");
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/ComponentService.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/ComponentService.java
index 5510697b37..e7e00293f7 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/ComponentService.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/ComponentService.java
@@ -234,7 +234,7 @@ public class ComponentService extends BaseService {
Response.ResponseBuilder rb = Response.status(Response.Status.OK);
Configuration configs = new Configuration();
String tmpDir = configs.getProperty(Configuration.SERVER_TMP_DIR_KEY);
- File file = new File(tmpDir+File.separator+componentName+"-configs.tar.gz");
+ File file = new File(tmpDir + File.separator + componentName + "-configs" + Configuration.DEF_ARCHIVE_EXTENSION);
InputStream resultInputStream = null;
try {
resultInputStream = new FileInputStream(file);
@@ -242,8 +242,8 @@ public class ComponentService extends BaseService {
e.printStackTrace();
}
- String contentType = "application/x-ustar";
- String outputFileName = componentName + "-configs.tar.gz";
+ String contentType = Configuration.DEF_ARCHIVE_CONTENT_TYPE;
+ String outputFileName = componentName + "-configs" + Configuration.DEF_ARCHIVE_EXTENSION;
rb.header("Content-Disposition", "attachment; filename=\"" + outputFileName + "\"");
rb.entity(resultInputStream);
return rb.type(contentType).build();
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorRunner.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorRunner.java
index a50f915a85..191aeed1ba 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorRunner.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorRunner.java
@@ -37,7 +37,7 @@ public class StackAdvisorRunner {
/**
* Runs stack_advisor.py script in the specified {@code actionDirectory}.
- *
+ *
* @param script stack advisor script
* @param saCommandType {@link StackAdvisorCommandType} to run.
* @param actionDirectory directory for the action
@@ -110,7 +110,7 @@ public class StackAdvisorRunner {
* Gets an instance of a {@link ProcessBuilder} that's ready to execute the
* shell command to run the stack advisor script. This will take the
* environment variables from the current process.
- *
+ *
* @param script
* @param saCommandType
* @param actionDirectory
@@ -126,8 +126,13 @@ public class StackAdvisorRunner {
// includes the original command plus the arguments for it
List<String> builderParameters = new ArrayList<String>();
- builderParameters.add("sh");
- builderParameters.add("-c");
+ if (System.getProperty("os.name").contains("Windows")) {
+ builderParameters.add("cmd");
+ builderParameters.add("/c");
+ } else {
+ builderParameters.add("sh");
+ builderParameters.add("-c");
+ }
// for the 3rd argument, build a single parameter since we use -c
// ProcessBuilder doesn't support output redirection until JDK 1.7
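As the comment above notes, ProcessBuilder only gained output redirection in JDK 1.7, so the advisor command is funneled through a shell, and the shell differs per platform: cmd /c on Windows plays the role of sh -c elsewhere. A standalone sketch of the prefix selection:

    import java.util.ArrayList;
    import java.util.List;

    public class ShellPrefixExample {
        public static List<String> shellPrefix() {
            List<String> cmd = new ArrayList<String>();
            if (System.getProperty("os.name").contains("Windows")) {
                cmd.add("cmd"); // Windows command interpreter
                cmd.add("/c");  // execute the following string, then exit
            } else {
                cmd.add("sh");
                cmd.add("-c");
            }
            return cmd;
        }
    }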
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
index a833c83bdc..0a96193619 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
@@ -73,6 +73,8 @@ public class Configuration {
public static final String RECOMMENDATIONS_DIR_DEFAULT = "/var/run/ambari-server/stack-recommendations";
public static final String STACK_ADVISOR_SCRIPT = "stackadvisor.script";
public static final String STACK_ADVISOR_SCRIPT_DEFAULT = "/var/lib/ambari-server/resources/scripts/stack_advisor.py";
+ public static final String AMBARI_PYTHON_WRAP_KEY = "ambari.python.wrap";
+ public static final String AMBARI_PYTHON_WRAP_DEFAULT = "ambari-python-wrap";
public static final String API_AUTHENTICATE = "api.authenticate";
public static final String API_USE_SSL = "api.ssl";
public static final String API_CSRF_PREVENTION_KEY = "api.csrfPrevention.enabled";
@@ -167,6 +169,11 @@ public class Configuration {
public static final String SERVER_JDBC_RCA_USER_PASSWD_KEY = "server.jdbc.rca.user.passwd";
public static final String SERVER_JDBC_RCA_DRIVER_KEY = "server.jdbc.rca.driver";
public static final String SERVER_JDBC_RCA_URL_KEY = "server.jdbc.rca.url";
+ public static final String SCOM_JDBC_SINK_USER_NAME_KEY = "scom.sink.db.username";
+ public static final String SCOM_JDBC_SINK_USER_PASSWD_KEY = "scom.sink.db.password";
+ public static final String SCOM_JDBC_SINK_DRIVER_KEY = "scom.sink.db.driver";
+ public static final String SCOM_JDBC_SINK_URL_KEY = "scom.sink.db.url";
+ public static final String SCOM_JDBC_SINK_INT_AUTH_KEY = "scom.sink.db.use.integrated.auth";
public static final String SERVER_JDBC_GENERATE_TABLES_KEY = "server.jdbc.generateTables";
public static final String JDBC_UNIT_NAME = "ambari-server";
public static final String JDBC_LOCAL_URL = "jdbc:postgresql://localhost/";
@@ -236,6 +243,9 @@ public class Configuration {
public static final String SERVER_TMP_DIR_DEFAULT = "/var/lib/ambari-server/tmp";
public static final String EXTERNAL_SCRIPT_TIMEOUT_KEY = "server.script.timeout";
public static final String EXTERNAL_SCRIPT_TIMEOUT_DEFAULT = "5000";
+ public static final String DEF_ARCHIVE_EXTENSION;
+ public static final String DEF_ARCHIVE_CONTENT_TYPE;
+
/**
* This key defines whether stages of parallel requests are executed in
* parallel or sequentally. Only stages from different requests
@@ -257,6 +267,8 @@ public class Configuration {
private static final String SERVER_JDBC_USER_PASSWD_DEFAULT = "bigdata";
private static final String SERVER_JDBC_RCA_USER_NAME_DEFAULT = "mapred";
private static final String SERVER_JDBC_RCA_USER_PASSWD_DEFAULT = "mapred";
+ private static final String SCOM_JDBC_SINK_USER_NAME_DEFAULT = "hadoop";
+ private static final String SCOM_JDBC_SINK_USER_PASSWD_DEFAULT = "hadoop";
private static final String SRVR_TWO_WAY_SSL_DEFAULT = "false";
private static final String SRVR_KSTR_DIR_DEFAULT = ".";
private static final String API_CSRF_PREVENTION_DEFAULT = "true";
@@ -327,6 +339,17 @@ public class Configuration {
private volatile boolean credentialProviderInitialized = false;
private Map<String, String> customDbProperties = null;
+ static {
+ if (System.getProperty("os.name").contains("Windows")) {
+ DEF_ARCHIVE_EXTENSION = ".zip";
+ DEF_ARCHIVE_CONTENT_TYPE = "application/zip";
+ }
+ else {
+ DEF_ARCHIVE_EXTENSION = ".tar.gz";
+ DEF_ARCHIVE_CONTENT_TYPE = "application/x-ustar";
+ }
+ }
+
public Configuration() {
this(readConfigFile());
}
@@ -341,6 +364,8 @@ public class Configuration {
this.properties = properties;
configsMap = new HashMap<String, String>();
+ configsMap.put(AMBARI_PYTHON_WRAP_KEY, properties.getProperty(
+ AMBARI_PYTHON_WRAP_KEY, AMBARI_PYTHON_WRAP_DEFAULT));
configsMap.put(SRVR_TWO_WAY_SSL_KEY, properties.getProperty(
SRVR_TWO_WAY_SSL_KEY, SRVR_TWO_WAY_SSL_DEFAULT));
configsMap.put(SRVR_TWO_WAY_SSL_PORT_KEY, properties.getProperty(
@@ -765,6 +790,32 @@ public class Configuration {
return readPasswordFromFile(passwdProp, SERVER_JDBC_RCA_USER_PASSWD_DEFAULT);
}
+ public String getSinkDatabaseDriver() {
+ return properties.getProperty(SCOM_JDBC_SINK_DRIVER_KEY);
+ }
+
+ public String getSinkDatabaseUrl() {
+ return properties.getProperty(SCOM_JDBC_SINK_URL_KEY);
+ }
+
+ public boolean getSinkUseIntegratedAuth() {
+ return "true".equalsIgnoreCase(properties.getProperty(SCOM_JDBC_SINK_INT_AUTH_KEY));
+ }
+
+ public String getSinkDatabaseUser() {
+ return properties.getProperty(SCOM_JDBC_SINK_USER_NAME_KEY, SCOM_JDBC_SINK_USER_NAME_DEFAULT);
+ }
+
+ public String getSinkDatabasePassword() {
+ String passwdProp = properties.getProperty(SCOM_JDBC_SINK_USER_PASSWD_KEY);
+ if (passwdProp != null) {
+ String dbpasswd = readPasswordFromStore(passwdProp);
+ if (dbpasswd != null)
+ return dbpasswd;
+ }
+ return readPasswordFromFile(passwdProp, SCOM_JDBC_SINK_USER_PASSWD_DEFAULT);
+ }
+
private String readPasswordFromFile(String filePath, String defaultPassword) {
if (filePath == null) {
LOG.debug("DB password file not specified - using default");
@@ -1090,7 +1141,7 @@ public class Configuration {
public String getResourceDirPath() {
return properties.getProperty(RESOURCES_DIR_KEY, RESOURCES_DIR_DEFAULT);
}
-
+
public String getSharedResourcesDirPath(){
return properties.getProperty(SHARED_RESOURCES_DIR_KEY, SHARED_RESOURCES_DIR_DEFAULT);
}
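DEF_ARCHIVE_EXTENSION and DEF_ARCHIVE_CONTENT_TYPE are resolved once in a static initializer, so every consumer (ComponentService, ClientConfigResourceProvider) agrees on the archive format for the lifetime of the JVM: .zip on Windows, .tar.gz elsewhere. A self-contained sketch of the pattern, plus a hypothetical reader mirroring one of the new sink getters:

    import java.util.Properties;

    public class ArchiveDefaultsExample {
        static final String EXT;
        static final String CONTENT_TYPE;

        // Decided once at class-load time, exactly like Configuration's static block.
        static {
            if (System.getProperty("os.name").contains("Windows")) {
                EXT = ".zip";
                CONTENT_TYPE = "application/zip";
            } else {
                EXT = ".tar.gz";
                CONTENT_TYPE = "application/x-ustar";
            }
        }

        // Hypothetical equivalent of getSinkDatabaseUser() with its default.
        public static String sinkUser(Properties props) {
            return props.getProperty("scom.sink.db.username", "hadoop");
        }
    }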
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractPropertyProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractPropertyProvider.java
index 4a675c5cfb..a660d5a401 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractPropertyProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractPropertyProvider.java
@@ -303,6 +303,18 @@ public abstract class AbstractPropertyProvider extends BaseProvider implements P
}
}
+ protected PropertyInfo updatePropertyInfo(String propertyKey, String id, PropertyInfo propertyInfo) {
+ List<String> regexGroups = getRegexGroups(propertyKey, id);
+ String propertyId = propertyInfo.getPropertyId();
+ if(propertyId != null) {
+ for (String regexGroup : regexGroups) {
+ regexGroup = regexGroup.replace("/", ".");
+ propertyId = propertyId.replaceFirst(FIND_REGEX_IN_METRIC_REGEX, regexGroup);
+ }
+ }
+ return new PropertyInfo(propertyId, propertyInfo.isTemporal(), propertyInfo.isPointInTime());
+ }
+
/**
* Verify that the component metrics contains the property id.
* @param componentName Name of the component
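updatePropertyInfo rewrites a metric's property id template by substituting the regex groups captured from the requested id, normalizing '/' to '.' because the sink stores tag paths dot-separated. A self-contained sketch of the substitution; the placeholder pattern below is hypothetical and only stands in for FIND_REGEX_IN_METRIC_REGEX:

    import java.util.List;

    public class MetricIdSubstitutionExample {
        // Hypothetical stand-in for FIND_REGEX_IN_METRIC_REGEX: matches "$1", "$2", ...
        private static final String PLACEHOLDER_REGEX = "\\$\\d+";

        public static String substitute(String template, List<String> groups) {
            String id = template;
            for (String group : groups) {
                // Captured groups arrive '/'-separated; the sink's ids use dots.
                id = id.replaceFirst(PLACEHOLDER_REGEX, group.replace("/", "."));
            }
            return id;
        }
    }

With template "rpc.rpc.$1.CallQueueLength" and captured group "client/port8020", this yields "rpc.rpc.client.port8020.CallQueueLength".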
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java
index cae9200857..9660cf5ed1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java
@@ -32,6 +32,7 @@ import java.util.concurrent.ConcurrentHashMap;
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.Role;
import org.apache.ambari.server.configuration.ComponentSSLConfiguration;
+import org.apache.ambari.server.configuration.Configuration;
import org.apache.ambari.server.controller.AmbariManagementController;
import org.apache.ambari.server.controller.AmbariServer;
import org.apache.ambari.server.controller.HostRequest;
@@ -47,6 +48,9 @@ import org.apache.ambari.server.controller.jmx.JMXHostProvider;
import org.apache.ambari.server.controller.jmx.JMXPropertyProvider;
import org.apache.ambari.server.controller.metrics.MetricsHostProvider;
import org.apache.ambari.server.controller.nagios.NagiosPropertyProvider;
+import org.apache.ambari.server.controller.sql.HostInfoProvider;
+import org.apache.ambari.server.controller.sql.SQLPropertyProvider;
+import org.apache.ambari.server.controller.sql.SinkConnectionFactory;
import org.apache.ambari.server.controller.spi.NoSuchParentResourceException;
import org.apache.ambari.server.controller.spi.NoSuchResourceException;
import org.apache.ambari.server.controller.spi.Predicate;
@@ -68,12 +72,18 @@ import org.apache.ambari.server.state.State;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.net.InetAddress;
+import java.util.*;
+import java.util.Map.Entry;
+import java.util.concurrent.ConcurrentHashMap;
+import java.lang.System;

+
import com.google.inject.Inject;
/**
* An abstract provider module implementation.
*/
-public abstract class AbstractProviderModule implements ProviderModule, ResourceProviderObserver, JMXHostProvider, GangliaHostProvider, MetricsHostProvider {
+public abstract class AbstractProviderModule implements ProviderModule, ResourceProviderObserver, JMXHostProvider, GangliaHostProvider, HostInfoProvider, MetricsHostProvider {
private static final int PROPERTY_REQUEST_CONNECT_TIMEOUT = 5000;
private static final int PROPERTY_REQUEST_READ_TIMEOUT = 10000;
@@ -89,7 +99,7 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
private static final Map<Service.Type, Map<String, String[]>> serviceDesiredProperties = new EnumMap<Service.Type, Map<String, String[]>>(Service.Type.class);
private static final Map<String, Service.Type> componentServiceMap = new HashMap<String, Service.Type>();
- private static final Map<String, Map<String, String[]>> jmxDesiredProperties = new HashMap<String, Map<String,String[]>>();
+ private static final Map<String, Map<String, String[]>> jmxDesiredProperties = new HashMap<String, Map<String, String[]>>();
private volatile Map<String, String> clusterCoreSiteConfigVersionMap = new HashMap<String, String>();
private volatile Map<String, String> clusterJmxProtocolMap = new HashMap<String, String>();
@@ -110,30 +120,30 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
componentServiceMap.put("HISTORYSERVER", Service.Type.MAPREDUCE2);
Map<String, String[]> initPropMap = new HashMap<String, String[]>();
- initPropMap.put("NAMENODE", new String[] {"dfs.http.address", "dfs.namenode.http-address"});
- initPropMap.put("DATANODE", new String[] {"dfs.datanode.http.address"});
+ initPropMap.put("NAMENODE", new String[]{"dfs.http.address", "dfs.namenode.http-address"});
+ initPropMap.put("DATANODE", new String[]{"dfs.datanode.http.address"});
serviceDesiredProperties.put(Service.Type.HDFS, initPropMap);
initPropMap = new HashMap<String, String[]>();
- initPropMap.put("JOBTRACKER", new String[] {"mapred.job.tracker.http.address"});
- initPropMap.put("TASKTRACKER", new String[] {"mapred.task.tracker.http.address"});
+ initPropMap.put("JOBTRACKER", new String[]{"mapred.job.tracker.http.address"});
+ initPropMap.put("TASKTRACKER", new String[]{"mapred.task.tracker.http.address"});
serviceDesiredProperties.put(Service.Type.MAPREDUCE, initPropMap);
initPropMap = new HashMap<String, String[]>();
- initPropMap.put("HBASE_MASTER", new String[] {"hbase.master.info.port"});
+ initPropMap.put("HBASE_MASTER", new String[]{"hbase.master.info.port"});
serviceDesiredProperties.put(Service.Type.HBASE, initPropMap);
initPropMap = new HashMap<String, String[]>();
- initPropMap.put("RESOURCEMANAGER", new String[] {"yarn.resourcemanager.webapp.address"});
- initPropMap.put("NODEMANAGER", new String[] {"yarn.nodemanager.webapp.address"});
+ initPropMap.put("RESOURCEMANAGER", new String[]{"yarn.resourcemanager.webapp.address"});
+ initPropMap.put("NODEMANAGER", new String[]{"yarn.nodemanager.webapp.address"});
serviceDesiredProperties.put(Service.Type.YARN, initPropMap);
initPropMap = new HashMap<String, String[]>();
- initPropMap.put("HISTORYSERVER", new String[] {"mapreduce.jobhistory.webapp.address"});
+ initPropMap.put("HISTORYSERVER", new String[]{"mapreduce.jobhistory.webapp.address"});
serviceDesiredProperties.put(Service.Type.MAPREDUCE2, initPropMap);
initPropMap = new HashMap<String, String[]>();
- initPropMap.put("NAMENODE", new String[] {"hadoop.ssl.enabled"});
+ initPropMap.put("NAMENODE", new String[]{"hadoop.ssl.enabled"});
jmxDesiredProperties.put("NAMENODE", initPropMap);
}
@@ -145,7 +155,7 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
/**
* The map of lists of property providers.
*/
- private final Map<Resource.Type,List<PropertyProvider>> propertyProviders = new HashMap<Resource.Type, List<PropertyProvider>>();
+ private final Map<Resource.Type, List<PropertyProvider>> propertyProviders = new HashMap<Resource.Type, List<PropertyProvider>>();
@Inject
AmbariManagementController managementController;
@@ -164,7 +174,7 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
* JMX ports read from the configs
*/
private final Map<String, Map<String, String>> jmxPortMap =
- new HashMap<String, Map<String, String>>();
+ new HashMap<String, Map<String, String>>();
private volatile boolean initialized = false;
@@ -266,13 +276,13 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
// performance with a ConcurrentHashMap and maybe get default/existing
// ports for a few calls.
if (!currVersion.equals(oldVersion) ||
- !clusterJmxPorts.containsKey(componentName)) {
+ !clusterJmxPorts.containsKey(componentName)) {
serviceConfigVersions.put(service, currVersion);
Map<String, String> portMap = getDesiredConfigMap(clusterName,
- currVersion, serviceConfigTypes.get(service),
- serviceDesiredProperties.get(service));
+ currVersion, serviceConfigTypes.get(service),
+ serviceDesiredProperties.get(service));
for (Entry<String, String> entry : portMap.entrySet()) {
// portString will be null if the property defined for the component doesn't exist
@@ -292,43 +302,82 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
return clusterJmxPorts.get(componentName);
}
- /**Post process property value. If value has one ore some substrings
+ /**
+ * Post-processes a property value. If the value contains one or more
* substrings of the form "${...}", each such substring is replaced,
* recursively, with the matching property from the current properties map.
- * @param key - properties name
- * @param value - properties value
+ *
+ * @param key - properties name
+ * @param value - properties value
* @param properties - map with properties
*/
private String postProcessPropertyValue(String key, String value, Map<String, String> properties, Set<String> prevProps) {
- if (value != null && key != null && value.contains("${")){
- if (prevProps == null) {
- prevProps = new HashSet<String>();
- }
- if (prevProps.contains(key)){
- return value;
- }
- prevProps.add(key);
- String refValueString = value;
- Map<String, String> refMap = new HashMap<String, String>();
- while(refValueString.contains("${")) {
- int startValueRef = refValueString.indexOf("${") + 2;
- int endValueRef = refValueString.indexOf('}');
- String valueRef = refValueString.substring(startValueRef, endValueRef);
- refValueString = refValueString.substring(endValueRef+1);
- String trueValue = postProcessPropertyValue(valueRef, properties.get(valueRef), properties, prevProps);
- if (trueValue != null){
- refMap.put("${"+valueRef+ '}', trueValue);
- }
- }
- for (Entry<String, String> entry : refMap.entrySet()){
- refValueString = entry.getValue();
- value = value.replace(entry.getKey(), refValueString);
- }
- properties.put(key, value);
+ if (value != null && key != null && value.contains("${")) {
+ if (prevProps == null) {
+ prevProps = new HashSet<String>();
+ }
+ if (prevProps.contains(key)) {
+ return value;
+ }
+ prevProps.add(key);
+ String refValueString = value;
+ Map<String, String> refMap = new HashMap<String, String>();
+ while (refValueString.contains("${")) {
+ int startValueRef = refValueString.indexOf("${") + 2;
+ int endValueRef = refValueString.indexOf('}');
+ String valueRef = refValueString.substring(startValueRef, endValueRef);
+ refValueString = refValueString.substring(endValueRef + 1);
+ String trueValue = postProcessPropertyValue(valueRef, properties.get(valueRef), properties, prevProps);
+ if (trueValue != null) {
+ refMap.put("${" + valueRef + '}', trueValue);
+ }
+ }
+ for (Entry<String, String> entry : refMap.entrySet()) {
+ refValueString = entry.getValue();
+ value = value.replace(entry.getKey(), refValueString);
+ }
+ properties.put(key, value);
}
return value;
}
+ // ----- HostInfoProvider -----------------------------------------------
+
+ @Override
+ public String getHostName(String id) throws SystemException {
+ return getClusterNodeName(id);
+ }
+
+ @Override
+ public String getHostAddress(String id) throws SystemException {
+ return getClusterHostAddress(id);
+ }
+
+
+ // get the hostname
+ private String getClusterNodeName(String hostname) throws SystemException {
+ try {
+ if (hostname.equalsIgnoreCase("localhost")) {
+ return InetAddress.getLocalHost().getCanonicalHostName();
+ }
+ return InetAddress.getByName(hostname).getCanonicalHostName();
+ } catch (Exception e) {
+ throw new SystemException("Error getting hostname.", e);
+ }
+ }
+
+ // get the host ip address
+ private String getClusterHostAddress(String hostname) throws SystemException {
+ try {
+ if (hostname.equalsIgnoreCase("localhost")) {
+ return InetAddress.getLocalHost().getHostAddress();
+ }
+ return InetAddress.getByName(hostname).getHostAddress();
+ } catch (Exception e) {
+ throw new SystemException("Error getting ip address.", e);
+ }
+ }
+
// ----- GangliaHostProvider -----------------------------------------------
@Override
@@ -376,9 +425,9 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
final String gangliaCollectorHostName = getGangliaCollectorHostName(clusterName);
ServiceComponentHostRequest componentRequest = new ServiceComponentHostRequest(clusterName, "GANGLIA",
- Role.GANGLIA_SERVER.name(),
- gangliaCollectorHostName,
- null);
+ Role.GANGLIA_SERVER.name(),
+ gangliaCollectorHostName,
+ null);
Set<ServiceComponentHostResponse> hostComponents =
managementController.getHostComponents(Collections.singleton(componentRequest));
@@ -403,7 +452,7 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
ResourceProvider resourceProvider = createResourceProvider(type);
if (resourceProvider instanceof ObservableResourceProvider) {
- ((ObservableResourceProvider)resourceProvider).addObserver(this);
+ ((ObservableResourceProvider) resourceProvider).addObserver(this);
}
putResourceProvider(type, resourceProvider);
@@ -468,15 +517,23 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
null,
PropertyHelper.getPropertyId("ServiceComponentInfo", "component_name"),
PropertyHelper.getPropertyId("ServiceComponentInfo", "state"));
-
- PropertyProvider gpp = createGangliaComponentPropertyProvider(
- type,
- streamProvider,
- ComponentSSLConfiguration.instance(),
- this,
- PropertyHelper.getPropertyId("ServiceComponentInfo", "cluster_name"),
- PropertyHelper.getPropertyId("ServiceComponentInfo", "component_name"));
-
+ PropertyProvider gpp = null;
+ if (System.getProperty("os.name").contains("Windows")) {
+ gpp = createSQLComponentPropertyProvider(
+ type,
+ this,
+ PropertyHelper.getPropertyId("ServiceComponentInfo", "cluster_name"),
+ PropertyHelper.getPropertyId("ServiceComponentInfo", "component_name"),
+ PropertyHelper.getPropertyId("ServiceComponentInfo", "service_name"));
+ } else {
+ gpp = createGangliaComponentPropertyProvider(
+ type,
+ streamProvider,
+ ComponentSSLConfiguration.instance(),
+ this,
+ PropertyHelper.getPropertyId("ServiceComponentInfo", "cluster_name"),
+ PropertyHelper.getPropertyId("ServiceComponentInfo", "component_name"));
+ }
providers.add(new StackDefinedPropertyProvider(
type,
this,
@@ -489,8 +546,8 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
PropertyHelper.getPropertyId("ServiceComponentInfo", "state"),
jpp,
gpp));
- }
- break;
+ }
+ break;
case HostComponent: {
// TODO as we fill out stack metric definitions, these can be phased out
PropertyProvider jpp = createJMXPropertyProvider(
@@ -502,16 +559,25 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
PropertyHelper.getPropertyId("HostRoles", "host_name"),
PropertyHelper.getPropertyId("HostRoles", "component_name"),
PropertyHelper.getPropertyId("HostRoles", "state"));
-
- PropertyProvider gpp = createGangliaHostComponentPropertyProvider(
- type,
- streamProvider,
- ComponentSSLConfiguration.instance(),
- this,
- PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
- PropertyHelper.getPropertyId("HostRoles", "host_name"),
- PropertyHelper.getPropertyId("HostRoles", "component_name"));
-
+ PropertyProvider gpp = null;
+ if (System.getProperty("os.name").contains("Windows")) {
+ gpp = createSQLHostComponentPropertyProvider(
+ type,
+ this,
+ PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
+ PropertyHelper.getPropertyId("HostRoles", "host_name"),
+ PropertyHelper.getPropertyId("HostRoles", "component_name"),
+ PropertyHelper.getPropertyId("HostRoles", "service_name"));
+ } else {
+ gpp = createGangliaHostComponentPropertyProvider(
+ type,
+ streamProvider,
+ ComponentSSLConfiguration.instance(),
+ this,
+ PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
+ PropertyHelper.getPropertyId("HostRoles", "host_name"),
+ PropertyHelper.getPropertyId("HostRoles", "component_name"));
+ }
providers.add(new StackDefinedPropertyProvider(
type,
this,
@@ -524,8 +590,8 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
PropertyHelper.getPropertyId("HostRoles", "state"),
jpp,
gpp));
- }
- break;
+ }
+ break;
default:
break;
}
@@ -533,7 +599,7 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
putPropertyProviders(type, providers);
}
- private void checkInit() throws SystemException{
+ private void checkInit() throws SystemException {
if (!initialized) {
synchronized (this) {
if (!initialized) {
@@ -568,7 +634,7 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
jmxPortMap.clear();
Set<Resource> clusters = provider.getResources(request, null);
- clusterHostComponentMap = new HashMap<String, Map<String, String>>();
+ clusterHostComponentMap = new HashMap<String, Map<String, String>>();
clusterGangliaCollectorMap = new HashMap<String, String>();
for (Resource cluster : clusters) {
@@ -582,9 +648,9 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID);
Predicate predicate = new PredicateBuilder().property(HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID).
- equals(clusterName).toPredicate();
+ equals(clusterName).toPredicate();
- Set<Resource> hostComponents = provider.getResources(request, predicate);
+ Set<Resource> hostComponents = provider.getResources(request, predicate);
Map<String, String> hostComponentMap = clusterHostComponentMap.get(clusterName);
if (hostComponentMap == null) {
@@ -594,7 +660,7 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
for (Resource hostComponent : hostComponents) {
String componentName = (String) hostComponent.getPropertyValue(HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID);
- String hostName = (String) hostComponent.getPropertyValue(HOST_COMPONENT_HOST_NAME_PROPERTY_ID);
+ String hostName = (String) hostComponent.getPropertyValue(HOST_COMPONENT_HOST_NAME_PROPERTY_ID);
hostComponentMap.put(componentName, hostName);
@@ -624,7 +690,7 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
private String getPortString(String value) {
return value != null && value.contains(":") ? value.substring
- (value.lastIndexOf(":") + 1, value.length()) : value;
+ (value.lastIndexOf(":") + 1, value.length()) : value;
}
private String getDesiredConfigVersion(String clusterName,
@@ -634,10 +700,10 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
// Get config version tag
ResourceProvider clusterResourceProvider = getResourceProvider(Resource
- .Type.Cluster);
+ .Type.Cluster);
Predicate basePredicate = new PredicateBuilder().property
- (ClusterResourceProvider.CLUSTER_NAME_PROPERTY_ID).equals(clusterName)
- .toPredicate();
+ (ClusterResourceProvider.CLUSTER_NAME_PROPERTY_ID).equals(clusterName)
+ .toPredicate();
Set<Resource> clusterResource = null;
try {
@@ -661,8 +727,8 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
if (clusterResource != null) {
for (Resource resource : clusterResource) {
Map<String, Object> configs =
- resource.getPropertiesMap().get(ClusterResourceProvider
- .CLUSTER_DESIRED_CONFIGS_PROPERTY_ID);
+ resource.getPropertiesMap().get(ClusterResourceProvider
+ .CLUSTER_DESIRED_CONFIGS_PROPERTY_ID);
if (configs != null) {
DesiredConfig config = (DesiredConfig) configs.get(configType);
if (config != null) {
@@ -675,21 +741,21 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
}
private Map<String, String> getDesiredConfigMap(String clusterName,
- String versionTag, String configType, Map<String, String[]> keys) throws
- NoSuchParentResourceException, UnsupportedPropertyException,
- SystemException {
+ String versionTag, String configType, Map<String, String[]> keys) throws
+ NoSuchParentResourceException, UnsupportedPropertyException,
+ SystemException {
// Get desired configs based on the tag
ResourceProvider configResourceProvider = getResourceProvider(Resource.Type.Configuration);
Predicate configPredicate = new PredicateBuilder().property
- (ConfigurationResourceProvider.CONFIGURATION_CLUSTER_NAME_PROPERTY_ID).equals(clusterName).and()
- .property(ConfigurationResourceProvider.CONFIGURATION_CONFIG_TYPE_PROPERTY_ID).equals(configType).and()
- .property(ConfigurationResourceProvider.CONFIGURATION_CONFIG_TAG_PROPERTY_ID).equals(versionTag).toPredicate();
+ (ConfigurationResourceProvider.CONFIGURATION_CLUSTER_NAME_PROPERTY_ID).equals(clusterName).and()
+ .property(ConfigurationResourceProvider.CONFIGURATION_CONFIG_TYPE_PROPERTY_ID).equals(configType).and()
+ .property(ConfigurationResourceProvider.CONFIGURATION_CONFIG_TAG_PROPERTY_ID).equals(versionTag).toPredicate();
Set<Resource> configResources;
try {
configResources = configResourceProvider.getResources
- (PropertyHelper.getReadRequest(ConfigurationResourceProvider.CONFIGURATION_CLUSTER_NAME_PROPERTY_ID,
- ConfigurationResourceProvider.CONFIGURATION_CONFIG_TYPE_PROPERTY_ID,
- ConfigurationResourceProvider.CONFIGURATION_CONFIG_TAG_PROPERTY_ID), configPredicate);
+ (PropertyHelper.getReadRequest(ConfigurationResourceProvider.CONFIGURATION_CLUSTER_NAME_PROPERTY_ID,
+ ConfigurationResourceProvider.CONFIGURATION_CONFIG_TYPE_PROPERTY_ID,
+ ConfigurationResourceProvider.CONFIGURATION_CONFIG_TAG_PROPERTY_ID), configPredicate);
} catch (NoSuchResourceException e) {
LOG.info("Resource for the desired config not found. " + e);
return Collections.emptyMap();
@@ -698,8 +764,8 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
Map<String, String> mConfigs = new HashMap<String, String>();
if (configResources != null) {
for (Resource res : configResources) {
- Map<String, String> evaluatedProperties = null;
- for (Entry<String,String[]> entry : keys.entrySet()) {
+ Map<String, String> evaluatedProperties = null;
+ for (Entry<String, String[]> entry : keys.entrySet()) {
String propName = null;
String value = null;
@@ -713,15 +779,15 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
}
if (value != null && value.contains("${")) {
- if (evaluatedProperties == null){
+ if (evaluatedProperties == null) {
evaluatedProperties = new HashMap<String, String>();
Map<String, Object> properties = res.getPropertiesMap().get(PROPERTIES_CATEGORY);
for (Map.Entry<String, Object> subentry : properties.entrySet()) {
String keyString = subentry.getKey();
Object object = subentry.getValue();
String valueString;
- if (object != null && object instanceof String){
- valueString = (String)object;
+ if (object != null && object instanceof String) {
+ valueString = (String) object;
evaluatedProperties.put(keyString, valueString);
postProcessPropertyValue(keyString, valueString, evaluatedProperties, null);
}
@@ -751,58 +817,97 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
return new JMXPropertyProvider(PropertyHelper.getJMXPropertyIds(type), streamProvider,
jmxHostProvider, metricsHostProvider, clusterNamePropertyId, hostNamePropertyId,
- componentNamePropertyId, statePropertyId);
+ componentNamePropertyId, statePropertyId);
}
/**
* Create the Ganglia report property provider for the given type.
*/
- private PropertyProvider createGangliaReportPropertyProvider( Resource.Type type, StreamProvider streamProvider,
- ComponentSSLConfiguration configuration,
- GangliaHostProvider hostProvider,
- String clusterNamePropertyId) {
+ private PropertyProvider createGangliaReportPropertyProvider(Resource.Type type, StreamProvider streamProvider,
+ ComponentSSLConfiguration configuration,
+ GangliaHostProvider hostProvider,
+ String clusterNamePropertyId) {
return new GangliaReportPropertyProvider(PropertyHelper.getGangliaPropertyIds(type), streamProvider,
- configuration, hostProvider, clusterNamePropertyId);
+ configuration, hostProvider, clusterNamePropertyId);
}
/**
* Create the Ganglia host property provider for the given type.
*/
- private PropertyProvider createGangliaHostPropertyProvider( Resource.Type type, StreamProvider streamProvider,
- ComponentSSLConfiguration configuration,
- GangliaHostProvider hostProvider,
- String clusterNamePropertyId,
- String hostNamePropertyId) {
+ private PropertyProvider createGangliaHostPropertyProvider(Resource.Type type, StreamProvider streamProvider,
+ ComponentSSLConfiguration configuration,
+ GangliaHostProvider hostProvider,
+ String clusterNamePropertyId,
+ String hostNamePropertyId) {
return new GangliaHostPropertyProvider(PropertyHelper.getGangliaPropertyIds(type), streamProvider,
- configuration, hostProvider, clusterNamePropertyId, hostNamePropertyId);
+ configuration, hostProvider, clusterNamePropertyId, hostNamePropertyId);
}
/**
* Create the Ganglia component property provider for the given type.
*/
- private PropertyProvider createGangliaComponentPropertyProvider( Resource.Type type, StreamProvider streamProvider,
- ComponentSSLConfiguration configuration,
- GangliaHostProvider hostProvider,
- String clusterNamePropertyId,
- String componentNamePropertyId) {
+ private PropertyProvider createGangliaComponentPropertyProvider(Resource.Type type, StreamProvider streamProvider,
+ ComponentSSLConfiguration configuration,
+ GangliaHostProvider hostProvider,
+ String clusterNamePropertyId,
+ String componentNamePropertyId) {
return new GangliaComponentPropertyProvider(PropertyHelper.getGangliaPropertyIds(type),
- streamProvider, configuration, hostProvider, clusterNamePropertyId, componentNamePropertyId);
+ streamProvider, configuration, hostProvider, clusterNamePropertyId, componentNamePropertyId);
}
/**
* Create the Ganglia host component property provider for the given type.
*/
- private PropertyProvider createGangliaHostComponentPropertyProvider( Resource.Type type, StreamProvider streamProvider,
- ComponentSSLConfiguration configuration,
- GangliaHostProvider hostProvider,
- String clusterNamePropertyId,
- String hostNamePropertyId,
- String componentNamePropertyId) {
+ private PropertyProvider createGangliaHostComponentPropertyProvider(Resource.Type type, StreamProvider streamProvider,
+ ComponentSSLConfiguration configuration,
+ GangliaHostProvider hostProvider,
+ String clusterNamePropertyId,
+ String hostNamePropertyId,
+ String componentNamePropertyId) {
return new GangliaHostComponentPropertyProvider(PropertyHelper.getGangliaPropertyIds(type), streamProvider,
- configuration, hostProvider, clusterNamePropertyId, hostNamePropertyId, componentNamePropertyId);
+ configuration, hostProvider, clusterNamePropertyId, hostNamePropertyId, componentNamePropertyId);
+ }
+
+ /**
+ * Create the SQL component property provider for the given type.
+ */
+ private PropertyProvider createSQLComponentPropertyProvider(Resource.Type type,
+ HostInfoProvider hostProvider,
+ String clusterNamePropertyId,
+ String componentNamePropertyId,
+ String serviceNamePropertyId) {
+ return new SQLPropertyProvider(
+ PropertyHelper.getSQLServerPropertyIds(type),
+ hostProvider,
+ clusterNamePropertyId,
+ null,
+ componentNamePropertyId,
+ serviceNamePropertyId,
+ SinkConnectionFactory.instance());
+ }
+
+
+ /**
+ * Create the SQL host component property provider for the given type.
+ */
+ private PropertyProvider createSQLHostComponentPropertyProvider(Resource.Type type,
+ HostInfoProvider hostProvider,
+ String clusterNamePropertyId,
+ String hostNamePropertyId,
+ String componentNamePropertyId,
+ String serviceNamePropertyId) {
+
+ return new SQLPropertyProvider(
+ PropertyHelper.getSQLServerPropertyIds(type),
+ hostProvider,
+ clusterNamePropertyId,
+ hostNamePropertyId,
+ componentNamePropertyId,
+ serviceNamePropertyId,
+ SinkConnectionFactory.instance());
}
@Override
@@ -825,7 +930,7 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
} catch (Exception e) {
LOG.info("Exception while detecting JMX protocol for clusterName = " + clusterName +
- ", componentName = " + componentName, e);
+ ", componentName = " + componentName, e);
LOG.info("Defaulting JMX to HTTP protocol for for clusterName = " + clusterName +
", componentName = " + componentName +
componentName);
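This is the core of the Windows metrics path: when os.name contains "Windows", the Ganglia-backed providers are swapped for SQLPropertyProvider instances reading from the SCOM sink database, and the module itself now acts as the HostInfoProvider those providers consult. Host resolution special-cases "localhost" so that rows keyed by the machine's real name still match; a standalone sketch:

    import java.net.InetAddress;
    import java.net.UnknownHostException;

    public class HostResolutionExample {
        public static String canonicalName(String host) throws UnknownHostException {
            // "localhost" is widened to the machine's canonical name so it can
            // be compared against node names recorded in the metrics database.
            InetAddress addr = host.equalsIgnoreCase("localhost")
                    ? InetAddress.getLocalHost()
                    : InetAddress.getByName(host);
            return addr.getCanonicalHostName();
        }
    }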
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
index d90047095c..d13b53eef3 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
@@ -123,6 +123,7 @@ public class ClientConfigResourceProvider extends AbstractControllerResourceProv
Configuration configs = new Configuration();
String TMP_PATH = configs.getProperty(Configuration.SERVER_TMP_DIR_KEY);
+ String pythonCmd = configs.getProperty(Configuration.AMBARI_PYTHON_WRAP_KEY);
AmbariManagementController managementController = getManagementController();
ConfigHelper configHelper = managementController.getConfigHelper();
Cluster cluster = null;
@@ -285,7 +286,7 @@ public class ClientConfigResourceProvider extends AbstractControllerResourceProv
commandParams.put("xml_configs_list", xmlConfigs);
commandParams.put("env_configs_list", envConfigs);
commandParams.put("properties_configs_list", propertiesConfigs);
- commandParams.put("output_file", componentName + "-configs.tar.gz");
+ commandParams.put("output_file", componentName + "-configs" + Configuration.DEF_ARCHIVE_EXTENSION);
Map<String, Object> jsonContent = new TreeMap<String, Object>();
jsonContent.put("configurations", configurations);
@@ -316,7 +317,7 @@ public class ClientConfigResourceProvider extends AbstractControllerResourceProv
throw new SystemException("Failed to write configurations to json file ", e);
}
- String cmd = "ambari-python-wrap " + commandScriptAbsolute + " generate_configs " + jsonFileName.getAbsolutePath() + " " +
+ String cmd = pythonCmd + " " + commandScriptAbsolute + " generate_configs " + jsonFileName.getAbsolutePath() + " " +
packageFolderAbsolute + " " + TMP_PATH + File.separator + "structured-out.json" + " INFO " + TMP_PATH;
try {
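The launcher for generate_configs is no longer hard-coded to ambari-python-wrap; it comes from the ambari.python.wrap property (defaulting to ambari-python-wrap), which lets a Windows install point it at a python interpreter directly. A sketch of the command assembly with placeholder paths:

    import java.io.File;

    public class GenerateConfigsCommandExample {
        public static String buildCommand(String pythonCmd, String script,
                                          String jsonFile, String packageDir,
                                          String tmpPath) {
            // Mirrors the invocation above; every path argument is a placeholder.
            return pythonCmd + " " + script + " generate_configs " + jsonFile + " "
                    + packageDir + " " + tmpPath + File.separator
                    + "structured-out.json INFO " + tmpPath;
        }
    }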
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java
index 20bd60ddef..dea90f674e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java
@@ -481,7 +481,7 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide
}
if (serviceName == null
|| serviceName.isEmpty()) {
- throw new AmbariException("Could not find service for component"
+ throw new ObjectNotFoundException("Could not find service for component"
+ ", componentName=" + request.getComponentName()
+ ", clusterName=" + cluster.getClusterName()
+ ", stackInfo=" + stackId.getStackId());
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/sql/HostInfoProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/sql/HostInfoProvider.java
new file mode 100644
index 0000000000..08bbe71871
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/sql/HostInfoProvider.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.controller.sql;
+
+import org.apache.ambari.server.controller.spi.SystemException;
+
+/**
+ * Provider of host information.
+ */
+public interface HostInfoProvider {
+
+ /**
+ * Get the host name for the given cluster name and component name.
+ *
+ * @param clusterName the cluster name
+ * @param componentName the component name
+ *
+ * @return the host name
+ *
+ * @throws SystemException if unable to get the host name
+ */
+ public String getHostName(String clusterName, String componentName)
+ throws SystemException;
+
+ /**
+ * Get the host name.
+ *
+ * @param id the host identifier
+ *
+ * @return the host name
+ *
+ * @throws SystemException if unable to get the host name
+ */
+ public String getHostName(String id)
+ throws SystemException;
+
+ /**
+ * Get the host ip address.
+ *
+ * @param id the host identifier
+ *
+ * @return the host ip address
+ *
+ * @throws SystemException if unable to get the host address
+ */
+ public String getHostAddress(String id)
+ throws SystemException;
+}
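The interface is deliberately small so that both the SQL metrics path and tests can supply trivial implementations (AbstractProviderModule provides the production one). A minimal stub under the assumption of a single fixed host:

    import org.apache.ambari.server.controller.spi.SystemException;
    import org.apache.ambari.server.controller.sql.HostInfoProvider;

    public class FixedHostInfoProvider implements HostInfoProvider {
        @Override
        public String getHostName(String clusterName, String componentName) throws SystemException {
            return "host1.example.com"; // hypothetical fixed host
        }

        @Override
        public String getHostName(String id) throws SystemException {
            return id; // treat the identifier as the name itself
        }

        @Override
        public String getHostAddress(String id) throws SystemException {
            return "127.0.0.1"; // hypothetical fixed address
        }
    }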
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/sql/SQLPropertyProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/sql/SQLPropertyProvider.java
new file mode 100644
index 0000000000..30f82fe68e
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/sql/SQLPropertyProvider.java
@@ -0,0 +1,572 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller.sql;
+
+import org.apache.ambari.server.controller.internal.AbstractPropertyProvider;
+import org.apache.ambari.server.controller.internal.PropertyInfo;
+import org.apache.ambari.server.controller.jdbc.ConnectionFactory;
+import org.apache.ambari.server.controller.spi.Predicate;
+import org.apache.ambari.server.controller.spi.Request;
+import org.apache.ambari.server.controller.spi.Resource;
+import org.apache.ambari.server.controller.spi.SystemException;
+import org.apache.ambari.server.controller.spi.TemporalInfo;
+import org.apache.commons.lang.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.Serializable;
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.text.NumberFormat;
+import java.text.ParsePosition;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * SQL based property/metrics provider required for ambari-scom.
+ */
+public class SQLPropertyProvider extends AbstractPropertyProvider {
+
+ private final HostInfoProvider hostProvider;
+
+ private final String clusterNamePropertyId;
+
+ private final String hostNamePropertyId;
+
+ private final String componentNamePropertyId;
+
+ private final String serviceNamePropertyId;
+
+ private final ConnectionFactory connectionFactory;
+
+
+ // ----- Constants ---------------------------------------------------------
+
+ private static final String GET_METRICS_STATEMENT =
+ "SELECT s.RecordTypeContext, s.RecordTypeName, s.TagPairs, s.NodeName, s.ServiceName, mn.Name AS MetricName, s.RecordTimeStamp, mp.MetricValue\n" +
+ "FROM HadoopMetrics.dbo.MetricPair mp\n" +
+ " INNER JOIN (\n" +
+ " SELECT mr.RecordID AS RecordID, mr.RecordTimeStamp AS RecordTimeStamp, rt.Context AS RecordTypeContext, rt.Name AS RecordTypeName, ts.TagPairs AS TagPairs, nd.Name AS NodeName, sr.Name AS ServiceName\n" +
+ " FROM HadoopMetrics.dbo.MetricRecord mr\n" +
+ " INNER JOIN HadoopMetrics.dbo.RecordType rt ON (mr.RecordTypeId = rt.RecordTypeId)\n" +
+ " INNER JOIN HadoopMetrics.dbo.TagSet ts ON (mr.TagSetID = ts.TagSetID)\n" +
+ " INNER JOIN HadoopMetrics.dbo.Node nd ON (mr.NodeID = nd.NodeID)\n" +
+ " INNER JOIN HadoopMetrics.dbo.Service sr ON (mr.ServiceID = sr.ServiceID)\n" +
+ " WHERE rt.Context in (%s)\n" +
+ " AND rt.Name in (%s)\n" +
+ " AND (ts.TagPairs LIKE %s)\n" +
+ " AND (nd.Name in (%s))\n" +
+ " AND (sr.Name in (%s))\n" +
+ " AND mr.RecordTimestamp >= %d\n" +
+ " AND mr.RecordTimestamp <= %d\n" +
+ " ) s ON (mp.RecordID = s.RecordID)\n" +
+ " INNER JOIN HadoopMetrics.dbo.MetricName mn ON (mp.MetricID = mn.MetricID)\n" +
+ "WHERE (mn.Name in (%s))";
+
+ protected final static Logger LOG = LoggerFactory.getLogger(SQLPropertyProvider.class);
+
+
+ // ----- Constructors ------------------------------------------------------
+
+ public SQLPropertyProvider(
+ Map<String, Map<String, PropertyInfo>> componentPropertyInfoMap,
+ HostInfoProvider hostProvider,
+ String clusterNamePropertyId,
+ String hostNamePropertyId,
+ String componentNamePropertyId,
+ String serviceNamePropertyId,
+ ConnectionFactory connectionFactory) {
+ super(componentPropertyInfoMap);
+ this.hostProvider = hostProvider;
+ this.clusterNamePropertyId = clusterNamePropertyId;
+ this.hostNamePropertyId = hostNamePropertyId;
+ this.componentNamePropertyId = componentNamePropertyId;
+ this.serviceNamePropertyId = serviceNamePropertyId;
+ this.connectionFactory = connectionFactory;
+ }
+
+
+ // ----- PropertyProvider --------------------------------------------------
+
+ @Override
+ public Set<Resource> populateResources(Set<Resource> resources, Request request, Predicate predicate)
+ throws SystemException {
+ Set<Resource> keepers = new HashSet<Resource>();
+ try {
+ Connection connection = connectionFactory.getConnection();
+ try {
+ Statement statement = connection.createStatement();
+ try {
+ for (Resource resource : resources) {
+ if (populateResource(resource, request, predicate, statement)) {
+ keepers.add(resource);
+ }
+ }
+ } finally {
+ statement.close();
+ }
+ } finally {
+ connection.close();
+ }
+ } catch (SQLException e) {
+ if (LOG.isErrorEnabled()) {
+ LOG.error("Error during populateResources call.");
+ LOG.debug("Error during populateResources call : caught exception", e);
+ }
+ }
+ return keepers;
+ }
+
+
+ // ----- helper methods ----------------------------------------------------
+
+ // Populate the given resource
+ private boolean populateResource(Resource resource, Request request, Predicate predicate, Statement statement) throws SystemException {
+
+ Set<String> ids = getRequestPropertyIds(request, predicate);
+ if (ids.isEmpty()) {
+ // no properties requested ... nothing to do.
+ return true;
+ }
+
+ String componentName = (String) resource.getPropertyValue(componentNamePropertyId);
+ String serviceName = (String) resource.getPropertyValue(serviceNamePropertyId);
+
+ if (getComponentMetrics().get(componentName) == null) {
+ // no metrics defined for the given component ... nothing to do.
+ return true;
+ }
+
+ String clusterName = (String) resource.getPropertyValue(clusterNamePropertyId);
+ String hostName = getHost(resource, clusterName, componentName);
+
+ if (hostName == null) {
+ throw new SystemException(
+ "Unable to get metrics. No host name for " + componentName, null);
+ }
+
+ Set<MetricDefinition> metricsDefinitionSet = new HashSet<MetricDefinition>();
+ for (String id : ids) {
+ Map<String, PropertyInfo> propertyInfoMap = getPropertyInfoMap(componentName, id);
+
+ for (Map.Entry<String, PropertyInfo> entry : propertyInfoMap.entrySet()) {
+ String propertyKey = entry.getKey();
+ PropertyInfo propertyInfo = entry.getValue();
+ if (containsArguments(propertyKey)) {
+ propertyInfo = updatePropertyInfo(propertyKey, id, propertyInfo);
+ }
+
+ String propertyId = propertyInfo.getPropertyId();
+ TemporalInfo temporalInfo = request.getTemporalInfo(id);
+
+ if ((propertyInfo.isPointInTime() && temporalInfo == null) ||
+ (propertyInfo.isTemporal() && temporalInfo != null)) {
+
+ long startTime;
+ long endTime;
+
+ if (temporalInfo != null) {
+ Long endTimeSeconds = temporalInfo.getEndTime();
+
+ endTime = endTimeSeconds != -1 ? endTimeSeconds * 1000 : Long.MAX_VALUE;
+ startTime = temporalInfo.getStartTime() * 1000;
+ } else {
+ startTime = 0L;
+ endTime = Long.MAX_VALUE;
+ }
+
+ String category = "";
+ String recordTypeContext = "";
+ String recordTypeName = "";
+ String metricName = "";
+ String tagPairsPattern = ",";
+ int dotIndex = propertyId.lastIndexOf('.');
+ if (dotIndex != -1) {
+ category = propertyId.substring(0, dotIndex);
+ metricName = propertyId.substring(dotIndex + 1);
+ }
+ String[] parts = category.split("\\.");
+ if (parts.length >= 2) {
+ recordTypeContext = parts[0];
+ recordTypeName = parts[1];
+ if (containsArguments(propertyKey) && parts.length > 2) {
+ tagPairsPattern = StringUtils.join(Arrays.copyOfRange(parts, 2, parts.length), ".");
+ }
+ metricsDefinitionSet.add(
+ new MetricDefinition(
+ startTime,
+ endTime,
+ recordTypeContext,
+ recordTypeName,
+ tagPairsPattern,
+ metricName,
+ serviceName != null && serviceName.toLowerCase().equals("hbase") ? serviceName.toLowerCase() : componentName.toLowerCase(),
+ hostName,
+ propertyKey,
+ id,
+ temporalInfo)
+ );
+ } else {
+ if (LOG.isWarnEnabled()) {
+ LOG.warn("Can't get metrics for " + id + " : " + propertyId);
+ }
+ }
+ }
+ }
+ }
+
+ Map<MetricDefinition, List<DataPoint>> results = getMetric(metricsDefinitionSet, statement);
+
+ for (MetricDefinition metricDefinition : metricsDefinitionSet) {
+ List<DataPoint> dataPoints = results.containsKey(metricDefinition) ? results.get(metricDefinition) : new ArrayList<DataPoint>();
+ TemporalInfo temporalInfo = metricDefinition.getTemporalInfo();
+ String propertyKey = metricDefinition.getPropertyKey();
+ String requestedPropertyKey = metricDefinition.getRequestedPropertyKey();
+ if (dataPoints != null) {
+ if (temporalInfo == null) {
+ // return the value of the last data point
+ int length = dataPoints.size();
+ Serializable value = length > 0 ? dataPoints.get(length - 1).getValue() : 0;
+ resource.setProperty(propertyKey, value);
+ } else {
+ Number[][] dp = new Number[dataPoints.size()][2];
+ for (int i = 0; i < dp.length; i++) {
+ dp[i][0] = dataPoints.get(i).getValue();
+ dp[i][1] = dataPoints.get(i).getTimestamp() / 1000;
+ }
+ if (containsArguments(propertyKey)) {
+ resource.setProperty(requestedPropertyKey, dp);
+ } else {
+ resource.setProperty(propertyKey, dp);
+ }
+ }
+ }
+ }
+
+ return true;
+ }
+
+ // get a metric from a sql connection
+ private Map<MetricDefinition, List<DataPoint>> getMetric(Set<MetricDefinition> metricDefinitionSet, Statement statement) throws SystemException {
+ Map<MetricDefinition, List<DataPoint>> results = new HashMap<MetricDefinition, List<DataPoint>>();
+ try {
+ StringBuilder query = new StringBuilder();
+ Set<String> recordTypeContexts = new HashSet<String>();
+ Set<String> recordTypeNamess = new HashSet<String>();
+ Set<String> tagPairsPatterns = new HashSet<String>();
+ Set<String> nodeNames = new HashSet<String>();
+ Set<String> serviceNames = new HashSet<String>();
+ Set<String> metricNames = new HashSet<String>();
+ long startTime = 0, endTime = 0;
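+ // NOTE: the loop below keeps only the last definition's time range; all
+ // definitions batched into a single query are assumed to share one range.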
+ for (MetricDefinition metricDefinition : metricDefinitionSet) {
+ if (metricDefinition.getRecordTypeContext() == null || metricDefinition.getRecordTypeName() == null || metricDefinition.getNodeName() == null) {
+ continue;
+ }
+
+ recordTypeContexts.add(metricDefinition.getRecordTypeContext());
+ recordTypeNames.add(metricDefinition.getRecordTypeName());
+ tagPairsPatterns.add(metricDefinition.getTagPairsPattern());
+ nodeNames.add(metricDefinition.getNodeName());
+ serviceNames.add(metricDefinition.getServiceName());
+ metricNames.add(metricDefinition.getMetricName());
+ startTime = metricDefinition.getStartTime();
+ endTime = metricDefinition.getEndTime();
+ }
+
+ for (String tagPairsPattern : tagPairsPatterns) {
+ if (query.length() != 0) {
+ query.append("\nUNION\n");
+ }
+ query.append(String.format(GET_METRICS_STATEMENT,
+ "'" + StringUtils.join(recordTypeContexts, "','") + "'",
+ "'" + StringUtils.join(recordTypeNamess, "','") + "'",
+ "'%" + tagPairsPattern + "%'",
+ "'" + StringUtils.join(nodeNames, "','") + "'",
+ "'" + StringUtils.join(serviceNames, "','") + "'",
+ startTime,
+ endTime,
+ "'" + StringUtils.join(metricNames, "','") + "'"
+ ));
+ }
+
+ ResultSet rs = null;
+ if (query.length() != 0) {
+ rs = statement.executeQuery(query.toString());
+ }
+
+ if (rs != null) {
+ //(RecordTimeStamp bigint, MetricValue NVARCHAR(512))
+ while (rs.next()) {
+ MetricDefinition metricDefinition = null;
+ for (MetricDefinition md : metricDefinitionSet) {
+ if (md.getRecordTypeContext().equalsIgnoreCase(rs.getString("RecordTypeContext"))
+ && md.getRecordTypeName().equalsIgnoreCase(rs.getString("RecordTypeName"))
+ && md.getMetricName().equalsIgnoreCase(rs.getString("MetricName"))
+ && md.getServiceName().equalsIgnoreCase(rs.getString("ServiceName"))
+ && md.getNodeName().equalsIgnoreCase(rs.getString("NodeName"))
+ && rs.getString("TagPairs").contains(md.getTagPairsPattern())) {
+ metricDefinition = md;
+ break;
+ }
+ }
+ if (metricDefinition == null) {
+ LOG.error("Error during getMetric call : No metricdefinition found for result");
+ continue;
+ }
+ ParsePosition parsePosition = new ParsePosition(0);
+ NumberFormat numberFormat = NumberFormat.getInstance();
+ Number parsedNumber = numberFormat.parse(rs.getString("MetricValue"), parsePosition);
+ if (results.containsKey(metricDefinition)) {
+ results.get(metricDefinition).add(new DataPoint(rs.getLong("RecordTimeStamp"), parsedNumber));
+ } else {
+ List<DataPoint> dataPoints = new ArrayList<DataPoint>();
+ dataPoints.add(new DataPoint(rs.getLong("RecordTimeStamp"), parsedNumber));
+ results.put(metricDefinition, dataPoints);
+ }
+ }
+ }
+ } catch (SQLException e) {
+ throw new SystemException("Error during getMetric call: caught exception - ", e);
+ }
+ return results;
+ }
+
+ // get the hostname for a given resource
+ private String getHost(Resource resource, String clusterName, String componentName) throws SystemException {
+ return hostNamePropertyId == null ?
+ hostProvider.getHostName(clusterName, componentName) :
+ hostProvider.getHostName((String) resource.getPropertyValue(hostNamePropertyId));
+ }
+
+
+ // ----- inner class : DataPoint -------------------------------------------
+
+ /**
+ * Structure to hold a single datapoint (value/timestamp pair) retrieved from the db.
+ */
+ private static class DataPoint {
+ private final long timestamp;
+ private final Number value;
+
+ // ----- Constructor -------------------------------------------------
+
+ /**
+ * Construct a data point from the given value and timestamp.
+ *
+ * @param timestamp the timestamp
+ * @param value the value
+ */
+ private DataPoint(long timestamp, Number value) {
+ this.timestamp = timestamp;
+ this.value = value;
+ }
+
+ // ----- DataPoint ---------------------------------------------------
+
+ /**
+ * Get the timestamp value.
+ *
+ * @return the timestamp
+ */
+ public long getTimestamp() {
+ return timestamp;
+ }
+
+ /**
+ * Get the value.
+ *
+ * @return the value
+ */
+ public Number getValue() {
+ return value;
+ }
+
+ // ----- Object overrides --------------------------------------------
+
+ @Override
+ public String toString() {
+ return "{" + value + " : " + timestamp + "}";
+ }
+ }
+
+ private class MetricDefinition {
+ long startTime;
+ long endTime;
+
+ String recordTypeContext;
+ String recordTypeName;
+ String tagPairsPattern;
+ String metricName;
+ String serviceName;
+ String nodeName;
+
+ String propertyKey;
+ String requestedPropertyKey;
+ TemporalInfo temporalInfo;
+
+ private MetricDefinition(long startTime, long endTime, String recordTypeContext, String recordTypeName,
+ String tagPairsPattern, String metricName, String serviceName, String nodeName,
+ String propertyKey, String requestedPropertyKey, TemporalInfo temporalInfo) {
+ this.startTime = startTime;
+ this.endTime = endTime;
+ this.recordTypeContext = recordTypeContext;
+ this.recordTypeName = recordTypeName;
+ this.tagPairsPattern = tagPairsPattern;
+ this.metricName = metricName;
+ this.serviceName = serviceName;
+ this.nodeName = nodeName;
+ this.propertyKey = propertyKey;
+ this.requestedPropertyKey = requestedPropertyKey;
+ this.temporalInfo = temporalInfo;
+ }
+
+ private MetricDefinition(String recordTypeContext, String recordTypeName, String tagPairsPattern,
+ String metricName, String serviceName, String nodeName) {
+ this.recordTypeContext = recordTypeContext;
+ this.recordTypeName = recordTypeName;
+ this.tagPairsPattern = tagPairsPattern;
+ this.metricName = metricName;
+ this.serviceName = serviceName;
+ this.nodeName = nodeName;
+ }
+
+ public long getStartTime() {
+ return startTime;
+ }
+
+ public void setStartTime(long startTime) {
+ this.startTime = startTime;
+ }
+
+ public long getEndTime() {
+ return endTime;
+ }
+
+ public void setEndTime(long endTime) {
+ this.endTime = endTime;
+ }
+
+ public String getRecordTypeContext() {
+ return recordTypeContext;
+ }
+
+ public void setRecordTypeContext(String recordTypeContext) {
+ this.recordTypeContext = recordTypeContext;
+ }
+
+ public String getRecordTypeName() {
+ return recordTypeName;
+ }
+
+ public void setRecordTypeName(String recordTypeName) {
+ this.recordTypeName = recordTypeName;
+ }
+
+ public String getTagPairsPattern() {
+ return tagPairsPattern;
+ }
+
+ public void setTagPairsPattern(String tagPairsPattern) {
+ this.tagPairsPattern = tagPairsPattern;
+ }
+
+ public String getMetricName() {
+ return metricName;
+ }
+
+ public void setMetricName(String metricName) {
+ this.metricName = metricName;
+ }
+
+ public String getServiceName() {
+ return serviceName;
+ }
+
+ public void setServiceName(String serviceName) {
+ this.serviceName = serviceName;
+ }
+
+ public String getNodeName() {
+ return nodeName;
+ }
+
+ public void setNodeName(String nodeName) {
+ this.nodeName = nodeName;
+ }
+
+ public String getPropertyKey() {
+ return propertyKey;
+ }
+
+ public void setPropertyKey(String propertyKey) {
+ this.propertyKey = propertyKey;
+ }
+
+ public String getRequestedPropertyKey() {
+ return requestedPropertyKey;
+ }
+
+ public void setRequestedPropertyKey(String requestedPropertyKey) {
+ this.requestedPropertyKey = requestedPropertyKey;
+ }
+
+ public TemporalInfo getTemporalInfo() {
+ return temporalInfo;
+ }
+
+ public void setTemporalInfo(TemporalInfo temporalInfo) {
+ this.temporalInfo = temporalInfo;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ MetricDefinition that = (MetricDefinition) o;
+
+ if (metricName != null ? !metricName.equals(that.metricName) : that.metricName != null) return false;
+ if (nodeName != null ? !nodeName.equalsIgnoreCase(that.nodeName) : that.nodeName != null) return false;
+ if (recordTypeContext != null ? !recordTypeContext.equals(that.recordTypeContext) : that.recordTypeContext != null)
+ return false;
+ if (recordTypeName != null ? !recordTypeName.equals(that.recordTypeName) : that.recordTypeName != null)
+ return false;
+ if (serviceName != null ? !serviceName.equals(that.serviceName) : that.serviceName != null) return false;
+ if (tagPairsPattern != null ? that.tagPairsPattern == null ||
+ !(tagPairsPattern.contains(that.tagPairsPattern) ||
+ that.tagPairsPattern.contains(tagPairsPattern)) : that.tagPairsPattern != null)
+ return false;
+
+ return true;
+ }
+
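+ // tagPairsPattern is compared by containment in equals(), so it is deliberately omitted here to preserve the equals/hashCode contract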
+ @Override
+ public int hashCode() {
+ int result = recordTypeContext != null ? recordTypeContext.hashCode() : 0;
+ result = 31 * result + (recordTypeName != null ? recordTypeName.hashCode() : 0);
+ result = 31 * result + (metricName != null ? metricName.hashCode() : 0);
+ result = 31 * result + (serviceName != null ? serviceName.hashCode() : 0);
+ result = 31 * result + (nodeName != null ? nodeName.toLowerCase().hashCode() : 0);
+ return result;
+ }
+ }
+}
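
The populateResource fragment above reports only the latest value for a point-in-time request and a Number[n][2] matrix (value first, then the timestamp down-scaled to seconds) for a temporal request. A minimal standalone sketch of that shaping, using hypothetical names not taken from the patch:

import java.util.Arrays;
import java.util.List;

public class DataPointShapingSketch {
  public static void main(String[] args) {
    // Hypothetical (value, timestamp-in-millis) pairs standing in for DataPoint.
    List<long[]> points = Arrays.asList(
        new long[]{42L, 1400000000000L},
        new long[]{43L, 1400000015000L});

    // No TemporalInfo: only the last value is reported.
    long latest = points.get(points.size() - 1)[0];
    System.out.println("latest = " + latest);

    // TemporalInfo present: a Number[n][2] series, timestamps in seconds.
    Number[][] dp = new Number[points.size()][2];
    for (int i = 0; i < dp.length; i++) {
      dp[i][0] = points.get(i)[0];
      dp[i][1] = points.get(i)[1] / 1000;
    }
    System.out.println(Arrays.deepToString(dp));  // [[42, 1400000000], [43, 1400000015]]
  }
}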
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/sql/SinkConnectionFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/sql/SinkConnectionFactory.java
new file mode 100644
index 0000000000..154926c293
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/sql/SinkConnectionFactory.java
@@ -0,0 +1,132 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller.sql;
+
+import com.mchange.v2.c3p0.ComboPooledDataSource;
+import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.controller.jdbc.ConnectionFactory;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+
+/**
+ * Factory for the sink database connection.
+ */
+public class SinkConnectionFactory implements ConnectionFactory {
+
+ /**
+ * The database URL.
+ */
+ private String databaseUrl;
+
+ /**
+ * The database driver.
+ */
+ private String databaseDriver;
+
+ private String databaseUser;
+
+ private String databasePassword;
+
+ private boolean useIntegratedAuth;
+
+ /**
+ * Indicates whether or not the driver has been initialized
+ */
+ private boolean connectionInitialized = false;
+
+ private ComboPooledDataSource cpds;
+
+ /**
+ * The singleton.
+ */
+ private static SinkConnectionFactory singleton = new SinkConnectionFactory();
+
+ // ----- Constructor -------------------------------------------------------
+
+ protected SinkConnectionFactory() {
+ Configuration config = new Configuration();
+ this.databaseUrl = config.getSinkDatabaseUrl();
+ this.databaseDriver = config.getSinkDatabaseDriver();
+ this.useIntegratedAuth = config.getSinkUseIntegratedAuth();
+ this.databaseUser = config.getSinkDatabaseUser();
+ this.databasePassword = config.getSinkDatabasePassword();
+ }
+
+
+ // ----- SinkConnectionFactory ---------------------------------------------
+
+ /**
+ * Initialize.
+ */
+ public void init() {
+ this.cpds = new ComboPooledDataSource();
+ this.cpds.setJdbcUrl(this.databaseUrl);
+ if(!useIntegratedAuth) {
+ this.cpds.setUser(this.databaseUser);
+ this.cpds.setPassword(this.databasePassword);
+ }
+ this.cpds.setMaxPoolSize(5);
+ }
+
+ /**
+ * Get the singleton instance.
+ *
+ * @return the singleton instance
+ */
+ public static SinkConnectionFactory instance() {
+ return singleton;
+ }
+
+ /**
+ * Get the database URL.
+ *
+ * @return the database URL
+ */
+ public String getDatabaseUrl() {
+ return databaseUrl;
+ }
+
+ /**
+ * Get the database driver.
+ *
+ * @return the database driver
+ */
+ public String getDatabaseDriver() {
+ return databaseDriver;
+ }
+
+ // ----- ConnectionFactory -------------------------------------------------
+
+ @Override
+ public Connection getConnection() throws SQLException {
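+ // lazily load the JDBC driver and build the c3p0 pool on first use; the synchronized block keeps initialization thread-safe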
+ synchronized (this) {
+ if (!connectionInitialized) {
+ try {
+ Class.forName(databaseDriver);
+ } catch (Exception e) {
+ throw new SQLException("Can't load the driver class: " + databaseDriver, e);
+ }
+ init();
+ connectionInitialized = true;
+ }
+ }
+ return this.cpds.getConnection();
+ }
+}
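
Callers go through the singleton, and the pool is built lazily on the first getConnection() call, so no explicit init() is required. A hedged usage sketch; the query and table name are hypothetical, and the class assumes the factory above is on the classpath:

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.Statement;

public class SinkQuerySketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = SinkConnectionFactory.instance().getConnection();
         Statement stmt = conn.createStatement();
         ResultSet rs = stmt.executeQuery("SELECT COUNT(*) FROM MetricRecord")) {
      if (rs.next()) {
        System.out.println("rows: " + rs.getLong(1));
      }
    } // closing the connection returns it to the c3p0 pool rather than dropping it
  }
}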
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/PropertyHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/PropertyHelper.java
index d0226f1f28..3e2111eefb 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/PropertyHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/PropertyHelper.java
@@ -44,6 +44,7 @@ public class PropertyHelper {
private static final String PROPERTIES_FILE = "properties.json";
private static final String GANGLIA_PROPERTIES_FILE = "ganglia_properties.json";
+ private static final String SQLSERVER_PROPERTIES_FILE = "sqlserver_properties.json";
private static final String JMX_PROPERTIES_FILE = "jmx_properties.json";
private static final String KEY_PROPERTIES_FILE = "key_properties.json";
private static final char EXTERNAL_PATH_SEP = '/';
@@ -51,6 +52,7 @@ public class PropertyHelper {
private static final Map<Resource.InternalType, Set<String>> PROPERTY_IDS = readPropertyIds(PROPERTIES_FILE);
private static final Map<Resource.InternalType, Map<String, Map<String, PropertyInfo>>> JMX_PROPERTY_IDS = readPropertyProviderIds(JMX_PROPERTIES_FILE);
private static final Map<Resource.InternalType, Map<String, Map<String, PropertyInfo>>> GANGLIA_PROPERTY_IDS = readPropertyProviderIds(GANGLIA_PROPERTIES_FILE);
+ private static final Map<Resource.InternalType, Map<String, Map<String, PropertyInfo>>> SQLSERVER_PROPERTY_IDS = readPropertyProviderIds(SQLSERVER_PROPERTIES_FILE);
private static final Map<Resource.InternalType, Map<Resource.Type, String>> KEY_PROPERTY_IDS = readKeyPropertyIds(KEY_PROPERTIES_FILE);
/**
@@ -114,6 +116,10 @@ public class PropertyHelper {
return GANGLIA_PROPERTY_IDS.get(resourceType.getInternalType());
}
+ public static Map<String, Map<String, PropertyInfo>> getSQLServerPropertyIds(Resource.Type resourceType) {
+ return SQLSERVER_PROPERTY_IDS.get(resourceType.getInternalType());
+ }
+
public static Map<String, Map<String, PropertyInfo>> getJMXPropertyIds(Resource.Type resourceType) {
return JMX_PROPERTY_IDS.get(resourceType.getInternalType());
}
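
The new accessor mirrors the existing Ganglia and JMX lookups. A short, hypothetical fragment showing how a provider might resolve the SQL Server metric mappings (the component key is illustrative, and the snippet assumes the Ambari classes above are on the classpath):

// Hypothetical caller, not part of the patch:
Map<String, Map<String, PropertyInfo>> ids =
    PropertyHelper.getSQLServerPropertyIds(Resource.Type.HostComponent);
Map<String, PropertyInfo> componentIds = ids.get("NAMENODE"); // null when unmapped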
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/resources/ResourceManager.java b/ambari-server/src/main/java/org/apache/ambari/server/resources/ResourceManager.java
index f17564f1da..9f4e70861d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/resources/ResourceManager.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/resources/ResourceManager.java
@@ -41,7 +41,7 @@ public class ResourceManager {
*/
public File getResource(String resourcePath) {
String resDir = configs.getConfigsMap().get(Configuration.RESOURCES_DIR_KEY);
- String resourcePathIndep = resourcePath.replaceAll("/", File.separator);
+ String resourcePathIndep = resourcePath.replace("/", File.separator);
File resourceFile = new File(resDir + File.separator + resourcePathIndep);
if (LOG.isDebugEnabled()) {
LOG.debug("Resource requested from ResourceManager"
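
The replaceAll-to-replace change above is a correctness fix, not a style preference: String.replaceAll treats its first argument as a regular expression and its second as a replacement pattern, so on Windows a File.separator of "\" becomes a dangling escape and fails at runtime. A small demonstration (class name is hypothetical):

public class SeparatorReplaceSketch {
  public static void main(String[] args) {
    String path = "stacks/HDP/2.1/services";
    // replace() substitutes literally and is safe for any separator, including "\".
    System.out.println(path.replace("/", "\\"));   // prints stacks\HDP\2.1\services
    // replaceAll() interprets "\\" (a single backslash) as an incomplete escape in
    // the replacement pattern and throws IllegalArgumentException at runtime.
    System.out.println(path.replaceAll("/", "\\"));
  }
}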
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/security/CertificateManager.java b/ambari-server/src/main/java/org/apache/ambari/server/security/CertificateManager.java
index 1dbc0649de..b698ef3f8b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/security/CertificateManager.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/security/CertificateManager.java
@@ -17,6 +17,14 @@
*/
package org.apache.ambari.server.security;
+import com.google.inject.Inject;
+import com.google.inject.Singleton;
+import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.utils.ShellCommandUtil;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
@@ -25,15 +33,6 @@ import java.nio.charset.Charset;
import java.text.MessageFormat;
import java.util.Map;
-import org.apache.ambari.server.utils.ShellCommandUtil;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import com.google.inject.Inject;
-import com.google.inject.Singleton;
-
/**
* Ambari security.
* Manages server and agent certificates
@@ -47,22 +46,22 @@ public class CertificateManager {
private static final String GEN_SRVR_KEY = "openssl genrsa -des3 " +
- "-passout pass:{0} -out {1}/{2} 4096 ";
+ "-passout pass:{0} -out {1}" + File.separator + "{2} 4096 ";
private static final String GEN_SRVR_REQ = "openssl req -passin pass:{0} " +
- "-new -key {1}/{2} -out {1}/{5} -batch";
+ "-new -key {1}" + File.separator + "{2} -out {1}" + File.separator + "{5} -batch";
private static final String SIGN_SRVR_CRT = "openssl ca -create_serial " +
- "-out {1}/{3} -days 365 -keyfile {1}/{2} -key {0} -selfsign " +
- "-extensions jdk7_ca -config {1}/ca.config -batch " +
- "-infiles {1}/{5}";
+ "-out {1}" + File.separator + "{3} -days 365 -keyfile {1}" + File.separator + "{2} -key {0} -selfsign " +
+ "-extensions jdk7_ca -config {1}" + File.separator + "ca.config -batch " +
+ "-infiles {1}" + File.separator + "{5}";
private static final String EXPRT_KSTR = "openssl pkcs12 -export" +
- " -in {1}/{3} -inkey {1}/{2} -certfile {1}/{3} -out {1}/{4} " +
+ " -in {1}" + File.separator + "{3} -inkey {1}" + File.separator + "{2} -certfile {1}" + File.separator + "{3} -out {1}" + File.separator + "{4} " +
"-password pass:{0} -passin pass:{0} \n";
private static final String REVOKE_AGENT_CRT = "openssl ca " +
- "-config {0}/ca.config -keyfile {0}/{4} -revoke {0}/{2} -batch " +
- "-passin pass:{3} -cert {0}/{5}";
+ "-config {0}" + File.separator + "ca.config -keyfile {0}" + File.separator + "{4} -revoke {0}" + File.separator + "{2} -batch " +
+ "-passin pass:{3} -cert {0}" + File.separator + "{5}";
private static final String SIGN_AGENT_CRT = "openssl ca -config " +
- "{0}/ca.config -in {0}/{1} -out {0}/{2} -batch -passin pass:{3} " +
- "-keyfile {0}/{4} -cert {0}/{5}"; /**
+ "{0}" + File.separator + "ca.config -in {0}" + File.separator + "{1} -out {0}" + File.separator + "{2} -batch -passin pass:{3} " +
+ "-keyfile {0}" + File.separator + "{4} -cert {0}" + File.separator + "{5}"; /**
* Verify that root certificate exists, generate it otherwise.
*/
public void initRootCert() {
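
The command templates above are expanded with java.text.MessageFormat, so the {n} placeholders and the File.separator concatenation produce a platform-correct openssl command line. A minimal sketch with hypothetical argument values:

import java.io.File;
import java.text.MessageFormat;

public class OpensslCommandSketch {
  // Same construction as GEN_SRVR_KEY above.
  private static final String GEN_SRVR_KEY = "openssl genrsa -des3 " +
      "-passout pass:{0} -out {1}" + File.separator + "{2} 4096 ";

  public static void main(String[] args) {
    // Hypothetical values: passphrase, keys directory, key file name.
    String cmd = MessageFormat.format(GEN_SRVR_KEY, "secret", "keys", "ca.key");
    // On Linux this prints: openssl genrsa -des3 -passout pass:secret -out keys/ca.key 4096
    System.out.println(cmd);
  }
}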
diff --git a/ambari-server/src/main/package/msi/ambari-server.wxs b/ambari-server/src/main/package/msi/ambari-server.wxs
new file mode 100644
index 0000000000..4a1afe8333
--- /dev/null
+++ b/ambari-server/src/main/package/msi/ambari-server.wxs
@@ -0,0 +1,60 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<Wix xmlns="http://schemas.microsoft.com/wix/2006/wi">
+<?define Version = "${ambariVersion}" ?>
+<?define UpgradeCode = "c8f5c145-c0aa-4d50-b1f5-ac9bc4055fb8" ?>
+ <Product Id="*" Name="Ambari Server ${ambariVersion}-SNAPSHOT" Language="1033"
+ Version="$(var.Version)" Manufacturer="Apache Software Foundation"
+ UpgradeCode="$(var.UpgradeCode)">
+ <Package Description="Ambari Server for Windows" Comments="Ambari Server for Windows" InstallerVersion="200"
+ Compressed="yes" Platform="x64"/>
+
+ <!-- upgrade rules -->
+ <Upgrade Id="$(var.UpgradeCode)">
+ <UpgradeVersion Minimum="$(var.Version)" IncludeMinimum="no" OnlyDetect="yes" Property="NEWERVERSIONDETECTED" />
+ <UpgradeVersion Minimum="0.0.0.0" Maximum="$(var.Version)" IncludeMinimum="yes" IncludeMaximum="yes" Property="OLDERVERSIONBEINGUPGRADED" />
+ </Upgrade>
+
+ <InstallExecuteSequence>
+ <RemoveExistingProducts After="InstallInitialize" />
+ <Custom Action="NewerVersion" After="FindRelatedProducts">NEWERVERSIONDETECTED</Custom>
+ <Custom Action="BackupConfiguration" After='InstallInitialize'>OLDERVERSIONBEINGUPGRADED</Custom>
+ </InstallExecuteSequence>
+
+ <CustomAction Id="NewerVersion" Error="A later version of Ambari Server is already installed." />
+ <!-- backup ambari.properties before installing new bits-->
+ <CustomAction Id="BackupConfiguration"
+ Directory="AMBARI_SERVER_MSI"
+ ExeCommand='cmd.exe /c copy conf\ambari.properties conf\ambari.properties.backup'
+ Return="check" />
+
+ <Media Id="1" Cabinet="simple.cab" EmbedCab="yes"/>
+ <Directory Id="TARGETDIR" Name="SourceDir">
+ <Directory Id="ProgramFiles64Folder">
+ <Directory Id="AMBARI_SERVER_MSI" Name="ambari-server">
+ </Directory>
+ </Directory>
+ </Directory>
+ <Feature Id="DefaultFeature" Title="Main Feature" Level="1">
+ <ComponentGroupRef Id="AmbariServerGroup"/>
+ </Feature>
+ <UI/>
+ <Property Id="WIXUI_INSTALLDIR" Value="AMBARI_SERVER_MSI"/>
+ <UIRef Id="WixUI_InstallDir"/>
+ </Product>
+</Wix> \ No newline at end of file
diff --git a/ambari-server/src/main/python/ambari-server-windows.py b/ambari-server/src/main/python/ambari-server-windows.py
new file mode 100644
index 0000000000..039069c6ab
--- /dev/null
+++ b/ambari-server/src/main/python/ambari-server-windows.py
@@ -0,0 +1,601 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import optparse
+
+from ambari_commons.ambari_service import AmbariService
+from ambari_commons.logging_utils import *
+from ambari_commons.os_utils import remove_file
+from ambari_commons.os_windows import SvcStatusCallback
+
+from ambari_server import utils
+from ambari_server.dbConfiguration import DBMSConfig
+from ambari_server.resourceFilesKeeper import ResourceFilesKeeper, KeeperException
+from ambari_server.serverConfiguration import *
+from ambari_server.serverSetup import setup, reset, is_server_running, upgrade
+from ambari_server.setupActions import *
+from ambari_server.setupSecurity import *
+from ambari_server.serverSetup_windows import SERVICE_PASSWORD_KEY, SERVICE_USERNAME_KEY
+
+# debug settings
+SERVER_START_DEBUG = False
+SUSPEND_START_MODE = False
+
+# server commands
+ambari_provider_module_option = ""
+ambari_provider_module = os.environ.get('AMBARI_PROVIDER_MODULE')
+
+#Common setup or upgrade message
+SETUP_OR_UPGRADE_MSG = "- If this is a new setup, then run the \"ambari-server setup\" command to create the user\n" \
+"- If this is an upgrade of an existing setup, run the \"ambari-server upgrade\" command.\n" \
+"Refer to the Ambari documentation for more information on setup and upgrade."
+
+AMBARI_SERVER_DIE_MSG = "Ambari Server java process died with exitcode {0}. Check {1} for more information."
+
+if ambari_provider_module is not None:
+ ambari_provider_module_option = "-Dprovider.module.class=" +\
+ ambari_provider_module + " "
+
+SERVER_START_CMD = \
+ "-server -XX:NewRatio=3 "\
+ "-XX:+UseConcMarkSweepGC " +\
+ "-XX:-UseGCOverheadLimit -XX:CMSInitiatingOccupancyFraction=60 " +\
+ ambari_provider_module_option +\
+ os.getenv('AMBARI_JVM_ARGS', '-Xms512m -Xmx2048m') +\
+ " -cp {0}" +\
+ " org.apache.ambari.server.controller.AmbariServer"
+SERVER_START_CMD_DEBUG = \
+ "-server -XX:NewRatio=2 -XX:+UseConcMarkSweepGC " +\
+ ambari_provider_module_option +\
+ os.getenv('AMBARI_JVM_ARGS', '-Xms512m -Xmx2048m') +\
+ " -Xdebug -Xrunjdwp:transport=dt_socket,address=5005,"\
+ "server=y,suspend={1} -cp {0}" +\
+ " org.apache.ambari.server.controller.AmbariServer"
+SERVER_SEARCH_PATTERN = "org.apache.ambari.server.controller.AmbariServer"
+
+SERVER_INIT_TIMEOUT = 5
+SERVER_START_TIMEOUT = 10
+
+PID_NAME = "ambari-server.pid"
+EXITCODE_NAME = "ambari-server.exitcode"
+
+SERVER_VERSION_FILE_PATH = "server.version.file"
+
+# linux open-file limit
+ULIMIT_OPEN_FILES_KEY = 'ulimit.open.files'
+ULIMIT_OPEN_FILES_DEFAULT = 10000
+
+
+class AmbariServerService(AmbariService):
+ AmbariService._svc_name_ = "Ambari Server"
+ AmbariService._svc_display_name_ = "Ambari Server"
+ AmbariService._svc_description_ = "Ambari Server"
+
+ AmbariService._AdjustServiceVersion()
+
+ def SvcDoRun(self):
+ scmStatus = SvcStatusCallback(self)
+
+ properties = get_ambari_properties()
+ self.options.verbose = get_value_from_properties(properties, VERBOSE_OUTPUT_KEY, self.options.verbose)
+ self.options.debug = get_value_from_properties(properties, DEBUG_MODE_KEY, self.options.debug)
+ self.options.suspend_start = get_value_from_properties(properties, SUSPEND_START_MODE_KEY, self.options.suspend_start)
+
+ self.redirect_output_streams()
+
+ childProc = server_process_main(self.options, scmStatus)
+
+ if not self._StopOrWaitForChildProcessToFinish(childProc):
+ return
+
+ pid_file_path = PID_DIR + os.sep + PID_NAME
+ remove_file(pid_file_path)
+ pass
+
+ def _InitOptionsParser(self):
+ return init_options_parser()
+
+ def redirect_output_streams(self):
+ properties = get_ambari_properties()
+
+ outFilePath = properties[SERVER_OUT_FILE_KEY]
+ if (outFilePath is None or outFilePath == ""):
+ outFilePath = SERVER_OUT_FILE
+
+ self._RedirectOutputStreamsToFile(outFilePath)
+ pass
+
+def ctrlHandler(ctrlType):
+ AmbariServerService.DefCtrlCHandler()
+ return True
+
+def svcsetup():
+ AmbariServerService.set_ctrl_c_handler(ctrlHandler)
+ # We don't save the password between 'setup' runs, so we can't run Install every time. We run 'setup' only if the
+ # user and password are provided or if the service is not installed.
+ if (SERVICE_USERNAME_KEY in os.environ and SERVICE_PASSWORD_KEY in os.environ):
+ AmbariServerService.Install(username=os.environ[SERVICE_USERNAME_KEY], password=os.environ[SERVICE_PASSWORD_KEY])
+ elif AmbariServerService.QueryStatus() == "not installed":
+ AmbariServerService.Install()
+ pass
+
+#
+# Starts the Ambari Server as a standalone process.
+# args:
+# <no arguments> = start the server as a process. For now, there are no restrictions on the number of server instances
+# that can run like this.
+# -s, --single-instance = Reserved for future use. When starting the server as a process, ensure only one instance of the process is running.
+# If this is the second instance of the process, the function fails.
+#
+def start(options):
+ AmbariServerService.set_ctrl_c_handler(ctrlHandler)
+
+ #Run as a normal process. Invoke the ServiceMain directly.
+ childProc = server_process_main(options)
+
+ childProc.wait()
+
+ pid_file_path = PID_DIR + os.sep + PID_NAME
+ remove_file(pid_file_path)
+
+#
+# Starts the Ambari Server as a service.
+# Start the server in normal mode, as a Windows service. If the Ambari server is
+# not registered as a service, the function fails. By default, only one instance of the service can
+# possibly run.
+#
+def svcstart():
+ AmbariServerService.Start()
+ pass
+
+def server_process_main(options, scmStatus=None):
+ # set verbose
+ try:
+ global VERBOSE
+ VERBOSE = options.verbose
+ except AttributeError:
+ pass
+
+ # set silent
+ try:
+ global SILENT
+ SILENT = options.silent
+ except AttributeError:
+ pass
+
+ # debug mode
+ try:
+ global DEBUG_MODE
+ DEBUG_MODE = options.debug
+ except AttributeError:
+ pass
+
+ # suspend the Java process at startup (debug mode)?
+ try:
+ global SUSPEND_START_MODE
+ SUSPEND_START_MODE = options.suspend_start
+ except AttributeError:
+ pass
+
+ if not utils.check_reverse_lookup():
+ print_warning_msg("The hostname was not found in the reverse DNS lookup. "
+ "This may result in incorrect behavior. "
+ "Please check the DNS setup and fix the issue.")
+
+ properties = get_ambari_properties()
+
+ print_info_msg("Ambari Server is not running...")
+
+ conf_dir = get_conf_dir()
+ jdk_path = find_jdk()
+ if jdk_path is None:
+ err = "No JDK found, please run the \"ambari-server setup\" " \
+ "command to install a JDK automatically or install any " \
+ "JDK manually to " + JDK_INSTALL_DIR
+ raise FatalException(1, err)
+
+ # Preparations
+
+ result = ensure_dbms_is_running(options, properties, scmStatus)
+ if result == -1:
+ raise FatalException(-1, "Unable to connect to the database")
+
+ if scmStatus is not None:
+ scmStatus.reportStartPending()
+
+ ensure_resources_are_organized(properties)
+
+ if scmStatus is not None:
+ scmStatus.reportStartPending()
+
+ environ = os.environ.copy()
+ ensure_server_security_is_configured(properties, environ)
+
+ if scmStatus is not None:
+ scmStatus.reportStartPending()
+
+ conf_dir = os.path.abspath(conf_dir) + os.pathsep + get_ambari_classpath()
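+ # quote the classpath if it contains spaces, so the Windows command line treats it as a single argument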
+ if conf_dir.find(' ') != -1:
+ conf_dir = '"' + conf_dir + '"'
+
+ java_exe = jdk_path + os.sep + JAVA_EXE_SUBPATH
+ pidfile = PID_DIR + os.sep + PID_NAME
+ command_base = SERVER_START_CMD_DEBUG if (DEBUG_MODE or SERVER_START_DEBUG) else SERVER_START_CMD
+ suspend_mode = 'y' if SUSPEND_START_MODE else 'n'
+ command = command_base.format(conf_dir, suspend_mode)
+ if not os.path.exists(PID_DIR):
+ os.makedirs(PID_DIR, 0755)
+
+ set_open_files_limit(get_ulimit_open_files())
+
+ #Ignore the requirement to run as root. In Windows, by default the child process inherits the security context
+ # and the environment from the parent process.
+ param_list = java_exe + " " + command
+
+ print_info_msg("Running server: " + str(param_list))
+ procJava = subprocess.Popen(param_list, env=environ)
+
+ #wait for server process for SERVER_START_TIMEOUT seconds
+ print "Waiting for server start..."
+
+ pidJava = procJava.pid
+ if pidJava <= 0:
+ procJava.terminate()
+ exitcode = procJava.returncode
+ exitfile = os.path.join(PID_DIR, EXITCODE_NAME)
+ utils.save_pid(exitcode, exitfile)
+
+ if scmStatus is not None:
+ scmStatus.reportStopPending()
+
+ raise FatalException(-1, AMBARI_SERVER_DIE_MSG.format(exitcode, SERVER_OUT_FILE))
+ else:
+ utils.save_pid(pidJava, pidfile)
+ print "Server PID at: "+pidfile
+ print "Server out at: "+SERVER_OUT_FILE
+ print "Server log at: "+SERVER_LOG_FILE
+
+ if scmStatus is not None:
+ scmStatus.reportStarted()
+
+ return procJava
+
+# Check the JDBC driver status; abort if it is not installed.
+# Get the SQL Server service status from SCM; if 'stopped', start it.
+# Wait until the status is 'started' or a configured timeout elapses.
+# If the timeout is reached, bail out with an exception.
+def ensure_dbms_is_running(options, properties, scmStatus):
+ dbms = DBMSConfig.create(options, properties, "Ambari")
+ if not dbms._is_jdbc_driver_installed(properties):
+ raise FatalException(-1, "JDBC driver is not installed. Run ambari-server setup and try again.")
+
+ dbms.ensure_dbms_is_running(options, properties, scmStatus)
+
+ dbms2 = DBMSConfig.create(options, properties, "Metrics")
+ if dbms2.database_host.lower() != dbms.database_host.lower():
+ dbms2.ensure_dbms_is_running(options, properties, scmStatus)
+ pass
+
+def ensure_resources_are_organized(properties):
+ resources_location = get_resources_location(properties)
+ resource_files_keeper = ResourceFilesKeeper(resources_location)
+ try:
+ print "Organizing resource files at {0}...".format(resources_location,
+ verbose=VERBOSE)
+ resource_files_keeper.perform_housekeeping()
+ except KeeperException, ex:
+ msg = "Can not organize resource files at {0}: {1}".format(
+ resources_location, str(ex))
+ raise FatalException(-1, msg)
+
+
+def ensure_server_security_is_configured(properties, environ):
+ pass
+
+
+#
+# Stops the Ambari Server.
+#
+def svcstop():
+ AmbariServerService.Stop()
+
+
+### Stack upgrade ###
+
+#def upgrade_stack(args, stack_id, repo_url=None, repo_url_os=None):
+
+
+def get_resources_location(properties):
+ res_location = properties[RESOURCES_DIR_PROPERTY]
+ if res_location is None:
+ res_location = RESOURCES_DIR_DEFAULT
+ return res_location
+# pass
+
+def get_stack_location(properties):
+ stack_location = properties[STACK_LOCATION_KEY]
+ if stack_location is None:
+ stack_location = STACK_LOCATION_DEFAULT
+ return stack_location
+# pass
+
+
+#
+# The Ambari Server status.
+#
+def svcstatus(options):
+ options.exit_message = None
+
+ statusStr = AmbariServerService.QueryStatus()
+ print "Ambari Server is " + statusStr
+
+
+def get_ulimit_open_files():
+ properties = get_ambari_properties()
+ if properties == -1:
+ print "Error reading ambari properties"
+ return None
+
+ open_files_val = properties[ULIMIT_OPEN_FILES_KEY]
+ open_files = int(open_files_val) if open_files_val else 0
+ if open_files > 0:
+ return open_files
+ else:
+ return ULIMIT_OPEN_FILES_DEFAULT
+
+
+def init_options_parser():
+ parser = optparse.OptionParser(usage="usage: %prog action [options] [stack_id os]")
+ #parser.add_option('-i', '--create-db-script-file', dest="create_db_script_file",
+ # default="resources" + os.sep + "Ambari-DDL-SQLServer-CREATELOCAL.sql",
+ # help="File with database creation script")
+ parser.add_option('-f', '--init-script-file', dest="init_db_script_file",
+ default="resources" + os.sep + "Ambari-DDL-SQLServer-CREATE.sql",
+ help="File with database setup script")
+ parser.add_option('-r', '--drop-script-file', dest="cleanup_db_script_file",
+ default="resources" + os.sep + "Ambari-DDL-SQLServer-DROP.sql",
+ help="File with database cleanup script")
+ parser.add_option('-j', '--java-home', dest="java_home", default=None,
+ help="Use specified java_home. Must be valid on all hosts")
+ parser.add_option("-v", "--verbose",
+ action="store_true", dest="verbose", default=False,
+ help="Print verbose status messages")
+ parser.add_option("-s", "--silent",
+ action="store_true", dest="silent", default=False,
+ help="Silently accepts default prompt values")
+ parser.add_option('-g', '--debug', action="store_true", dest='debug', default=False,
+ help="Start ambari-server in debug mode")
+ parser.add_option('-y', '--suspend-start', action="store_true", dest='suspend_start', default=False,
+ help="Freeze ambari-server Java process at startup in debug mode")
+
+ parser.add_option('-a', '--databasehost', dest="database_host", default=None,
+ help="Hostname of database server")
+ parser.add_option('-n', '--databaseport', dest="database_port", default=None,
+ help="Database server listening port")
+ parser.add_option('-d', '--databasename', dest="database_name", default=None,
+ help="Database/Schema/Service name or ServiceID")
+ parser.add_option('-w', '--windowsauth', action="store_true", dest="database_windows_auth", default=None,
+ help="Integrated Windows authentication")
+ parser.add_option('-u', '--databaseusername', dest="database_username", default=None,
+ help="Database user login")
+ parser.add_option('-p', '--databasepassword', dest="database_password", default=None,
+ help="Database user password")
+
+ parser.add_option('-t', '--init-metrics-script-file', dest="init_metrics_db_script_file", default=None,
+ help="File with metrics database setup script")
+ parser.add_option('-c', '--drop-metrics-script-file', dest="cleanup_metrics_db_script_file", default=None,
+ help="File with metrics database cleanup script")
+
+ parser.add_option('-m', '--metricsdatabasehost', dest="metrics_database_host", default=None,
+ help="Hostname of metrics database server")
+ parser.add_option('-o', '--metricsdatabaseport', dest="metrics_database_port", default=None,
+ help="Metrics database server listening port")
+ parser.add_option('-e', '--metricsdatabasename', dest="metrics_database_name", default=None,
+ help="Metrics database/Schema/Service name or ServiceID")
+ parser.add_option('-z', '--metricswindowsauth', action="store_true", dest="metrics_database_windows_auth", default=None,
+ help="Integrated Windows authentication for the metrics database")
+ parser.add_option('-q', '--metricsdatabaseusername', dest="metrics_database_username", default=None,
+ help="Metrics database user login")
+ parser.add_option('-l', '--metricsdatabasepassword', dest="metrics_database_password", default=None,
+ help="Metrics database user password")
+
+ parser.add_option('--jdbc-driver', default=None, dest="jdbc_driver",
+ help="Specifies the path to the JDBC driver JAR file for the " \
+ "database type specified with the --jdbc-db option. Used only with --jdbc-db option.")
+ # -b, -i, -k and -x are the remaining available short options
+ # -h reserved for help
+ return parser
+
+def are_cmd_line_db_args_blank(options):
+ if (options.database_host is None \
+ and options.database_name is None \
+ and options.database_windows_auth is None \
+ and options.database_username is None \
+ and options.database_password is None \
+ and options.metrics_database_host is None \
+ and options.metrics_database_name is None \
+ and options.metrics_database_windows_auth is None \
+ and options.metrics_database_username is None \
+ and options.metrics_database_password is None):
+ return True
+ return False
+
+
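+# With integrated Windows authentication no credentials are needed; otherwise both username and password must be non-empty.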
+def are_db_auth_options_ok(db_windows_auth, db_username, db_password):
+ if db_windows_auth is True:
+ return True
+ else:
+ if db_username is not None and db_username != "" and db_password is not None and db_password != "":
+ return True
+ return False
+
+def are_cmd_line_db_args_valid(options):
+ if (options.database_host is not None and options.database_host != "" \
+ #and options.database_name is not None \ # ambari by default is ok
+ and are_db_auth_options_ok(options.database_windows_auth,
+ options.database_username,
+ options.database_password) \
+ and options.metrics_database_host is not None and options.metrics_database_host != "" \
+ #and options.metrics_database_name is not None \ # HadoopMetrics by default is ok
+ and are_db_auth_options_ok(options.metrics_database_windows_auth,
+ options.metrics_database_username,
+ options.metrics_database_password)):
+ return True
+ return False
+
+
+def setup_security(args):
+ need_restart = True
+ #Print menu options
+ print '=' * 75
+ print 'Choose one of the following options: '
+ print ' [1] Enable HTTPS for Ambari server.'
+ print ' [2] Encrypt passwords stored in ambari.properties file.'
+ print ' [3] Setup Ambari kerberos JAAS configuration.'
+ print '=' * 75
+ choice = get_validated_string_input('Enter choice, (1-3): ', '0', '[1-3]',
+ 'Invalid choice', False, False)
+
+ if choice == '1':
+ need_restart = setup_https(args)
+ elif choice == '2':
+ setup_master_key()
+ elif choice == '3':
+ setup_ambari_krb5_jaas()
+ else:
+ raise FatalException(-1, 'Unknown option for setup-security command.')
+
+ return need_restart
+
+#
+# Main.
+#
+def main():
+ parser = init_options_parser()
+ (options, args) = parser.parse_args()
+
+ #perform checks
+ options.warnings = []
+ options.must_set_database_options = False
+
+ if are_cmd_line_db_args_blank(options):
+ options.must_set_database_options = True
+ #TODO Silent is invalid here, right?
+
+ elif not are_cmd_line_db_args_valid(options):
+ parser.error('All database options should be set. Please see help for the options.')
+
+ ## jdbc driver and db options validation
+ #if options.jdbc_driver is None and options.jdbc_db is not None:
+ # parser.error("Option --jdbc-db is used only in pair with --jdbc-driver")
+ #elif options.jdbc_driver is not None and options.jdbc_db is None:
+ # parser.error("Option --jdbc-driver is used only in pair with --jdbc-db")
+
+ if options.debug:
+ sys.frozen = 'windows_exe' # Fake py2exe so we can debug
+
+ if len(args) == 0:
+ parser.print_help()
+ parser.error("No action entered")
+
+ action = args[0]
+
+ if action == UPGRADE_STACK_ACTION:
+ possible_args_numbers = [2,4] # OR
+ else:
+ possible_args_numbers = [1]
+
+ matches = 0
+ for args_number_required in possible_args_numbers:
+ matches += int(len(args) == args_number_required)
+
+ if matches == 0:
+ parser.print_help()
+ possible_args = ' or '.join(str(x) for x in possible_args_numbers)
+ parser.error("Invalid number of arguments. Entered: " + str(len(args)) + ", required: " + possible_args)
+
+ options.exit_message = "Ambari Server '%s' completed successfully." % action
+ need_restart = True
+ try:
+ if action == SETUP_ACTION:
+ setup(options)
+ svcsetup()
+ elif action == START_ACTION:
+ svcstart()
+ elif action == PSTART_ACTION:
+ start(options)
+ elif action == STOP_ACTION:
+ svcstop()
+ elif action == RESET_ACTION:
+ reset(options, AmbariServerService)
+ elif action == STATUS_ACTION:
+ svcstatus(options)
+ elif action == UPGRADE_ACTION:
+ upgrade(options)
+# elif action == UPGRADE_STACK_ACTION:
+# stack_id = args[1]
+# repo_url = None
+# repo_url_os = None
+#
+# if len(args) > 2:
+# repo_url = args[2]
+# if len(args) > 3:
+# repo_url_os = args[3]
+#
+# upgrade_stack(options, stack_id, repo_url, repo_url_os)
+ elif action == LDAP_SETUP_ACTION:
+ setup_ldap()
+ elif action == SETUP_SECURITY_ACTION:
+ need_restart = setup_security(options)
+ else:
+ parser.error("Invalid action")
+
+ if action in ACTION_REQUIRE_RESTART and need_restart:
+ status, stateDesc = is_server_running(AmbariServerService)
+ if status:
+ print 'NOTE: Restart Ambari Server to apply changes' + \
+ ' ("ambari-server restart|stop|start")'
+
+ if options.warnings:
+ for warning in options.warnings:
+ print_warning_msg(warning)
+ pass
+ options.exit_message = "Ambari Server '%s' completed with warnings." % action
+ pass
+ except FatalException as e:
+ if e.reason is not None:
+ print_error_msg("Exiting with exit code {0}. \nREASON: {1}".format(e.code, e.reason))
+ sys.exit(e.code)
+ except NonFatalException as e:
+ options.exit_message = "Ambari Server '%s' completed with warnings." % action
+ if e.reason is not None:
+ print_warning_msg(e.reason)
+
+ if options.exit_message is not None:
+ print options.exit_message
+
+
+if __name__ == "__main__":
+ try:
+ main()
+ except (KeyboardInterrupt, EOFError):
+ print("\nAborting ... Keyboard Interrupt.")
+ sys.exit(1) \ No newline at end of file
diff --git a/ambari-server/src/main/python/ambari-server.py b/ambari-server/src/main/python/ambari-server.py
index 19b875d4ce..91deb741e0 100755
--- a/ambari-server/src/main/python/ambari-server.py
+++ b/ambari-server/src/main/python/ambari-server.py
@@ -758,9 +758,9 @@ def check_reverse_lookup():
Check if host fqdn resolves to current host ip
"""
try:
- host_name = socket.gethostname()
+ host_name = socket.gethostname().lower()
host_ip = socket.gethostbyname(host_name)
- host_fqdn = socket.getfqdn()
+ host_fqdn = socket.getfqdn().lower()
fqdn_ip = socket.gethostbyname(host_fqdn)
return host_ip == fqdn_ip
except socket.herror:
@@ -1317,7 +1317,7 @@ def store_remote_properties(args):
# to the jdbc hostname since its passed onto the agents for RCA
jdbc_hostname = args.database_host
if (args.database_host == "localhost"):
- jdbc_hostname = socket.getfqdn()
+ jdbc_hostname = socket.getfqdn().lower()
connectionStringFormat = DATABASE_CONNECTION_STRINGS
if args.sid_or_sname == "sid":
@@ -2944,7 +2944,7 @@ def upgrade(args):
if os.path.lexists(jdbc_symlink):
os.remove(jdbc_symlink)
os.symlink(os.path.join(resources_dir,JDBC_DB_DEFAULT_DRIVER[db_name]), jdbc_symlink)
-
+
# check if ambari has obsolete LDAP configuration
if properties.get_property(LDAP_PRIMARY_URL_PROPERTY) and not properties.get_property(IS_LDAP_CONFIGURED):
args.warnings.append("Existing LDAP configuration is detected. You must run the \"ambari-server setup-ldap\" command to adjust existing LDAP configuration.")
@@ -3528,7 +3528,7 @@ def setup_master_key():
if not is_alias_string(ldap_password) and os.path.isfile(ldap_password):
with open(ldap_password, 'r') as passwdfile:
ldap_password = passwdfile.read()
-
+
ts_password = properties.get_property(SSL_TRUSTSTORE_PASSWORD_PROPERTY)
resetKey = False
masterKey = None
@@ -4293,7 +4293,7 @@ def get_fqdn():
handle.close()
return str
except Exception:
- return socket.getfqdn()
+ return socket.getfqdn().lower()
def get_ulimit_open_files():
diff --git a/ambari-server/src/main/python/ambari_server/dbConfiguration.py b/ambari-server/src/main/python/ambari_server/dbConfiguration.py
new file mode 100644
index 0000000000..b9d4e4ebf8
--- /dev/null
+++ b/ambari-server/src/main/python/ambari_server/dbConfiguration.py
@@ -0,0 +1,213 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+from ambari_commons import OSCheck
+from ambari_commons.exceptions import FatalException
+from ambari_commons.logging_utils import print_error_msg
+from ambari_server.setupSecurity import SECURITY_IS_ENCRYPTION_ENABLED
+from serverConfiguration import get_ambari_properties
+
+
+#Database settings
+DB_STATUS_RUNNING_DEFAULT = "running"
+
+SETUP_DB_CONNECT_TIMEOUT = 5
+SETUP_DB_CONNECT_ATTEMPTS = 3
+
+DATABASE_INDEX = 0
+USERNAME_PATTERN = "^[a-zA-Z_][a-zA-Z0-9_\-]*$"
+PASSWORD_PATTERN = "^[a-zA-Z0-9_-]*$"
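+# The parallel lists below are indexed by DATABASE_INDEX: 0 = postgres, 1 = oracle, 2 = mysql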
+DATABASE_NAMES = ["postgres", "oracle", "mysql"]
+DATABASE_STORAGE_NAMES = ["Database", "Service", "Database"]
+DATABASE_PORTS = ["5432", "1521", "3306"]
+DATABASE_DRIVER_NAMES = ["org.postgresql.Driver", "oracle.jdbc.driver.OracleDriver", "com.mysql.jdbc.Driver"]
+DATABASE_CONNECTION_STRINGS = [
+ "jdbc:postgresql://{0}:{1}/{2}",
+ "jdbc:oracle:thin:@{0}:{1}/{2}",
+ "jdbc:mysql://{0}:{1}/{2}"]
+DATABASE_CONNECTION_STRINGS_ALT = [
+ "jdbc:postgresql://{0}:{1}/{2}",
+ "jdbc:oracle:thin:@{0}:{1}:{2}",
+ "jdbc:mysql://{0}:{1}/{2}"]
+ORACLE_SID_PATTERN = "jdbc:oracle:thin:@.+:.+/.+"
+ORACLE_SNAME_PATTERN = "jdbc:oracle:thin:@.+:.+:.+"
+
+DATABASE_CLI_TOOLS = [["psql"], ["sqlplus", "sqlplus64"], ["mysql"]]
+DATABASE_CLI_TOOLS_DESC = ["psql", "sqlplus", "mysql"]
+DATABASE_CLI_TOOLS_USAGE = ['su - postgres --command=psql -f {0} -v username=\'"{1}"\' -v password="\'{2}\'"',
+ 'sqlplus {1}/{2} < {0} ',
+ 'mysql --user={1} --password={2} {3}<{0}']
+
+MYSQL_INIT_SCRIPT = '/var/lib/ambari-server/resources/Ambari-DDL-MySQL-CREATE.sql'
+DATABASE_INIT_SCRIPTS = ['/var/lib/ambari-server/resources/Ambari-DDL-Postgres-CREATE.sql',
+ '/var/lib/ambari-server/resources/Ambari-DDL-Oracle-CREATE.sql',
+ MYSQL_INIT_SCRIPT]
+DATABASE_DROP_SCRIPTS = ['/var/lib/ambari-server/resources/Ambari-DDL-Postgres-DROP.sql',
+ '/var/lib/ambari-server/resources/Ambari-DDL-Oracle-DROP.sql',
+ '/var/lib/ambari-server/resources/Ambari-DDL-MySQL-DROP.sql']
+#
+# Database configuration base class
+#
+class DBMSConfig(object):
+ def __init__(self, options, properties):
+ """
+ #Just load the defaults. The derived classes will be able to modify them later
+ """
+ self.persistence_type = 'remote'
+ self.dbms = ""
+ self.driver_name = ""
+ self.database_host = ""
+ self.database_port = ""
+ self.database_name = ""
+ self.database_username = ""
+ self.password_file = None
+
+ self.silent = options.silent
+
+ isSecureProp = properties.get_property(SECURITY_IS_ENCRYPTION_ENABLED)
+ self.isSecure = True if isSecureProp and isSecureProp.lower() == 'true' else False
+ pass
+
+
+ # properties = property bag that will ultimately define the type of database. Since
+ # right now in Windows we only support SQL Server, this argument is not yet used.
+ # dbId = additional information that helps distinguish between various database connections
+ # (Ambari vs. Metrics is a prime example)
+ @staticmethod
+ def create(options, properties, dbId="Ambari"):
+ #if OSCheck.is_windows_os():
+ if dbId == "Ambari":
+ return SQLServerAmbariDBConfig(options, properties)
+ elif dbId == "Metrics":
+ return SQLServerMetricsDBConfig(options, properties)
+ else:
+ raise FatalException(-1, "Invalid database requested: " + str(dbId))
+ #else:
+ # go the classic Linux way
+ #return PGConfig(properties, dbId)
+ #return MySQLConfig(properties, dbId)
+ #return OracleConfig(properties, dbId)
+
+
+ #
+ # Public methods
+ #
+
+ #
+ # Main method. Configures the database according to the options and the existing properties.
+ #
+ def configure_database(self, args, properties):
+ result = self._prompt_db_properties()
+ if result:
+ #DB setup should be done last after doing any setup.
+ if self._is_local_database():
+ self._setup_local_server(properties)
+ else:
+ self._setup_remote_server(properties)
+ return result
+
+ def setup_database(self):
+ print 'Configuring {} database...'.format(self.db_title)
+
+ #DB setup should be done last after doing any setup.
+ if self._is_local_database():
+ self._setup_local_database()
+ else:
+ self._setup_remote_database()
+ pass
+
+ def reset_database(self):
+ print 'Resetting {} database...'.format(self.db_title)
+
+ if self._is_local_database():
+ self._reset_local_database()
+ else:
+ self._reset_remote_database()
+ pass
+
+ def ensure_jdbc_driver_installed(self, args, properties):
+ result = self._is_jdbc_driver_installed(properties)
+ if result == -1:
+ (result, msg) = self._prompt_jdbc_driver_install(properties)
+ if result == -1:
+ print_error_msg(msg)
+ raise FatalException(-1, msg)
+
+ if result != 1:
+ if self._install_jdbc_driver(args, properties):
+ return True
+ return False
+
+
+ #
+ # Private implementation
+ #
+
+ #
+ # Checks if options determine local DB configuration
+ #
+ def _is_local_database(self):
+ return self.persistence_type == 'local'
+
+ def _is_jdbc_driver_installed(self, properties):
+ return 1
+
+ def configure_database_password(self, showDefault=True):
+ pass
+
+ def _prompt_db_properties(self):
+ #if WINDOWS
+ # prompt for SQL Server host and instance name
+ #else
+ # go the classic Linux way
+ #linux_prompt_db_properties(args)
+ return False
+
+ def _setup_local_server(self, properties):
+ pass
+
+ def _setup_local_database(self):
+ pass
+
+ def _reset_local_database(self):
+ pass
+
+ def _setup_remote_server(self, properties):
+ pass
+
+ def _setup_remote_database(self):
+ pass
+
+ def _reset_remote_database(self):
+ pass
+
+ def _prompt_jdbc_driver_install(self, properties):
+ return (False, "")
+
+ def _install_jdbc_driver(self, options, properties):
+ return False
+
+ def ensure_dbms_is_running(self, options, properties, scmStatus=None):
+ pass
+
+if OSCheck.is_windows_os():
+ from ambari_server.dbConfiguration_windows import SQLServerAmbariDBConfig, SQLServerMetricsDBConfig
+#else:
+# from ambari_server.dbConfiguration_linux import PostgreSQLConfig #and potentially MySQLConfig, OracleConfig
diff --git a/ambari-server/src/main/python/ambari_server/dbConfiguration_linux.py b/ambari-server/src/main/python/ambari_server/dbConfiguration_linux.py
new file mode 100644
index 0000000000..ce47faec70
--- /dev/null
+++ b/ambari-server/src/main/python/ambari_server/dbConfiguration_linux.py
@@ -0,0 +1,740 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import shutil
+
+from ambari_commons import OSConst
+from ambari_commons.logging_utils import *
+from exceptions import *
+from dbConfiguration import *
+from utils import *
+
+import utils
+
+# PostgreSQL settings
+PG_JDBC_CONNECTION_STRING = "jdbc:postgresql://{0}:{1}/{2}"
+PG_JDBC_CONNECTION_STRING_ALT = "jdbc:postgresql://{0}:{1}/{2}"
+
+UBUNTU_PG_HBA_ROOT = "/etc/postgresql"
+PG_HBA_ROOT_DEFAULT = "/var/lib/pgsql/data"
+
+SETUP_DB_CMD = ['su', '-', 'postgres',
+ '--command=psql -f {0} -v username=\'"{1}"\' -v password="\'{2}\'" -v dbname="{3}"']
+UPGRADE_STACK_CMD = ['su', 'postgres',
+ '--command=psql -f {0} -v stack_name="\'{1}\'" -v stack_version="\'{2}\'" -v dbname="{3}"']
+
+CHANGE_OWNER_COMMAND = ['su', '-', 'postgres',
+ '--command=/var/lib/ambari-server/resources/scripts/change_owner.sh -d {0} -s {1} -o {2}']
+
+PG_ERROR_BLOCKED = "is being accessed by other users"
+PG_STATUS_RUNNING = get_running_status()
+PG_DEFAULT_PASSWORD = "bigdata"
+SERVICE_CMD = "/usr/bin/env service"
+PG_SERVICE_NAME = "postgresql"
+PG_HBA_DIR = utils.get_postgre_hba_dir()
+
+PG_ST_CMD = "%s %s status" % (SERVICE_CMD, PG_SERVICE_NAME)
+if os.path.isfile("/usr/bin/postgresql-setup"):
+ PG_INITDB_CMD = "/usr/bin/postgresql-setup initdb"
+else:
+ PG_INITDB_CMD = "%s %s initdb" % (SERVICE_CMD, PG_SERVICE_NAME)
+
+PG_START_CMD = "%s %s start" % (SERVICE_CMD, PG_SERVICE_NAME)
+PG_RESTART_CMD = "%s %s restart" % (SERVICE_CMD, PG_SERVICE_NAME)
+PG_HBA_RELOAD_CMD = "%s %s reload" % (SERVICE_CMD, PG_SERVICE_NAME)
+
+PG_HBA_CONF_FILE = os.path.join(PG_HBA_DIR, "pg_hba.conf")
+PG_HBA_CONF_FILE_BACKUP = os.path.join(PG_HBA_DIR, "pg_hba_bak.conf.old")
+POSTGRESQL_CONF_FILE = os.path.join(PG_HBA_DIR, "postgresql.conf")
+
+
+# Set database properties to default values
+def load_default_db_properties(args):
+ args.persistence_type = 'local'
+ args.dbms = DATABASE_NAMES[DATABASE_INDEX]
+ args.database_host = "localhost"
+ args.database_port = DATABASE_PORTS[DATABASE_INDEX]
+ args.database_name = DEFAULT_DB_NAME
+ args.database_username = "ambari"
+ args.database_password = "bigdata"
+ args.sid_or_sname = "sname"
+ pass
+
+def configure_database_password(showDefault=True):
+ passwordDefault = PG_DEFAULT_PASSWORD
+ if showDefault:
+ passwordPrompt = 'Enter Database Password (' + passwordDefault + '): '
+ else:
+ passwordPrompt = 'Enter Database Password: '
+ passwordPattern = "^[a-zA-Z0-9_-]*$"
+ passwordDescr = "Invalid characters in password. Use only alphanumeric or "\
+ "_ or - characters"
+
+ password = read_password(passwordDefault, passwordPattern, passwordPrompt,
+ passwordDescr)
+
+ return password
+
+# Ask user for database connection properties
+def prompt_linux_db_properties(args):
+ global DATABASE_INDEX
+
+ if args.must_set_database_options:
+ load_default_db_properties(args)
+ ok = get_YN_input("Enter advanced database configuration [y/n] (n)? ", False)
+ if ok:
+
+ print "=============================================================================="
+ print "Choose one of the following options:"
+
+ database_num = str(DATABASE_INDEX + 1)
+ database_num = get_validated_string_input(
+ "[1] - PostgreSQL (Embedded)\n[2] - Oracle\n[3] - MySQL\n[4] - PostgreSQL\n"
+ "==============================================================================\n"
+ "Enter choice (" + database_num + "): ",
+ database_num,
+ "^[1234]$",
+ "Invalid number.",
+ False
+ )
+
+ if int(database_num) == 1:
+ args.persistence_type = 'local'
+ args.database_index = 0
+ else:
+ args.persistence_type = 'remote'
+ selected_db_option = int(database_num)
+
+ if selected_db_option == 2:
+ args.database_index = 1
+ elif selected_db_option == 3:
+ args.database_index = 2
+ elif selected_db_option == 4:
+ args.database_index = 0
+ else:
+ print_info_msg('Unknown DB option, defaulting to embedded PostgreSQL.')
+ args.database_index = 0
+ pass
+ pass
+
+ DATABASE_INDEX = args.database_index
+ args.dbms = DATABASE_NAMES[args.database_index]
+
+ if args.persistence_type != 'local':
+ args.database_host = get_validated_string_input(
+ "Hostname (" + args.database_host + "): ",
+ args.database_host,
+ "^[a-zA-Z0-9.\-]*$",
+ "Invalid hostname.",
+ False
+ )
+
+ args.database_port = DATABASE_PORTS[DATABASE_INDEX]
+ args.database_port = get_validated_string_input(
+ "Port (" + args.database_port + "): ",
+ args.database_port,
+ "^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$",
+ "Invalid port.",
+ False
+ )
+
+ if args.dbms == "oracle":
+ # Oracle uses service name or service id
+ idType = "1"
+ idType = get_validated_string_input(
+ "Select Oracle identifier type:\n1 - " + ORACLE_DB_ID_TYPES[0] +
+ "\n2 - " + ORACLE_DB_ID_TYPES[1] + "\n(" + idType + "): ",
+ idType,
+ "^[12]$",
+ "Invalid number.",
+ False
+ )
+
+ if idType == "2":
+ args.sid_or_sname = "sid"
+
+ IDTYPE_INDEX = int(idType) - 1
+ args.database_name = get_validated_service_name(args.database_name,
+ IDTYPE_INDEX)
+ elif args.dbms in ["mysql", "postgres"]:
+ args.database_name = get_validated_db_name(args.database_name)
+
+ else:
+ # other DB types
+ pass
+ pass
+ else:
+ args.database_host = "localhost"
+ args.database_port = DATABASE_PORTS[DATABASE_INDEX]
+
+ args.database_name = get_validated_db_name(args.database_name)
+ pass
+
+ # Username is common for Oracle/MySQL/Postgres
+ args.database_username = get_validated_string_input(
+ 'Username (' + args.database_username + '): ',
+ args.database_username,
+ USERNAME_PATTERN,
+ "Invalid characters in username. Start with _ or alpha "
+ "followed by alphanumeric or _ or - characters",
+ False
+ )
+ args.database_password = configure_database_password(True)
+
+ print_info_msg('Using database options: {database},{host},{port},{schema},{user},{password}'.format(
+ database=args.dbms,
+ host=args.database_host,
+ port=args.database_port,
+ schema=args.database_name,
+ user=args.database_username,
+ password=args.database_password
+ ))
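+
+# Example (illustrative, a sketch of the resulting state): choosing option [3]
+# (MySQL) above sets roughly
+# args.persistence_type == 'remote'
+# args.database_index == 2
+# args.dbms == DATABASE_NAMES[2]
+# and then prompts for hostname, port, schema, username and password.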
+
+# PostgreSQL configuration and setup
+class PGConfig(DBMSConfig):
+ def __init__(self):
+ #Init the database configuration data here, if any
+ pass
+
+ def configure_database_password(self, showDefault=True):
+ passwordDefault = PG_DEFAULT_PASSWORD
+ if showDefault:
+ passwordPrompt = 'Enter Database Password (' + passwordDefault + '): '
+ else:
+ passwordPrompt = 'Enter Database Password: '
+ passwordPattern = "^[a-zA-Z0-9_-]*$"
+ passwordDescr = "Invalid characters in password. Use only alphanumeric or "\
+ "_ or - characters"
+
+ password = read_password(passwordDefault, passwordPattern, passwordPrompt,
+ passwordDescr)
+
+ return password
+
+ #
+ # Private implementation
+ #
+ def _change_db_files_owner(self, args):
+ print 'Fixing database objects owner'
+ database_name = args.database_name
+ new_owner = args.database_username
+ if '"' not in new_owner:
+ #wrap to allow old username "ambari-server", postgres only
+ new_owner = '\'"{0}"\''.format(new_owner)
+ pass
+
+ command = CHANGE_OWNER_COMMAND[:]
+ command[-1] = command[-1].format(database_name, 'ambari', new_owner)
+ return run_os_command(command)
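+
+ # Example (illustrative): for new_owner 'ambari-server' the wrapping above
+ # evaluates as '\'"{0}"\''.format('ambari-server') == '\'"ambari-server"\'',
+ # i.e. the hyphenated name becomes a double-quoted PostgreSQL identifier
+ # inside single quotes for the shell.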
+
+ def _configure_pg_hba_ambaridb_users(self):
+ args = optparse.Values()
+ configure_database_username_password(args)
+
+ with open(PG_HBA_CONF_FILE, "a") as pgHbaConf:
+ pgHbaConf.write("\n")
+ pgHbaConf.write("local all " + args.database_username +
+ ",mapred md5")
+ pgHbaConf.write("\n")
+ pgHbaConf.write("host all " + args.database_username +
+ ",mapred 0.0.0.0/0 md5")
+ pgHbaConf.write("\n")
+ pgHbaConf.write("host all " + args.database_username +
+ ",mapred ::/0 md5")
+ pgHbaConf.write("\n")
+ retcode, out, err = run_os_command(PG_HBA_RELOAD_CMD)
+ if not retcode == 0:
+ raise FatalException(retcode, err)
+
+ def _configure_pg_hba_postgres_user(self):
+ postgresString = "all postgres"
+ for line in fileinput.input(PG_HBA_CONF_FILE, inplace=1):
+ print re.sub('all\s*all', postgresString, line),
+ os.chmod(PG_HBA_CONF_FILE, 0644)
+
+ def _configure_postgresql_conf(self):
+ listenAddress = "listen_addresses = '*' #"
+ for line in fileinput.input(POSTGRESQL_CONF_FILE, inplace=1):
+ print re.sub('#+listen_addresses.*?(#|$)', listenAddress, line),
+ os.chmod(POSTGRESQL_CONF_FILE, 0644)
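+
+ # Example (illustrative): the substitution above rewrites a stock line such as
+ # #listen_addresses = 'localhost' # what IP address(es) to listen on;
+ # into
+ # listen_addresses = '*' # what IP address(es) to listen on;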
+
+ # Store set of properties for remote database connection
+ def _store_remote_properties(self, args):
+ properties = get_ambari_properties()
+ if properties == -1:
+ print_error_msg("Error getting ambari properties")
+ return -1
+
+ isSecure = get_is_secure(properties)
+
+ properties.process_pair(PERSISTENCE_TYPE_PROPERTY, "remote")
+
+ properties.process_pair(JDBC_DATABASE_PROPERTY, args.dbms)
+ properties.process_pair(JDBC_HOSTNAME_PROPERTY, args.database_host)
+ properties.process_pair(JDBC_PORT_PROPERTY, args.database_port)
+ properties.process_pair(JDBC_SCHEMA_PROPERTY, args.database_name)
+
+ properties.process_pair(JDBC_DRIVER_PROPERTY, DBCN.get_driver_name())
+ # fully qualify the hostname to make sure all the other hosts can connect
+ # to the jdbc hostname, since it's passed on to the agents for RCA
+ jdbc_hostname = args.database_host
+ if (args.database_host == "localhost"):
+ jdbc_hostname = socket.getfqdn()
+
+ #TODO: Implement the DBCN connection string generation
+ #connectionStringFormat = DATABASE_CONNECTION_STRINGS
+ #if args.sid_or_sname == "sid":
+ # connectionStringFormat = DATABASE_CONNECTION_STRINGS_ALT
+ #properties.process_pair(JDBC_URL_PROPERTY, connectionStringFormat[DATABASE_INDEX].format(jdbc_hostname, args.database_port, args.database_name))
+ properties.process_pair(JDBC_URL_PROPERTY, DBCN.get_connection_string())
+ properties.process_pair(JDBC_USER_NAME_PROPERTY, args.database_username)
+ properties.process_pair(JDBC_PASSWORD_PROPERTY,
+ store_password_file(args.database_password, JDBC_PASSWORD_FILENAME))
+
+ # save any other defined properties to pass to JDBC
+ if DATABASE_INDEX < len(DATABASE_JDBC_PROPERTIES):
+ for pair in DATABASE_JDBC_PROPERTIES[DATABASE_INDEX]:
+ properties.process_pair(JDBC_PROPERTIES_PREFIX + pair[0], pair[1])
+
+ if isSecure:
+ encrypted_password = encrypt_password(JDBC_RCA_PASSWORD_ALIAS, args.database_password)
+ if encrypted_password != args.database_password:
+ properties.process_pair(JDBC_PASSWORD_PROPERTY, encrypted_password)
+ pass
+
+ properties.process_pair(JDBC_RCA_DRIVER_PROPERTY, DBCN.get_driver_name())
+ properties.process_pair(JDBC_RCA_URL_PROPERTY, DBCN.get_connection_string())
+ properties.process_pair(JDBC_RCA_USER_NAME_PROPERTY, args.database_username)
+ properties.process_pair(JDBC_RCA_PASSWORD_FILE_PROPERTY,
+ store_password_file(args.database_password, JDBC_PASSWORD_FILENAME))
+ if isSecure:
+ encrypted_password = encrypt_password(JDBC_RCA_PASSWORD_ALIAS, args.database_password)
+ if encrypted_password != args.database_password:
+ properties.process_pair(JDBC_RCA_PASSWORD_FILE_PROPERTY, encrypted_password)
+ pass
+
+ conf_file = properties.fileName
+
+ try:
+ properties.store(open(conf_file, "w"))
+ except Exception, e:
+ print 'Could not write ambari config file "%s": %s' % (conf_file, e)
+ return -1
+
+ return 0
+
+ #
+ # Public methods
+ #
+ def configure_postgres(self):
+ if os.path.isfile(PG_HBA_CONF_FILE):
+ if not os.path.isfile(PG_HBA_CONF_FILE_BACKUP):
+ shutil.copyfile(PG_HBA_CONF_FILE, PG_HBA_CONF_FILE_BACKUP)
+ else:
+ #Postgres has been configured before, must not override backup
+ print "Backup for pg_hba found, reconfiguration not required"
+ return 0
+ self._configure_pg_hba_postgres_user()
+ self._configure_pg_hba_ambaridb_users()
+ os.chmod(PG_HBA_CONF_FILE, 0644)
+ self._configure_postgresql_conf()
+ #restart postgresql if already running
+ pg_status = get_postgre_status()
+ if pg_status == PG_STATUS_RUNNING:
+ retcode = restart_postgres()
+ return retcode
+ return 0
+
+ def configure_database(self, args):
+ prompt_db_properties(args)
+
+ #DB setup should be done last, after all other setup steps.
+
+ if is_local_database(args):
+ #check if jdbc user is changed
+ is_user_changed = is_jdbc_user_changed(args)
+
+ print 'Default properties detected. Using built-in database.'
+ store_local_properties(args)
+
+ print 'Checking PostgreSQL...'
+ retcode = check_postgre_up()
+ if not retcode == 0:
+ err = 'Unable to start PostgreSQL server. Exiting'
+ raise FatalException(retcode, err)
+
+ print 'Configuring local database...'
+ retcode, outdata, errdata = setup_db(args)
+ if not retcode == 0:
+ err = 'Running the database init script failed. Exiting.'
+ raise FatalException(retcode, err)
+
+ if is_user_changed:
+ #remove backup for pg_hba in order to reconfigure postgres
+ remove_file(PG_HBA_CONF_FILE_BACKUP)
+
+ print 'Configuring PostgreSQL...'
+ retcode = configure_postgres()
+ if not retcode == 0:
+ err = 'Unable to configure PostgreSQL server. Exiting'
+ raise FatalException(retcode, err)
+
+ else:
+ retcode = self._store_remote_properties(args)
+ if retcode != 0:
+ err = 'Unable to save config file'
+ raise FatalException(retcode, err)
+
+ check_jdbc_drivers(args)
+
+ print 'Configuring remote database connection properties...'
+ retcode = setup_remote_db(args)
+ if retcode == -1:
+ err = "Remote database setup aborted."
+ raise NonFatalException(err)
+
+ if not retcode == 0:
+ err = 'Error while configuring connection properties. Exiting'
+ raise FatalException(retcode, err)
+ check_jdbc_drivers(args)
+
+
+ def configure_database_username_password(self, args):
+ properties = get_ambari_properties()
+ if properties == -1:
+ print_error_msg("Error getting ambari properties")
+ return -1
+
+ username = properties[JDBC_USER_NAME_PROPERTY]
+ passwordProp = properties[JDBC_PASSWORD_PROPERTY]
+ dbname = properties[JDBC_DATABASE_PROPERTY]
+
+ if username and passwordProp and dbname:
+ print_info_msg("Database username + password already configured")
+ args.database_username = username
+ args.database_name = dbname
+ if is_alias_string(passwordProp):
+ args.database_password = decrypt_password_for_alias(JDBC_RCA_PASSWORD_ALIAS)
+ else:
+ if os.path.exists(passwordProp):
+ with open(passwordProp, 'r') as file:
+ args.database_password = file.read()
+
+ return 1
+ else:
+ print_error_msg("Connection properties not set in config file.")
+
+ def setup_db(self, args):
+ self.configure_database_username_password(args)
+
+ dbname = args.database_name
+ scriptFile = args.init_script_file
+ username = args.database_username
+ password = args.database_password
+
+ #setup DB
+ command = SETUP_DB_CMD[:]
+ command[-1] = command[-1].format(scriptFile, username, password, dbname)
+
+ for i in range(SETUP_DB_CONNECT_ATTEMPTS):
+ sys.stdout.write('Connecting to local database...')
+ retcode, outdata, errdata = run_os_command(command)
+ if retcode == 0:
+ print 'done.'
+ return retcode, outdata, errdata
+ timeOutMsg = 'connection timed out'
+ if (i+1) < SETUP_DB_CONNECT_ATTEMPTS:
+ timeOutMsg += '...retrying (%d)' % (i+1)
+ print timeOutMsg
+ time.sleep(SETUP_DB_CONNECT_TIMEOUT)
+
+ print 'unable to connect to database'
+ utils.print_error_msg(errdata)
+ return retcode, outdata, errdata
+
+ # Initialize remote database schema
+ def setup_remote_db(self, args):
+
+ setup_msg = "Before starting Ambari Server, you must run the following DDL " \
+ "against the database to create the schema: {0}".format(DATABASE_INIT_SCRIPTS[DATABASE_INDEX])
+
+ print_warning_msg(setup_msg)
+
+ proceed = get_YN_input("Proceed with configuring remote database connection properties [y/n] (y)? ", True)
+ retCode = 0 if proceed else -1
+
+ return retCode
+
+ def change_db_files_owner(self, args):
+ if args.persistence_type == 'local':
+ retcode, stdout, stderr = self._change_db_files_owner(args)
+ if not retcode == 0:
+ raise FatalException(20, 'Unable to change owner of database objects')
+
+ def reset_remote_db(self, args):
+ client_usage_cmd_drop = DATABASE_CLI_TOOLS_USAGE[DATABASE_INDEX].format(DATABASE_DROP_SCRIPTS[DATABASE_INDEX], args.database_username,
+ BLIND_PASSWORD, args.database_name)
+ client_usage_cmd_init = DATABASE_CLI_TOOLS_USAGE[DATABASE_INDEX].format(DATABASE_INIT_SCRIPTS[DATABASE_INDEX], args.database_username,
+ BLIND_PASSWORD, args.database_name)
+
+ print_warning_msg('To reset Ambari Server schema ' +
+ 'you must run the following DDL against the database to '
+ + 'drop the schema:' + os.linesep + client_usage_cmd_drop
+ + os.linesep + 'Then you must run the following DDL ' +
+ 'against the database to create the schema: ' + os.linesep +
+ client_usage_cmd_init + os.linesep)
+
+ def reset_local_db(self, args):
+ dbname = args.database_name
+ filename = args.drop_script_file
+ username = args.database_username
+ password = args.database_password
+ command = SETUP_DB_CMD[:]
+ command[-1] = command[-1].format(filename, username, password, dbname)
+ drop_retcode, drop_outdata, drop_errdata = run_os_command(command)
+ if not drop_retcode == 0:
+ raise FatalException(1, drop_errdata)
+ if drop_errdata and PG_ERROR_BLOCKED in drop_errdata:
+ raise FatalException(1, "Database is in use. Please, make sure all connections to the database are closed")
+ if drop_errdata and VERBOSE:
+ print_warning_msg(drop_errdata)
+ print_info_msg("About to run database setup")
+ retcode, outdata, errdata = setup_db(args)
+ if errdata and VERBOSE:
+ print_warning_msg(errdata)
+ if (errdata and 'ERROR' in errdata.upper()) or (drop_errdata and 'ERROR' in drop_errdata.upper()):
+ if not VERBOSE:
+ raise NonFatalException("Non critical error in DDL, use --verbose for more information")
+ else:
+ raise NonFatalException("Non critical error in DDL")
+
+# PostgreSQL database
+class PGDatabase:
+ _driverName = ''
+ _connectionString = ''
+
+ def __init__(self):
+ #Init the database connection here, if any
+ pass
+
+ #
+ # Private implementation
+ #
+
+ # Get database client executable path
+ def get_db_cli_tool(self, args):
+ for tool in DATABASE_CLI_TOOLS[DATABASE_INDEX]:
+ cmd = CHECK_COMMAND_EXIST_CMD.format(tool)
+ ret, out, err = run_in_shell(cmd)
+ if ret == 0:
+ return get_exec_path(tool)
+
+ return None
+
+ #
+ # Public interface
+ #
+ def get_driver_name(self):
+ return self._driverName
+
+ def get_connection_string(self):
+ return self._connectionString
+
+ def connect(self, args):
+ if args.persistence_type == "local":
+ return self.check_postgre_up()
+ else:
+ return 0
+
+ def get_running_status(self):
+ """Return postgre running status indicator"""
+ if OS_TYPE == OSConst.OS_UBUNTU:
+ return "%s/main" % PGDatabase.get_ubuntu_db_version()
+ else:
+ return DB_STATUS_RUNNING_DEFAULT
+
+ @staticmethod
+ def get_hba_dir():
+ """Return postgre hba dir location depends on OS"""
+ if OS_TYPE == OSConst.OS_UBUNTU:
+ return "%s/%s/main" % (UBUNTU_PG_HBA_ROOT, PGDatabase.get_ubuntu_db_version())
+ else:
+ return PG_HBA_ROOT_DEFAULT
+
+ @staticmethod
+ def get_ubuntu_db_version():
+ """Return installed version of postgre server. In case of several
+ installed versions will be returned a more new one.
+ """
+ postgre_ver = ""
+
+ if os.path.isdir(UBUNTU_PG_HBA_ROOT): # detect the installed versions of PG and select the newest one
+ postgre_ver = sorted(
+ [fld for fld in os.listdir(UBUNTU_PG_HBA_ROOT) if os.path.isdir(os.path.join(UBUNTU_PG_HBA_ROOT, fld))], reverse=True)
+ if len(postgre_ver) > 0:
+ return postgre_ver[0]
+ return postgre_ver
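+
+ # Example (illustrative): with subdirectories '8.4' and '9.3' under
+ # UBUNTU_PG_HBA_ROOT, the reverse-sorted listing above returns '9.3'.
+ # (The sort is lexicographic, an assumption that holds for single-digit
+ # major versions.)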
+
+
+ def restart_postgres(self):
+ print "Restarting PostgreSQL"
+ process = subprocess.Popen(PG_RESTART_CMD.split(' '),
+ stdout=subprocess.PIPE,
+ stdin=subprocess.PIPE,
+ stderr=subprocess.PIPE
+ )
+ time.sleep(5)
+ result = process.poll()
+ if result is None:
+ print_info_msg("Killing restart PostgresSQL process")
+ process.kill()
+ pg_status = self.get_postgre_status()
+ # SUSE Linux sets the status of a stopped postgresql process to "unused"
+ if pg_status == "unused" or pg_status == "stopped":
+ print_info_msg("PostgreSQL is stopped. Restarting ...")
+ retcode, out, err = run_os_command(PG_START_CMD)
+ return retcode
+ return 0
+
+ def execute_db_script(self, args, file):
+ #password access to ambari-server and mapred
+ configure_database_username_password(args)
+ dbname = args.database_name
+ username = args.database_username
+ password = args.database_password
+ command = SETUP_DB_CMD[:]
+ command[-1] = command[-1].format(file, username, password, dbname)
+ retcode, outdata, errdata = run_os_command(command)
+ if not retcode == 0:
+ print errdata
+ return retcode
+
+ def execute_remote_script(self, args, scriptPath):
+ print_warning_msg("Deprecated method called.")
+ tool = self.get_db_cli_tool(args)
+ if not tool:
+ # args.warnings.append('{0} not found. Please, run DDL script manually'.format(DATABASE_CLI_TOOLS[DATABASE_INDEX]))
+ if VERBOSE:
+ print_warning_msg('{0} not found'.format(DATABASE_CLI_TOOLS[DATABASE_INDEX]))
+ return -1, "Client wasn't found", "Client wasn't found"
+
+ os.environ["PGPASSWORD"] = args.database_password
+ retcode, out, err = run_in_shell('{0} {1}'.format(tool, POSTGRES_EXEC_ARGS.format(
+ args.database_host,
+ args.database_port,
+ args.database_name,
+ args.database_username,
+ scriptPath
+ )))
+ return retcode, out, err
+
+ def check_db_consistency(self, args, file):
+ #password access to ambari-server and mapred
+ configure_database_username_password(args)
+ dbname = args.database_name
+ username = args.database_username
+ password = args.database_password
+ command = SETUP_DB_CMD[:]
+ command[-1] = command[-1].format(file, username, password, dbname)
+ retcode, outdata, errdata = run_os_command(command)
+ if not retcode == 0:
+ print errdata
+ return retcode
+ else:
+ # Assumes that the output is of the form ...\n<count>
+ print_info_msg("Parsing output: " + outdata)
+ lines = outdata.splitlines()
+ if (lines[-1] == '3' or lines[-1] == '0'):
+ return 0
+ return -1
+
+
+ def get_postgre_status(self):
+ retcode, out, err = run_os_command(PG_ST_CMD)
+ try:
+ pg_status = re.search('(stopped|running)', out, re.IGNORECASE).group(0).lower()
+ except AttributeError:
+ pg_status = None
+ return pg_status
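+
+ # Example (illustrative): for service output containing '... (running) ...'
+ # the case-insensitive search above yields 'running'; output matching
+ # neither 'stopped' nor 'running' yields None.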
+
+
+ def check_postgre_up(self):
+ pg_status = self.get_postgre_status()
+ if pg_status == PG_STATUS_RUNNING:
+ print_info_msg("PostgreSQL is running")
+ return 0
+ else:
+ # run initdb only on non-Ubuntu systems, as Ubuntu does not have the initdb cmd.
+ if OS_TYPE != OSConst.OS_UBUNTU:
+ print "Running initdb: This may take upto a minute."
+ retcode, out, err = run_os_command(PG_INITDB_CMD)
+ if retcode == 0:
+ print out
+ print "About to start PostgreSQL"
+ try:
+ process = subprocess.Popen(PG_START_CMD.split(' '),
+ stdout=subprocess.PIPE,
+ stdin=subprocess.PIPE,
+ stderr=subprocess.PIPE
+ )
+ if OS_TYPE == OSConst.OS_SUSE:
+ time.sleep(20)
+ result = process.poll()
+ print_info_msg("Result of postgres start cmd: " + str(result))
+ if result is None:
+ process.kill()
+ pg_status = self.get_postgre_status()
+ else:
+ retcode = result
+ else:
+ out, err = process.communicate()
+ retcode = process.returncode
+ if pg_status == PG_STATUS_RUNNING:
+ print_info_msg("Postgres process is running. Returning...")
+ return 0
+ except (Exception), e:
+ pg_status = self.get_postgre_status()
+ if pg_status == PG_STATUS_RUNNING:
+ return 0
+ else:
+ print_error_msg("Postgres start failed. " + str(e))
+ return 1
+ return retcode
+
+
+ def get_validated_db_name(self, database_name):
+ return get_validated_string_input(
+ DATABASE_STORAGE_NAMES[DATABASE_INDEX] + " Name ("
+ + database_name + "): ",
+ database_name,
+ ".*",
+ "Invalid " + DATABASE_STORAGE_NAMES[DATABASE_INDEX] + " name.",
+ False
+ )
+
+
+ def get_validated_service_name(self, service_name, index):
+ return get_validated_string_input(
+ ORACLE_DB_ID_TYPES[index] + " (" + service_name + "): ",
+ service_name,
+ ".*",
+ "Invalid " + ORACLE_DB_ID_TYPES[index] + ".",
+ False
+ )
diff --git a/ambari-server/src/main/python/ambari_server/dbConfiguration_windows.py b/ambari-server/src/main/python/ambari_server/dbConfiguration_windows.py
new file mode 100644
index 0000000000..647a940ae5
--- /dev/null
+++ b/ambari-server/src/main/python/ambari_server/dbConfiguration_windows.py
@@ -0,0 +1,461 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import socket
+import string
+import win32api
+
+from ambari_commons.exceptions import *
+from ambari_commons.logging_utils import print_warning_msg
+from ambari_commons.os_utils import search_file
+from ambari_commons.os_windows import *
+from ambari_commons.str_utils import compress_backslashes, ensure_double_backslashes
+from ambari_server.setupSecurity import SECURITY_IS_ENCRYPTION_ENABLED, encrypt_password, store_password_file
+from serverConfiguration import *
+from dbConfiguration import *
+from userInput import get_validated_string_input
+
+#Import the SQL Server libraries
+
+# SQL Server settings
+DBPATH = 'C:\\Program Files\\Microsoft SQL Server\\MSSQL12.SQLEXPRESS\\MSSQL\\DATA\\'
+# DBPATH = 'C:\\Program Files\\Microsoft SQL Server\\MSSQL10_50.MSSQLSERVER\\MSSQL\\DATA\\'
+
+DATABASE_DBMS = "sqlserver"
+DATABASE_DRIVER_NAME = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
+LOCAL_DATABASE_SERVER = "localhost\\SQLEXPRESS"
+AMBARI_DATABASE_NAME = "ambari"
+
+METRICS_DATABASE_NAME = "HadoopMetrics"
+
+
+class DbPropKeys:
+ def __init__(self, i_dbms_key, i_driver_key, i_server_key, i_port_key, i_db_name_key, i_db_url_key):
+ self.reset(i_dbms_key, i_driver_key, i_server_key, i_port_key, i_db_name_key, i_db_url_key)
+ pass
+
+ def reset(self, i_dbms_key, i_driver_key, i_server_key, i_port_key, i_db_name_key, i_db_url_key):
+ self.dbms_key = i_dbms_key
+ self.driver_key = i_driver_key
+ self.server_key = i_server_key
+ self.port_key = i_port_key
+ self.db_name_key = i_db_name_key
+ self.db_url_key = i_db_url_key
+ pass
+
+class AuthenticationKeys:
+ def __init__(self, i_integrated_auth_key, i_user_name_key, i_password_key, i_password_alias, i_password_filename):
+ self.reset(i_integrated_auth_key, i_user_name_key, i_password_key, i_password_alias, i_password_filename)
+ pass
+
+ def reset(self, i_integrated_auth_key, i_user_name_key, i_password_key, i_password_alias, i_password_filename):
+ self.integrated_auth_key = i_integrated_auth_key
+ self.user_name_key = i_user_name_key
+ self.password_key = i_password_key
+ self.password_alias = i_password_alias
+ self.password_filename = i_password_filename
+ pass
+
+# SQL Server configuration and setup
+class SQLServerConfig(DBMSConfig):
+ def __init__(self, options, properties):
+ super(SQLServerConfig, self).__init__(options, properties)
+
+ """
+ #Just load the defaults. The derived classes will be able to modify them later
+ """
+ self.dbms = DATABASE_DBMS
+ self.driver_name = DATABASE_DRIVER_NAME
+
+ # The values from options supersede the values from properties
+ self.database_host = options.database_host if options.database_host is not None and options.database_host != "" else \
+ properties.get_property(self.dbPropKeys.server_key)
+ try:
+ if self.database_host is None or self.database_host == "":
+ self.database_host = options.default_database_host
+ else:
+ self.database_host = compress_backslashes(self.database_host)
+ except:
+ self.database_host = "localhost\\SQLEXPRESS"
+ pass
+ self.database_port = options.database_port if options.database_port is not None and options.database_port != "" else \
+ properties.get_property(self.dbPropKeys.port_key)
+ self.database_name = options.database_name if options.database_name is not None and options.database_name != "" else \
+ properties.get_property(self.dbPropKeys.db_name_key)
+
+ self.use_windows_authentication = options.database_windows_auth if options.database_windows_auth is True else \
+ properties.get_property(self.dbAuthKeys.integrated_auth_key)
+ self.database_username = options.database_username if options.database_username is not None and options.database_username != "" \
+ else properties.get_property(self.dbAuthKeys.user_name_key)
+ self.database_password = options.database_password if options.database_password is not None and options.database_password != "" \
+ else ""
+ self.password_file = properties[self.dbAuthKeys.password_key]
+
+ self.database_url = self._build_sql_server_connection_string()
+
+ self.persistence_property = None
+
+ self.db_title = ""
+
+ self.env_var_db_name = ""
+ self.env_var_db_log_name = ""
+ self.env_var_db_owner = ""
+
+ self.init_script_file = ""
+ self.drop_tables_script_file = ""
+
+ #
+ # No local DB configuration supported
+ #
+ def _is_local_database(self):
+ return False
+
+ def _is_jdbc_driver_installed(self, properties):
+ """
+ #Attempt to load the sqljdbc4.jar and sqljdbc_auth.dll. This will automatically scan the PATH.
+ :param None
+ :rtype : bool
+ """
+ paths = "." + os.pathsep + os.environ["PATH"]
+
+ # Find the jar by attempting to load it as a resource dll
+ driver_path = search_file("sqljdbc4.jar", paths)
+ if not driver_path:
+ return 0
+
+ auth_dll_path = search_file("sqljdbc_auth.dll", paths)
+ if not auth_dll_path:
+ return 0
+
+ try:
+ driver_path = properties[JDBC_DRIVER_PATH_PROPERTY]
+ if driver_path is None or driver_path == "":
+ return 0
+ except Exception:
+ # No such attribute set
+ return 0
+
+ return 1
+
+ def get_jdbc_driver_path(self):
+ paths = "." + os.pathsep + os.environ["PATH"]
+
+ # Find the jar by attempting to load it as a resource dll
+ driver_path = search_file("sqljdbc4.jar", paths)
+ return driver_path
+
+ def configure_database_password(self, showDefault=True):
+ #No password needed, using SQL Server integrated authentication
+ pass
+
+ def _prompt_db_properties(self):
+ if self.silent:
+ # All the settings are supposed to be retrieved from the command-line parameters
+ return True
+
+ #prompt for SQL Server host and instance name
+ hostname_prompt = "SQL Server host and instance for the {} database: ({}) ".format(self.db_title, self.database_host)
+ self.database_host = get_validated_string_input(hostname_prompt, self.database_host, None, None, False, True)
+
+ #prompt for SQL Server authentication method
+ if (self.use_windows_authentication is not None and self.use_windows_authentication.lower() == "true") or \
+ self.database_username is None or self.database_username == "":
+ auth_option_default = '1'
+ else:
+ auth_option_default = '2'
+
+ user_prompt = \
+ "[1] - Use SQL Server integrated authentication\n[2] - Use username+password authentication\n" \
+ "Enter choice ({}): ".format(auth_option_default)
+ auth_option = get_validated_string_input(user_prompt,
+ auth_option_default,
+ "^[12]$",
+ "Invalid number.",
+ False
+ )
+ if str(auth_option) == '1':
+ self.use_windows_authentication = True
+ self.database_password = None
+ else:
+ self.use_windows_authentication = False
+
+ user_prompt = "SQL Server user name for the {} database: ({}) ".format(self.db_title, self.database_username)
+ username = get_validated_string_input(user_prompt, self.database_username, None, "User name", False,
+ False)
+ self.database_username = username
+
+ user_prompt = "SQL Server password for the {} database: ".format(self.db_title)
+ password = get_validated_string_input(user_prompt, "", None, "Password", True, False)
+ self.database_password = password
+
+ self.database_url = self._build_sql_server_connection_string()
+
+ return True
+
+ def _setup_remote_server(self, properties):
+ properties.removeOldProp(self.dbPropKeys.port_key)
+ properties.removeOldProp(self.dbAuthKeys.integrated_auth_key)
+ properties.removeOldProp(self.dbAuthKeys.user_name_key)
+ properties.removeOldProp(self.dbAuthKeys.password_key)
+
+ properties.process_pair(self.persistence_property, 'remote')
+
+ properties.process_pair(self.dbPropKeys.dbms_key, self.dbms)
+ properties.process_pair(self.dbPropKeys.driver_key, self.driver_name)
+ properties.process_pair(self.dbPropKeys.server_key, ensure_double_backslashes(self.database_host))
+ if self.database_port is not None and self.database_port != "":
+ properties.process_pair(self.dbPropKeys.port_key, self.database_port)
+ properties.process_pair(self.dbPropKeys.db_name_key, self.database_name)
+
+ self._store_db_auth_config(properties, self.dbAuthKeys)
+
+ properties.process_pair(self.dbPropKeys.db_url_key, self.database_url)
+ pass
+
+ def _setup_remote_database(self):
+ print 'Populating {} database structure...'.format(self.db_title)
+
+ self._populate_database_structure()
+
+ def _reset_remote_database(self):
+ print 'Resetting {} database structure...'.format(self.db_title)
+
+ self._populate_database_structure()
+
+ def _prompt_jdbc_driver_install(self, properties):
+ result = False
+ msg = 'Before starting Ambari Server, you must install the SQL Server JDBC driver.'
+
+ if not self.silent:
+ print_warning_msg(msg)
+ raw_input(PRESS_ENTER_MSG)
+ result = self._is_jdbc_driver_installed(properties)
+ return (result, msg)
+
+ def _install_jdbc_driver(self, options, properties):
+ try:
+ driver_path = properties[JDBC_DRIVER_PATH_PROPERTY]
+ except Exception:
+ # No such attribute set
+ driver_path = None
+
+ if driver_path is None or driver_path == "":
+ driver_path = self.get_jdbc_driver_path()
+
+ properties.process_pair(JDBC_DRIVER_PATH_PROPERTY, driver_path)
+ return True
+ return False
+
+ def ensure_dbms_is_running(self, options, properties, scmStatus=None):
+ """
+ :param scmStatus : SvcStatusCallback
+ :rtype : None
+ """
+
+ db_host_components = self.database_host.split("\\")
+ if len(db_host_components) == 1:
+ db_machine = self.database_host
+ sql_svc_name = "MSSQLServer"
+ else:
+ db_machine = db_host_components[0]
+ sql_svc_name = "MSSQL$" + db_host_components[1]
+
+ if db_machine == "localhost" or db_machine.lower() == os.getenv("COMPUTERNAME").lower() or \
+ db_machine.lower() == socket.getfqdn().lower():
+ #TODO: Configure the SQL Server service name in ambari.properties
+ ret = WinServiceController.EnsureServiceIsStarted(sql_svc_name)
+ if 0 != ret:
+ raise FatalException(-1, "Error starting SQL Server: " + string(ret))
+
+ if scmStatus is not None:
+ scmStatus.reportStartPending()
+
+ ret = WinServiceController.EnsureServiceIsStarted("SQLBrowser") #The SQL Server JDBC driver needs this one
+ if 0 != ret:
+ raise FatalException(-1, "Error starting SQL Server Browser: " + string(ret))
+ pass
+
+
+ def _build_sql_server_connection_string(self):
+ databaseUrl = "jdbc:sqlserver://{}".format(ensure_double_backslashes(self.database_host))
+ if self.database_port is not None and self.database_port != "":
+ databaseUrl += ":{}".format(self.database_port)
+ databaseUrl += ";databaseName={}".format(self.database_name)
+ if(self.use_windows_authentication):
+ databaseUrl += ";integratedSecurity=true"
+ #No need to append the username and password, the Ambari server adds them by itself when connecting to the database
+ return databaseUrl
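+
+ # Example (illustrative): for database_host 'dbhost\SQLEXPRESS', an empty
+ # port, database_name 'ambari' and integrated authentication, this returns
+ # jdbc:sqlserver://dbhost\\SQLEXPRESS;databaseName=ambari;integratedSecurity=true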
+
+ def _store_db_auth_config(self, properties, keys):
+ if (self.use_windows_authentication):
+ properties.process_pair(keys.integrated_auth_key, "True")
+ properties.removeProp(keys.password_key)
+ else:
+ properties.process_pair(keys.integrated_auth_key, "False")
+
+ properties.process_pair(keys.user_name_key, self.database_username)
+
+ if self.isSecure:
+ encrypted_password = encrypt_password(keys.password_alias, self.database_password)
+ if self.database_password != encrypted_password:
+ properties.process_pair(keys.password_key, encrypted_password)
+ else:
+ passwordFile = store_password_file(self.database_password, keys.password_filename)
+ properties.process_pair(keys.password_key, passwordFile)
+
+ def _populate_database_structure(self):
+ # Setup DB
+ os.environ[self.env_var_db_name] = self.database_name
+ os.environ[self.env_var_db_log_name] = self.database_name + '_log'
+ os.environ[self.env_var_db_owner] = 'hadoop'
+
+ # Don't create the database, assume it already exists. Just clear out the known tables structure
+ SQLServerConfig._execute_db_script(self.database_host, self.drop_tables_script_file)
+
+ # Init DB
+ SQLServerConfig._execute_db_script(self.database_host, self.init_script_file)
+ pass
+
+ @staticmethod
+ def _execute_db_script(databaseHost, databaseScript):
+ dbCmd = 'sqlcmd -S {} -i {}'.format(databaseHost, databaseScript)
+ retCode, outData, errData = run_os_command(['cmd', '/C', dbCmd])
+ if not retCode == 0:
+ err = 'Running database create script failed. Error output: {} Output: {} Exiting.'.format(errData, outData)
+ raise FatalException(retCode, err)
+ print_info_msg("sqlcmd output:")
+ print_info_msg(outData)
+ pass
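+
+ # Example (illustrative): for databaseHost 'localhost\SQLEXPRESS' and script
+ # 'resources\Ambari-DDL-SQLServer-CREATE.sql' the command executed above is
+ # cmd /C sqlcmd -S localhost\SQLEXPRESS -i resources\Ambari-DDL-SQLServer-CREATE.sql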
+
+# SQL Server Ambari database configuration and setup
+class SQLServerAmbariDBConfig(SQLServerConfig):
+ def __init__(self, options, properties):
+ self.dbPropKeys = DbPropKeys(
+ JDBC_DATABASE_PROPERTY,
+ JDBC_DRIVER_PROPERTY,
+ JDBC_HOSTNAME_PROPERTY,
+ JDBC_PORT_PROPERTY,
+ JDBC_SCHEMA_PROPERTY,
+ JDBC_URL_PROPERTY)
+ self.dbAuthKeys = AuthenticationKeys(
+ JDBC_USE_INTEGRATED_AUTH_PROPERTY,
+ JDBC_USER_NAME_PROPERTY,
+ JDBC_PASSWORD_PROPERTY,
+ JDBC_RCA_PASSWORD_ALIAS,
+ JDBC_PASSWORD_FILENAME
+ )
+
+ super(SQLServerAmbariDBConfig, self).__init__(options, properties)
+
+ if self.database_name is None or self.database_name == "":
+ self.database_name = AMBARI_DATABASE_NAME
+
+ self.persistence_property = PERSISTENCE_TYPE_PROPERTY
+
+ self.db_title = "ambari"
+
+ self.env_var_db_name ='AMBARIDBNAME'
+ self.env_var_db_log_name = 'AMBARIDBLOGNAME'
+ self.env_var_db_owner = 'AMBARIDBOWNER'
+
+ # The values from options supersede the values from properties
+ if options.init_db_script_file is not None and options.init_db_script_file != "":
+ self.init_script_file = compress_backslashes(options.init_db_script_file)
+ else:
+ self.init_script_file = "resources" + os.path.sep + "Ambari-DDL-SQLServer-CREATE.sql"
+ if options.cleanup_db_script_file is not None and options.cleanup_db_script_file != "":
+ self.drop_tables_script_file = compress_backslashes(options.cleanup_db_script_file)
+ else:
+ self.drop_tables_script_file = "resources" + os.path.sep + "Ambari-DDL-SQLServer-DROP.sql"
+ pass
+
+ def _setup_remote_server(self, properties):
+ super(SQLServerAmbariDBConfig, self)._setup_remote_server(properties)
+
+ properties.process_pair(JDBC_RCA_DRIVER_PROPERTY, self.driver_name)
+ properties.process_pair(JDBC_RCA_HOSTNAME_PROPERTY, ensure_double_backslashes(self.database_host))
+ if self.database_port is not None and self.database_port != "":
+ properties.process_pair(JDBC_RCA_PORT_PROPERTY, self.database_port)
+ properties.process_pair(JDBC_RCA_SCHEMA_PROPERTY, self.database_name)
+
+ authKeys = AuthenticationKeys(
+ JDBC_RCA_USE_INTEGRATED_AUTH_PROPERTY,
+ JDBC_RCA_USER_NAME_PROPERTY,
+ JDBC_RCA_PASSWORD_FILE_PROPERTY,
+ JDBC_RCA_PASSWORD_ALIAS,
+ JDBC_PASSWORD_FILENAME
+ )
+ self._store_db_auth_config(properties, authKeys)
+
+ properties.process_pair(JDBC_RCA_URL_PROPERTY, self.database_url)
+ pass
+
+
+# SQL Server Metrics database configuration and setup
+class SQLServerMetricsDBConfig(SQLServerConfig):
+ def __init__(self, options, properties):
+ self.dbPropKeys = DbPropKeys(
+ JDBC_METRICS_DATABASE_PROPERTY,
+ JDBC_METRICS_DRIVER_PROPERTY,
+ JDBC_METRICS_HOSTNAME_PROPERTY,
+ JDBC_METRICS_PORT_PROPERTY,
+ JDBC_METRICS_SCHEMA_PROPERTY,
+ JDBC_METRICS_URL_PROPERTY)
+ self.dbAuthKeys = AuthenticationKeys(
+ JDBC_METRICS_USE_INTEGRATED_AUTH_PROPERTY,
+ JDBC_METRICS_USER_NAME_PROPERTY,
+ JDBC_METRICS_PASSWORD_PROPERTY,
+ JDBC_METRICS_PASSWORD_ALIAS,
+ JDBC_METRICS_PASSWORD_FILENAME
+ )
+
+ super(SQLServerMetricsDBConfig, self).__init__(options, properties)
+
+ self.database_name = METRICS_DATABASE_NAME
+
+ self.persistence_property = METRICS_PERSISTENCE_TYPE_PROPERTY
+
+ self.db_title = "metrics"
+
+ self.env_var_db_name ='METRICSDBNAME'
+ self.env_var_db_log_name = 'METRICSDBLOGNAME'
+ self.env_var_db_owner = 'METRICSDBOWNER'
+
+ if options.init_metrics_db_script_file is not None and options.init_metrics_db_script_file != "":
+ self.init_script_file = compress_backslashes(options.init_metrics_db_script_file)
+ else:
+ self.init_script_file = "resources" + os.sep + "Hadoop-Metrics-SQLServer-CREATE.sql"
+ if options.cleanup_metrics_db_script_file is not None and options.cleanup_metrics_db_script_file != "":
+ self.drop_tables_script_file = compress_backslashes(options.cleanup_metrics_db_script_file)
+ else:
+ self.drop_tables_script_file = "resources" + os.sep + "Hadoop-Metrics-SQLServer-DROP.sql"
+ pass
+
+
+# SQL Server database
+class SQLServerDatabase:
+ def __init__(self):
+ #Init the database connection here
+ pass
+
+ def get_running_status(self):
+ #if the connection is active, return running
+ #else return stopped
+ return DB_STATUS_RUNNING_DEFAULT
diff --git a/ambari-server/src/main/python/ambari_server/properties.py b/ambari-server/src/main/python/ambari_server/properties.py
new file mode 100644
index 0000000000..8e00762d81
--- /dev/null
+++ b/ambari-server/src/main/python/ambari_server/properties.py
@@ -0,0 +1,223 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import os
+import re
+import time
+
+#Apache License Header
+ASF_LICENSE_HEADER = '''
+# Copyright 2011 The Apache Software Foundation
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+'''
+
+# A Python replacement for java.util.Properties
+# Based on http://code.activestate.com/recipes/496795-a-python-replacement-for-javautilproperties/
+class Properties(object):
+ def __init__(self, props=None):
+ self._props = {}
+ self._origprops = {}
+ self._keymap = {}
+
+ self.othercharre = re.compile(r'(?<!\\)(\s*\=)|(?<!\\)(\s*\:)')
+ self.othercharre2 = re.compile(r'(\s*\=)|(\s*\:)')
+ self.bspacere = re.compile(r'\\(?!\s$)')
+
+ def __parse(self, lines):
+ lineno = 0
+ i = iter(lines)
+ for line in i:
+ lineno += 1
+ line = line.strip()
+ if not line:
+ continue
+ if line[0] == '#':
+ continue
+ escaped = False
+ sepidx = -1
+ flag = 0
+ m = self.othercharre.search(line)
+ if m:
+ first, last = m.span()
+ start, end = 0, first
+ flag = 1
+ wspacere = re.compile(r'(?<![\\\=\:])(\s)')
+ else:
+ if self.othercharre2.search(line):
+ wspacere = re.compile(r'(?<![\\])(\s)')
+ start, end = 0, len(line)
+ m2 = wspacere.search(line, start, end)
+ if m2:
+ first, last = m2.span()
+ sepidx = first
+ elif m:
+ first, last = m.span()
+ sepidx = last - 1
+ while line[-1] == '\\':
+ nextline = i.next()
+ nextline = nextline.strip()
+ lineno += 1
+ line = line[:-1] + nextline
+ if sepidx != -1:
+ key, value = line[:sepidx], line[sepidx + 1:]
+ else:
+ key, value = line, ''
+ self.process_pair(key, value)
+
+ def process_pair(self, key, value):
+ oldkey = key
+ oldvalue = value
+ keyparts = self.bspacere.split(key)
+ strippable = False
+ lastpart = keyparts[-1]
+ if lastpart.find('\\ ') != -1:
+ keyparts[-1] = lastpart.replace('\\', '')
+ elif lastpart and lastpart[-1] == ' ':
+ strippable = True
+ key = ''.join(keyparts)
+ if strippable:
+ key = key.strip()
+ oldkey = oldkey.strip()
+ oldvalue = self.unescape(oldvalue)
+ value = self.unescape(value)
+ self._props[key] = None if value is None else value.strip()
+ if self._keymap.has_key(key):
+ oldkey = self._keymap.get(key)
+ self._origprops[oldkey] = None if oldvalue is None else oldvalue.strip()
+ else:
+ self._origprops[oldkey] = None if oldvalue is None else oldvalue.strip()
+ self._keymap[key] = oldkey
+
+ def unescape(self, value):
+ newvalue = value
+ if not value is None:
+ newvalue = value.replace('\:', ':')
+ newvalue = newvalue.replace('\=', '=')
+ return newvalue
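+
+ # Example (illustrative): parsing the line 'jdk.name=jdk-7u45\:latest'
+ # stores key 'jdk.name' with value 'jdk-7u45:latest'; unescape() above
+ # undoes the '\:' and '\=' escapes.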
+
+ def removeOldProp(self, key):
+ if self._origprops.has_key(key):
+ del self._origprops[key]
+ pass
+
+ def removeProp(self, key):
+ if self._props.has_key(key):
+ del self._props[key]
+ pass
+
+ def load(self, stream):
+ if type(stream) is not file:
+ raise TypeError, 'Argument should be a file object!'
+ if stream.mode != 'r':
+ raise ValueError, 'Stream should be opened in read-only mode!'
+ try:
+ self.fileName = os.path.abspath(stream.name)
+ lines = stream.readlines()
+ self.__parse(lines)
+ except IOError:
+ raise
+
+ def get_property(self, key):
+ return self._props.get(key, '')
+
+ def propertyNames(self):
+ return self._props.keys()
+
+ def getPropertyDict(self):
+ return self._props
+
+ def __getitem__(self, name):
+ return self.get_property(name)
+
+ def __getattr__(self, name):
+ try:
+ return self.__dict__[name]
+ except KeyError:
+ if hasattr(self._props, name):
+ return getattr(self._props, name)
+
+ def sort_props(self):
+ tmp_props = {}
+ for key in sorted(self._props.iterkeys()):
+ tmp_props[key] = self._props[key]
+ self._props = tmp_props
+ pass
+
+ def sort_origprops(self):
+ tmp_props = self._origprops.copy()
+ self._origprops.clear()
+ for key in sorted(tmp_props.iterkeys()):
+ self._origprops[key] = tmp_props[key]
+ pass
+
+ def store(self, out, header=""):
+ """ Write the properties list to the stream 'out' along
+ with the optional 'header' """
+ if out.mode[0] != 'w':
+ raise ValueError, 'Stream should be opened in write mode!'
+ try:
+ out.write(''.join(('#', ASF_LICENSE_HEADER, '\n')))
+ out.write(''.join(('#', header, '\n')))
+ # Write timestamp
+ tstamp = time.strftime('%a %b %d %H:%M:%S %Z %Y', time.localtime())
+ out.write(''.join(('#', tstamp, '\n')))
+ # Write properties from the pristine dictionary
+ for prop, val in self._origprops.items():
+ if val is not None:
+ out.write(''.join((prop, '=', val, '\n')))
+ out.close()
+ except IOError:
+ raise
+
+ def store_ordered(self, out, header=""):
+ """ Write the properties list to the stream 'out' along
+ with the optional 'header' """
+ if out.mode[0] != 'w':
+ raise ValueError, 'Stream should be opened in write mode!'
+ try:
+ out.write(''.join(('#', ASF_LICENSE_HEADER, '\n')))
+ out.write(''.join(('#', header, '\n')))
+ # Write timestamp
+ tstamp = time.strftime('%a %b %d %H:%M:%S %Z %Y', time.localtime())
+ out.write(''.join(('#', tstamp, '\n')))
+ # Write properties from the pristine dictionary
+ for key in sorted(self._origprops.iterkeys()):
+ val = self._origprops[key]
+ if val is not None:
+ out.write(''.join((key, '=', val, '\n')))
+ out.close()
+ except IOError:
+ raise
diff --git a/ambari-server/src/main/python/ambari_server/serverConfiguration.py b/ambari-server/src/main/python/ambari_server/serverConfiguration.py
new file mode 100644
index 0000000000..e189e91f88
--- /dev/null
+++ b/ambari-server/src/main/python/ambari_server/serverConfiguration.py
@@ -0,0 +1,589 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import datetime
+import glob
+import re
+
+from ambari_commons.os_utils import *
+from ambari_commons.logging_utils import print_warning_msg, print_info_msg, print_error_msg
+from properties import Properties
+
+if OSCheck.is_windows_os():
+ from serverConfiguration_windows import *
+else:
+ # MacOS not supported
+ from serverConfiguration_linux import *
+
+
+# Non-root user setup commands
+NR_USER_PROPERTY = "ambari-server.user"
+
+# constants
+STACK_NAME_VER_SEP = "-"
+BLIND_PASSWORD = "*****"
+
+# Common messages
+PRESS_ENTER_MSG = "Press <enter> to continue."
+
+OS_TYPE_PROPERTY = "server.os_type"
+
+BOOTSTRAP_DIR_PROPERTY = "bootstrap.dir"
+
+AMBARI_CONF_VAR = "AMBARI_CONF_DIR"
+AMBARI_PROPERTIES_FILE = "ambari.properties"
+AMBARI_PROPERTIES_BACKUP_FILE = "ambari.properties.backup"
+
+GET_FQDN_SERVICE_URL = "server.fqdn.service.url"
+
+SERVER_OUT_FILE_KEY = "ambari.output.file.path"
+VERBOSE_OUTPUT_KEY = "ambari.output.verbose"
+
+DEBUG_MODE_KEY = "ambari.server.debug"
+SUSPEND_START_MODE_KEY = "ambari.server.debug.suspend.start"
+
+# Environment variables
+AMBARI_SERVER_LIB = "AMBARI_SERVER_LIB"
+JAVA_HOME = "JAVA_HOME"
+
+AMBARI_VERSION_VAR = "AMBARI_VERSION_VAR"
+
+# JDK
+JAVA_HOME_PROPERTY = "java.home"
+JDK_NAME_PROPERTY = "jdk.name"
+JCE_NAME_PROPERTY = "jce.name"
+
+#JCE Policy files
+JCE_POLICY_FILENAMES = ["UnlimitedJCEPolicyJDK7.zip", "jce_policy-6.zip"]
+JCE_DOWNLOAD_CMD = "curl -o {0} {1}"
+JCE_MIN_FILESIZE = 5000
+
+# JDBC
+#TODO property used incorrectly in local case, it was meant to be dbms name, not postgres database name,
+# has workaround for now, as we don't need dbms name if persistence_type=local
+JDBC_DATABASE_PROPERTY = "server.jdbc.database"
+JDBC_HOSTNAME_PROPERTY = "server.jdbc.hostname"
+JDBC_PORT_PROPERTY = "server.jdbc.port"
+JDBC_SCHEMA_PROPERTY = "server.jdbc.schema"
+
+JDBC_USER_NAME_PROPERTY = "server.jdbc.user.name"
+JDBC_PASSWORD_PROPERTY = "server.jdbc.user.passwd"
+JDBC_PASSWORD_FILENAME = "password.dat"
+JDBC_RCA_PASSWORD_FILENAME = "rca_password.dat"
+
+CLIENT_API_PORT_PROPERTY = "client.api.port"
+CLIENT_API_PORT = "8080"
+
+PERSISTENCE_TYPE_PROPERTY = "server.persistence.type"
+JDBC_DRIVER_PROPERTY = "server.jdbc.driver"
+JDBC_DRIVER_PATH_PROPERTY = "server.jdbc.driver.path"
+JDBC_URL_PROPERTY = "server.jdbc.url"
+
+JDBC_RCA_DATABASE_PROPERTY = "server.jdbc.database"
+JDBC_RCA_HOSTNAME_PROPERTY = "server.jdbc.hostname"
+JDBC_RCA_PORT_PROPERTY = "server.jdbc.port"
+JDBC_RCA_SCHEMA_PROPERTY = "server.jdbc.schema"
+
+JDBC_RCA_DRIVER_PROPERTY = "server.jdbc.rca.driver"
+JDBC_RCA_URL_PROPERTY = "server.jdbc.rca.url"
+JDBC_RCA_USER_NAME_PROPERTY = "server.jdbc.rca.user.name"
+JDBC_RCA_PASSWORD_FILE_PROPERTY = "server.jdbc.rca.user.passwd"
+
+JDBC_RCA_PASSWORD_ALIAS = "ambari.db.password"
+
+# resources repo configuration
+RESOURCES_DIR_PROPERTY = "resources.dir"
+RESOURCES_DIR_DEFAULT = "resources"
+
+# stack repo upgrade
+STACK_LOCATION_KEY = 'metadata.path'
+STACK_LOCATION_DEFAULT = "resources" + os.sep + "stacks"
+
+# JDK
+JDK_RELEASES="java.releases"
+
+# configuration backup
+back_up_file_path = None
+
+
+def get_conf_dir():
+ try:
+ conf_dir = os.environ[AMBARI_CONF_VAR]
+ return conf_dir
+ except KeyError:
+ default_conf_dir = DEFAULT_CONF_DIR
+ print AMBARI_CONF_VAR + " is not set, using default " + default_conf_dir
+ return default_conf_dir
+
+def find_properties_file():
+ conf_file = search_file(AMBARI_PROPERTIES_FILE, get_conf_dir())
+ if conf_file is None:
+ err = 'File %s not found in search path $%s: %s' % (AMBARI_PROPERTIES_FILE,
+ AMBARI_CONF_VAR, get_conf_dir())
+ print err
+ raise FatalException(1, err)
+ else:
+ print_info_msg('Loading properties from ' + conf_file)
+ return conf_file
+
+# Load ambari properties and return dict with values
+def get_ambari_properties():
+ conf_file = find_properties_file()
+
+ properties = None
+ try:
+ properties = Properties()
+ properties.load(open(conf_file))
+ except (Exception), e:
+ print 'Could not read "%s": %s' % (conf_file, e)
+ return -1
+ return properties
+
+def read_ambari_user():
+ '''
+ Reads ambari user from properties file
+ '''
+ conf_file = find_properties_file()
+ try:
+ properties = Properties()
+ properties.load(open(conf_file))
+ user = properties[NR_USER_PROPERTY]
+ if user:
+ return user
+ else:
+ return None
+ except Exception, e:
+ print_error_msg('Could not read "%s": %s' % (conf_file, e))
+ return None
+
+def get_value_from_properties(properties, key, default=""):
+ try:
+ value = properties.get_property(key)
+ if not value:
+ value = default
+ except:
+ return default
+ return value
+
+def get_prompt_default(defaultStr=None):
+ if not defaultStr or defaultStr == "":
+ return ""
+ else:
+ return '(' + defaultStr + ')'
+
+# Copy file to /tmp and save with file.# (largest # is latest file)
+def backup_file_in_temp(filePath):
+ global back_up_file_path
+ if filePath is not None and back_up_file_path is None:
+ tmpDir = tempfile.gettempdir()
+ back_up_file_count = len(glob.glob1(tmpDir, AMBARI_PROPERTIES_FILE + "*"))
+ back_up_file_path = os.path.join(tmpDir, AMBARI_PROPERTIES_FILE + "." + str(back_up_file_count + 1))
+ try:
+ shutil.copyfile(filePath, back_up_file_path)
+ except (Exception), e:
+ print_error_msg('Could not backup file in temp "%s": %s' % (
+ back_up_file_path, str(e)))
+ return 0
+
+def check_database_name_property():
+ properties = get_ambari_properties()
+ if properties == -1:
+ print_error_msg("Error getting ambari properties")
+ return -1
+
+ dbname = properties[JDBC_DATABASE_PROPERTY]
+ if dbname is None or dbname == "":
+ err = "DB Name property not set in config file.\n" + SETUP_OR_UPGRADE_MSG
+ raise FatalException(-1, err)
+
+def update_database_name_property():
+ try:
+ check_database_name_property()
+ except FatalException:
+ properties = get_ambari_properties()
+ if properties == -1:
+ err = "Error getting ambari properties"
+ raise FatalException(-1, err)
+ print_warning_msg(JDBC_DATABASE_PROPERTY + " property isn't set in " +
+ AMBARI_PROPERTIES_FILE + ". Setting it to default value - " + DEFAULT_DB_NAME)
+ properties.process_pair(JDBC_DATABASE_PROPERTY, DEFAULT_DB_NAME)
+ conf_file = find_properties_file()
+ try:
+ properties.store(open(conf_file, "w"))
+ except Exception, e:
+ err = 'Could not write ambari config file "%s": %s' % (conf_file, e)
+ raise FatalException(-1, err)
+
+
+def is_alias_string(passwdStr):
+ # match() anchors the alias pattern at the beginning of the string
+ regex = re.compile("\$\{alias=[\w\.]+\}")
+ return regex.match(passwdStr) is not None
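+
+# Example (illustrative):
+# is_alias_string("${alias=ambari.db.password}") # True
+# is_alias_string("/var/lib/ambari-server/keys/password.dat") # False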
+
+
+# Load database connection properties from conf file
+def parse_properties_file(args):
+ properties = get_ambari_properties()
+ if properties == -1:
+ print_error_msg("Error getting ambari properties")
+ return -1
+
+ # args.server_version_file_path = properties[SERVER_VERSION_FILE_PATH]
+ args.persistence_type = properties[PERSISTENCE_TYPE_PROPERTY]
+ args.jdbc_url = properties[JDBC_URL_PROPERTY]
+
+ if not args.persistence_type:
+ args.persistence_type = "local"
+
+ if args.persistence_type == 'remote':
+ args.dbms = properties[JDBC_DATABASE_PROPERTY]
+ args.database_host = properties[JDBC_HOSTNAME_PROPERTY]
+ args.database_port = properties[JDBC_PORT_PROPERTY]
+ args.database_name = properties[JDBC_SCHEMA_PROPERTY]
+ else:
+ #TODO incorrect property used!! leads to bunch of troubles. Workaround for now
+ args.database_name = properties[JDBC_DATABASE_PROPERTY]
+
+ args.database_username = properties[JDBC_USER_NAME_PROPERTY]
+ args.database_password_file = properties[JDBC_PASSWORD_PROPERTY]
+ if args.database_password_file:
+ if not is_alias_string(args.database_password_file):
+ args.database_password = open(properties[JDBC_PASSWORD_PROPERTY]).read()
+ else:
+ args.database_password = args.database_password_file
+ return 0
+
+
+def run_schema_upgrade():
+ jdk_path = find_jdk()
+ if jdk_path is None:
+ print_error_msg("No JDK found, please run the \"setup\" "
+ "command to install a JDK automatically or install any "
+ "JDK manually to " + JDK_INSTALL_DIR)
+ return 1
+ command = SCHEMA_UPGRADE_HELPER_CMD.format(jdk_path, get_conf_dir(), get_ambari_classpath())
+ (retcode, stdout, stderr) = run_os_command(command)
+ print_info_msg("Return code from schema upgrade command, retcode = " + str(retcode))
+ if retcode > 0:
+ print_error_msg("Error executing schema upgrade, please check the server logs.")
+ return retcode
+
+
+def update_ambari_properties():
+ prev_conf_file = search_file(AMBARI_PROPERTIES_BACKUP_FILE, get_conf_dir())
+ conf_file = search_file(AMBARI_PROPERTIES_FILE, get_conf_dir())
+
+ # Previous config file does not exist
+ if not prev_conf_file:
+ print_warning_msg("Cannot find the ambari.properties.backup file from the previous version, skipping import of settings")
+ return 0
+
+ try:
+ old_properties = Properties()
+ old_properties.load(open(prev_conf_file))
+ except Exception, e:
+ print 'Could not read "%s": %s' % (prev_conf_file, e)
+ return -1
+
+ try:
+ new_properties = Properties()
+ new_properties.load(open(conf_file))
+
+ for prop_key, prop_value in old_properties.getPropertyDict().items():
+ if ("agent.fqdn.service.url" == prop_key):
+ #BUG-7179 what is agent.fqdn property in ambari.props?
+ new_properties.process_pair(GET_FQDN_SERVICE_URL, prop_value)
+ elif ("server.os_type" == prop_key):
+ new_properties.process_pair(OS_TYPE_PROPERTY, OS_FAMILY + OS_VERSION)
+ else:
+ new_properties.process_pair(prop_key, prop_value)
+
+ # Adding custom user name property if it is absent
+ # In previous versions without custom user support server was started as
+ # "root" anyway so it's a reasonable default
+ if not NR_USER_PROPERTY in new_properties.keys():
+ new_properties.process_pair(NR_USER_PROPERTY, "root")
+
+ isJDK16Installed = new_properties.get_property(JAVA_HOME_PROPERTY) == DEFAULT_JDK16_LOCATION
+ if not JDK_NAME_PROPERTY in new_properties.keys() and isJDK16Installed:
+ new_properties.process_pair(JDK_NAME_PROPERTY, JDK_NAMES[1])
+
+ if not JCE_NAME_PROPERTY in new_properties.keys() and isJDK16Installed:
+ new_properties.process_pair(JCE_NAME_PROPERTY, JCE_POLICY_FILENAMES[1])
+
+ new_properties.store(open(conf_file, 'w'))
+
+ except Exception, e:
+ print 'Could not write "%s": %s' % (conf_file, e)
+ return -1
+
+ timestamp = datetime.datetime.now()
+ format = '%Y%m%d%H%M%S'
+ os.rename(prev_conf_file, prev_conf_file + '.' + timestamp.strftime(format))
+
+ return 0
+
+# update properties in a section-less properties file
+# Cannot use ConfigParser due to bugs in version 2.6
+def update_properties(propertyMap):
+ conf_file = search_file(AMBARI_PROPERTIES_FILE, get_conf_dir())
+ backup_file_in_temp(conf_file)
+ if propertyMap is not None and conf_file is not None:
+ properties = Properties()
+ try:
+ with open(conf_file, 'r') as file:
+ properties.load(file)
+ except (Exception), e:
+ print_error_msg('Could not read "%s": %s' % (conf_file, e))
+ return -1
+
+ #for key in propertyMap.keys():
+ #properties[key] = propertyMap[key]
+ for key in propertyMap.keys():
+ properties.removeOldProp(key)
+ properties.process_pair(key, str(propertyMap[key]))
+
+ for key in properties.keys():
+ if not propertyMap.has_key(key):
+ properties.removeOldProp(key)
+
+ with open(conf_file, 'w') as file:
+ properties.store_ordered(file)
+
+ return 0
+
+def update_properties_2(properties, propertyMap):
+ conf_file = search_file(AMBARI_PROPERTIES_FILE, get_conf_dir())
+ backup_file_in_temp(conf_file)
+ if conf_file is not None:
+ if propertyMap is not None:
+ for key in propertyMap.keys():
+ properties.removeOldProp(key)
+ properties.process_pair(key, str(propertyMap[key]))
+ pass
+
+ with open(conf_file, 'w') as file:
+ properties.store(file)
+ pass
+ pass
+
+def write_property(key, value):
+ conf_file = find_properties_file()
+ properties = Properties()
+ try:
+ properties.load(open(conf_file))
+ except Exception, e:
+ print_error_msg('Could not read ambari config file "%s": %s' % (conf_file, e))
+ return -1
+ properties.process_pair(key, value)
+ try:
+ properties.store(open(conf_file, "w"))
+ except Exception, e:
+ print_error_msg('Could not write ambari config file "%s": %s' % (conf_file, e))
+ return -1
+ return 0
+
+def remove_property(key):
+ conf_file = find_properties_file()
+ properties = Properties()
+ try:
+ properties.load(open(conf_file))
+ except Exception, e:
+ print_error_msg('Could not read ambari config file "%s": %s' % (conf_file, e))
+ return -1
+ properties.removeOldProp(key)
+ try:
+ properties.store(open(conf_file, "w"))
+ except Exception, e:
+ print_error_msg('Could not write ambari config file "%s": %s' % (conf_file, e))
+ return -1
+ return 0
+
+#
+# Checks if options determine local DB configuration
+#
+def is_local_database(args):
+ return hasattr(args, 'persistence_type') and args.persistence_type == 'local'
+
+#
+### JDK ###
+#
+
+#
+# Describes the JDK configuration data, necessary for download and installation
+#
+class JDKRelease:
+ name = ""
+ desc = ""
+ url = ""
+ dest_file = ""
+ jcpol_url = "http://public-repo-1.hortonworks.com/ARTIFACTS/UnlimitedJCEPolicyJDK7.zip"
+ dest_jcpol_file = ""
+ inst_dir = ""
+
+ def __init__(self, i_name, i_desc, i_url, i_dest_file, i_jcpol_url, i_dest_jcpol_file, i_inst_dir):
+ if i_name is None or i_name == "":
+ raise FatalException(-1, "Invalid JDK name: " + (i_desc or ""))
+ self.name = i_name
+ if i_desc is None or i_desc == "":
+ self.desc = self.name
+ else:
+ self.desc = i_desc
+ if i_url is None or i_url == "":
+ raise FatalException(-1, "Invalid URL for JDK " + i_name)
+ self.url = i_url
+ if i_dest_file is None or i_dest_file == "":
+ self.dest_file = i_name + ".exe"
+ else:
+ self.dest_file = i_dest_file
+ if not (i_jcpol_url is None or i_jcpol_url == ""):
+ self.jcpol_url = i_jcpol_url
+ if i_dest_jcpol_file is None or i_dest_jcpol_file == "":
+ self.dest_jcpol_file = "jcpol-" + i_name + ".zip"
+ else:
+ self.dest_jcpol_file = i_dest_jcpol_file
+ if i_inst_dir is None or i_inst_dir == "":
+ self.inst_dir = "C:\\" + i_desc
+ else:
+ self.inst_dir = i_inst_dir
+
+ @classmethod
+ def from_properties(cls, properties, section_name):
+ (desc, url, dest_file, jcpol_url, jcpol_file, inst_dir) = JDKRelease.__load_properties(properties, section_name)
+ return cls(section_name, desc, url, dest_file, jcpol_url, jcpol_file, inst_dir)
+
+ @staticmethod
+ def __load_properties(properties, section_name):
+ if section_name is None or section_name == "":
+ raise FatalException(-1, "Invalid properties section: " + ("(null)" if section_name is None else "(empty)"))
+ if properties.has_key(section_name + ".desc"): #Not critical
+ desc = properties[section_name + ".desc"]
+ else:
+ desc = section_name
+ if not properties.has_key(section_name + ".url"):
+ raise FatalException(-1, "Invalid JDK URL in the properties section: " + section_name)
+ url = properties[section_name + ".url"] #Required
+ if properties.has_key(section_name + ".dest-file"): #Not critical
+ dest_file = properties[section_name + ".dest-file"]
+ else:
+ dest_file = section_name + ".exe"
+ if properties.has_key(section_name + ".jcpol-url"): #Not critical
+ jcpol_url = properties[section_name + ".jcpol-url"]
+ else:
+ jcpol_url = None
+ if properties.has_key(section_name + ".jcpol-file"): #Not critical
+ jcpol_file = properties[section_name + ".jcpol-file"]
+ else:
+ jcpol_file = None
+ if properties.has_key(section_name + ".home"): #Not critical
+ inst_dir = properties[section_name + ".home"]
+ else:
+ inst_dir = "C:\\" + section_name
+ return (desc, url, dest_file, jcpol_url, jcpol_file, inst_dir)
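+
+# Example (hypothetical values) of a properties section consumed by
+# JDKRelease.from_properties(properties, "jdk7.67"):
+#   jdk7.67.desc=Oracle JDK 7.67
+#   jdk7.67.url=http://.../jdk-7u67-windows-x64.exe
+#   jdk7.67.dest-file=jdk-7u67-windows-x64.exe
+#   jdk7.67.home=C:\jdk7.67
+# Missing optional keys fall back to defaults derived from the section name.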
+
+def get_ambari_jars():
+ try:
+ conf_dir = os.environ[AMBARI_SERVER_LIB]
+ return conf_dir
+ except KeyError:
+ default_jar_location = DEFAULT_LIBS_DIR
+ print_info_msg(AMBARI_SERVER_LIB + " is not set, using default "
+ + default_jar_location)
+ return default_jar_location
+
+def get_share_jars():
+ share_jars = ""
+ file_list = []
+ file_list.extend(glob.glob(JAVA_SHARE_PATH + os.sep + "*mysql*"))
+ file_list.extend(glob.glob(JAVA_SHARE_PATH + os.sep + "*ojdbc*"))
+ if len(file_list) > 0:
+ share_jars = string.join(file_list, os.pathsep)
+ return share_jars
+
+def get_jdbc_cp():
+ jdbc_jar_path = ""
+ properties = get_ambari_properties()
+ if properties != -1:
+ jdbc_jar_path = properties[JDBC_DRIVER_PATH_PROPERTY]
+ return jdbc_jar_path
+
+def get_ambari_classpath():
+ ambari_cp = os.path.abspath(get_ambari_jars() + os.sep + "*")
+ jdbc_cp = get_jdbc_cp()
+ if len(jdbc_cp) > 0:
+ ambari_cp = ambari_cp + os.pathsep + jdbc_cp
+ share_cp = get_share_jars()
+ if len(share_cp) > 0:
+ ambari_cp = ambari_cp + os.pathsep + share_cp
+ return ambari_cp
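+
+# With the Linux defaults, get_ambari_classpath() yields a string of the form
+# /usr/lib/ambari-server/*:<configured JDBC driver jar>:<mysql/ojdbc jars from /usr/share/java>,
+# joined by os.pathsep, with the optional parts omitted when empty.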
+
+def get_JAVA_HOME():
+ properties = get_ambari_properties()
+ if properties == -1:
+ print_error_msg("Error getting ambari properties")
+ return None
+
+ java_home = properties[JAVA_HOME_PROPERTY]
+
+ if java_home and os.path.exists(java_home):
+ return java_home
+
+ return None
+
+#
+# Checks jdk path for correctness
+#
+def validate_jdk(jdk_path):
+ if jdk_path:
+ if os.path.exists(jdk_path):
+ java_exe_path = os.path.join(jdk_path, JAVA_EXE_SUBPATH)
+ if os.path.exists(java_exe_path) and os.path.isfile(java_exe_path):
+ return True
+ return False
+
+#
+# Finds the available JDKs.
+#
+def find_jdk():
+ jdkPath = get_JAVA_HOME()
+ if jdkPath:
+ if validate_jdk(jdkPath):
+ return jdkPath
+ print "Looking for available JDKs at " + JDK_INSTALL_DIR
+ jdks = glob.glob(JDK_INSTALL_DIR + os.sep + JDK_SEARCH_PATTERN)
+ #[fbarca] Use the newest JDK first
+ jdks.sort(reverse=True)
+ print "Found: " + str(jdks)
+ if len(jdks) == 0:
+ return
+ for jdkPath in jdks:
+ print "Trying to use JDK {0}".format(jdkPath)
+ if validate_jdk(jdkPath):
+ print "Selected JDK {0}".format(jdkPath)
+ return jdkPath
+ else:
+ print "JDK {0} is invalid".format(jdkPath)
+ return
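+
+# Search order: the configured JAVA_HOME first; failing that, entries under
+# JDK_INSTALL_DIR matching JDK_SEARCH_PATTERN, newest first. Returns None
+# (implicitly) when no candidate validates.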
diff --git a/ambari-server/src/main/python/ambari_server/serverConfiguration_linux.py b/ambari-server/src/main/python/ambari_server/serverConfiguration_linux.py
new file mode 100644
index 0000000000..a21437a837
--- /dev/null
+++ b/ambari-server/src/main/python/ambari_server/serverConfiguration_linux.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+JAVA_SHARE_PATH = "/usr/share/java"
+OUT_DIR = "/var/log/ambari-server"
+SERVER_OUT_FILE = OUT_DIR + "/ambari-server.out"
+SERVER_LOG_FILE = OUT_DIR + "/ambari-server.log"
+ROOT_FS_PATH = "/"
+
+# JDK
+JDK_NAMES = ["jdk-7u45-linux-x64.tar.gz", "jdk-6u31-linux-x64.bin"]
+DEFAULT_JDK16_LOCATION = "/usr/jdk64/jdk1.6.0_31"
+JDK_INSTALL_DIR = "/usr/jdk64"
+JDK_SEARCH_PATTERN = "jdk*"
+JAVA_EXE_SUBPATH = "bin/java"
+
+# Configuration defaults
+DEFAULT_CONF_DIR = "/etc/ambari-server/conf"
+PID_DIR = "/var/run/ambari-server"
+DEFAULT_LIBS_DIR = "/usr/lib/ambari-server"
+
+# ownership/permissions mapping
+# path - permissions - user - recursive
+# Rules are executed in the same order as they are listed
+# {0} in user/group will be replaced by customized ambari-server username
+NR_ADJUST_OWNERSHIP_LIST = [
+
+ ("/var/log/ambari-server", "644", "{0}", True),
+ ("/var/log/ambari-server", "755", "{0}", False),
+ ("/var/run/ambari-server", "644", "{0}", True),
+ ("/var/run/ambari-server", "755", "{0}", False),
+ ("/var/run/ambari-server/bootstrap", "755", "{0}", False),
+ ("/var/lib/ambari-server/ambari-env.sh", "700", "{0}", False),
+ ("/var/lib/ambari-server/keys", "600", "{0}", True),
+ ("/var/lib/ambari-server/keys", "700", "{0}", False),
+ ("/var/lib/ambari-server/keys/db", "700", "{0}", False),
+ ("/var/lib/ambari-server/keys/db/newcerts", "700", "{0}", False),
+ ("/var/lib/ambari-server/keys/.ssh", "700", "{0}", False),
+ ("/var/lib/ambari-server/resources/stacks/", "755", "{0}", True),
+ ("/var/lib/ambari-server/resources/custom_actions/", "755", "{0}", True),
+ ("/etc/ambari-server/conf", "644", "{0}", True),
+ ("/etc/ambari-server/conf", "755", "{0}", False),
+ ("/etc/ambari-server/conf/password.dat", "640", "{0}", False),
+ # Also, /etc/ambari-server/conf/password.dat
+ # is generated later at store_password_file
+]
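+
+# For example, with the server running as user "ambari", the first two rules
+# recursively set 644 on the contents of /var/log/ambari-server and 755 on the
+# directory itself, owned by "ambari" (presumably applied by
+# adjust_directory_permissions in setupSecurity).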
+
+MASTER_KEY_FILE_PERMISSIONS = "600"
+CREDENTIALS_STORE_FILE_PERMISSIONS = "600"
+TRUST_STORE_LOCATION_PERMISSIONS = "600"
diff --git a/ambari-server/src/main/python/ambari_server/serverConfiguration_windows.py b/ambari-server/src/main/python/ambari_server/serverConfiguration_windows.py
new file mode 100644
index 0000000000..a0fa508353
--- /dev/null
+++ b/ambari-server/src/main/python/ambari_server/serverConfiguration_windows.py
@@ -0,0 +1,98 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import string
+import os
+
+JDBC_USE_INTEGRATED_AUTH_PROPERTY = "server.jdbc.use.integrated.auth"
+
+JDBC_RCA_USE_INTEGRATED_AUTH_PROPERTY = "server.jdbc.rca.use.integrated.auth"
+
+JDBC_METRICS_USE_INTEGRATED_AUTH_PROPERTY = "scom.sink.db.use.integrated.auth"
+
+METRICS_PERSISTENCE_TYPE_PROPERTY = "metrics.persistence.type"
+
+JDBC_METRICS_DATABASE_PROPERTY = "scom.sink.db.database"
+JDBC_METRICS_HOSTNAME_PROPERTY = "scom.sink.db.hostname"
+JDBC_METRICS_PORT_PROPERTY = "scom.sink.db.port"
+JDBC_METRICS_SCHEMA_PROPERTY = "scom.sink.db.schema"
+
+JDBC_METRICS_DRIVER_PROPERTY = "scom.sink.db.driver"
+JDBC_METRICS_URL_PROPERTY = "scom.sink.db.url"
+JDBC_METRICS_USER_NAME_PROPERTY = "scom.sink.db.username"
+JDBC_METRICS_PASSWORD_PROPERTY = "scom.sink.db.password"
+JDBC_METRICS_PASSWORD_FILENAME = "scom_password.dat"
+
+JDBC_METRICS_PASSWORD_ALIAS = "scom.db.password"
+
+JAVA_SHARE_PATH = "/usr/share/java"
+OUT_DIR = "\\var\\log\\ambari-server"
+SERVER_OUT_FILE = OUT_DIR + "\\ambari-server.out"
+SERVER_LOG_FILE = OUT_DIR + "\\ambari-server.log"
+ROOT_FS_PATH = "\\"
+
+JDK_INSTALL_DIR = "C:\\"
+JDK_SEARCH_PATTERN = "j[2se|dk|re]*"
+JAVA_EXE_SUBPATH = "bin\\java.exe"
+
+# Configuration defaults
+DEFAULT_CONF_DIR = "conf"
+PID_DIR = "\\var\\run\\ambari-server"
+DEFAULT_LIBS_DIR = "lib"
+
+# ownership/permissions mapping
+# path - permissions - user - recursive
+# Rules are executed in the same order as they are listed
+# {0} in user/group will be replaced by customized ambari-server username
+# The permissions are icacls
+NR_ADJUST_OWNERSHIP_LIST = [
+
+ (OUT_DIR, "M", "{0}", True), #0110-0100-0100 rw-r-r
+ (OUT_DIR, "F", "{0}", False), #0111-0101-0101 rwx-rx-rx
+ (PID_DIR, "M", "{0}", True),
+ (PID_DIR, "F", "{0}", False),
+ ("bootstrap", "F", "{0}", False),
+ ("ambari-env.cmd", "F", "{0}", False),
+ ("keystore", "M", "{0}", True),
+ ("keystore", "F", "{0}", False),
+ ("keystore\\db", "700", "{0}", False),
+ ("keystore\\db\\newcerts", "700", "{0}", False),
+ ("resources\\stacks", "755", "{0}", True),
+ ("resources\\custom_actions", "755", "{0}", True),
+ ("conf", "644", "{0}", True),
+ ("conf", "755", "{0}", False),
+ ("conf\\password.dat", "640", "{0}", False),
+ # Also, conf\password.dat
+ # is generated later at store_password_file
+]
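+
+# icacls simple rights used above: "M" grants Modify and "F" grants Full Control.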
+
+MASTER_KEY_FILE_PERMISSIONS = "600"
+CREDENTIALS_STORE_FILE_PERMISSIONS = "600"
+TRUST_STORE_LOCATION_PERMISSIONS = "600"
+
+SCHEMA_UPGRADE_HELPER_CMD = "{0}" + os.sep + "bin" + os.sep + "java -cp {1}" + \
+ os.pathsep + "{2} " + \
+ "org.apache.ambari.server.upgrade.SchemaUpgradeHelper" + \
+ " > " + SERVER_OUT_FILE + " 2>&1"
+
+STACK_UPGRADE_HELPER_CMD = "{0}" + os.sep + "bin" + os.sep + "java -cp {1}" + \
+ os.pathsep + "{2} " + \
+ "org.apache.ambari.server.upgrade.StackUpgradeHelper" + \
+ " {3} {4} > " + SERVER_OUT_FILE + " 2>&1"
diff --git a/ambari-server/src/main/python/ambari_server/serverSetup.py b/ambari-server/src/main/python/ambari_server/serverSetup.py
new file mode 100644
index 0000000000..059400ea09
--- /dev/null
+++ b/ambari-server/src/main/python/ambari_server/serverSetup.py
@@ -0,0 +1,533 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import os
+import shutil
+import socket
+import sys
+import urllib2
+from ambari_commons.inet_utils import force_download_file
+from ambari_commons.logging_utils import print_warning_msg, print_error_msg
+
+from serverConfiguration import *
+from setupSecurity import adjust_directory_permissions, get_is_secure, store_password_file, encrypt_password, \
+ get_is_persisted
+from userInput import *
+from utils import *
+
+if OSCheck.is_windows_os():
+ from serverSetup_windows import *
+else:
+ # MacOS not supported
+ from serverSetup_linux import *
+
+
+JDK_INDEX = 0
+
+def verify_setup_allowed():
+ properties = get_ambari_properties()
+ if properties == -1:
+ print_error_msg("Error getting ambari properties")
+ return -1
+
+ isSecure = get_is_secure(properties)
+ (isPersisted, masterKeyFile) = get_is_persisted(properties)
+ if isSecure and not isPersisted and SILENT:
+ print "ERROR: Cannot run silent 'setup' with password encryption enabled " \
+ "and Master Key not persisted."
+ print "Ambari Server 'setup' exiting."
+ return 1
+ return 0
+
+
+def check_ambari_user():
+ try:
+ user = read_ambari_user()
+ create_user = False
+ update_user_setting = False
+ if user is not None:
+ create_user = get_YN_input(NR_USER_CHANGE_PROMPT.format(user), False)
+ update_user_setting = create_user # Only if we will create another user
+ else: # user is not configured yet
+ update_user_setting = True # Write configuration anyway
+ create_user = get_YN_input(NR_USER_CUSTOMIZE_PROMPT, False)
+ if not create_user:
+ user = NR_DEFAULT_USER
+
+ if create_user:
+ (retcode, user) = create_custom_user()
+ if retcode != 0:
+ return retcode
+
+ if update_user_setting:
+ write_property(NR_USER_PROPERTY, user)
+
+ adjust_directory_permissions(user)
+ except OSError as e:
+ print_error_msg("Failed: %s" % e.strerror)
+ return 4
+ except Exception as e:
+ print_error_msg("Unexpected error %s" % e)
+ return 1
+ return 0
+
+def create_custom_user():
+ return os_create_custom_user()
+
+
+# ## JDK ###
+
+#
+# Downloads and installs the JDK and the JCE policy archive
+#
+def _download_jdk(jdk_url, dest_file):
+ jdk_download_fail_msg = " Failed to download JDK: {0}. Please check that Oracle " \
+ "JDK is available at {1}. Also you may specify JDK file " \
+ "location in local filesystem using --jdk-location command " \
+ "line argument.".format("{0}", jdk_url)
+ try:
+ force_download_file(jdk_url, dest_file)
+
+ print 'Successfully downloaded JDK distribution to ' + dest_file
+ except FatalException:
+ raise
+ except Exception, e:
+ err = jdk_download_fail_msg.format(str(e))
+ raise FatalException(1, err)
+
+
+def download_and_install_jdk(args):
+ properties = get_ambari_properties()
+ if properties == -1:
+ err = "Error getting ambari properties"
+ raise FatalException(-1, err)
+
+ conf_file = properties.fileName
+ ok = False
+ jcePolicyWarn = "JCE Policy files are required for configuring Kerberos security. If you plan to use Kerberos, " \
+ "please make sure JCE Unlimited Strength Jurisdiction Policy Files are valid on all hosts."
+
+ if args.java_home:
+ if not os.path.exists(args.java_home) or not os.path.isfile(os.path.join(args.java_home, JAVA_EXE_SUBPATH)):
+ err = "Path to java home " + args.java_home + " or java binary file does not exists"
+ raise FatalException(1, err)
+
+ print_warning_msg("JAVA_HOME " + args.java_home + " must be valid on ALL hosts")
+ print_warning_msg(jcePolicyWarn)
+
+ properties.process_pair(JAVA_HOME_PROPERTY, args.java_home)
+ properties.removeOldProp(JDK_NAME_PROPERTY)
+ properties.removeOldProp(JCE_NAME_PROPERTY)
+ update_properties(properties)
+
+ os_ensure_java_home_env_var_is_set(args.java_home)
+ return 0
+ else:
+ global JDK_INDEX
+
+ java_home_var = get_JAVA_HOME()
+ if java_home_var:
+ if args.silent:
+ change_jdk = False
+ else:
+ change_jdk = get_YN_input("Do you want to change Oracle JDK [y/n] (n)? ", False)
+ if not change_jdk:
+ os_ensure_java_home_env_var_is_set(java_home_var)
+ return 0
+ #Handle silent JDK setup when args.silent is set
+ elif args.silent:
+ #No java_home_var set, detect if java is already installed
+ if os.environ.has_key(JAVA_HOME):
+ args.java_home = os.environ[JAVA_HOME]
+
+ properties.process_pair(JAVA_HOME_PROPERTY, args.java_home)
+ properties.removeOldProp(JDK_NAME_PROPERTY)
+ properties.removeOldProp(JCE_NAME_PROPERTY)
+ update_properties(properties)
+
+ os_ensure_java_home_env_var_is_set(args.java_home)
+ return 0
+ else:
+ #Continue with the normal setup, taking the first listed JDK version as the default option
+ jdk_num = "1"
+ (jdks, jdk_choice_prompt, jdk_valid_choices, custom_jdk_number) = populate_jdk_configs(properties, jdk_num)
+ else:
+ jdk_num = str(JDK_INDEX + 1)
+ (jdks, jdk_choice_prompt, jdk_valid_choices, custom_jdk_number) = populate_jdk_configs(properties, jdk_num)
+
+ jdk_num = get_validated_string_input(
+ jdk_choice_prompt,
+ jdk_num,
+ jdk_valid_choices,
+ "Invalid number.",
+ False
+ )
+
+ java_bin = "java"
+ if OSCheck.is_windows_os():
+ java_bin = "java.exe"
+
+ if jdk_num == str(custom_jdk_number):
+ print_warning_msg("JDK must be installed on all hosts and JAVA_HOME must be valid on all hosts.")
+ print_warning_msg(jcePolicyWarn)
+ args.java_home = get_validated_string_input("Path to JAVA_HOME: ", None, None, None, False, False)
+ if not os.path.exists(args.java_home) or not os.path.isfile(os.path.join(args.java_home, "bin", java_bin)):
+ err = "Java home path or java binary file is unavailable. Please put correct path to java home."
+ raise FatalException(1, err)
+ print "Validating JDK on Ambari Server...done."
+
+ properties.process_pair(JAVA_HOME_PROPERTY, args.java_home)
+ properties.removeOldProp(JDK_NAME_PROPERTY)
+ properties.removeOldProp(JCE_NAME_PROPERTY)
+ update_properties(properties)
+
+ os_ensure_java_home_env_var_is_set(args.java_home)
+ return 0
+
+ JDK_INDEX = int(jdk_num) - 1
+ jdk_cfg = jdks[JDK_INDEX]
+
+ try:
+ resources_dir = properties[RESOURCES_DIR_PROPERTY]
+ except KeyError, e:
+ err = 'Property ' + str(e) + ' is not defined at ' + conf_file
+ raise FatalException(1, err)
+
+ dest_file = os.path.abspath(os.path.join(resources_dir, jdk_cfg.dest_file))
+ if os.path.exists(dest_file):
+ print "JDK already exists, using " + dest_file
+ else:
+ if args.silent:
+ print "Accepting the JDK license terms by default..."
+ else:
+ ok = get_YN_input("To download the Oracle JDK you must accept the "
+ "license terms found at "
+ "http://www.oracle.com/technetwork/java/javase/"
+ "terms/license/index.html and not accepting will "
+ "cancel the Ambari Server setup.\nDo you accept the "
+ "Oracle Binary Code License Agreement [y/n] (y)? ", True)
+ if not ok:
+ print 'Exiting...'
+ sys.exit(1)
+
+ jdk_url = jdk_cfg.url
+
+ print 'Downloading JDK from ' + jdk_url + ' to ' + dest_file
+ _download_jdk(jdk_url, dest_file)
+
+ try:
+ (retcode, out) = install_jdk(dest_file, jdk_cfg.inst_dir)
+ except Exception, e:
+ print "Installation of JDK has failed: %s\n" % e.message
+ file_exists = os.path.isfile(dest_file)
+ if file_exists:
+ if args.silent:
+ ok = False
+ else:
+ ok = get_YN_input("JDK found at " + dest_file + ". "
+ "Would you like to re-download the JDK [y/n] (y)? ", True)
+ if not ok:
+ err = "Unable to install JDK. Please remove JDK file found at " + \
+ dest_file + " and re-run Ambari Server setup"
+ raise FatalException(1, err)
+ else:
+ jdk_url = jdk_cfg.url
+
+ print 'Re-downloading JDK from ' + jdk_url + ' to ' + dest_file
+ _download_jdk(jdk_url, dest_file)
+ print 'Successfully re-downloaded JDK distribution to ' + dest_file
+
+ try:
+ (retcode, out) = install_jdk(dest_file, jdk_cfg.inst_dir)
+ except Exception, e:
+ print "Installation of JDK was failed: %s\n" % e.message
+ err = "Unable to install JDK. Please remove JDK, file found at " + \
+ dest_file + " and re-run Ambari Server setup"
+ raise FatalException(1, err)
+
+ else:
+ err = "Unable to install JDK. File " + dest_file + " does not exist, " \
+ "please re-run Ambari Server setup"
+ raise FatalException(1, err)
+
+ properties.process_pair(JDK_NAME_PROPERTY, jdk_cfg.dest_file)
+ properties.process_pair(JAVA_HOME_PROPERTY, jdk_cfg.inst_dir)
+
+ try:
+ download_jce_policy(jdk_cfg, resources_dir, properties)
+ except FatalException, e:
+ print "JCE Policy files are required for secure HDP setup. Please ensure " \
+ " all hosts have the JCE unlimited strength policy 6, files."
+ print_error_msg("Failed to download JCE policy files:")
+ if e.reason is not None:
+ print_error_msg("\nREASON: {0}".format(e.reason))
+ # TODO: We don't fail installation if download_jce_policy fails. Is it OK?
+
+ update_properties(properties)
+
+ os_ensure_java_home_env_var_is_set(jdk_cfg.inst_dir)
+
+ return 0
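+
+# Whichever branch runs above, a successful call leaves JAVA_HOME_PROPERTY (and,
+# for downloaded JDKs, JDK_NAME_PROPERTY/JCE_NAME_PROPERTY) consistent in
+# ambari.properties and exports JAVA_HOME into the server environment.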
+
+
+def download_jce_policy(jdk_cfg, resources_dir, properties):
+ jcpol_url = jdk_cfg.jcpol_url
+ dest_file = os.path.abspath(os.path.join(resources_dir, jdk_cfg.dest_jcpol_file))
+
+ if not os.path.exists(dest_file):
+ print 'Downloading JCE Policy archive from ' + jcpol_url + ' to ' + dest_file
+ try:
+ force_download_file(jcpol_url, dest_file)
+
+ print 'Successfully downloaded JCE Policy archive to ' + dest_file
+ properties.process_pair(JCE_NAME_PROPERTY, jdk_cfg.dest_jcpol_file)
+ except FatalException:
+ raise
+ except Exception, e:
+ err = 'Failed to download JCE Policy archive: ' + str(e)
+ raise FatalException(1, err)
+ else:
+ print "JCE Policy archive already exists, using " + dest_file
+
+
+
+def install_jdk(java_inst_file, java_home_dir):
+ return os_install_jdk(java_inst_file, java_home_dir)
+
+
+#
+# Configures the OS settings in ambari properties.
+#
+def configure_os_settings():
+ properties = get_ambari_properties()
+ if properties == -1:
+ print_error_msg("Error getting ambari properties")
+ return -1
+ try:
+ conf_os_type = properties[OS_TYPE_PROPERTY]
+ if conf_os_type != '':
+ print_info_msg("os_type already set in the properties file")
+ return 0
+ except KeyError:
+ print_error_msg("os_type is not set in the properties file. Setting it now.")
+
+ if OSCheck.is_windows_os():
+ master_os_type = OS_TYPE + OS_VERSION
+ else:
+ # MacOS not supported
+ master_os_type = OS_FAMILY + OS_VERSION
+
+ write_property(OS_TYPE_PROPERTY, master_os_type)
+ return 0
+
+
+#
+# JDBC
+#
+
+def proceedJDBCProperties(args):
+ if not os.path.isfile(args.jdbc_driver):
+ err = "File {0} does not exist!".format(args.jdbc_driver)
+ raise FatalException(1, err)
+
+ if args.jdbc_db not in JDBC_DB_OPTION_VALUES:
+ err = "Unsupported database name {0}. Please see help for more information.".format(args.jdbc_db)
+ raise FatalException(1, err)
+
+ properties = get_ambari_properties()
+ if properties == -1:
+ err = "Error getting ambari properties"
+ raise FatalException(-1, err)
+ conf_file = properties.fileName
+
+ try:
+ resources_dir = properties[RESOURCES_DIR_PROPERTY]
+ except KeyError, e:
+ err = 'Property ' + str(e) + ' is not defined at ' + conf_file
+ raise FatalException(1, err)
+
+ symlink_name = args.jdbc_db + "-jdbc-driver.jar"
+ jdbc_symlink = os.path.join(resources_dir, symlink_name)
+ path, jdbc_name = os.path.split(args.jdbc_driver)
+
+ if os.path.lexists(jdbc_symlink):
+ os.remove(jdbc_symlink)
+
+ if not os.path.isfile(os.path.join(resources_dir, jdbc_name)):
+ try:
+ shutil.copy(args.jdbc_driver, resources_dir)
+ except Exception, e:
+ err = "Can not copy file {0} to {1} due to: {2} . Please check file " \
+ "permissions and free disk space.".format(args.jdbc_driver, resources_dir, e)
+ raise FatalException(1, err)
+
+ os.symlink(os.path.join(resources_dir, jdbc_name), jdbc_symlink)
+ print "JDBC driver was successfully initialized."
+
+def check_jdbc_drivers(args):
+ os_setup_jdbc_drivers(args)
+ pass
+
+
+# Ask user for database connection properties
+def prompt_db_properties(args):
+ if not args.silent:
+ def_option = 'y' if args.must_set_database_options else 'n'
+ ok = get_YN_input("Enter advanced database configuration [y/n] ({})? ".format(def_option), args.must_set_database_options)
+ if not ok:
+ return False
+
+ print 'Configuring database...'
+
+ #TODO: Add here code for DBMS selection, in case we want to support other databases besides SQL Server
+
+ return True
+
+
+#
+# Setup the Ambari Server.
+#
+
+def setup(options):
+ retcode = verify_setup_allowed()
+ if not retcode == 0:
+ raise FatalException(1, None)
+
+ if not is_root():
+ err = MESSAGE_ERROR_NOT_ROOT
+ raise FatalException(4, err)
+
+ # proceed jdbc properties if they were set
+ if os_check_jdbc_options(options):
+ proceedJDBCProperties(options)
+ return
+
+ (retcode, err) = disable_security_enhancements()
+ if not retcode == 0:
+ raise FatalException(retcode, err)
+
+ #Create ambari user, if needed
+ retcode = check_ambari_user()
+ if not retcode == 0:
+ err = 'Failed to create user. Exiting.'
+ raise FatalException(retcode, err)
+
+ print MESSAGE_CHECK_FIREWALL
+ os_check_firewall()
+
+ # proceed jdbc properties if they were set
+ if os_check_jdbc_options(options):
+ proceedJDBCProperties(options)
+
+ print 'Checking JDK...'
+ try:
+ download_and_install_jdk(options)
+ except FatalException as e:
+ err = 'Downloading or installing JDK failed: {0}. Exiting.'.format(e)
+ raise FatalException(e.code, err)
+
+ print 'Completing setup...'
+ retcode = configure_os_settings()
+ if not retcode == 0:
+ err = 'Configuring OS settings in ambari.properties failed. Exiting.'
+ raise FatalException(retcode, err)
+
+ if prompt_db_properties(options):
+ #DB setup should be done last after doing any setup.
+ os_setup_database(options)
+
+ check_jdbc_drivers(options)
+ pass
+
+#
+# Upgrades the Ambari Server.
+#
+def upgrade(args):
+ if not is_root():
+ err = 'Ambari-server upgrade should be run with ' \
+ 'root-level privileges'
+ raise FatalException(4, err)
+
+ print 'Updating properties in ' + AMBARI_PROPERTIES_FILE + ' ...'
+ retcode = update_ambari_properties()
+ if not retcode == 0:
+ err = AMBARI_PROPERTIES_FILE + ' file can\'t be updated. Exiting'
+ raise FatalException(retcode, err)
+
+ try:
+ update_database_name_property()
+ except FatalException:
+ return -1
+
+ parse_properties_file(args)
+
+ retcode = run_schema_upgrade()
+ if not retcode == 0:
+ print_error_msg("Ambari server upgrade failed. Please look at /var/log/ambari-server/ambari-server.log, for more details.")
+ raise FatalException(11, 'Schema upgrade failed.')
+
+ user = read_ambari_user()
+ if user is None:
+ warn = "Can not determine custom ambari user.\n" + SETUP_OR_UPGRADE_MSG
+ print_warning_msg(warn)
+ else:
+ adjust_directory_permissions(user)
+
+
+#
+# Resets the Ambari Server.
+#
+def reset(options, serviceClass):
+ if not is_root():
+ err = 'Ambari-server reset should be run with ' \
+ 'administrator-level privileges'
+ raise FatalException(4, err)
+
+ status, stateDesc = is_server_running(serviceClass)
+ if status:
+ err = 'Ambari-server must be stopped to reset'
+ raise FatalException(1, err)
+
+ #force reset if silent option provided
+ if not options.silent:
+ choice = get_YN_input("**** WARNING **** You are about to reset and clear the "
+ "Ambari Server database. This will remove all cluster "
+ "host and configuration information from the database. "
+ "You will be required to re-configure the Ambari server "
+ "and re-run the cluster wizard. \n"
+ "Are you SURE you want to perform the reset "
+ "[yes/no] (no)? ", False)
+ if not choice:
+ err = "Ambari Server 'reset' cancelled"
+ raise FatalException(1, err)
+
+ os_reset_database(options)
+ pass
+
+
+def is_server_running(serviceClass):
+ statusStr = serviceClass.QueryStatus()
+ if statusStr in (SERVICE_STATUS_STARTING, SERVICE_STATUS_RUNNING, SERVICE_STATUS_STOPPING):
+ return True, ""
+ elif statusStr == SERVICE_STATUS_STOPPED:
+ return False, SERVICE_STATUS_STOPPED
+ elif statusStr == SERVICE_STATUS_NOT_INSTALLED:
+ return False, SERVICE_STATUS_NOT_INSTALLED
+ else:
+ return False, None
diff --git a/ambari-server/src/main/python/ambari_server/serverSetup_linux.py b/ambari-server/src/main/python/ambari_server/serverSetup_linux.py
new file mode 100644
index 0000000000..b5436e0c12
--- /dev/null
+++ b/ambari-server/src/main/python/ambari_server/serverSetup_linux.py
@@ -0,0 +1,795 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+import fileinput
+import glob
+import optparse
+
+import os
+import re
+import shutil
+import socket
+import subprocess
+import sys
+import time
+
+from ambari_commons.exceptions import *
+from ambari_commons.logging_utils import *
+from ambari_commons.os_linux import run_os_command
+from ambari_server.dbConfiguration_linux import SERVICE_CMD, PG_HBA_CONF_FILE_BACKUP
+from ambari_server.serverConfiguration import *
+from ambari_server.serverConfiguration_linux import JAVA_SHARE_PATH
+from ambari_server.setupSecurity import *
+from ambari_server.userInput import get_YN_input, get_validated_string_input
+from ambari_server import utils
+
+# selinux commands
+GET_SE_LINUX_ST_CMD = utils.locate_file('sestatus', '/usr/sbin')
+SE_SETENFORCE_CMD = "setenforce 0"
+SE_STATUS_DISABLED = "disabled"
+SE_STATUS_ENABLED = "enabled"
+SE_MODE_ENFORCING = "enforcing"
+SE_MODE_PERMISSIVE = "permissive"
+
+# Non-root user setup commands
+NR_USER_COMMENT = "Ambari user"
+NR_GET_OWNER_CMD = 'stat -c "%U" {0}'
+NR_USERADD_CMD = 'useradd -M --comment "{1}" ' \
+ '--shell %s -d /var/lib/ambari-server/keys/ {0}' % utils.locate_file('nologin', '/sbin')
+NR_SET_USER_COMMENT_CMD = 'usermod -c "{0}" {1}'
+
+NR_USER_CHANGE_PROMPT = "Ambari-server daemon is configured to run under user '{0}'. Change this setting [y/n] (n)? "
+NR_USER_CUSTOMIZE_PROMPT = "Customize user account for ambari-server daemon [y/n] (n)? "
+NR_DEFAULT_USER = "root"
+
+# jdk commands
+JDK_URL_PROPERTIES = ["jdk1.7.url", "jdk1.6.url"]
+JCE_URL_PROPERTIES = ["jce_policy1.7.url", "jce_policy1.6.url"]
+JDK_VERSION_REs = ["(jdk.*)/jre", "Creating (jdk.*)/jre"]
+JDK_CHOICE_PROMPT = "[1] - Oracle JDK 1.7\n[2] - Oracle JDK 1.6\n[3] - Custom JDK\n==============================================================================\nEnter choice ({0}): "
+JDK_VALID_CHOICES = "^[123]$"
+CUSTOM_JDK_NUMBER = "3"
+JDK_MIN_FILESIZE = 5000
+CREATE_JDK_DIR_CMD = "/bin/mkdir -p {0}"
+MAKE_FILE_EXECUTABLE_CMD = "chmod a+x {0}"
+
+JDK_DOWNLOAD_CMD = "curl --create-dirs -o {0} {1}"
+JDK_DOWNLOAD_SIZE_CMD = "curl -I {0}"
+
+# use --no-same-owner when running as root to prevent uucp as the user (AMBARI-6478)
+UNTAR_JDK_ARCHIVE = "tar --no-same-owner -xvf {0}"
+
+
+#JDBC
+DATABASE_INDEX = 0
+USERNAME_PATTERN = "^[a-zA-Z_][a-zA-Z0-9_\-]*$"
+DATABASE_NAMES = ["postgres", "oracle", "mysql"]
+DATABASE_STORAGE_NAMES = ["Database", "Service", "Database"]
+DATABASE_PORTS = ["5432", "1521", "3306"]
+DATABASE_DRIVER_NAMES = ["org.postgresql.Driver", "oracle.jdbc.driver.OracleDriver", "com.mysql.jdbc.Driver"]
+DATABASE_CONNECTION_STRINGS = [
+ "jdbc:postgresql://{0}:{1}/{2}",
+ "jdbc:oracle:thin:@{0}:{1}/{2}",
+ "jdbc:mysql://{0}:{1}/{2}"]
+DATABASE_CONNECTION_STRINGS_ALT = [
+ "jdbc:postgresql://{0}:{1}/{2}",
+ "jdbc:oracle:thin:@{0}:{1}:{2}",
+ "jdbc:mysql://{0}:{1}/{2}"]
+ORACLE_SID_PATTERN = "jdbc:oracle:thin:@.+:.+/.+"
+ORACLE_SNAME_PATTERN = "jdbc:oracle:thin:@.+:.+:.+"
+
+DATABASE_CLI_TOOLS = [["psql"], ["sqlplus", "sqlplus64"], ["mysql"]]
+DATABASE_CLI_TOOLS_DESC = ["psql", "sqlplus", "mysql"]
+DATABASE_CLI_TOOLS_USAGE = ['su -postgres --command=psql -f {0} -v username=\'"{1}"\' -v password="\'{2}\'"',
+ 'sqlplus {1}/{2} < {0} ',
+ 'mysql --user={1} --password={2} {3}<{0}']
+
+MYSQL_INIT_SCRIPT = '/var/lib/ambari-server/resources/Ambari-DDL-MySQL-CREATE.sql'
+DATABASE_INIT_SCRIPTS = ['/var/lib/ambari-server/resources/Ambari-DDL-Postgres-CREATE.sql',
+ '/var/lib/ambari-server/resources/Ambari-DDL-Oracle-CREATE.sql',
+ MYSQL_INIT_SCRIPT]
+DATABASE_DROP_SCRIPTS = ['/var/lib/ambari-server/resources/Ambari-DDL-Postgres-DROP.sql',
+ '/var/lib/ambari-server/resources/Ambari-DDL-Oracle-DROP.sql',
+ '/var/lib/ambari-server/resources/Ambari-DDL-MySQL-DROP.sql']
+
+JDBC_PROPERTIES_PREFIX = "server.jdbc.properties."
+DATABASE_JDBC_PROPERTIES = [
+ [ ],
+ [
+ ["oracle.net.CONNECT_TIMEOUT", "2000"], # socket level timeout
+ ["oracle.net.READ_TIMEOUT", "2000"], # socket level timeout
+ ["oracle.jdbc.ReadTimeout", "8000"] # query fetch timeout
+ ],
+ [ ]
+ ]
+
+POSTGRES_EXEC_ARGS = "-h {0} -p {1} -d {2} -U {3} -f {4} -v username='{3}'"
+ORACLE_EXEC_ARGS = "-S -L '{0}/{1}@(description=(address=(protocol=TCP)(host={2})(port={3}))(connect_data=({6}={4})))' @{5} {0}"
+MYSQL_EXEC_ARGS_WITH_USER_VARS = "--host={0} --port={1} --user={2} --password={3} {4} " \
+ "-e\"set @schema=\'{4}\'; set @username=\'{2}\'; source {5};\""
+MYSQL_EXEC_ARGS_WO_USER_VARS = "--force --host={0} --port={1} --user={2} --password={3} --database={4} < {5} 2> /dev/null"
+MYSQL_UPGRADE_STACK_ARGS = "--host={0} --port={1} --user={2} --password={3} --database={4} " \
+ "-e\"set @stackName=\'{6}\'; set @stackVersion=\'{7}\'; source {5};\""
+
+ORACLE_UPGRADE_STACK_ARGS = "-S -L '{0}/{1}@(description=(address=(protocol=TCP)(host={2})(port={3}))(connect_data=({6}={4})))' @{5} {7} {8}"
+
+JDBC_PATTERNS = {"oracle": "*ojdbc*.jar", "mysql": "*mysql*.jar"}
+DATABASE_FULL_NAMES = {"oracle": "Oracle", "mysql": "MySQL", "postgres": "PostgreSQL"}
+JDBC_DB_OPTION_VALUES = ["postgres", "mysql", "oracle"]
+JDBC_DB_DEFAULT_DRIVER = {"postgresql" : "postgresql-jdbc.jar", "mysql" : "mysql-connector-java.jar", "oracle" : "ojdbc6.jar"}
+ORACLE_DB_ID_TYPES = ["Service Name", "SID"]
+
+
+DEFAULT_DB_NAME = "ambari"
+
+
+MESSAGE_ERROR_NOT_ROOT = 'Ambari-server setup should be run with root-level privileges'
+
+MESSAGE_CHECK_FIREWALL = 'Checking iptables...'
+
+class FirewallChecks(object):
+ def __init__(self):
+
+ self.FIREWALL_SERVICE_NAME = "iptables"
+ self.SERVICE_CMD = SERVICE_CMD
+ self.SERVICE_SUBCMD = "status"
+
+ def get_command(self):
+ return "%s %s %s" % (self.SERVICE_CMD, self.FIREWALL_SERVICE_NAME, self.SERVICE_SUBCMD)
+
+ def check_result(self, retcode, out, err):
+ return retcode == 0
+
+ def check_iptables(self):
+ retcode, out, err = run_os_command(self.get_command())
+ if err and len(err) > 0:
+ print err
+ if self.check_result(retcode, out, err):
+ print_warning_msg("%s is running. Confirm the necessary Ambari ports are accessible. " %
+ self.FIREWALL_SERVICE_NAME +
+ "Refer to the Ambari documentation for more details on ports.")
+ ok = get_YN_input("OK to continue [y/n] (y)? ", True)
+ if not ok:
+ raise FatalException(1, None)
+
+ def get_running_result(self):
+ # To support test code. Expected output from run_os_command.
+ return (0, "", "")
+
+ def get_stopped_result(self):
+ # To support test code. Expected output from run_os_command.
+ return (3, "", "")
+
+
+class UbuntuFirewallChecks(FirewallChecks):
+ def __init__(self):
+ super(UbuntuFirewallChecks, self).__init__()
+
+ self.FIREWALL_SERVICE_NAME = "ufw"
+ self.SERVICE_CMD = utils.locate_file('service', '/usr/sbin')
+
+ def check_result(self, retcode, out, err):
+ # On ubuntu, the status command returns 0 whether running or not
+ return out and len(out) > 0 and out.strip() != "ufw stop/waiting"
+
+ def get_running_result(self):
+ # To support test code. Expected output from run_os_command.
+ return (0, "ufw start/running", "")
+
+ def get_stopped_result(self):
+ # To support test code. Expected output from run_os_command.
+ return (0, "ufw stop/waiting", "")
+
+
+class Fedora18FirewallChecks(FirewallChecks):
+ def __init__(self):
+ self.FIREWALL_SERVICE_NAME = "firewalld.service"
+
+ def get_command(self):
+ return "systemctl is-active firewalld.service"
+
+
+class OpenSuseFirewallChecks(FirewallChecks):
+ def __init__(self):
+ self.FIREWALL_SERVICE_NAME = "SuSEfirewall2"
+
+ def get_command(self):
+ return "/sbin/SuSEfirewall2 status"
+
+
+def get_firewall_object():
+ if OS_TYPE == OSConst.OS_UBUNTU:
+ return UbuntuFirewallChecks()
+ elif OS_TYPE == OSConst.OS_FEDORA and int(OS_VERSION) >= 18:
+ return Fedora18FirewallChecks()
+ elif OS_TYPE == OSConst.OS_OPENSUSE:
+ return OpenSuseFirewallChecks()
+ else:
+ return FirewallChecks()
+
+
+def get_firewall_object_types():
+ # To support test code, so tests can loop through the types
+ return (FirewallChecks,
+ UbuntuFirewallChecks,
+ Fedora18FirewallChecks,
+ OpenSuseFirewallChecks)
+
+
+def os_check_firewall():
+ return get_firewall_object().check_iptables()
+
+
+#
+# Checks SELinux
+#
+def check_selinux():
+ try:
+ retcode, out, err = run_os_command(GET_SE_LINUX_ST_CMD)
+ se_status = re.search('(disabled|enabled)', out).group(0)
+ print "SELinux status is '" + se_status + "'"
+ if se_status == SE_STATUS_DISABLED:
+ return 0
+ else:
+ try:
+ se_mode = re.search('(enforcing|permissive)', out).group(0)
+ except AttributeError:
+ err = "Error determining SELinux mode. Exiting."
+ raise FatalException(1, err)
+ print "SELinux mode is '" + se_mode + "'"
+ if se_mode == SE_MODE_ENFORCING:
+ print "Temporarily disabling SELinux"
+ run_os_command(SE_SETENFORCE_CMD)
+ print_warning_msg(
+ "SELinux is set to 'permissive' mode and temporarily disabled.")
+ ok = get_YN_input("OK to continue [y/n] (y)? ", True)
+ if not ok:
+ raise FatalException(1, None)
+ return 0
+ except OSError:
+ print_warning_msg("Could not run {0}: OK".format(GET_SE_LINUX_ST_CMD))
+ return 0
+
+def disable_security_enhancements():
+ print 'Checking SELinux...'
+ err = ''
+ retcode = check_selinux()
+ if not retcode == 0:
+ err = 'Failed to disable SELinux. Exiting.'
+ return (retcode, err)
+
+
+#
+# User account creation
+#
+
+def os_create_custom_user():
+ user = get_validated_string_input(
+ "Enter user account for ambari-server daemon (root):",
+ "root",
+ "^[a-z_][a-z0-9_-]{1,31}$",
+ "Invalid username.",
+ False
+ )
+
+ print_info_msg("Trying to create user {0}".format(user))
+ command = NR_USERADD_CMD.format(user, NR_USER_COMMENT)
+ retcode, out, err = run_os_command(command)
+ if retcode == 9: # 9 = username already in use
+ print_info_msg("User {0} already exists, "
+ "skipping user creation".format(user))
+
+ elif retcode != 0: # fail
+ print_warning_msg("Can't create user {0}. Command {1} "
+ "finished with {2}: \n{3}".format(user, command, retcode, err))
+ return retcode, None
+
+ print_info_msg("User configuration is done.")
+ return 0, user
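+
+# With user "ambari", and assuming nologin resolves to /sbin/nologin, the
+# effective command is:
+#   useradd -M --comment "Ambari user" --shell /sbin/nologin -d /var/lib/ambari-server/keys/ ambari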
+
+
+#
+# JDK Setup
+#
+
+def os_install_jdk(java_inst_file, java_home_dir):
+ print "Installing JDK to {0}".format(java_home_dir)
+ retcode, out, err = run_os_command(CREATE_JDK_DIR_CMD.format(java_home_dir))
+ savedPath = os.getcwd()
+ os.chdir(java_home_dir)
+
+ if java_inst_file.endswith(".bin"):
+ retcode, out, err = run_os_command(MAKE_FILE_EXECUTABLE_CMD.format(java_inst_file))
+ retcode, out, err = run_os_command(java_inst_file + ' -noregister')
+ elif java_inst_file.endswith(".gz"):
+ retcode, out, err = run_os_command(UNTAR_JDK_ARCHIVE.format(java_inst_file))
+ else:
+ err = "JDK installation failed.Unknown file mask."
+ raise FatalException(1, err)
+
+ os.chdir(savedPath)
+
+ if retcode != 0:
+ err = "Installation of JDK returned exit code %s" % retcode
+ raise FatalException(retcode, err)
+
+ print "Successfully installed JDK to {0}".format(java_home_dir)
+ return (retcode, out)
+
+def os_ensure_java_home_env_var_is_set(java_home_var):
+ if not os.environ.has_key(JAVA_HOME) or os.environ[JAVA_HOME] != java_home_var:
+ # SETX is Windows-only; on Linux just update the current process environment
+ os.environ[JAVA_HOME] = java_home_var
+ pass
+
+
+#
+# JDBC Setup
+#
+
+def os_check_jdbc_options(options):
+ return (options.jdbc_driver is not None and options.jdbc_db is not None)
+
+#Check if required jdbc drivers present
+def os_find_jdbc_driver(args):
+ if args.dbms in JDBC_PATTERNS.keys():
+ drivers = []
+ drivers.extend(glob.glob(JAVA_SHARE_PATH + os.sep + JDBC_PATTERNS[args.dbms]))
+ if drivers:
+ return drivers
+ return -1
+ return 0
+
+def os_setup_jdbc_drivers(args):
+ result = os_find_jdbc_driver(args)
+
+ msg = 'Before starting Ambari Server, ' \
+ 'you must copy the {0} JDBC driver JAR file to {1}.'.format(
+ DATABASE_FULL_NAMES[args.dbms],
+ JAVA_SHARE_PATH)
+
+ if result == -1:
+ if SILENT:
+ print_error_msg(msg)
+ raise FatalException(-1, msg)
+ else:
+ print_warning_msg(msg)
+ raw_input(PRESS_ENTER_MSG)
+ result = os_find_jdbc_driver(args)
+ if result == -1:
+ print_error_msg(msg)
+ raise FatalException(-1, msg)
+
+ # Check if selected RDBMS requires drivers to copy
+ if type(result) is not int:
+ print 'Copying JDBC drivers to server resources...'
+ try:
+ resources_dir = args[RESOURCES_DIR_PROPERTY]
+ except KeyError:
+ print_error_msg("There is no value for " + RESOURCES_DIR_PROPERTY + "in " + AMBARI_PROPERTIES_FILE)
+ return -1
+
+ db_name = DATABASE_FULL_NAMES[args.dbms].lower()
+ jdbc_symlink = os.path.join(resources_dir, db_name + "-jdbc-driver.jar")
+ db_default_driver_path = os.path.join(JAVA_SHARE_PATH, JDBC_DB_DEFAULT_DRIVER[db_name])
+
+ if os.path.lexists(jdbc_symlink):
+ os.remove(jdbc_symlink)
+
+ copy_status = copy_files(result, resources_dir)
+
+ if not copy_status == 0:
+ raise FatalException(-1, "Failed to copy JDBC drivers to server resources")
+
+ if db_default_driver_path in result:
+ os.symlink(os.path.join(resources_dir, JDBC_DB_DEFAULT_DRIVER[db_name]), jdbc_symlink)
+
+def os_load_default_db_properties(args):
+ args.persistence_type = 'local'
+ args.dbms = DATABASE_NAMES[DATABASE_INDEX]
+ args.database_host = "localhost"
+ args.database_port = DATABASE_PORTS[DATABASE_INDEX]
+ args.database_name = DEFAULT_DB_NAME
+ args.database_username = "ambari"
+ args.database_password = "bigdata"
+ args.sid_or_sname = "sname"
+ pass
+
+# Check if jdbc user is changed
+def is_jdbc_user_changed(args):
+ properties = get_ambari_properties()
+ if properties == -1:
+ print_error_msg("Error getting ambari properties")
+ return None
+
+ previous_user = properties[JDBC_USER_NAME_PROPERTY]
+ new_user = args.database_username
+
+ if previous_user and new_user:
+ if previous_user != new_user:
+ return True
+ else:
+ return False
+
+ return None
+
+def os_setup_database(options):
+ if is_local_database(options):
+ os_setup_local_database(options)
+ else:
+ os_setup_remote_database(options)
+
+def os_setup_local_database(options):
+ #check if jdbc user is changed
+ is_user_changed = is_jdbc_user_changed(options)
+
+ print 'Default properties detected. Using built-in database.'
+ os_store_local_properties(options)
+
+ print 'Checking PostgreSQL...'
+ pg_status, retcode, out, err = check_postgre_up()
+ if not retcode == 0:
+ err = 'Unable to start PostgreSQL server. Status {0}. {1}.' \
+ ' Exiting'.format(pg_status, err)
+ raise FatalException(retcode, err)
+
+ print 'Configuring local database...'
+ retcode, outdata, errdata = setup_db(options)
+ if not retcode == 0:
+ err = 'Running the database init script failed. {0}. Exiting.'.format(errdata)
+ raise FatalException(retcode, err)
+
+ if is_user_changed:
+ #remove backup for pg_hba in order to reconfigure postgres
+ remove_file(PG_HBA_CONF_FILE_BACKUP)
+
+ print 'Configuring PostgreSQL...'
+ retcode, out, err = configure_postgres()
+ if not retcode == 0:
+ err = 'Unable to configure PostgreSQL server. {0} Exiting'.format(err)
+ raise FatalException(retcode, err)
+ pass
+
+def os_setup_remote_database(options):
+ retcode = os_store_remote_properties(options)
+ if retcode != 0:
+ err = 'Unable to save config file'
+ raise FatalException(retcode, err)
+
+ os_setup_jdbc_drivers(options)
+
+ print 'Configuring remote database connection properties...'
+ retcode = setup_remote_db(options)
+ if retcode == -1:
+ err = "Remote database setup aborted."
+ raise NonFatalException(err)
+
+ if not retcode == 0:
+ err = 'Error while configuring connection properties. Exiting'
+ raise FatalException(retcode, err)
+ pass
+
+def os_reset_database(options):
+ pass
+
+
+### Postgres ###
+
+
+def configure_pg_hba_ambaridb_users():
+ args = optparse.Values()
+ configure_database_username_password(args)
+
+ with open(PG_HBA_CONF_FILE, "a") as pgHbaConf:
+ pgHbaConf.write("\n")
+ pgHbaConf.write("local all " + args.database_username +
+ ",mapred md5")
+ pgHbaConf.write("\n")
+ pgHbaConf.write("host all " + args.database_username +
+ ",mapred 0.0.0.0/0 md5")
+ pgHbaConf.write("\n")
+ pgHbaConf.write("host all " + args.database_username +
+ ",mapred ::/0 md5")
+ pgHbaConf.write("\n")
+ retcode, out, err = run_os_command(PG_HBA_RELOAD_CMD)
+ if not retcode == 0:
+ raise FatalException(retcode, err)
+
+
+def configure_pg_hba_postgres_user():
+ postgresString = "all postgres"
+ for line in fileinput.input(PG_HBA_CONF_FILE, inplace=1):
+ print re.sub('all\s*all', postgresString, line),
+ os.chmod(PG_HBA_CONF_FILE, 0644)
+
+
+def configure_postgresql_conf():
+ listenAddress = "listen_addresses = '*' #"
+ for line in fileinput.input(POSTGRESQL_CONF_FILE, inplace=1):
+ print re.sub('#+listen_addresses.*?(#|$)', listenAddress, line),
+ os.chmod(POSTGRESQL_CONF_FILE, 0644)
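+
+# For example, a stock line "#listen_addresses = 'localhost' # what IP..."
+# is rewritten to "listen_addresses = '*' # what IP...", making PostgreSQL
+# listen on all interfaces.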
+
+
+def configure_postgres():
+ if os.path.isfile(PG_HBA_CONF_FILE):
+ if not os.path.isfile(PG_HBA_CONF_FILE_BACKUP):
+ shutil.copyfile(PG_HBA_CONF_FILE, PG_HBA_CONF_FILE_BACKUP)
+ else:
+ #Postgres has been configured before, must not override backup
+ print "Backup for pg_hba found, reconfiguration not required"
+ return 0, "", ""
+ configure_pg_hba_postgres_user()
+ configure_pg_hba_ambaridb_users()
+ os.chmod(PG_HBA_CONF_FILE, 0644)
+ configure_postgresql_conf()
+ #restart postgresql if already running
+ pg_status, retcode, out, err = get_postgre_status()
+ if pg_status == PG_STATUS_RUNNING:
+ retcode, out, err = restart_postgres()
+ return retcode, out, err
+ return 0, "", ""
+
+
+def restart_postgres():
+ print "Restarting PostgreSQL"
+ process = subprocess.Popen(PG_RESTART_CMD.split(' '),
+ stdout=subprocess.PIPE,
+ stdin=subprocess.PIPE,
+ stderr=subprocess.PIPE
+ )
+ time.sleep(5)
+ result = process.poll()
+ if result is None:
+ print_info_msg("Killing restart PostgresSQL process")
+ process.kill()
+ pg_status, retcode, out, err = get_postgre_status()
+ # SUSE Linux sets the status of a stopped PostgreSQL process to "unused"
+ if pg_status == "unused" or pg_status == "stopped":
+ print_info_msg("PostgreSQL is stopped. Restarting ...")
+ retcode, out, err = run_os_command(PG_START_CMD)
+ return retcode, out, err
+ return 0, "", ""
+
+
+# TODO: check if the schema already exists
+
+
+def setup_db(args):
+ #password access to ambari-server and mapred
+ configure_database_username_password(args)
+ dbname = args.database_name
+ scriptFile = args.init_script_file
+ username = args.database_username
+ password = args.database_password
+
+ #setup DB
+ command = SETUP_DB_CMD[:]
+ command[-1] = command[-1].format(scriptFile, username, password, dbname)
+
+ for i in range(SETUP_DB_CONNECT_ATTEMPTS):
+ sys.stdout.write('Connecting to local database...')
+ retcode, outdata, errdata = run_os_command(command)
+ if retcode == 0:
+ print 'done.'
+ return retcode, outdata, errdata
+ timeOutMsg = 'connection timed out'
+ if (i+1) < SETUP_DB_CONNECT_ATTEMPTS:
+ timeOutMsg += '...retrying (%d)' % (i+1)
+ print timeOutMsg
+ time.sleep(SETUP_DB_CONNECT_TIMEOUT)
+
+ print 'unable to connect to database'
+ print_error_msg(errdata)
+ return retcode, outdata, errdata
+
+
+def execute_db_script(args, file):
+ #password access to ambari-server and mapred
+ configure_database_username_password(args)
+ dbname = args.database_name
+ username = args.database_username
+ password = args.database_password
+ command = SETUP_DB_CMD[:]
+ command[-1] = command[-1].format(file, username, password, dbname)
+ retcode, outdata, errdata = run_os_command(command)
+ if not retcode == 0:
+ print errdata
+ return retcode
+
+
+def check_db_consistency(args, file):
+ #password access to ambari-server and mapred
+ configure_database_username_password(args)
+ dbname = args.database_name
+ username = args.database_username
+ password = args.database_password
+ command = SETUP_DB_CMD[:]
+ command[-1] = command[-1].format(file, username, password, dbname)
+ retcode, outdata, errdata = run_os_command(command)
+ if not retcode == 0:
+ print errdata
+ return retcode
+ else:
+ # Assumes that the output is of the form ...\n<count>
+ print_info_msg("Parsing output: " + outdata)
+ lines = outdata.splitlines()
+ if (lines[-1] == '3' or lines[-1] == '0'):
+ return 0
+ return -1
+
+
+def get_postgre_status():
+ retcode, out, err = run_os_command(PG_ST_CMD)
+ try:
+ pg_status = re.search('(stopped|running)', out, re.IGNORECASE).group(0).lower()
+ except AttributeError:
+ pg_status = None
+ return pg_status, retcode, out, err
+
+
+def check_postgre_up():
+ pg_status, retcode, out, err = get_postgre_status()
+ if pg_status == PG_STATUS_RUNNING:
+ print_info_msg("PostgreSQL is running")
+ return pg_status, 0, out, err
+ else:
+ # run initdb only on non-Ubuntu systems, as Ubuntu does not have the initdb cmd
+ if OS_TYPE != OSConst.OS_UBUNTU:
+ print "Running initdb: this may take up to a minute."
+ retcode, out, err = run_os_command(PG_INITDB_CMD)
+ if retcode == 0:
+ print out
+ print "About to start PostgreSQL"
+ try:
+ process = subprocess.Popen(PG_START_CMD.split(' '),
+ stdout=subprocess.PIPE,
+ stdin=subprocess.PIPE,
+ stderr=subprocess.PIPE
+ )
+ if OS_TYPE == OSConst.OS_SUSE:
+ time.sleep(20)
+ result = process.poll()
+ print_info_msg("Result of postgres start cmd: " + str(result))
+ if result is None:
+ process.kill()
+ pg_status, retcode, out, err = get_postgre_status()
+ else:
+ retcode = result
+ else:
+ out, err = process.communicate()
+ retcode = process.returncode
+ if pg_status == PG_STATUS_RUNNING:
+ print_info_msg("Postgres process is running. Returning...")
+ return pg_status, 0, out, err
+ except Exception, e:
+ pg_status, retcode, out, err = get_postgre_status()
+ if pg_status == PG_STATUS_RUNNING:
+ return pg_status, 0, out, err
+ else:
+ print_error_msg("Postgres start failed. " + str(e))
+ return pg_status, retcode, out, err
+
+def get_validated_db_name(database_name):
+ return get_validated_string_input(
+ DATABASE_STORAGE_NAMES[DATABASE_INDEX] + " Name ("
+ + database_name + "): ",
+ database_name,
+ ".*",
+ "Invalid " + DATABASE_STORAGE_NAMES[DATABASE_INDEX] + " name.",
+ False
+ )
+
+def get_validated_service_name(service_name, index):
+ return get_validated_string_input(
+ ORACLE_DB_ID_TYPES[index] + " (" + service_name + "): ",
+ service_name,
+ ".*",
+ "Invalid " + ORACLE_DB_ID_TYPES[index] + ".",
+ False
+ )
+
+def get_pass_file_path(conf_file):
+ return os.path.join(os.path.dirname(conf_file),
+ JDBC_PASSWORD_FILENAME)
+
+# Store local database connection properties
+def os_store_local_properties(args):
+ properties = get_ambari_properties()
+ if properties == -1:
+ print_error_msg("Error getting ambari properties")
+ return -1
+
+ isSecure = get_is_secure(properties)
+
+ properties.removeOldProp(JDBC_SCHEMA_PROPERTY)
+ properties.removeOldProp(JDBC_HOSTNAME_PROPERTY)
+ properties.removeOldProp(JDBC_RCA_DRIVER_PROPERTY)
+ properties.removeOldProp(JDBC_RCA_URL_PROPERTY)
+ properties.removeOldProp(JDBC_PORT_PROPERTY)
+ properties.removeOldProp(JDBC_DRIVER_PROPERTY)
+ properties.removeOldProp(JDBC_URL_PROPERTY)
+ properties.process_pair(PERSISTENCE_TYPE_PROPERTY, "local")
+ properties.process_pair(JDBC_DATABASE_PROPERTY, args.database_name)
+ properties.process_pair(JDBC_USER_NAME_PROPERTY, args.database_username)
+ properties.process_pair(JDBC_PASSWORD_PROPERTY,
+ store_password_file(args.database_password, JDBC_PASSWORD_FILENAME))
+
+ if isSecure:
+ encrypted_password = encrypt_password(JDBC_RCA_PASSWORD_ALIAS, args.database_password)
+ if args.database_password != encrypted_password:
+ properties.process_pair(JDBC_PASSWORD_PROPERTY, encrypted_password)
+ pass
+ pass
+
+ return 0
+
+
+# Store set of properties for remote database connection
+def os_store_remote_properties(args):
+ properties = get_ambari_properties()
+ if properties == -1:
+ print_error_msg("Error getting ambari properties")
+ return -1
+
+ isSecure = get_is_secure(properties)
+
+ properties.process_pair(PERSISTENCE_TYPE_PROPERTY, "remote")
+
+ properties.process_pair(JDBC_DATABASE_PROPERTY, args.dbms)
+ properties.process_pair(JDBC_HOSTNAME_PROPERTY, args.database_host)
+ properties.process_pair(JDBC_PORT_PROPERTY, args.database_port)
+ properties.process_pair(JDBC_SCHEMA_PROPERTY, args.database_name)
+
+ properties.process_pair(JDBC_DRIVER_PROPERTY, DATABASE_DRIVER_NAMES[DATABASE_INDEX])
+ # fully qualify the hostname to make sure all the other hosts can connect
+ # to the jdbc hostname since its passed onto the agents for RCA
+ jdbc_hostname = args.database_host
+ if (args.database_host == "localhost"):
+ jdbc_hostname = socket.getfqdn()
+
+ connectionStringFormat = DATABASE_CONNECTION_STRINGS
+ if args.sid_or_sname == "sid":
+ connectionStringFormat = DATABASE_CONNECTION_STRINGS_ALT
+ properties.process_pair(JDBC_URL_PROPERTY,
+ connectionStringFormat[DATABASE_INDEX].format(jdbc_hostname, args.database_port,
+ args.database_name))
+ properties.process_pair(JDBC_USER_NAME_PROPERTY, args.database_username)
+ properties.process_pair(JDBC_PASSWORD_PROPERTY,
+ store_password_file(args.database_password, JDBC_PASSWORD_FILENAME))
+
+ # save any other defined properties to pass to JDBC
+ if DATABASE_INDEX < len(DATABASE_JDBC_PROPERTIES):
+ for pair in DATABASE_JDBC_PROPERTIES[DATABASE_INDEX]:
+ properties.process_pair(JDBC_PROPERTIES_PREFIX + pair[0], pair[1])
+
+ if isSecure:
+ encrypted_password = encrypt_password(JDBC_RCA_PASSWORD_ALIAS, args.database_password)
+ if encrypted_password != args.database_password:
+ properties.process_pair(JDBC_PASSWORD_PROPERTY, encrypted_password)
+ pass
+
+ properties.process_pair(JDBC_RCA_DRIVER_PROPERTY, DATABASE_DRIVER_NAMES[DATABASE_INDEX])
+ properties.process_pair(JDBC_RCA_URL_PROPERTY,
+ connectionStringFormat[DATABASE_INDEX].format(jdbc_hostname, args.database_port,
+ args.database_name))
+ properties.process_pair(JDBC_RCA_USER_NAME_PROPERTY, args.database_username)
+ properties.process_pair(JDBC_RCA_PASSWORD_FILE_PROPERTY,
+ store_password_file(args.database_password, JDBC_PASSWORD_FILENAME))
+ if isSecure:
+ encrypted_password = encrypt_password(JDBC_RCA_PASSWORD_ALIAS, args.database_password)
+ if encrypted_password != args.database_password:
+ properties.process_pair(JDBC_RCA_PASSWORD_FILE_PROPERTY, encrypted_password)
+ pass
+
+ return 0
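+
+# For a hypothetical remote PostgreSQL host db.example.com, the stored URL would
+# be jdbc:postgresql://db.example.com:5432/ambari; for Oracle with
+# sid_or_sname == "sid", the alternate host:port:SID form is used instead.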
diff --git a/ambari-server/src/main/python/ambari_server/serverSetup_windows.py b/ambari-server/src/main/python/ambari_server/serverSetup_windows.py
new file mode 100644
index 0000000000..a906ef5002
--- /dev/null
+++ b/ambari-server/src/main/python/ambari_server/serverSetup_windows.py
@@ -0,0 +1,313 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import optparse
+import os
+import socket
+import string
+
+from _winreg import (OpenKey, EnumValue, HKEY_LOCAL_MACHINE, KEY_READ, CloseKey, KEY_WRITE, QueryValueEx, SetValueEx,
+ REG_EXPAND_SZ)
+
+from ambari_commons import os_utils
+
+from ambari_commons.exceptions import *
+from ambari_commons.logging_utils import *
+from ambari_commons.os_windows import run_os_command, UserHelper
+from ambari_server.dbConfiguration import DBMSConfig
+from ambari_server.serverConfiguration import *
+from ambari_server.serverConfiguration_windows import OUT_DIR
+from ambari_server.userInput import get_validated_string_input
+
+# Non-root user setup commands
+NR_USER_COMMENT = "Ambari user"
+NR_GET_OWNER_CMD = 'stat -c "%U" {0}'
+NR_USERADD_CMD = 'cmd /C net user {0} {1} /ADD'
+NR_SET_USER_COMMENT_CMD = 'usermod -c "{0}" {1}'
+
+NR_USER_CHANGE_PROMPT = "Ambari-server service is configured to run under user '{0}'. Change this setting [y/n] (n)? "
+NR_USER_CUSTOMIZE_PROMPT = "Customize user account for ambari-server service [y/n] (n)? "
+NR_DEFAULT_USER = "NT AUTHORITY\SYSTEM"
+
+SERVICE_USERNAME_KEY = "TMP_AMBARI_USERNAME"
+SERVICE_PASSWORD_KEY = "TMP_AMBARI_PASSWORD"
+
+# JDK setup choices
+JDK_DEFAULT_CONFIGS = [
+ JDKRelease("jdk7.67", "Oracle JDK 7.67",
+ "http://public-repo-1.hortonworks.com/ARTIFACTS/jdk-7u67-windows-x64.exe", "jdk-7u67-windows-x64.exe",
+ "http://public-repo-1.hortonworks.com/ARTIFACTS/UnlimitedJCEPolicyJDK7.zip", "UnlimitedJCEPolicyJDK7.zip",
+ None)
+]
+
+JDK_VERSION_REs = ["(jdk.*)/jre", "Creating (jdk.*)/jre"]
+JDK_PROMPT = "[{0}] {1}\n"
+JDK_CUSTOM_CHOICE_PROMPT = "[{0}] - Custom JDK\n==============================================================================\nEnter choice ({1}): "
+JDK_VALID_CHOICES = "^[{0}{1:d}]$"
+CUSTOM_JDK_NUMBER = "4"
+JDK_MIN_FILESIZE = 5000
+MAKE_FILE_EXECUTABLE_CMD = "chmod a+x {0}"
+
+JDK_DOWNLOAD_CMD = "curl --create-dirs -o {0} {1}"
+JDK_DOWNLOAD_SIZE_CMD = "curl -I {0}"
+
+# use --no-same-owner when running as root to prevent uucp as the user (AMBARI-6478)
+UNTAR_JDK_ARCHIVE = "tar --no-same-owner -xvf {0}"
+
+
+#JDBC
+USERNAME_PATTERN = "^[a-zA-Z_][a-zA-Z0-9_\-]*$"
+DATABASE_DBMS = "sqlserver"
+DATABASE_NAME = "ambari"
+DATABASE_SERVER = "localhost\\\\SQLEXPRESS"
+DATABASE_DRIVER_NAME = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
+
+METRICS_DATABASE_NAME = "HadoopMetrics"
+
+JDBC_PATTERNS = {"sqlserver": "sqljdbc*.jar"}
+DATABASE_FULL_NAMES = {"sqlserver": "SQL Server"}
+JDBC_DB_OPTION_VALUES = ["sqlserver"]
+JDBC_DB_DEFAULT_DRIVER = {"sqlserver" : "sqljdbc4.jar"}
+
+
+ERROR_NOT_ROOT = 'Ambari-server setup should be run with administrator-level privileges'
+
+MESSAGE_CHECK_FIREWALL = 'Checking firewall status...'
+
+def os_check_firewall():
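+  # Run the firewall-check PowerShell script; its output is expected to hold
+  # one status line per firewall profile (Domain, Standard, Public), with "1"
+  # meaning the profile is enabled.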
+ out = run_powershell_script(CHECK_FIREWALL_SCRIPT)
+ if out[0] != 0:
+ print_warning_msg("Unable to check firewall status:{0}".format(out[2]))
+ return False
+ profiles_status = [i for i in out[1].split("\n") if not i == ""]
+ if "1" in profiles_status:
+ enabled_profiles = []
+ if profiles_status[0] == "1":
+ enabled_profiles.append("DomainProfile")
+ if profiles_status[1] == "1":
+ enabled_profiles.append("StandardProfile")
+ if profiles_status[2] == "1":
+ enabled_profiles.append("PublicProfile")
+ print_warning_msg("Following firewall profiles enabled:{0}. Make sure that firewall properly configured.".format(",".join(enabled_profiles)))
+ return False
+ return True
+
+# No security enhancements in Windows
+def disable_security_enhancements():
+ retcode = 0
+ err = ''
+ return (retcode, err)
+
+
+#
+# User account creation
+#
+
+def os_create_custom_user():
+ user = get_validated_string_input(
+ "Enter user account for ambari-server service ({0}):".format(NR_DEFAULT_USER),
+ NR_DEFAULT_USER, None,
+ "Invalid username.",
+ False
+ )
+ if user == NR_DEFAULT_USER:
+ return 0, user
+ password = get_validated_string_input("Enter password for user {0}:".format(user), "", None, "Password", True, False)
+
+ uh = UserHelper()
+
+ status, message = uh.create_user(user,password)
+ if status == UserHelper.USER_EXISTS:
+ print_info_msg("User {0} already exists, make sure that you typed correct password for user, "
+ "skipping user creation".format(user))
+
+ elif status == UserHelper.ACTION_FAILED: # fail
+ print_warning_msg("Can't create user {0}. Failed with message {1}".format(user, message))
+ return UserHelper.ACTION_FAILED, None
+
+ # setting SeServiceLogonRight to user
+
+ status, message = uh.add_user_privilege(user, 'SeServiceLogonRight')
+ if status == UserHelper.ACTION_FAILED:
+ print_warning_msg("Can't add SeServiceLogonRight to user {0}. Failed with message {1}".format(user, message))
+ return UserHelper.ACTION_FAILED, None
+
+ print_info_msg("User configuration is done.")
+ print_warning_msg("When using non SYSTEM user make sure that your user have read\write access to log directories and "
+ "all server directories. In case of integrated authentication for SQL Server make sure that your "
+ "user properly configured to use ambari and metric database.")
+ #storing username and password in os.environ temporary to pass them to service
+ os.environ[SERVICE_USERNAME_KEY] = user
+ os.environ[SERVICE_PASSWORD_KEY] = password
+ return 0, user
+
+
+#
+# JDK Setup
+#
+def populate_jdk_configs(properties, jdk_num):
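+  # Build the list of selectable JDK releases (from ambari.properties when
+  # JDK_RELEASES is defined, otherwise the built-in defaults), the numbered
+  # choice prompt and the regex of valid menu choices.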
+ if properties.has_key(JDK_RELEASES):
+ jdk_names = properties[JDK_RELEASES].split(',')
+ jdks = []
+ for jdk_name in jdk_names:
+ jdkR = JDKRelease.from_properties(properties, jdk_name)
+ jdks.append(jdkR)
+ else:
+ jdks = JDK_DEFAULT_CONFIGS
+
+ n_config = 1
+ jdk_choice_prompt = ''
+ jdk_choices = ''
+ for jdk in jdks:
+ jdk_choice_prompt += JDK_PROMPT.format(n_config, jdk.desc)
+ jdk_choices_tmp = '{0}{1:d}'.format(jdk_choices, n_config)
+ jdk_choices = jdk_choices_tmp
+ n_config += 1
+
+ jdk_choice_prompt += JDK_CUSTOM_CHOICE_PROMPT.format(n_config, jdk_num)
+ jdk_valid_choices = JDK_VALID_CHOICES.format(jdk_choices, n_config)
+
+ return (jdks, jdk_choice_prompt, jdk_valid_choices, n_config)
+
+
+def os_install_jdk(java_inst_file, java_home_dir):
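+  # Run the Oracle JDK .exe installer silently (/s), logging to OUT_DIR;
+  # installer exit code 1603 is treated below as "already installed".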
+ print "Installing JDK to {0}".format(java_home_dir)
+
+ if not os.path.exists(java_home_dir):
+ os.makedirs(java_home_dir)
+
+ if java_inst_file.endswith(".exe"):
+ (dirname, filename) = os.path.split(java_inst_file)
+ installLogFilePath = os.path.join(OUT_DIR, filename + "-install.log")
+ #jre7u67.exe /s INSTALLDIR=<dir> STATIC=1 WEB_JAVA=0 /L \\var\\log\\ambari-server\\jre7u67.exe-install.log
+ installCmd = [
+ java_inst_file,
+ "/s",
+ "INSTALLDIR=" + java_home_dir,
+ "STATIC=1",
+ "WEB_JAVA=0",
+ "/L",
+ installLogFilePath
+ ]
+ retcode, out, err = run_os_command(installCmd)
+ #TODO: support .msi file installations
+ #msiexec.exe jre.msi /s INSTALLDIR=<dir> STATIC=1 WEB_JAVA=0 /L \\var\\log\\ambari-server\\jre7u67-install.log ?
+ else:
+ err = "JDK installation failed.Unknown file mask."
+ raise FatalException(1, err)
+
+ if retcode == 1603:
+ # JDK already installed
+ print "JDK already installed in {0}".format(java_home_dir)
+ retcode = 0
+ else:
+ if retcode != 0:
+ err = "Installation of JDK returned exit code %s" % retcode
+ raise FatalException(retcode, err)
+
+ print "Successfully installed JDK to {0}".format(java_home_dir)
+
+ # Don't forget to adjust the JAVA_HOME env var
+
+ return (retcode, out)
+
+def os_ensure_java_home_env_var_is_set(java_home_var):
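+  # SETX /M persists JAVA_HOME machine-wide; os.environ is updated as well so
+  # the new value is visible to this process without restarting it.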
+ if not os.environ.has_key(JAVA_HOME) or os.environ[JAVA_HOME] != java_home_var:
+ java_home_var_val = java_home_var.replace('\\\\', '\\')
+ os.system("SETX {0} {1} /M".format(JAVA_HOME, java_home_var_val))
+ os.environ[JAVA_HOME] = java_home_var
+ pass
+
+#
+# JDBC Setup
+#
+
+def os_check_jdbc_options(options):
+ #Only SQL Server supported, no point in checking options.jdbc_db
+ return (options.jdbc_driver is not None)
+
+def os_setup_jdbc_drivers(args):
+ properties = get_ambari_properties()
+ if properties == -1:
+ print_error_msg("Error getting ambari properties")
+ return -1
+
+ #Only support SQL Server
+ dbms = DBMSConfig.create(args, properties)
+ if dbms.ensure_jdbc_driver_installed(args, properties):
+ # Now save the properties file
+ update_properties(properties)
+ pass
+
+def os_setup_database(options):
+ properties = get_ambari_properties()
+ if properties == -1:
+ raise FatalException(-1, "Error getting ambari properties")
+
+ #Ensure the default database host is set
+ options.default_database_host = "localhost\\SQLEXPRESS"
+
+ #Only support SQL Server
+ dbmsAmbari = DBMSConfig.create(options, properties, "Ambari")
+ resultA = dbmsAmbari.configure_database(options, properties)
+
+ #By default, use the same server for Metrics
+ options.default_database_host = dbmsAmbari.database_host
+
+ dbmsMetrics = DBMSConfig.create(options, properties, "Metrics")
+ resultM = dbmsMetrics.configure_database(options, properties)
+
+ # Now save the properties file
+ if resultA or resultM:
+ update_properties(properties)
+
+ dbmsAmbari.setup_database()
+ dbmsMetrics.setup_database()
+
+def os_reset_database(options):
+ properties = get_ambari_properties()
+ if properties == -1:
+ raise FatalException(-1, "Error getting ambari properties")
+
+ if not (properties.getPropertyDict().has_key(JDBC_URL_PROPERTY) and
+ properties.getPropertyDict().has_key(JDBC_RCA_URL_PROPERTY) and
+ properties.getPropertyDict().has_key(JDBC_METRICS_URL_PROPERTY)):
+ raise FatalException(-1, "Ambari Server not set up yet. Nothing to reset.")
+
+ empty_options = optparse.Values()
+ empty_options.silent = options.silent
+ empty_options.database_host = ""
+ empty_options.database_port = ""
+ empty_options.database_name = ""
+ empty_options.database_windows_auth = False
+ empty_options.database_username = ""
+ empty_options.database_password = ""
+ empty_options.init_db_script_file = ""
+ empty_options.cleanup_db_script_file = ""
+ empty_options.init_metrics_db_script_file = ""
+ empty_options.cleanup_metrics_db_script_file = ""
+
+ #Only support SQL Server
+ dbmsAmbari = DBMSConfig.create(empty_options, properties, "Ambari")
+ dbmsAmbari.reset_database()
+
+ dbmsMetrics = DBMSConfig.create(empty_options, properties, "Metrics")
+ dbmsMetrics.reset_database()
+ pass
diff --git a/ambari-server/src/main/python/ambari_server/setupActions.py b/ambari-server/src/main/python/ambari_server/setupActions.py
new file mode 100644
index 0000000000..ee8eaa07fe
--- /dev/null
+++ b/ambari-server/src/main/python/ambari_server/setupActions.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+# action commands
+SETUP_ACTION = "setup"
+START_ACTION = "start"
+PSTART_ACTION = "pstart"
+STOP_ACTION = "stop"
+RESET_ACTION = "reset"
+UPGRADE_ACTION = "upgrade"
+UPGRADE_STACK_ACTION = "upgradestack"
+REFRESH_STACK_HASH_ACTION = "refresh-stack-hash"
+STATUS_ACTION = "status"
+SETUP_HTTPS_ACTION = "setup-https"
+LDAP_SETUP_ACTION = "setup-ldap"
+SETUP_GANGLIA_HTTPS_ACTION = "setup-ganglia-https"
+SETUP_NAGIOS_HTTPS_ACTION = "setup-nagios-https"
+ENCRYPT_PASSWORDS_ACTION = "encrypt-passwords"
+SETUP_SECURITY_ACTION = "setup-security"
+
+ACTION_REQUIRE_RESTART = [RESET_ACTION, UPGRADE_ACTION, UPGRADE_STACK_ACTION,
+ SETUP_SECURITY_ACTION, LDAP_SETUP_ACTION]
diff --git a/ambari-server/src/main/python/ambari_server/setupSecurity.py b/ambari-server/src/main/python/ambari_server/setupSecurity.py
new file mode 100644
index 0000000000..8efd371f68
--- /dev/null
+++ b/ambari-server/src/main/python/ambari_server/setupSecurity.py
@@ -0,0 +1,1216 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+import datetime
+import fileinput
+import random
+import socket
+import stat
+import sys
+import urllib2
+
+from ambari_commons.exceptions import *
+from serverConfiguration import *
+from setupActions import *
+from userInput import *
+
+
+SSL_PASSWORD_FILE = "pass.txt"
+SSL_PASSIN_FILE = "passin.txt"
+
+# openssl command
+VALIDATE_KEYSTORE_CMD = "openssl pkcs12 -info -in '{0}' -password file:'{1}' -passout file:'{2}'"
+EXPRT_KSTR_CMD = "openssl pkcs12 -export -in '{0}' -inkey '{1}' -certfile '{0}' -out '{4}' -password file:'{2}' -passin file:'{3}'"
+CHANGE_KEY_PWD_CND = 'openssl rsa -in {0} -des3 -out {0}.secured -passout pass:{1}'
+GET_CRT_INFO_CMD = 'openssl x509 -dates -subject -in {0}'
+
+#keytool commands
+keytool_bin = "keytool"
+if OSCheck.is_windows_os():
+ keytool_bin = "keytool.exe"
+
+KEYTOOL_IMPORT_CERT_CMD = "{0}" + os.sep + "bin" + os.sep + keytool_bin + " -import -alias '{1}' -storetype '{2}' -file '{3}' -storepass '{4}' -noprompt"
+KEYTOOL_DELETE_CERT_CMD = "{0}" + os.sep + "bin" + os.sep + keytool_bin + " -delete -alias '{1}' -storepass '{2}' -noprompt"
+KEYTOOL_KEYSTORE = " -keystore '{0}'"
+
+java_bin = "java"
+if OSCheck.is_windows_os():
+ java_bin = "java.exe"
+
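+# Command templates for the Java credential store helpers: CredentialProvider
+# GET/PUT read and write password aliases, MasterKeyServiceImpl saves the
+# master key. Output is redirected to the server out file.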
+SECURITY_PROVIDER_GET_CMD = "{0}" + os.sep + "bin" + os.sep + java_bin + " -cp {1}" +\
+ os.pathsep + "{2} " +\
+ "org.apache.ambari.server.security.encryption" +\
+ ".CredentialProvider GET {3} {4} {5} " +\
+ "> " + SERVER_OUT_FILE + " 2>&1"
+
+SECURITY_PROVIDER_PUT_CMD = "{0}" + os.sep + "bin" + os.sep + java_bin + " -cp {1}" +\
+ os.pathsep + "{2} " +\
+ "org.apache.ambari.server.security.encryption" +\
+ ".CredentialProvider PUT {3} {4} {5} " +\
+ "> " + SERVER_OUT_FILE + " 2>&1"
+
+SECURITY_PROVIDER_KEY_CMD = "{0}" + os.sep + "bin" + os.sep + java_bin + " -cp {1}" +\
+ os.pathsep + "{2} " +\
+ "org.apache.ambari.server.security.encryption" +\
+ ".MasterKeyServiceImpl {3} {4} {5} " +\
+ "> " + SERVER_OUT_FILE + " 2>&1"
+
+SSL_KEY_DIR = 'security.server.keys_dir'
+SSL_API_PORT = 'client.api.ssl.port'
+SSL_API = 'api.ssl'
+SSL_SERVER_CERT_NAME = 'client.api.ssl.cert_name'
+SSL_SERVER_KEY_NAME = 'client.api.ssl.key_name'
+SSL_CERT_FILE_NAME = "https.crt"
+SSL_KEY_FILE_NAME = "https.key"
+SSL_KEYSTORE_FILE_NAME = "https.keystore.p12"
+SSL_KEY_PASSWORD_FILE_NAME = "https.pass.txt"
+SSL_KEY_PASSWORD_LENGTH = 50
+DEFAULT_SSL_API_PORT = 8443
+SSL_DATE_FORMAT = '%b %d %H:%M:%S %Y GMT'
+
+#SSL certificate metainfo
+COMMON_NAME_ATTR = 'CN'
+NOT_BEFORE_ATTR = 'notBefore'
+NOT_AFTER_ATTR = 'notAfter'
+
+SRVR_TWO_WAY_SSL_PORT_PROPERTY = "security.server.two_way_ssl.port"
+SRVR_TWO_WAY_SSL_PORT = "8441"
+
+SRVR_ONE_WAY_SSL_PORT_PROPERTY = "security.server.one_way_ssl.port"
+SRVR_ONE_WAY_SSL_PORT = "8440"
+
+SECURITY_KEYS_DIR = "security.server.keys_dir"
+SECURITY_MASTER_KEY_LOCATION = "security.master.key.location"
+SECURITY_KEY_IS_PERSISTED = "security.master.key.ispersisted"
+SECURITY_KEY_ENV_VAR_NAME = "AMBARI_SECURITY_MASTER_KEY"
+SECURITY_MASTER_KEY_FILENAME = "master"
+SECURITY_IS_ENCRYPTION_ENABLED = "security.passwords.encryption.enabled"
+SECURITY_KERBEROS_JASS_FILENAME = "krb5JAASLogin.conf"
+
+GANGLIA_HTTPS = 'ganglia.https'
+NAGIOS_HTTPS = 'nagios.https'
+
+SSL_TRUSTSTORE_PASSWORD_ALIAS = "ambari.ssl.trustStore.password"
+SSL_TRUSTSTORE_PATH_PROPERTY = "ssl.trustStore.path"
+SSL_TRUSTSTORE_PASSWORD_PROPERTY = "ssl.trustStore.password"
+SSL_TRUSTSTORE_TYPE_PROPERTY = "ssl.trustStore.type"
+
+DEFAULT_PASSWORD = "bigdata"
+PASSWORD_PATTERN = "^[a-zA-Z0-9_-]*$"
+
+LDAP_MGR_PASSWORD_ALIAS = "ambari.ldap.manager.password"
+LDAP_MGR_PASSWORD_PROPERTY = "authentication.ldap.managerPassword"
+LDAP_MGR_USERNAME_PROPERTY = "authentication.ldap.managerDn"
+
+REGEX_IP_ADDRESS = "^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$"
+REGEX_HOSTNAME = "^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$"
+REGEX_HOSTNAME_PORT = "^(.*:[0-9]{1,5}$)"
+REGEX_TRUE_FALSE = "^(true|false)?$"
+REGEX_ANYTHING = ".*"
+
+CLIENT_SECURITY_KEY = "client.security"
+
+# ownership/permissions mapping
+# path - permissions - user - recursive
+# Rules are executed in the same order as they are listed
+# {0} in user will be replaced by the customized ambari-server username
+NR_ADJUST_OWNERSHIP_LIST = [
+
+ ("/var/log/ambari-server", "644", "{0}", True),
+ ("/var/log/ambari-server", "755", "{0}", False),
+ ("/var/run/ambari-server", "644", "{0}", True),
+ ("/var/run/ambari-server", "755", "{0}", False),
+ ("/var/run/ambari-server/bootstrap", "755", "{0}", False),
+ ("/var/lib/ambari-server/ambari-env.sh", "700", "{0}", False),
+ ("/var/lib/ambari-server/keys", "600", "{0}", True),
+ ("/var/lib/ambari-server/keys", "700", "{0}", False),
+ ("/var/lib/ambari-server/keys/db", "700", "{0}", False),
+ ("/var/lib/ambari-server/keys/db/newcerts", "700", "{0}", False),
+ ("/var/lib/ambari-server/keys/.ssh", "700", "{0}", False),
+ ("/var/lib/ambari-server/resources/stacks/", "755", "{0}", True),
+ ("/var/lib/ambari-server/resources/custom_actions/", "755", "{0}", True),
+ ("/etc/ambari-server/conf", "644", "{0}", True),
+ ("/etc/ambari-server/conf", "755", "{0}", False),
+ ("/etc/ambari-server/conf/password.dat", "640", "{0}", False),
+ # Also, /etc/ambari-server/conf/password.dat
+ # is generated later at store_password_file
+]
+
+
+def is_valid_https_port(port):
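+  # The client API HTTPS port must not collide with the server's one-way or
+  # two-way SSL ports.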
+ properties = get_ambari_properties()
+ if properties == -1:
+ print "Error getting ambari properties"
+ return False
+
+ one_way_port = properties[SRVR_ONE_WAY_SSL_PORT_PROPERTY]
+ if not one_way_port:
+ one_way_port = SRVR_ONE_WAY_SSL_PORT
+
+ two_way_port = properties[SRVR_TWO_WAY_SSL_PORT_PROPERTY]
+ if not two_way_port:
+ two_way_port = SRVR_TWO_WAY_SSL_PORT
+
+ if port.strip() == one_way_port.strip():
+ print "Port for https can't match the port for one way authentication port(" + one_way_port + ")"
+ return False
+
+ if port.strip() == two_way_port.strip():
+ print "Port for https can't match the port for two way authentication port(" + two_way_port + ")"
+ return False
+
+ return True
+
+def run_component_https_cmd(cmd):
+ retcode, out, err = run_os_command(cmd)
+
+ if not retcode == 0:
+    err = 'Error occurred during truststore setup: ' + out + " : " + err
+ raise FatalException(1, err)
+
+def get_cert_info(path):
+ retcode, out, err = run_os_command(GET_CRT_INFO_CMD.format(path))
+
+ if retcode != 0:
+ print 'Error getting Certificate info'
+ print err
+ return None
+
+ if out:
+ certInfolist = out.split(os.linesep)
+ else:
+ print 'Empty Certificate info'
+ return None
+
+ notBefore = None
+ notAfter = None
+ subject = None
+
+  for line in certInfolist:
+
+    if line.startswith('notAfter='):
+      notAfter = line.split('=')[1]
+
+    if line.startswith('notBefore='):
+      notBefore = line.split('=')[1]
+
+    if line.startswith('subject='):
+      subject = line.split('=', 1)[1]
+
+ #Convert subj to dict
+ pattern = re.compile(r"[A-Z]{1,2}=[\w.-]{1,}")
+ if subject:
+ subjList = pattern.findall(subject)
+ keys = [item.split('=')[0] for item in subjList]
+ values = [item.split('=')[1] for item in subjList]
+ subjDict = dict(zip(keys, values))
+
+ result = subjDict
+ result['notBefore'] = notBefore
+ result['notAfter'] = notAfter
+ result['subject'] = subject
+
+ return result
+ else:
+ return {}
+
+def is_valid_cert_exp(certInfoDict):
+ if certInfoDict.has_key(NOT_BEFORE_ATTR):
+ notBefore = certInfoDict[NOT_BEFORE_ATTR]
+ else:
+ print_warning_msg('There is no Not Before value in Certificate')
+ return False
+
+ if certInfoDict.has_key(NOT_AFTER_ATTR):
+    notAfter = certInfoDict[NOT_AFTER_ATTR]
+ else:
+ print_warning_msg('There is no Not After value in Certificate')
+ return False
+
+ notBeforeDate = datetime.datetime.strptime(notBefore, SSL_DATE_FORMAT)
+ notAfterDate = datetime.datetime.strptime(notAfter, SSL_DATE_FORMAT)
+
+ currentDate = datetime.datetime.now()
+
+ if currentDate > notAfterDate:
+ print_warning_msg('Certificate expired on: ' + str(notAfterDate))
+ return False
+
+ if currentDate < notBeforeDate:
+ print_warning_msg('Certificate will be active from: ' + str(notBeforeDate))
+ return False
+
+ return True
+
+def is_valid_cert_host(certInfoDict):
+ if certInfoDict.has_key(COMMON_NAME_ATTR):
+ commonName = certInfoDict[COMMON_NAME_ATTR]
+ else:
+ print_warning_msg('There is no Common Name in Certificate')
+ return False
+
+ fqdn = get_fqdn()
+
+ if not fqdn:
+ print_warning_msg('Failed to get server FQDN')
+ return False
+
+ if commonName != fqdn:
+ print_warning_msg('Common Name in Certificate: ' + commonName + ' does not match the server FQDN: ' + fqdn)
+ return False
+
+ return True
+
+def generate_random_string(length=SSL_KEY_PASSWORD_LENGTH):
+ chars = string.digits + string.ascii_letters
+ return ''.join(random.choice(chars) for x in range(length))
+
+def import_cert_and_key(security_server_keys_dir):
+ import_cert_path = get_validated_filepath_input(\
+ "Enter path to Certificate: ",\
+ "Certificate not found")
+ import_key_path = get_validated_filepath_input(\
+ "Enter path to Private Key: ", "Private Key not found")
+ pem_password = get_validated_string_input("Please enter password for Private Key: ", "", None, None, True)
+
+ certInfoDict = get_cert_info(import_cert_path)
+
+ if not certInfoDict:
+ print_warning_msg('Unable to get Certificate information')
+ else:
+ #Validate common name of certificate
+ if not is_valid_cert_host(certInfoDict):
+ print_warning_msg('Unable to validate Certificate hostname')
+
+ #Validate issue and expirations dates of certificate
+ if not is_valid_cert_exp(certInfoDict):
+ print_warning_msg('Unable to validate Certificate issue and expiration dates')
+
+ #jetty requires private key files with non-empty key passwords
+ retcode = 0
+ err = ''
+ if not pem_password:
+ print 'Generating random password for HTTPS keystore...done.'
+ pem_password = generate_random_string()
+ retcode, out, err = run_os_command(CHANGE_KEY_PWD_CND.format(
+ import_key_path, pem_password))
+ import_key_path += '.secured'
+
+ if retcode == 0:
+ keystoreFilePath = os.path.join(security_server_keys_dir,\
+ SSL_KEYSTORE_FILE_NAME)
+ keystoreFilePathTmp = os.path.join(tempfile.gettempdir(),\
+ SSL_KEYSTORE_FILE_NAME)
+ passFilePath = os.path.join(security_server_keys_dir,\
+ SSL_KEY_PASSWORD_FILE_NAME)
+ passFilePathTmp = os.path.join(tempfile.gettempdir(),\
+ SSL_KEY_PASSWORD_FILE_NAME)
+ passinFilePath = os.path.join(tempfile.gettempdir(),\
+ SSL_PASSIN_FILE)
+ passwordFilePath = os.path.join(tempfile.gettempdir(),\
+ SSL_PASSWORD_FILE)
+
+    with open(passFilePathTmp, 'w+') as passFile:
+      passFile.write(pem_password)
+
+ set_file_permissions(passFilePath, "660", read_ambari_user(), False)
+
+ copy_file(passFilePathTmp, passinFilePath)
+ copy_file(passFilePathTmp, passwordFilePath)
+
+ retcode, out, err = run_os_command(EXPRT_KSTR_CMD.format(import_cert_path,\
+ import_key_path, passwordFilePath, passinFilePath, keystoreFilePathTmp))
+ if retcode == 0:
+ print 'Importing and saving Certificate...done.'
+ import_file_to_keystore(keystoreFilePathTmp, keystoreFilePath)
+ import_file_to_keystore(passFilePathTmp, passFilePath)
+
+ import_file_to_keystore(import_cert_path, os.path.join(\
+ security_server_keys_dir, SSL_CERT_FILE_NAME))
+ import_file_to_keystore(import_key_path, os.path.join(\
+ security_server_keys_dir, SSL_KEY_FILE_NAME))
+
+ #Validate keystore
+ retcode, out, err = run_os_command(VALIDATE_KEYSTORE_CMD.format(keystoreFilePath,\
+ passwordFilePath, passinFilePath))
+
+ remove_file(passinFilePath)
+ remove_file(passwordFilePath)
+
+ if not retcode == 0:
+      print 'Error occurred during keystore validation:'
+ print err
+ return False
+
+ return True
+ else:
+ print_error_msg('Could not import Certificate and Private Key.')
+ print 'SSL error on exporting keystore: ' + err.rstrip() + \
+      '.\nPlease ensure that the provided Private Key password is correct and ' +\
+      're-import the Certificate.'
+
+ return False
+
+def import_cert_and_key_action(security_server_keys_dir, properties):
+ if import_cert_and_key(security_server_keys_dir):
+ properties.process_pair(SSL_SERVER_CERT_NAME, SSL_CERT_FILE_NAME)
+ properties.process_pair(SSL_SERVER_KEY_NAME, SSL_KEY_FILE_NAME)
+ properties.process_pair(SSL_API, "true")
+ return True
+ else:
+ return False
+
+def get_delete_cert_command(jdk_path, alias, truststore_path, truststore_password):
+ cmd = KEYTOOL_DELETE_CERT_CMD.format(jdk_path, alias, truststore_password)
+ if truststore_path:
+ cmd += KEYTOOL_KEYSTORE.format(truststore_path)
+ return cmd
+
+def get_import_cert_command(jdk_path, alias, truststore_type, import_cert_path, truststore_path, truststore_password):
+ cmd = KEYTOOL_IMPORT_CERT_CMD.format(jdk_path, alias, truststore_type, import_cert_path, truststore_password)
+ if truststore_path:
+ cmd += KEYTOOL_KEYSTORE.format(truststore_path)
+ return cmd
+
+def import_file_to_keystore(source, destination):
+ shutil.copy(source, destination)
+ set_file_permissions(destination, "660", read_ambari_user(), False)
+
+def get_truststore_type(properties):
+
+ truststore_type = properties.get_property(SSL_TRUSTSTORE_TYPE_PROPERTY)
+ if not truststore_type:
+ SSL_TRUSTSTORE_TYPE_DEFAULT = get_value_from_properties(properties, SSL_TRUSTSTORE_TYPE_PROPERTY, "jks")
+
+ truststore_type = get_validated_string_input(
+ "TrustStore type [jks/jceks/pkcs12] {0}:".format(get_prompt_default(SSL_TRUSTSTORE_TYPE_DEFAULT)),
+ SSL_TRUSTSTORE_TYPE_DEFAULT,
+ "^(jks|jceks|pkcs12)?$", "Wrong type", False)
+
+ if truststore_type:
+ properties.process_pair(SSL_TRUSTSTORE_TYPE_PROPERTY, truststore_type)
+
+ return truststore_type
+
+def get_truststore_path(properties):
+
+ truststore_path = properties.get_property(SSL_TRUSTSTORE_PATH_PROPERTY)
+ if not truststore_path:
+ SSL_TRUSTSTORE_PATH_DEFAULT = get_value_from_properties(properties, SSL_TRUSTSTORE_PATH_PROPERTY)
+
+ while not truststore_path:
+ truststore_path = get_validated_string_input(
+ "Path to TrustStore file {0}:".format(get_prompt_default(SSL_TRUSTSTORE_PATH_DEFAULT)),
+ SSL_TRUSTSTORE_PATH_DEFAULT,
+ ".*", False, False)
+
+ if truststore_path:
+ properties.process_pair(SSL_TRUSTSTORE_PATH_PROPERTY, truststore_path)
+
+ return truststore_path
+
+def get_truststore_password(properties):
+ truststore_password = properties.get_property(SSL_TRUSTSTORE_PASSWORD_PROPERTY)
+ isSecure = get_is_secure(properties)
+ if truststore_password:
+ if isSecure:
+ truststore_password = decrypt_password_for_alias(SSL_TRUSTSTORE_PASSWORD_ALIAS)
+ else:
+ truststore_password = read_password("", ".*", "Password for TrustStore:", "Invalid characters in password")
+ if truststore_password:
+ encrypted_password = get_encrypted_password(SSL_TRUSTSTORE_PASSWORD_ALIAS, truststore_password, properties)
+ properties.process_pair(SSL_TRUSTSTORE_PASSWORD_PROPERTY, encrypted_password)
+
+ return truststore_password
+
+def read_password(passwordDefault=DEFAULT_PASSWORD,
+ passwordPattern=PASSWORD_PATTERN,
+ passwordPrompt=None,
+ passwordDescr=None):
+ # setup password
+ if passwordPrompt is None:
+ passwordPrompt = 'Password (' + passwordDefault + '): '
+
+ if passwordDescr is None:
+ passwordDescr = "Invalid characters in password. Use only alphanumeric or " \
+ "_ or - characters"
+
+ password = get_validated_string_input(passwordPrompt, passwordDefault,
+ passwordPattern, passwordDescr, True)
+
+ if not password:
+ print 'Password cannot be blank.'
+ return read_password(passwordDefault, passwordPattern, passwordPrompt,
+ passwordDescr)
+
+ if password != passwordDefault:
+ password1 = get_validated_string_input("Re-enter password: ",
+ passwordDefault, passwordPattern, passwordDescr, True)
+ if password != password1:
+ print "Passwords do not match"
+ return read_password(passwordDefault, passwordPattern, passwordPrompt,
+ passwordDescr)
+
+ return password
+
+def get_is_secure(properties):
+ isSecure = properties.get_property(SECURITY_IS_ENCRYPTION_ENABLED)
+ isSecure = True if isSecure and isSecure.lower() == 'true' else False
+ return isSecure
+
+def encrypt_password(alias, password):
+ properties = get_ambari_properties()
+ if properties == -1:
+ raise FatalException(1, None)
+ return get_encrypted_password(alias, password, properties)
+
+def get_encrypted_password(alias, password, properties):
+ isSecure = get_is_secure(properties)
+ (isPersisted, masterKeyFile) = get_is_persisted(properties)
+ if isSecure:
+ masterKey = None
+ if not masterKeyFile:
+ # Encryption enabled but no master key file found
+ masterKey = get_original_master_key(properties)
+
+ retCode = save_passwd_for_alias(alias, password, masterKey)
+ if retCode != 0:
+ print 'Failed to save secure password!'
+ return password
+ else:
+ return get_alias_string(alias)
+
+ return password
+
+def is_alias_string(passwdStr):
+ regex = re.compile("\$\{alias=[\w\.]+\}")
+ # Match implies string at beginning of word
+ r = regex.match(passwdStr)
+ if r is not None:
+ return True
+ else:
+ return False
+
+def get_alias_string(alias):
+ return "${alias=" + alias + "}"
+
+def get_alias_from_alias_string(aliasStr):
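+  # Strip the leading "${alias=" (8 characters) and the trailing "}".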
+ return aliasStr[8:-1]
+
+def read_passwd_for_alias(alias, masterKey=""):
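+  # Resolve an alias to its clear-text password by invoking the Java
+  # CredentialProvider GET command, which writes the value to a temporary
+  # file; the file is read back and removed.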
+ if alias:
+ jdk_path = find_jdk()
+ if jdk_path is None:
+ print_error_msg("No JDK found, please run the \"setup\" "
+ "command to install a JDK automatically or install any "
+ "JDK manually to " + JDK_INSTALL_DIR)
+ return 1
+
+ tempFileName = "ambari.passwd"
+ passwd = ""
+ tempDir = tempfile.gettempdir()
+ #create temporary file for writing
+ tempFilePath = tempDir + os.sep + tempFileName
+ file = open(tempFilePath, 'w+')
+ os.chmod(tempFilePath, stat.S_IREAD | stat.S_IWRITE)
+ file.close()
+
+ if masterKey is None or masterKey == "":
+ masterKey = "None"
+
+ command = SECURITY_PROVIDER_GET_CMD.format(jdk_path,
+ get_conf_dir(), get_ambari_classpath(), alias, tempFilePath, masterKey)
+ (retcode, stdout, stderr) = run_os_command(command)
+ print_info_msg("Return code from credential provider get passwd: " +
+ str(retcode))
+ if retcode != 0:
+ print 'ERROR: Unable to read password from store. alias = ' + alias
+ else:
+ passwd = open(tempFilePath, 'r').read()
+ # Remove temporary file
+ os.remove(tempFilePath)
+ return passwd
+ else:
+ print_error_msg("Alias is unreadable.")
+
+def decrypt_password_for_alias(alias):
+ properties = get_ambari_properties()
+ if properties == -1:
+ raise FatalException(1, None)
+
+ isSecure = get_is_secure(properties)
+ (isPersisted, masterKeyFile) = get_is_persisted(properties)
+ if isSecure:
+ masterKey = None
+ if not masterKeyFile:
+ # Encryption enabled but no master key file found
+ masterKey = get_original_master_key(properties)
+
+ return read_passwd_for_alias(alias, masterKey)
+ else:
+ return alias
+
+def save_passwd_for_alias(alias, passwd, masterKey=""):
+ if alias and passwd:
+ jdk_path = find_jdk()
+ if jdk_path is None:
+ print_error_msg("No JDK found, please run the \"setup\" "
+ "command to install a JDK automatically or install any "
+ "JDK manually to " + JDK_INSTALL_DIR)
+ return 1
+
+ if masterKey is None or masterKey == "":
+ masterKey = "None"
+
+ command = SECURITY_PROVIDER_PUT_CMD.format(jdk_path, get_conf_dir(),
+ get_ambari_classpath(), alias, passwd, masterKey)
+ (retcode, stdout, stderr) = run_os_command(command)
+ print_info_msg("Return code from credential provider save passwd: " +
+ str(retcode))
+ return retcode
+ else:
+ print_error_msg("Alias or password is unreadable.")
+
+def get_is_persisted(properties):
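+  # The master key counts as persisted when the "master" key file exists in
+  # the configured key location.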
+ keyLocation = get_master_key_location(properties)
+ masterKeyFile = search_file(SECURITY_MASTER_KEY_FILENAME, keyLocation)
+ isPersisted = True if masterKeyFile else False
+
+ return (isPersisted, masterKeyFile)
+
+def get_credential_store_location(properties):
+ store_loc = properties[SECURITY_KEYS_DIR]
+ if store_loc is None or store_loc == "":
+ store_loc = "/var/lib/ambari-server/keys/credentials.jceks"
+ else:
+ store_loc += os.sep + "credentials.jceks"
+ return store_loc
+
+def get_master_key_location(properties):
+ keyLocation = properties[SECURITY_MASTER_KEY_LOCATION]
+ if keyLocation is None or keyLocation == "":
+ keyLocation = properties[SECURITY_KEYS_DIR]
+ return keyLocation
+
+def get_original_master_key(properties):
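+  # Prompt for the current master key and, when possible, validate it by
+  # decrypting an alias already stored in ambari.properties.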
+ try:
+ masterKey = get_validated_string_input('Enter current Master Key: ',
+ "", ".*", "", True, False)
+ except KeyboardInterrupt:
+ print 'Exiting...'
+ sys.exit(1)
+
+ # Find an alias that exists
+ alias = None
+ property = properties.get_property(JDBC_PASSWORD_PROPERTY)
+ if property and is_alias_string(property):
+ alias = JDBC_RCA_PASSWORD_ALIAS
+
+  if not alias:
+    property = properties.get_property(JDBC_METRICS_PASSWORD_PROPERTY)
+    if property and is_alias_string(property):
+      alias = JDBC_METRICS_PASSWORD_ALIAS
+
+ if not alias:
+ property = properties.get_property(LDAP_MGR_PASSWORD_PROPERTY)
+ if property and is_alias_string(property):
+ alias = LDAP_MGR_PASSWORD_ALIAS
+
+ if not alias:
+ property = properties.get_property(SSL_TRUSTSTORE_PASSWORD_PROPERTY)
+ if property and is_alias_string(property):
+ alias = SSL_TRUSTSTORE_PASSWORD_ALIAS
+
+ # Decrypt alias with master to validate it, if no master return
+ if alias and masterKey:
+ password = read_passwd_for_alias(alias, masterKey)
+ if not password:
+ print "ERROR: Master key does not match."
+ return get_original_master_key(properties)
+
+ return masterKey
+
+def read_master_key(isReset=False):
+ passwordPattern = ".*"
+ passwordPrompt = "Please provide master key for locking the credential store: "
+ passwordDescr = "Invalid characters in password. Use only alphanumeric or "\
+ "_ or - characters"
+ passwordDefault = ""
+ if isReset:
+ passwordPrompt = "Enter new Master Key: "
+
+ masterKey = get_validated_string_input(passwordPrompt, passwordDefault,
+ passwordPattern, passwordDescr, True, True)
+
+ if not masterKey:
+ print "Master Key cannot be empty!"
+ return read_master_key()
+
+ masterKey2 = get_validated_string_input("Re-enter master key: ",
+ passwordDefault, passwordPattern, passwordDescr, True, True)
+
+ if masterKey != masterKey2:
+ print "Master key did not match!"
+ return read_master_key()
+
+ return masterKey
+
+def save_master_key(master_key, key_location, persist=True):
+ if master_key:
+ jdk_path = find_jdk()
+ if jdk_path is None:
+ print_error_msg("No JDK found, please run the \"setup\" "
+ "command to install a JDK automatically or install any "
+ "JDK manually to " + JDK_INSTALL_DIR)
+ return 1
+ command = SECURITY_PROVIDER_KEY_CMD.format(jdk_path,
+ get_ambari_classpath(), get_conf_dir(), master_key, key_location, persist)
+ (retcode, stdout, stderr) = run_os_command(command)
+ print_info_msg("Return code from credential provider save KEY: " +
+ str(retcode))
+ else:
+ print_error_msg("Master key cannot be None.")
+
+def store_password_file(password, filename):
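+  # Write the clear-text password into a file next to ambari.properties and
+  # restrict its permissions to the configured ambari user.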
+ conf_file = find_properties_file()
+ passFilePath = os.path.join(os.path.dirname(conf_file),
+ filename)
+
+ with open(passFilePath, 'w+') as passFile:
+ passFile.write(password)
+ print_info_msg("Adjusting filesystem permissions")
+ ambari_user = read_ambari_user()
+ set_file_permissions(passFilePath, "660", ambari_user, False)
+
+ #Windows paths need double backslashes, otherwise the Ambari server deserializer will think the single \ are escape markers
+ return passFilePath.replace('\\', '\\\\')
+
+def remove_password_file(filename):
+ conf_file = find_properties_file()
+ passFilePath = os.path.join(os.path.dirname(conf_file),
+ filename)
+
+ if os.path.exists(passFilePath):
+ try:
+ os.remove(passFilePath)
+ except Exception, e:
+ print_warning_msg('Unable to remove password file: ' + str(e))
+ return 1
+ pass
+ return 0
+
+def adjust_directory_permissions(ambari_user):
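+  # Recreate the bootstrap directory, then apply the ownership/permission
+  # rules, extending the static list with the master key, credential store
+  # and truststore files when they exist.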
+ properties = get_ambari_properties()
+ bootstrap_dir = get_value_from_properties(properties, BOOTSTRAP_DIR_PROPERTY)
+ print_info_msg("Cleaning bootstrap directory ({0}) contents...".format(bootstrap_dir))
+ shutil.rmtree(bootstrap_dir, True) #Ignore the non-existent dir error
+ os.makedirs(bootstrap_dir)
+ # Add master key and credential store if exists
+ keyLocation = get_master_key_location(properties)
+ masterKeyFile = search_file(SECURITY_MASTER_KEY_FILENAME, keyLocation)
+ if masterKeyFile:
+    NR_ADJUST_OWNERSHIP_LIST.append((masterKeyFile, MASTER_KEY_FILE_PERMISSIONS, "{0}", False))
+ credStoreFile = get_credential_store_location(properties)
+ if os.path.exists(credStoreFile):
+    NR_ADJUST_OWNERSHIP_LIST.append((credStoreFile, CREDENTIALS_STORE_FILE_PERMISSIONS, "{0}", False))
+ trust_store_location = properties[SSL_TRUSTSTORE_PATH_PROPERTY]
+ if trust_store_location:
+    NR_ADJUST_OWNERSHIP_LIST.append((trust_store_location, TRUST_STORE_LOCATION_PERMISSIONS, "{0}", False))
+ print "Adjusting ambari-server permissions and ownership..."
+ for pack in NR_ADJUST_OWNERSHIP_LIST:
+ file = pack[0]
+ mod = pack[1]
+ user = pack[2].format(ambari_user)
+ recursive = pack[3]
+ set_file_permissions(file, mod, user, recursive)
+
+def get_fqdn():
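+  # Query the FQDN service URL from ambari.properties (2 second timeout) and
+  # fall back to the local socket FQDN on any failure.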
+ properties = get_ambari_properties()
+ if properties == -1:
+ print "Error reading ambari properties"
+ return None
+
+ get_fqdn_service_url = properties[GET_FQDN_SERVICE_URL]
+ try:
+ handle = urllib2.urlopen(get_fqdn_service_url, '', 2)
+    resp = handle.read()
+    handle.close()
+    return resp
+ except Exception:
+ return socket.getfqdn()
+
+def configure_ldap_password():
+ passwordDefault = ""
+ passwordPrompt = 'Enter Manager Password* : '
+ passwordPattern = ".*"
+ passwordDescr = "Invalid characters in password."
+
+ password = read_password(passwordDefault, passwordPattern, passwordPrompt,
+ passwordDescr)
+
+ return password
+
+def setup_https(args):
+ if not is_root():
+ err = 'ambari-server setup-https should be run with ' \
+ 'root-level privileges'
+ raise FatalException(4, err)
+ args.exit_message = None
+ if not SILENT:
+ properties = get_ambari_properties()
+ try:
+ security_server_keys_dir = properties.get_property(SSL_KEY_DIR)
+      client_api_ssl_port = DEFAULT_SSL_API_PORT if not properties.get_property(SSL_API_PORT)\
+        else properties.get_property(SSL_API_PORT)
+ api_ssl = properties.get_property(SSL_API) in ['true']
+ cert_was_imported = False
+ cert_must_import = True
+ if api_ssl:
+ if get_YN_input("Do you want to disable HTTPS [y/n] (n)? ", False):
+ properties.process_pair(SSL_API, "false")
+ cert_must_import=False
+ else:
+ properties.process_pair(SSL_API_PORT, \
+ get_validated_string_input(\
+ "SSL port ["+str(client_api_ssl_port)+"] ? ",\
+ str(client_api_ssl_port),\
+ "^[0-9]{1,5}$", "Invalid port.", False, validatorFunction = is_valid_https_port))
+ cert_was_imported = import_cert_and_key_action(security_server_keys_dir, properties)
+ else:
+ if get_YN_input("Do you want to configure HTTPS [y/n] (y)? ", True):
+ properties.process_pair(SSL_API_PORT,\
+ get_validated_string_input("SSL port ["+str(client_api_ssl_port)+"] ? ",\
+ str(client_api_ssl_port), "^[0-9]{1,5}$", "Invalid port.", False, validatorFunction = is_valid_https_port))
+ cert_was_imported = import_cert_and_key_action(security_server_keys_dir, properties)
+ else:
+ return False
+
+ if cert_must_import and not cert_was_imported:
+ print 'Setup of HTTPS failed. Exiting.'
+ return False
+
+ conf_file = find_properties_file()
+ f = open(conf_file, 'w')
+ properties.store(f, "Changed by 'ambari-server setup-https' command")
+
+ ambari_user = read_ambari_user()
+ if ambari_user:
+ adjust_directory_permissions(ambari_user)
+ return True
+ except (KeyError), e:
+ err = 'Property ' + str(e) + ' is not defined'
+ raise FatalException(1, err)
+ else:
+ warning = "setup-https is not enabled in silent mode."
+ raise NonFatalException(warning)
+
+def setup_component_https(component, command, property, alias):
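+  # Shared handler for component HTTPS setup (Ganglia, Nagios): imports the
+  # component certificate into (or deletes it from) the truststore and flips
+  # the corresponding boolean property.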
+
+ if not SILENT:
+ jdk_path = find_jdk()
+ if jdk_path is None:
+ err = "No JDK found, please run the \"ambari-server setup\" " \
+ "command to install a JDK automatically or install any " \
+ "JDK manually to " + JDK_INSTALL_DIR
+ raise FatalException(1, err)
+
+ properties = get_ambari_properties()
+
+ use_https = properties.get_property(property) in ['true']
+
+ if use_https:
+ if get_YN_input("Do you want to disable HTTPS for " + component + " [y/n] (n)? ", False):
+
+ truststore_path = get_truststore_path(properties)
+ truststore_password = get_truststore_password(properties)
+
+ run_component_https_cmd(get_delete_cert_command(jdk_path, alias, truststore_path, truststore_password))
+
+ properties.process_pair(property, "false")
+
+ else:
+ return
+ else:
+ if get_YN_input("Do you want to configure HTTPS for " + component + " [y/n] (y)? ", True):
+
+ truststore_type = get_truststore_type(properties)
+ truststore_path = get_truststore_path(properties)
+ truststore_password = get_truststore_password(properties)
+
+ run_os_command(get_delete_cert_command(jdk_path, alias, truststore_path, truststore_password))
+
+ import_cert_path = get_validated_filepath_input(\
+ "Enter path to " + component + " Certificate: ",\
+ "Certificate not found")
+
+ run_component_https_cmd(get_import_cert_command(jdk_path, alias, truststore_type, import_cert_path, truststore_path, truststore_password))
+
+ properties.process_pair(property, "true")
+
+ else:
+ return
+
+ conf_file = find_properties_file()
+ f = open(conf_file, 'w')
+ properties.store(f, "Changed by 'ambari-server " + command + "' command")
+
+ else:
+ print command + " is not enabled in silent mode."
+
+def setup_master_key():
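+  # Driver for "encrypt-passwords": collect the clear-text passwords,
+  # optionally reset and/or persist the master key, then re-save each
+  # password as a credential-store alias.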
+ if not is_root():
+ err = 'Ambari-server setup should be run with '\
+ 'root-level privileges'
+ raise FatalException(4, err)
+
+ properties = get_ambari_properties()
+ if properties == -1:
+ raise FatalException(1, "Failed to read properties file.")
+
+ db_windows_auth_prop = properties.get_property(JDBC_USE_INTEGRATED_AUTH_PROPERTY)
+ db_sql_auth = False if db_windows_auth_prop and db_windows_auth_prop.lower() == 'true' else True
+ db_password = properties.get_property(JDBC_PASSWORD_PROPERTY)
+ # Encrypt passwords cannot be called before setup
+ if db_sql_auth and not db_password:
+ print 'Please call "setup" before "encrypt-passwords". Exiting...'
+ return 1
+
+ # Check configuration for location of master key
+ isSecure = get_is_secure(properties)
+ (isPersisted, masterKeyFile) = get_is_persisted(properties)
+
+ # Read clear text password from file
+ if db_sql_auth and not is_alias_string(db_password) and os.path.isfile(db_password):
+ with open(db_password, 'r') as passwdfile:
+ db_password = passwdfile.read()
+
+ # Read clear text metrics password from file
+ db_metrics_windows_auth_prop = properties.get_property(JDBC_METRICS_USE_INTEGRATED_AUTH_PROPERTY)
+ db_metrics_sql_auth = False if db_metrics_windows_auth_prop and db_metrics_windows_auth_prop.lower() == 'true' else True
+ metrics_password = properties.get_property(JDBC_METRICS_PASSWORD_PROPERTY)
+ if db_metrics_sql_auth and not is_alias_string(metrics_password) and os.path.isfile(metrics_password):
+ with open(metrics_password, 'r') as passwdfile:
+ metrics_password = passwdfile.read()
+
+ ldap_password = properties.get_property(LDAP_MGR_PASSWORD_PROPERTY)
+ ts_password = properties.get_property(SSL_TRUSTSTORE_PASSWORD_PROPERTY)
+ resetKey = False
+ masterKey = None
+
+ if isSecure:
+ print "Password encryption is enabled."
+ resetKey = get_YN_input("Do you want to reset Master Key? [y/n] (n): ", False)
+
+ # For encrypting of only unencrypted passwords without resetting the key ask
+ # for master key if not persisted.
+ if isSecure and not isPersisted and not resetKey:
+ print "Master Key not persisted."
+ masterKey = get_original_master_key(properties)
+ pass
+
+ # Make sure both passwords are clear-text if master key is lost
+ if resetKey:
+ if not isPersisted:
+ print "Master Key not persisted."
+ masterKey = get_original_master_key(properties)
+ # Unable get the right master key or skipped question <enter>
+ if not masterKey:
+ print "To disable encryption, do the following:"
+ print "- Edit " + find_properties_file() + \
+ " and set " + SECURITY_IS_ENCRYPTION_ENABLED + " = " + "false."
+ err = "{0} is already encrypted. Please call {1} to store unencrypted" \
+ " password and call 'encrypt-passwords' again."
+ if db_sql_auth and db_password and is_alias_string(db_password):
+ print err.format('- Database password', "'" + SETUP_ACTION + "'")
+ if db_metrics_sql_auth and metrics_password and is_alias_string(metrics_password):
+ print err.format('- Metrics Database password', "'" + SETUP_ACTION + "'")
+ if ldap_password and is_alias_string(ldap_password):
+ print err.format('- LDAP manager password', "'" + LDAP_SETUP_ACTION + "'")
+ if ts_password and is_alias_string(ts_password):
+      print err.format('- TrustStore password', "'" + LDAP_SETUP_ACTION + "'")
+
+ return 1
+ pass
+ pass
+ pass
+
+ # Read back any encrypted passwords
+ if db_sql_auth and db_password and is_alias_string(db_password):
+ db_password = read_passwd_for_alias(JDBC_RCA_PASSWORD_ALIAS, masterKey)
+ if db_metrics_sql_auth and metrics_password and is_alias_string(metrics_password):
+ metrics_password = read_passwd_for_alias(JDBC_METRICS_PASSWORD_ALIAS, masterKey)
+ if ldap_password and is_alias_string(ldap_password):
+ ldap_password = read_passwd_for_alias(LDAP_MGR_PASSWORD_ALIAS, masterKey)
+ if ts_password and is_alias_string(ts_password):
+ ts_password = read_passwd_for_alias(SSL_TRUSTSTORE_PASSWORD_ALIAS, masterKey)
+ # Read master key, if non-secure or reset is true
+ if resetKey or not isSecure:
+ masterKey = read_master_key(resetKey)
+ persist = get_YN_input("Do you want to persist master key. If you choose "\
+ "not to persist, you need to provide the Master "\
+ "Key while starting the ambari server as an env "\
+ "variable named " + SECURITY_KEY_ENV_VAR_NAME +\
+ " or the start will prompt for the master key."
+ " Persist [y/n] (y)? ", True)
+ if persist:
+ save_master_key(masterKey, get_master_key_location(properties) + os.sep +
+ SECURITY_MASTER_KEY_FILENAME, persist)
+ elif not persist and masterKeyFile:
+ try:
+ os.remove(masterKeyFile)
+ print_info_msg("Deleting master key file at location: " + str(
+ masterKeyFile))
+ except Exception, e:
+ print 'ERROR: Could not remove master key file. %s' % e
+ # Blow up the credential store made with previous key, if any
+ store_file = get_credential_store_location(properties)
+ if os.path.exists(store_file):
+ try:
+ os.remove(store_file)
+ except:
+ print_warning_msg("Failed to remove credential store file.")
+ pass
+ pass
+ pass
+
+ propertyMap = {SECURITY_IS_ENCRYPTION_ENABLED: 'true'}
+ # Encrypt only un-encrypted passwords
+ if db_password and not is_alias_string(db_password):
+ retCode = save_passwd_for_alias(JDBC_RCA_PASSWORD_ALIAS, db_password, masterKey)
+ if retCode != 0:
+ print 'Failed to save secure database password.'
+ else:
+ propertyMap[JDBC_PASSWORD_PROPERTY] = get_alias_string(JDBC_RCA_PASSWORD_ALIAS)
+ remove_password_file(JDBC_PASSWORD_FILENAME)
+ if properties.get_property(JDBC_RCA_PASSWORD_FILE_PROPERTY):
+ propertyMap[JDBC_RCA_PASSWORD_FILE_PROPERTY] = get_alias_string(JDBC_RCA_PASSWORD_ALIAS)
+ pass
+
+ if metrics_password and not is_alias_string(metrics_password):
+ retCode = save_passwd_for_alias(JDBC_METRICS_PASSWORD_ALIAS, metrics_password, masterKey)
+ if retCode != 0:
+ print 'Failed to save secure metrics database password.'
+ else:
+ propertyMap[JDBC_METRICS_PASSWORD_PROPERTY] = get_alias_string(JDBC_METRICS_PASSWORD_ALIAS)
+ remove_password_file(JDBC_METRICS_PASSWORD_FILENAME)
+ pass
+
+ if ldap_password and not is_alias_string(ldap_password):
+ retCode = save_passwd_for_alias(LDAP_MGR_PASSWORD_ALIAS, ldap_password, masterKey)
+ if retCode != 0:
+ print 'Failed to save secure LDAP password.'
+ else:
+ propertyMap[LDAP_MGR_PASSWORD_PROPERTY] = get_alias_string(LDAP_MGR_PASSWORD_ALIAS)
+ pass
+
+ if ts_password and not is_alias_string(ts_password):
+ retCode = save_passwd_for_alias(SSL_TRUSTSTORE_PASSWORD_ALIAS, ts_password, masterKey)
+ if retCode != 0:
+ print 'Failed to save secure TrustStore password.'
+ else:
+ propertyMap[SSL_TRUSTSTORE_PASSWORD_PROPERTY] = get_alias_string(SSL_TRUSTSTORE_PASSWORD_ALIAS)
+ pass
+
+ update_properties_2(properties, propertyMap)
+
+ # Since files for store and master are created we need to ensure correct
+ # permissions
+ ambari_user = read_ambari_user()
+ if ambari_user:
+ adjust_directory_permissions(ambari_user)
+
+ return 0
+
+def setup_ambari_krb5_jaas():
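+  # Rewrite keyTab= and principal= in the krb5 JAAS config in place with the
+  # values entered by the user.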
+ jaas_conf_file = search_file(SECURITY_KERBEROS_JASS_FILENAME, get_conf_dir())
+ if os.path.exists(jaas_conf_file):
+ print 'Setting up Ambari kerberos JAAS configuration to access ' +\
+ 'secured Hadoop daemons...'
+ principal = get_validated_string_input('Enter ambari server\'s kerberos '
+ 'principal name (ambari@EXAMPLE.COM): ', 'ambari@EXAMPLE.COM', '.*', '', False,
+ False)
+ keytab = get_validated_string_input('Enter keytab path for ambari '
+ 'server\'s kerberos principal: ',
+ '/etc/security/keytabs/ambari.keytab', '.*', False, False,
+ validatorFunction=is_valid_filepath)
+
+ for line in fileinput.FileInput(jaas_conf_file, inplace=1):
+ line = re.sub('keyTab=.*$', 'keyTab="' + keytab + '"', line)
+ line = re.sub('principal=.*$', 'principal="' + principal + '"', line)
+ print line,
+
+ else:
+ raise NonFatalException('No jaas config file found at location: ' +
+ jaas_conf_file)
+
+def setup_ldap():
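+  # Interactive "setup-ldap": prompt for the required and optional LDAP
+  # properties, optionally configure a custom TrustStore for LDAPS, then
+  # persist everything (encrypting passwords when encryption is enabled).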
+ if not is_root():
+ err = 'Ambari-server setup-ldap should be run with ' \
+ 'root-level privileges'
+ raise FatalException(4, err)
+
+ properties = get_ambari_properties()
+ isSecure = get_is_secure(properties)
+ # python2.x dict is not ordered
+ ldap_property_list_reqd = ["authentication.ldap.primaryUrl",
+ "authentication.ldap.secondaryUrl",
+ "authentication.ldap.useSSL",
+ "authentication.ldap.usernameAttribute",
+ "authentication.ldap.baseDn",
+ "authentication.ldap.bindAnonymously"]
+
+ ldap_property_list_opt = ["authentication.ldap.managerDn",
+ LDAP_MGR_PASSWORD_PROPERTY,
+ SSL_TRUSTSTORE_TYPE_PROPERTY,
+ SSL_TRUSTSTORE_PATH_PROPERTY,
+ SSL_TRUSTSTORE_PASSWORD_PROPERTY]
+
+ ldap_property_list_truststore=[SSL_TRUSTSTORE_TYPE_PROPERTY,
+ SSL_TRUSTSTORE_PATH_PROPERTY,
+ SSL_TRUSTSTORE_PASSWORD_PROPERTY]
+
+ ldap_property_list_passwords=[LDAP_MGR_PASSWORD_PROPERTY,
+ SSL_TRUSTSTORE_PASSWORD_PROPERTY]
+
+ LDAP_PRIMARY_URL_DEFAULT = get_value_from_properties(properties, ldap_property_list_reqd[0])
+ LDAP_SECONDARY_URL_DEFAULT = get_value_from_properties(properties, ldap_property_list_reqd[1])
+ LDAP_USE_SSL_DEFAULT = get_value_from_properties(properties, ldap_property_list_reqd[2], "false")
+ LDAP_USER_ATT_DEFAULT = get_value_from_properties(properties, ldap_property_list_reqd[3], "uid")
+ LDAP_BASE_DN_DEFAULT = get_value_from_properties(properties, ldap_property_list_reqd[4])
+ LDAP_BIND_DEFAULT = get_value_from_properties(properties, ldap_property_list_reqd[5], "false")
+ LDAP_MGR_DN_DEFAULT = get_value_from_properties(properties, ldap_property_list_opt[0])
+ SSL_TRUSTSTORE_TYPE_DEFAULT = get_value_from_properties(properties, SSL_TRUSTSTORE_TYPE_PROPERTY, "jks")
+ SSL_TRUSTSTORE_PATH_DEFAULT = get_value_from_properties(properties, SSL_TRUSTSTORE_PATH_PROPERTY)
+
+
+ ldap_properties_map_reqd =\
+ {
+ ldap_property_list_reqd[0]:(LDAP_PRIMARY_URL_DEFAULT, "Primary URL* {{host:port}} {0}: ".format(get_prompt_default(LDAP_PRIMARY_URL_DEFAULT)), False),\
+ ldap_property_list_reqd[1]:(LDAP_SECONDARY_URL_DEFAULT, "Secondary URL {{host:port}} {0}: ".format(get_prompt_default(LDAP_SECONDARY_URL_DEFAULT)), True),\
+ ldap_property_list_reqd[2]:(LDAP_USE_SSL_DEFAULT, "Use SSL* [true/false] {0}: ".format(get_prompt_default(LDAP_USE_SSL_DEFAULT)), False),\
+ ldap_property_list_reqd[3]:(LDAP_USER_ATT_DEFAULT, "User name attribute* {0}: ".format(get_prompt_default(LDAP_USER_ATT_DEFAULT)), False),\
+ ldap_property_list_reqd[4]:(LDAP_BASE_DN_DEFAULT, "Base DN* {0}: ".format(get_prompt_default(LDAP_BASE_DN_DEFAULT)), False),\
+ ldap_property_list_reqd[5]:(LDAP_BIND_DEFAULT, "Bind anonymously* [true/false] {0}: ".format(get_prompt_default(LDAP_BIND_DEFAULT)), False)\
+ }
+
+ ldap_property_value_map = {}
+ for idx, key in enumerate(ldap_property_list_reqd):
+ if idx in [0, 1]:
+ pattern = REGEX_HOSTNAME_PORT
+ elif idx in [2, 5]:
+ pattern = REGEX_TRUE_FALSE
+ else:
+ pattern = REGEX_ANYTHING
+ input = get_validated_string_input(ldap_properties_map_reqd[key][1],
+ ldap_properties_map_reqd[key][0], pattern,
+ "Invalid characters in the input!", False, ldap_properties_map_reqd[key][2])
+ if input is not None and input != "":
+ ldap_property_value_map[key] = input
+
+ bindAnonymously = ldap_property_value_map["authentication.ldap.bindAnonymously"]
+ anonymous = (bindAnonymously and bindAnonymously.lower() == 'true')
+ mgr_password = None
+ # Ask for manager credentials only if bindAnonymously is false
+ if not anonymous:
+ username = get_validated_string_input("Manager DN* {0}: ".format(
+ get_prompt_default(LDAP_MGR_DN_DEFAULT)), LDAP_MGR_DN_DEFAULT, ".*",
+ "Invalid characters in the input!", False, False)
+ ldap_property_value_map[LDAP_MGR_USERNAME_PROPERTY] = username
+ mgr_password = configure_ldap_password()
+ ldap_property_value_map[LDAP_MGR_PASSWORD_PROPERTY] = mgr_password
+
+ useSSL = ldap_property_value_map["authentication.ldap.useSSL"]
+ ldaps = (useSSL and useSSL.lower() == 'true')
+ ts_password = None
+
+ if ldaps:
+ truststore_default = "n"
+ truststore_set = bool(SSL_TRUSTSTORE_PATH_DEFAULT)
+ if truststore_set:
+ truststore_default = "y"
+ custom_trust_store = get_YN_input("Do you want to provide custom TrustStore for Ambari [y/n] ({0})?".
+ format(truststore_default),
+ truststore_set)
+ if custom_trust_store:
+ ts_type = get_validated_string_input(
+ "TrustStore type [jks/jceks/pkcs12] {0}:".format(get_prompt_default(SSL_TRUSTSTORE_TYPE_DEFAULT)),
+ SSL_TRUSTSTORE_TYPE_DEFAULT,
+ "^(jks|jceks|pkcs12)?$", "Wrong type", False)
+ ts_path = None
+ while True:
+ ts_path = get_validated_string_input(
+ "Path to TrustStore file {0}:".format(get_prompt_default(SSL_TRUSTSTORE_PATH_DEFAULT)),
+ SSL_TRUSTSTORE_PATH_DEFAULT,
+ ".*", False, False)
+ if os.path.exists(ts_path):
+ break
+ else:
+ print 'File not found.'
+
+ ts_password = read_password("", ".*", "Password for TrustStore:", "Invalid characters in password")
+
+ ldap_property_value_map[SSL_TRUSTSTORE_TYPE_PROPERTY] = ts_type
+ ldap_property_value_map[SSL_TRUSTSTORE_PATH_PROPERTY] = ts_path
+ ldap_property_value_map[SSL_TRUSTSTORE_PASSWORD_PROPERTY] = ts_password
+ pass
+ else:
+ properties.removeOldProp(SSL_TRUSTSTORE_TYPE_PROPERTY)
+ properties.removeOldProp(SSL_TRUSTSTORE_PATH_PROPERTY)
+ properties.removeOldProp(SSL_TRUSTSTORE_PASSWORD_PROPERTY)
+ pass
+ pass
+
+ print '=' * 20
+ print 'Review Settings'
+ print '=' * 20
+ for property in ldap_property_list_reqd:
+ if property in ldap_property_value_map:
+ print("%s: %s" % (property, ldap_property_value_map[property]))
+
+ for property in ldap_property_list_opt:
+ if ldap_property_value_map.has_key(property):
+ if property not in ldap_property_list_passwords:
+ print("%s: %s" % (property, ldap_property_value_map[property]))
+ else:
+ print("%s: %s" % (property, BLIND_PASSWORD))
+
+ save_settings = get_YN_input("Save settings [y/n] (y)? ", True)
+
+ if save_settings:
+ ldap_property_value_map[CLIENT_SECURITY_KEY] = 'ldap'
+ if isSecure:
+ if mgr_password:
+ encrypted_passwd = encrypt_password(LDAP_MGR_PASSWORD_ALIAS, mgr_password)
+ if mgr_password != encrypted_passwd:
+ ldap_property_value_map[LDAP_MGR_PASSWORD_PROPERTY] = encrypted_passwd
+ pass
+ if ts_password:
+ encrypted_passwd = encrypt_password(SSL_TRUSTSTORE_PASSWORD_ALIAS, ts_password)
+ if ts_password != encrypted_passwd:
+ ldap_property_value_map[SSL_TRUSTSTORE_PASSWORD_PROPERTY] = encrypted_passwd
+ pass
+ pass
+
+ # Persisting values
+ update_properties_2(properties, ldap_property_value_map)
+ print 'Saving...done'
+
+ return 0
diff --git a/ambari-server/src/main/python/ambari_server/userInput.py b/ambari-server/src/main/python/ambari_server/userInput.py
new file mode 100644
index 0000000000..7a35831d0f
--- /dev/null
+++ b/ambari-server/src/main/python/ambari_server/userInput.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import os
+import getpass
+import re
+
+from ambari_commons.logging_utils import *
+
+
+#
+# Gets the y/n input.
+#
+# return True if 'y' or False if 'n'
+#
+from ambari_commons.os_utils import get_password
+
+
+def get_YN_input(prompt, default):
+ yes = set(['yes', 'ye', 'y'])
+ no = set(['no', 'n'])
+ return get_choice_string_input(prompt, default, yes, no)
+
+
+def get_choice_string_input(prompt, default, firstChoice, secondChoice):
+ if SILENT:
+ print(prompt)
+ return default
+ choice = raw_input(prompt).lower()
+ if choice in firstChoice:
+ return True
+ elif choice in secondChoice:
+ return False
+  elif choice == "": # just Enter pressed, fall back to the default
+ return default
+ else:
+ print "input not recognized, please try again: "
+ return get_choice_string_input(prompt, default, firstChoice, secondChoice)
+
+
+def get_validated_string_input(prompt, default, pattern, description,
+ is_pass, allowEmpty=True, validatorFunction=None):
+
+ input = ""
+ while not input:
+ if SILENT:
+ print (prompt)
+ input = default
+ elif is_pass:
+ input = get_password(prompt)
+ else:
+ input = raw_input(prompt)
+ if not input.strip():
+ # Empty input - if default available use default
+ if not allowEmpty and not default:
+        msg = description if description else 'Property'
+ msg += ' cannot be blank.'
+ print msg
+ input = ""
+ continue
+ else:
+ input = default
+ if validatorFunction:
+ if not validatorFunction(input):
+ input = ""
+ continue
+ break # done here and picking up default
+ else:
+      if pattern is not None and not re.search(pattern, input.strip()):
+ print description
+ input = ""
+
+ if validatorFunction:
+ if not validatorFunction(input):
+ input = ""
+ continue
+ return input
+
+def get_validated_filepath_input(prompt, description, default=None):
+ input = False
+ while not input:
+ if SILENT:
+ print (prompt)
+ return default
+ else:
+      # raw_input never returns None, so strip and re-prompt on empty input
+      input = raw_input(prompt).strip()
+      if input and os.path.isfile(input):
+        return input
+      else:
+        print description
+        input = False
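Taken together these helpers back every interactive prompt in the server
setup code, including the LDAP flow above. A short sketch of how a caller
combines them (the prompt strings and defaults here are illustrative only):

    from ambari_server.userInput import get_YN_input, get_validated_string_input

    # y/n prompt: returns the boolean default if the user just presses Enter
    if get_YN_input("Use LDAP [y/n] (n)? ", False):
        # regex-validated prompt: re-asks until the pattern matches;
        # the trailing flags are is_pass and allowEmpty
        port = get_validated_string_input("LDAP port (389): ", "389",
                                          "^[0-9]{1,5}$",
                                          "Port must be numeric",
                                          False, False)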
diff --git a/ambari-server/src/main/python/ambari_server/utils.py b/ambari-server/src/main/python/ambari_server/utils.py
index 474caa446a..23bd9d6c8e 100644
--- a/ambari-server/src/main/python/ambari_server/utils.py
+++ b/ambari-server/src/main/python/ambari_server/utils.py
@@ -18,7 +18,9 @@ See the License for the specific language governing permissions and
limitations under the License.
'''
import os
+import re
import signal
+import socket
import sys
import time
import glob
@@ -71,6 +73,22 @@ def check_exitcode(exitcode_file_path):
return exitcode
+def save_pid(pid, pidfile):
+  """
+  Save pid to pidfile; failures to write the file are deliberately ignored.
+  """
+  try:
+    with open(pidfile, "w") as pfile:
+      pfile.write("%s\n" % pid)
+  except IOError:
+    pass
+
+
def save_main_pid_ex(pids, pidfile, exclude_list=[], kill_exclude_list=False):
"""
Save pid which is not included to exclude_list to pidfile.
@@ -222,3 +240,25 @@ def get_postgre_running_status(OS_FAMILY):
return os.path.join(get_ubuntu_pg_version(), "main")
else:
return PG_STATUS_RUNNING_DEFAULT
+
+
+def compare_versions(version1, version2):
+ def normalize(v):
+ return [int(x) for x in re.sub(r'(\.0+)*$', '', v).split(".")]
+ return cmp(normalize(version1), normalize(version2))
+
+
+def check_reverse_lookup():
+ """
+ Check if host fqdn resolves to current host ip
+ """
+ try:
+ host_name = socket.gethostname().lower()
+ host_ip = socket.gethostbyname(host_name)
+ host_fqdn = socket.getfqdn().lower()
+ fqdn_ip = socket.gethostbyname(host_fqdn)
+ return host_ip == fqdn_ip
+ except socket.error:
+ pass
+ return False
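compare_versions normalizes away trailing ".0" components before comparing
the parts numerically, and check_reverse_lookup simply compares the IPs that
the hostname and the FQDN resolve to. A quick sanity check of both (assuming
the module is importable as ambari_server.utils):

    from ambari_server.utils import compare_versions, check_reverse_lookup

    assert compare_versions("1.3.0", "1.3") == 0   # trailing zeros ignored
    assert compare_versions("1.10", "1.9") > 0     # numeric, not lexicographic
    print check_reverse_lookup()  # True when both names resolve to one IP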
diff --git a/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
new file mode 100644
index 0000000000..d1b643529c
--- /dev/null
+++ b/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
@@ -0,0 +1,694 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Schema population script for $(AMBARIDBNAME)
+
+Use this script in sqlcmd mode, setting the environment variables like this:
+set AMBARIDBNAME=ambari
+
+sqlcmd -S localhost\SQLEXPRESS -i C:\app\ambari-server-1.3.0-SNAPSHOT\resources\Ambari-DDL-SQLServer-CREATE.sql
+*/
+
+use [$(AMBARIDBNAME)]
+GO
+
+------the database itself is created by Ambari-DDL-SQLServer-CREATELOCAL.sql------
+
+------create tables---------
+CREATE TABLE clusters (cluster_id BIGINT NOT NULL, resource_id BIGINT NOT NULL, cluster_info VARCHAR(255) NOT NULL, cluster_name VARCHAR(100) NOT NULL UNIQUE, provisioning_state VARCHAR(255) NOT NULL DEFAULT 'INIT', desired_cluster_state VARCHAR(255) NOT NULL, desired_stack_version VARCHAR(255) NOT NULL, PRIMARY KEY CLUSTERED (cluster_id));
+CREATE TABLE clusterconfig (config_id BIGINT NOT NULL, version_tag VARCHAR(255) NOT NULL, version BIGINT NOT NULL, type_name VARCHAR(255) NOT NULL, cluster_id BIGINT NOT NULL, config_data VARCHAR(MAX) NOT NULL, config_attributes VARCHAR(MAX), create_timestamp BIGINT NOT NULL, PRIMARY KEY CLUSTERED (config_id));
+CREATE TABLE serviceconfig (service_config_id BIGINT NOT NULL, cluster_id BIGINT NOT NULL, service_name VARCHAR(255) NOT NULL, version BIGINT NOT NULL, create_timestamp BIGINT NOT NULL, user_name VARCHAR(255) NOT NULL DEFAULT '_db', group_id BIGINT, note VARCHAR(MAX), PRIMARY KEY CLUSTERED (service_config_id));
+CREATE TABLE serviceconfighosts (service_config_id BIGINT NOT NULL, hostname VARCHAR(255) NOT NULL, PRIMARY KEY CLUSTERED (service_config_id, hostname));
+CREATE TABLE serviceconfigmapping (service_config_id BIGINT NOT NULL, config_id BIGINT NOT NULL, PRIMARY KEY CLUSTERED (service_config_id, config_id));
+CREATE TABLE clusterconfigmapping (cluster_id BIGINT NOT NULL, type_name VARCHAR(255) NOT NULL, version_tag VARCHAR(255) NOT NULL, create_timestamp BIGINT NOT NULL, selected INTEGER NOT NULL DEFAULT 0, user_name VARCHAR(255) NOT NULL DEFAULT '_db', PRIMARY KEY CLUSTERED (cluster_id, type_name, create_timestamp));
+CREATE TABLE clusterservices (service_name VARCHAR(255) NOT NULL, cluster_id BIGINT NOT NULL, service_enabled INTEGER NOT NULL, PRIMARY KEY CLUSTERED (service_name, cluster_id));
+CREATE TABLE clusterstate (cluster_id BIGINT NOT NULL, current_cluster_state VARCHAR(255) NOT NULL, current_stack_version VARCHAR(255) NOT NULL, PRIMARY KEY CLUSTERED (cluster_id));
+CREATE TABLE hostcomponentdesiredstate (cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, desired_stack_version VARCHAR(255) NOT NULL, desired_state VARCHAR(255) NOT NULL, host_name VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, admin_state VARCHAR(32), maintenance_state VARCHAR(32) NOT NULL, restart_required BIT NOT NULL DEFAULT 0, PRIMARY KEY CLUSTERED (cluster_id, component_name, host_name, service_name));
+CREATE TABLE hostcomponentstate (cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, current_stack_version VARCHAR(255) NOT NULL, current_state VARCHAR(255) NOT NULL, host_name VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, PRIMARY KEY CLUSTERED (cluster_id, component_name, host_name, service_name));
+CREATE TABLE hosts (host_name VARCHAR(255) NOT NULL, cpu_count INTEGER NOT NULL, ph_cpu_count INTEGER, cpu_info VARCHAR(255) NOT NULL, discovery_status VARCHAR(2000) NOT NULL, host_attributes VARCHAR(MAX) NOT NULL, ipv4 VARCHAR(255), ipv6 VARCHAR(255), public_host_name VARCHAR(255), last_registration_time BIGINT NOT NULL, os_arch VARCHAR(255) NOT NULL, os_info VARCHAR(1000) NOT NULL, os_type VARCHAR(255) NOT NULL, rack_info VARCHAR(255) NOT NULL, total_mem BIGINT NOT NULL, PRIMARY KEY CLUSTERED (host_name));
+CREATE TABLE hoststate (agent_version VARCHAR(255) NOT NULL, available_mem BIGINT NOT NULL, current_state VARCHAR(255) NOT NULL, health_status VARCHAR(255), host_name VARCHAR(255) NOT NULL, time_in_state BIGINT NOT NULL, maintenance_state VARCHAR(512), PRIMARY KEY CLUSTERED (host_name));
+CREATE TABLE servicecomponentdesiredstate (component_name VARCHAR(255) NOT NULL, cluster_id BIGINT NOT NULL, desired_stack_version VARCHAR(255) NOT NULL, desired_state VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, PRIMARY KEY CLUSTERED (component_name, cluster_id, service_name));
+CREATE TABLE servicedesiredstate (cluster_id BIGINT NOT NULL, desired_host_role_mapping INTEGER NOT NULL, desired_stack_version VARCHAR(255) NOT NULL, desired_state VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, maintenance_state VARCHAR(32) NOT NULL, PRIMARY KEY CLUSTERED (cluster_id, service_name));
+CREATE TABLE users (user_id INTEGER, principal_id BIGINT NOT NULL, ldap_user INTEGER NOT NULL DEFAULT 0, user_name VARCHAR(255) NOT NULL, create_time DATETIME DEFAULT GETDATE(), user_password VARCHAR(255), active INTEGER NOT NULL DEFAULT 1, PRIMARY KEY CLUSTERED (user_id), UNIQUE (ldap_user, user_name));
+CREATE TABLE groups (group_id INTEGER, principal_id BIGINT NOT NULL, group_name VARCHAR(255) NOT NULL, ldap_group INTEGER NOT NULL DEFAULT 0, PRIMARY KEY (group_id));
+CREATE TABLE members (member_id INTEGER, group_id INTEGER NOT NULL, user_id INTEGER NOT NULL, PRIMARY KEY (member_id));
+CREATE TABLE execution_command (command VARBINARY(8000), task_id BIGINT NOT NULL, PRIMARY KEY CLUSTERED (task_id));
+CREATE TABLE host_role_command (task_id BIGINT NOT NULL, attempt_count SMALLINT NOT NULL, event VARCHAR(MAX) NOT NULL, exitcode INTEGER NOT NULL, host_name VARCHAR(255) NOT NULL, last_attempt_time BIGINT NOT NULL, request_id BIGINT NOT NULL, role VARCHAR(255), stage_id BIGINT NOT NULL, start_time BIGINT NOT NULL, end_time BIGINT, status VARCHAR(255), std_error VARBINARY(max), std_out VARBINARY(max), output_log VARCHAR(255) NULL, error_log VARCHAR(255) NULL, structured_out VARBINARY(max), role_command VARCHAR(255), command_detail VARCHAR(255), custom_command_name VARCHAR(255), PRIMARY KEY CLUSTERED (task_id));
+CREATE TABLE role_success_criteria (role VARCHAR(255) NOT NULL, request_id BIGINT NOT NULL, stage_id BIGINT NOT NULL, success_factor FLOAT NOT NULL, PRIMARY KEY CLUSTERED (role, request_id, stage_id));
+CREATE TABLE stage (stage_id BIGINT NOT NULL, request_id BIGINT NOT NULL, cluster_id BIGINT NOT NULL, log_info VARCHAR(255) NOT NULL, request_context VARCHAR(255), cluster_host_info VARBINARY(8000) NOT NULL, command_params VARBINARY(8000), host_params VARBINARY(8000), PRIMARY KEY CLUSTERED (stage_id, request_id));
+CREATE TABLE request (request_id BIGINT NOT NULL, cluster_id BIGINT, command_name VARCHAR(255), create_time BIGINT NOT NULL, end_time BIGINT NOT NULL, exclusive_execution BIT NOT NULL DEFAULT 0, inputs VARBINARY(8000), request_context VARCHAR(255), request_type VARCHAR(255), request_schedule_id BIGINT, start_time BIGINT NOT NULL, status VARCHAR(255), PRIMARY KEY CLUSTERED (request_id));
+CREATE TABLE requestresourcefilter (filter_id BIGINT NOT NULL, request_id BIGINT NOT NULL, service_name VARCHAR(255), component_name VARCHAR(255), hosts VARBINARY(8000), PRIMARY KEY CLUSTERED (filter_id));
+CREATE TABLE requestoperationlevel (operation_level_id BIGINT NOT NULL, request_id BIGINT NOT NULL, level_name VARCHAR(255), cluster_name VARCHAR(255), service_name VARCHAR(255), host_component_name VARCHAR(255), host_name VARCHAR(255), PRIMARY KEY CLUSTERED (operation_level_id));
+CREATE TABLE ClusterHostMapping (cluster_id BIGINT NOT NULL, host_name VARCHAR(255) NOT NULL, PRIMARY KEY CLUSTERED (cluster_id, host_name));
+CREATE TABLE key_value_store ([key] VARCHAR(255), [value] VARCHAR(MAX), PRIMARY KEY CLUSTERED ([key]));
+CREATE TABLE hostconfigmapping (cluster_id BIGINT NOT NULL, host_name VARCHAR(255) NOT NULL, type_name VARCHAR(255) NOT NULL, version_tag VARCHAR(255) NOT NULL, service_name VARCHAR(255), create_timestamp BIGINT NOT NULL, selected INTEGER NOT NULL DEFAULT 0, user_name VARCHAR(255) NOT NULL DEFAULT '_db', PRIMARY KEY CLUSTERED (cluster_id, host_name, type_name, create_timestamp));
+CREATE TABLE metainfo ([metainfo_key] VARCHAR(255), [metainfo_value] VARCHAR(255), PRIMARY KEY CLUSTERED ([metainfo_key]));
+CREATE TABLE ambari_sequences (sequence_name VARCHAR(255) PRIMARY KEY, [sequence_value] BIGINT NOT NULL);
+CREATE TABLE configgroup (group_id BIGINT, cluster_id BIGINT NOT NULL, group_name VARCHAR(255) NOT NULL, tag VARCHAR(1024) NOT NULL, description VARCHAR(1024), create_timestamp BIGINT NOT NULL, service_name VARCHAR(255), PRIMARY KEY(group_id));
+CREATE TABLE confgroupclusterconfigmapping (config_group_id BIGINT NOT NULL, cluster_id BIGINT NOT NULL, config_type VARCHAR(255) NOT NULL, version_tag VARCHAR(255) NOT NULL, user_name VARCHAR(255) DEFAULT '_db', create_timestamp BIGINT NOT NULL, PRIMARY KEY(config_group_id, cluster_id, config_type));
+CREATE TABLE configgrouphostmapping (config_group_id BIGINT NOT NULL, host_name VARCHAR(255) NOT NULL, PRIMARY KEY(config_group_id, host_name));
+CREATE TABLE requestschedule (schedule_id bigint, cluster_id bigint NOT NULL, description varchar(255), status varchar(255), batch_separation_seconds smallint, batch_toleration_limit smallint, create_user varchar(255), create_timestamp bigint, update_user varchar(255), update_timestamp bigint, minutes varchar(10), hours varchar(10), days_of_month varchar(10), month varchar(10), day_of_week varchar(10), yearToSchedule varchar(10), startTime varchar(50), endTime varchar(50), last_execution_status varchar(255), PRIMARY KEY(schedule_id));
+CREATE TABLE requestschedulebatchrequest (schedule_id bigint, batch_id bigint, request_id bigint, request_type varchar(255), request_uri varchar(1024), request_body VARBINARY(8000), request_status varchar(255), return_code smallint, return_message text, PRIMARY KEY(schedule_id, batch_id));
+CREATE TABLE blueprint (blueprint_name VARCHAR(255) NOT NULL, stack_name VARCHAR(255) NOT NULL, stack_version VARCHAR(255) NOT NULL, PRIMARY KEY(blueprint_name));
+CREATE TABLE hostgroup (blueprint_name VARCHAR(255) NOT NULL, name VARCHAR(255) NOT NULL, cardinality VARCHAR(255) NOT NULL, PRIMARY KEY(blueprint_name, name));
+CREATE TABLE hostgroup_component (blueprint_name VARCHAR(255) NOT NULL, hostgroup_name VARCHAR(255) NOT NULL, name VARCHAR(255) NOT NULL, PRIMARY KEY(blueprint_name, hostgroup_name, name));
+CREATE TABLE blueprint_configuration (blueprint_name varchar(255) NOT NULL, type_name varchar(255) NOT NULL, config_data text NOT NULL, config_attributes VARCHAR(8000), PRIMARY KEY(blueprint_name, type_name));
+CREATE TABLE hostgroup_configuration (blueprint_name VARCHAR(255) NOT NULL, hostgroup_name VARCHAR(255) NOT NULL, type_name VARCHAR(255) NOT NULL, config_data TEXT NOT NULL, config_attributes TEXT, PRIMARY KEY(blueprint_name, hostgroup_name, type_name));
+CREATE TABLE viewmain (view_name VARCHAR(255) NOT NULL, label VARCHAR(255), description VARCHAR(2048), version VARCHAR(255), resource_type_id INTEGER NOT NULL, icon VARCHAR(255), icon64 VARCHAR(255), archive VARCHAR(255), mask VARCHAR(255), system_view BIT NOT NULL DEFAULT 0, PRIMARY KEY(view_name));
+CREATE TABLE viewinstancedata (view_instance_id BIGINT, view_name VARCHAR(255) NOT NULL, view_instance_name VARCHAR(255) NOT NULL, name VARCHAR(255) NOT NULL, user_name VARCHAR(255) NOT NULL, value VARCHAR(2000) NOT NULL, PRIMARY KEY(view_instance_id, name, user_name));
+CREATE TABLE viewinstance (view_instance_id BIGINT, resource_id BIGINT NOT NULL, view_name VARCHAR(255) NOT NULL, name VARCHAR(255) NOT NULL, label VARCHAR(255), description VARCHAR(2048), visible CHAR(1), icon VARCHAR(255), icon64 VARCHAR(255), xml_driven CHAR(1), PRIMARY KEY(view_instance_id));
+CREATE TABLE viewinstanceproperty (view_name VARCHAR(255) NOT NULL, view_instance_name VARCHAR(255) NOT NULL, name VARCHAR(255) NOT NULL, value VARCHAR(2000) NOT NULL, PRIMARY KEY(view_name, view_instance_name, name));
+CREATE TABLE viewparameter (view_name VARCHAR(255) NOT NULL, name VARCHAR(255) NOT NULL, description VARCHAR(2048), required CHAR(1), masked CHAR(1), PRIMARY KEY(view_name, name));
+CREATE TABLE viewresource (view_name VARCHAR(255) NOT NULL, name VARCHAR(255) NOT NULL, plural_name VARCHAR(255), id_property VARCHAR(255), subResource_names VARCHAR(255), provider VARCHAR(255), service VARCHAR(255), resource VARCHAR(255), PRIMARY KEY(view_name, name));
+CREATE TABLE viewentity (id BIGINT NOT NULL, view_name VARCHAR(255) NOT NULL, view_instance_name VARCHAR(255) NOT NULL, class_name VARCHAR(255) NOT NULL, id_property VARCHAR(255), PRIMARY KEY(id));
+CREATE TABLE adminresourcetype (resource_type_id INTEGER NOT NULL, resource_type_name VARCHAR(255) NOT NULL, PRIMARY KEY(resource_type_id));
+CREATE TABLE adminresource (resource_id BIGINT NOT NULL, resource_type_id INTEGER NOT NULL, PRIMARY KEY(resource_id));
+CREATE TABLE adminprincipaltype (principal_type_id INTEGER NOT NULL, principal_type_name VARCHAR(255) NOT NULL, PRIMARY KEY(principal_type_id));
+CREATE TABLE adminprincipal (principal_id BIGINT NOT NULL, principal_type_id INTEGER NOT NULL, PRIMARY KEY(principal_id));
+CREATE TABLE adminpermission (permission_id BIGINT NOT NULL, permission_name VARCHAR(255) NOT NULL, resource_type_id INTEGER NOT NULL, PRIMARY KEY(permission_id));
+CREATE TABLE adminprivilege (privilege_id BIGINT, permission_id BIGINT NOT NULL, resource_id BIGINT NOT NULL, principal_id BIGINT NOT NULL, PRIMARY KEY(privilege_id));
+
+-- altering tables by creating unique constraints----------
+--------altering tables to add constraints----------
+ALTER TABLE users ADD CONSTRAINT UNQ_users_0 UNIQUE (user_name, ldap_user);
+ALTER TABLE groups ADD CONSTRAINT UNQ_groups_0 UNIQUE (group_name, ldap_group);
+ALTER TABLE members ADD CONSTRAINT UNQ_members_0 UNIQUE (group_id, user_id);
+ALTER TABLE clusterconfig ADD CONSTRAINT UQ_config_type_tag UNIQUE (cluster_id, type_name, version_tag);
+ALTER TABLE clusterconfig ADD CONSTRAINT UQ_config_type_version UNIQUE (cluster_id, type_name, version);
+ALTER TABLE viewinstance ADD CONSTRAINT UQ_viewinstance_name UNIQUE (view_name, name);
+ALTER TABLE viewinstance ADD CONSTRAINT UQ_viewinstance_name_id UNIQUE (view_instance_id, view_name, name);
+ALTER TABLE serviceconfig ADD CONSTRAINT UQ_scv_service_version UNIQUE (cluster_id, service_name, version);
+ALTER TABLE adminpermission ADD CONSTRAINT UQ_perm_name_resource_type_id UNIQUE (permission_name, resource_type_id);
+
+-- altering tables by creating foreign keys----------
+ALTER TABLE members ADD CONSTRAINT FK_members_group_id FOREIGN KEY (group_id) REFERENCES groups (group_id);
+ALTER TABLE members ADD CONSTRAINT FK_members_user_id FOREIGN KEY (user_id) REFERENCES users (user_id);
+ALTER TABLE clusterconfig ADD CONSTRAINT FK_clusterconfig_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE clusterservices ADD CONSTRAINT FK_clusterservices_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE clusterconfigmapping ADD CONSTRAINT clusterconfigmappingcluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE clusterstate ADD CONSTRAINT FK_clusterstate_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE hostcomponentdesiredstate ADD CONSTRAINT hstcmponentdesiredstatehstname FOREIGN KEY (host_name) REFERENCES hosts (host_name);
+ALTER TABLE hostcomponentdesiredstate ADD CONSTRAINT hstcmpnntdesiredstatecmpnntnme FOREIGN KEY (component_name, cluster_id, service_name) REFERENCES servicecomponentdesiredstate (component_name, cluster_id, service_name);
+ALTER TABLE hostcomponentstate ADD CONSTRAINT hstcomponentstatecomponentname FOREIGN KEY (component_name, cluster_id, service_name) REFERENCES servicecomponentdesiredstate (component_name, cluster_id, service_name);
+ALTER TABLE hostcomponentstate ADD CONSTRAINT hostcomponentstate_host_name FOREIGN KEY (host_name) REFERENCES hosts (host_name);
+ALTER TABLE hoststate ADD CONSTRAINT FK_hoststate_host_name FOREIGN KEY (host_name) REFERENCES hosts (host_name);
+ALTER TABLE servicecomponentdesiredstate ADD CONSTRAINT srvccmponentdesiredstatesrvcnm FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id);
+ALTER TABLE servicedesiredstate ADD CONSTRAINT servicedesiredstateservicename FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id);
+ALTER TABLE execution_command ADD CONSTRAINT FK_execution_command_task_id FOREIGN KEY (task_id) REFERENCES host_role_command (task_id);
+ALTER TABLE host_role_command ADD CONSTRAINT FK_host_role_command_stage_id FOREIGN KEY (stage_id, request_id) REFERENCES stage (stage_id, request_id);
+ALTER TABLE host_role_command ADD CONSTRAINT FK_host_role_command_host_name FOREIGN KEY (host_name) REFERENCES hosts (host_name);
+ALTER TABLE role_success_criteria ADD CONSTRAINT role_success_criteria_stage_id FOREIGN KEY (stage_id, request_id) REFERENCES stage (stage_id, request_id);
+ALTER TABLE stage ADD CONSTRAINT FK_stage_request_id FOREIGN KEY (request_id) REFERENCES request (request_id);
+ALTER TABLE request ADD CONSTRAINT FK_request_schedule_id FOREIGN KEY (request_schedule_id) REFERENCES requestschedule (schedule_id);
+ALTER TABLE ClusterHostMapping ADD CONSTRAINT ClusterHostMapping_cluster_id FOREIGN KEY (host_name) REFERENCES hosts (host_name);
+ALTER TABLE ClusterHostMapping ADD CONSTRAINT ClusterHostMapping_host_name FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE hostconfigmapping ADD CONSTRAINT FK_hostconfmapping_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE hostconfigmapping ADD CONSTRAINT FK_hostconfmapping_host_name FOREIGN KEY (host_name) REFERENCES hosts (host_name);
+ALTER TABLE serviceconfigmapping ADD CONSTRAINT FK_scvm_scv FOREIGN KEY (service_config_id) REFERENCES serviceconfig(service_config_id);
+ALTER TABLE serviceconfigmapping ADD CONSTRAINT FK_scvm_config FOREIGN KEY (config_id) REFERENCES clusterconfig(config_id);
+ALTER TABLE serviceconfighosts ADD CONSTRAINT FK_scvhosts_scv FOREIGN KEY (service_config_id) REFERENCES serviceconfig(service_config_id);
+ALTER TABLE configgroup ADD CONSTRAINT FK_configgroup_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE confgroupclusterconfigmapping ADD CONSTRAINT FK_confg FOREIGN KEY (cluster_id, config_type, version_tag) REFERENCES clusterconfig (cluster_id, type_name, version_tag);
+ALTER TABLE confgroupclusterconfigmapping ADD CONSTRAINT FK_cgccm_gid FOREIGN KEY (config_group_id) REFERENCES configgroup (group_id);
+ALTER TABLE configgrouphostmapping ADD CONSTRAINT FK_cghm_cgid FOREIGN KEY (config_group_id) REFERENCES configgroup (group_id);
+ALTER TABLE configgrouphostmapping ADD CONSTRAINT FK_cghm_hname FOREIGN KEY (host_name) REFERENCES hosts (host_name);
+ALTER TABLE requestschedulebatchrequest ADD CONSTRAINT FK_rsbatchrequest_schedule_id FOREIGN KEY (schedule_id) REFERENCES requestschedule (schedule_id);
+ALTER TABLE hostgroup ADD CONSTRAINT FK_hg_blueprint_name FOREIGN KEY (blueprint_name) REFERENCES blueprint(blueprint_name);
+ALTER TABLE hostgroup_component ADD CONSTRAINT FK_hgc_blueprint_name FOREIGN KEY (blueprint_name, hostgroup_name) REFERENCES hostgroup (blueprint_name, name);
+ALTER TABLE blueprint_configuration ADD CONSTRAINT FK_cfg_blueprint_name FOREIGN KEY (blueprint_name) REFERENCES blueprint(blueprint_name);
+ALTER TABLE hostgroup_configuration ADD CONSTRAINT FK_hg_cfg_bp_hg_name FOREIGN KEY (blueprint_name, hostgroup_name) REFERENCES hostgroup (blueprint_name, name);
+ALTER TABLE requestresourcefilter ADD CONSTRAINT FK_reqresfilter_req_id FOREIGN KEY (request_id) REFERENCES request (request_id);
+ALTER TABLE requestoperationlevel ADD CONSTRAINT FK_req_op_level_req_id FOREIGN KEY (request_id) REFERENCES request (request_id);
+ALTER TABLE viewparameter ADD CONSTRAINT FK_viewparam_view_name FOREIGN KEY (view_name) REFERENCES viewmain(view_name);
+ALTER TABLE viewresource ADD CONSTRAINT FK_viewres_view_name FOREIGN KEY (view_name) REFERENCES viewmain(view_name);
+ALTER TABLE viewinstance ADD CONSTRAINT FK_viewinst_view_name FOREIGN KEY (view_name) REFERENCES viewmain(view_name);
+ALTER TABLE viewinstanceproperty ADD CONSTRAINT FK_viewinstprop_view_name FOREIGN KEY (view_name, view_instance_name) REFERENCES viewinstance(view_name, name);
+ALTER TABLE viewinstancedata ADD CONSTRAINT FK_viewinstdata_view_name FOREIGN KEY (view_instance_id, view_name, view_instance_name) REFERENCES viewinstance(view_instance_id, view_name, name);
+ALTER TABLE viewentity ADD CONSTRAINT FK_viewentity_view_name FOREIGN KEY (view_name, view_instance_name) REFERENCES viewinstance(view_name, name);
+ALTER TABLE adminresource ADD CONSTRAINT FK_resource_resource_type_id FOREIGN KEY (resource_type_id) REFERENCES adminresourcetype(resource_type_id);
+ALTER TABLE adminprincipal ADD CONSTRAINT FK_principal_principal_type_id FOREIGN KEY (principal_type_id) REFERENCES adminprincipaltype(principal_type_id);
+ALTER TABLE adminpermission ADD CONSTRAINT FK_permission_resource_type_id FOREIGN KEY (resource_type_id) REFERENCES adminresourcetype(resource_type_id);
+ALTER TABLE adminprivilege ADD CONSTRAINT FK_privilege_permission_id FOREIGN KEY (permission_id) REFERENCES adminpermission(permission_id);
+ALTER TABLE adminprivilege ADD CONSTRAINT FK_privilege_resource_id FOREIGN KEY (resource_id) REFERENCES adminresource(resource_id);
+ALTER TABLE viewmain ADD CONSTRAINT FK_view_resource_type_id FOREIGN KEY (resource_type_id) REFERENCES adminresourcetype(resource_type_id);
+ALTER TABLE viewinstance ADD CONSTRAINT FK_viewinstance_resource_id FOREIGN KEY (resource_id) REFERENCES adminresource(resource_id);
+ALTER TABLE adminprivilege ADD CONSTRAINT FK_privilege_principal_id FOREIGN KEY (principal_id) REFERENCES adminprincipal(principal_id);
+ALTER TABLE users ADD CONSTRAINT FK_users_principal_id FOREIGN KEY (principal_id) REFERENCES adminprincipal(principal_id);
+ALTER TABLE groups ADD CONSTRAINT FK_groups_principal_id FOREIGN KEY (principal_id) REFERENCES adminprincipal(principal_id);
+ALTER TABLE clusters ADD CONSTRAINT FK_clusters_resource_id FOREIGN KEY (resource_id) REFERENCES adminresource(resource_id);
+
+-- Alerting Framework
+CREATE TABLE alert_definition (
+ definition_id BIGINT NOT NULL,
+ cluster_id BIGINT NOT NULL,
+ definition_name VARCHAR(255) NOT NULL,
+ service_name VARCHAR(255) NOT NULL,
+ component_name VARCHAR(255),
+ scope VARCHAR(255) DEFAULT 'ANY' NOT NULL,
+ label VARCHAR(255),
+ enabled SMALLINT DEFAULT 1 NOT NULL,
+ schedule_interval INTEGER NOT NULL,
+ source_type VARCHAR(255) NOT NULL,
+ alert_source TEXT NOT NULL,
+ hash VARCHAR(64) NOT NULL,
+ PRIMARY KEY (definition_id),
+ FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id),
+ CONSTRAINT uni_alert_def_name UNIQUE(cluster_id,definition_name)
+);
+
+CREATE TABLE alert_history (
+ alert_id BIGINT NOT NULL,
+ cluster_id BIGINT NOT NULL,
+ alert_definition_id BIGINT NOT NULL,
+ service_name VARCHAR(255) NOT NULL,
+ component_name VARCHAR(255),
+ host_name VARCHAR(255),
+ alert_instance VARCHAR(255),
+ alert_timestamp BIGINT NOT NULL,
+ alert_label VARCHAR(1024),
+ alert_state VARCHAR(255) NOT NULL,
+ alert_text TEXT,
+ PRIMARY KEY (alert_id),
+ FOREIGN KEY (alert_definition_id) REFERENCES alert_definition(definition_id),
+ FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id)
+);
+
+CREATE TABLE alert_current (
+ alert_id BIGINT NOT NULL,
+ definition_id BIGINT NOT NULL,
+ history_id BIGINT NOT NULL UNIQUE,
+ maintenance_state VARCHAR(255),
+ original_timestamp BIGINT NOT NULL,
+ latest_timestamp BIGINT NOT NULL,
+ latest_text TEXT,
+ PRIMARY KEY (alert_id),
+ FOREIGN KEY (definition_id) REFERENCES alert_definition(definition_id),
+ FOREIGN KEY (history_id) REFERENCES alert_history(alert_id)
+);
+
+CREATE TABLE alert_group (
+ group_id BIGINT NOT NULL,
+ cluster_id BIGINT NOT NULL,
+ group_name VARCHAR(255) NOT NULL,
+ is_default SMALLINT NOT NULL DEFAULT 0,
+ service_name VARCHAR(255),
+ PRIMARY KEY (group_id),
+ CONSTRAINT uni_alert_group_name UNIQUE(cluster_id,group_name)
+);
+
+CREATE TABLE alert_target (
+ target_id BIGINT NOT NULL,
+ target_name VARCHAR(255) NOT NULL UNIQUE,
+ notification_type VARCHAR(64) NOT NULL,
+ properties TEXT,
+ description VARCHAR(1024),
+ PRIMARY KEY (target_id)
+);
+
+CREATE TABLE alert_group_target (
+ group_id BIGINT NOT NULL,
+ target_id BIGINT NOT NULL,
+ PRIMARY KEY (group_id, target_id),
+ FOREIGN KEY (group_id) REFERENCES alert_group(group_id),
+ FOREIGN KEY (target_id) REFERENCES alert_target(target_id)
+);
+
+CREATE TABLE alert_grouping (
+ definition_id BIGINT NOT NULL,
+ group_id BIGINT NOT NULL,
+ PRIMARY KEY (group_id, definition_id),
+ FOREIGN KEY (definition_id) REFERENCES alert_definition(definition_id),
+ FOREIGN KEY (group_id) REFERENCES alert_group(group_id)
+);
+
+CREATE TABLE alert_notice (
+ notification_id BIGINT NOT NULL,
+ target_id BIGINT NOT NULL,
+ history_id BIGINT NOT NULL,
+ notify_state VARCHAR(255) NOT NULL,
+ uuid VARCHAR(64) NOT NULL UNIQUE,
+ PRIMARY KEY (notification_id),
+ FOREIGN KEY (target_id) REFERENCES alert_target(target_id),
+ FOREIGN KEY (history_id) REFERENCES alert_history(alert_id)
+);
+
+CREATE INDEX idx_alert_history_def_id on alert_history(alert_definition_id);
+CREATE INDEX idx_alert_history_service on alert_history(service_name);
+CREATE INDEX idx_alert_history_host on alert_history(host_name);
+CREATE INDEX idx_alert_history_time on alert_history(alert_timestamp);
+CREATE INDEX idx_alert_history_state on alert_history(alert_state);
+CREATE INDEX idx_alert_group_name on alert_group(group_name);
+CREATE INDEX idx_alert_notice_state on alert_notice(notify_state);
+
+---------inserting some data-----------
+BEGIN TRANSACTION
+ INSERT INTO ambari_sequences (sequence_name, [sequence_value])
+ SELECT 'cluster_id_seq', 1
+ UNION ALL
+ SELECT 'host_role_command_id_seq', 1
+ UNION ALL
+ SELECT 'user_id_seq', 2
+ UNION ALL
+ SELECT 'group_id_seq', 1
+ UNION ALL
+ SELECT 'member_id_seq', 1
+ UNION ALL
+ SELECT 'configgroup_id_seq', 1
+ UNION ALL
+ SELECT 'requestschedule_id_seq', 1
+ UNION ALL
+ SELECT 'resourcefilter_id_seq', 1
+ UNION ALL
+ SELECT 'viewentity_id_seq', 0
+ UNION ALL
+ SELECT 'operation_level_id_seq', 1
+ UNION ALL
+ SELECT 'view_instance_id_seq', 1
+ UNION ALL
+ SELECT 'resource_type_id_seq', 4
+ UNION ALL
+ SELECT 'resource_id_seq', 2
+ UNION ALL
+ SELECT 'principal_type_id_seq', 3
+ UNION ALL
+ SELECT 'principal_id_seq', 2
+ UNION ALL
+ SELECT 'permission_id_seq', 5
+ UNION ALL
+ SELECT 'privilege_id_seq', 1
+ UNION ALL
+ SELECT 'config_id_seq', 1
+ UNION ALL
+ SELECT 'service_config_id_seq', 1
+ UNION ALL
+ SELECT 'alert_definition_id_seq', 0
+ UNION ALL
+ SELECT 'alert_group_id_seq', 0
+ UNION ALL
+ SELECT 'alert_target_id_seq', 0
+ UNION ALL
+ SELECT 'alert_history_id_seq', 0
+ UNION ALL
+ SELECT 'alert_notice_id_seq', 0
+ UNION ALL
+ SELECT 'alert_current_id_seq', 0;
+
+ insert into adminresourcetype (resource_type_id, resource_type_name)
+ select 1, 'AMBARI'
+ union all
+ select 2, 'CLUSTER'
+ union all
+ select 3, 'VIEW';
+
+ insert into adminresource (resource_id, resource_type_id)
+ select 1, 1;
+
+ insert into adminprincipaltype (principal_type_id, principal_type_name)
+ select 1, 'USER'
+ union all
+ select 2, 'GROUP';
+
+ insert into adminprincipal (principal_id, principal_type_id)
+ select 1, 1;
+
+ insert into users(user_id, principal_id, user_name, user_password)
+ select 1, 1, 'admin','538916f8943ec225d97a9a86a2c6ec0818c1cd400e09e03b660fdaaec4af29ddbb6f2b1033b81b00';
+
+ insert into adminpermission(permission_id, permission_name, resource_type_id)
+ select 1, 'AMBARI.ADMIN', 1
+ union all
+ select 2, 'CLUSTER.READ', 2
+ union all
+ select 3, 'CLUSTER.OPERATE', 2
+ union all
+ select 4, 'VIEW.USE', 3;
+
+ insert into adminprivilege (privilege_id, permission_id, resource_id, principal_id)
+ select 1, 1, 1, 1;
+
+ insert into metainfo(metainfo_key, metainfo_value)
+ select 'version','${ambariVersion}';
+COMMIT TRANSACTION
+
+-- Quartz tables
+
+CREATE TABLE qrtz_job_details
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ JOB_NAME VARCHAR(200) NOT NULL,
+ JOB_GROUP VARCHAR(200) NOT NULL,
+ DESCRIPTION VARCHAR(250) NULL,
+ JOB_CLASS_NAME VARCHAR(250) NOT NULL,
+ IS_DURABLE BIT NOT NULL,
+ IS_NONCONCURRENT BIT NOT NULL,
+ IS_UPDATE_DATA BIT NOT NULL,
+ REQUESTS_RECOVERY BIT NOT NULL,
+ JOB_DATA VARBINARY(MAX) NULL,
+ PRIMARY KEY CLUSTERED (SCHED_NAME,JOB_NAME,JOB_GROUP)
+);
+
+CREATE TABLE qrtz_triggers
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ TRIGGER_NAME VARCHAR(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ JOB_NAME VARCHAR(200) NOT NULL,
+ JOB_GROUP VARCHAR(200) NOT NULL,
+ DESCRIPTION VARCHAR(250) NULL,
+ NEXT_FIRE_TIME BIGINT NULL,
+ PREV_FIRE_TIME BIGINT NULL,
+ PRIORITY INTEGER NULL,
+ TRIGGER_STATE VARCHAR(16) NOT NULL,
+ TRIGGER_TYPE VARCHAR(8) NOT NULL,
+ START_TIME BIGINT NOT NULL,
+ END_TIME BIGINT NULL,
+ CALENDAR_NAME VARCHAR(200) NULL,
+ MISFIRE_INSTR SMALLINT NULL,
+ JOB_DATA VARBINARY(MAX) NULL,
+ PRIMARY KEY CLUSTERED (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
+ FOREIGN KEY (SCHED_NAME,JOB_NAME,JOB_GROUP)
+ REFERENCES QRTZ_JOB_DETAILS(SCHED_NAME,JOB_NAME,JOB_GROUP)
+);
+
+CREATE TABLE qrtz_simple_triggers
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ TRIGGER_NAME VARCHAR(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ REPEAT_COUNT BIGINT NOT NULL,
+ REPEAT_INTERVAL BIGINT NOT NULL,
+ TIMES_TRIGGERED BIGINT NOT NULL,
+ PRIMARY KEY CLUSTERED (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
+ FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+ REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+);
+
+CREATE TABLE qrtz_cron_triggers
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ TRIGGER_NAME VARCHAR(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ CRON_EXPRESSION VARCHAR(120) NOT NULL,
+ TIME_ZONE_ID VARCHAR(80),
+ PRIMARY KEY CLUSTERED (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
+ FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+ REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+);
+
+CREATE TABLE qrtz_simprop_triggers
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ TRIGGER_NAME VARCHAR(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ STR_PROP_1 VARCHAR(512) NULL,
+ STR_PROP_2 VARCHAR(512) NULL,
+ STR_PROP_3 VARCHAR(512) NULL,
+ INT_PROP_1 INT NULL,
+ INT_PROP_2 INT NULL,
+ LONG_PROP_1 BIGINT NULL,
+ LONG_PROP_2 BIGINT NULL,
+ DEC_PROP_1 NUMERIC(13,4) NULL,
+ DEC_PROP_2 NUMERIC(13,4) NULL,
+ BOOL_PROP_1 BIT NULL,
+ BOOL_PROP_2 BIT NULL,
+ PRIMARY KEY CLUSTERED (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
+ FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+ REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+);
+
+CREATE TABLE qrtz_blob_triggers
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ TRIGGER_NAME VARCHAR(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ BLOB_DATA VARBINARY(MAX) NULL,
+ PRIMARY KEY CLUSTERED (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
+ FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+ REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+);
+
+CREATE TABLE qrtz_calendars
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ CALENDAR_NAME VARCHAR(200) NOT NULL,
+ CALENDAR VARBINARY(MAX) NOT NULL,
+ PRIMARY KEY CLUSTERED (SCHED_NAME,CALENDAR_NAME)
+);
+
+
+CREATE TABLE qrtz_paused_trigger_grps
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ PRIMARY KEY CLUSTERED (SCHED_NAME,TRIGGER_GROUP)
+);
+
+CREATE TABLE qrtz_fired_triggers
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ ENTRY_ID VARCHAR(95) NOT NULL,
+ TRIGGER_NAME VARCHAR(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ INSTANCE_NAME VARCHAR(200) NOT NULL,
+ FIRED_TIME BIGINT NOT NULL,
+ SCHED_TIME BIGINT NOT NULL,
+ PRIORITY INTEGER NOT NULL,
+ STATE VARCHAR(16) NOT NULL,
+ JOB_NAME VARCHAR(200) NULL,
+ JOB_GROUP VARCHAR(200) NULL,
+ IS_NONCONCURRENT BIT NULL,
+ REQUESTS_RECOVERY BIT NULL,
+ PRIMARY KEY CLUSTERED (SCHED_NAME,ENTRY_ID)
+);
+
+CREATE TABLE qrtz_scheduler_state
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ INSTANCE_NAME VARCHAR(200) NOT NULL,
+ LAST_CHECKIN_TIME BIGINT NOT NULL,
+ CHECKIN_INTERVAL BIGINT NOT NULL,
+ PRIMARY KEY CLUSTERED (SCHED_NAME,INSTANCE_NAME)
+);
+
+CREATE TABLE qrtz_locks
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ LOCK_NAME VARCHAR(40) NOT NULL,
+ PRIMARY KEY CLUSTERED (SCHED_NAME,LOCK_NAME)
+);
+
+create index idx_qrtz_j_req_recovery on qrtz_job_details(SCHED_NAME,REQUESTS_RECOVERY);
+create index idx_qrtz_j_grp on qrtz_job_details(SCHED_NAME,JOB_GROUP);
+
+create index idx_qrtz_t_j on qrtz_triggers(SCHED_NAME,JOB_NAME,JOB_GROUP);
+create index idx_qrtz_t_jg on qrtz_triggers(SCHED_NAME,JOB_GROUP);
+create index idx_qrtz_t_c on qrtz_triggers(SCHED_NAME,CALENDAR_NAME);
+create index idx_qrtz_t_g on qrtz_triggers(SCHED_NAME,TRIGGER_GROUP);
+create index idx_qrtz_t_state on qrtz_triggers(SCHED_NAME,TRIGGER_STATE);
+create index idx_qrtz_t_n_state on qrtz_triggers(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP,TRIGGER_STATE);
+create index idx_qrtz_t_n_g_state on qrtz_triggers(SCHED_NAME,TRIGGER_GROUP,TRIGGER_STATE);
+create index idx_qrtz_t_next_fire_time on qrtz_triggers(SCHED_NAME,NEXT_FIRE_TIME);
+create index idx_qrtz_t_nft_st on qrtz_triggers(SCHED_NAME,TRIGGER_STATE,NEXT_FIRE_TIME);
+create index idx_qrtz_t_nft_misfire on qrtz_triggers(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME);
+create index idx_qrtz_t_nft_st_misfire on qrtz_triggers(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_STATE);
+create index idx_qrtz_t_nft_st_misfire_grp on qrtz_triggers(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_GROUP,TRIGGER_STATE);
+
+create index idx_qrtz_ft_trig_inst_name on qrtz_fired_triggers(SCHED_NAME,INSTANCE_NAME);
+create index idx_qrtz_ft_inst_job_req_rcvry on qrtz_fired_triggers(SCHED_NAME,INSTANCE_NAME,REQUESTS_RECOVERY);
+create index idx_qrtz_ft_j_g on qrtz_fired_triggers(SCHED_NAME,JOB_NAME,JOB_GROUP);
+create index idx_qrtz_ft_jg on qrtz_fired_triggers(SCHED_NAME,JOB_GROUP);
+create index idx_qrtz_ft_t_g on qrtz_fired_triggers(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
+create index idx_qrtz_ft_tg on qrtz_fired_triggers(SCHED_NAME,TRIGGER_GROUP);
+
+-- ambari log4j DDL
+
+CREATE TABLE workflow (
+ workflowId varchar(255) PRIMARY KEY CLUSTERED,
+ workflowName varchar(255),
+ parentWorkflowId varchar(255),
+ workflowContext TEXT, userName varchar(255),
+ startTime BIGINT, lastUpdateTime BIGINT,
+ numJobsTotal INTEGER, numJobsCompleted INTEGER,
+ inputBytes BIGINT, outputBytes BIGINT,
+ duration BIGINT,
+ FOREIGN KEY (parentWorkflowId) REFERENCES workflow (workflowId)
+);
+
+CREATE TABLE job (
+ jobId varchar(255) NOT NULL,
+ workflowId varchar(255) NOT NULL,
+ jobName varchar(255), workflowEntityName varchar(255),
+ userName varchar(255), queue varchar(255), acls varchar(2000), confPath varchar(260),
+ submitTime BIGINT, launchTime BIGINT, finishTime BIGINT,
+ maps INTEGER, reduces INTEGER, status varchar(255), priority varchar(255),
+ finishedMaps INTEGER, finishedReduces INTEGER,
+ failedMaps INTEGER, failedReduces INTEGER,
+ mapsRuntime BIGINT, reducesRuntime BIGINT,
+ mapCounters TEXT, reduceCounters TEXT, jobCounters TEXT,
+ inputBytes BIGINT, outputBytes BIGINT,
+ PRIMARY KEY CLUSTERED (jobId),
+ FOREIGN KEY (workflowId) REFERENCES workflow (workflowId)
+);
+
+CREATE TABLE task (
+ taskId varchar(255) NOT NULL,
+ jobId varchar(255) NOT NULL,
+ taskType varchar(255), splits varchar(2000),
+ startTime BIGINT, finishTime BIGINT, status TEXT, error TEXT, counters TEXT,
+ failedAttempt TEXT,
+ PRIMARY KEY CLUSTERED (taskId),
+ FOREIGN KEY (jobId) REFERENCES job (jobId)
+);
+
+CREATE TABLE taskAttempt (
+ taskAttemptId varchar(255) NOT NULL,
+ taskId varchar(255) NOT NULL,
+ jobId varchar(255) NOT NULL,
+ taskType varchar(255), taskTracker varchar(255),
+ startTime BIGINT, finishTime BIGINT,
+ mapFinishTime BIGINT, shuffleFinishTime BIGINT, sortFinishTime BIGINT,
+ locality TEXT, avataar TEXT,
+ status TEXT, error TEXT, counters TEXT,
+ inputBytes BIGINT, outputBytes BIGINT,
+ PRIMARY KEY CLUSTERED (taskAttemptId),
+ FOREIGN KEY (jobId) REFERENCES job (jobId),
+ FOREIGN KEY (taskId) REFERENCES task (taskId)
+);
+
+CREATE TABLE hdfsEvent (
+ timestamp BIGINT,
+ userName varchar(255),
+ clientIP varchar(255),
+ operation varchar(255),
+ srcPath varchar(260),
+ dstPath varchar(260),
+ permissions TEXT
+);
+
+CREATE TABLE mapreduceEvent (
+ timestamp BIGINT,
+ userName varchar(255),
+ clientIP varchar(255),
+ operation varchar(255),
+ target varchar(255),
+ result TEXT,
+ description TEXT,
+ permissions TEXT
+);
+
+CREATE TABLE clusterEvent (
+ timestamp BIGINT,
+ service varchar(255), status TEXT,
+ error TEXT, data TEXT,
+ host TEXT, rack TEXT
+);
+
+GO
+
+IF OBJECT_ID ('trigger_workflow_delete','TR') IS NOT NULL
+ DROP TRIGGER trigger_workflow_delete;
+GO
+
+CREATE TRIGGER trigger_workflow_delete
+ON workflow
+INSTEAD OF DELETE
+AS
+BEGIN
+ declare @cteTmp table
+ (
+ rowid int identity,
+ workflowId varchar(255)
+ );
+
+ declare @cteTmpRev table
+ (
+ rowid int identity,
+ workflowId varchar(255)
+ );
+
+ --the trigger does not get called recursively, so we need to store the child node ids in a temp table
+ with cte as
+ (
+ select wr.workflowId workflowId
+ from workflow wr inner join deleted d ON wr.workflowId = d.workflowId
+
+ union all
+
+ select w.workflowId
+ from cte
+ inner join workflow w on cte.workflowId = w.parentWorkflowId
+ )
+ insert into @cteTmp
+ select workflowId from cte;
+
+  --ORDER BY is not allowed inside subqueries or common table expressions,
+  -- so copy the ids into a second temp table in reverse insertion order instead;
+  -- watch out for scalability issues due to the data duplication
+ insert into @cteTmpRev
+ select workflowId from @cteTmp
+ order by rowid desc;
+
+ --delete from the referred tables
+ delete from job
+ from job j inner join @cteTmpRev r on j.workflowId = r.workflowId;
+
+ --finally delete from the master table
+ delete from workflow
+ from workflow w inner join @cteTmpRev r on w.workflowId = r.workflowId
+END
+
+GO
+
+IF OBJECT_ID ('trigger_job_delete','TR') IS NOT NULL
+ DROP TRIGGER trigger_job_delete;
+GO
+
+CREATE TRIGGER trigger_job_delete
+ON job
+INSTEAD OF DELETE
+AS
+BEGIN
+ --delete from referred tables
+ delete from task
+ from task t inner join deleted d on t.jobId = d.jobId
+
+ delete from job
+ from job j inner join deleted d on j.jobId = d.jobId
+END
+
+GO
+
+IF OBJECT_ID ('trigger_task_delete','TR') IS NOT NULL
+ DROP TRIGGER trigger_task_delete;
+GO
+
+CREATE TRIGGER trigger_task_delete
+ON task
+INSTEAD OF DELETE
+AS
+BEGIN
+ --delete from referred tables
+ delete from taskAttempt
+ from taskAttempt ta inner join task t on ta.taskId = t.taskId
+ inner join deleted d on t.jobId = d.jobId
+
+ delete from task
+ from task t inner join deleted d on t.taskId = d.taskId
+END
+
+GO
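The three INSTEAD OF DELETE triggers above emulate a cascading delete that
plain foreign keys cannot express here (workflow references itself, and task
attempts hang two levels below a job): a client delete on a parent row should
be fanned out over the whole subtree. A hedged illustration over ODBC, with a
made-up workflow id:

    import pyodbc  # assumption: any ODBC-capable client behaves the same

    conn = pyodbc.connect("DRIVER={SQL Server};"
                          "SERVER=localhost\\SQLEXPRESS;DATABASE=ambari;"
                          "Trusted_Connection=yes")
    cur = conn.cursor()
    # trigger_workflow_delete expands this over child workflows, jobs,
    # tasks and task attempts before removing the workflow row itself
    cur.execute("DELETE FROM workflow WHERE workflowId = ?", ("wf_0001",))
    conn.commit()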
diff --git a/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATELOCAL.sql b/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATELOCAL.sql
new file mode 100644
index 0000000000..87e7a40380
--- /dev/null
+++ b/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATELOCAL.sql
@@ -0,0 +1,128 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Deployment script for $(AMBARIDBNAME)
+
+Use this script in sqlcmd mode, with a series of environment variables like this:
+set AMBARIDBNAME=ambari
+set AMBARIDBLOGNAME=ambari_log
+
+set AMBARIDBPATH=C:\Program Files\Microsoft SQL Server\MSSQL12.SQLEXPRESS\MSSQL\DATA\ambari.mdf
+set AMBARIDBLOGPATH=C:\Program Files\Microsoft SQL Server\MSSQL12.SQLEXPRESS\MSSQL\DATA\ambari_log.ldf
+
+set AMBARIDBOWNER=hadoop
+
+sqlcmd -S localhost\SQLEXPRESS -i C:\app\ambari-server-1.3.0-SNAPSHOT\resources\Ambari-DDL-SQLServer-CREATELOCAL.sql
+*/
+
+USE [master]
+GO
+
+IF db_id('$(AMBARIDBNAME)') IS NOT NULL
+BEGIN
+ Print N'Dropping [dbo].[$(AMBARIDBNAME)] database...'
+ ALTER DATABASE [$(AMBARIDBNAME)] SET SINGLE_USER WITH ROLLBACK IMMEDIATE
+ DROP DATABASE [$(AMBARIDBNAME)]
+END
+GO
+
+Print N'Creating [dbo].[$(AMBARIDBNAME)] database at $(AMBARIDBPATH)...'
+
+/****** Object: Database [$(AMBARIDBNAME)] Script Date: 6/11/2014 9:21:53 PM ******/
+
+CREATE DATABASE [$(AMBARIDBNAME)]
+ CONTAINMENT = NONE
+ ON PRIMARY
+( NAME = N'$(AMBARIDBNAME)', FILENAME = N'$(AMBARIDBPATH)' , SIZE = 3072KB , FILEGROWTH = 1024KB )
+ LOG ON
+( NAME = N'$(AMBARIDBLOGNAME)', FILENAME = N'$(AMBARIDBLOGPATH)' , SIZE = 1024KB , FILEGROWTH = 10%)
+GO
+
+ALTER DATABASE [$(AMBARIDBNAME)] SET COMPATIBILITY_LEVEL = 120
+GO
+ALTER DATABASE [$(AMBARIDBNAME)] SET ANSI_NULL_DEFAULT OFF
+GO
+ALTER DATABASE [$(AMBARIDBNAME)] SET ANSI_NULLS OFF
+GO
+ALTER DATABASE [$(AMBARIDBNAME)] SET ANSI_PADDING OFF
+GO
+ALTER DATABASE [$(AMBARIDBNAME)] SET ANSI_WARNINGS OFF
+GO
+ALTER DATABASE [$(AMBARIDBNAME)] SET ARITHABORT OFF
+GO
+ALTER DATABASE [$(AMBARIDBNAME)] SET AUTO_CLOSE OFF
+GO
+ALTER DATABASE [$(AMBARIDBNAME)] SET AUTO_SHRINK OFF
+GO
+ALTER DATABASE [$(AMBARIDBNAME)] SET AUTO_CREATE_STATISTICS ON
+GO
+ALTER DATABASE [$(AMBARIDBNAME)] SET AUTO_UPDATE_STATISTICS ON
+GO
+ALTER DATABASE [$(AMBARIDBNAME)] SET CURSOR_CLOSE_ON_COMMIT OFF
+GO
+ALTER DATABASE [$(AMBARIDBNAME)] SET CURSOR_DEFAULT GLOBAL
+GO
+ALTER DATABASE [$(AMBARIDBNAME)] SET CONCAT_NULL_YIELDS_NULL OFF
+GO
+ALTER DATABASE [$(AMBARIDBNAME)] SET NUMERIC_ROUNDABORT OFF
+GO
+ALTER DATABASE [$(AMBARIDBNAME)] SET QUOTED_IDENTIFIER OFF
+GO
+ALTER DATABASE [$(AMBARIDBNAME)] SET RECURSIVE_TRIGGERS OFF
+GO
+ALTER DATABASE [$(AMBARIDBNAME)] SET DISABLE_BROKER
+GO
+ALTER DATABASE [$(AMBARIDBNAME)] SET AUTO_UPDATE_STATISTICS_ASYNC OFF
+GO
+ALTER DATABASE [$(AMBARIDBNAME)] SET DATE_CORRELATION_OPTIMIZATION OFF
+GO
+ALTER DATABASE [$(AMBARIDBNAME)] SET PARAMETERIZATION SIMPLE
+GO
+ALTER DATABASE [$(AMBARIDBNAME)] SET READ_COMMITTED_SNAPSHOT OFF
+GO
+ALTER DATABASE [$(AMBARIDBNAME)] SET READ_WRITE
+GO
+ALTER DATABASE [$(AMBARIDBNAME)] SET RECOVERY SIMPLE
+GO
+ALTER DATABASE [$(AMBARIDBNAME)] SET MULTI_USER
+GO
+ALTER DATABASE [$(AMBARIDBNAME)] SET PAGE_VERIFY CHECKSUM
+GO
+ALTER DATABASE [$(AMBARIDBNAME)] SET TARGET_RECOVERY_TIME = 0 SECONDS
+GO
+ALTER DATABASE [$(AMBARIDBNAME)] SET DELAYED_DURABILITY = DISABLED
+GO
+
+USE [$(AMBARIDBNAME)]
+GO
+
+IF NOT EXISTS (SELECT name FROM sys.filegroups WHERE is_default=1 AND name = N'PRIMARY') ALTER DATABASE [$(AMBARIDBNAME)] MODIFY FILEGROUP [PRIMARY] DEFAULT
+GO
+
+ALTER authorization on DATABASE::$(AMBARIDBNAME) to [NT AUTHORITY\SYSTEM]
+GO
+
+if exists (select 1 from master.sys.syslogins where name='$(AMBARIDBOWNER)')
+BEGIN
+ CREATE USER [$(AMBARIDBOWNER)] FOR LOGIN [$(AMBARIDBOWNER)]
+ ALTER ROLE [db_owner] ADD MEMBER [$(AMBARIDBOWNER)]
+END
+GO
+
+Print N'[dbo].[$(AMBARIDBNAME)] database created.'
diff --git a/ambari-server/src/main/resources/Ambari-DDL-SQLServer-DROP.sql b/ambari-server/src/main/resources/Ambari-DDL-SQLServer-DROP.sql
new file mode 100644
index 0000000000..203db3a983
--- /dev/null
+++ b/ambari-server/src/main/resources/Ambari-DDL-SQLServer-DROP.sql
@@ -0,0 +1,203 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Schema purge script for $(AMBARIDBNAME)
+
+Use this script in sqlcmd mode, setting the environment variables like this:
+set AMBARIDBNAME=ambari
+
+sqlcmd -S localhost\SQLEXPRESS -i C:\app\ambari-server-1.3.0-SNAPSHOT\resources\Ambari-DDL-SQLServer-DROP.sql
+*/
+
+USE [$(AMBARIDBNAME)];
+
+IF OBJECT_ID ('trigger_task_delete','TR') IS NOT NULL DROP TRIGGER trigger_task_delete;
+GO
+IF OBJECT_ID ('trigger_job_delete','TR') IS NOT NULL DROP TRIGGER trigger_job_delete;
+GO
+IF OBJECT_ID ('trigger_workflow_delete','TR') IS NOT NULL DROP TRIGGER trigger_workflow_delete;
+GO
+IF OBJECT_ID('clusterEvent', 'U') IS NOT NULL DROP TABLE clusterEvent
+GO
+IF OBJECT_ID('mapreduceEvent', 'U') IS NOT NULL DROP TABLE mapreduceEvent
+GO
+IF OBJECT_ID('hdfsEvent', 'U') IS NOT NULL DROP TABLE hdfsEvent
+GO
+IF OBJECT_ID('taskAttempt', 'U') IS NOT NULL DROP TABLE taskAttempt
+GO
+IF OBJECT_ID('task', 'U') IS NOT NULL DROP TABLE task
+GO
+IF OBJECT_ID('job', 'U') IS NOT NULL DROP TABLE job
+GO
+IF OBJECT_ID('workflow', 'U') IS NOT NULL DROP TABLE workflow
+GO
+
+IF OBJECT_ID('qrtz_locks', 'U') IS NOT NULL DROP TABLE qrtz_locks
+GO
+IF OBJECT_ID('qrtz_scheduler_state', 'U') IS NOT NULL DROP TABLE qrtz_scheduler_state
+GO
+IF OBJECT_ID('qrtz_fired_triggers', 'U') IS NOT NULL DROP TABLE qrtz_fired_triggers
+GO
+IF OBJECT_ID('qrtz_paused_trigger_grps', 'U') IS NOT NULL DROP TABLE qrtz_paused_trigger_grps
+GO
+IF OBJECT_ID('qrtz_calendars', 'U') IS NOT NULL DROP TABLE qrtz_calendars
+GO
+IF OBJECT_ID('qrtz_blob_triggers', 'U') IS NOT NULL DROP TABLE qrtz_blob_triggers
+GO
+IF OBJECT_ID('qrtz_simprop_triggers', 'U') IS NOT NULL DROP TABLE qrtz_simprop_triggers
+GO
+IF OBJECT_ID('qrtz_cron_triggers', 'U') IS NOT NULL DROP TABLE qrtz_cron_triggers
+GO
+IF OBJECT_ID('qrtz_simple_triggers', 'U') IS NOT NULL DROP TABLE qrtz_simple_triggers
+GO
+IF OBJECT_ID('qrtz_triggers', 'U') IS NOT NULL DROP TABLE qrtz_triggers
+GO
+IF OBJECT_ID('qrtz_job_details', 'U') IS NOT NULL DROP TABLE qrtz_job_details
+GO
+
+IF OBJECT_ID('viewentity', 'U') IS NOT NULL DROP TABLE viewentity
+GO
+IF OBJECT_ID('viewresource', 'U') IS NOT NULL DROP TABLE viewresource
+GO
+IF OBJECT_ID('viewparameter', 'U') IS NOT NULL DROP TABLE viewparameter
+GO
+IF OBJECT_ID('viewinstanceproperty', 'U') IS NOT NULL DROP TABLE viewinstanceproperty
+GO
+IF OBJECT_ID('viewinstancedata', 'U') IS NOT NULL DROP TABLE viewinstancedata
+GO
+IF OBJECT_ID('viewinstance', 'U') IS NOT NULL DROP TABLE viewinstance
+GO
+IF OBJECT_ID('viewmain', 'U') IS NOT NULL DROP TABLE viewmain
+GO
+
+IF OBJECT_ID('hostgroup_configuration', 'U') IS NOT NULL DROP TABLE hostgroup_configuration
+GO
+IF OBJECT_ID('blueprint_configuration', 'U') IS NOT NULL DROP TABLE blueprint_configuration
+GO
+IF OBJECT_ID('hostgroup_component', 'U') IS NOT NULL DROP TABLE hostgroup_component
+GO
+IF OBJECT_ID('hostgroup', 'U') IS NOT NULL DROP TABLE hostgroup
+GO
+IF OBJECT_ID('blueprint', 'U') IS NOT NULL DROP TABLE blueprint
+GO
+
+IF OBJECT_ID('configgrouphostmapping', 'U') IS NOT NULL DROP TABLE configgrouphostmapping
+GO
+IF OBJECT_ID('confgroupclusterconfigmapping', 'U') IS NOT NULL DROP TABLE confgroupclusterconfigmapping
+GO
+IF OBJECT_ID('configgroup', 'U') IS NOT NULL DROP TABLE configgroup
+GO
+IF OBJECT_ID('ambari_sequences', 'U') IS NOT NULL DROP TABLE ambari_sequences
+GO
+IF OBJECT_ID('metainfo', 'U') IS NOT NULL DROP TABLE metainfo
+GO
+IF OBJECT_ID('hostconfigmapping', 'U') IS NOT NULL DROP TABLE hostconfigmapping
+GO
+IF OBJECT_ID('key_value_store', 'U') IS NOT NULL DROP TABLE key_value_store
+GO
+IF OBJECT_ID('user_roles', 'U') IS NOT NULL DROP TABLE user_roles
+GO
+IF OBJECT_ID('ClusterHostMapping', 'U') IS NOT NULL DROP TABLE ClusterHostMapping
+GO
+IF OBJECT_ID('role_success_criteria', 'U') IS NOT NULL DROP TABLE role_success_criteria
+GO
+IF OBJECT_ID('execution_command', 'U') IS NOT NULL DROP TABLE execution_command
+GO
+IF OBJECT_ID('host_role_command', 'U') IS NOT NULL DROP TABLE host_role_command
+GO
+IF OBJECT_ID('members', 'U') IS NOT NULL DROP TABLE members
+GO
+IF OBJECT_ID('groups', 'U') IS NOT NULL DROP TABLE groups
+GO
+IF OBJECT_ID('users', 'U') IS NOT NULL DROP TABLE users
+GO
+IF OBJECT_ID('roles', 'U') IS NOT NULL DROP TABLE roles
+GO
+IF OBJECT_ID('stage', 'U') IS NOT NULL DROP TABLE stage
+GO
+IF OBJECT_ID('requestoperationlevel', 'U') IS NOT NULL DROP TABLE requestoperationlevel
+GO
+IF OBJECT_ID('requestresourcefilter', 'U') IS NOT NULL DROP TABLE requestresourcefilter
+GO
+IF OBJECT_ID('request', 'U') IS NOT NULL DROP TABLE request
+GO
+IF OBJECT_ID('requestschedulebatchrequest', 'U') IS NOT NULL DROP TABLE requestschedulebatchrequest
+GO
+IF OBJECT_ID('requestschedule', 'U') IS NOT NULL DROP TABLE requestschedule
+GO
+IF OBJECT_ID('hoststate', 'U') IS NOT NULL DROP TABLE hoststate
+GO
+IF OBJECT_ID('hostcomponentdesiredstate', 'U') IS NOT NULL DROP TABLE hostcomponentdesiredstate
+GO
+IF OBJECT_ID('hostcomponentstate', 'U') IS NOT NULL DROP TABLE hostcomponentstate
+GO
+IF OBJECT_ID('hosts', 'U') IS NOT NULL DROP TABLE hosts
+GO
+IF OBJECT_ID('servicedesiredstate', 'U') IS NOT NULL DROP TABLE servicedesiredstate
+GO
+IF OBJECT_ID('servicecomponentdesiredstate', 'U') IS NOT NULL DROP TABLE servicecomponentdesiredstate
+GO
+IF OBJECT_ID('clusterstate', 'U') IS NOT NULL DROP TABLE clusterstate
+GO
+IF OBJECT_ID('clusterservices', 'U') IS NOT NULL DROP TABLE clusterservices
+GO
+IF OBJECT_ID('clusterconfigmapping', 'U') IS NOT NULL DROP TABLE clusterconfigmapping
+GO
+
+IF OBJECT_ID('alert_notice', 'U') IS NOT NULL DROP TABLE alert_notice
+GO
+IF OBJECT_ID('alert_grouping', 'U') IS NOT NULL DROP TABLE alert_grouping
+GO
+IF OBJECT_ID('alert_group_target', 'U') IS NOT NULL DROP TABLE alert_group_target
+GO
+IF OBJECT_ID('alert_target', 'U') IS NOT NULL DROP TABLE alert_target
+GO
+IF OBJECT_ID('alert_group', 'U') IS NOT NULL DROP TABLE alert_group
+GO
+IF OBJECT_ID('alert_current', 'U') IS NOT NULL DROP TABLE alert_current
+GO
+IF OBJECT_ID('alert_history', 'U') IS NOT NULL DROP TABLE alert_history
+GO
+IF OBJECT_ID('alert_definition', 'U') IS NOT NULL DROP TABLE alert_definition
+GO
+
+IF OBJECT_ID('serviceconfighosts', 'U') IS NOT NULL DROP TABLE serviceconfighosts
+GO
+IF OBJECT_ID('serviceconfigmapping', 'U') IS NOT NULL DROP TABLE serviceconfigmapping
+GO
+IF OBJECT_ID('serviceconfig', 'U') IS NOT NULL DROP TABLE serviceconfig
+GO
+
+IF OBJECT_ID('clusterconfig', 'U') IS NOT NULL DROP TABLE clusterconfig
+GO
+IF OBJECT_ID('clusters', 'U') IS NOT NULL DROP TABLE clusters
+GO
+
+IF OBJECT_ID('adminprivilege', 'U') IS NOT NULL DROP TABLE adminprivilege
+GO
+IF OBJECT_ID('adminresource', 'U') IS NOT NULL DROP TABLE adminresource
+GO
+IF OBJECT_ID('adminpermission', 'U') IS NOT NULL DROP TABLE adminpermission
+GO
+IF OBJECT_ID('adminprincipal', 'U') IS NOT NULL DROP TABLE adminprincipal
+GO
+IF OBJECT_ID('adminprincipaltype', 'U') IS NOT NULL DROP TABLE adminprincipaltype
+GO
+IF OBJECT_ID('adminresourcetype', 'U') IS NOT NULL DROP TABLE adminresourcetype
+GO
diff --git a/ambari-server/src/main/resources/custom_actions/check_host.py b/ambari-server/src/main/resources/custom_actions/check_host.py
index 898957ddf4..7430ba1954 100644
--- a/ambari-server/src/main/resources/custom_actions/check_host.py
+++ b/ambari-server/src/main/resources/custom_actions/check_host.py
@@ -24,6 +24,9 @@ import os
import subprocess
import socket
+from ambari_commons import os_utils
+from ambari_commons.os_check import OSCheck, OSConst
+from ambari_commons.inet_utils import download_file
from resource_management import Script, Execute, format
from ambari_agent.HostInfo import HostInfo
@@ -35,15 +38,18 @@ CHECK_LAST_AGENT_ENV = "last_agent_env_check"
DB_MYSQL = "mysql"
DB_ORACLE = "oracle"
DB_POSTGRESQL = "postgres"
+DB_MSSQL = "mssql"
JDBC_DRIVER_MYSQL = "com.mysql.jdbc.Driver"
JDBC_DRIVER_ORACLE = "oracle.jdbc.driver.OracleDriver"
JDBC_DRIVER_POSTGRESQL = "org.postgresql.Driver"
+JDBC_DRIVER_MSSQL = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
JDBC_DRIVER_SYMLINK_MYSQL = "mysql-jdbc-driver.jar"
JDBC_DRIVER_SYMLINK_ORACLE = "oracle-jdbc-driver.jar"
JDBC_DRIVER_SYMLINK_POSTGRESQL = "postgres-jdbc-driver.jar"
-
+JDBC_DRIVER_SYMLINK_MSSQL = "sqljdbc4.jar"
+JDBC_AUTH_SYMLINK_MSSQL = "sqljdbc_auth.dll"
class CheckHost(Script):
def actionexecute(self, env):
@@ -96,8 +102,11 @@ class CheckHost(Script):
java64_home = config['commandParams']['java_home']
print "Java home to check: " + java64_home
+ java_bin = "java"
+ if OSCheck.is_windows_family():
+ java_bin = "java.exe"
- if not os.path.isfile(os.path.join(java64_home, "bin", "java")):
+ if not os.path.isfile(os.path.join(java64_home, "bin", java_bin)):
print "Java home doesn't exist!"
java_home_check_structured_output = {"exit_code" : 1, "message": "Java home doesn't exist!"}
else:
@@ -130,11 +139,26 @@ class CheckHost(Script):
jdbc_url = jdk_location + JDBC_DRIVER_SYMLINK_POSTGRESQL
jdbc_driver = JDBC_DRIVER_POSTGRESQL
jdbc_name = JDBC_DRIVER_SYMLINK_POSTGRESQL
+ elif db_name == DB_MSSQL:
+ jdbc_url = jdk_location + JDBC_DRIVER_SYMLINK_MSSQL
+ jdbc_driver = JDBC_DRIVER_MSSQL
+ jdbc_name = JDBC_DRIVER_SYMLINK_MSSQL
db_connection_url = config['commandParams']['db_connection_url']
user_name = config['commandParams']['user_name']
user_passwd = config['commandParams']['user_passwd']
- java_exec = os.path.join(java64_home, "bin","java")
+ agent_cache_dir = os.path.abspath(config["hostLevelParams"]["agentCacheDir"])
+ check_db_connection_url = jdk_location + check_db_connection_jar_name
+ jdbc_path = os.path.join(agent_cache_dir, jdbc_name)
+ check_db_connection_path = os.path.join(agent_cache_dir, check_db_connection_jar_name)
+
+ java_bin = "java"
+ class_path_delimiter = ":"
+ if OSCheck.is_windows_family():
+ java_bin = "java.exe"
+ class_path_delimiter = ";"
+
+    java_exec = os.path.join(java64_home, "bin", java_bin)
if ('jdk_name' not in config['commandParams'] or config['commandParams']['jdk_name'] == None \
or config['commandParams']['jdk_name'] == '') and not os.path.isfile(java_exec):
@@ -145,18 +169,14 @@ class CheckHost(Script):
return db_connection_check_structured_output
environment = { "no_proxy": format("{ambari_server_hostname}") }
- artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
- java_dir = os.path.dirname(java64_home)
-
# download and install java if it doesn't exists
if not os.path.isfile(java_exec):
+ jdk_name = config['commandParams']['jdk_name']
+ jdk_url = "{}/{}".format(jdk_location, jdk_name)
+ jdk_download_target = os.path.join(agent_cache_dir, jdk_name)
+ java_dir = os.path.dirname(java64_home)
try:
- jdk_name = config['commandParams']['jdk_name']
- jdk_curl_target = format("{artifact_dir}/{jdk_name}")
- Execute(format("mkdir -p {artifact_dir} ; curl -kf "
- "--retry 10 {jdk_location}/{jdk_name} -o {jdk_curl_target}"),
- path = ["/bin","/usr/bin/"],
- environment = environment)
+ download_file(jdk_url, jdk_download_target)
except Exception, e:
message = "Error downloading JDK from Ambari Server resources. Check network access to " \
"Ambari Server.\n" + str(e)
@@ -165,13 +185,20 @@ class CheckHost(Script):
return db_connection_check_structured_output
if jdk_name.endswith(".bin"):
- install_cmd = format("mkdir -p {java_dir} ; chmod +x {jdk_curl_target}; cd {java_dir} ; echo A | " \
+ install_cmd = format("mkdir -p {java_dir} ; chmod +x {jdk_download_target}; cd {java_dir} ; echo A | " \
"{jdk_curl_target} -noregister > /dev/null 2>&1")
+ install_path = ["/bin","/usr/bin/"]
elif jdk_name.endswith(".gz"):
- install_cmd = format("mkdir -p {java_dir} ; cd {java_dir} ; tar -xf {jdk_curl_target} > /dev/null 2>&1")
+ install_cmd = format("mkdir -p {java_dir} ; cd {java_dir} ; tar -xf {jdk_download_target} > /dev/null 2>&1")
+ install_path = ["/bin","/usr/bin/"]
+ elif jdk_name.endswith(".exe"):
+ install_cmd = "{} /s INSTALLDIR={} STATIC=1 WEB_JAVA=0 /L \\var\\log\\ambari-agent".format(
+ os_utils.quote_path(jdk_download_target), os_utils.quote_path(java64_home),
+ )
+ install_path = [java_dir]
try:
- Execute(install_cmd, path = ["/bin","/usr/bin/"])
+ Execute(install_cmd, path = install_path)
except Exception, e:
message = "Error installing java.\n" + str(e)
print message
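
The three install branches above dispatch on the JDK artifact's extension: .bin is the self-extracting archive (driven past its license prompt with echo A), .gz is a plain tarball, and the new .exe branch runs the Oracle Windows installer silently into INSTALLDIR. A condensed sketch of that dispatch (jdk_install_command is a hypothetical helper, not in the patch):

    # Pick an install command and a shell search path per JDK artifact type.
    def jdk_install_command(jdk_name, target, java_dir, java_home):
        if jdk_name.endswith(".bin"):    # self-extracting archive
            return ("mkdir -p %s ; chmod +x %s ; cd %s ; echo A | %s -noregister"
                    % (java_dir, target, java_dir, target)), ["/bin", "/usr/bin/"]
        if jdk_name.endswith(".gz"):     # gzipped tarball
            return ("mkdir -p %s ; cd %s ; tar -xf %s"
                    % (java_dir, java_dir, target)), ["/bin", "/usr/bin/"]
        if jdk_name.endswith(".exe"):    # Windows silent install
            return ('"%s" /s INSTALLDIR="%s" STATIC=1 WEB_JAVA=0'
                    % (target, java_home)), [java_dir]
        raise ValueError("unsupported JDK artifact: " + jdk_name)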
@@ -180,10 +207,8 @@ class CheckHost(Script):
# download DBConnectionVerification.jar from ambari-server resources
try:
- cmd = format("/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf "
- "--retry 5 {jdk_location}{check_db_connection_jar_name} "
- "-o {check_db_connection_jar_name}'")
- Execute(cmd, not_if=format("[ -f /usr/lib/ambari-agent/{check_db_connection_jar_name}]"), environment = environment)
+ download_file(check_db_connection_url, check_db_connection_path)
+
except Exception, e:
message = "Error downloading DBConnectionVerification.jar from Ambari Server resources. Check network access to " \
"Ambari Server.\n" + str(e)
@@ -192,11 +217,12 @@ class CheckHost(Script):
return db_connection_check_structured_output
# download jdbc driver from ambari-server resources
-
try:
- cmd = format("/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf "
- "--retry 5 {jdbc_url} -o {jdbc_name}'")
- Execute(cmd, not_if=format("[ -f /usr/lib/ambari-agent/{jdbc_name}]"), environment = environment)
+ download_file(jdbc_url, jdbc_path)
+ if db_name == DB_MSSQL:
+ jdbc_auth_path = os.path.join(agent_cache_dir, JDBC_AUTH_SYMLINK_MSSQL)
+ jdbc_auth_url = jdk_location + JDBC_AUTH_SYMLINK_MSSQL
+ download_file(jdbc_auth_url, jdbc_auth_path)
except Exception, e:
message = format("Error: Ambari Server cannot download the database JDBC driver and is unable to test the " \
"database connection. You must run ambari-server setup --jdbc-db={db_name} " \
@@ -208,11 +234,10 @@ class CheckHost(Script):
# try to connect to db
-
- db_connection_check_command = format("{java64_home}/bin/java -cp /usr/lib/ambari-agent/{check_db_connection_jar_name}:" \
- "/usr/lib/ambari-agent/{jdbc_name} org.apache.ambari.server.DBConnectionVerification '{db_connection_url}' " \
+ db_connection_check_command = format("{java_exec} -cp {check_db_connection_path}{class_path_delimiter}" \
+ "{jdbc_path} -Djava.library.path={agent_cache_dir} org.apache.ambari.server.DBConnectionVerification {db_connection_url} " \
"{user_name} {user_passwd!p} {jdbc_driver}")
-
+ print "INFO db_connection_check_command: " + db_connection_check_command
process = subprocess.Popen(db_connection_check_command,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
@@ -229,7 +254,7 @@ class CheckHost(Script):
db_connection_check_structured_output = {"exit_code" : 1, "message": stdoutdata + stderrdata }
return db_connection_check_structured_output
-
+
# check whether each host in the command can be resolved to an IP address
def execute_host_resolution_check(self, config):
print "IP address forward resolution check started."
diff --git a/ambari-server/src/main/resources/sqlserver_properties.json b/ambari-server/src/main/resources/sqlserver_properties.json
new file mode 100644
index 0000000000..10756f2f7e
--- /dev/null
+++ b/ambari-server/src/main/resources/sqlserver_properties.json
@@ -0,0 +1,23463 @@
+{
+ "Cluster":{
+ "*":{
+ "metrics/cpu/Idle":{
+ "metric":"cpu_report.Idle\\g",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/cpu/Nice":{
+ "metric":"cpu_report.Nice\\g",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/cpu/System":{
+ "metric":"cpu_report.System\\g",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/cpu/User":{
+ "metric":"cpu_report.User\\g",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/cpu/Wait":{
+ "metric":"cpu_report.Wait\\g",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/load/1-min":{
+ "metric":"load_report.1-min",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/load/CPUs":{
+ "metric":"load_report.CPUs ",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/load/Nodes":{
+ "metric":"load_report.Nodes",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/load/Procs":{
+ "metric":"load_report.Procs",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/memory/Buffer":{
+ "metric":"mem_report.Buffer\\g",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/memory/Cache":{
+ "metric":"mem_report.Cache\\g",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/memory/Share":{
+ "metric":"mem_report.Share\\g",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/memory/Swap":{
+ "metric":"mem_report.Swap\\g",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/memory/Total":{
+ "metric":"mem_report.Total\\g",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/memory/Use":{
+ "metric":"mem_report.Use\\g",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/network/In":{
+ "metric":"network_report.In ",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/network/Out":{
+ "metric":"network_report.Out",
+ "pointInTime":false,
+ "temporal":true
+ }
+ }
+ },
+
+ "Host":{
+ "*":{
+ "metrics/boottime":{
+ "metric":"boottime",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_aidle":{
+ "metric":"cpu_aidle",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_idle":{
+ "metric":"cpu_idle",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_nice":{
+ "metric":"cpu_nice",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_num":{
+ "metric":"cpu_num",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_speed":{
+ "metric":"cpu_speed",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_system":{
+ "metric":"cpu_system",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_user":{
+ "metric":"cpu_user",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_wio":{
+ "metric":"cpu_wio",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/disk/disk_free":{
+ "metric":"disk_free",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/disk/disk_total":{
+ "metric":"disk_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/gcCount":{
+ "metric":"jvm.JvmMetrics.gcCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/gcTimeMillis":{
+ "metric":"jvm.JvmMetrics.gcTimeMillis",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/logError":{
+ "metric":"jvm.JvmMetrics.logError",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/logFatal":{
+ "metric":"jvm.JvmMetrics.logFatal",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/logInfo":{
+ "metric":"jvm.JvmMetrics.logInfo",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/logWarn":{
+ "metric":"jvm.JvmMetrics.logWarn",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/maxMemoryM":{
+ "metric":"jvm.JvmMetrics.maxMemoryM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memHeapCommittedM":{
+ "metric":"jvm.JvmMetrics.memHeapCommittedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memHeapUsedM":{
+ "metric":"jvm.JvmMetrics.memHeapUsedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memNonHeapCommittedM":{
+ "metric":"jvm.JvmMetrics.memNonHeapCommittedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memNonHeapUsedM":{
+ "metric":"jvm.JvmMetrics.memNonHeapUsedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsBlocked":{
+ "metric":"jvm.JvmMetrics.threadsBlocked",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsNew":{
+ "metric":"jvm.JvmMetrics.threadsNew",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsRunnable":{
+ "metric":"jvm.JvmMetrics.threadsRunnable",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsTerminated":{
+ "metric":"jvm.JvmMetrics.threadsTerminated",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsTimedWaiting":{
+ "metric":"jvm.JvmMetrics.threadsTimedWaiting",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsWaiting":{
+ "metric":"jvm.JvmMetrics.threadsWaiting",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/load/load_fifteen":{
+ "metric":"load_fifteen",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/load/load_five":{
+ "metric":"load_five",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/load/load_one":{
+ "metric":"load_one",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_buffers":{
+ "metric":"mem_buffers",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_cached":{
+ "metric":"mem_cached",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_free":{
+ "metric":"mem_free",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_shared":{
+ "metric":"mem_shared",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_total":{
+ "metric":"mem_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/swap_free":{
+ "metric":"swap_free",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/swap_total":{
+ "metric":"swap_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/bytes_in":{
+ "metric":"bytes_in",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/bytes_out":{
+ "metric":"bytes_out",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/pkts_in":{
+ "metric":"pkts_in",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/pkts_out":{
+ "metric":"pkts_out",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/part_max_used":{
+ "metric":"part_max_used",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/process/proc_run":{
+ "metric":"proc_run",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/process/proc_total":{
+ "metric":"proc_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/NumOpenConnections":{
+ "metric":"rpc.metrics.NumOpenConnections",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/ReceivedBytes":{
+ "metric":"rpc.rpc.ReceivedBytes",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcProcessingTime_avg_time":{
+ "metric":"rpc.rpc.RpcProcessingTime_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcProcessingTime_num_ops":{
+ "metric":"rpc.rpc.RpcProcessingTime_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcQueueTime_avg_time":{
+ "metric":"rpc.metrics.RpcQueueTime_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcQueueTime_num_ops":{
+ "metric":"rpc.rpc.RpcQueueTime_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcSlowResponse_avg_time":{
+ "metric":"rpc.metrics.RpcSlowResponse_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcSlowResponse_num_ops":{
+ "metric":"rpc.metrics.RpcSlowResponse_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/SentBytes":{
+ "metric":"rpc.rpc.SentBytes",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/abort/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.abort.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/abort/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.abort.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/abort_avg_time":{
+ "metric":"rpc.metrics.abort_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/abort_num_ops":{
+ "metric":"rpc.metrics.abort_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addColumn_avg_time":{
+ "metric":"rpc.metrics.addColumn_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addColumn_num_ops":{
+ "metric":"rpc.metrics.addColumn_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addToOnlineRegions/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.addToOnlineRegions.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addToOnlineRegions/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.addToOnlineRegions.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addToOnlineRegions_avg_time":{
+ "metric":"rpc.metrics.addToOnlineRegions_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addToOnlineRegions_num_ops":{
+ "metric":"rpc.metrics.addToOnlineRegions_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/assign_avg_time":{
+ "metric":"rpc.metrics.assign_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/assign_num_ops":{
+ "metric":"rpc.metrics.assign_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balanceSwitch_avg_time":{
+ "metric":"rpc.metrics.balanceSwitch_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balanceSwitch_num_ops":{
+ "metric":"rpc.metrics.balanceSwitch_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balance_avg_time":{
+ "metric":"rpc.metrics.balance_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balance_num_ops":{
+ "metric":"rpc.metrics.balance_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/bulkLoadHFiles/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.bulkLoadHFiles.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/bulkLoadHFiles/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.bulkLoadHFiles.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/bulkLoadHFiles_avg_time":{
+ "metric":"rpc.metrics.bulkLoadHFiles_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/bulkLoadHFiles_num_ops":{
+ "metric":"rpc.metrics.bulkLoadHFiles_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/callQueueLen":{
+ "metric":"rpc.metrics.callQueueLen",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndDelete/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.checkAndDelete.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndDelete/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.checkAndDelete.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndDelete_avg_time":{
+ "metric":"rpc.metrics.checkAndDelete_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndDelete_num_ops":{
+ "metric":"rpc.metrics.checkAndDelete_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndPut/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.checkAndPut.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndPut/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.checkAndPut.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndPut_avg_time":{
+ "metric":"rpc.metrics.checkAndPut_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndPut_num_ops":{
+ "metric":"rpc.metrics.checkAndPut_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkOOME/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.checkOOME.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkOOME/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.checkOOME.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkOOME_avg_time":{
+ "metric":"rpc.metrics.checkOOME_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkOOME_num_ops":{
+ "metric":"rpc.metrics.checkOOME_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/close/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.close.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/close/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.close.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/closeRegion/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.closeRegion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/closeRegion/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.closeRegion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/closeRegion_avg_time":{
+ "metric":"rpc.metrics.closeRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/closeRegion_num_ops":{
+ "metric":"rpc.metrics.closeRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/close_avg_time":{
+ "metric":"rpc.metrics.close_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/close_num_ops":{
+ "metric":"rpc.metrics.close_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/compactRegion/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.compactRegion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/compactRegion/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.compactRegion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/compactRegion_avg_time":{
+ "metric":"rpc.metrics.compactRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/compactRegion_num_ops":{
+ "metric":"rpc.metrics.compactRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/createTable_avg_time":{
+ "metric":"rpc.metrics.createTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/createTable_num_ops":{
+ "metric":"rpc.metrics.createTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/delete/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.delete.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/delete/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.delete.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteColumn_avg_time":{
+ "metric":"rpc.metrics.deleteColumn_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteColumn_num_ops":{
+ "metric":"rpc.metrics.deleteColumn_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteTable_avg_time":{
+ "metric":"rpc.metrics.deleteTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteTable_num_ops":{
+ "metric":"rpc.metrics.deleteTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/delete_avg_time":{
+ "metric":"rpc.metrics.delete_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/delete_num_ops":{
+ "metric":"rpc.metrics.delete_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/disableTable_avg_time":{
+ "metric":"rpc.metrics.disableTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/disableTable_num_ops":{
+ "metric":"rpc.metrics.disableTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/enableTable_avg_time":{
+ "metric":"rpc.metrics.enableTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/enableTable_num_ops":{
+ "metric":"rpc.metrics.enableTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/execCoprocessor/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.execCoprocessor.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/execCoprocessor/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.execCoprocessor.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/execCoprocessor_avg_time":{
+ "metric":"rpc.metrics.execCoprocessor_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/execCoprocessor_num_ops":{
+ "metric":"rpc.metrics.execCoprocessor_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/exists/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.exists.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/exists/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.exists.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/exists_avg_time":{
+ "metric":"rpc.metrics.exists_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/exists_num_ops":{
+ "metric":"rpc.metrics.exists_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/flushRegion/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.flushRegion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/flushRegion/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.flushRegion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/flushRegion_avg_time":{
+ "metric":"rpc.metrics.flushRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/flushRegion_num_ops":{
+ "metric":"rpc.metrics.flushRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/get/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.get.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/get/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.get.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getAlterStatus_avg_time":{
+ "metric":"rpc.metrics.getAlterStatus_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getAlterStatus_num_ops":{
+ "metric":"rpc.metrics.getAlterStatus_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.getBlockCacheColumnFamilySummaries.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.getBlockCacheColumnFamilySummaries.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries_avg_time":{
+ "metric":"rpc.metrics.getBlockCacheColumnFamilySummaries_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries_num_ops":{
+ "metric":"rpc.metrics.getBlockCacheColumnFamilySummaries_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getCatalogTracker/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.getCatalogTracker.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getCatalogTracker/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.getCatalogTracker.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getCatalogTracker_avg_time":{
+ "metric":"rpc.metrics.getCatalogTracker_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getCatalogTracker_num_ops":{
+ "metric":"rpc.metrics.getCatalogTracker_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClosestRowBefore/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.getClosestRowBefore.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClosestRowBefore/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.getClosestRowBefore.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClosestRowBefore_avg_time":{
+ "metric":"rpc.metrics.getClosestRowBefore_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClosestRowBefore_num_ops":{
+ "metric":"rpc.metrics.getClosestRowBefore_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClusterStatus_avg_time":{
+ "metric":"rpc.metrics.getClusterStatus_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClusterStatus_num_ops":{
+ "metric":"rpc.metrics.getClusterStatus_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getConfiguration/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.getConfiguration.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getConfiguration/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.getConfiguration.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getConfiguration_avg_time":{
+ "metric":"rpc.metrics.getConfiguration_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getConfiguration_num_ops":{
+ "metric":"rpc.metrics.getConfiguration_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getFromOnlineRegions/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.getFromOnlineRegions.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getFromOnlineRegions/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.getFromOnlineRegions.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getFromOnlineRegions_avg_time":{
+ "metric":"rpc.metrics.getFromOnlineRegions_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getFromOnlineRegions_num_ops":{
+ "metric":"rpc.metrics.getFromOnlineRegions_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHServerInfo/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.getHServerInfo.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHServerInfo/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.getHServerInfo.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHServerInfo_avg_time":{
+ "metric":"rpc.metrics.getHServerInfo_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHServerInfo_num_ops":{
+ "metric":"rpc.metrics.getHServerInfo_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHTableDescriptors_avg_time":{
+ "metric":"rpc.metrics.getHTableDescriptors_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHTableDescriptors_num_ops":{
+ "metric":"rpc.metrics.getHTableDescriptors_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getOnlineRegions/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.getOnlineRegions.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getOnlineRegions/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.getOnlineRegions.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getOnlineRegions_avg_time":{
+ "metric":"rpc.metrics.getOnlineRegions_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getOnlineRegions_num_ops":{
+ "metric":"rpc.metrics.getOnlineRegions_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolSignature/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.getProtocolSignature.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolSignature/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.getProtocolSignature.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolSignature_avg_time":{
+ "metric":"rpc.metrics.getProtocolSignature_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolSignature_num_ops":{
+ "metric":"rpc.metrics.getProtocolSignature_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolVersion/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.getProtocolVersion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolVersion/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.getProtocolVersion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolVersion_avg_time":{
+ "metric":"rpc.metrics.getProtocolVersion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolVersion_num_ops":{
+ "metric":"rpc.metrics.getProtocolVersion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getRegionInfo/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.getRegionInfo.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getRegionInfo/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.getRegionInfo.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getRegionInfo_avg_time":{
+ "metric":"rpc.metrics.getRegionInfo_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getRegionInfo_num_ops":{
+ "metric":"rpc.metrics.getRegionInfo_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getServerName/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.getServerName.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getServerName/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.getServerName.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getServerName_avg_time":{
+ "metric":"rpc.metrics.getServerName_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getServerName_num_ops":{
+ "metric":"rpc.metrics.getServerName_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getZooKeeper/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.getZooKeeper.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getZooKeeper/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.getZooKeeper.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getZooKeeper_avg_time":{
+ "metric":"rpc.metrics.getZooKeeper_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getZooKeeper_num_ops":{
+ "metric":"rpc.metrics.getZooKeeper_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/get_avg_time":{
+ "metric":"rpc.metrics.get_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/get_num_ops":{
+ "metric":"rpc.metrics.get_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/increment/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.increment.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/increment/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.increment.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/incrementColumnValue/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.incrementColumnValue.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/incrementColumnValue/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.incrementColumnValue.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/incrementColumnValue_avg_time":{
+ "metric":"rpc.metrics.incrementColumnValue_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/incrementColumnValue_num_ops":{
+ "metric":"rpc.metrics.incrementColumnValue_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/increment_avg_time":{
+ "metric":"rpc.metrics.increment_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/increment_num_ops":{
+ "metric":"rpc.metrics.increment_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isAborted/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.isAborted.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isAborted/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.isAborted.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isAborted_avg_time":{
+ "metric":"rpc.metrics.isAborted_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isAborted_num_ops":{
+ "metric":"rpc.metrics.isAborted_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isMasterRunning_avg_time":{
+ "metric":"rpc.metrics.isMasterRunning_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isMasterRunning_num_ops":{
+ "metric":"rpc.metrics.isMasterRunning_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isStopped/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.isStopped.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isStopped/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.isStopped.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isStopped_avg_time":{
+ "metric":"rpc.metrics.isStopped_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isStopped_num_ops":{
+ "metric":"rpc.metrics.isStopped_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/lockRow/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.lockRow.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/lockRow/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.lockRow.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/lockRow_avg_time":{
+ "metric":"rpc.metrics.lockRow_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/lockRow_num_ops":{
+ "metric":"rpc.metrics.lockRow_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyColumn_avg_time":{
+ "metric":"rpc.metrics.modifyColumn_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyColumn_num_ops":{
+ "metric":"rpc.metrics.modifyColumn_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyTable_avg_time":{
+ "metric":"rpc.metrics.modifyTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyTable_num_ops":{
+ "metric":"rpc.metrics.modifyTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/move_avg_time":{
+ "metric":"rpc.metrics.move_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/move_num_ops":{
+ "metric":"rpc.metrics.move_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/multi/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.multi.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/multi/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.multi.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/multi_avg_time":{
+ "metric":"rpc.metrics.multi_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/multi_num_ops":{
+ "metric":"rpc.metrics.multi_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/next/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.next.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/next/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.next.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/next_avg_time":{
+ "metric":"rpc.metrics.next_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/next_num_ops":{
+ "metric":"rpc.metrics.next_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/offline_avg_time":{
+ "metric":"rpc.metrics.offline_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/offline_num_ops":{
+ "metric":"rpc.metrics.offline_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegion/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.openRegion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegion/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.openRegion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegion_avg_time":{
+ "metric":"rpc.metrics.openRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegion_num_ops":{
+ "metric":"rpc.metrics.openRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegions/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.openRegions.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegions/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.openRegions.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegions_avg_time":{
+ "metric":"rpc.metrics.openRegions_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegions_num_ops":{
+ "metric":"rpc.metrics.openRegions_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openScanner/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.openScanner.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openScanner/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.openScanner.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openScanner_avg_time":{
+ "metric":"rpc.metrics.openScanner_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openScanner_num_ops":{
+ "metric":"rpc.metrics.openScanner_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/put/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.put.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/put/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.put.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/put_avg_time":{
+ "metric":"rpc.metrics.put_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/put_num_ops":{
+ "metric":"rpc.metrics.put_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerReport_avg_time":{
+ "metric":"rpc.metrics.regionServerReport_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerReport_num_ops":{
+ "metric":"rpc.metrics.regionServerReport_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerStartup_avg_time":{
+ "metric":"rpc.metrics.regionServerStartup_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerStartup_num_ops":{
+ "metric":"rpc.metrics.regionServerStartup_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.removeFromOnlineRegions.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.removeFromOnlineRegions.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/removeFromOnlineRegions_avg_time":{
+ "metric":"rpc.metrics.removeFromOnlineRegions_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/removeFromOnlineRegions_num_ops":{
+ "metric":"rpc.metrics.removeFromOnlineRegions_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/replicateLogEntries/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.replicateLogEntries.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/replicateLogEntries/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.replicateLogEntries.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/replicateLogEntries_avg_time":{
+ "metric":"rpc.metrics.replicateLogEntries_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/replicateLogEntries_num_ops":{
+ "metric":"rpc.metrics.replicateLogEntries_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/reportRSFatalError_avg_time":{
+ "metric":"rpc.metrics.reportRSFatalError_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/reportRSFatalError_num_ops":{
+ "metric":"rpc.metrics.reportRSFatalError_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rollHLogWriter/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.rollHLogWriter.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rollHLogWriter/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.rollHLogWriter.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rollHLogWriter_avg_time":{
+ "metric":"rpc.metrics.rollHLogWriter_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rollHLogWriter_num_ops":{
+ "metric":"rpc.metrics.rollHLogWriter_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthenticationFailures":{
+ "metric":"rpc.metrics.rpcAuthenticationFailures",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthenticationSuccesses":{
+ "metric":"rpc.metrics.rpcAuthenticationSuccesses",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthorizationFailures":{
+ "metric":"rpc.rpc.rpcAuthorizationFailures",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthorizationSuccesses":{
+ "metric":"rpc.metrics.rpcAuthorizationSuccesses",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/shutdown_avg_time":{
+ "metric":"rpc.metrics.shutdown_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/shutdown_num_ops":{
+ "metric":"rpc.metrics.shutdown_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/splitRegion/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.splitRegion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/splitRegion/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.splitRegion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/splitRegion_avg_time":{
+ "metric":"rpc.metrics.splitRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/splitRegion_num_ops":{
+ "metric":"rpc.metrics.splitRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stop/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.stop.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stop/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.stop.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stopMaster_avg_time":{
+ "metric":"rpc.metrics.stopMaster_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stopMaster_num_ops":{
+ "metric":"rpc.metrics.stopMaster_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stop_avg_time":{
+ "metric":"rpc.metrics.stop_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stop_num_ops":{
+ "metric":"rpc.metrics.stop_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unassign_avg_time":{
+ "metric":"rpc.metrics.unassign_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unassign_num_ops":{
+ "metric":"rpc.metrics.unassign_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unlockRow/aboveOneSec/_avg_time":{
+ "metric":"rpc.metrics.unlockRow.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unlockRow/aboveOneSec/_num_ops":{
+ "metric":"rpc.metrics.unlockRow.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unlockRow_avg_time":{
+ "metric":"rpc.metrics.unlockRow_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unlockRow_num_ops":{
+ "metric":"rpc.metrics.unlockRow_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/canCommit_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.canCommit_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/canCommit_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.canCommit_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/commitPending_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.commitPending_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/commitPending_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.commitPending_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/done_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.done_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/done_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.done_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getBlockLocalPathInfo_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getBlockLocalPathInfo_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getBlockLocalPathInfo_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getBlockLocalPathInfo_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getProtocolVersion_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getProtocolVersion_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getTask_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getTask_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getTask_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getTask_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/ping_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.ping_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/ping_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.ping_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/statusUpdate_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.statusUpdate_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/statusUpdate_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.statusUpdate_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/ugi/loginFailure_avg_time":{
+ "metric":"ugi.ugi.loginFailure_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/ugi/loginFailure_num_ops":{
+ "metric":"ugi.ugi.loginFailure_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/ugi/loginSuccess_avg_time":{
+ "metric":"ugi.ugi.loginSuccess_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/ugi/loginSuccess_num_ops":{
+ "metric":"ugi.ugi.loginSuccess_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ }
+ }
+ },
+
+ "Component":{
+ "NAMENODE":{
+ "metrics/dfs/FSNamesystem/TotalLoad": {
+ "metric": "dfs.FSNamesystem.TotalLoad",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/FSNamesystem/BlockCapacity": {
+ "metric": "dfs.FSNamesystem.BlockCapacity",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/GetListingOps": {
+ "metric": "dfs.namenode.GetListingOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/FilesAppended": {
+ "metric": "dfs.namenode.FilesAppended",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/getProtocolVersion_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/fsync_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.FsyncAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/ugi/loginSuccess_avg_time": {
+ "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/load/load_one": {
+ "metric": "load_one",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/renewLease_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.RenewLeaseNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/getFileInfo_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.GetFileInfoAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/memNonHeapUsedM": {
+ "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/complete_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.CompleteAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/setPermission_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.SetPermissionNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/FSNamesystem/CapacityTotalGB": {
+ "metric": "dfs.FSNamesystem.CapacityTotalGB",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/FSNamesystem/CapacityTotal": {
+ "metric": "dfs.FSNamesystem.CapacityTotal",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/setOwner_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.SetOwnerNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/getBlockLocations_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/process/proc_run": {
+ "metric": "proc_run",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/FSNamesystem/CapacityUsedGB": {
+ "metric": "dfs.FSNamesystem.CapacityUsedGB",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/FSNamesystem/CapacityUsed": {
+ "metric": "dfs.FSNamesystem.CapacityUsed",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/AddBlockOps": {
+ "metric": "dfs.namenode.AddBlockOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/memory/swap_total": {
+ "metric": "swap_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/FilesDeleted": {
+ "metric": "dfs.namenode.FilesDeleted",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/Syncs_avg_time": {
+ "metric": "dfs.namenode.SyncsAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/threadsBlocked": {
+ "metric": "jvm.JvmMetrics.ThreadsBlocked",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/RpcQueueTime_num_ops": {
+ "metric": "rpc.rpc.RpcQueueTimeNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/process/proc_total": {
+ "metric": "proc_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/blockReport_avg_time": {
+ "metric": "dfs.namenode.BlockReportAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/disk/part_max_used": {
+ "metric": "part_max_used",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/getFileInfo_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.GetFileInfoNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/getEditLogSize_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/ugi/loginSuccess_num_ops": {
+ "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/blockReceived_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_idle": {
+ "metric": "cpu_idle",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/versionRequest_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.VersionRequestAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_aidle": {
+ "metric": "cpu_aidle",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/mem_free": {
+ "metric": "mem_free",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/versionRequest_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.VersionRequestNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/addBlock_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.AddBlockNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/FilesCreated": {
+ "metric": "dfs.namenode.FilesCreated",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/rename_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.RenameAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/network/bytes_in": {
+ "metric": "bytes_in",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/setSafeMode_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.SetSafeModeNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/network/pkts_out": {
+ "metric": "pkts_out",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/memNonHeapCommittedM": {
+ "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/memory/mem_cached": {
+ "metric": "mem_cached",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/disk/disk_total": {
+ "metric": "disk_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/setPermission_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.SetPermissionAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/FilesRenamed": {
+ "metric": "dfs.namenode.FilesRenamed",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/register_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/setReplication_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.setReplication_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/JournalTransactionsBatchedInSync": {
+ "metric": "dfs.namenode.JournalTransactionsBatchedInSync",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/ugi/loginFailure_num_ops": {
+ "metric": "ugi.UgiMetrics.LoginFailureNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/GetBlockLocations": {
+ "metric": "dfs.namenode.GetBlockLocations",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/fsync_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.FsyncNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_wio": {
+ "metric": "cpu_wio",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/create_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.CreateAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/FSNamesystem/PendingReplicationBlocks": {
+ "metric": "dfs.FSNamesystem.PendingReplicationBlocks",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_speed": {
+ "metric": "cpu_speed",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/delete_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.DeleteAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/FileInfoOps": {
+ "metric": "dfs.namenode.FileInfoOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/sendHeartbeat_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.SendHeartbeatNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/DeleteFileOps": {
+ "metric": "dfs.namenode.DeleteFileOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/RpcProcessingTime_avg_time": {
+ "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/blockReport_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.BlockReportNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/setSafeMode_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.SetSafeModeAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthenticationSuccesses": {
+ "metric": "rpc.rpc.RpcAuthenticationSuccesses",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/FSNamesystem/PendingDeletionBlocks": {
+ "metric": "dfs.FSNamesystem.PendingDeletionBlocks",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthenticationFailures": {
+ "metric": "rpc.rpc.RpcAuthenticationFailures",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/network/pkts_in": {
+ "metric": "pkts_in",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/mem_total": {
+ "metric": "mem_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/getEditLogSize_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/memHeapCommittedM": {
+ "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/FilesInGetListingOps": {
+ "metric": "dfs.namenode.FilesInGetListingOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/threadsRunnable": {
+ "metric": "jvm.JvmMetrics.ThreadsRunnable",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/complete_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.CompleteNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/threadsNew": {
+ "metric": "jvm.JvmMetrics.ThreadsNew",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/rollFsImage_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.rollFsImage_num_ops",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthorizationFailures": {
+ "metric": "rpc.rpc.RpcAuthorizationFailures",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/Syncs_num_ops": {
+ "metric": "dfs.namenode.SyncsNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/RpcQueueTime_avg_time": {
+ "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/blockReceived_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/setReplication_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.setReplication_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/rollEditLog_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.RollEditLogAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/SentBytes": {
+ "metric": "rpc.rpc.SentBytes",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/FSNamesystem/FilesTotal": {
+ "metric": "dfs.FSNamesystem.FilesTotal",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/logWarn": {
+ "metric": "jvm.JvmMetrics.LogWarn",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/FSNamesystem/ExcessBlocks": {
+ "metric": "dfs.FSNamesystem.ExcessBlocks",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/threadsTimedWaiting": {
+ "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/gcCount": {
+ "metric": "jvm.JvmMetrics.GcCount",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/ReceivedBytes": {
+ "metric": "rpc.rpc.ReceivedBytes",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_nice": {
+ "metric": "cpu_nice",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/blockReport_num_ops": {
+ "metric": "dfs.namenode.BlockReportNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/SafemodeTime": {
+ "metric": "dfs.namenode.SafemodeTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/rollFsImage_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.rollFsImage_avg_time",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/mkdirs_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.MkdirsAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/NumOpenConnections": {
+ "metric": "rpc.rpc.NumOpenConnections",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/memHeapUsedM": {
+ "metric": "jvm.JvmMetrics.MemHeapUsedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks": {
+ "metric": "dfs.FSNamesystem.ScheduledReplicationBlocks",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/threadsWaiting": {
+ "metric": "jvm.JvmMetrics.ThreadsWaiting",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/disk/disk_free": {
+ "metric": "disk_free",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/FSNamesystem/BlocksTotal": {
+ "metric": "dfs.FSNamesystem.BlocksTotal",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/memory/mem_buffers": {
+ "metric": "mem_buffers",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/gcTimeMillis": {
+ "metric": "jvm.JvmMetrics.GcTimeMillis",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/getBlockLocations_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/Transactions_num_ops": {
+ "metric": "dfs.namenode.TransactionsNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/create_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.CreateNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/threadsTerminated": {
+ "metric": "jvm.JvmMetrics.ThreadsTerminated",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/network/bytes_out": {
+ "metric": "bytes_out",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_user": {
+ "metric": "cpu_user",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/swap_free": {
+ "metric": "swap_free",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/load/load_five": {
+ "metric": "load_five",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_system": {
+ "metric": "cpu_system",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/FSNamesystem/CapacityRemainingGB": {
+ "metric": "dfs.FSNamesystem.CapacityRemainingGB",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/FSNamesystem/CapacityRemaining": {
+ "metric": "dfs.FSNamesystem.CapacityRemaining",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/Transactions_avg_time": {
+ "metric": "dfs.namenode.TransactionsAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/boottime": {
+ "metric": "boottime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/FSNamesystem/MissingBlocks": {
+ "metric": "dfs.FSNamesystem.MissingBlocks",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/callQueueLen": {
+ "metric": "rpc.rpc.CallQueueLength",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/delete_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.DeleteNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/FSNamesystem/CorruptBlocks": {
+ "metric": "dfs.FSNamesystem.CorruptBlocks",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/rename_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.RenameNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/blockReport_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.BlockReportAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/mkdirs_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.MkdirsNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/load/load_fifteen": {
+ "metric": "load_fifteen",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/logInfo": {
+ "metric": "jvm.JvmMetrics.LogInfo",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/fsImageLoadTime": {
+ "metric": "dfs.namenode.FsImageLoadTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/getListing_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.GetListingNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/blocksBeingWrittenReport_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/rollEditLog_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.RollEditLogNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/addBlock_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.AddBlockAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/blocksBeingWrittenReport_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/setOwner_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.SetOwnerAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/RpcProcessingTime_num_ops": {
+ "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/memory/mem_shared": {
+ "metric": "mem_shared",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/FSNamesystem/UnderReplicatedBlocks": {
+ "metric": "dfs.FSNamesystem.UnderReplicatedBlocks",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/sendHeartbeat_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.SendHeartbeatAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/CreateFileOps": {
+ "metric": "dfs.namenode.CreateFileOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/logError": {
+ "metric": "jvm.JvmMetrics.LogError",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/ugi/loginFailure_avg_time": {
+ "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_num": {
+ "metric": "cpu_num",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/getProtocolVersion_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/register_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthorizationSuccesses": {
+ "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/getListing_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.GetListingAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/logFatal": {
+ "metric": "jvm.JvmMetrics.LogFatal",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/renewLease_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.RenewLeaseAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ }
+ },
+ "DATANODE":{
+ "metrics/dfs/datanode/heartBeats_avg_time": {
+ "metric": "dfs.datanode.HeartbeatsAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/closeRegion_num_ops": {
+ "metric": "rpc.rpc.closeRegion_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/unassign_num_ops": {
+ "metric": "rpc.rpc.unassign_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/modifyTable_num_ops": {
+ "metric": "rpc.rpc.modifyTable_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/getProtocolVersion_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/splitRegion/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.splitRegion.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/ugi/loginSuccess_avg_time": {
+ "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getZooKeeper/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.getZooKeeper.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getProtocolVersion_avg_time": {
+ "metric": "rpc.rpc.getProtocolVersion_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getHServerInfo/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.getHServerInfo.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/load/load_one": {
+ "metric": "load_one",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/writes_from_remote_client": {
+ "metric": "dfs.datanode.WritesFromRemoteClient",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getClosestRowBefore_num_ops": {
+ "metric": "rpc.rpc.getClosestRowBefore_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getClosestRowBefore/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.getClosestRowBefore.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/blocks_replicated": {
+ "metric": "dfs.datanode.BlocksReplicated",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/memNonHeapUsedM": {
+ "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/getTask_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.getTask_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/replicateLogEntries_num_ops": {
+ "metric": "rpc.rpc.replicateLogEntries_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/flushRegion_avg_time": {
+ "metric": "rpc.rpc.flushRegion_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/lockRow_num_ops": {
+ "metric": "rpc.rpc.lockRow_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/swap_total": {
+ "metric": "swap_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/multi_avg_time": {
+ "metric": "rpc.rpc.multi_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/openRegions/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.openRegions.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/stopMaster_num_ops": {
+ "metric": "rpc.rpc.stopMaster_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/blocks_get_local_pathinfo": {
+ "metric": "dfs.datanode.BlocksGetLocalPathInfo",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/balance_avg_time": {
+ "metric": "rpc.rpc.balance_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/process/proc_total": {
+ "metric": "proc_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/splitRegion_num_ops": {
+ "metric": "rpc.rpc.splitRegion_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/disk/part_max_used": {
+ "metric": "part_max_used",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getCatalogTracker_avg_time": {
+ "metric": "rpc.rpc.getCatalogTracker_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/readBlockOp_num_ops": {
+ "metric": "dfs.datanode.ReadBlockOpNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/writeBlockOp_num_ops": {
+ "metric": "dfs.datanode.WriteBlockOpNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/modifyColumn_avg_time": {
+ "metric": "rpc.rpc.modifyColumn_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/multi/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.multi.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/balance_num_ops": {
+ "metric": "rpc.rpc.balance_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getZooKeeper_num_ops": {
+ "metric": "rpc.rpc.getZooKeeper_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/shutdown_num_ops": {
+ "metric": "rpc.rpc.shutdown_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/flushRegion_num_ops": {
+ "metric": "rpc.rpc.flushRegion_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/ugi/loginSuccess_num_ops": {
+ "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/isAborted/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.isAborted.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_idle": {
+ "metric": "cpu_idle",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/rollHLogWriter/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.rollHLogWriter.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.removeFromOnlineRegions.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getProtocolSignature/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.getProtocolSignature.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/get_num_ops": {
+ "metric": "rpc.rpc.get_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/blockChecksumOp_num_ops": {
+ "metric": "dfs.datanode.BlockChecksumOpNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getServerName_avg_time": {
+ "metric": "rpc.rpc.getServerName_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/stopMaster_avg_time": {
+ "metric": "rpc.rpc.stopMaster_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getOnlineRegions/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.getOnlineRegions.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/network/bytes_in": {
+ "metric": "bytes_in",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/memNonHeapCommittedM": {
+ "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/removeFromOnlineRegions_num_ops": {
+ "metric": "rpc.rpc.removeFromOnlineRegions_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/abort_avg_time": {
+ "metric": "rpc.rpc.abort_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/openScanner/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.openScanner.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/ping_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.ping_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getRegionInfo_avg_time": {
+ "metric": "rpc.rpc.getRegionInfo_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getCatalogTracker/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.getCatalogTracker.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/getBlockLocalPathInfo_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.getBlockLocalPathInfo_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/enableTable_num_ops": {
+ "metric": "rpc.rpc.enableTable_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/lockRow/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.lockRow.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/lockRow_avg_time": {
+ "metric": "rpc.rpc.lockRow_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/commitPending_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.commitPending_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/checkOOME_num_ops": {
+ "metric": "rpc.rpc.checkOOME_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/addColumn_avg_time": {
+ "metric": "rpc.rpc.addColumn_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/addToOnlineRegions/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.addToOnlineRegions.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/reportRSFatalError_num_ops": {
+ "metric": "rpc.rpc.reportRSFatalError_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/ugi/loginFailure_num_ops": {
+ "metric": "ugi.UgiMetrics.LoginFailureNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_wio": {
+ "metric": "cpu_wio",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getConfiguration/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.getConfiguration.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getZooKeeper/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.getZooKeeper.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/reads_from_local_client": {
+ "metric": "dfs.datanode.ReadsFromLocalClient",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getServerName_num_ops": {
+ "metric": "rpc.rpc.getServerName_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getServerName/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.getServerName.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/blockReports_num_ops": {
+ "metric": "dfs.datanode.BlockReportsNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/reportRSFatalError_avg_time": {
+ "metric": "rpc.rpc.reportRSFatalError_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthenticationSuccesses": {
+ "metric": "rpc.rpc.RpcAuthenticationSuccesses",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/network/pkts_in": {
+ "metric": "pkts_in",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/isStopped/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.isStopped.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/disableTable_avg_time": {
+ "metric": "rpc.rpc.disableTable_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/mem_total": {
+ "metric": "mem_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/rollHLogWriter/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.rollHLogWriter.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/memHeapCommittedM": {
+ "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/abort/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.abort.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/threadsRunnable": {
+ "metric": "jvm.JvmMetrics.ThreadsRunnable",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/openRegion_avg_time": {
+ "metric": "rpc.rpc.openRegion_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/threadsNew": {
+ "metric": "jvm.JvmMetrics.ThreadsNew",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/unlockRow/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.unlockRow.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getClusterStatus_num_ops": {
+ "metric": "rpc.rpc.getClusterStatus_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getHTableDescriptors_avg_time": {
+ "metric": "rpc.rpc.getHTableDescriptors_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthorizationFailures": {
+ "metric": "rpc.rpc.RpcAuthorizationFailures",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/deleteColumn_num_ops": {
+ "metric": "rpc.rpc.deleteColumn_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/regionServerReport_avg_time": {
+ "metric": "rpc.rpc.regionServerReport_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/delete/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.delete.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/RpcQueueTime_avg_time": {
+ "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/increment_num_ops": {
+ "metric": "rpc.rpc.increment_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/getMapCompletionEvents_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.getMapCompletionEvents_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/stop/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.stop.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getAlterStatus_num_ops": {
+ "metric": "rpc.rpc.getAlterStatus_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/modifyColumn_num_ops": {
+ "metric": "rpc.rpc.modifyColumn_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/flushRegion/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.flushRegion.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/next_avg_time": {
+ "metric": "rpc.rpc.next_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/incrementColumnValue/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.incrementColumnValue.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/next/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.next.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/checkOOME_avg_time": {
+ "metric": "rpc.rpc.checkOOME_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/RpcSlowResponse_avg_time": {
+ "metric": "rpc.rpc.RpcSlowResponse_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getConfiguration_avg_time": {
+ "metric": "rpc.rpc.getConfiguration_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getServerName/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.getServerName.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/ReceivedBytes": {
+ "metric": "rpc.rpc.ReceivedBytes",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_nice": {
+ "metric": "cpu_nice",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/bulkLoadHFiles_num_ops": {
+ "metric": "rpc.rpc.bulkLoadHFiles_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/ping_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.ping_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/unassign_avg_time": {
+ "metric": "rpc.rpc.unassign_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/NumOpenConnections": {
+ "metric": "rpc.rpc.NumOpenConnections",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/gcTimeMillis": {
+ "metric": "jvm.JvmMetrics.GcTimeMillis",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/delete/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.delete.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/canCommit_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.canCommit_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/multi/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.multi.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/threadsTerminated": {
+ "metric": "jvm.JvmMetrics.ThreadsTerminated",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getProtocolVersion/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.getProtocolVersion.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/network/bytes_out": {
+ "metric": "bytes_out",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/checkAndDelete/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.checkAndDelete.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/balanceSwitch_avg_time": {
+ "metric": "rpc.rpc.balanceSwitch_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/load/load_five": {
+ "metric": "load_five",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/blocks_read": {
+ "metric": "dfs.datanode.BlocksRead",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/boottime": {
+ "metric": "boottime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/openRegion/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.openRegion.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/lockRow/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.lockRow.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/compactRegion_avg_time": {
+ "metric": "rpc.rpc.compactRegion_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/openRegion_num_ops": {
+ "metric": "rpc.rpc.openRegion_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/callQueueLen": {
+ "metric": "rpc.rpc.CallQueueLength",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/blocks_removed": {
+ "metric": "dfs.datanode.BlocksRemoved",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/compactRegion/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.compactRegion.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getProtocolSignature/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.getProtocolSignature.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/execCoprocessor_num_ops": {
+ "metric": "rpc.rpc.execCoprocessor_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/canCommit_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.canCommit_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getHServerInfo/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.getHServerInfo.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/readBlockOp_avg_time": {
+ "metric": "dfs.datanode.ReadBlockOpAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getOnlineRegions_avg_time": {
+ "metric": "rpc.rpc.getOnlineRegions_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getCatalogTracker/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.getCatalogTracker.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/incrementColumnValue_avg_time": {
+ "metric": "rpc.rpc.incrementColumnValue_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/move_num_ops": {
+ "metric": "rpc.rpc.move_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/RpcProcessingTime_num_ops": {
+ "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/stop_num_ops": {
+ "metric": "rpc.rpc.stop_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/replicateLogEntries_avg_time": {
+ "metric": "rpc.rpc.replicateLogEntries_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/deleteTable_num_ops": {
+ "metric": "rpc.rpc.deleteTable_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/blockChecksumOp_avg_time": {
+ "metric": "dfs.datanode.BlockChecksumOpAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/copyBlockOp_avg_time": {
+ "metric": "dfs.datanode.CopyBlockOpAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/get_avg_time": {
+ "metric": "rpc.rpc.get_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/logError": {
+ "metric": "jvm.JvmMetrics.LogError",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/multi_num_ops": {
+ "metric": "rpc.rpc.multi_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/next/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.next.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/writeBlockOp_avg_time": {
+ "metric": "dfs.datanode.WriteBlockOpAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries_num_ops": {
+ "metric": "rpc.rpc.getBlockCacheColumnFamilySummaries_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getFromOnlineRegions/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.getFromOnlineRegions.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/addToOnlineRegions_avg_time": {
+ "metric": "rpc.rpc.addToOnlineRegions_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getRegionInfo/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.getRegionInfo.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/deleteColumn_avg_time": {
+ "metric": "rpc.rpc.deleteColumn_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/bulkLoadHFiles_avg_time": {
+ "metric": "rpc.rpc.bulkLoadHFiles_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/isAborted/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.isAborted.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/exists_avg_time": {
+ "metric": "rpc.rpc.exists_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/stop/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.stop.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/addToOnlineRegions_num_ops": {
+ "metric": "rpc.rpc.addToOnlineRegions_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/abort/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.abort.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.removeFromOnlineRegions.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getOnlineRegions/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.getOnlineRegions.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/exists/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.exists.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/offline_avg_time": {
+ "metric": "rpc.rpc.offline_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/replicateLogEntries/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.replicateLogEntries.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/unlockRow_avg_time": {
+ "metric": "rpc.rpc.unlockRow_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/delete_num_ops": {
+ "metric": "rpc.rpc.delete_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getCatalogTracker_num_ops": {
+ "metric": "rpc.rpc.getCatalogTracker_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/regionServerStartup_avg_time": {
+ "metric": "rpc.rpc.regionServerStartup_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/checkOOME/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.checkOOME.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/exists_num_ops": {
+ "metric": "rpc.rpc.exists_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/checkAndDelete_num_ops": {
+ "metric": "rpc.rpc.checkAndDelete_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/checkAndPut/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.checkAndPut.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/closeRegion_avg_time": {
+ "metric": "rpc.rpc.closeRegion_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/getBlockLocalPathInfo_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.getBlockLocalPathInfo_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/close/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.close.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getProtocolSignature_avg_time": {
+ "metric": "rpc.rpc.getProtocolSignature_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/assign_avg_time": {
+ "metric": "rpc.rpc.assign_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/createTable_num_ops": {
+ "metric": "rpc.rpc.createTable_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/execCoprocessor_avg_time": {
+ "metric": "rpc.rpc.execCoprocessor_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/process/proc_run": {
+ "metric": "proc_run",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/close_avg_time": {
+ "metric": "rpc.rpc.close_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getConfiguration_num_ops": {
+ "metric": "rpc.rpc.getConfiguration_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/threadsBlocked": {
+ "metric": "jvm.JvmMetrics.ThreadsBlocked",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getHServerInfo_num_ops": {
+ "metric": "rpc.rpc.getHServerInfo_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/isStopped_avg_time": {
+ "metric": "rpc.rpc.isStopped_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/RpcQueueTime_num_ops": {
+ "metric": "rpc.rpc.RpcQueueTimeNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/stop_avg_time": {
+ "metric": "rpc.rpc.stop_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/rollHLogWriter_avg_time": {
+ "metric": "rpc.rpc.rollHLogWriter_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/writes_from_local_client": {
+ "metric": "dfs.datanode.WritesFromLocalClient",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/isStopped_num_ops": {
+ "metric": "rpc.rpc.isStopped_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/addToOnlineRegions/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.addToOnlineRegions.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/getMapCompletionEvents_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.getMapCompletionEvents_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/heartBeats_num_ops": {
+ "metric": "dfs.datanode.HeartbeatsNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/replaceBlockOp_num_ops": {
+ "metric": "dfs.datanode.ReplaceBlockOpNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/isMasterRunning_avg_time": {
+ "metric": "rpc.rpc.isMasterRunning_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/incrementColumnValue_num_ops": {
+ "metric": "rpc.rpc.incrementColumnValue_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/copyBlockOp_num_ops": {
+ "metric": "dfs.datanode.CopyBlockOpNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/bulkLoadHFiles/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.bulkLoadHFiles.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_aidle": {
+ "metric": "cpu_aidle",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/mem_free": {
+ "metric": "mem_free",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/execCoprocessor/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.execCoprocessor.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/close/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.close.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/assign_num_ops": {
+ "metric": "rpc.rpc.assign_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/enableTable_avg_time": {
+ "metric": "rpc.rpc.enableTable_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/incrementColumnValue/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.incrementColumnValue.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/checkAndDelete/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.checkAndDelete.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/network/pkts_out": {
+ "metric": "pkts_out",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getFromOnlineRegions_num_ops": {
+ "metric": "rpc.rpc.getFromOnlineRegions_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/close_num_ops": {
+ "metric": "rpc.rpc.close_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/mem_cached": {
+ "metric": "mem_cached",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getConfiguration/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.getConfiguration.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/disk/disk_total": {
+ "metric": "disk_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/get/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.get.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/done_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.done_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/openScanner_avg_time": {
+ "metric": "rpc.rpc.openScanner_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/RpcSlowResponse_num_ops": {
+ "metric": "rpc.rpc.RpcSlowResponse_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getFromOnlineRegions_avg_time": {
+ "metric": "rpc.rpc.getFromOnlineRegions_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/reads_from_remote_client": {
+ "metric": "dfs.datanode.ReadsFromRemoteClient",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/splitRegion/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.splitRegion.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getClosestRowBefore/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.getClosestRowBefore.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/flushRegion/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.flushRegion.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/isAborted_avg_time": {
+ "metric": "rpc.rpc.isAborted_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/increment/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.increment.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/deleteTable_avg_time": {
+ "metric": "rpc.rpc.deleteTable_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/commitPending_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.commitPending_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/bytes_read": {
+ "metric": "dfs.datanode.BytesRead",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/put/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.put.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/blocks_verified": {
+ "metric": "dfs.datanode.BlocksVerified",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/delete_avg_time": {
+ "metric": "rpc.rpc.delete_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getClosestRowBefore_avg_time": {
+ "metric": "rpc.rpc.getClosestRowBefore_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_speed": {
+ "metric": "cpu_speed",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/get/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.get.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/statusUpdate_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.statusUpdate_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/compactRegion/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.compactRegion.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/openRegions/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.openRegions.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/RpcProcessingTime_avg_time": {
+ "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/put_num_ops": {
+ "metric": "rpc.rpc.put_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/move_avg_time": {
+ "metric": "rpc.rpc.move_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthenticationFailures": {
+ "metric": "rpc.rpc.RpcAuthenticationFailures",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/openScanner/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.openScanner.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getClusterStatus_avg_time": {
+ "metric": "rpc.rpc.getClusterStatus_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/bytes_written": {
+ "metric": "dfs.datanode.BytesWritten",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/unlockRow/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.unlockRow.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/removeFromOnlineRegions_avg_time": {
+ "metric": "rpc.rpc.removeFromOnlineRegions_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/modifyTable_avg_time": {
+ "metric": "rpc.rpc.modifyTable_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/put/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.put.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/checkAndPut_avg_time": {
+ "metric": "rpc.rpc.checkAndPut_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/blockReports_avg_time": {
+ "metric": "dfs.datanode.BlockReportsAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/replaceBlockOp_avg_time": {
+ "metric": "dfs.datanode.ReplaceBlockOpAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/isStopped/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.isStopped.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/put_avg_time": {
+ "metric": "rpc.rpc.put_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/increment/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.increment.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/openRegion/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.openRegion.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/SentBytes": {
+ "metric": "rpc.rpc.SentBytes",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/createTable_avg_time": {
+ "metric": "rpc.rpc.createTable_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/getTask_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.getTask_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getRegionInfo/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.getRegionInfo.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/addColumn_num_ops": {
+ "metric": "rpc.rpc.addColumn_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getHTableDescriptors_num_ops": {
+ "metric": "rpc.rpc.getHTableDescriptors_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getAlterStatus_avg_time": {
+ "metric": "rpc.rpc.getAlterStatus_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getRegionInfo_num_ops": {
+ "metric": "rpc.rpc.getRegionInfo_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/maxMemoryM": {
+ "metric": "jvm.JvmMetrics.maxMemoryM",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/logWarn": {
+ "metric": "jvm.JvmMetrics.LogWarn",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/statusUpdate_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.statusUpdate_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/compactRegion_num_ops": {
+ "metric": "rpc.rpc.compactRegion_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getFromOnlineRegions/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.getFromOnlineRegions.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/threadsTimedWaiting": {
+ "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/isAborted_num_ops": {
+ "metric": "rpc.rpc.isAborted_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/gcCount": {
+ "metric": "jvm.JvmMetrics.GcCount",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getOnlineRegions_num_ops": {
+ "metric": "rpc.rpc.getOnlineRegions_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/done_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.done_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/checkOOME/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.checkOOME.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/memHeapUsedM": {
+ "metric": "jvm.JvmMetrics.MemHeapUsedM",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getProtocolVersion_num_ops": {
+ "metric": "rpc.rpc.getProtocolVersion_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/disk/disk_free": {
+ "metric": "disk_free",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/threadsWaiting": {
+ "metric": "jvm.JvmMetrics.ThreadsWaiting",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/unlockRow_num_ops": {
+ "metric": "rpc.rpc.unlockRow_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/replicateLogEntries/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.replicateLogEntries.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/disableTable_num_ops": {
+ "metric": "rpc.rpc.disableTable_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/mem_buffers": {
+ "metric": "mem_buffers",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/shutdown_avg_time": {
+ "metric": "rpc.rpc.shutdown_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/openScanner_num_ops": {
+ "metric": "rpc.rpc.openScanner_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/regionServerStartup_num_ops": {
+ "metric": "rpc.rpc.regionServerStartup_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/bulkLoadHFiles/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.bulkLoadHFiles.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_user": {
+ "metric": "cpu_user",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/swap_free": {
+ "metric": "swap_free",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_system": {
+ "metric": "cpu_system",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/exists/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.exists.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/openRegions_avg_time": {
+ "metric": "rpc.rpc.openRegions_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/regionServerReport_num_ops": {
+ "metric": "rpc.rpc.regionServerReport_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/next_num_ops": {
+ "metric": "rpc.rpc.next_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries_avg_time": {
+ "metric": "rpc.rpc.getBlockCacheColumnFamilySummaries_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/block_verification_failures": {
+ "metric": "dfs.datanode.BlockVerificationFailures",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/closeRegion/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.closeRegion.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/checkAndDelete_avg_time": {
+ "metric": "rpc.rpc.checkAndDelete_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/load/load_fifteen": {
+ "metric": "load_fifteen",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getHServerInfo_avg_time": {
+ "metric": "rpc.rpc.getHServerInfo_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/logInfo": {
+ "metric": "jvm.JvmMetrics.LogInfo",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getZooKeeper_avg_time": {
+ "metric": "rpc.rpc.getZooKeeper_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/execCoprocessor/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.execCoprocessor.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/blocks_written": {
+ "metric": "dfs.datanode.BlocksWritten",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/balanceSwitch_num_ops": {
+ "metric": "rpc.rpc.balanceSwitch_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/isMasterRunning_num_ops": {
+ "metric": "rpc.rpc.isMasterRunning_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/offline_num_ops": {
+ "metric": "rpc.rpc.offline_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/mem_shared": {
+ "metric": "mem_shared",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getProtocolSignature_num_ops": {
+ "metric": "rpc.rpc.getProtocolSignature_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/abort_num_ops": {
+ "metric": "rpc.rpc.abort_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getProtocolVersion/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.getProtocolVersion.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/ugi/loginFailure_avg_time": {
+ "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_num": {
+ "metric": "cpu_num",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/getProtocolVersion_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/rollHLogWriter_num_ops": {
+ "metric": "rpc.rpc.rollHLogWriter_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthorizationSuccesses": {
+ "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/openRegions_num_ops": {
+ "metric": "rpc.rpc.openRegions_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/logFatal": {
+ "metric": "jvm.JvmMetrics.LogFatal",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/splitRegion_avg_time": {
+ "metric": "rpc.rpc.splitRegion_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/closeRegion/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.closeRegion.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/checkAndPut_num_ops": {
+ "metric": "rpc.rpc.checkAndPut_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/checkAndPut/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.checkAndPut.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/increment_avg_time": {
+ "metric": "rpc.rpc.increment_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ }
+ },
+ "JOBTRACKER":{
+
+ "metrics/boottime":{
+ "metric":"boottime",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_aidle":{
+ "metric":"cpu_aidle",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_idle":{
+ "metric":"cpu_idle",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_nice":{
+ "metric":"cpu_nice",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_num":{
+ "metric":"cpu_num",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_speed":{
+ "metric":"cpu_speed",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_system":{
+ "metric":"cpu_system",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_user":{
+ "metric":"cpu_user",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_wio":{
+ "metric":"cpu_wio",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/disk/disk_free":{
+ "metric":"disk_free",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/disk/disk_total":{
+ "metric":"disk_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/disk/part_max_used":{
+ "metric":"part_max_used",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/gcCount":{
+ "metric":"jvm.JvmMetrics.gcCount",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/gcTimeMillis":{
+ "metric":"jvm.JvmMetrics.gcTimeMillis",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/logError":{
+ "metric":"jvm.JvmMetrics.logError",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/logFatal":{
+ "metric":"jvm.JvmMetrics.logFatal",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/logInfo":{
+ "metric":"jvm.JvmMetrics.logInfo",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/logWarn":{
+ "metric":"jvm.JvmMetrics.logWarn",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/memHeapCommittedM":{
+ "metric":"jvm.JvmMetrics.memHeapCommittedM",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/memHeapUsedM":{
+ "metric":"jvm.JvmMetrics.memHeapUsedM",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/memNonHeapCommittedM":{
+ "metric":"jvm.JvmMetrics.memNonHeapCommittedM",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/memNonHeapUsedM":{
+ "metric":"jvm.JvmMetrics.memNonHeapUsedM",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/threadsBlocked":{
+ "metric":"jvm.JvmMetrics.threadsBlocked",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/threadsNew":{
+ "metric":"jvm.JvmMetrics.threadsNew",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/threadsRunnable":{
+ "metric":"jvm.JvmMetrics.threadsRunnable",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/threadsTerminated":{
+ "metric":"jvm.JvmMetrics.threadsTerminated",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/threadsTimedWaiting":{
+ "metric":"jvm.JvmMetrics.threadsTimedWaiting",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/threadsWaiting":{
+ "metric":"jvm.JvmMetrics.threadsWaiting",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/load/load_fifteen":{
+ "metric":"load_fifteen",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/load/load_five":{
+ "metric":"load_five",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/load/load_one":{
+ "metric":"load_one",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/jobs_completed":{
+ "metric":"mapred.Queue.jobs_completed",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/jobs_failed":{
+ "metric":"mapred.Queue.jobs_failed",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/jobs_killed":{
+ "metric":"mapred.Queue.jobs_killed",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/jobs_preparing":{
+ "metric":"mapred.Queue.jobs_preparing",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/jobs_running":{
+ "metric":"mapred.Queue.jobs_running",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/jobs_submitted":{
+ "metric":"mapred.Queue.jobs_submitted",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/maps_completed":{
+ "metric":"mapred.Queue.maps_completed",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/maps_failed":{
+ "metric":"mapred.Queue.maps_failed",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/maps_killed":{
+ "metric":"mapred.Queue.maps_killed",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/maps_launched":{
+ "metric":"mapred.Queue.maps_launched",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/reduces_completed":{
+ "metric":"mapred.Queue.reduces_completed",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/reduces_failed":{
+ "metric":"mapred.Queue.reduces_failed",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/reduces_killed":{
+ "metric":"mapred.Queue.reduces_killed",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/reduces_launched":{
+ "metric":"mapred.Queue.reduces_launched",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/reserved_map_slots":{
+ "metric":"mapred.Queue.reserved_map_slots",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/reserved_reduce_slots":{
+ "metric":"mapred.Queue.reserved_reduce_slots",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/running_0":{
+ "metric":"mapred.Queue.running_0",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/running_1440":{
+ "metric":"mapred.Queue.running_1440",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/running_300":{
+ "metric":"mapred.Queue.running_300",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/running_60":{
+ "metric":"mapred.Queue.running_60",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/waiting_maps":{
+ "metric":"mapred.Queue.waiting_maps",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/waiting_reduces":{
+ "metric":"mapred.Queue.waiting_reduces",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/blacklisted_maps":{
+ "metric":"mapred.jobtracker.blacklisted_maps",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/blacklisted_reduces":{
+ "metric":"mapred.jobtracker.blacklisted_reduces",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/heartbeats":{
+ "metric":"mapred.jobtracker.heartbeats",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/jobs_completed":{
+ "metric":"mapred.jobtracker.jobs_completed",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/jobs_failed":{
+ "metric":"mapred.jobtracker.jobs_failed",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/jobs_killed":{
+ "metric":"mapred.jobtracker.jobs_killed",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/jobs_preparing":{
+ "metric":"mapred.jobtracker.jobs_preparing",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/jobs_running":{
+ "metric":"mapred.jobtracker.jobs_running",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/jobs_submitted":{
+ "metric":"mapred.jobtracker.jobs_submitted",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/map_slots":{
+ "metric":"mapred.jobtracker.map_slots",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/maps_completed":{
+ "metric":"mapred.jobtracker.maps_completed",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/maps_failed":{
+ "metric":"mapred.jobtracker.maps_failed",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/maps_killed":{
+ "metric":"mapred.jobtracker.maps_killed",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/maps_launched":{
+ "metric":"mapred.jobtracker.maps_launched",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/occupied_map_slots":{
+ "metric":"mapred.jobtracker.occupied_map_slots",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/occupied_reduce_slots":{
+ "metric":"mapred.jobtracker.occupied_reduce_slots",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/reduce_slots":{
+ "metric":"mapred.jobtracker.reduce_slots",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/reduces_completed":{
+ "metric":"mapred.jobtracker.reduces_completed",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/reduces_failed":{
+ "metric":"mapred.jobtracker.reduces_failed",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/reduces_killed":{
+ "metric":"mapred.jobtracker.reduces_killed",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/reduces_launched":{
+ "metric":"mapred.jobtracker.reduces_launched",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/reserved_map_slots":{
+ "metric":"mapred.jobtracker.reserved_map_slots",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/reserved_reduce_slots":{
+ "metric":"mapred.jobtracker.reserved_reduce_slots",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/running_maps":{
+ "metric":"mapred.jobtracker.running_maps",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/running_reduces":{
+ "metric":"mapred.jobtracker.running_reduces",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/trackers":{
+ "metric":"mapred.jobtracker.trackers",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/trackers_blacklisted":{
+ "metric":"mapred.jobtracker.trackers_blacklisted",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/trackers_decommissioned":{
+ "metric":"mapred.jobtracker.trackers_decommissioned",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/trackers_graylisted":{
+ "metric":"mapred.jobtracker.trackers_graylisted",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/waiting_maps":{
+ "metric":"mapred.jobtracker.waiting_maps",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/waiting_reduces":{
+ "metric":"mapred.jobtracker.waiting_reduces",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/memory/mem_buffers":{
+ "metric":"mem_buffers",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_cached":{
+ "metric":"mem_cached",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_free":{
+ "metric":"mem_free",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_shared":{
+ "metric":"mem_shared",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_total":{
+ "metric":"mem_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/swap_free":{
+ "metric":"swap_free",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/swap_total":{
+ "metric":"swap_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/bytes_in":{
+ "metric":"bytes_in",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/bytes_out":{
+ "metric":"bytes_out",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/pkts_in":{
+ "metric":"pkts_in",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/pkts_out":{
+ "metric":"pkts_out",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/process/proc_run":{
+ "metric":"proc_run",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/process/proc_total":{
+ "metric":"proc_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/NumOpenConnections":{
+ "metric":"rpc.rpc.NumOpenConnections",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/ReceivedBytes":{
+ "metric":"rpc.rpc.ReceivedBytes",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/RpcProcessingTime_avg_time":{
+ "metric":"rpc.rpc.RpcProcessingTime_avg_time",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/RpcProcessingTime_num_ops":{
+ "metric":"rpc.rpc.RpcProcessingTime_num_ops",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/RpcQueueTime_avg_time":{
+ "metric":"rpc.rpc.RpcQueueTime_avg_time",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/RpcQueueTime_num_ops":{
+ "metric":"rpc.rpc.RpcQueueTime_num_ops",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/SentBytes":{
+ "metric":"rpc.rpc.SentBytes",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/callQueueLen":{
+ "metric":"rpc.rpc.callQueueLen",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthenticationFailures":{
+ "metric":"rpc.rpc.rpcAuthenticationFailures",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthenticationSuccesses":{
+ "metric":"rpc.rpc.rpcAuthenticationSuccesses",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthorizationFailures":{
+ "metric":"rpc.rpc.rpcAuthorizationFailures",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthorizationSuccesses":{
+ "metric":"rpc.rpc.rpcAuthorizationSuccesses",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getBuildVersion_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getBuildVersion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getBuildVersion_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getBuildVersion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getDelegationToken_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getDelegationToken_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getDelegationToken_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getDelegationToken_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getJobCounters_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getJobCounters_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getJobCounters_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getJobCounters_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getJobProfile_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getJobProfile_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getJobProfile_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getJobProfile_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getJobStatus_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getJobStatus_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getJobStatus_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getJobStatus_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getMapTaskReports_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getMapTaskReports_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getMapTaskReports_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getMapTaskReports_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getNewJobId_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getNewJobId_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getNewJobId_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getNewJobId_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getProtocolVersion_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getProtocolVersion_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getQueueAdmins_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getQueueAdmins_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getQueueAdmins_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getQueueAdmins_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getReduceTaskReports_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getReduceTaskReports_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getReduceTaskReports_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getReduceTaskReports_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getStagingAreaDir_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getStagingAreaDir_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getStagingAreaDir_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getStagingAreaDir_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getSystemDir_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getSystemDir_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getSystemDir_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getSystemDir_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getTaskCompletionEvents_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getTaskCompletionEvents_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getTaskCompletionEvents_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getTaskCompletionEvents_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/heartbeat_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.heartbeat_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/heartbeat_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.heartbeat_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/jobsToComplete_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.jobsToComplete_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/jobsToComplete_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.jobsToComplete_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/submitJob_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.submitJob_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/submitJob_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.submitJob_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/ugi/loginFailure_avg_time":{
+ "metric":"ugi.ugi.loginFailure_avg_time",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/ugi/loginFailure_num_ops":{
+ "metric":"ugi.ugi.loginFailure_num_ops",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/ugi/loginSuccess_avg_time":{
+ "metric":"ugi.ugi.loginSuccess_avg_time",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/ugi/loginSuccess_num_ops":{
+ "metric":"ugi.ugi.loginSuccess_num_ops",
+ "pointInTime":false,
+ "temporal":true
+ }
+ },
+ "TASKTRACKER":{
+
+ "metrics/boottime":{
+ "metric":"boottime",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_aidle":{
+ "metric":"cpu_aidle",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_idle":{
+ "metric":"cpu_idle",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_nice":{
+ "metric":"cpu_nice",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_num":{
+ "metric":"cpu_num",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_speed":{
+ "metric":"cpu_speed",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_system":{
+ "metric":"cpu_system",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_user":{
+ "metric":"cpu_user",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_wio":{
+ "metric":"cpu_wio",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/disk/disk_free":{
+ "metric":"disk_free",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/disk/disk_total":{
+ "metric":"disk_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/disk/part_max_used":{
+ "metric":"part_max_used",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/gcCount":{
+ "metric":"jvm.JvmMetrics.gcCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/gcTimeMillis":{
+ "metric":"jvm.JvmMetrics.gcTimeMillis",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/logError":{
+ "metric":"jvm.JvmMetrics.logError",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/logFatal":{
+ "metric":"jvm.JvmMetrics.logFatal",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/logInfo":{
+ "metric":"jvm.JvmMetrics.logInfo",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/logWarn":{
+ "metric":"jvm.JvmMetrics.logWarn",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/maxMemoryM":{
+ "metric":"jvm.JvmMetrics.maxMemoryM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memHeapCommittedM":{
+ "metric":"jvm.JvmMetrics.memHeapCommittedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memHeapUsedM":{
+ "metric":"jvm.JvmMetrics.memHeapUsedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memNonHeapCommittedM":{
+ "metric":"jvm.JvmMetrics.memNonHeapCommittedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memNonHeapUsedM":{
+ "metric":"jvm.JvmMetrics.memNonHeapUsedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsBlocked":{
+ "metric":"jvm.JvmMetrics.threadsBlocked",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsNew":{
+ "metric":"jvm.JvmMetrics.threadsNew",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsRunnable":{
+ "metric":"jvm.JvmMetrics.threadsRunnable",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsTerminated":{
+ "metric":"jvm.JvmMetrics.threadsTerminated",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsTimedWaiting":{
+ "metric":"jvm.JvmMetrics.threadsTimedWaiting",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsWaiting":{
+ "metric":"jvm.JvmMetrics.threadsWaiting",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/load/load_fifteen":{
+ "metric":"load_fifteen",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/load/load_five":{
+ "metric":"load_five",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/load/load_one":{
+ "metric":"load_one",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/mapred/shuffleOutput/shuffle_exceptions_caught":{
+ "metric":"mapred.shuffleOutput.shuffle_exceptions_caught",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/mapred/shuffleOutput/shuffle_failed_outputs":{
+ "metric":"mapred.shuffleOutput.shuffle_failed_outputs",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/mapred/shuffleOutput/shuffle_handler_busy_percent":{
+ "metric":"mapred.shuffleOutput.shuffle_handler_busy_percent",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/mapred/shuffleOutput/shuffle_output_bytes":{
+ "metric":"mapred.shuffleOutput.shuffle_output_bytes",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/mapred/shuffleOutput/shuffle_success_outputs":{
+ "metric":"mapred.shuffleOutput.shuffle_success_outputs",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/mapred/tasktracker/mapTaskSlots":{
+ "metric":"mapred.tasktracker.mapTaskSlots",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/mapred/tasktracker/maps_running":{
+ "metric":"mapred.tasktracker.maps_running",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/mapred/tasktracker/reduceTaskSlots":{
+ "metric":"mapred.tasktracker.reduceTaskSlots",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/mapred/tasktracker/reduces_running":{
+ "metric":"mapred.tasktracker.reduces_running",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/mapred/tasktracker/tasks_completed":{
+ "metric":"mapred.tasktracker.tasks_completed",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/mapred/tasktracker/tasks_failed_ping":{
+ "metric":"mapred.tasktracker.tasks_failed_ping",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/mapred/tasktracker/tasks_failed_timeout":{
+ "metric":"mapred.tasktracker.tasks_failed_timeout",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_buffers":{
+ "metric":"mem_buffers",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_cached":{
+ "metric":"mem_cached",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_free":{
+ "metric":"mem_free",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_shared":{
+ "metric":"mem_shared",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_total":{
+ "metric":"mem_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/swap_free":{
+ "metric":"swap_free",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/swap_total":{
+ "metric":"swap_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/bytes_in":{
+ "metric":"bytes_in",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/bytes_out":{
+ "metric":"bytes_out",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/pkts_in":{
+ "metric":"pkts_in",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/pkts_out":{
+ "metric":"pkts_out",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/process/proc_run":{
+ "metric":"proc_run",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/process/proc_total":{
+ "metric":"proc_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/NumOpenConnections":{
+ "metric":"rpc.rpc.NumOpenConnections",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/ReceivedBytes":{
+ "metric":"rpc.rpc.ReceivedBytes",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcProcessingTime_avg_time":{
+ "metric":"rpc.rpc.RpcProcessingTime_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcProcessingTime_num_ops":{
+ "metric":"rpc.rpc.RpcProcessingTime_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcQueueTime_avg_time":{
+ "metric":"rpc.rpc.RpcQueueTime_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcQueueTime_num_ops":{
+ "metric":"rpc.rpc.RpcQueueTime_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcSlowResponse_avg_time":{
+ "metric":"rpc.rpc.RpcSlowResponse_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcSlowResponse_num_ops":{
+ "metric":"rpc.rpc.RpcSlowResponse_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/SentBytes":{
+ "metric":"rpc.rpc.SentBytes",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/abort/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.abort.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/abort/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.abort.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/abort_avg_time":{
+ "metric":"rpc.rpc.abort_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/abort_num_ops":{
+ "metric":"rpc.rpc.abort_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addColumn_avg_time":{
+ "metric":"rpc.rpc.addColumn_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addColumn_num_ops":{
+ "metric":"rpc.rpc.addColumn_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addToOnlineRegions/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.addToOnlineRegions.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addToOnlineRegions/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.addToOnlineRegions.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addToOnlineRegions_avg_time":{
+ "metric":"rpc.rpc.addToOnlineRegions_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addToOnlineRegions_num_ops":{
+ "metric":"rpc.rpc.addToOnlineRegions_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/assign_avg_time":{
+ "metric":"rpc.rpc.assign_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/assign_num_ops":{
+ "metric":"rpc.rpc.assign_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balanceSwitch_avg_time":{
+ "metric":"rpc.rpc.balanceSwitch_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balanceSwitch_num_ops":{
+ "metric":"rpc.rpc.balanceSwitch_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balance_avg_time":{
+ "metric":"rpc.rpc.balance_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balance_num_ops":{
+ "metric":"rpc.rpc.balance_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/bulkLoadHFiles/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.bulkLoadHFiles.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/bulkLoadHFiles/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.bulkLoadHFiles.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/bulkLoadHFiles_avg_time":{
+ "metric":"rpc.rpc.bulkLoadHFiles_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/bulkLoadHFiles_num_ops":{
+ "metric":"rpc.rpc.bulkLoadHFiles_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/callQueueLen":{
+ "metric":"rpc.rpc.callQueueLen",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndDelete/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.checkAndDelete.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndDelete/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.checkAndDelete.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndDelete_avg_time":{
+ "metric":"rpc.rpc.checkAndDelete_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndDelete_num_ops":{
+ "metric":"rpc.rpc.checkAndDelete_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndPut/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.checkAndPut.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndPut/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.checkAndPut.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndPut_avg_time":{
+ "metric":"rpc.rpc.checkAndPut_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndPut_num_ops":{
+ "metric":"rpc.rpc.checkAndPut_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkOOME/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.checkOOME.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkOOME/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.checkOOME.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkOOME_avg_time":{
+ "metric":"rpc.rpc.checkOOME_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkOOME_num_ops":{
+ "metric":"rpc.rpc.checkOOME_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/close/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.close.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/close/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.close.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/closeRegion/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.closeRegion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/closeRegion/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.closeRegion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/closeRegion_avg_time":{
+ "metric":"rpc.rpc.closeRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/closeRegion_num_ops":{
+ "metric":"rpc.rpc.closeRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/close_avg_time":{
+ "metric":"rpc.rpc.close_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/close_num_ops":{
+ "metric":"rpc.rpc.close_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/compactRegion/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.compactRegion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/compactRegion/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.compactRegion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/compactRegion_avg_time":{
+ "metric":"rpc.rpc.compactRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/compactRegion_num_ops":{
+ "metric":"rpc.rpc.compactRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/createTable_avg_time":{
+ "metric":"rpc.rpc.createTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/createTable_num_ops":{
+ "metric":"rpc.rpc.createTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/delete/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.delete.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/delete/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.delete.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteColumn_avg_time":{
+ "metric":"rpc.rpc.deleteColumn_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteColumn_num_ops":{
+ "metric":"rpc.rpc.deleteColumn_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteTable_avg_time":{
+ "metric":"rpc.rpc.deleteTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteTable_num_ops":{
+ "metric":"rpc.rpc.deleteTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/delete_avg_time":{
+ "metric":"rpc.rpc.delete_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/delete_num_ops":{
+ "metric":"rpc.rpc.delete_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/disableTable_avg_time":{
+ "metric":"rpc.rpc.disableTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/disableTable_num_ops":{
+ "metric":"rpc.rpc.disableTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/enableTable_avg_time":{
+ "metric":"rpc.rpc.enableTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/enableTable_num_ops":{
+ "metric":"rpc.rpc.enableTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/execCoprocessor/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.execCoprocessor.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/execCoprocessor/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.execCoprocessor.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/execCoprocessor_avg_time":{
+ "metric":"rpc.rpc.execCoprocessor_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/execCoprocessor_num_ops":{
+ "metric":"rpc.rpc.execCoprocessor_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/exists/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.exists.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/exists/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.exists.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/exists_avg_time":{
+ "metric":"rpc.rpc.exists_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/exists_num_ops":{
+ "metric":"rpc.rpc.exists_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/flushRegion/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.flushRegion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/flushRegion/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.flushRegion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/flushRegion_avg_time":{
+ "metric":"rpc.rpc.flushRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/flushRegion_num_ops":{
+ "metric":"rpc.rpc.flushRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/get/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.get.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/get/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.get.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getAlterStatus_avg_time":{
+ "metric":"rpc.rpc.getAlterStatus_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getAlterStatus_num_ops":{
+ "metric":"rpc.rpc.getAlterStatus_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries_avg_time":{
+ "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries_num_ops":{
+ "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getCatalogTracker/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getCatalogTracker.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getCatalogTracker/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getCatalogTracker.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getCatalogTracker_avg_time":{
+ "metric":"rpc.rpc.getCatalogTracker_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getCatalogTracker_num_ops":{
+ "metric":"rpc.rpc.getCatalogTracker_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClosestRowBefore/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getClosestRowBefore.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClosestRowBefore/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getClosestRowBefore.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClosestRowBefore_avg_time":{
+ "metric":"rpc.rpc.getClosestRowBefore_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClosestRowBefore_num_ops":{
+ "metric":"rpc.rpc.getClosestRowBefore_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClusterStatus_avg_time":{
+ "metric":"rpc.rpc.getClusterStatus_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClusterStatus_num_ops":{
+ "metric":"rpc.rpc.getClusterStatus_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getConfiguration/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getConfiguration.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getConfiguration/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getConfiguration.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getConfiguration_avg_time":{
+ "metric":"rpc.rpc.getConfiguration_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getConfiguration_num_ops":{
+ "metric":"rpc.rpc.getConfiguration_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getFromOnlineRegions/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getFromOnlineRegions.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getFromOnlineRegions/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getFromOnlineRegions.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getFromOnlineRegions_avg_time":{
+ "metric":"rpc.rpc.getFromOnlineRegions_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getFromOnlineRegions_num_ops":{
+ "metric":"rpc.rpc.getFromOnlineRegions_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHServerInfo/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getHServerInfo.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHServerInfo/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getHServerInfo.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHServerInfo_avg_time":{
+ "metric":"rpc.rpc.getHServerInfo_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHServerInfo_num_ops":{
+ "metric":"rpc.rpc.getHServerInfo_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHTableDescriptors_avg_time":{
+ "metric":"rpc.rpc.getHTableDescriptors_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHTableDescriptors_num_ops":{
+ "metric":"rpc.rpc.getHTableDescriptors_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getOnlineRegions/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getOnlineRegions.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getOnlineRegions/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getOnlineRegions.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getOnlineRegions_avg_time":{
+ "metric":"rpc.rpc.getOnlineRegions_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getOnlineRegions_num_ops":{
+ "metric":"rpc.rpc.getOnlineRegions_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolSignature/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getProtocolSignature.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolSignature/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getProtocolSignature.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolSignature_avg_time":{
+ "metric":"rpc.rpc.getProtocolSignature_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolSignature_num_ops":{
+ "metric":"rpc.rpc.getProtocolSignature_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolVersion/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getProtocolVersion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolVersion/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getProtocolVersion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolVersion_avg_time":{
+ "metric":"rpc.rpc.getProtocolVersion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolVersion_num_ops":{
+ "metric":"rpc.rpc.getProtocolVersion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getRegionInfo/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getRegionInfo.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getRegionInfo/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getRegionInfo.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getRegionInfo_avg_time":{
+ "metric":"rpc.rpc.getRegionInfo_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getRegionInfo_num_ops":{
+ "metric":"rpc.rpc.getRegionInfo_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getServerName/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getServerName.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getServerName/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getServerName.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getServerName_avg_time":{
+ "metric":"rpc.rpc.getServerName_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getServerName_num_ops":{
+ "metric":"rpc.rpc.getServerName_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getZooKeeper/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getZooKeeper.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getZooKeeper/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getZooKeeper.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getZooKeeper_avg_time":{
+ "metric":"rpc.rpc.getZooKeeper_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getZooKeeper_num_ops":{
+ "metric":"rpc.rpc.getZooKeeper_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/get_avg_time":{
+ "metric":"rpc.rpc.get_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/get_num_ops":{
+ "metric":"rpc.rpc.get_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/increment/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.increment.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/increment/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.increment.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/incrementColumnValue/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.incrementColumnValue.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/incrementColumnValue/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.incrementColumnValue.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/incrementColumnValue_avg_time":{
+ "metric":"rpc.rpc.incrementColumnValue_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/incrementColumnValue_num_ops":{
+ "metric":"rpc.rpc.incrementColumnValue_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/increment_avg_time":{
+ "metric":"rpc.rpc.increment_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/increment_num_ops":{
+ "metric":"rpc.rpc.increment_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isAborted/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.isAborted.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isAborted/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.isAborted.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isAborted_avg_time":{
+ "metric":"rpc.rpc.isAborted_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isAborted_num_ops":{
+ "metric":"rpc.rpc.isAborted_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isMasterRunning_avg_time":{
+ "metric":"rpc.rpc.isMasterRunning_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isMasterRunning_num_ops":{
+ "metric":"rpc.rpc.isMasterRunning_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isStopped/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.isStopped.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isStopped/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.isStopped.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isStopped_avg_time":{
+ "metric":"rpc.rpc.isStopped_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isStopped_num_ops":{
+ "metric":"rpc.rpc.isStopped_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/lockRow/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.lockRow.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/lockRow/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.lockRow.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/lockRow_avg_time":{
+ "metric":"rpc.rpc.lockRow_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/lockRow_num_ops":{
+ "metric":"rpc.rpc.lockRow_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyColumn_avg_time":{
+ "metric":"rpc.rpc.modifyColumn_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyColumn_num_ops":{
+ "metric":"rpc.rpc.modifyColumn_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyTable_avg_time":{
+ "metric":"rpc.rpc.modifyTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyTable_num_ops":{
+ "metric":"rpc.rpc.modifyTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/move_avg_time":{
+ "metric":"rpc.rpc.move_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/move_num_ops":{
+ "metric":"rpc.rpc.move_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/multi/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.multi.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/multi/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.multi.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/multi_avg_time":{
+ "metric":"rpc.rpc.multi_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/multi_num_ops":{
+ "metric":"rpc.rpc.multi_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/next/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.next.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/next/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.next.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/next_avg_time":{
+ "metric":"rpc.rpc.next_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/next_num_ops":{
+ "metric":"rpc.rpc.next_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/offline_avg_time":{
+ "metric":"rpc.rpc.offline_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/offline_num_ops":{
+ "metric":"rpc.rpc.offline_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegion/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.openRegion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegion/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.openRegion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegion_avg_time":{
+ "metric":"rpc.rpc.openRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegion_num_ops":{
+ "metric":"rpc.rpc.openRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegions/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.openRegions.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegions/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.openRegions.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegions_avg_time":{
+ "metric":"rpc.rpc.openRegions_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegions_num_ops":{
+ "metric":"rpc.rpc.openRegions_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openScanner/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.openScanner.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openScanner/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.openScanner.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openScanner_avg_time":{
+ "metric":"rpc.rpc.openScanner_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openScanner_num_ops":{
+ "metric":"rpc.rpc.openScanner_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/put/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.put.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/put/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.put.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/put_avg_time":{
+ "metric":"rpc.rpc.put_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/put_num_ops":{
+ "metric":"rpc.rpc.put_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerReport_avg_time":{
+ "metric":"rpc.rpc.regionServerReport_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerReport_num_ops":{
+ "metric":"rpc.rpc.regionServerReport_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerStartup_avg_time":{
+ "metric":"rpc.rpc.regionServerStartup_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerStartup_num_ops":{
+ "metric":"rpc.rpc.regionServerStartup_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.removeFromOnlineRegions.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.removeFromOnlineRegions.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/removeFromOnlineRegions_avg_time":{
+ "metric":"rpc.rpc.removeFromOnlineRegions_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/removeFromOnlineRegions_num_ops":{
+ "metric":"rpc.rpc.removeFromOnlineRegions_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/replicateLogEntries/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.replicateLogEntries.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/replicateLogEntries/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.replicateLogEntries.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/replicateLogEntries_avg_time":{
+ "metric":"rpc.rpc.replicateLogEntries_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/replicateLogEntries_num_ops":{
+ "metric":"rpc.rpc.replicateLogEntries_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/reportRSFatalError_avg_time":{
+ "metric":"rpc.rpc.reportRSFatalError_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/reportRSFatalError_num_ops":{
+ "metric":"rpc.rpc.reportRSFatalError_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rollHLogWriter/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.rollHLogWriter.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rollHLogWriter/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.rollHLogWriter.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rollHLogWriter_avg_time":{
+ "metric":"rpc.rpc.rollHLogWriter_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rollHLogWriter_num_ops":{
+ "metric":"rpc.rpc.rollHLogWriter_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthenticationFailures":{
+ "metric":"rpc.rpc.rpcAuthenticationFailures",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthenticationSuccesses":{
+ "metric":"rpc.rpc.rpcAuthenticationSuccesses",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthorizationFailures":{
+ "metric":"rpc.rpc.rpcAuthorizationFailures",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthorizationSuccesses":{
+ "metric":"rpc.rpc.rpcAuthorizationSuccesses",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/shutdown_avg_time":{
+ "metric":"rpc.rpc.shutdown_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/shutdown_num_ops":{
+ "metric":"rpc.rpc.shutdown_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/splitRegion/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.splitRegion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/splitRegion/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.splitRegion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/splitRegion_avg_time":{
+ "metric":"rpc.rpc.splitRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/splitRegion_num_ops":{
+ "metric":"rpc.rpc.splitRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stop/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.stop.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stop/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.stop.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stopMaster_avg_time":{
+ "metric":"rpc.rpc.stopMaster_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stopMaster_num_ops":{
+ "metric":"rpc.rpc.stopMaster_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stop_avg_time":{
+ "metric":"rpc.rpc.stop_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stop_num_ops":{
+ "metric":"rpc.rpc.stop_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unassign_avg_time":{
+ "metric":"rpc.rpc.unassign_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unassign_num_ops":{
+ "metric":"rpc.rpc.unassign_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unlockRow/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.unlockRow.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unlockRow/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.unlockRow.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unlockRow_avg_time":{
+ "metric":"rpc.rpc.unlockRow_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unlockRow_num_ops":{
+ "metric":"rpc.rpc.unlockRow_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/canCommit_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.canCommit_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/canCommit_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.canCommit_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/commitPending_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.commitPending_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/commitPending_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.commitPending_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/done_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.done_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/done_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.done_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getBlockLocalPathInfo_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getBlockLocalPathInfo_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getBlockLocalPathInfo_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getBlockLocalPathInfo_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getMapCompletionEvents_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getMapCompletionEvents_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getMapCompletionEvents_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getMapCompletionEvents_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getProtocolVersion_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getProtocolVersion_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getTask_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getTask_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getTask_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getTask_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/ping_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.ping_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/ping_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.ping_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/statusUpdate_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.statusUpdate_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/statusUpdate_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.statusUpdate_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/ugi/loginFailure_avg_time":{
+ "metric":"ugi.ugi.loginFailure_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/ugi/loginFailure_num_ops":{
+ "metric":"ugi.ugi.loginFailure_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/ugi/loginSuccess_avg_time":{
+ "metric":"ugi.ugi.loginSuccess_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/ugi/loginSuccess_num_ops":{
+ "metric":"ugi.ugi.loginSuccess_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ }
+ },
+ "HBASE_MASTER":{
+ "metrics/boottime":{
+ "metric":"boottime",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_aidle":{
+ "metric":"cpu_aidle",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_idle":{
+ "metric":"cpu_idle",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_nice":{
+ "metric":"cpu_nice",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_num":{
+ "metric":"cpu_num",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_speed":{
+ "metric":"cpu_speed",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_system":{
+ "metric":"cpu_system",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_user":{
+ "metric":"cpu_user",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_wio":{
+ "metric":"cpu_wio",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/disk/disk_free":{
+ "metric":"disk_free",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/disk/disk_total":{
+ "metric":"disk_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/disk/part_max_used":{
+ "metric":"part_max_used",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/master/cluster_requests":{
+ "metric":"master.Server.clusterRequests",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/hbase/master/splitSize_avg_time":{
+ "metric":"master.FileSystem.HlogSplitSize_mean",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/master/splitSize_num_ops":{
+ "metric":"master.FileSystem.HlogSplitSize_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/master/splitTime_avg_time":{
+ "metric":"master.FileSystem.HlogSplitTime_mean",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/master/splitTime_num_ops":{
+ "metric":"master.FileSystem.HlogSplitTime_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/gcCount":{
+ "metric":"jvm.JvmMetrics.gcCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/gcTimeMillis":{
+ "metric":"jvm.JvmMetrics.gcTimeMillis",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/logError":{
+ "metric":"jvm.JvmMetrics.logError",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/logFatal":{
+ "metric":"jvm.JvmMetrics.logFatal",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/logInfo":{
+ "metric":"jvm.JvmMetrics.logInfo",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/logWarn":{
+ "metric":"jvm.JvmMetrics.logWarn",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/maxMemoryM":{
+ "metric":"jvm.JvmMetrics.maxMemoryM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memHeapCommittedM":{
+ "metric":"jvm.JvmMetrics.memHeapCommittedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memHeapUsedM":{
+ "metric":"jvm.JvmMetrics.memHeapUsedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memNonHeapCommittedM":{
+ "metric":"jvm.JvmMetrics.memNonHeapCommittedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memNonHeapUsedM":{
+ "metric":"jvm.JvmMetrics.memNonHeapUsedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsBlocked":{
+ "metric":"jvm.JvmMetrics.threadsBlocked",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsNew":{
+ "metric":"jvm.JvmMetrics.threadsNew",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsRunnable":{
+ "metric":"jvm.JvmMetrics.threadsRunnable",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsTerminated":{
+ "metric":"jvm.JvmMetrics.threadsTerminated",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsTimedWaiting":{
+ "metric":"jvm.JvmMetrics.threadsTimedWaiting",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsWaiting":{
+ "metric":"jvm.JvmMetrics.threadsWaiting",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/load/load_fifteen":{
+ "metric":"load_fifteen",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/load/load_five":{
+ "metric":"load_five",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/load/load_one":{
+ "metric":"load_one",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_buffers":{
+ "metric":"mem_buffers",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_cached":{
+ "metric":"mem_cached",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_free":{
+ "metric":"mem_free",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_shared":{
+ "metric":"mem_shared",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_total":{
+ "metric":"mem_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/swap_free":{
+ "metric":"swap_free",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/swap_total":{
+ "metric":"swap_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/bytes_in":{
+ "metric":"bytes_in",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/bytes_out":{
+ "metric":"bytes_out",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/pkts_in":{
+ "metric":"pkts_in",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/pkts_out":{
+ "metric":"pkts_out",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/process/proc_run":{
+ "metric":"proc_run",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/process/proc_total":{
+ "metric":"proc_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/NumOpenConnections":{
+ "metric":"rpc.rpc.NumOpenConnections",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/ReceivedBytes":{
+ "metric":"rpc.rpc.ReceivedBytes",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/RpcProcessingTime_avg_time":{
+ "metric":"rpc.rpc.RpcProcessingTime_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcProcessingTime_num_ops":{
+ "metric":"rpc.rpc.RpcProcessingTime_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcQueueTime_avg_time":{
+ "metric":"rpc.rpc.RpcQueueTime_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcQueueTime_num_ops":{
+ "metric":"rpc.rpc.RpcQueueTime_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcSlowResponse_avg_time":{
+ "metric":"rpc.rpc.RpcSlowResponse_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcSlowResponse_num_ops":{
+ "metric":"rpc.rpc.RpcSlowResponse_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/SentBytes":{
+ "metric":"rpc.rpc.SentBytes",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/addColumn/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.addColumn.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addColumn/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.addColumn.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addColumn_avg_time":{
+ "metric":"rpc.rpc.addColumn_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addColumn_num_ops":{
+ "metric":"rpc.rpc.addColumn_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/assign/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.assign.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/assign/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.assign.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/assign_avg_time":{
+ "metric":"rpc.rpc.assign_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/assign_num_ops":{
+ "metric":"rpc.rpc.assign_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balance/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.balance.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balance/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.balance.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balanceSwitch/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.balanceSwitch.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balanceSwitch/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.balanceSwitch.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balanceSwitch_avg_time":{
+ "metric":"rpc.rpc.balanceSwitch_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balanceSwitch_num_ops":{
+ "metric":"rpc.rpc.balanceSwitch_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balance_avg_time":{
+ "metric":"rpc.rpc.balance_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balance_num_ops":{
+ "metric":"rpc.rpc.balance_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/bulkLoadHFiles_avg_time":{
+ "metric":"rpc.rpc.bulkLoadHFiles_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/bulkLoadHFiles_num_ops":{
+ "metric":"rpc.rpc.bulkLoadHFiles_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/callQueueLen":{
+ "metric":"rpc.rpc.callQueueLen",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndDelete_avg_time":{
+ "metric":"rpc.rpc.checkAndDelete_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndDelete_num_ops":{
+ "metric":"rpc.rpc.checkAndDelete_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndPut_avg_time":{
+ "metric":"rpc.rpc.checkAndPut_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndPut_num_ops":{
+ "metric":"rpc.rpc.checkAndPut_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/closeRegion_avg_time":{
+ "metric":"rpc.rpc.closeRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/closeRegion_num_ops":{
+ "metric":"rpc.rpc.closeRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/close_avg_time":{
+ "metric":"rpc.rpc.close_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/close_num_ops":{
+ "metric":"rpc.rpc.close_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/compactRegion_avg_time":{
+ "metric":"rpc.rpc.compactRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/compactRegion_num_ops":{
+ "metric":"rpc.rpc.compactRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/createTable/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.createTable.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/createTable/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.createTable.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/createTable_avg_time":{
+ "metric":"rpc.rpc.createTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/createTable_num_ops":{
+ "metric":"rpc.rpc.createTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteColumn/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.deleteColumn.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteColumn/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.deleteColumn.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteColumn_avg_time":{
+ "metric":"rpc.rpc.deleteColumn_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteColumn_num_ops":{
+ "metric":"rpc.rpc.deleteColumn_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteTable/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.deleteTable.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteTable/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.deleteTable.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteTable_avg_time":{
+ "metric":"rpc.rpc.deleteTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteTable_num_ops":{
+ "metric":"rpc.rpc.deleteTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/delete_avg_time":{
+ "metric":"rpc.rpc.delete_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/delete_num_ops":{
+ "metric":"rpc.rpc.delete_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/disableTable/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.disableTable.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/disableTable/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.disableTable.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/disableTable_avg_time":{
+ "metric":"rpc.rpc.disableTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/disableTable_num_ops":{
+ "metric":"rpc.rpc.disableTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/enableTable/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.enableTable.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/enableTable/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.enableTable.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/enableTable_avg_time":{
+ "metric":"rpc.rpc.enableTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/enableTable_num_ops":{
+ "metric":"rpc.rpc.enableTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/execCoprocessor_avg_time":{
+ "metric":"rpc.rpc.execCoprocessor_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/execCoprocessor_num_ops":{
+ "metric":"rpc.rpc.execCoprocessor_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/exists_avg_time":{
+ "metric":"rpc.rpc.exists_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/exists_num_ops":{
+ "metric":"rpc.rpc.exists_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/flushRegion_avg_time":{
+ "metric":"rpc.rpc.flushRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/flushRegion_num_ops":{
+ "metric":"rpc.rpc.flushRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getAlterStatus/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getAlterStatus.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getAlterStatus/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getAlterStatus.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getAlterStatus_avg_time":{
+ "metric":"rpc.rpc.getAlterStatus_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getAlterStatus_num_ops":{
+ "metric":"rpc.rpc.getAlterStatus_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries_avg_time":{
+ "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries_num_ops":{
+ "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClosestRowBefore_avg_time":{
+ "metric":"rpc.rpc.getClosestRowBefore_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClosestRowBefore_num_ops":{
+ "metric":"rpc.rpc.getClosestRowBefore_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClusterStatus/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getClusterStatus.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClusterStatus/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getClusterStatus.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClusterStatus_avg_time":{
+ "metric":"rpc.rpc.getClusterStatus_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClusterStatus_num_ops":{
+ "metric":"rpc.rpc.getClusterStatus_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHServerInfo_avg_time":{
+ "metric":"rpc.rpc.getHServerInfo_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHServerInfo_num_ops":{
+ "metric":"rpc.rpc.getHServerInfo_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHTableDescriptors/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getHTableDescriptors.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHTableDescriptors/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getHTableDescriptors.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHTableDescriptors_avg_time":{
+ "metric":"rpc.rpc.getHTableDescriptors_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHTableDescriptors_num_ops":{
+ "metric":"rpc.rpc.getHTableDescriptors_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getOnlineRegions_avg_time":{
+ "metric":"rpc.rpc.getOnlineRegions_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getOnlineRegions_num_ops":{
+ "metric":"rpc.rpc.getOnlineRegions_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolSignature/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getProtocolSignature.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolSignature/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getProtocolSignature.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolSignature_avg_time":{
+ "metric":"rpc.rpc.getProtocolSignature_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolSignature_num_ops":{
+ "metric":"rpc.rpc.getProtocolSignature_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolVersion/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getProtocolVersion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolVersion/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getProtocolVersion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolVersion_avg_time":{
+ "metric":"rpc.rpc.getProtocolVersion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolVersion_num_ops":{
+ "metric":"rpc.rpc.getProtocolVersion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getRegionInfo_avg_time":{
+ "metric":"rpc.rpc.getRegionInfo_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getRegionInfo_num_ops":{
+ "metric":"rpc.rpc.getRegionInfo_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/get_avg_time":{
+ "metric":"rpc.rpc.get_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/get_num_ops":{
+ "metric":"rpc.rpc.get_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/incrementColumnValue_avg_time":{
+ "metric":"rpc.rpc.incrementColumnValue_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/incrementColumnValue_num_ops":{
+ "metric":"rpc.rpc.incrementColumnValue_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/increment_avg_time":{
+ "metric":"rpc.rpc.increment_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/increment_num_ops":{
+ "metric":"rpc.rpc.increment_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isMasterRunning/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.isMasterRunning.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isMasterRunning/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.isMasterRunning.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isMasterRunning_avg_time":{
+ "metric":"rpc.rpc.isMasterRunning_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isMasterRunning_num_ops":{
+ "metric":"rpc.rpc.isMasterRunning_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/lockRow_avg_time":{
+ "metric":"rpc.rpc.lockRow_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/lockRow_num_ops":{
+ "metric":"rpc.rpc.lockRow_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyColumn/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.modifyColumn.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyColumn/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.modifyColumn.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyColumn_avg_time":{
+ "metric":"rpc.rpc.modifyColumn_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyColumn_num_ops":{
+ "metric":"rpc.rpc.modifyColumn_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyTable/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.modifyTable.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyTable/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.modifyTable.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyTable_avg_time":{
+ "metric":"rpc.rpc.modifyTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyTable_num_ops":{
+ "metric":"rpc.rpc.modifyTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/move/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.move.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/move/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.move.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/move_avg_time":{
+ "metric":"rpc.rpc.move_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/move_num_ops":{
+ "metric":"rpc.rpc.move_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/multi_avg_time":{
+ "metric":"rpc.rpc.multi_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/multi_num_ops":{
+ "metric":"rpc.rpc.multi_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/next_avg_time":{
+ "metric":"rpc.rpc.next_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/next_num_ops":{
+ "metric":"rpc.rpc.next_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/offline/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.offline.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/offline/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.offline.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/offline_avg_time":{
+ "metric":"rpc.rpc.offline_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/offline_num_ops":{
+ "metric":"rpc.rpc.offline_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegion_avg_time":{
+ "metric":"rpc.rpc.openRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegion_num_ops":{
+ "metric":"rpc.rpc.openRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegions_avg_time":{
+ "metric":"rpc.rpc.openRegions_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegions_num_ops":{
+ "metric":"rpc.rpc.openRegions_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openScanner_avg_time":{
+ "metric":"rpc.rpc.openScanner_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openScanner_num_ops":{
+ "metric":"rpc.rpc.openScanner_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/put_avg_time":{
+ "metric":"rpc.rpc.put_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/put_num_ops":{
+ "metric":"rpc.rpc.put_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerReport/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.regionServerReport.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerReport/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.regionServerReport.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerReport_avg_time":{
+ "metric":"rpc.rpc.regionServerReport_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerReport_num_ops":{
+ "metric":"rpc.rpc.regionServerReport_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerStartup/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.regionServerStartup.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerStartup/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.regionServerStartup.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerStartup_avg_time":{
+ "metric":"rpc.rpc.regionServerStartup_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerStartup_num_ops":{
+ "metric":"rpc.rpc.regionServerStartup_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/replicateLogEntries_avg_time":{
+ "metric":"rpc.rpc.replicateLogEntries_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/replicateLogEntries_num_ops":{
+ "metric":"rpc.rpc.replicateLogEntries_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/reportRSFatalError/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.reportRSFatalError.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/reportRSFatalError/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.reportRSFatalError.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/reportRSFatalError_avg_time":{
+ "metric":"rpc.rpc.reportRSFatalError_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/reportRSFatalError_num_ops":{
+ "metric":"rpc.rpc.reportRSFatalError_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rollHLogWriter_avg_time":{
+ "metric":"rpc.rpc.rollHLogWriter_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rollHLogWriter_num_ops":{
+ "metric":"rpc.rpc.rollHLogWriter_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthenticationFailures":{
+ "metric":"rpc.rpc.rpcAuthenticationFailures",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthenticationSuccesses":{
+ "metric":"rpc.rpc.rpcAuthenticationSuccesses",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthorizationFailures":{
+ "metric":"rpc.rpc.rpcAuthorizationFailures",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthorizationSuccesses":{
+ "metric":"rpc.rpc.rpcAuthorizationSuccesses",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/shutdown/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.shutdown.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/shutdown/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.shutdown.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/shutdown_avg_time":{
+ "metric":"rpc.rpc.shutdown_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/shutdown_num_ops":{
+ "metric":"rpc.rpc.shutdown_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/splitRegion_avg_time":{
+ "metric":"rpc.rpc.splitRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/splitRegion_num_ops":{
+ "metric":"rpc.rpc.splitRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stopMaster/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.stopMaster.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stopMaster/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.stopMaster.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stopMaster_avg_time":{
+ "metric":"rpc.rpc.stopMaster_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stopMaster_num_ops":{
+ "metric":"rpc.rpc.stopMaster_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stop_avg_time":{
+ "metric":"rpc.rpc.stop_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stop_num_ops":{
+ "metric":"rpc.rpc.stop_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unassign/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.unassign.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unassign/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.unassign.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unassign_avg_time":{
+ "metric":"rpc.rpc.unassign_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unassign_num_ops":{
+ "metric":"rpc.rpc.unassign_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unlockRow_avg_time":{
+ "metric":"rpc.rpc.unlockRow_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unlockRow_num_ops":{
+ "metric":"rpc.rpc.unlockRow_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ }
+ },
+ "HBASE_CLIENT":{
+ "metrics/boottime":{
+ "metric":"boottime",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_aidle":{
+ "metric":"cpu_aidle",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_idle":{
+ "metric":"cpu_idle",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_nice":{
+ "metric":"cpu_nice",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_num":{
+ "metric":"cpu_num",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_speed":{
+ "metric":"cpu_speed",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_system":{
+ "metric":"cpu_system",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_user":{
+ "metric":"cpu_user",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_wio":{
+ "metric":"cpu_wio",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/disk/disk_free":{
+ "metric":"disk_free",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/disk/disk_total":{
+ "metric":"disk_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/disk/part_max_used":{
+ "metric":"part_max_used",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/gcCount":{
+ "metric":"jvm.JvmMetrics.gcCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/gcTimeMillis":{
+ "metric":"jvm.JvmMetrics.gcTimeMillis",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/logError":{
+ "metric":"jvm.JvmMetrics.logError",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/logFatal":{
+ "metric":"jvm.JvmMetrics.logFatal",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/logInfo":{
+ "metric":"jvm.JvmMetrics.logInfo",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/logWarn":{
+ "metric":"jvm.JvmMetrics.logWarn",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/maxMemoryM":{
+ "metric":"jvm.JvmMetrics.maxMemoryM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memHeapCommittedM":{
+ "metric":"jvm.JvmMetrics.memHeapCommittedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memHeapUsedM":{
+ "metric":"jvm.JvmMetrics.memHeapUsedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memNonHeapCommittedM":{
+ "metric":"jvm.JvmMetrics.memNonHeapCommittedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memNonHeapUsedM":{
+ "metric":"jvm.JvmMetrics.memNonHeapUsedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsBlocked":{
+ "metric":"jvm.JvmMetrics.threadsBlocked",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsNew":{
+ "metric":"jvm.JvmMetrics.threadsNew",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsRunnable":{
+ "metric":"jvm.JvmMetrics.threadsRunnable",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsTerminated":{
+ "metric":"jvm.JvmMetrics.threadsTerminated",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsTimedWaiting":{
+ "metric":"jvm.JvmMetrics.threadsTimedWaiting",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsWaiting":{
+ "metric":"jvm.JvmMetrics.threadsWaiting",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/load/load_fifteen":{
+ "metric":"load_fifteen",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/load/load_five":{
+ "metric":"load_five",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/load/load_one":{
+ "metric":"load_one",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_buffers":{
+ "metric":"mem_buffers",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_cached":{
+ "metric":"mem_cached",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_free":{
+ "metric":"mem_free",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_shared":{
+ "metric":"mem_shared",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_total":{
+ "metric":"mem_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/swap_free":{
+ "metric":"swap_free",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/swap_total":{
+ "metric":"swap_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/bytes_in":{
+ "metric":"bytes_in",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/bytes_out":{
+ "metric":"bytes_out",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/pkts_in":{
+ "metric":"pkts_in",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/pkts_out":{
+ "metric":"pkts_out",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/process/proc_run":{
+ "metric":"proc_run",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/process/proc_total":{
+ "metric":"proc_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/NumOpenConnections":{
+ "metric":"rpc.rpc.NumOpenConnections",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/ReceivedBytes":{
+ "metric":"rpc.rpc.ReceivedBytes",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcProcessingTime_avg_time":{
+ "metric":"rpc.rpc.RpcProcessingTime_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcProcessingTime_num_ops":{
+ "metric":"rpc.rpc.RpcProcessingTime_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcQueueTime_avg_time":{
+ "metric":"rpc.rpc.RpcQueueTime_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcQueueTime_num_ops":{
+ "metric":"rpc.rpc.RpcQueueTime_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcSlowResponse_avg_time":{
+ "metric":"rpc.rpc.RpcSlowResponse_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcSlowResponse_num_ops":{
+ "metric":"rpc.rpc.RpcSlowResponse_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/SentBytes":{
+ "metric":"rpc.rpc.SentBytes",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/abort/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.abort.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/abort/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.abort.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/abort_avg_time":{
+ "metric":"rpc.rpc.abort_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/abort_num_ops":{
+ "metric":"rpc.rpc.abort_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addColumn_avg_time":{
+ "metric":"rpc.rpc.addColumn_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addColumn_num_ops":{
+ "metric":"rpc.rpc.addColumn_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addToOnlineRegions/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.addToOnlineRegions.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addToOnlineRegions/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.addToOnlineRegions.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addToOnlineRegions_avg_time":{
+ "metric":"rpc.rpc.addToOnlineRegions_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addToOnlineRegions_num_ops":{
+ "metric":"rpc.rpc.addToOnlineRegions_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/assign_avg_time":{
+ "metric":"rpc.rpc.assign_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/assign_num_ops":{
+ "metric":"rpc.rpc.assign_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balanceSwitch_avg_time":{
+ "metric":"rpc.rpc.balanceSwitch_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balanceSwitch_num_ops":{
+ "metric":"rpc.rpc.balanceSwitch_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balance_avg_time":{
+ "metric":"rpc.rpc.balance_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balance_num_ops":{
+ "metric":"rpc.rpc.balance_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/bulkLoadHFiles/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.bulkLoadHFiles.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/bulkLoadHFiles/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.bulkLoadHFiles.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/bulkLoadHFiles_avg_time":{
+ "metric":"rpc.rpc.bulkLoadHFiles_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/bulkLoadHFiles_num_ops":{
+ "metric":"rpc.rpc.bulkLoadHFiles_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/callQueueLen":{
+ "metric":"rpc.rpc.callQueueLen",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndDelete/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.checkAndDelete.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndDelete/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.checkAndDelete.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndDelete_avg_time":{
+ "metric":"rpc.rpc.checkAndDelete_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndDelete_num_ops":{
+ "metric":"rpc.rpc.checkAndDelete_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndPut/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.checkAndPut.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndPut/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.checkAndPut.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndPut_avg_time":{
+ "metric":"rpc.rpc.checkAndPut_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndPut_num_ops":{
+ "metric":"rpc.rpc.checkAndPut_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkOOME/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.checkOOME.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkOOME/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.checkOOME.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkOOME_avg_time":{
+ "metric":"rpc.rpc.checkOOME_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkOOME_num_ops":{
+ "metric":"rpc.rpc.checkOOME_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/close/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.close.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/close/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.close.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/closeRegion/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.closeRegion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/closeRegion/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.closeRegion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/closeRegion_avg_time":{
+ "metric":"rpc.rpc.closeRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/closeRegion_num_ops":{
+ "metric":"rpc.rpc.closeRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/close_avg_time":{
+ "metric":"rpc.rpc.close_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/close_num_ops":{
+ "metric":"rpc.rpc.close_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/compactRegion/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.compactRegion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/compactRegion/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.compactRegion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/compactRegion_avg_time":{
+ "metric":"rpc.rpc.compactRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/compactRegion_num_ops":{
+ "metric":"rpc.rpc.compactRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/createTable_avg_time":{
+ "metric":"rpc.rpc.createTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/createTable_num_ops":{
+ "metric":"rpc.rpc.createTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/delete/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.delete.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/delete/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.delete.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteColumn_avg_time":{
+ "metric":"rpc.rpc.deleteColumn_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteColumn_num_ops":{
+ "metric":"rpc.rpc.deleteColumn_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteTable_avg_time":{
+ "metric":"rpc.rpc.deleteTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteTable_num_ops":{
+ "metric":"rpc.rpc.deleteTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/delete_avg_time":{
+ "metric":"rpc.rpc.delete_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/delete_num_ops":{
+ "metric":"rpc.rpc.delete_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/disableTable_avg_time":{
+ "metric":"rpc.rpc.disableTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/disableTable_num_ops":{
+ "metric":"rpc.rpc.disableTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/enableTable_avg_time":{
+ "metric":"rpc.rpc.enableTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/enableTable_num_ops":{
+ "metric":"rpc.rpc.enableTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/execCoprocessor/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.execCoprocessor.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/execCoprocessor/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.execCoprocessor.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/execCoprocessor_avg_time":{
+ "metric":"rpc.rpc.execCoprocessor_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/execCoprocessor_num_ops":{
+ "metric":"rpc.rpc.execCoprocessor_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/exists/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.exists.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/exists/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.exists.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/exists_avg_time":{
+ "metric":"rpc.rpc.exists_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/exists_num_ops":{
+ "metric":"rpc.rpc.exists_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/flushRegion/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.flushRegion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/flushRegion/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.flushRegion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/flushRegion_avg_time":{
+ "metric":"rpc.rpc.flushRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/flushRegion_num_ops":{
+ "metric":"rpc.rpc.flushRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/get/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.get.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/get/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.get.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getAlterStatus_avg_time":{
+ "metric":"rpc.rpc.getAlterStatus_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getAlterStatus_num_ops":{
+ "metric":"rpc.rpc.getAlterStatus_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries_avg_time":{
+ "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries_num_ops":{
+ "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getCatalogTracker/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getCatalogTracker.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getCatalogTracker/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getCatalogTracker.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getCatalogTracker_avg_time":{
+ "metric":"rpc.rpc.getCatalogTracker_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getCatalogTracker_num_ops":{
+ "metric":"rpc.rpc.getCatalogTracker_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClosestRowBefore/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getClosestRowBefore.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClosestRowBefore/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getClosestRowBefore.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClosestRowBefore_avg_time":{
+ "metric":"rpc.rpc.getClosestRowBefore_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClosestRowBefore_num_ops":{
+ "metric":"rpc.rpc.getClosestRowBefore_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClusterStatus_avg_time":{
+ "metric":"rpc.rpc.getClusterStatus_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClusterStatus_num_ops":{
+ "metric":"rpc.rpc.getClusterStatus_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getConfiguration/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getConfiguration.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getConfiguration/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getConfiguration.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getConfiguration_avg_time":{
+ "metric":"rpc.rpc.getConfiguration_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getConfiguration_num_ops":{
+ "metric":"rpc.rpc.getConfiguration_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getFromOnlineRegions/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getFromOnlineRegions.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getFromOnlineRegions/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getFromOnlineRegions.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getFromOnlineRegions_avg_time":{
+ "metric":"rpc.rpc.getFromOnlineRegions_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getFromOnlineRegions_num_ops":{
+ "metric":"rpc.rpc.getFromOnlineRegions_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHServerInfo/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getHServerInfo.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHServerInfo/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getHServerInfo.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHServerInfo_avg_time":{
+ "metric":"rpc.rpc.getHServerInfo_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHServerInfo_num_ops":{
+ "metric":"rpc.rpc.getHServerInfo_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHTableDescriptors_avg_time":{
+ "metric":"rpc.rpc.getHTableDescriptors_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHTableDescriptors_num_ops":{
+ "metric":"rpc.rpc.getHTableDescriptors_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getOnlineRegions/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getOnlineRegions.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getOnlineRegions/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getOnlineRegions.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getOnlineRegions_avg_time":{
+ "metric":"rpc.rpc.getOnlineRegions_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getOnlineRegions_num_ops":{
+ "metric":"rpc.rpc.getOnlineRegions_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolSignature/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getProtocolSignature.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolSignature/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getProtocolSignature.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolSignature_avg_time":{
+ "metric":"rpc.rpc.getProtocolSignature_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolSignature_num_ops":{
+ "metric":"rpc.rpc.getProtocolSignature_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolVersion/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getProtocolVersion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolVersion/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getProtocolVersion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolVersion_avg_time":{
+ "metric":"rpc.rpc.getProtocolVersion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolVersion_num_ops":{
+ "metric":"rpc.rpc.getProtocolVersion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getRegionInfo/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getRegionInfo.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getRegionInfo/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getRegionInfo.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getRegionInfo_avg_time":{
+ "metric":"rpc.rpc.getRegionInfo_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getRegionInfo_num_ops":{
+ "metric":"rpc.rpc.getRegionInfo_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getServerName/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getServerName.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getServerName/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getServerName.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getServerName_avg_time":{
+ "metric":"rpc.rpc.getServerName_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getServerName_num_ops":{
+ "metric":"rpc.rpc.getServerName_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getZooKeeper/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getZooKeeper.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getZooKeeper/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getZooKeeper.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getZooKeeper_avg_time":{
+ "metric":"rpc.rpc.getZooKeeper_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getZooKeeper_num_ops":{
+ "metric":"rpc.rpc.getZooKeeper_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/get_avg_time":{
+ "metric":"rpc.rpc.get_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/get_num_ops":{
+ "metric":"rpc.rpc.get_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/increment/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.increment.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/increment/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.increment.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/incrementColumnValue/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.incrementColumnValue.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/incrementColumnValue/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.incrementColumnValue.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/incrementColumnValue_avg_time":{
+ "metric":"rpc.rpc.incrementColumnValue_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/incrementColumnValue_num_ops":{
+ "metric":"rpc.rpc.incrementColumnValue_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/increment_avg_time":{
+ "metric":"rpc.rpc.increment_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/increment_num_ops":{
+ "metric":"rpc.rpc.increment_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isAborted/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.isAborted.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isAborted/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.isAborted.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isAborted_avg_time":{
+ "metric":"rpc.rpc.isAborted_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isAborted_num_ops":{
+ "metric":"rpc.rpc.isAborted_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isMasterRunning_avg_time":{
+ "metric":"rpc.rpc.isMasterRunning_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isMasterRunning_num_ops":{
+ "metric":"rpc.rpc.isMasterRunning_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isStopped/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.isStopped.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isStopped/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.isStopped.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isStopped_avg_time":{
+ "metric":"rpc.rpc.isStopped_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isStopped_num_ops":{
+ "metric":"rpc.rpc.isStopped_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/lockRow/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.lockRow.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/lockRow/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.lockRow.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/lockRow_avg_time":{
+ "metric":"rpc.rpc.lockRow_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/lockRow_num_ops":{
+ "metric":"rpc.rpc.lockRow_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyColumn_avg_time":{
+ "metric":"rpc.rpc.modifyColumn_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyColumn_num_ops":{
+ "metric":"rpc.rpc.modifyColumn_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyTable_avg_time":{
+ "metric":"rpc.rpc.modifyTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyTable_num_ops":{
+ "metric":"rpc.rpc.modifyTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/move_avg_time":{
+ "metric":"rpc.rpc.move_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/move_num_ops":{
+ "metric":"rpc.rpc.move_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/multi/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.multi.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/multi/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.multi.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/multi_avg_time":{
+ "metric":"rpc.rpc.multi_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/multi_num_ops":{
+ "metric":"rpc.rpc.multi_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/next/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.next.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/next/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.next.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/next_avg_time":{
+ "metric":"rpc.rpc.next_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/next_num_ops":{
+ "metric":"rpc.rpc.next_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/offline_avg_time":{
+ "metric":"rpc.rpc.offline_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/offline_num_ops":{
+ "metric":"rpc.rpc.offline_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegion/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.openRegion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegion/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.openRegion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegion_avg_time":{
+ "metric":"rpc.rpc.openRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegion_num_ops":{
+ "metric":"rpc.rpc.openRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegions/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.openRegions.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegions/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.openRegions.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegions_avg_time":{
+ "metric":"rpc.rpc.openRegions_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegions_num_ops":{
+ "metric":"rpc.rpc.openRegions_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openScanner/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.openScanner.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openScanner/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.openScanner.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openScanner_avg_time":{
+ "metric":"rpc.rpc.openScanner_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openScanner_num_ops":{
+ "metric":"rpc.rpc.openScanner_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/put/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.put.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/put/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.put.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/put_avg_time":{
+ "metric":"rpc.rpc.put_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/put_num_ops":{
+ "metric":"rpc.rpc.put_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerReport_avg_time":{
+ "metric":"rpc.rpc.regionServerReport_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerReport_num_ops":{
+ "metric":"rpc.rpc.regionServerReport_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerStartup_avg_time":{
+ "metric":"rpc.rpc.regionServerStartup_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerStartup_num_ops":{
+ "metric":"rpc.rpc.regionServerStartup_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.removeFromOnlineRegions.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.removeFromOnlineRegions.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/removeFromOnlineRegions_avg_time":{
+ "metric":"rpc.rpc.removeFromOnlineRegions_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/removeFromOnlineRegions_num_ops":{
+ "metric":"rpc.rpc.removeFromOnlineRegions_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/replicateLogEntries/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.replicateLogEntries.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/replicateLogEntries/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.replicateLogEntries.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/replicateLogEntries_avg_time":{
+ "metric":"rpc.rpc.replicateLogEntries_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/replicateLogEntries_num_ops":{
+ "metric":"rpc.rpc.replicateLogEntries_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/reportRSFatalError_avg_time":{
+ "metric":"rpc.rpc.reportRSFatalError_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/reportRSFatalError_num_ops":{
+ "metric":"rpc.rpc.reportRSFatalError_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rollHLogWriter/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.rollHLogWriter.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rollHLogWriter/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.rollHLogWriter.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rollHLogWriter_avg_time":{
+ "metric":"rpc.rpc.rollHLogWriter_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rollHLogWriter_num_ops":{
+ "metric":"rpc.rpc.rollHLogWriter_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthenticationFailures":{
+ "metric":"rpc.rpc.rpcAuthenticationFailures",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthenticationSuccesses":{
+ "metric":"rpc.rpc.rpcAuthenticationSuccesses",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthorizationFailures":{
+ "metric":"rpc.rpc.rpcAuthorizationFailures",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthorizationSuccesses":{
+ "metric":"rpc.rpc.rpcAuthorizationSuccesses",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/shutdown_avg_time":{
+ "metric":"rpc.rpc.shutdown_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/shutdown_num_ops":{
+ "metric":"rpc.rpc.shutdown_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/splitRegion/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.splitRegion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/splitRegion/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.splitRegion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/splitRegion_avg_time":{
+ "metric":"rpc.rpc.splitRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/splitRegion_num_ops":{
+ "metric":"rpc.rpc.splitRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stop/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.stop.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stop/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.stop.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stopMaster_avg_time":{
+ "metric":"rpc.rpc.stopMaster_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stopMaster_num_ops":{
+ "metric":"rpc.rpc.stopMaster_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stop_avg_time":{
+ "metric":"rpc.rpc.stop_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stop_num_ops":{
+ "metric":"rpc.rpc.stop_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unassign_avg_time":{
+ "metric":"rpc.rpc.unassign_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unassign_num_ops":{
+ "metric":"rpc.rpc.unassign_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unlockRow/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.unlockRow.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unlockRow/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.unlockRow.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unlockRow_avg_time":{
+ "metric":"rpc.rpc.unlockRow_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unlockRow_num_ops":{
+ "metric":"rpc.rpc.unlockRow_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/canCommit_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.canCommit_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/canCommit_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.canCommit_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/commitPending_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.commitPending_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/commitPending_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.commitPending_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/done_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.done_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/done_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.done_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getBlockLocalPathInfo_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getBlockLocalPathInfo_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getBlockLocalPathInfo_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getBlockLocalPathInfo_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getMapCompletionEvents_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getMapCompletionEvents_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getMapCompletionEvents_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getMapCompletionEvents_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getProtocolVersion_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getProtocolVersion_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getTask_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getTask_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getTask_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getTask_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/ping_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.ping_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/ping_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.ping_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/statusUpdate_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.statusUpdate_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/statusUpdate_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.statusUpdate_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/ugi/loginFailure_avg_time":{
+ "metric":"ugi.ugi.loginFailure_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/ugi/loginFailure_num_ops":{
+ "metric":"ugi.ugi.loginFailure_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/ugi/loginSuccess_avg_time":{
+ "metric":"ugi.ugi.loginSuccess_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/ugi/loginSuccess_num_ops":{
+ "metric":"ugi.ugi.loginSuccess_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ }
+ },
+ "HBASE_REGIONSERVER":{
+ "metrics/hbase/regionserver/mutationsWithoutWALSize": {
+ "metric": "regionserver.Server.mutationsWithoutWALSize",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/hbase/regionserver/slowAppendCount": {
+ "metric": "regionserver.Server.slowAppendCount",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/hbase/regionserver/slowPutCount": {
+ "metric": "regionserver.Server.slowPutCount",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/hbase/regionserver/slowIncrementCount": {
+ "metric": "regionserver.Server.slowIncrementCount",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/hbase/regionserver/percentFilesLocal": {
+ "metric": "regionserver.Server.percentFilesLocal",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/hbase/regionserver/slowGetCount": {
+ "metric": "regionserver.Server.slowGetCount",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/hbase/regionserver/slowDeleteCount": {
+ "metric": "regionserver.Server.slowDeleteCount",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/boottime":{
+ "metric":"boottime",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_aidle":{
+ "metric":"cpu_aidle",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_idle":{
+ "metric":"cpu_idle",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_nice":{
+ "metric":"cpu_nice",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_num":{
+ "metric":"cpu_num",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_speed":{
+ "metric":"cpu_speed",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_system":{
+ "metric":"cpu_system",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_user":{
+ "metric":"cpu_user",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_wio":{
+ "metric":"cpu_wio",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/disk/disk_free":{
+ "metric":"disk_free",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/disk/disk_total":{
+ "metric":"disk_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/disk/part_max_used":{
+ "metric":"part_max_used",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/blockCacheCount":{
+ "metric":"regionserver.Server.blockCacheCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/blockCacheEvictedCount":{
+ "metric":"regionserver.Server.blockCacheEvictedCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/blockCacheFree":{
+ "metric":"regionserver.Server.blockCacheFree",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/blockCacheHitCachingRatio":{
+ "metric":"regionserver.Server.blockCacheHitCachingRatio",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/blockCacheHitCount":{
+ "metric":"regionserver.Server.blockCacheHitCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/blockCacheHitRatio":{
+ "metric":"regionserver.Server.blockCacheHitRatio",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/blockCacheMissCount":{
+ "metric":"regionserver.Server.blockCacheMissCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/blockCacheSize":{
+ "metric":"regionserver.Server.blockCacheSize",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/compactionQueueSize":{
+ "metric":"regionserver.Server.compactionQueueLength",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/compactionSize_avg_time":{
+ "metric":"regionserver.Server.compactionSize_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/compactionSize_num_ops":{
+ "metric":"regionserver.Server.compactionSize_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/compactionTime_avg_time":{
+ "metric":"regionserver.Server.compactionTime_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/compactionTime_num_ops":{
+ "metric":"regionserver.Server.compactionTime_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/deleteRequestLatency_75th_percentile":{
+ "metric":"regionserver.Server.deleteRequestLatency_75th_percentile",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/deleteRequestLatency_95th_percentile":{
+ "metric":"regionserver.Server.deleteRequestLatency_95th_percentile",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/deleteRequestLatency_99th_percentile":{
+ "metric":"regionserver.Server.deleteRequestLatency_99th_percentile",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/deleteRequestLatency_max":{
+ "metric":"regionserver.Server.deleteRequestLatency_max",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/deleteRequestLatency_mean":{
+ "metric":"regionserver.Server.deleteRequestLatency_mean",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/deleteRequestLatency_median":{
+ "metric":"regionserver.Server.deleteRequestLatency_median",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/deleteRequestLatency_min":{
+ "metric":"regionserver.Server.deleteRequestLatency_min",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/deleteRequestLatency_num_ops":{
+ "metric":"regionserver.Server.deleteRequestLatency_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/deleteRequestLatency_std_dev":{
+ "metric":"regionserver.Server.deleteRequestLatency_std_dev",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/flushQueueSize":{
+ "metric":"regionserver.Server.flushQueueLength",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/flushSize_avg_time":{
+ "metric":"regionserver.Server.flushSize_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/flushSize_num_ops":{
+ "metric":"regionserver.Server.flushSize_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/flushTime_avg_time":{
+ "metric":"regionserver.Server.flushTime_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/flushTime_num_ops":{
+ "metric":"regionserver.Server.flushTime_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsReadLatencyHistogram_75th_percentile":{
+ "metric":"regionserver.Server.fsReadLatencyHistogram_75th_percentile",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsReadLatencyHistogram_95th_percentile":{
+ "metric":"regionserver.Server.fsReadLatencyHistogram_95th_percentile",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsReadLatencyHistogram_99th_percentile":{
+ "metric":"regionserver.Server.fsReadLatencyHistogram_99th_percentile",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsReadLatencyHistogram_max":{
+ "metric":"regionserver.Server.fsReadLatencyHistogram_max",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsReadLatencyHistogram_mean":{
+ "metric":"regionserver.Server.fsReadLatencyHistogram_mean",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsReadLatencyHistogram_median":{
+ "metric":"regionserver.Server.fsReadLatencyHistogram_median",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsReadLatencyHistogram_min":{
+ "metric":"regionserver.Server.fsReadLatencyHistogram_min",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsReadLatencyHistogram_num_ops":{
+ "metric":"regionserver.Server.fsReadLatencyHistogram_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsReadLatencyHistogram_std_dev":{
+ "metric":"regionserver.Server.fsReadLatencyHistogram_std_dev",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsReadLatency_avg_time":{
+ "metric":"regionserver.Server.fsReadLatency_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsReadLatency_num_ops":{
+ "metric":"regionserver.Server.fsReadLatency_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsSyncLatency_avg_time":{
+ "metric":"regionserver.Server.fsSyncLatency_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsSyncLatency_num_ops":{
+ "metric":"regionserver.Server.fsSyncLatency_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsWriteLatencyHistogram_75th_percentile":{
+ "metric":"regionserver.Server.fsWriteLatencyHistogram_75th_percentile",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsWriteLatencyHistogram_95th_percentile":{
+ "metric":"regionserver.Server.fsWriteLatencyHistogram_95th_percentile",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsWriteLatencyHistogram_99th_percentile":{
+ "metric":"regionserver.Server.fsWriteLatencyHistogram_99th_percentile",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsWriteLatencyHistogram_max":{
+ "metric":"regionserver.Server.fsWriteLatencyHistogram_max",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsWriteLatencyHistogram_mean":{
+ "metric":"regionserver.Server.fsWriteLatencyHistogram_mean",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsWriteLatencyHistogram_median":{
+ "metric":"regionserver.Server.fsWriteLatencyHistogram_median",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsWriteLatencyHistogram_min":{
+ "metric":"regionserver.Server.fsWriteLatencyHistogram_min",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsWriteLatencyHistogram_num_ops":{
+ "metric":"regionserver.Server.fsWriteLatencyHistogram_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsWriteLatencyHistogram_std_dev":{
+ "metric":"regionserver.Server.fsWriteLatencyHistogram_std_dev",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsWriteLatency_avg_time":{
+ "metric":"regionserver.Server.fsWriteLatency_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsWriteLatency_num_ops":{
+ "metric":"regionserver.Server.fsWriteLatency_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/getRequestLatency_75th_percentile":{
+ "metric":"regionserver.Server.getRequestLatency_75th_percentile",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/getRequestLatency_95th_percentile":{
+ "metric":"regionserver.Server.getRequestLatency_95th_percentile",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/getRequestLatency_99th_percentile":{
+ "metric":"regionserver.Server.getRequestLatency_99th_percentile",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/getRequestLatency_max":{
+ "metric":"regionserver.Server.getRequestLatency_max",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/getRequestLatency_mean":{
+ "metric":"regionserver.Server.getRequestLatency_mean",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/getRequestLatency_median":{
+ "metric":"regionserver.Server.getRequestLatency_median",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/getRequestLatency_min":{
+ "metric":"regionserver.Server.getRequestLatency_min",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/getRequestLatency_num_ops":{
+ "metric":"regionserver.Server.getRequestLatency_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/getRequestLatency_std_dev":{
+ "metric":"regionserver.Server.getRequestLatency_std_dev",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/hdfsBlocksLocalityIndex":{
+ "metric":"regionserver.Server.hdfsBlocksLocalityIndex",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/hlogFileCount":{
+ "metric":"regionserver.Server.hlogFileCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/memstoreSizeMB":{
+ "metric":"regionserver.Server.memstoreSizeMB",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/putRequestLatency_75th_percentile":{
+ "metric":"regionserver.Server.putRequestLatency_75th_percentile",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/putRequestLatency_95th_percentile":{
+ "metric":"regionserver.Server.putRequestLatency_95th_percentile",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/putRequestLatency_99th_percentile":{
+ "metric":"regionserver.Server.putRequestLatency_99th_percentile",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/putRequestLatency_max":{
+ "metric":"regionserver.Server.putRequestLatency_max",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/putRequestLatency_mean":{
+ "metric":"regionserver.Server.putRequestLatency_mean",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/putRequestLatency_median":{
+ "metric":"regionserver.Server.putRequestLatency_median",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/putRequestLatency_min":{
+ "metric":"regionserver.Server.putRequestLatency_min",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/putRequestLatency_num_ops":{
+ "metric":"regionserver.Server.putRequestLatency_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/putRequestLatency_std_dev":{
+ "metric":"regionserver.Server.putRequestLatency_std_dev",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/readRequestsCount":{
+ "metric":"regionserver.Server.readRequestCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/regions":{
+ "metric":"regionserver.Server.regionCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/requests":{
+ "metric":"regionserver.Server.requests",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/rootIndexSizeKB":{
+ "metric":"regionserver.Server.rootIndexSizeKB",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/storefileIndexSizeMB":{
+ "metric":"regionserver.Server.storefileIndexSizeMB",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/storefiles":{
+ "metric":"regionserver.Server.storefiles",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/stores":{
+ "metric":"regionserver.Server.stores",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/totalStaticBloomSizeKB":{
+ "metric":"regionserver.Server.totalStaticBloomSizeKB",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/totalStaticIndexSizeKB":{
+ "metric":"regionserver.Server.totalStaticIndexSizeKB",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/writeRequestsCount":{
+ "metric":"regionserver.Server.writeRequestCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/gcCount":{
+ "metric":"jvm.JvmMetrics.gcCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/gcTimeMillis":{
+ "metric":"jvm.JvmMetrics.gcTimeMillis",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/logError":{
+ "metric":"jvm.JvmMetrics.logError",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/logFatal":{
+ "metric":"jvm.JvmMetrics.logFatal",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/logInfo":{
+ "metric":"jvm.JvmMetrics.logInfo",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/logWarn":{
+ "metric":"jvm.JvmMetrics.logWarn",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/maxMemoryM":{
+ "metric":"jvm.JvmMetrics.maxMemoryM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memHeapCommittedM":{
+ "metric":"jvm.JvmMetrics.memHeapCommittedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memHeapUsedM":{
+ "metric":"jvm.JvmMetrics.memHeapUsedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memNonHeapCommittedM":{
+ "metric":"jvm.JvmMetrics.memNonHeapCommittedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memNonHeapUsedM":{
+ "metric":"jvm.JvmMetrics.memNonHeapUsedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsBlocked":{
+ "metric":"jvm.JvmMetrics.threadsBlocked",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsNew":{
+ "metric":"jvm.JvmMetrics.threadsNew",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsRunnable":{
+ "metric":"jvm.JvmMetrics.threadsRunnable",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsTerminated":{
+ "metric":"jvm.JvmMetrics.threadsTerminated",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsTimedWaiting":{
+ "metric":"jvm.JvmMetrics.threadsTimedWaiting",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsWaiting":{
+ "metric":"jvm.JvmMetrics.threadsWaiting",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/load/load_fifteen":{
+ "metric":"load_fifteen",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/load/load_five":{
+ "metric":"load_five",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/load/load_one":{
+ "metric":"load_one",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_buffers":{
+ "metric":"mem_buffers",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_cached":{
+ "metric":"mem_cached",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_free":{
+ "metric":"mem_free",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_shared":{
+ "metric":"mem_shared",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_total":{
+ "metric":"mem_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/swap_free":{
+ "metric":"swap_free",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/swap_total":{
+ "metric":"swap_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/bytes_in":{
+ "metric":"bytes_in",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/bytes_out":{
+ "metric":"bytes_out",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/pkts_in":{
+ "metric":"pkts_in",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/pkts_out":{
+ "metric":"pkts_out",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/process/proc_run":{
+ "metric":"proc_run",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/process/proc_total":{
+ "metric":"proc_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/NumOpenConnections":{
+ "metric":"rpc.rpc.NumOpenConnections",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/ReceivedBytes":{
+ "metric":"rpc.rpc.ReceivedBytes",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcProcessingTime_avg_time":{
+ "metric":"rpc.rpc.RpcProcessingTime_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcProcessingTime_num_ops":{
+ "metric":"rpc.rpc.RpcProcessingTime_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcQueueTime_avg_time":{
+ "metric":"rpc.rpc.RpcQueueTime_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcQueueTime_num_ops":{
+ "metric":"rpc.rpc.RpcQueueTime_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcSlowResponse_avg_time":{
+ "metric":"rpc.rpc.RpcSlowResponse_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcSlowResponse_num_ops":{
+ "metric":"rpc.rpc.RpcSlowResponse_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/SentBytes":{
+ "metric":"rpc.rpc.SentBytes",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/abort/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.abort.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/abort/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.abort.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/abort_avg_time":{
+ "metric":"rpc.rpc.abort_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/abort_num_ops":{
+ "metric":"rpc.rpc.abort_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addColumn_avg_time":{
+ "metric":"rpc.rpc.addColumn_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addColumn_num_ops":{
+ "metric":"rpc.rpc.addColumn_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addToOnlineRegions/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.addToOnlineRegions.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addToOnlineRegions/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.addToOnlineRegions.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addToOnlineRegions_avg_time":{
+ "metric":"rpc.rpc.addToOnlineRegions_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addToOnlineRegions_num_ops":{
+ "metric":"rpc.rpc.addToOnlineRegions_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/assign_avg_time":{
+ "metric":"rpc.rpc.assign_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/assign_num_ops":{
+ "metric":"rpc.rpc.assign_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balanceSwitch_avg_time":{
+ "metric":"rpc.rpc.balanceSwitch_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balanceSwitch_num_ops":{
+ "metric":"rpc.rpc.balanceSwitch_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balance_avg_time":{
+ "metric":"rpc.rpc.balance_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balance_num_ops":{
+ "metric":"rpc.rpc.balance_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/bulkLoadHFiles/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.bulkLoadHFiles.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/bulkLoadHFiles/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.bulkLoadHFiles.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/bulkLoadHFiles_avg_time":{
+ "metric":"rpc.rpc.bulkLoadHFiles_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/bulkLoadHFiles_num_ops":{
+ "metric":"rpc.rpc.bulkLoadHFiles_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/callQueueLen":{
+ "metric":"rpc.rpc.callQueueLen",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndDelete/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.checkAndDelete.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndDelete/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.checkAndDelete.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndDelete_avg_time":{
+ "metric":"rpc.rpc.checkAndDelete_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndDelete_num_ops":{
+ "metric":"rpc.rpc.checkAndDelete_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndPut/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.checkAndPut.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndPut/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.checkAndPut.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndPut_avg_time":{
+ "metric":"rpc.rpc.checkAndPut_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndPut_num_ops":{
+ "metric":"rpc.rpc.checkAndPut_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkOOME/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.checkOOME.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkOOME/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.checkOOME.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkOOME_avg_time":{
+ "metric":"rpc.rpc.checkOOME_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkOOME_num_ops":{
+ "metric":"rpc.rpc.checkOOME_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/close/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.close.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/close/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.close.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/closeRegion/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.closeRegion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/closeRegion/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.closeRegion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/closeRegion_avg_time":{
+ "metric":"rpc.rpc.closeRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/closeRegion_num_ops":{
+ "metric":"rpc.rpc.closeRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/close_avg_time":{
+ "metric":"rpc.rpc.close_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/close_num_ops":{
+ "metric":"rpc.rpc.close_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/compactRegion/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.compactRegion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/compactRegion/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.compactRegion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/compactRegion_avg_time":{
+ "metric":"rpc.rpc.compactRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/compactRegion_num_ops":{
+ "metric":"rpc.rpc.compactRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/createTable_avg_time":{
+ "metric":"rpc.rpc.createTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/createTable_num_ops":{
+ "metric":"rpc.rpc.createTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/delete/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.delete.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/delete/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.delete.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteColumn_avg_time":{
+ "metric":"rpc.rpc.deleteColumn_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteColumn_num_ops":{
+ "metric":"rpc.rpc.deleteColumn_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteTable_avg_time":{
+ "metric":"rpc.rpc.deleteTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteTable_num_ops":{
+ "metric":"rpc.rpc.deleteTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/delete_avg_time":{
+ "metric":"rpc.rpc.delete_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/delete_num_ops":{
+ "metric":"rpc.rpc.delete_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/disableTable_avg_time":{
+ "metric":"rpc.rpc.disableTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/disableTable_num_ops":{
+ "metric":"rpc.rpc.disableTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/enableTable_avg_time":{
+ "metric":"rpc.rpc.enableTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/enableTable_num_ops":{
+ "metric":"rpc.rpc.enableTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/execCoprocessor/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.execCoprocessor.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/execCoprocessor/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.execCoprocessor.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/execCoprocessor_avg_time":{
+ "metric":"rpc.rpc.execCoprocessor_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/execCoprocessor_num_ops":{
+ "metric":"rpc.rpc.execCoprocessor_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/exists/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.exists.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/exists/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.exists.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/exists_avg_time":{
+ "metric":"rpc.rpc.exists_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/exists_num_ops":{
+ "metric":"rpc.rpc.exists_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/flushRegion/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.flushRegion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/flushRegion/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.flushRegion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/flushRegion_avg_time":{
+ "metric":"rpc.rpc.flushRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/flushRegion_num_ops":{
+ "metric":"rpc.rpc.flushRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/get/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.get.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/get/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.get.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getAlterStatus_avg_time":{
+ "metric":"rpc.rpc.getAlterStatus_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getAlterStatus_num_ops":{
+ "metric":"rpc.rpc.getAlterStatus_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries_avg_time":{
+ "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries_num_ops":{
+ "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getCatalogTracker/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getCatalogTracker.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getCatalogTracker/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getCatalogTracker.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getCatalogTracker_avg_time":{
+ "metric":"rpc.rpc.getCatalogTracker_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getCatalogTracker_num_ops":{
+ "metric":"rpc.rpc.getCatalogTracker_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClosestRowBefore/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getClosestRowBefore.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClosestRowBefore/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getClosestRowBefore.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClosestRowBefore_avg_time":{
+ "metric":"rpc.rpc.getClosestRowBefore_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClosestRowBefore_num_ops":{
+ "metric":"rpc.rpc.getClosestRowBefore_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClusterStatus_avg_time":{
+ "metric":"rpc.rpc.getClusterStatus_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClusterStatus_num_ops":{
+ "metric":"rpc.rpc.getClusterStatus_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getConfiguration/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getConfiguration.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getConfiguration/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getConfiguration.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getConfiguration_avg_time":{
+ "metric":"rpc.rpc.getConfiguration_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getConfiguration_num_ops":{
+ "metric":"rpc.rpc.getConfiguration_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getFromOnlineRegions/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getFromOnlineRegions.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getFromOnlineRegions/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getFromOnlineRegions.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getFromOnlineRegions_avg_time":{
+ "metric":"rpc.rpc.getFromOnlineRegions_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getFromOnlineRegions_num_ops":{
+ "metric":"rpc.rpc.getFromOnlineRegions_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHServerInfo/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getHServerInfo.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHServerInfo/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getHServerInfo.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHServerInfo_avg_time":{
+ "metric":"rpc.rpc.getHServerInfo_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHServerInfo_num_ops":{
+ "metric":"rpc.rpc.getHServerInfo_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHTableDescriptors_avg_time":{
+ "metric":"rpc.rpc.getHTableDescriptors_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHTableDescriptors_num_ops":{
+ "metric":"rpc.rpc.getHTableDescriptors_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getOnlineRegions/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getOnlineRegions.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getOnlineRegions/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getOnlineRegions.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getOnlineRegions_avg_time":{
+ "metric":"rpc.rpc.getOnlineRegions_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getOnlineRegions_num_ops":{
+ "metric":"rpc.rpc.getOnlineRegions_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolSignature/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getProtocolSignature.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolSignature/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getProtocolSignature.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolSignature_avg_time":{
+ "metric":"rpc.rpc.getProtocolSignature_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolSignature_num_ops":{
+ "metric":"rpc.rpc.getProtocolSignature_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolVersion/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getProtocolVersion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolVersion/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getProtocolVersion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolVersion_avg_time":{
+ "metric":"rpc.rpc.getProtocolVersion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolVersion_num_ops":{
+ "metric":"rpc.rpc.getProtocolVersion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getRegionInfo/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getRegionInfo.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getRegionInfo/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getRegionInfo.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getRegionInfo_avg_time":{
+ "metric":"rpc.rpc.getRegionInfo_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getRegionInfo_num_ops":{
+ "metric":"rpc.rpc.getRegionInfo_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getServerName/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getServerName.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getServerName/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getServerName.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getServerName_avg_time":{
+ "metric":"rpc.rpc.getServerName_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getServerName_num_ops":{
+ "metric":"rpc.rpc.getServerName_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getZooKeeper/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getZooKeeper.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getZooKeeper/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getZooKeeper.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getZooKeeper_avg_time":{
+ "metric":"rpc.rpc.getZooKeeper_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getZooKeeper_num_ops":{
+ "metric":"rpc.rpc.getZooKeeper_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/get_avg_time":{
+ "metric":"rpc.rpc.get_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/get_num_ops":{
+ "metric":"rpc.rpc.get_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/increment/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.increment.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/increment/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.increment.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/incrementColumnValue/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.incrementColumnValue.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/incrementColumnValue/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.incrementColumnValue.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/incrementColumnValue_avg_time":{
+ "metric":"rpc.rpc.incrementColumnValue_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/incrementColumnValue_num_ops":{
+ "metric":"rpc.rpc.incrementColumnValue_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/increment_avg_time":{
+ "metric":"rpc.rpc.increment_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/increment_num_ops":{
+ "metric":"rpc.rpc.increment_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isAborted/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.isAborted.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isAborted/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.isAborted.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isAborted_avg_time":{
+ "metric":"rpc.rpc.isAborted_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isAborted_num_ops":{
+ "metric":"rpc.rpc.isAborted_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isMasterRunning_avg_time":{
+ "metric":"rpc.rpc.isMasterRunning_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isMasterRunning_num_ops":{
+ "metric":"rpc.rpc.isMasterRunning_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isStopped/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.isStopped.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isStopped/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.isStopped.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isStopped_avg_time":{
+ "metric":"rpc.rpc.isStopped_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isStopped_num_ops":{
+ "metric":"rpc.rpc.isStopped_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/lockRow/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.lockRow.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/lockRow/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.lockRow.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/lockRow_avg_time":{
+ "metric":"rpc.rpc.lockRow_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/lockRow_num_ops":{
+ "metric":"rpc.rpc.lockRow_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyColumn_avg_time":{
+ "metric":"rpc.rpc.modifyColumn_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyColumn_num_ops":{
+ "metric":"rpc.rpc.modifyColumn_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyTable_avg_time":{
+ "metric":"rpc.rpc.modifyTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyTable_num_ops":{
+ "metric":"rpc.rpc.modifyTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/move_avg_time":{
+ "metric":"rpc.rpc.move_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/move_num_ops":{
+ "metric":"rpc.rpc.move_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/multi/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.multi.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/multi/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.multi.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/multi_avg_time":{
+ "metric":"rpc.rpc.multi_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/multi_num_ops":{
+ "metric":"rpc.rpc.multi_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/next/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.next.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/next/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.next.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/next_avg_time":{
+ "metric":"rpc.rpc.next_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/next_num_ops":{
+ "metric":"rpc.rpc.next_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/offline_avg_time":{
+ "metric":"rpc.rpc.offline_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/offline_num_ops":{
+ "metric":"rpc.rpc.offline_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegion/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.openRegion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegion/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.openRegion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegion_avg_time":{
+ "metric":"rpc.rpc.openRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegion_num_ops":{
+ "metric":"rpc.rpc.openRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegions/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.openRegions.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegions/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.openRegions.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegions_avg_time":{
+ "metric":"rpc.rpc.openRegions_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegions_num_ops":{
+ "metric":"rpc.rpc.openRegions_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openScanner/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.openScanner.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openScanner/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.openScanner.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openScanner_avg_time":{
+ "metric":"rpc.rpc.openScanner_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openScanner_num_ops":{
+ "metric":"rpc.rpc.openScanner_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/put/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.put.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/put/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.put.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/put_avg_time":{
+ "metric":"rpc.rpc.put_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/put_num_ops":{
+ "metric":"rpc.rpc.put_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerReport_avg_time":{
+ "metric":"rpc.rpc.regionServerReport_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerReport_num_ops":{
+ "metric":"rpc.rpc.regionServerReport_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerStartup_avg_time":{
+ "metric":"rpc.rpc.regionServerStartup_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerStartup_num_ops":{
+ "metric":"rpc.rpc.regionServerStartup_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.removeFromOnlineRegions.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.removeFromOnlineRegions.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/removeFromOnlineRegions_avg_time":{
+ "metric":"rpc.rpc.removeFromOnlineRegions_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/removeFromOnlineRegions_num_ops":{
+ "metric":"rpc.rpc.removeFromOnlineRegions_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/replicateLogEntries/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.replicateLogEntries.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/replicateLogEntries/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.replicateLogEntries.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/replicateLogEntries_avg_time":{
+ "metric":"rpc.rpc.replicateLogEntries_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/replicateLogEntries_num_ops":{
+ "metric":"rpc.rpc.replicateLogEntries_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/reportRSFatalError_avg_time":{
+ "metric":"rpc.rpc.reportRSFatalError_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/reportRSFatalError_num_ops":{
+ "metric":"rpc.rpc.reportRSFatalError_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rollHLogWriter/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.rollHLogWriter.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rollHLogWriter/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.rollHLogWriter.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rollHLogWriter_avg_time":{
+ "metric":"rpc.rpc.rollHLogWriter_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rollHLogWriter_num_ops":{
+ "metric":"rpc.rpc.rollHLogWriter_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthenticationFailures":{
+ "metric":"rpc.rpc.rpcAuthenticationFailures",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthenticationSuccesses":{
+ "metric":"rpc.rpc.rpcAuthenticationSuccesses",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthorizationFailures":{
+ "metric":"rpc.rpc.rpcAuthorizationFailures",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthorizationSuccesses":{
+ "metric":"rpc.rpc.rpcAuthorizationSuccesses",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/shutdown_avg_time":{
+ "metric":"rpc.rpc.shutdown_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/shutdown_num_ops":{
+ "metric":"rpc.rpc.shutdown_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/splitRegion/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.splitRegion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/splitRegion/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.splitRegion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/splitRegion_avg_time":{
+ "metric":"rpc.rpc.splitRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/splitRegion_num_ops":{
+ "metric":"rpc.rpc.splitRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stop/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.stop.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stop/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.stop.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stopMaster_avg_time":{
+ "metric":"rpc.rpc.stopMaster_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stopMaster_num_ops":{
+ "metric":"rpc.rpc.stopMaster_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stop_avg_time":{
+ "metric":"rpc.rpc.stop_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stop_num_ops":{
+ "metric":"rpc.rpc.stop_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unassign_avg_time":{
+ "metric":"rpc.rpc.unassign_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unassign_num_ops":{
+ "metric":"rpc.rpc.unassign_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unlockRow/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.unlockRow.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unlockRow/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.unlockRow.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unlockRow_avg_time":{
+ "metric":"rpc.rpc.unlockRow_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unlockRow_num_ops":{
+ "metric":"rpc.rpc.unlockRow_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/canCommit_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.canCommit_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/canCommit_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.canCommit_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/commitPending_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.commitPending_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/commitPending_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.commitPending_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/done_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.done_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/done_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.done_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getBlockLocalPathInfo_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getBlockLocalPathInfo_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getBlockLocalPathInfo_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getBlockLocalPathInfo_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getMapCompletionEvents_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getMapCompletionEvents_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getMapCompletionEvents_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getMapCompletionEvents_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getProtocolVersion_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getProtocolVersion_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getTask_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getTask_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getTask_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getTask_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/ping_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.ping_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/ping_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.ping_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/statusUpdate_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.statusUpdate_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/statusUpdate_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.statusUpdate_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/ugi/loginFailure_avg_time":{
+ "metric":"ugi.ugi.loginFailure_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/ugi/loginFailure_num_ops":{
+ "metric":"ugi.ugi.loginFailure_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/ugi/loginSuccess_avg_time":{
+ "metric":"ugi.ugi.loginSuccess_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/ugi/loginSuccess_num_ops":{
+ "metric":"ugi.ugi.loginSuccess_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ }
+ },
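
Each entry in the map above binds an Ambari property path (the JSON key) to the name of the metric as it is actually emitted (the "metric" field), with "pointInTime" and "temporal" flagging whether the value may be served as a current scalar, as a time series, or both. A minimal Python sketch of the lookup, using two entries excerpted from the block above (the resolve() helper is illustrative, not Ambari code):

    # Two entries excerpted from the map above; the helper is hypothetical.
    RPC_METRICS = {
        "metrics/rpc/put_avg_time": {
            "metric": "rpc.rpc.put_avg_time", "pointInTime": True, "temporal": True},
        "metrics/rpc/put_num_ops": {
            "metric": "rpc.rpc.put_num_ops", "pointInTime": True, "temporal": True},
    }

    def resolve(property_path):
        """Translate an API property path into the raw emitted metric name."""
        return RPC_METRICS[property_path]["metric"]

    print(resolve("metrics/rpc/put_avg_time"))  # rpc.rpc.put_avg_time
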
+ "FLUME_SERVER":{
+ "metrics/boottime":{
+ "metric":"boottime",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_aidle":{
+ "metric":"cpu_aidle",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_idle":{
+ "metric":"cpu_idle",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_nice":{
+ "metric":"cpu_nice",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_num":{
+ "metric":"cpu_num",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_speed":{
+ "metric":"cpu_speed",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_system":{
+ "metric":"cpu_system",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_user":{
+ "metric":"cpu_user",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_wio":{
+ "metric":"cpu_wio",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/disk/disk_free":{
+ "metric":"disk_free",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/disk/disk_total":{
+ "metric":"disk_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/disk/part_max_used":{
+ "metric":"part_max_used",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/gcCount":{
+ "metric":"jvm.JvmMetrics.gcCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/gcTimeMillis":{
+ "metric":"jvm.JvmMetrics.gcTimeMillis",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/logError":{
+ "metric":"jvm.JvmMetrics.logError",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/logFatal":{
+ "metric":"jvm.JvmMetrics.logFatal",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/logInfo":{
+ "metric":"jvm.JvmMetrics.logInfo",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/logWarn":{
+ "metric":"jvm.JvmMetrics.logWarn",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/maxMemoryM":{
+ "metric":"jvm.JvmMetrics.maxMemoryM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memHeapCommittedM":{
+ "metric":"jvm.JvmMetrics.memHeapCommittedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memHeapUsedM":{
+ "metric":"jvm.JvmMetrics.memHeapUsedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memNonHeapCommittedM":{
+ "metric":"jvm.JvmMetrics.memNonHeapCommittedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memNonHeapUsedM":{
+ "metric":"jvm.JvmMetrics.memNonHeapUsedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsBlocked":{
+ "metric":"jvm.JvmMetrics.threadsBlocked",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsNew":{
+ "metric":"jvm.JvmMetrics.threadsNew",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsRunnable":{
+ "metric":"jvm.JvmMetrics.threadsRunnable",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsTerminated":{
+ "metric":"jvm.JvmMetrics.threadsTerminated",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsTimedWaiting":{
+ "metric":"jvm.JvmMetrics.threadsTimedWaiting",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsWaiting":{
+ "metric":"jvm.JvmMetrics.threadsWaiting",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/CHANNEL/$2/ChannelCapacity":{
+ "metric":"(\\w+).CHANNEL.(\\w+).ChannelCapacity",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/CHANNEL/$2/StartTime":{
+ "metric":"(\\w+).CHANNEL.(\\w+).StartTime",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/CHANNEL/$2/EventTakeAttemptCount":{
+ "metric":"(\\w+).CHANNEL.(\\w+).EventTakeAttemptCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/CHANNEL/$2/EventTakeSuccessCount":{
+ "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/CHANNEL/$2/EventPutAttemptCount":{
+ "metric":"(\\w+).CHANNEL.(\\w+).EventPutAttemptCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/CHANNEL/$2/StopTime":{
+ "metric":"(\\w+).CHANNEL.(\\w+).StopTime",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/CHANNEL/$2/ChannelFillPercentage":{
+ "metric":"(\\w+).CHANNEL.(\\w+).ChannelFillPercentage",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/CHANNEL/$2/ChannelSize":{
+ "metric":"(\\w+).CHANNEL.(\\w+).ChannelSize",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/CHANNEL/$2/EventPutSuccessCount":{
+ "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/SINK/$2/ConnectionCreatedCount":{
+ "metric":"(\\w+).SINK.(\\w+).ConnectionCreatedCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/SINK/$2/BatchCompleteCount":{
+ "metric":"(\\w+).SINK.(\\w+).BatchCompleteCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/SINK/$2/EventDrainSuccessCount":{
+ "metric":"(\\w+).SINK.(\\w+).EventDrainSuccessCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/SINK/$2/StartTime":{
+ "metric":"(\\w+).SINK.(\\w+).StartTime",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/SINK/$2/EventDrainAttemptCount":{
+ "metric":"(\\w+).SINK.(\\w+).EventDrainAttemptCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/SINK/$2/ConnectionFailedCount":{
+ "metric":"(\\w+).SINK.(\\w+).ConnectionFailedCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/SINK/$2/BatchUnderflowCount":{
+ "metric":"(\\w+).SINK.(\\w+).BatchUnderflowCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/SINK/$2/ConnectionClosedCount":{
+ "metric":"(\\w+).SINK.(\\w+).ConnectionClosedCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/SINK/$2/StopTime":{
+ "metric":"(\\w+).SINK.(\\w+).StopTime",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/SINK/$2/BatchEmptyCount":{
+ "metric":"(\\w+).SINK.(\\w+).BatchEmptyCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/SOURCE/$2/AppendBatchReceivedCount":{
+ "metric":"(\\w+).SOURCE.(\\w+).AppendBatchReceivedCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/SOURCE/$2/AppendAcceptedCount":{
+ "metric":"(\\w+).SOURCE.(\\w+).AppendAcceptedCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/SOURCE/$2/StartTime":{
+ "metric":"(\\w+).SOURCE.(\\w+).StartTime",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/SOURCE/$2/OpenConnectionCount":{
+ "metric":"(\\w+).SOURCE.(\\w+).OpenConnectionCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/SOURCE/$2/AppendBatchAcceptedCount":{
+ "metric":"(\\w+).SOURCE.(\\w+).AppendBatchAcceptedCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/SOURCE/$2/AppendReceivedCount":{
+ "metric":"(\\w+).SOURCE.(\\w+).AppendReceivedCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/SOURCE/$2/EventReceivedCount":{
+ "metric":"(\\w+).SOURCE.(\\w+).EventReceivedCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/SOURCE/$2/StopTime":{
+ "metric":"(\\w+).SOURCE.(\\w+).StopTime",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/SOURCE/$2/EventAcceptedCount":{
+ "metric":"(\\w+).SOURCE.(\\w+).EventAcceptedCount",
+ "pointInTime":true,
+ "temporal":true
+ }
+ },
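
Unlike the fixed HBase keys, the FLUME_SERVER entries above are parameterized: the "metric" field is a regular expression, and the $1/$2 placeholders in the property path refer to its capture groups (the Flume agent name and the component name). A sketch of the reverse mapping, using the ChannelSize pattern verbatim; the unescaped dots in the source pattern match any character, which is harmless here, and the sample metric name is invented:

    import re

    PROPERTY_PATH = "metrics/flume/$1/CHANNEL/$2/ChannelSize"
    METRIC_REGEX = r"(\w+).CHANNEL.(\w+).ChannelSize"  # as in the entry above

    def to_property_path(raw_name):
        # Match the raw metric name, then substitute the captures
        # into the $1/$2 placeholders of the property path.
        m = re.match(METRIC_REGEX, raw_name)
        if m is None:
            return None
        path = PROPERTY_PATH
        for i, group in enumerate(m.groups(), start=1):
            path = path.replace("$%d" % i, group)
        return path

    print(to_property_path("agent1.CHANNEL.memChannel.ChannelSize"))
    # metrics/flume/agent1/CHANNEL/memChannel/ChannelSize
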
+ "JOURNALNODE":{
+ "metrics/dfs/journalNode/syncs300s75thPercentileLatencyMicros": {
+ "metric": "dfs.JournalNode.Syncs300s75thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/lastWrittenTxId": {
+ "metric": "dfs.JournalNode.LastWrittenTxId",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/syncs60s90thPercentileLatencyMicros": {
+ "metric": "dfs.JournalNode.Syncs60s90thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/finalizeLogSegment_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.FinalizeLogSegmentNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/ugi/loginSuccess_avg_time": {
+ "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/startLogSegment_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.StartLogSegmentAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/load/load_one": {
+ "metric": "load_one",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/currentLagTxns": {
+ "metric": "dfs.JournalNode.CurrentLagTxns",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/memNonHeapUsedM": {
+ "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/bytesWritten": {
+ "metric": "dfs.JournalNode.BytesWritten",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/process/proc_run": {
+ "metric": "proc_run",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/acceptRecovery_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.AcceptRecoveryNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/swap_total": {
+ "metric": "swap_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/txnsWritten": {
+ "metric": "dfs.JournalNode.TxnsWritten",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/threadsBlocked": {
+ "metric": "jvm.JvmMetrics.ThreadsBlocked",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/RpcQueueTime_num_ops": {
+ "metric": "rpc.rpc.RpcQueueTimeNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/process/proc_total": {
+ "metric": "proc_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/disk/part_max_used": {
+ "metric": "part_max_used",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/newEpoch_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.NewEpochNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/syncs3600s_num_ops": {
+ "metric": "dfs.JournalNode.Syncs3600sNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/syncs3600s99thPercentileLatencyMicros": {
+ "metric": "dfs.JournalNode.Syncs3600s99thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/ugi/loginSuccess_num_ops": {
+ "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/syncs60s95thPercentileLatencyMicros": {
+ "metric": "dfs.JournalNode.Syncs60s95thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_idle": {
+ "metric": "cpu_idle",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/syncs60s99thPercentileLatencyMicros": {
+ "metric": "dfs.JournalNode.Syncs60s99thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_aidle": {
+ "metric": "cpu_aidle",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/mem_free": {
+ "metric": "mem_free",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/syncs300s50thPercentileLatencyMicros": {
+ "metric": "dfs.JournalNode.Syncs300s50thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/network/bytes_in": {
+ "metric": "bytes_in",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/syncs300s_num_ops": {
+ "metric": "dfs.JournalNode.Syncs300sNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/syncs3600s90thPercentileLatencyMicros": {
+ "metric": "dfs.JournalNode.Syncs3600s90thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/network/pkts_out": {
+ "metric": "pkts_out",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/memNonHeapCommittedM": {
+ "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/mem_cached": {
+ "metric": "mem_cached",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/disk/disk_total": {
+ "metric": "disk_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/lastPromisedEpoch": {
+ "metric": "dfs.JournalNode.LastPromisedEpoch",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/prepareRecovery_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.PrepareRecoveryAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/lastWriterEpoch": {
+ "metric": "dfs.JournalNode.LastWriterEpoch",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/ugi/loginFailure_num_ops": {
+ "metric": "ugi.UgiMetrics.LoginFailureNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/getEditLogManifest_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_wio": {
+ "metric": "cpu_wio",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/acceptRecovery_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.AcceptRecoveryAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_speed": {
+ "metric": "cpu_speed",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/RpcProcessingTime_avg_time": {
+ "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthenticationSuccesses": {
+ "metric": "rpc.rpc.RpcAuthenticationSuccesses",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthenticationFailures": {
+ "metric": "rpc.rpc.RpcAuthenticationFailures",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/network/pkts_in": {
+ "metric": "pkts_in",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/mem_total": {
+ "metric": "mem_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/memHeapCommittedM": {
+ "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/threadsRunnable": {
+ "metric": "jvm.JvmMetrics.ThreadsRunnable",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/syncs60s75thPercentileLatencyMicros": {
+ "metric": "dfs.JournalNode.Syncs60s75thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/threadsNew": {
+ "metric": "jvm.JvmMetrics.ThreadsNew",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthorizationFailures": {
+ "metric": "rpc.rpc.RpcAuthorizationFailures",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/syncs300s95thPercentileLatencyMicros": {
+ "metric": "dfs.JournalNode.Syncs300s95thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/RpcQueueTime_avg_time": {
+ "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/syncs3600s95thPercentileLatencyMicros": {
+ "metric": "dfs.JournalNode.Syncs3600s95thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/getEditLogManifest_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/SentBytes": {
+ "metric": "rpc.rpc.SentBytes",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/logWarn": {
+ "metric": "jvm.JvmMetrics.LogWarn",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/threadsTimedWaiting": {
+ "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/gcCount": {
+ "metric": "jvm.JvmMetrics.GcCount",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/ReceivedBytes": {
+ "metric": "rpc.rpc.ReceivedBytes",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_nice": {
+ "metric": "cpu_nice",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/batchesWritten": {
+ "metric": "dfs.JournalNode.BatchesWritten",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/NumOpenConnections": {
+ "metric": "rpc.rpc.NumOpenConnections",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/batchesWrittenWhileLagging": {
+ "metric": "dfs.JournalNode.BatchesWrittenWhileLagging",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/syncs300s99thPercentileLatencyMicros": {
+ "metric": "dfs.JournalNode.Syncs300s99thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/memHeapUsedM": {
+ "metric": "jvm.JvmMetrics.MemHeapUsedM",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/prepareRecovery_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.PrepareRecoveryNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/threadsWaiting": {
+ "metric": "jvm.JvmMetrics.ThreadsWaiting",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/disk/disk_free": {
+ "metric": "disk_free",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/newEpoch_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.NewEpochAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/mem_buffers": {
+ "metric": "mem_buffers",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/syncs60s_num_ops": {
+ "metric": "dfs.JournalNode.Syncs60sNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/gcTimeMillis": {
+ "metric": "jvm.JvmMetrics.GcTimeMillis",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/threadsTerminated": {
+ "metric": "jvm.JvmMetrics.ThreadsTerminated",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/memMaxM": {
+ "metric": "jvm.JvmMetrics.MemMaxM",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/network/bytes_out": {
+ "metric": "bytes_out",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_user": {
+ "metric": "cpu_user",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/swap_free": {
+ "metric": "swap_free",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/load/load_five": {
+ "metric": "load_five",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_system": {
+ "metric": "cpu_system",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/getJournalState_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.GetJournalStateAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/boottime": {
+ "metric": "boottime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/syncs300s90thPercentileLatencyMicros": {
+ "metric": "dfs.JournalNode.Syncs300s90thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/callQueueLen": {
+ "metric": "rpc.rpc.CallQueueLength",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/journal_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.JournalAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/finalizeLogSegment_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.FinalizeLogSegmentAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/load/load_fifteen": {
+ "metric": "load_fifteen",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/startLogSegment_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.StartLogSegmentNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/syncs60s50thPercentileLatencyMicros": {
+ "metric": "dfs.JournalNode.Syncs60s50thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/logInfo": {
+ "metric": "jvm.JvmMetrics.LogInfo",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/getJournalState_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.GetJournalStateNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/RpcProcessingTime_num_ops": {
+ "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/mem_shared": {
+ "metric": "mem_shared",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/syncs3600s50thPercentileLatencyMicros": {
+ "metric": "dfs.JournalNode.Syncs3600s50thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/logError": {
+ "metric": "jvm.JvmMetrics.LogError",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/ugi/loginFailure_avg_time": {
+ "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_num": {
+ "metric": "cpu_num",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/journal_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.JournalNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthorizationSuccesses": {
+ "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/logFatal": {
+ "metric": "jvm.JvmMetrics.LogFatal",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/syncs3600s75thPercentileLatencyMicros": {
+ "metric": "dfs.JournalNode.Syncs3600s75thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": true
+ }
+ },
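
Nearly every JOURNALNODE key above is both pointInTime and temporal. In the Ambari REST API a temporal metric is requested by appending a [start,end,step] range to the property path in the fields parameter, while a point-in-time value is requested with the bare path. A hedged sketch of building such a query URL; the endpoint, cluster name, host name, and epoch timestamps are placeholders, and a real call would also supply credentials and the X-Requested-By header:

    BASE = "http://ambari.example.com:8080/api/v1"  # placeholder endpoint

    # Temporal form: property path plus [start,end,step] in epoch seconds.
    temporal = "metrics/dfs/journalNode/lastWrittenTxId[1430844925,1430848525,15]"
    # Point-in-time form: the bare property path.
    point_in_time = "metrics/dfs/journalNode/lastWrittenTxId"

    url = "%s/clusters/c1/hosts/jn1.example.com/host_components/JOURNALNODE?fields=%s" \
        % (BASE, temporal)
    print(url)
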
+ "NODEMANAGER":{
+ "metrics/memory/mem_total": {
+ "metric": "mem_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/memHeapCommittedM": {
+ "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/mapred/ShuffleOutputsFailed": {
+ "metric": "mapred.ShuffleOutputsFailed",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/threadsRunnable": {
+ "metric": "jvm.JvmMetrics.ThreadsRunnable",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/threadsNew": {
+ "metric": "jvm.JvmMetrics.ThreadsNew",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthorizationFailures": {
+ "metric": "rpc.metrics.RpcAuthorizationFailures",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/ugi/loginSuccess_avg_time": {
+ "metric": "ugi.ugi.LoginSuccessAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/RpcQueueTime_avg_time": {
+ "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/yarn/ContainersCompleted": {
+ "metric": "yarn.NodeManagerMetrics.ContainersCompleted",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/SentBytes": {
+ "metric": "rpc.rpc.SentBytes",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/memNonHeapUsedM": {
+ "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/yarn/ContainersKilled": {
+ "metric": "yarn.NodeManagerMetrics.ContainersKilled",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/logWarn": {
+ "metric": "jvm.JvmMetrics.LogWarn",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/threadsTimedWaiting": {
+ "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/gcCount": {
+ "metric": "jvm.JvmMetrics.GcCount",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/process/proc_run": {
+ "metric": "proc_run",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/swap_total": {
+ "metric": "swap_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/ReceivedBytes": {
+ "metric": "rpc.rpc.ReceivedBytes",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_nice": {
+ "metric": "cpu_nice",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/threadsBlocked": {
+ "metric": "jvm.JvmMetrics.ThreadsBlocked",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/RpcQueueTime_num_ops": {
+ "metric": "rpc.rpc.RpcQueueTimeNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/process/proc_total": {
+ "metric": "proc_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/yarn/AllocatedGB": {
+ "metric": "yarn.NodeManagerMetrics.AllocatedGB",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/disk/part_max_used": {
+ "metric": "part_max_used",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/NumOpenConnections": {
+ "metric": "rpc.rpc.NumOpenConnections",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/memHeapUsedM": {
+ "metric": "jvm.JvmMetrics.MemHeapUsedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/threadsWaiting": {
+ "metric": "jvm.JvmMetrics.ThreadsWaiting",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/disk/disk_free": {
+ "metric": "disk_free",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/mapred/ShuffleOutputsOK": {
+ "metric": "mapred.ShuffleOutputsOK",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/yarn/ContainersFailed": {
+ "metric": "yarn.NodeManagerMetrics.ContainersFailed",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/mem_buffers": {
+ "metric": "mem_buffers",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/ugi/loginSuccess_num_ops": {
+ "metric": "ugi.ugi.LoginSuccessNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/gcTimeMillis": {
+ "metric": "jvm.JvmMetrics.GcTimeMillis",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_idle": {
+ "metric": "cpu_idle",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/yarn/AllocatedContainers": {
+ "metric": "yarn.NodeManagerMetrics.AllocatedContainers",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/threadsTerminated": {
+ "metric": "jvm.JvmMetrics.ThreadsTerminated",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/network/bytes_out": {
+ "metric": "bytes_out",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_aidle": {
+ "metric": "cpu_aidle",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/mem_free": {
+ "metric": "mem_free",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_user": {
+ "metric": "cpu_user",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/swap_free": {
+ "metric": "swap_free",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_system": {
+ "metric": "cpu_system",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/network/bytes_in": {
+ "metric": "bytes_in",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/boottime": {
+ "metric": "boottime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/network/pkts_out": {
+ "metric": "pkts_out",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/memNonHeapCommittedM": {
+ "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/callQueueLen": {
+ "metric": "rpc.rpc.CallQueueLength",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/memory/mem_cached": {
+ "metric": "mem_cached",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/yarn/ContainersRunning": {
+ "metric": "yarn.NodeManagerMetrics.ContainersRunning",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/disk/disk_total": {
+ "metric": "disk_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/logInfo": {
+ "metric": "jvm.JvmMetrics.LogInfo",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/yarn/ContainersLaunched": {
+ "metric": "yarn.NodeManagerMetrics.ContainersLaunched",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/RpcProcessingTime_num_ops": {
+ "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/ugi/loginFailure_num_ops": {
+ "metric": "ugi.ugi.LoginFailureNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/memory/mem_shared": {
+ "metric": "mem_shared",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/yarn/AvailableGB": {
+ "metric": "yarn.NodeManagerMetrics.AvailableGB",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/mapred/ShuffleConnections": {
+ "metric": "mapred.ShuffleConnections",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_wio": {
+ "metric": "cpu_wio",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/yarn/ContainersIniting": {
+ "metric": "yarn.NodeManagerMetrics.ContainersIniting",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/logError": {
+ "metric": "jvm.JvmMetrics.LogError",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/ugi/loginFailure_avg_time": {
+ "metric": "ugi.ugi.LoginFailureAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_num": {
+ "metric": "cpu_num",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_speed": {
+ "metric": "cpu_speed",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthorizationSuccesses": {
+ "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/logFatal": {
+ "metric": "jvm.JvmMetrics.LogFatal",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/RpcProcessingTime_avg_time": {
+ "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/mapred/ShuffleOutputBytes": {
+ "metric": "mapred.ShuffleOutputBytes",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthenticationSuccesses": {
+ "metric": "rpc.metrics.RpcAuthenticationSuccesses",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthenticationFailures": {
+ "metric": "rpc.metrics.RpcAuthenticationFailures",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/network/pkts_in": {
+ "metric": "pkts_in",
+ "pointInTime": true,
+ "temporal": true
+ }
+ },
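
In the NODEMANAGER block the host-level keys (cpu, memory, disk, network) stay pointInTime, while the JVM and RPC keys carry pointInTime false, i.e. they may only be asked for as a time series. A small sketch of what those flags imply for request validation, assuming the common reading of the two booleans; the validate() helper is illustrative, not Ambari code:

    # "pointInTime": may be returned as a current scalar (no range suffix);
    # "temporal":    may be charted as a series with [start,end,step].
    NODEMANAGER = {
        "metrics/cpu/cpu_user": {"pointInTime": True, "temporal": True},
        "metrics/jvm/memHeapUsedM": {"pointInTime": False, "temporal": True},
    }

    def validate(path, has_range_suffix):
        flags = NODEMANAGER[path]
        if has_range_suffix and not flags["temporal"]:
            raise ValueError("%s cannot be queried as a series" % path)
        if not has_range_suffix and not flags["pointInTime"]:
            raise ValueError("%s is temporal-only; add [start,end,step]" % path)

    validate("metrics/cpu/cpu_user", has_range_suffix=False)      # ok
    validate("metrics/jvm/memHeapUsedM", has_range_suffix=True)   # ok
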
+ "RESOURCEMANAGER":{
+ "metrics/rpcdetailed/FinishApplicationMasterNumOps": {
+ "metric": "rpcdetailed.rpcdetailed.FinishApplicationMasterNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/mem_total": {
+ "metric": "mem_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/yarn/Queue/$1/AppsCompleted": {
+ "metric": "yarn.QueueMetrics.Queue:(.+),.AppsCompleted",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/yarn/ClusterMetrics/NumUnhealthyNMs": {
+ "metric": "yarn.ClusterMetrics.NumUnhealthyNMs",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/memHeapCommittedM": {
+ "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/threadsRunnable": {
+ "metric": "jvm.JvmMetrics.ThreadsRunnable",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/yarn/ClusterMetrics/NumRebootedNMs": {
+ "metric": "yarn.ClusterMetrics.NumRebootedNMs",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/threadsNew": {
+ "metric": "jvm.JvmMetrics.ThreadsNew",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/yarn/Queue/$1/AppsSubmitted": {
+ "metric": "yarn.QueueMetrics.Queue:(.+),.AppsSubmitted",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/yarn/ClusterMetrics/NumLostNMs": {
+ "metric": "yarn.ClusterMetrics.NumLostNMs",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/ugi/loginSuccess_avg_time": {
+ "metric": "ugi.ugi.LoginSuccessAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/RpcQueueTime_avg_time": {
+ "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/yarn/Queue/$1/AllocatedContainers": {
+ "metric": "yarn.QueueMetrics.Queue:(.+),.AllocatedContainers",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/SentBytes": {
+ "metric": "rpc.rpc.SentBytes",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/yarn/Queue/$1/AppsKilled": {
+ "metric": "yarn.QueueMetrics.Queue:(.+),.AppsKilled",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/yarn/ClusterMetrics/NumActiveNMs": {
+ "metric": "yarn.ClusterMetrics.NumActiveNMs",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/memNonHeapUsedM": {
+ "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/logWarn": {
+ "metric": "jvm.JvmMetrics.LogWarn",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/threadsTimedWaiting": {
+ "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/yarn/Queue/$1/AppsFailed": {
+ "metric": "yarn.QueueMetrics.Queue:(.+),.AppsFailed",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/gcCount": {
+ "metric": "jvm.JvmMetrics.GcCount",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/process/proc_run": {
+ "metric": "proc_run",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/swap_total": {
+ "metric": "swap_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/ReceivedBytes": {
+ "metric": "rpc.rpc.ReceivedBytes",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/AllocateNumOps": {
+ "metric": "rpcdetailed.rpcdetailed.AllocateNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_nice": {
+ "metric": "cpu_nice",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/gcCountMarkSweepCompact": {
+ "metric": "jvm.JvmMetrics.GcCountMarkSweepCompact",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/threadsBlocked": {
+ "metric": "jvm.JvmMetrics.ThreadsBlocked",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/yarn/Queue/$1/AppsRunning": {
+ "metric": "yarn.QueueMetrics.Queue:(.+),.AppsRunning",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/yarn/ClusterMetrics/NumDecommissionedNMs": {
+ "metric": "yarn.ClusterMetrics.NumDecommissionedNMs",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/RpcQueueTime_num_ops": {
+ "metric": "rpc.rpc.RpcQueueTimeNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/process/proc_total": {
+ "metric": "proc_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/disk/part_max_used": {
+ "metric": "part_max_used",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/NumOpenConnections": {
+ "metric": "rpc.rpc.NumOpenConnections",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/memHeapUsedM": {
+ "metric": "jvm.JvmMetrics.MemHeapUsedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/disk/disk_free": {
+ "metric": "disk_free",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/threadsWaiting": {
+ "metric": "jvm.JvmMetrics.ThreadsWaiting",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/load_one": {
+ "metric": "load_one",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/mem_buffers": {
+ "metric": "mem_buffers",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/ugi/loginSuccess_num_ops": {
+ "metric": "ugi.ugi.LoginSuccessNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/gcTimeMillisCopy": {
+ "metric": "jvm.JvmMetrics.GcTimeMillisCopy",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/gcTimeMillis": {
+ "metric": "jvm.JvmMetrics.GcTimeMillis",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_idle": {
+ "metric": "cpu_idle",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/yarn/Queue/$1/PendingContainers": {
+ "metric": "yarn.QueueMetrics.Queue:(.+),.PendingContainers",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/memMaxM": {
+ "metric": "jvm.JvmMetrics.MemMaxM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/threadsTerminated": {
+ "metric": "jvm.JvmMetrics.ThreadsTerminated",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/network/bytes_out": {
+ "metric": "bytes_out",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_aidle": {
+ "metric": "cpu_aidle",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/mem_free": {
+ "metric": "mem_free",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/AllocateAvgTime": {
+ "metric": "rpcdetailed.rpcdetailed.AllocateAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_user": {
+ "metric": "cpu_user",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/swap_free": {
+ "metric": "swap_free",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_system": {
+ "metric": "cpu_system",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/load_five": {
+ "metric": "load_five",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/network/bytes_in": {
+ "metric": "bytes_in",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/boottime": {
+ "metric": "boottime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/network/pkts_out": {
+ "metric": "pkts_out",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/memNonHeapCommittedM": {
+ "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/GetApplicationReportNumOps": {
+ "metric": "rpcdetailed.rpcdetailed.GetApplicationReportNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/FinishApplicationMasterAvgTime": {
+ "metric": "rpcdetailed.rpcdetailed.FinishApplicationMasterAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/callQueueLen": {
+ "metric": "rpc.rpc.CallQueueLength",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/memory/mem_cached": {
+ "metric": "mem_cached",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/RegisterApplicationMasterNumOps": {
+ "metric": "rpcdetailed.rpcdetailed.RegisterApplicationMasterNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/disk/disk_total": {
+ "metric": "disk_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/yarn/Queue/$1/AvailableMB": {
+ "metric": "yarn.QueueMetrics.Queue:(.+),.AvailableMB",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/yarn/Queue/$1/PendingMB": {
+ "metric": "yarn.QueueMetrics.Queue:(.+),.PendingMB",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/logInfo": {
+ "metric": "jvm.JvmMetrics.LogInfo",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/RpcProcessingTime_num_ops": {
+ "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/ugi/loginFailure_num_ops": {
+ "metric": "ugi.ugi.LoginFailureNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/memory/mem_shared": {
+ "metric": "mem_shared",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/SubmitApplicationAvgTime": {
+ "metric": "rpcdetailed.rpcdetailed.SubmitApplicationAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_wio": {
+ "metric": "cpu_wio",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/GetNewApplicationNumOps": {
+ "metric": "rpcdetailed.rpcdetailed.GetNewApplicationNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/yarn/Queue/$1/AppsPending": {
+ "metric": "yarn.QueueMetrics.Queue:(.+),.AppsPending",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/gcCountCopy": {
+ "metric": "jvm.JvmMetrics.GcCountCopy",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/load_fifteen": {
+ "metric": "load_fifteen",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/logError": {
+ "metric": "jvm.JvmMetrics.LogError",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/ugi/loginFailure_avg_time": {
+ "metric": "ugi.ugi.LoginFailureAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_num": {
+ "metric": "cpu_num",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/SubmitApplicationNumOps": {
+ "metric": "rpcdetailed.rpcdetailed.SubmitApplicationNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/gcTimeMillisMarkSweepCompact": {
+ "metric": "jvm.JvmMetrics.GcTimeMillisMarkSweepCompact",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_speed": {
+ "metric": "cpu_speed",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthorizationSuccesses": {
+ "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/yarn/Queue/$1/AllocatedMB": {
+ "metric": "yarn.QueueMetrics.Queue:(.+),.AllocatedMB",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/logFatal": {
+ "metric": "jvm.JvmMetrics.LogFatal",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/RpcProcessingTime_avg_time": {
+ "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/GetApplicationReportAvgTime": {
+ "metric": "rpcdetailed.rpcdetailed.GetApplicationReportAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/NodeHeartbeatAvgTime": {
+ "metric": "rpcdetailed.rpcdetailed.NodeHeartbeatAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/GetNewApplicationAvgTime": {
+ "metric": "rpcdetailed.rpcdetailed.GetNewApplicationAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/network/pkts_in": {
+ "metric": "pkts_in",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/RegisterApplicationMasterAvgTime": {
+ "metric": "rpcdetailed.rpcdetailed.RegisterApplicationMasterAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/yarn/Queue/$1/ReservedContainers": {
+ "metric": "yarn.QueueMetrics.Queue:(.+),.ReservedContainers",
+ "pointInTime": false,
+ "temporal": true
+ }
+ },
+ "HISTORYSERVER":{
+ "metrics/memory/mem_total": {
+ "metric": "mem_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/memHeapCommittedM": {
+ "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/threadsRunnable": {
+ "metric": "jvm.JvmMetrics.ThreadsRunnable",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/threadsNew": {
+ "metric": "jvm.JvmMetrics.ThreadsNew",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthorizationFailures": {
+ "metric": "rpc.metrics.RpcAuthorizationFailures",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/ugi/loginSuccess_avg_time": {
+ "metric": "ugi.ugi.LoginSuccessAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/RpcQueueTime_avg_time": {
+ "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/SentBytes": {
+ "metric": "rpc.rpc.SentBytes",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/memNonHeapUsedM": {
+ "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/logWarn": {
+ "metric": "jvm.JvmMetrics.LogWarn",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/threadsTimedWaiting": {
+ "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/process/proc_run": {
+ "metric": "proc_run",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/gcCount": {
+ "metric": "jvm.JvmMetrics.GcCount",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/ReceivedBytes": {
+ "metric": "rpc.rpc.ReceivedBytes",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/memory/swap_total": {
+ "metric": "swap_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_nice": {
+ "metric": "cpu_nice",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/threadsBlocked": {
+ "metric": "jvm.JvmMetrics.ThreadsBlocked",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/RpcQueueTime_num_ops": {
+ "metric": "rpc.rpc.RpcQueueTimeNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/process/proc_total": {
+ "metric": "proc_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/disk/part_max_used": {
+ "metric": "part_max_used",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/NumOpenConnections": {
+ "metric": "rpc.rpc.NumOpenConnections",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/memHeapUsedM": {
+ "metric": "jvm.JvmMetrics.MemHeapUsedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/threadsWaiting": {
+ "metric": "jvm.JvmMetrics.ThreadsWaiting",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/disk/disk_free": {
+ "metric": "disk_free",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/mem_buffers": {
+ "metric": "mem_buffers",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/ugi/loginSuccess_num_ops": {
+ "metric": "ugi.ugi.LoginSuccessNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/gcTimeMillis": {
+ "metric": "jvm.JvmMetrics.GcTimeMillis",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_idle": {
+ "metric": "cpu_idle",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/threadsTerminated": {
+ "metric": "jvm.JvmMetrics.ThreadsTerminated",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/network/bytes_out": {
+ "metric": "bytes_out",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_aidle": {
+ "metric": "cpu_aidle",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/mem_free": {
+ "metric": "mem_free",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_user": {
+ "metric": "cpu_user",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/swap_free": {
+ "metric": "swap_free",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_system": {
+ "metric": "cpu_system",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/network/bytes_in": {
+ "metric": "bytes_in",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/boottime": {
+ "metric": "boottime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/network/pkts_out": {
+ "metric": "pkts_out",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/memNonHeapCommittedM": {
+ "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/callQueueLen": {
+ "metric": "rpc.rpc.CallQueueLength",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/memory/mem_cached": {
+ "metric": "mem_cached",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/disk/disk_total": {
+ "metric": "disk_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/logInfo": {
+ "metric": "jvm.JvmMetrics.LogInfo",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/ugi/loginFailure_num_ops": {
+ "metric": "ugi.ugi.LoginFailureNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/RpcProcessingTime_num_ops": {
+ "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/memory/mem_shared": {
+ "metric": "mem_shared",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_wio": {
+ "metric": "cpu_wio",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/logError": {
+ "metric": "jvm.JvmMetrics.LogError",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/ugi/loginFailure_avg_time": {
+ "metric": "ugi.ugi.LoginFailureAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_num": {
+ "metric": "cpu_num",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_speed": {
+ "metric": "cpu_speed",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthorizationSuccesses": {
+ "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/logFatal": {
+ "metric": "jvm.JvmMetrics.LogFatal",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/RpcProcessingTime_avg_time": {
+ "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthenticationSuccesses": {
+ "metric": "rpc.metrics.RpcAuthenticationSuccesses",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthenticationFailures": {
+ "metric": "rpc.metrics.RpcAuthenticationFailures",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/network/pkts_in": {
+ "metric": "pkts_in",
+ "pointInTime": true,
+ "temporal": true
+ }
+ }
+ },
+
+ "HostComponent":{
+ "NAMENODE":{
+ "metrics/dfs/FSNamesystem/TotalLoad": {
+ "metric": "dfs.FSNamesystem.TotalLoad",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/FSNamesystem/BlockCapacity": {
+ "metric": "dfs.FSNamesystem.BlockCapacity",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/GetListingOps": {
+ "metric": "dfs.namenode.GetListingOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/FilesAppended": {
+ "metric": "dfs.namenode.FilesAppended",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/getProtocolVersion_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/fsync_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.FsyncAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/ugi/loginSuccess_avg_time": {
+ "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/load/load_one": {
+ "metric": "load_one",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/renewLease_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.RenewLeaseNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/getFileInfo_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.GetFileInfoAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/memNonHeapUsedM": {
+ "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/complete_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.CompleteAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/setPermission_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.SetPermissionNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/FSNamesystem/CapacityTotalGB": {
+ "metric": "dfs.FSNamesystem.CapacityTotalGB",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/FSNamesystem/CapacityTotal": {
+ "metric": "dfs.FSNamesystem.CapacityTotal",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/setOwner_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.SetOwnerNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/getBlockLocations_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/process/proc_run": {
+ "metric": "proc_run",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/FSNamesystem/CapacityUsedGB": {
+ "metric": "dfs.FSNamesystem.CapacityUsedGB",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/FSNamesystem/CapacityUsed": {
+ "metric": "dfs.FSNamesystem.CapacityUsed",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/AddBlockOps": {
+ "metric": "dfs.namenode.AddBlockOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/memory/swap_total": {
+ "metric": "swap_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/FilesDeleted": {
+ "metric": "dfs.namenode.FilesDeleted",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/Syncs_avg_time": {
+ "metric": "dfs.namenode.SyncsAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/threadsBlocked": {
+ "metric": "jvm.JvmMetrics.ThreadsBlocked",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/RpcQueueTime_num_ops": {
+ "metric": "rpc.rpc.RpcQueueTimeNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/process/proc_total": {
+ "metric": "proc_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/blockReport_avg_time": {
+ "metric": "dfs.namenode.BlockReportAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/disk/part_max_used": {
+ "metric": "part_max_used",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/getFileInfo_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.GetFileInfoNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/getEditLogSize_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/ugi/loginSuccess_num_ops": {
+ "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/blockReceived_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_idle": {
+ "metric": "cpu_idle",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/versionRequest_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.VersionRequestAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_aidle": {
+ "metric": "cpu_aidle",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/mem_free": {
+ "metric": "mem_free",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/versionRequest_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.VersionRequestNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/addBlock_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.AddBlockNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/FilesCreated": {
+ "metric": "dfs.namenode.FilesCreated",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/rename_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.RenameAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/network/bytes_in": {
+ "metric": "bytes_in",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/setSafeMode_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.SetSafeModeNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/network/pkts_out": {
+ "metric": "pkts_out",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/memNonHeapCommittedM": {
+ "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/memory/mem_cached": {
+ "metric": "mem_cached",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/disk/disk_total": {
+ "metric": "disk_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/setPermission_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.SetPermissionAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/FilesRenamed": {
+ "metric": "dfs.namenode.FilesRenamed",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/register_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/setReplication_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.setReplication_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/JournalTransactionsBatchedInSync": {
+ "metric": "dfs.namenode.JournalTransactionsBatchedInSync",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/ugi/loginFailure_num_ops": {
+ "metric": "ugi.UgiMetrics.LoginFailureNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/GetBlockLocations": {
+ "metric": "dfs.namenode.GetBlockLocations",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/fsync_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.FsyncNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_wio": {
+ "metric": "cpu_wio",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/create_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.CreateAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/FSNamesystem/PendingReplicationBlocks": {
+ "metric": "dfs.FSNamesystem.PendingReplicationBlocks",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_speed": {
+ "metric": "cpu_speed",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/delete_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.DeleteAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/FileInfoOps": {
+ "metric": "dfs.namenode.FileInfoOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/sendHeartbeat_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.SendHeartbeatNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/DeleteFileOps": {
+ "metric": "dfs.namenode.DeleteFileOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/RpcProcessingTime_avg_time": {
+ "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/blockReport_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.BlockReportNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/setSafeMode_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.SetSafeModeAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthenticationSuccesses": {
+ "metric": "rpc.rpc.RpcAuthenticationSuccesses",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/FSNamesystem/PendingDeletionBlocks": {
+ "metric": "dfs.FSNamesystem.PendingDeletionBlocks",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthenticationFailures": {
+ "metric": "rpc.rpc.RpcAuthenticationFailures",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/network/pkts_in": {
+ "metric": "pkts_in",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/mem_total": {
+ "metric": "mem_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/getEditLogSize_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/memHeapCommittedM": {
+ "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/FilesInGetListingOps": {
+ "metric": "dfs.namenode.FilesInGetListingOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/threadsRunnable": {
+ "metric": "jvm.JvmMetrics.ThreadsRunnable",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/complete_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.CompleteNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/threadsNew": {
+ "metric": "jvm.JvmMetrics.ThreadsNew",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/rollFsImage_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.rollFsImage_num_ops",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthorizationFailures": {
+ "metric": "rpc.rpc.RpcAuthorizationFailures",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/Syncs_num_ops": {
+ "metric": "dfs.namenode.SyncsNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/RpcQueueTime_avg_time": {
+ "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/blockReceived_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/setReplication_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.setReplication_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/rollEditLog_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.RollEditLogAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/SentBytes": {
+ "metric": "rpc.rpc.SentBytes",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/FSNamesystem/FilesTotal": {
+ "metric": "dfs.FSNamesystem.FilesTotal",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/logWarn": {
+ "metric": "jvm.JvmMetrics.LogWarn",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/FSNamesystem/ExcessBlocks": {
+ "metric": "dfs.FSNamesystem.ExcessBlocks",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/threadsTimedWaiting": {
+ "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/gcCount": {
+ "metric": "jvm.JvmMetrics.GcCount",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/ReceivedBytes": {
+ "metric": "rpc.rpc.ReceivedBytes",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_nice": {
+ "metric": "cpu_nice",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/blockReport_num_ops": {
+ "metric": "dfs.namenode.BlockReportNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/SafemodeTime": {
+ "metric": "dfs.namenode.SafemodeTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/rollFsImage_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.rollFsImage_avg_time",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/mkdirs_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.MkdirsAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/NumOpenConnections": {
+ "metric": "rpc.rpc.NumOpenConnections",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/memHeapUsedM": {
+ "metric": "jvm.JvmMetrics.MemHeapUsedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks": {
+ "metric": "dfs.FSNamesystem.ScheduledReplicationBlocks",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/threadsWaiting": {
+ "metric": "jvm.JvmMetrics.ThreadsWaiting",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/disk/disk_free": {
+ "metric": "disk_free",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/FSNamesystem/BlocksTotal": {
+ "metric": "dfs.FSNamesystem.BlocksTotal",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/memory/mem_buffers": {
+ "metric": "mem_buffers",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/gcTimeMillis": {
+ "metric": "jvm.JvmMetrics.GcTimeMillis",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/getBlockLocations_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/Transactions_num_ops": {
+ "metric": "dfs.namenode.TransactionsNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/create_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.CreateNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/threadsTerminated": {
+ "metric": "jvm.JvmMetrics.ThreadsTerminated",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/network/bytes_out": {
+ "metric": "bytes_out",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_user": {
+ "metric": "cpu_user",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/swap_free": {
+ "metric": "swap_free",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/load/load_five": {
+ "metric": "load_five",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_system": {
+ "metric": "cpu_system",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/FSNamesystem/CapacityRemainingGB": {
+ "metric": "dfs.FSNamesystem.CapacityRemainingGB",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/FSNamesystem/CapacityRemaining": {
+ "metric": "dfs.FSNamesystem.CapacityRemaining",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/FSNamesystem/CapacityNonDFSUsed":{
+ "metric": "dfs.FSNamesystem.CapacityUsedNonDFS",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/Transactions_avg_time": {
+ "metric": "dfs.namenode.TransactionsAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/boottime": {
+ "metric": "boottime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/FSNamesystem/MissingBlocks": {
+ "metric": "dfs.FSNamesystem.MissingBlocks",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/callQueueLen": {
+ "metric": "rpc.rpc.CallQueueLength",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/delete_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.DeleteNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/FSNamesystem/CorruptBlocks": {
+ "metric": "dfs.FSNamesystem.CorruptBlocks",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/rename_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.RenameNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/blockReport_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.BlockReportAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/mkdirs_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.MkdirsNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/load/load_fifteen": {
+ "metric": "load_fifteen",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/logInfo": {
+ "metric": "jvm.JvmMetrics.LogInfo",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/fsImageLoadTime": {
+ "metric": "dfs.namenode.FsImageLoadTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/getListing_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.GetListingNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/blocksBeingWrittenReport_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/rollEditLog_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.RollEditLogNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/addBlock_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.AddBlockAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/blocksBeingWrittenReport_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/setOwner_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.SetOwnerAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/RpcProcessingTime_num_ops": {
+ "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/memory/mem_shared": {
+ "metric": "mem_shared",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/FSNamesystem/UnderReplicatedBlocks": {
+ "metric": "dfs.FSNamesystem.UnderReplicatedBlocks",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/sendHeartbeat_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.SendHeartbeatAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/namenode/CreateFileOps": {
+ "metric": "dfs.namenode.CreateFileOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/logError": {
+ "metric": "jvm.JvmMetrics.LogError",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/ugi/loginFailure_avg_time": {
+ "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_num": {
+ "metric": "cpu_num",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/getProtocolVersion_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/register_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthorizationSuccesses": {
+ "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/getListing_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.GetListingAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/logFatal": {
+ "metric": "jvm.JvmMetrics.LogFatal",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/renewLease_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.RenewLeaseAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ }
+ },
+ "DATANODE":{
+ "metrics/dfs/datanode/heartBeats_avg_time": {
+ "metric": "dfs.datanode.HeartbeatsAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/closeRegion_num_ops": {
+ "metric": "rpc.rpc.closeRegion_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/unassign_num_ops": {
+ "metric": "rpc.rpc.unassign_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/modifyTable_num_ops": {
+ "metric": "rpc.rpc.modifyTable_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/splitRegion/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.splitRegion.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/ugi/loginSuccess_avg_time": {
+ "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/getZooKeeper/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.getZooKeeper.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getProtocolVersion_avg_time": {
+ "metric": "rpc.rpc.getProtocolVersion_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getHServerInfo/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.getHServerInfo.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/load/load_one": {
+ "metric": "load_one",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/writes_from_remote_client": {
+ "metric": "dfs.datanode.WritesFromRemoteClient",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getClosestRowBefore_num_ops": {
+ "metric": "rpc.rpc.getClosestRowBefore_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getClosestRowBefore/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.getClosestRowBefore.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/blocks_replicated": {
+ "metric": "dfs.datanode.BlocksReplicated",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/memNonHeapUsedM": {
+ "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/replicateLogEntries_num_ops": {
+ "metric": "rpc.rpc.replicateLogEntries_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/flushRegion_avg_time": {
+ "metric": "rpc.rpc.flushRegion_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/lockRow_num_ops": {
+ "metric": "rpc.rpc.lockRow_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/swap_total": {
+ "metric": "swap_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/multi_avg_time": {
+ "metric": "rpc.rpc.multi_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/openRegions/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.openRegions.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/stopMaster_num_ops": {
+ "metric": "rpc.rpc.stopMaster_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/blocks_get_local_pathinfo": {
+ "metric": "dfs.datanode.BlocksGetLocalPathInfo",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/process/proc_total": {
+ "metric": "proc_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/balance_avg_time": {
+ "metric": "rpc.rpc.balance_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/splitRegion_num_ops": {
+ "metric": "rpc.rpc.splitRegion_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/disk/part_max_used": {
+ "metric": "part_max_used",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getCatalogTracker_avg_time": {
+ "metric": "rpc.rpc.getCatalogTracker_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/readBlockOp_num_ops": {
+ "metric": "dfs.datanode.ReadBlockOpNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/writeBlockOp_num_ops": {
+ "metric": "dfs.datanode.WriteBlockOpNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/modifyColumn_avg_time": {
+ "metric": "rpc.rpc.modifyColumn_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/multi/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.multi.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/balance_num_ops": {
+ "metric": "rpc.rpc.balance_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getZooKeeper_num_ops": {
+ "metric": "rpc.rpc.getZooKeeper_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/shutdown_num_ops": {
+ "metric": "rpc.rpc.shutdown_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/flushRegion_num_ops": {
+ "metric": "rpc.rpc.flushRegion_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/ugi/loginSuccess_num_ops": {
+ "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/isAborted/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.isAborted.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_idle": {
+ "metric": "cpu_idle",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/rollHLogWriter/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.rollHLogWriter.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.removeFromOnlineRegions.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getProtocolSignature/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.getProtocolSignature.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/get_num_ops": {
+ "metric": "rpc.rpc.get_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/blockChecksumOp_num_ops": {
+ "metric": "dfs.datanode.BlockChecksumOpNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getServerName_avg_time": {
+ "metric": "rpc.rpc.getServerName_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/stopMaster_avg_time": {
+ "metric": "rpc.rpc.stopMaster_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getOnlineRegions/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.getOnlineRegions.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/network/bytes_in": {
+ "metric": "bytes_in",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/memNonHeapCommittedM": {
+ "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/removeFromOnlineRegions_num_ops": {
+ "metric": "rpc.rpc.removeFromOnlineRegions_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/abort_avg_time": {
+ "metric": "rpc.rpc.abort_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/openScanner/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.openScanner.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getRegionInfo_avg_time": {
+ "metric": "rpc.rpc.getRegionInfo_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getCatalogTracker/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.getCatalogTracker.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/enableTable_num_ops": {
+ "metric": "rpc.rpc.enableTable_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/lockRow/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.lockRow.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/lockRow_avg_time": {
+ "metric": "rpc.rpc.lockRow_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/checkOOME_num_ops": {
+ "metric": "rpc.rpc.checkOOME_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/addColumn_avg_time": {
+ "metric": "rpc.rpc.addColumn_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/addToOnlineRegions/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.addToOnlineRegions.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/reportRSFatalError_num_ops": {
+ "metric": "rpc.rpc.reportRSFatalError_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/ugi/loginFailure_num_ops": {
+ "metric": "ugi.UgiMetrics.LoginFailureNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_wio": {
+ "metric": "cpu_wio",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getConfiguration/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.getConfiguration.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getZooKeeper/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.getZooKeeper.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/reads_from_local_client": {
+ "metric": "dfs.datanode.ReadsFromLocalClient",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getServerName_num_ops": {
+ "metric": "rpc.rpc.getServerName_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getServerName/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.getServerName.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/blockReports_num_ops": {
+ "metric": "dfs.datanode.BlockReportsNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/reportRSFatalError_avg_time": {
+ "metric": "rpc.rpc.reportRSFatalError_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthenticationSuccesses": {
+ "metric": "rpc.rpc.RpcAuthenticationSuccesses",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/network/pkts_in": {
+ "metric": "pkts_in",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/isStopped/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.isStopped.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/disableTable_avg_time": {
+ "metric": "rpc.rpc.disableTable_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/mem_total": {
+ "metric": "mem_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/rollHLogWriter/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.rollHLogWriter.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/memHeapCommittedM": {
+ "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/abort/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.abort.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/threadsRunnable": {
+ "metric": "jvm.JvmMetrics.ThreadsRunnable",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/openRegion_avg_time": {
+ "metric": "rpc.rpc.openRegion_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/threadsNew": {
+ "metric": "jvm.JvmMetrics.ThreadsNew",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/unlockRow/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.unlockRow.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getClusterStatus_num_ops": {
+ "metric": "rpc.rpc.getClusterStatus_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getHTableDescriptors_avg_time": {
+ "metric": "rpc.rpc.getHTableDescriptors_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthorizationFailures": {
+ "metric": "rpc.rpc.RpcAuthorizationFailures",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/deleteColumn_num_ops": {
+ "metric": "rpc.rpc.deleteColumn_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/regionServerReport_avg_time": {
+ "metric": "rpc.rpc.regionServerReport_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/delete/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.delete.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/RpcQueueTime_avg_time": {
+ "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/increment_num_ops": {
+ "metric": "rpc.rpc.increment_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/stop/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.stop.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getAlterStatus_num_ops": {
+ "metric": "rpc.rpc.getAlterStatus_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/modifyColumn_num_ops": {
+ "metric": "rpc.rpc.modifyColumn_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/flushRegion/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.flushRegion.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/incrementColumnValue/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.incrementColumnValue.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/next_avg_time": {
+ "metric": "rpc.rpc.next_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/next/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.next.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/checkOOME_avg_time": {
+ "metric": "rpc.rpc.checkOOME_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/RpcSlowResponse_avg_time": {
+ "metric": "rpc.rpc.RpcSlowResponse_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getConfiguration_avg_time": {
+ "metric": "rpc.rpc.getConfiguration_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getServerName/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.getServerName.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/ReceivedBytes": {
+ "metric": "rpc.rpc.ReceivedBytes",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_nice": {
+ "metric": "cpu_nice",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/bulkLoadHFiles_num_ops": {
+ "metric": "rpc.rpc.bulkLoadHFiles_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/unassign_avg_time": {
+ "metric": "rpc.rpc.unassign_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/NumOpenConnections": {
+ "metric": "rpc.rpc.NumOpenConnections",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/gcTimeMillis": {
+ "metric": "jvm.JvmMetrics.GcTimeMillis",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/delete/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.delete.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/multi/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.multi.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/threadsTerminated": {
+ "metric": "jvm.JvmMetrics.ThreadsTerminated",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/getProtocolVersion/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.getProtocolVersion.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/network/bytes_out": {
+ "metric": "bytes_out",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/checkAndDelete/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.checkAndDelete.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/balanceSwitch_avg_time": {
+ "metric": "rpc.rpc.balanceSwitch_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/load/load_five": {
+ "metric": "load_five",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/blocks_read": {
+ "metric": "dfs.datanode.BlocksRead",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/boottime": {
+ "metric": "boottime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/openRegion/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.openRegion.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/lockRow/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.lockRow.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/compactRegion_avg_time": {
+ "metric": "rpc.rpc.compactRegion_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/openRegion_num_ops": {
+ "metric": "rpc.rpc.openRegion_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/callQueueLen": {
+ "metric": "rpc.rpc.CallQueueLength",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/blocks_removed": {
+ "metric": "dfs.datanode.BlocksRemoved",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/compactRegion/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.compactRegion.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getProtocolSignature/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.getProtocolSignature.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/execCoprocessor_num_ops": {
+ "metric": "rpc.rpc.execCoprocessor_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getHServerInfo/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.getHServerInfo.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/readBlockOp_avg_time": {
+ "metric": "dfs.datanode.ReadBlockOpAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getOnlineRegions_avg_time": {
+ "metric": "rpc.rpc.getOnlineRegions_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getCatalogTracker/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.getCatalogTracker.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/incrementColumnValue_avg_time": {
+ "metric": "rpc.rpc.incrementColumnValue_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/move_num_ops": {
+ "metric": "rpc.rpc.move_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/RpcProcessingTime_num_ops": {
+ "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/stop_num_ops": {
+ "metric": "rpc.rpc.stop_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/replicateLogEntries_avg_time": {
+ "metric": "rpc.rpc.replicateLogEntries_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/deleteTable_num_ops": {
+ "metric": "rpc.rpc.deleteTable_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/blockChecksumOp_avg_time": {
+ "metric": "dfs.datanode.BlockChecksumOpAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/copyBlockOp_avg_time": {
+ "metric": "dfs.datanode.CopyBlockOpAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/get_avg_time": {
+ "metric": "rpc.rpc.get_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/logError": {
+ "metric": "jvm.JvmMetrics.LogError",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/multi_num_ops": {
+ "metric": "rpc.rpc.multi_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/next/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.next.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/writeBlockOp_avg_time": {
+ "metric": "dfs.datanode.WriteBlockOpAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries_num_ops": {
+ "metric": "rpc.rpc.getBlockCacheColumnFamilySummaries_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getFromOnlineRegions/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.getFromOnlineRegions.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/addToOnlineRegions_avg_time": {
+ "metric": "rpc.rpc.addToOnlineRegions_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getRegionInfo/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.getRegionInfo.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/deleteColumn_avg_time": {
+ "metric": "rpc.rpc.deleteColumn_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/bulkLoadHFiles_avg_time": {
+ "metric": "rpc.rpc.bulkLoadHFiles_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/isAborted/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.isAborted.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/exists_avg_time": {
+ "metric": "rpc.rpc.exists_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/stop/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.stop.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/addToOnlineRegions_num_ops": {
+ "metric": "rpc.rpc.addToOnlineRegions_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/abort/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.abort.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.removeFromOnlineRegions.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getOnlineRegions/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.getOnlineRegions.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/exists/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.exists.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/offline_avg_time": {
+ "metric": "rpc.rpc.offline_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/replicateLogEntries/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.replicateLogEntries.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/unlockRow_avg_time": {
+ "metric": "rpc.rpc.unlockRow_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/delete_num_ops": {
+ "metric": "rpc.rpc.delete_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getCatalogTracker_num_ops": {
+ "metric": "rpc.rpc.getCatalogTracker_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/regionServerStartup_avg_time": {
+ "metric": "rpc.rpc.regionServerStartup_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/checkOOME/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.checkOOME.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/exists_num_ops": {
+ "metric": "rpc.rpc.exists_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/checkAndDelete_num_ops": {
+ "metric": "rpc.rpc.checkAndDelete_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/checkAndPut/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.checkAndPut.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/closeRegion_avg_time": {
+ "metric": "rpc.rpc.closeRegion_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/close/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.close.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getProtocolSignature_avg_time": {
+ "metric": "rpc.rpc.getProtocolSignature_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/assign_avg_time": {
+ "metric": "rpc.rpc.assign_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/createTable_num_ops": {
+ "metric": "rpc.rpc.createTable_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/execCoprocessor_avg_time": {
+ "metric": "rpc.rpc.execCoprocessor_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/process/proc_run": {
+ "metric": "proc_run",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/close_avg_time": {
+ "metric": "rpc.rpc.close_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getConfiguration_num_ops": {
+ "metric": "rpc.rpc.getConfiguration_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/threadsBlocked": {
+ "metric": "jvm.JvmMetrics.ThreadsBlocked",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/getHServerInfo_num_ops": {
+ "metric": "rpc.rpc.getHServerInfo_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/isStopped_avg_time": {
+ "metric": "rpc.rpc.isStopped_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/RpcQueueTime_num_ops": {
+ "metric": "rpc.rpc.RpcQueueTimeNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/stop_avg_time": {
+ "metric": "rpc.rpc.stop_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/rollHLogWriter_avg_time": {
+ "metric": "rpc.rpc.rollHLogWriter_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/isStopped_num_ops": {
+ "metric": "rpc.rpc.isStopped_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/writes_from_local_client": {
+ "metric": "dfs.datanode.WritesFromLocalClient",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/addToOnlineRegions/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.addToOnlineRegions.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/heartBeats_num_ops": {
+ "metric": "dfs.datanode.HeartbeatsNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/isMasterRunning_avg_time": {
+ "metric": "rpc.rpc.isMasterRunning_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/replaceBlockOp_num_ops": {
+ "metric": "dfs.datanode.ReplaceBlockOpNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/incrementColumnValue_num_ops": {
+ "metric": "rpc.rpc.incrementColumnValue_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/copyBlockOp_num_ops": {
+ "metric": "dfs.datanode.CopyBlockOpNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/bulkLoadHFiles/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.bulkLoadHFiles.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_aidle": {
+ "metric": "cpu_aidle",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/mem_free": {
+ "metric": "mem_free",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/execCoprocessor/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.execCoprocessor.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/close/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.close.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/assign_num_ops": {
+ "metric": "rpc.rpc.assign_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/enableTable_avg_time": {
+ "metric": "rpc.rpc.enableTable_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/incrementColumnValue/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.incrementColumnValue.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/checkAndDelete/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.checkAndDelete.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/network/pkts_out": {
+ "metric": "pkts_out",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getFromOnlineRegions_num_ops": {
+ "metric": "rpc.rpc.getFromOnlineRegions_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/close_num_ops": {
+ "metric": "rpc.rpc.close_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/mem_cached": {
+ "metric": "mem_cached",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getConfiguration/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.getConfiguration.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/disk/disk_total": {
+ "metric": "disk_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/get/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.get.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/openScanner_avg_time": {
+ "metric": "rpc.rpc.openScanner_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/RpcSlowResponse_num_ops": {
+ "metric": "rpc.rpc.RpcSlowResponse_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getFromOnlineRegions_avg_time": {
+ "metric": "rpc.rpc.getFromOnlineRegions_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/reads_from_remote_client": {
+ "metric": "dfs.datanode.ReadsFromRemoteClient",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/splitRegion/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.splitRegion.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getClosestRowBefore/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.getClosestRowBefore.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/flushRegion/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.flushRegion.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/isAborted_avg_time": {
+ "metric": "rpc.rpc.isAborted_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/increment/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.increment.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/deleteTable_avg_time": {
+ "metric": "rpc.rpc.deleteTable_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/bytes_read": {
+ "metric": "dfs.datanode.BytesRead",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/put/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.put.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/blocks_verified": {
+ "metric": "dfs.datanode.BlocksVerified",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/delete_avg_time": {
+ "metric": "rpc.rpc.delete_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getClosestRowBefore_avg_time": {
+ "metric": "rpc.rpc.getClosestRowBefore_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_speed": {
+ "metric": "cpu_speed",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/get/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.get.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/compactRegion/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.compactRegion.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/openRegions/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.openRegions.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/RpcProcessingTime_avg_time": {
+ "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/put_num_ops": {
+ "metric": "rpc.rpc.put_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/move_avg_time": {
+ "metric": "rpc.rpc.move_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthenticationFailures": {
+ "metric": "rpc.rpc.RpcAuthenticationFailures",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/openScanner/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.openScanner.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getClusterStatus_avg_time": {
+ "metric": "rpc.rpc.getClusterStatus_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/bytes_written": {
+ "metric": "dfs.datanode.BytesWritten",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/unlockRow/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.unlockRow.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/removeFromOnlineRegions_avg_time": {
+ "metric": "rpc.rpc.removeFromOnlineRegions_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/modifyTable_avg_time": {
+ "metric": "rpc.rpc.modifyTable_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/put/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.put.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/checkAndPut_avg_time": {
+ "metric": "rpc.rpc.checkAndPut_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/blockReports_avg_time": {
+ "metric": "dfs.datanode.BlockReportsAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/replaceBlockOp_avg_time": {
+ "metric": "dfs.datanode.ReplaceBlockOpAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/isStopped/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.isStopped.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/put_avg_time": {
+ "metric": "rpc.rpc.put_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/increment/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.increment.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/openRegion/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.openRegion.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/SentBytes": {
+ "metric": "rpc.rpc.SentBytes",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/createTable_avg_time": {
+ "metric": "rpc.rpc.createTable_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getRegionInfo/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.getRegionInfo.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/addColumn_num_ops": {
+ "metric": "rpc.rpc.addColumn_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getHTableDescriptors_num_ops": {
+ "metric": "rpc.rpc.getHTableDescriptors_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getAlterStatus_avg_time": {
+ "metric": "rpc.rpc.getAlterStatus_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getRegionInfo_num_ops": {
+ "metric": "rpc.rpc.getRegionInfo_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/maxMemoryM": {
+ "metric": "jvm.JvmMetrics.maxMemoryM",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/logWarn": {
+ "metric": "jvm.JvmMetrics.LogWarn",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/compactRegion_num_ops": {
+ "metric": "rpc.rpc.compactRegion_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getFromOnlineRegions/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.getFromOnlineRegions.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/threadsTimedWaiting": {
+ "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/isAborted_num_ops": {
+ "metric": "rpc.rpc.isAborted_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/gcCount": {
+ "metric": "jvm.JvmMetrics.GcCount",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/getOnlineRegions_num_ops": {
+ "metric": "rpc.rpc.getOnlineRegions_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/checkOOME/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.checkOOME.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/memHeapUsedM": {
+ "metric": "jvm.JvmMetrics.MemHeapUsedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/getProtocolVersion_num_ops": {
+ "metric": "rpc.rpc.getProtocolVersion_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/disk/disk_free": {
+ "metric": "disk_free",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/threadsWaiting": {
+ "metric": "jvm.JvmMetrics.ThreadsWaiting",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/unlockRow_num_ops": {
+ "metric": "rpc.rpc.unlockRow_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/replicateLogEntries/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.replicateLogEntries.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/disableTable_num_ops": {
+ "metric": "rpc.rpc.disableTable_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/mem_buffers": {
+ "metric": "mem_buffers",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/shutdown_avg_time": {
+ "metric": "rpc.rpc.shutdown_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/openScanner_num_ops": {
+ "metric": "rpc.rpc.openScanner_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/regionServerStartup_num_ops": {
+ "metric": "rpc.rpc.regionServerStartup_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/bulkLoadHFiles/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.bulkLoadHFiles.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_user": {
+ "metric": "cpu_user",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/swap_free": {
+ "metric": "swap_free",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_system": {
+ "metric": "cpu_system",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/exists/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.exists.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/openRegions_avg_time": {
+ "metric": "rpc.rpc.openRegions_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/regionServerReport_num_ops": {
+ "metric": "rpc.rpc.regionServerReport_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/next_num_ops": {
+ "metric": "rpc.rpc.next_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries_avg_time": {
+ "metric": "rpc.rpc.getBlockCacheColumnFamilySummaries_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/block_verification_failures": {
+ "metric": "dfs.datanode.BlockVerificationFailures",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/closeRegion/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.closeRegion.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/checkAndDelete_avg_time": {
+ "metric": "rpc.rpc.checkAndDelete_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/load/load_fifteen": {
+ "metric": "load_fifteen",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getHServerInfo_avg_time": {
+ "metric": "rpc.rpc.getHServerInfo_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/logInfo": {
+ "metric": "jvm.JvmMetrics.LogInfo",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getZooKeeper_avg_time": {
+ "metric": "rpc.rpc.getZooKeeper_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/execCoprocessor/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.execCoprocessor.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/datanode/blocks_written": {
+ "metric": "dfs.datanode.BlocksWritten",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/balanceSwitch_num_ops": {
+ "metric": "rpc.rpc.balanceSwitch_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/isMasterRunning_num_ops": {
+ "metric": "rpc.rpc.isMasterRunning_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/offline_num_ops": {
+ "metric": "rpc.rpc.offline_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/mem_shared": {
+ "metric": "mem_shared",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getProtocolSignature_num_ops": {
+ "metric": "rpc.rpc.getProtocolSignature_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/abort_num_ops": {
+ "metric": "rpc.rpc.abort_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/getProtocolVersion/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.getProtocolVersion.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/ugi/loginFailure_avg_time": {
+ "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_num": {
+ "metric": "cpu_num",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/rollHLogWriter_num_ops": {
+ "metric": "rpc.rpc.rollHLogWriter_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthorizationSuccesses": {
+ "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/openRegions_num_ops": {
+ "metric": "rpc.rpc.openRegions_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/logFatal": {
+ "metric": "jvm.JvmMetrics.LogFatal",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/splitRegion_avg_time": {
+ "metric": "rpc.rpc.splitRegion_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/closeRegion/aboveOneSec/_avg_time": {
+ "metric": "rpc.rpc.closeRegion.aboveOneSec._avg_time",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/checkAndPut_num_ops": {
+ "metric": "rpc.rpc.checkAndPut_num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/checkAndPut/aboveOneSec/_num_ops": {
+ "metric": "rpc.rpc.checkAndPut.aboveOneSec._num_ops",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/increment_avg_time": {
+ "metric": "rpc.rpc.increment_avg_time",
+ "pointInTime": true,
+ "temporal": true
+ }
+ },
+ "JOBTRACKER":{
+ "metrics/boottime":{
+ "metric":"boottime",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_aidle":{
+ "metric":"cpu_aidle",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_idle":{
+ "metric":"cpu_idle",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_nice":{
+ "metric":"cpu_nice",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_num":{
+ "metric":"cpu_num",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_speed":{
+ "metric":"cpu_speed",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_system":{
+ "metric":"cpu_system",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_user":{
+ "metric":"cpu_user",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_wio":{
+ "metric":"cpu_wio",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/disk/disk_free":{
+ "metric":"disk_free",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/disk/disk_total":{
+ "metric":"disk_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/disk/part_max_used":{
+ "metric":"part_max_used",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/gcCount":{
+ "metric":"jvm.JvmMetrics.gcCount",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/gcTimeMillis":{
+ "metric":"jvm.JvmMetrics.gcTimeMillis",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/logError":{
+ "metric":"jvm.JvmMetrics.logError",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/logFatal":{
+ "metric":"jvm.JvmMetrics.logFatal",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/logInfo":{
+ "metric":"jvm.JvmMetrics.logInfo",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/logWarn":{
+ "metric":"jvm.JvmMetrics.logWarn",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/memHeapCommittedM":{
+ "metric":"jvm.JvmMetrics.memHeapCommittedM",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/memHeapUsedM":{
+ "metric":"jvm.JvmMetrics.memHeapUsedM",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/memNonHeapCommittedM":{
+ "metric":"jvm.JvmMetrics.memNonHeapCommittedM",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/memNonHeapUsedM":{
+ "metric":"jvm.JvmMetrics.memNonHeapUsedM",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/threadsBlocked":{
+ "metric":"jvm.JvmMetrics.threadsBlocked",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/threadsNew":{
+ "metric":"jvm.JvmMetrics.threadsNew",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/threadsRunnable":{
+ "metric":"jvm.JvmMetrics.threadsRunnable",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/threadsTerminated":{
+ "metric":"jvm.JvmMetrics.threadsTerminated",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/threadsTimedWaiting":{
+ "metric":"jvm.JvmMetrics.threadsTimedWaiting",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/threadsWaiting":{
+ "metric":"jvm.JvmMetrics.threadsWaiting",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/load/load_fifteen":{
+ "metric":"load_fifteen",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/load/load_five":{
+ "metric":"load_five",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/load/load_one":{
+ "metric":"load_one",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/jobs_completed":{
+ "metric":"mapred.Queue.jobs_completed",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/jobs_failed":{
+ "metric":"mapred.Queue.jobs_failed",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/jobs_killed":{
+ "metric":"mapred.Queue.jobs_killed",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/jobs_preparing":{
+ "metric":"mapred.Queue.jobs_preparing",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/jobs_running":{
+ "metric":"mapred.Queue.jobs_running",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/jobs_submitted":{
+ "metric":"mapred.Queue.jobs_submitted",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/maps_completed":{
+ "metric":"mapred.Queue.maps_completed",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/maps_failed":{
+ "metric":"mapred.Queue.maps_failed",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/maps_killed":{
+ "metric":"mapred.Queue.maps_killed",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/maps_launched":{
+ "metric":"mapred.Queue.maps_launched",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/reduces_completed":{
+ "metric":"mapred.Queue.reduces_completed",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/reduces_failed":{
+ "metric":"mapred.Queue.reduces_failed",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/reduces_killed":{
+ "metric":"mapred.Queue.reduces_killed",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/reduces_launched":{
+ "metric":"mapred.Queue.reduces_launched",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/reserved_map_slots":{
+ "metric":"mapred.Queue.reserved_map_slots",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/reserved_reduce_slots":{
+ "metric":"mapred.Queue.reserved_reduce_slots",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/running_0":{
+ "metric":"mapred.Queue.running_0",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/running_1440":{
+ "metric":"mapred.Queue.running_1440",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/running_300":{
+ "metric":"mapred.Queue.running_300",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/running_60":{
+ "metric":"mapred.Queue.running_60",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/waiting_maps":{
+ "metric":"mapred.Queue.waiting_maps",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/Queue/waiting_reduces":{
+ "metric":"mapred.Queue.waiting_reduces",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/blacklisted_maps":{
+ "metric":"mapred.jobtracker.blacklisted_maps",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/blacklisted_reduces":{
+ "metric":"mapred.jobtracker.blacklisted_reduces",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/heartbeats":{
+ "metric":"mapred.jobtracker.heartbeats",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/jobs_completed":{
+ "metric":"mapred.jobtracker.jobs_completed",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/jobs_failed":{
+ "metric":"mapred.jobtracker.jobs_failed",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/jobs_killed":{
+ "metric":"mapred.jobtracker.jobs_killed",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/jobs_preparing":{
+ "metric":"mapred.jobtracker.jobs_preparing",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/jobs_running":{
+ "metric":"mapred.jobtracker.jobs_running",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/jobs_submitted":{
+ "metric":"mapred.jobtracker.jobs_submitted",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/map_slots":{
+ "metric":"mapred.jobtracker.map_slots",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/maps_completed":{
+ "metric":"mapred.jobtracker.maps_completed",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/maps_failed":{
+ "metric":"mapred.jobtracker.maps_failed",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/maps_killed":{
+ "metric":"mapred.jobtracker.maps_killed",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/maps_launched":{
+ "metric":"mapred.jobtracker.maps_launched",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/occupied_map_slots":{
+ "metric":"mapred.jobtracker.occupied_map_slots",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/occupied_reduce_slots":{
+ "metric":"mapred.jobtracker.occupied_reduce_slots",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/reduce_slots":{
+ "metric":"mapred.jobtracker.reduce_slots",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/reduces_completed":{
+ "metric":"mapred.jobtracker.reduces_completed",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/reduces_failed":{
+ "metric":"mapred.jobtracker.reduces_failed",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/reduces_killed":{
+ "metric":"mapred.jobtracker.reduces_killed",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/reduces_launched":{
+ "metric":"mapred.jobtracker.reduces_launched",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/reserved_map_slots":{
+ "metric":"mapred.jobtracker.reserved_map_slots",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/reserved_reduce_slots":{
+ "metric":"mapred.jobtracker.reserved_reduce_slots",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/running_maps":{
+ "metric":"mapred.jobtracker.running_maps",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/running_reduces":{
+ "metric":"mapred.jobtracker.running_reduces",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/trackers":{
+ "metric":"mapred.jobtracker.trackers",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/trackers_blacklisted":{
+ "metric":"mapred.jobtracker.trackers_blacklisted",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/trackers_decommissioned":{
+ "metric":"mapred.jobtracker.trackers_decommissioned",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/trackers_graylisted":{
+ "metric":"mapred.jobtracker.trackers_graylisted",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/waiting_maps":{
+ "metric":"mapred.jobtracker.waiting_maps",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/jobtracker/waiting_reduces":{
+ "metric":"mapred.jobtracker.waiting_reduces",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/memory/mem_buffers":{
+ "metric":"mem_buffers",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_cached":{
+ "metric":"mem_cached",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_free":{
+ "metric":"mem_free",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_shared":{
+ "metric":"mem_shared",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_total":{
+ "metric":"mem_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/swap_free":{
+ "metric":"swap_free",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/swap_total":{
+ "metric":"swap_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/bytes_in":{
+ "metric":"bytes_in",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/bytes_out":{
+ "metric":"bytes_out",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/pkts_in":{
+ "metric":"pkts_in",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/pkts_out":{
+ "metric":"pkts_out",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/process/proc_run":{
+ "metric":"proc_run",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/process/proc_total":{
+ "metric":"proc_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/NumOpenConnections":{
+ "metric":"rpc.rpc.NumOpenConnections",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/ReceivedBytes":{
+ "metric":"rpc.rpc.ReceivedBytes",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/RpcProcessingTime_avg_time":{
+ "metric":"rpc.rpc.RpcProcessingTime_avg_time",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/RpcProcessingTime_num_ops":{
+ "metric":"rpc.rpc.RpcProcessingTime_num_ops",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/RpcQueueTime_avg_time":{
+ "metric":"rpc.rpc.RpcQueueTime_avg_time",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/RpcQueueTime_num_ops":{
+ "metric":"rpc.rpc.RpcQueueTime_num_ops",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/SentBytes":{
+ "metric":"rpc.rpc.SentBytes",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/callQueueLen":{
+ "metric":"rpc.rpc.callQueueLen",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthenticationFailures":{
+ "metric":"rpc.rpc.rpcAuthenticationFailures",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthenticationSuccesses":{
+ "metric":"rpc.rpc.rpcAuthenticationSuccesses",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthorizationFailures":{
+ "metric":"rpc.rpc.rpcAuthorizationFailures",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthorizationSuccesses":{
+ "metric":"rpc.rpc.rpcAuthorizationSuccesses",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getBuildVersion_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getBuildVersion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getBuildVersion_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getBuildVersion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getDelegationToken_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getDelegationToken_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getDelegationToken_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getDelegationToken_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getJobCounters_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getJobCounters_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getJobCounters_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getJobCounters_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getJobProfile_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getJobProfile_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getJobProfile_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getJobProfile_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getJobStatus_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getJobStatus_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getJobStatus_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getJobStatus_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getMapTaskReports_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getMapTaskReports_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getMapTaskReports_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getMapTaskReports_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getNewJobId_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getNewJobId_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getNewJobId_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getNewJobId_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getProtocolVersion_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getProtocolVersion_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getQueueAdmins_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getQueueAdmins_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getQueueAdmins_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getQueueAdmins_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getReduceTaskReports_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getReduceTaskReports_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getReduceTaskReports_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getReduceTaskReports_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getStagingAreaDir_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getStagingAreaDir_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getStagingAreaDir_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getStagingAreaDir_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getSystemDir_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getSystemDir_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getSystemDir_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getSystemDir_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getTaskCompletionEvents_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.getTaskCompletionEvents_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/getTaskCompletionEvents_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.getTaskCompletionEvents_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/heartbeat_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.heartbeat_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/heartbeat_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.heartbeat_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/jobsToComplete_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.jobsToComplete_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/jobsToComplete_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.jobsToComplete_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/submitJob_avg_time":{
+ "metric":"rpcdetailed.rpcdetailed.submitJob_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpcdetailed/submitJob_num_ops":{
+ "metric":"rpcdetailed.rpcdetailed.submitJob_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/ugi/loginFailure_avg_time":{
+ "metric":"ugi.ugi.loginFailure_avg_time",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/ugi/loginFailure_num_ops":{
+ "metric":"ugi.ugi.loginFailure_num_ops",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/ugi/loginSuccess_avg_time":{
+ "metric":"ugi.ugi.loginSuccess_avg_time",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/ugi/loginSuccess_num_ops":{
+ "metric":"ugi.ugi.loginSuccess_num_ops",
+ "pointInTime":false,
+ "temporal":true
+ }
+ },
+ "TASKTRACKER":{
+ "metrics/boottime":{
+ "metric":"boottime",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_aidle":{
+ "metric":"cpu_aidle",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_idle":{
+ "metric":"cpu_idle",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_nice":{
+ "metric":"cpu_nice",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_num":{
+ "metric":"cpu_num",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_speed":{
+ "metric":"cpu_speed",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_system":{
+ "metric":"cpu_system",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_user":{
+ "metric":"cpu_user",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_wio":{
+ "metric":"cpu_wio",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/disk/disk_free":{
+ "metric":"disk_free",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/disk/disk_total":{
+ "metric":"disk_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/disk/part_max_used":{
+ "metric":"part_max_used",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/gcCount":{
+ "metric":"jvm.JvmMetrics.gcCount",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/gcTimeMillis":{
+ "metric":"jvm.JvmMetrics.gcTimeMillis",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/logError":{
+ "metric":"jvm.JvmMetrics.logError",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/logFatal":{
+ "metric":"jvm.JvmMetrics.logFatal",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/logInfo":{
+ "metric":"jvm.JvmMetrics.logInfo",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/logWarn":{
+ "metric":"jvm.JvmMetrics.logWarn",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/maxMemoryM":{
+ "metric":"jvm.JvmMetrics.maxMemoryM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memHeapCommittedM":{
+ "metric":"jvm.JvmMetrics.memHeapCommittedM",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/memHeapUsedM":{
+ "metric":"jvm.JvmMetrics.memHeapUsedM",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/memNonHeapCommittedM":{
+ "metric":"jvm.JvmMetrics.memNonHeapCommittedM",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/memNonHeapUsedM":{
+ "metric":"jvm.JvmMetrics.memNonHeapUsedM",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/threadsBlocked":{
+ "metric":"jvm.JvmMetrics.threadsBlocked",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/threadsNew":{
+ "metric":"jvm.JvmMetrics.threadsNew",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/threadsRunnable":{
+ "metric":"jvm.JvmMetrics.threadsRunnable",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/threadsTerminated":{
+ "metric":"jvm.JvmMetrics.threadsTerminated",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/threadsTimedWaiting":{
+ "metric":"jvm.JvmMetrics.threadsTimedWaiting",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/jvm/threadsWaiting":{
+ "metric":"jvm.JvmMetrics.threadsWaiting",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/load/load_fifteen":{
+ "metric":"load_fifteen",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/load/load_five":{
+ "metric":"load_five",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/load/load_one":{
+ "metric":"load_one",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/mapred/shuffleOutput/shuffle_exceptions_caught":{
+ "metric":"mapred.shuffleOutput.shuffle_exceptions_caught",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/shuffleOutput/shuffle_failed_outputs":{
+ "metric":"mapred.shuffleOutput.shuffle_failed_outputs",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/shuffleOutput/shuffle_handler_busy_percent":{
+ "metric":"mapred.shuffleOutput.shuffle_handler_busy_percent",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/shuffleOutput/shuffle_output_bytes":{
+ "metric":"mapred.shuffleOutput.shuffle_output_bytes",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/shuffleOutput/shuffle_success_outputs":{
+ "metric":"mapred.shuffleOutput.shuffle_success_outputs",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/tasktracker/mapTaskSlots":{
+ "metric":"mapred.tasktracker.mapTaskSlots",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/tasktracker/maps_running":{
+ "metric":"mapred.tasktracker.maps_running",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/tasktracker/reduceTaskSlots":{
+ "metric":"mapred.tasktracker.reduceTaskSlots",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/mapred/tasktracker/reduces_running":{
+ "metric":"mapred.tasktracker.reduces_running",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/memory/mem_buffers":{
+ "metric":"mem_buffers",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_cached":{
+ "metric":"mem_cached",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_free":{
+ "metric":"mem_free",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_shared":{
+ "metric":"mem_shared",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_total":{
+ "metric":"mem_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/swap_free":{
+ "metric":"swap_free",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/swap_total":{
+ "metric":"swap_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/bytes_in":{
+ "metric":"bytes_in",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/bytes_out":{
+ "metric":"bytes_out",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/pkts_in":{
+ "metric":"pkts_in",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/pkts_out":{
+ "metric":"pkts_out",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/process/proc_run":{
+ "metric":"proc_run",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/process/proc_total":{
+ "metric":"proc_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/NumOpenConnections":{
+ "metric":"rpc.rpc.NumOpenConnections",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/ReceivedBytes":{
+ "metric":"rpc.rpc.ReceivedBytes",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/RpcProcessingTime_avg_time":{
+ "metric":"rpc.rpc.RpcProcessingTime_avg_time",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/RpcProcessingTime_num_ops":{
+ "metric":"rpc.rpc.RpcProcessingTime_num_ops",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/RpcQueueTime_avg_time":{
+ "metric":"rpc.rpc.RpcQueueTime_avg_time",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/RpcQueueTime_num_ops":{
+ "metric":"rpc.rpc.RpcQueueTime_num_ops",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/RpcSlowResponse_avg_time":{
+ "metric":"rpc.rpc.RpcSlowResponse_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcSlowResponse_num_ops":{
+ "metric":"rpc.rpc.RpcSlowResponse_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/SentBytes":{
+ "metric":"rpc.rpc.SentBytes",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/abort/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.abort.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/abort/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.abort.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/abort_avg_time":{
+ "metric":"rpc.rpc.abort_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/abort_num_ops":{
+ "metric":"rpc.rpc.abort_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addColumn_avg_time":{
+ "metric":"rpc.rpc.addColumn_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addColumn_num_ops":{
+ "metric":"rpc.rpc.addColumn_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addToOnlineRegions/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.addToOnlineRegions.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addToOnlineRegions/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.addToOnlineRegions.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addToOnlineRegions_avg_time":{
+ "metric":"rpc.rpc.addToOnlineRegions_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addToOnlineRegions_num_ops":{
+ "metric":"rpc.rpc.addToOnlineRegions_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/assign_avg_time":{
+ "metric":"rpc.rpc.assign_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/assign_num_ops":{
+ "metric":"rpc.rpc.assign_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balanceSwitch_avg_time":{
+ "metric":"rpc.rpc.balanceSwitch_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balanceSwitch_num_ops":{
+ "metric":"rpc.rpc.balanceSwitch_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balance_avg_time":{
+ "metric":"rpc.rpc.balance_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balance_num_ops":{
+ "metric":"rpc.rpc.balance_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/bulkLoadHFiles/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.bulkLoadHFiles.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/bulkLoadHFiles/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.bulkLoadHFiles.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/bulkLoadHFiles_avg_time":{
+ "metric":"rpc.rpc.bulkLoadHFiles_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/bulkLoadHFiles_num_ops":{
+ "metric":"rpc.rpc.bulkLoadHFiles_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/callQueueLen":{
+ "metric":"rpc.rpc.callQueueLen",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndDelete/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.checkAndDelete.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndDelete/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.checkAndDelete.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndDelete_avg_time":{
+ "metric":"rpc.rpc.checkAndDelete_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndDelete_num_ops":{
+ "metric":"rpc.rpc.checkAndDelete_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndPut/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.checkAndPut.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndPut/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.checkAndPut.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndPut_avg_time":{
+ "metric":"rpc.rpc.checkAndPut_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndPut_num_ops":{
+ "metric":"rpc.rpc.checkAndPut_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkOOME/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.checkOOME.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkOOME/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.checkOOME.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkOOME_avg_time":{
+ "metric":"rpc.rpc.checkOOME_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkOOME_num_ops":{
+ "metric":"rpc.rpc.checkOOME_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/close/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.close.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/close/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.close.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/closeRegion/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.closeRegion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/closeRegion/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.closeRegion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/closeRegion_avg_time":{
+ "metric":"rpc.rpc.closeRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/closeRegion_num_ops":{
+ "metric":"rpc.rpc.closeRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/close_avg_time":{
+ "metric":"rpc.rpc.close_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/close_num_ops":{
+ "metric":"rpc.rpc.close_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/compactRegion/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.compactRegion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/compactRegion/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.compactRegion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/compactRegion_avg_time":{
+ "metric":"rpc.rpc.compactRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/compactRegion_num_ops":{
+ "metric":"rpc.rpc.compactRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/createTable_avg_time":{
+ "metric":"rpc.rpc.createTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/createTable_num_ops":{
+ "metric":"rpc.rpc.createTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/delete/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.delete.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/delete/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.delete.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteColumn_avg_time":{
+ "metric":"rpc.rpc.deleteColumn_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteColumn_num_ops":{
+ "metric":"rpc.rpc.deleteColumn_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteTable_avg_time":{
+ "metric":"rpc.rpc.deleteTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteTable_num_ops":{
+ "metric":"rpc.rpc.deleteTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/delete_avg_time":{
+ "metric":"rpc.rpc.delete_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/delete_num_ops":{
+ "metric":"rpc.rpc.delete_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/disableTable_avg_time":{
+ "metric":"rpc.rpc.disableTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/disableTable_num_ops":{
+ "metric":"rpc.rpc.disableTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/enableTable_avg_time":{
+ "metric":"rpc.rpc.enableTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/enableTable_num_ops":{
+ "metric":"rpc.rpc.enableTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/execCoprocessor/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.execCoprocessor.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/execCoprocessor/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.execCoprocessor.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/execCoprocessor_avg_time":{
+ "metric":"rpc.rpc.execCoprocessor_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/execCoprocessor_num_ops":{
+ "metric":"rpc.rpc.execCoprocessor_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/exists/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.exists.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/exists/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.exists.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/exists_avg_time":{
+ "metric":"rpc.rpc.exists_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/exists_num_ops":{
+ "metric":"rpc.rpc.exists_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/flushRegion/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.flushRegion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/flushRegion/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.flushRegion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/flushRegion_avg_time":{
+ "metric":"rpc.rpc.flushRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/flushRegion_num_ops":{
+ "metric":"rpc.rpc.flushRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/get/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.get.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/get/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.get.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getAlterStatus_avg_time":{
+ "metric":"rpc.rpc.getAlterStatus_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getAlterStatus_num_ops":{
+ "metric":"rpc.rpc.getAlterStatus_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries_avg_time":{
+ "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries_num_ops":{
+ "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getCatalogTracker/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getCatalogTracker.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getCatalogTracker/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getCatalogTracker.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getCatalogTracker_avg_time":{
+ "metric":"rpc.rpc.getCatalogTracker_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getCatalogTracker_num_ops":{
+ "metric":"rpc.rpc.getCatalogTracker_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClosestRowBefore/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getClosestRowBefore.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClosestRowBefore/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getClosestRowBefore.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClosestRowBefore_avg_time":{
+ "metric":"rpc.rpc.getClosestRowBefore_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClosestRowBefore_num_ops":{
+ "metric":"rpc.rpc.getClosestRowBefore_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClusterStatus_avg_time":{
+ "metric":"rpc.rpc.getClusterStatus_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClusterStatus_num_ops":{
+ "metric":"rpc.rpc.getClusterStatus_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getConfiguration/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getConfiguration.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getConfiguration/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getConfiguration.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getConfiguration_avg_time":{
+ "metric":"rpc.rpc.getConfiguration_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getConfiguration_num_ops":{
+ "metric":"rpc.rpc.getConfiguration_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getFromOnlineRegions/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getFromOnlineRegions.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getFromOnlineRegions/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getFromOnlineRegions.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getFromOnlineRegions_avg_time":{
+ "metric":"rpc.rpc.getFromOnlineRegions_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getFromOnlineRegions_num_ops":{
+ "metric":"rpc.rpc.getFromOnlineRegions_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHServerInfo/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getHServerInfo.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHServerInfo/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getHServerInfo.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHServerInfo_avg_time":{
+ "metric":"rpc.rpc.getHServerInfo_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHServerInfo_num_ops":{
+ "metric":"rpc.rpc.getHServerInfo_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHTableDescriptors_avg_time":{
+ "metric":"rpc.rpc.getHTableDescriptors_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHTableDescriptors_num_ops":{
+ "metric":"rpc.rpc.getHTableDescriptors_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getOnlineRegions/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getOnlineRegions.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getOnlineRegions/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getOnlineRegions.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getOnlineRegions_avg_time":{
+ "metric":"rpc.rpc.getOnlineRegions_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getOnlineRegions_num_ops":{
+ "metric":"rpc.rpc.getOnlineRegions_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolSignature/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getProtocolSignature.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolSignature/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getProtocolSignature.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolSignature_avg_time":{
+ "metric":"rpc.rpc.getProtocolSignature_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolSignature_num_ops":{
+ "metric":"rpc.rpc.getProtocolSignature_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolVersion/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getProtocolVersion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolVersion/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getProtocolVersion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolVersion_avg_time":{
+ "metric":"rpc.rpc.getProtocolVersion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolVersion_num_ops":{
+ "metric":"rpc.rpc.getProtocolVersion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getRegionInfo/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getRegionInfo.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getRegionInfo/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getRegionInfo.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getRegionInfo_avg_time":{
+ "metric":"rpc.rpc.getRegionInfo_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getRegionInfo_num_ops":{
+ "metric":"rpc.rpc.getRegionInfo_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getServerName/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getServerName.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getServerName/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getServerName.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getServerName_avg_time":{
+ "metric":"rpc.rpc.getServerName_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getServerName_num_ops":{
+ "metric":"rpc.rpc.getServerName_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getZooKeeper/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getZooKeeper.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getZooKeeper/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getZooKeeper.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getZooKeeper_avg_time":{
+ "metric":"rpc.rpc.getZooKeeper_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getZooKeeper_num_ops":{
+ "metric":"rpc.rpc.getZooKeeper_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/get_avg_time":{
+ "metric":"rpc.rpc.get_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/get_num_ops":{
+ "metric":"rpc.rpc.get_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/increment/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.increment.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/increment/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.increment.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/incrementColumnValue/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.incrementColumnValue.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/incrementColumnValue/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.incrementColumnValue.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/incrementColumnValue_avg_time":{
+ "metric":"rpc.rpc.incrementColumnValue_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/incrementColumnValue_num_ops":{
+ "metric":"rpc.rpc.incrementColumnValue_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/increment_avg_time":{
+ "metric":"rpc.rpc.increment_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/increment_num_ops":{
+ "metric":"rpc.rpc.increment_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isAborted/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.isAborted.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isAborted/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.isAborted.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isAborted_avg_time":{
+ "metric":"rpc.rpc.isAborted_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isAborted_num_ops":{
+ "metric":"rpc.rpc.isAborted_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isMasterRunning_avg_time":{
+ "metric":"rpc.rpc.isMasterRunning_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isMasterRunning_num_ops":{
+ "metric":"rpc.rpc.isMasterRunning_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isStopped/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.isStopped.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isStopped/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.isStopped.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isStopped_avg_time":{
+ "metric":"rpc.rpc.isStopped_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isStopped_num_ops":{
+ "metric":"rpc.rpc.isStopped_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/lockRow/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.lockRow.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/lockRow/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.lockRow.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/lockRow_avg_time":{
+ "metric":"rpc.rpc.lockRow_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/lockRow_num_ops":{
+ "metric":"rpc.rpc.lockRow_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyColumn_avg_time":{
+ "metric":"rpc.rpc.modifyColumn_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyColumn_num_ops":{
+ "metric":"rpc.rpc.modifyColumn_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyTable_avg_time":{
+ "metric":"rpc.rpc.modifyTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyTable_num_ops":{
+ "metric":"rpc.rpc.modifyTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/move_avg_time":{
+ "metric":"rpc.rpc.move_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/move_num_ops":{
+ "metric":"rpc.rpc.move_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/multi/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.multi.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/multi/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.multi.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/multi_avg_time":{
+ "metric":"rpc.rpc.multi_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/multi_num_ops":{
+ "metric":"rpc.rpc.multi_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/next/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.next.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/next/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.next.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/next_avg_time":{
+ "metric":"rpc.rpc.next_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/next_num_ops":{
+ "metric":"rpc.rpc.next_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/offline_avg_time":{
+ "metric":"rpc.rpc.offline_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/offline_num_ops":{
+ "metric":"rpc.rpc.offline_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegion/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.openRegion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegion/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.openRegion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegion_avg_time":{
+ "metric":"rpc.rpc.openRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegion_num_ops":{
+ "metric":"rpc.rpc.openRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegions/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.openRegions.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegions/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.openRegions.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegions_avg_time":{
+ "metric":"rpc.rpc.openRegions_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegions_num_ops":{
+ "metric":"rpc.rpc.openRegions_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openScanner/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.openScanner.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openScanner/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.openScanner.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openScanner_avg_time":{
+ "metric":"rpc.rpc.openScanner_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openScanner_num_ops":{
+ "metric":"rpc.rpc.openScanner_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/put/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.put.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/put/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.put.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/put_avg_time":{
+ "metric":"rpc.rpc.put_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/put_num_ops":{
+ "metric":"rpc.rpc.put_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerReport_avg_time":{
+ "metric":"rpc.rpc.regionServerReport_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerReport_num_ops":{
+ "metric":"rpc.rpc.regionServerReport_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerStartup_avg_time":{
+ "metric":"rpc.rpc.regionServerStartup_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerStartup_num_ops":{
+ "metric":"rpc.rpc.regionServerStartup_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.removeFromOnlineRegions.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.removeFromOnlineRegions.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/removeFromOnlineRegions_avg_time":{
+ "metric":"rpc.rpc.removeFromOnlineRegions_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/removeFromOnlineRegions_num_ops":{
+ "metric":"rpc.rpc.removeFromOnlineRegions_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/replicateLogEntries/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.replicateLogEntries.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/replicateLogEntries/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.replicateLogEntries.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/replicateLogEntries_avg_time":{
+ "metric":"rpc.rpc.replicateLogEntries_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/replicateLogEntries_num_ops":{
+ "metric":"rpc.rpc.replicateLogEntries_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/reportRSFatalError_avg_time":{
+ "metric":"rpc.rpc.reportRSFatalError_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/reportRSFatalError_num_ops":{
+ "metric":"rpc.rpc.reportRSFatalError_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rollHLogWriter/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.rollHLogWriter.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rollHLogWriter/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.rollHLogWriter.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rollHLogWriter_avg_time":{
+ "metric":"rpc.rpc.rollHLogWriter_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rollHLogWriter_num_ops":{
+ "metric":"rpc.rpc.rollHLogWriter_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthenticationFailures":{
+ "metric":"rpc.rpc.rpcAuthenticationFailures",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthenticationSuccesses":{
+ "metric":"rpc.rpc.rpcAuthenticationSuccesses",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthorizationFailures":{
+ "metric":"rpc.rpc.rpcAuthorizationFailures",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthorizationSuccesses":{
+ "metric":"rpc.rpc.rpcAuthorizationSuccesses",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/shutdown_avg_time":{
+ "metric":"rpc.rpc.shutdown_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/shutdown_num_ops":{
+ "metric":"rpc.rpc.shutdown_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/splitRegion/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.splitRegion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/splitRegion/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.splitRegion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/splitRegion_avg_time":{
+ "metric":"rpc.rpc.splitRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/splitRegion_num_ops":{
+ "metric":"rpc.rpc.splitRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stop/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.stop.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stop/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.stop.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stopMaster_avg_time":{
+ "metric":"rpc.rpc.stopMaster_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stopMaster_num_ops":{
+ "metric":"rpc.rpc.stopMaster_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stop_avg_time":{
+ "metric":"rpc.rpc.stop_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stop_num_ops":{
+ "metric":"rpc.rpc.stop_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unassign_avg_time":{
+ "metric":"rpc.rpc.unassign_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unassign_num_ops":{
+ "metric":"rpc.rpc.unassign_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unlockRow/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.unlockRow.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unlockRow/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.unlockRow.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unlockRow_avg_time":{
+ "metric":"rpc.rpc.unlockRow_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unlockRow_num_ops":{
+ "metric":"rpc.rpc.unlockRow_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/ugi/loginFailure_avg_time":{
+ "metric":"ugi.ugi.loginFailure_avg_time",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/ugi/loginFailure_num_ops":{
+ "metric":"ugi.ugi.loginFailure_num_ops",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/ugi/loginSuccess_avg_time":{
+ "metric":"ugi.ugi.loginSuccess_avg_time",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/ugi/loginSuccess_num_ops":{
+ "metric":"ugi.ugi.loginSuccess_num_ops",
+ "pointInTime":false,
+ "temporal":true
+ }
+ },
+ "HBASE_MASTER":{
+ "metrics/boottime":{
+ "metric":"boottime",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_aidle":{
+ "metric":"cpu_aidle",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_idle":{
+ "metric":"cpu_idle",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_nice":{
+ "metric":"cpu_nice",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_num":{
+ "metric":"cpu_num",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_speed":{
+ "metric":"cpu_speed",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_system":{
+ "metric":"cpu_system",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_user":{
+ "metric":"cpu_user",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_wio":{
+ "metric":"cpu_wio",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/disk/disk_free":{
+ "metric":"disk_free",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/disk/disk_total":{
+ "metric":"disk_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/disk/part_max_used":{
+ "metric":"part_max_used",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/master/cluster_requests":{
+ "metric":"master.Server.cluster_requests",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/hbase/master/splitSize_avg_time":{
+ "metric":"master.FileSystem.HlogSplitSize_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/master/splitSize_num_ops":{
+ "metric":"master.FileSystem.HlogSplitSize_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/master/splitTime_avg_time":{
+ "metric":"master.FileSystem.HlogSplitTime_mean",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/master/splitTime_num_ops":{
+ "metric":"master.FileSystem.HlogSplitTime_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/gcCount":{
+ "metric":"jvm.JvmMetrics.gcCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/gcTimeMillis":{
+ "metric":"jvm.JvmMetrics.gcTimeMillis",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/logError":{
+ "metric":"jvm.JvmMetrics.logError",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/logFatal":{
+ "metric":"jvm.JvmMetrics.logFatal",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/logInfo":{
+ "metric":"jvm.JvmMetrics.logInfo",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/logWarn":{
+ "metric":"jvm.JvmMetrics.logWarn",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/maxMemoryM":{
+ "metric":"jvm.JvmMetrics.maxMemoryM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memHeapCommittedM":{
+ "metric":"jvm.JvmMetrics.memHeapCommittedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memHeapUsedM":{
+ "metric":"jvm.JvmMetrics.memHeapUsedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memNonHeapCommittedM":{
+ "metric":"jvm.JvmMetrics.memNonHeapCommittedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memNonHeapUsedM":{
+ "metric":"jvm.JvmMetrics.memNonHeapUsedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsBlocked":{
+ "metric":"jvm.JvmMetrics.threadsBlocked",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsNew":{
+ "metric":"jvm.JvmMetrics.threadsNew",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsRunnable":{
+ "metric":"jvm.JvmMetrics.threadsRunnable",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsTerminated":{
+ "metric":"jvm.JvmMetrics.threadsTerminated",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsTimedWaiting":{
+ "metric":"jvm.JvmMetrics.threadsTimedWaiting",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsWaiting":{
+ "metric":"jvm.JvmMetrics.threadsWaiting",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/load/load_fifteen":{
+ "metric":"load_fifteen",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/load/load_five":{
+ "metric":"load_five",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/load/load_one":{
+ "metric":"load_one",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_buffers":{
+ "metric":"mem_buffers",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_cached":{
+ "metric":"mem_cached",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_free":{
+ "metric":"mem_free",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_shared":{
+ "metric":"mem_shared",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_total":{
+ "metric":"mem_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/swap_free":{
+ "metric":"swap_free",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/swap_total":{
+ "metric":"swap_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/bytes_in":{
+ "metric":"bytes_in",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/bytes_out":{
+ "metric":"bytes_out",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/pkts_in":{
+ "metric":"pkts_in",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/pkts_out":{
+ "metric":"pkts_out",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/process/proc_run":{
+ "metric":"proc_run",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/process/proc_total":{
+ "metric":"proc_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/NumOpenConnections":{
+ "metric":"rpc.rpc.NumOpenConnections",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/ReceivedBytes":{
+ "metric":"rpc.rpc.ReceivedBytes",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/RpcProcessingTime_avg_time":{
+ "metric":"rpc.rpc.RpcProcessingTime_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcProcessingTime_num_ops":{
+ "metric":"rpc.rpc.RpcProcessingTime_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcQueueTime_avg_time":{
+ "metric":"rpc.rpc.RpcQueueTime_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcQueueTime_num_ops":{
+ "metric":"rpc.rpc.RpcQueueTime_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcSlowResponse_avg_time":{
+ "metric":"rpc.rpc.RpcSlowResponse_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcSlowResponse_num_ops":{
+ "metric":"rpc.rpc.RpcSlowResponse_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/SentBytes":{
+ "metric":"rpc.rpc.SentBytes",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/addColumn/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.addColumn.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addColumn/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.addColumn.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addColumn_avg_time":{
+ "metric":"rpc.rpc.addColumn_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addColumn_num_ops":{
+ "metric":"rpc.rpc.addColumn_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/assign/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.assign.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/assign/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.assign.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/assign_avg_time":{
+ "metric":"rpc.rpc.assign_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/assign_num_ops":{
+ "metric":"rpc.rpc.assign_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balance/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.balance.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balance/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.balance.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balanceSwitch/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.balanceSwitch.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balanceSwitch/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.balanceSwitch.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balanceSwitch_avg_time":{
+ "metric":"rpc.rpc.balanceSwitch_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balanceSwitch_num_ops":{
+ "metric":"rpc.rpc.balanceSwitch_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balance_avg_time":{
+ "metric":"rpc.rpc.balance_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balance_num_ops":{
+ "metric":"rpc.rpc.balance_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/bulkLoadHFiles_avg_time":{
+ "metric":"rpc.rpc.bulkLoadHFiles_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/bulkLoadHFiles_num_ops":{
+ "metric":"rpc.rpc.bulkLoadHFiles_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/callQueueLen":{
+ "metric":"rpc.rpc.callQueueLen",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndDelete_avg_time":{
+ "metric":"rpc.rpc.checkAndDelete_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndDelete_num_ops":{
+ "metric":"rpc.rpc.checkAndDelete_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndPut_avg_time":{
+ "metric":"rpc.rpc.checkAndPut_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndPut_num_ops":{
+ "metric":"rpc.rpc.checkAndPut_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/closeRegion_avg_time":{
+ "metric":"rpc.rpc.closeRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/closeRegion_num_ops":{
+ "metric":"rpc.rpc.closeRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/close_avg_time":{
+ "metric":"rpc.rpc.close_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/close_num_ops":{
+ "metric":"rpc.rpc.close_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/compactRegion_avg_time":{
+ "metric":"rpc.rpc.compactRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/compactRegion_num_ops":{
+ "metric":"rpc.rpc.compactRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/createTable/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.createTable.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/createTable/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.createTable.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/createTable_avg_time":{
+ "metric":"rpc.rpc.createTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/createTable_num_ops":{
+ "metric":"rpc.rpc.createTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteColumn/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.deleteColumn.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteColumn/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.deleteColumn.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteColumn_avg_time":{
+ "metric":"rpc.rpc.deleteColumn_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteColumn_num_ops":{
+ "metric":"rpc.rpc.deleteColumn_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteTable/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.deleteTable.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteTable/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.deleteTable.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteTable_avg_time":{
+ "metric":"rpc.rpc.deleteTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteTable_num_ops":{
+ "metric":"rpc.rpc.deleteTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/delete_avg_time":{
+ "metric":"rpc.rpc.delete_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/delete_num_ops":{
+ "metric":"rpc.rpc.delete_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/disableTable/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.disableTable.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/disableTable/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.disableTable.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/disableTable_avg_time":{
+ "metric":"rpc.rpc.disableTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/disableTable_num_ops":{
+ "metric":"rpc.rpc.disableTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/enableTable/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.enableTable.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/enableTable/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.enableTable.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/enableTable_avg_time":{
+ "metric":"rpc.rpc.enableTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/enableTable_num_ops":{
+ "metric":"rpc.rpc.enableTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/execCoprocessor_avg_time":{
+ "metric":"rpc.rpc.execCoprocessor_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/execCoprocessor_num_ops":{
+ "metric":"rpc.rpc.execCoprocessor_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/exists_avg_time":{
+ "metric":"rpc.rpc.exists_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/exists_num_ops":{
+ "metric":"rpc.rpc.exists_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/flushRegion_avg_time":{
+ "metric":"rpc.rpc.flushRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/flushRegion_num_ops":{
+ "metric":"rpc.rpc.flushRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getAlterStatus/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getAlterStatus.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getAlterStatus/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getAlterStatus.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getAlterStatus_avg_time":{
+ "metric":"rpc.rpc.getAlterStatus_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getAlterStatus_num_ops":{
+ "metric":"rpc.rpc.getAlterStatus_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries_avg_time":{
+ "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries_num_ops":{
+ "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClosestRowBefore_avg_time":{
+ "metric":"rpc.rpc.getClosestRowBefore_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClosestRowBefore_num_ops":{
+ "metric":"rpc.rpc.getClosestRowBefore_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClusterStatus/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getClusterStatus.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClusterStatus/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getClusterStatus.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClusterStatus_avg_time":{
+ "metric":"rpc.rpc.getClusterStatus_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClusterStatus_num_ops":{
+ "metric":"rpc.rpc.getClusterStatus_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHServerInfo_avg_time":{
+ "metric":"rpc.rpc.getHServerInfo_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHServerInfo_num_ops":{
+ "metric":"rpc.rpc.getHServerInfo_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHTableDescriptors/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getHTableDescriptors.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHTableDescriptors/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getHTableDescriptors.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHTableDescriptors_avg_time":{
+ "metric":"rpc.rpc.getHTableDescriptors_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHTableDescriptors_num_ops":{
+ "metric":"rpc.rpc.getHTableDescriptors_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getOnlineRegions_avg_time":{
+ "metric":"rpc.rpc.getOnlineRegions_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getOnlineRegions_num_ops":{
+ "metric":"rpc.rpc.getOnlineRegions_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolSignature/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getProtocolSignature.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolSignature/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getProtocolSignature.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolSignature_avg_time":{
+ "metric":"rpc.rpc.getProtocolSignature_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolSignature_num_ops":{
+ "metric":"rpc.rpc.getProtocolSignature_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolVersion/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getProtocolVersion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolVersion/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getProtocolVersion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolVersion_avg_time":{
+ "metric":"rpc.rpc.getProtocolVersion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolVersion_num_ops":{
+ "metric":"rpc.rpc.getProtocolVersion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getRegionInfo_avg_time":{
+ "metric":"rpc.rpc.getRegionInfo_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getRegionInfo_num_ops":{
+ "metric":"rpc.rpc.getRegionInfo_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/get_avg_time":{
+ "metric":"rpc.rpc.get_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/get_num_ops":{
+ "metric":"rpc.rpc.get_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/incrementColumnValue_avg_time":{
+ "metric":"rpc.rpc.incrementColumnValue_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/incrementColumnValue_num_ops":{
+ "metric":"rpc.rpc.incrementColumnValue_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/increment_avg_time":{
+ "metric":"rpc.rpc.increment_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/increment_num_ops":{
+ "metric":"rpc.rpc.increment_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isMasterRunning/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.isMasterRunning.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isMasterRunning/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.isMasterRunning.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isMasterRunning_avg_time":{
+ "metric":"rpc.rpc.isMasterRunning_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isMasterRunning_num_ops":{
+ "metric":"rpc.rpc.isMasterRunning_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/lockRow_avg_time":{
+ "metric":"rpc.rpc.lockRow_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/lockRow_num_ops":{
+ "metric":"rpc.rpc.lockRow_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyColumn/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.modifyColumn.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyColumn/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.modifyColumn.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyColumn_avg_time":{
+ "metric":"rpc.rpc.modifyColumn_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyColumn_num_ops":{
+ "metric":"rpc.rpc.modifyColumn_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyTable/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.modifyTable.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyTable/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.modifyTable.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyTable_avg_time":{
+ "metric":"rpc.rpc.modifyTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyTable_num_ops":{
+ "metric":"rpc.rpc.modifyTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/move/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.move.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/move/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.move.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/move_avg_time":{
+ "metric":"rpc.rpc.move_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/move_num_ops":{
+ "metric":"rpc.rpc.move_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/multi_avg_time":{
+ "metric":"rpc.rpc.multi_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/multi_num_ops":{
+ "metric":"rpc.rpc.multi_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/next_avg_time":{
+ "metric":"rpc.rpc.next_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/next_num_ops":{
+ "metric":"rpc.rpc.next_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/offline/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.offline.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/offline/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.offline.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/offline_avg_time":{
+ "metric":"rpc.rpc.offline_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/offline_num_ops":{
+ "metric":"rpc.rpc.offline_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegion_avg_time":{
+ "metric":"rpc.rpc.openRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegion_num_ops":{
+ "metric":"rpc.rpc.openRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegions_avg_time":{
+ "metric":"rpc.rpc.openRegions_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegions_num_ops":{
+ "metric":"rpc.rpc.openRegions_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openScanner_avg_time":{
+ "metric":"rpc.rpc.openScanner_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openScanner_num_ops":{
+ "metric":"rpc.rpc.openScanner_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/put_avg_time":{
+ "metric":"rpc.rpc.put_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/put_num_ops":{
+ "metric":"rpc.rpc.put_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerReport/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.regionServerReport.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerReport/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.regionServerReport.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerReport_avg_time":{
+ "metric":"rpc.rpc.regionServerReport_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerReport_num_ops":{
+ "metric":"rpc.rpc.regionServerReport_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerStartup/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.regionServerStartup.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerStartup/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.regionServerStartup.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerStartup_avg_time":{
+ "metric":"rpc.rpc.regionServerStartup_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerStartup_num_ops":{
+ "metric":"rpc.rpc.regionServerStartup_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/replicateLogEntries_avg_time":{
+ "metric":"rpc.rpc.replicateLogEntries_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/replicateLogEntries_num_ops":{
+ "metric":"rpc.rpc.replicateLogEntries_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/reportRSFatalError/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.reportRSFatalError.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/reportRSFatalError/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.reportRSFatalError.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/reportRSFatalError_avg_time":{
+ "metric":"rpc.rpc.reportRSFatalError_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/reportRSFatalError_num_ops":{
+ "metric":"rpc.rpc.reportRSFatalError_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rollHLogWriter_avg_time":{
+ "metric":"rpc.rpc.rollHLogWriter_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rollHLogWriter_num_ops":{
+ "metric":"rpc.rpc.rollHLogWriter_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthenticationFailures":{
+ "metric":"rpc.rpc.rpcAuthenticationFailures",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthenticationSuccesses":{
+ "metric":"rpc.rpc.rpcAuthenticationSuccesses",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthorizationFailures":{
+ "metric":"rpc.rpc.rpcAuthorizationFailures",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthorizationSuccesses":{
+ "metric":"rpc.rpc.rpcAuthorizationSuccesses",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/shutdown/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.shutdown.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/shutdown/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.shutdown.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/shutdown_avg_time":{
+ "metric":"rpc.rpc.shutdown_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/shutdown_num_ops":{
+ "metric":"rpc.rpc.shutdown_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/splitRegion_avg_time":{
+ "metric":"rpc.rpc.splitRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/splitRegion_num_ops":{
+ "metric":"rpc.rpc.splitRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stopMaster/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.stopMaster.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stopMaster/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.stopMaster.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stopMaster_avg_time":{
+ "metric":"rpc.rpc.stopMaster_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stopMaster_num_ops":{
+ "metric":"rpc.rpc.stopMaster_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stop_avg_time":{
+ "metric":"rpc.rpc.stop_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stop_num_ops":{
+ "metric":"rpc.rpc.stop_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unassign/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.unassign.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unassign/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.unassign.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unassign_avg_time":{
+ "metric":"rpc.rpc.unassign_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unassign_num_ops":{
+ "metric":"rpc.rpc.unassign_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unlockRow_avg_time":{
+ "metric":"rpc.rpc.unlockRow_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unlockRow_num_ops":{
+ "metric":"rpc.rpc.unlockRow_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ }
+ },
+ "HBASE_CLIENT":{
+ "metrics/boottime":{
+ "metric":"boottime",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_aidle":{
+ "metric":"cpu_aidle",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_idle":{
+ "metric":"cpu_idle",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_nice":{
+ "metric":"cpu_nice",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_num":{
+ "metric":"cpu_num",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_speed":{
+ "metric":"cpu_speed",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_system":{
+ "metric":"cpu_system",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_user":{
+ "metric":"cpu_user",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_wio":{
+ "metric":"cpu_wio",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/disk/disk_free":{
+ "metric":"disk_free",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/disk/disk_total":{
+ "metric":"disk_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/disk/part_max_used":{
+ "metric":"part_max_used",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/gcCount":{
+ "metric":"jvm.JvmMetrics.gcCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/gcTimeMillis":{
+ "metric":"jvm.JvmMetrics.gcTimeMillis",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/logError":{
+ "metric":"jvm.JvmMetrics.logError",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/logFatal":{
+ "metric":"jvm.JvmMetrics.logFatal",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/logInfo":{
+ "metric":"jvm.JvmMetrics.logInfo",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/logWarn":{
+ "metric":"jvm.JvmMetrics.logWarn",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/maxMemoryM":{
+ "metric":"jvm.JvmMetrics.maxMemoryM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memHeapCommittedM":{
+ "metric":"jvm.JvmMetrics.memHeapCommittedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memHeapUsedM":{
+ "metric":"jvm.JvmMetrics.memHeapUsedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memNonHeapCommittedM":{
+ "metric":"jvm.JvmMetrics.memNonHeapCommittedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memNonHeapUsedM":{
+ "metric":"jvm.JvmMetrics.memNonHeapUsedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsBlocked":{
+ "metric":"jvm.JvmMetrics.threadsBlocked",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsNew":{
+ "metric":"jvm.JvmMetrics.threadsNew",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsRunnable":{
+ "metric":"jvm.JvmMetrics.threadsRunnable",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsTerminated":{
+ "metric":"jvm.JvmMetrics.threadsTerminated",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsTimedWaiting":{
+ "metric":"jvm.JvmMetrics.threadsTimedWaiting",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsWaiting":{
+ "metric":"jvm.JvmMetrics.threadsWaiting",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/load/load_fifteen":{
+ "metric":"load_fifteen",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/load/load_five":{
+ "metric":"load_five",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/load/load_one":{
+ "metric":"load_one",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_buffers":{
+ "metric":"mem_buffers",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_cached":{
+ "metric":"mem_cached",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_free":{
+ "metric":"mem_free",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_shared":{
+ "metric":"mem_shared",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_total":{
+ "metric":"mem_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/swap_free":{
+ "metric":"swap_free",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/swap_total":{
+ "metric":"swap_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/bytes_in":{
+ "metric":"bytes_in",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/bytes_out":{
+ "metric":"bytes_out",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/pkts_in":{
+ "metric":"pkts_in",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/pkts_out":{
+ "metric":"pkts_out",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/process/proc_run":{
+ "metric":"proc_run",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/process/proc_total":{
+ "metric":"proc_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/NumOpenConnections":{
+ "metric":"rpc.rpc.NumOpenConnections",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/ReceivedBytes":{
+ "metric":"rpc.rpc.ReceivedBytes",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcProcessingTime_avg_time":{
+ "metric":"rpc.rpc.RpcProcessingTime_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcProcessingTime_num_ops":{
+ "metric":"rpc.rpc.RpcProcessingTime_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcQueueTime_avg_time":{
+ "metric":"rpc.rpc.RpcQueueTime_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcQueueTime_num_ops":{
+ "metric":"rpc.rpc.RpcQueueTime_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcSlowResponse_avg_time":{
+ "metric":"rpc.rpc.RpcSlowResponse_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcSlowResponse_num_ops":{
+ "metric":"rpc.rpc.RpcSlowResponse_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/SentBytes":{
+ "metric":"rpc.rpc.SentBytes",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/abort/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.abort.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/abort/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.abort.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/abort_avg_time":{
+ "metric":"rpc.rpc.abort_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/abort_num_ops":{
+ "metric":"rpc.rpc.abort_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addColumn_avg_time":{
+ "metric":"rpc.rpc.addColumn_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addColumn_num_ops":{
+ "metric":"rpc.rpc.addColumn_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addToOnlineRegions/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.addToOnlineRegions.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addToOnlineRegions/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.addToOnlineRegions.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addToOnlineRegions_avg_time":{
+ "metric":"rpc.rpc.addToOnlineRegions_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addToOnlineRegions_num_ops":{
+ "metric":"rpc.rpc.addToOnlineRegions_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/assign_avg_time":{
+ "metric":"rpc.rpc.assign_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/assign_num_ops":{
+ "metric":"rpc.rpc.assign_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balanceSwitch_avg_time":{
+ "metric":"rpc.rpc.balanceSwitch_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balanceSwitch_num_ops":{
+ "metric":"rpc.rpc.balanceSwitch_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balance_avg_time":{
+ "metric":"rpc.rpc.balance_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balance_num_ops":{
+ "metric":"rpc.rpc.balance_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/bulkLoadHFiles/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.bulkLoadHFiles.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/bulkLoadHFiles/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.bulkLoadHFiles.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/bulkLoadHFiles_avg_time":{
+ "metric":"rpc.rpc.bulkLoadHFiles_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/bulkLoadHFiles_num_ops":{
+ "metric":"rpc.rpc.bulkLoadHFiles_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/callQueueLen":{
+ "metric":"rpc.rpc.callQueueLen",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndDelete/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.checkAndDelete.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndDelete/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.checkAndDelete.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndDelete_avg_time":{
+ "metric":"rpc.rpc.checkAndDelete_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndDelete_num_ops":{
+ "metric":"rpc.rpc.checkAndDelete_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndPut/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.checkAndPut.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndPut/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.checkAndPut.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndPut_avg_time":{
+ "metric":"rpc.rpc.checkAndPut_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndPut_num_ops":{
+ "metric":"rpc.rpc.checkAndPut_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkOOME/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.checkOOME.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkOOME/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.checkOOME.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkOOME_avg_time":{
+ "metric":"rpc.rpc.checkOOME_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkOOME_num_ops":{
+ "metric":"rpc.rpc.checkOOME_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/close/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.close.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/close/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.close.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/closeRegion/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.closeRegion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/closeRegion/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.closeRegion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/closeRegion_avg_time":{
+ "metric":"rpc.rpc.closeRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/closeRegion_num_ops":{
+ "metric":"rpc.rpc.closeRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/close_avg_time":{
+ "metric":"rpc.rpc.close_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/close_num_ops":{
+ "metric":"rpc.rpc.close_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/compactRegion/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.compactRegion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/compactRegion/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.compactRegion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/compactRegion_avg_time":{
+ "metric":"rpc.rpc.compactRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/compactRegion_num_ops":{
+ "metric":"rpc.rpc.compactRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/createTable_avg_time":{
+ "metric":"rpc.rpc.createTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/createTable_num_ops":{
+ "metric":"rpc.rpc.createTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/delete/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.delete.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/delete/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.delete.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteColumn_avg_time":{
+ "metric":"rpc.rpc.deleteColumn_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteColumn_num_ops":{
+ "metric":"rpc.rpc.deleteColumn_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteTable_avg_time":{
+ "metric":"rpc.rpc.deleteTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteTable_num_ops":{
+ "metric":"rpc.rpc.deleteTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/delete_avg_time":{
+ "metric":"rpc.rpc.delete_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/delete_num_ops":{
+ "metric":"rpc.rpc.delete_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/disableTable_avg_time":{
+ "metric":"rpc.rpc.disableTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/disableTable_num_ops":{
+ "metric":"rpc.rpc.disableTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/enableTable_avg_time":{
+ "metric":"rpc.rpc.enableTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/enableTable_num_ops":{
+ "metric":"rpc.rpc.enableTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/execCoprocessor/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.execCoprocessor.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/execCoprocessor/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.execCoprocessor.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/execCoprocessor_avg_time":{
+ "metric":"rpc.rpc.execCoprocessor_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/execCoprocessor_num_ops":{
+ "metric":"rpc.rpc.execCoprocessor_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/exists/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.exists.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/exists/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.exists.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/exists_avg_time":{
+ "metric":"rpc.rpc.exists_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/exists_num_ops":{
+ "metric":"rpc.rpc.exists_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/flushRegion/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.flushRegion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/flushRegion/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.flushRegion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/flushRegion_avg_time":{
+ "metric":"rpc.rpc.flushRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/flushRegion_num_ops":{
+ "metric":"rpc.rpc.flushRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/get/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.get.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/get/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.get.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getAlterStatus_avg_time":{
+ "metric":"rpc.rpc.getAlterStatus_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getAlterStatus_num_ops":{
+ "metric":"rpc.rpc.getAlterStatus_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries_avg_time":{
+ "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries_num_ops":{
+ "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getCatalogTracker/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getCatalogTracker.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getCatalogTracker/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getCatalogTracker.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getCatalogTracker_avg_time":{
+ "metric":"rpc.rpc.getCatalogTracker_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getCatalogTracker_num_ops":{
+ "metric":"rpc.rpc.getCatalogTracker_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClosestRowBefore/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getClosestRowBefore.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClosestRowBefore/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getClosestRowBefore.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClosestRowBefore_avg_time":{
+ "metric":"rpc.rpc.getClosestRowBefore_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClosestRowBefore_num_ops":{
+ "metric":"rpc.rpc.getClosestRowBefore_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClusterStatus_avg_time":{
+ "metric":"rpc.rpc.getClusterStatus_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClusterStatus_num_ops":{
+ "metric":"rpc.rpc.getClusterStatus_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getConfiguration/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getConfiguration.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getConfiguration/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getConfiguration.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getConfiguration_avg_time":{
+ "metric":"rpc.rpc.getConfiguration_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getConfiguration_num_ops":{
+ "metric":"rpc.rpc.getConfiguration_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getFromOnlineRegions/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getFromOnlineRegions.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getFromOnlineRegions/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getFromOnlineRegions.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getFromOnlineRegions_avg_time":{
+ "metric":"rpc.rpc.getFromOnlineRegions_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getFromOnlineRegions_num_ops":{
+ "metric":"rpc.rpc.getFromOnlineRegions_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHServerInfo/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getHServerInfo.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHServerInfo/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getHServerInfo.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHServerInfo_avg_time":{
+ "metric":"rpc.rpc.getHServerInfo_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHServerInfo_num_ops":{
+ "metric":"rpc.rpc.getHServerInfo_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHTableDescriptors_avg_time":{
+ "metric":"rpc.rpc.getHTableDescriptors_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHTableDescriptors_num_ops":{
+ "metric":"rpc.rpc.getHTableDescriptors_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getOnlineRegions/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getOnlineRegions.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getOnlineRegions/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getOnlineRegions.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getOnlineRegions_avg_time":{
+ "metric":"rpc.rpc.getOnlineRegions_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getOnlineRegions_num_ops":{
+ "metric":"rpc.rpc.getOnlineRegions_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolSignature/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getProtocolSignature.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolSignature/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getProtocolSignature.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolSignature_avg_time":{
+ "metric":"rpc.rpc.getProtocolSignature_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolSignature_num_ops":{
+ "metric":"rpc.rpc.getProtocolSignature_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolVersion/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getProtocolVersion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolVersion/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getProtocolVersion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolVersion_avg_time":{
+ "metric":"rpc.rpc.getProtocolVersion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolVersion_num_ops":{
+ "metric":"rpc.rpc.getProtocolVersion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getRegionInfo/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getRegionInfo.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getRegionInfo/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getRegionInfo.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getRegionInfo_avg_time":{
+ "metric":"rpc.rpc.getRegionInfo_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getRegionInfo_num_ops":{
+ "metric":"rpc.rpc.getRegionInfo_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getServerName/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getServerName.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getServerName/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getServerName.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getServerName_avg_time":{
+ "metric":"rpc.rpc.getServerName_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getServerName_num_ops":{
+ "metric":"rpc.rpc.getServerName_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getZooKeeper/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getZooKeeper.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getZooKeeper/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getZooKeeper.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getZooKeeper_avg_time":{
+ "metric":"rpc.rpc.getZooKeeper_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getZooKeeper_num_ops":{
+ "metric":"rpc.rpc.getZooKeeper_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/get_avg_time":{
+ "metric":"rpc.rpc.get_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/get_num_ops":{
+ "metric":"rpc.rpc.get_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/increment/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.increment.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/increment/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.increment.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/incrementColumnValue/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.incrementColumnValue.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/incrementColumnValue/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.incrementColumnValue.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/incrementColumnValue_avg_time":{
+ "metric":"rpc.rpc.incrementColumnValue_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/incrementColumnValue_num_ops":{
+ "metric":"rpc.rpc.incrementColumnValue_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/increment_avg_time":{
+ "metric":"rpc.rpc.increment_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/increment_num_ops":{
+ "metric":"rpc.rpc.increment_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isAborted/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.isAborted.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isAborted/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.isAborted.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isAborted_avg_time":{
+ "metric":"rpc.rpc.isAborted_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isAborted_num_ops":{
+ "metric":"rpc.rpc.isAborted_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isMasterRunning_avg_time":{
+ "metric":"rpc.rpc.isMasterRunning_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isMasterRunning_num_ops":{
+ "metric":"rpc.rpc.isMasterRunning_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isStopped/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.isStopped.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isStopped/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.isStopped.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isStopped_avg_time":{
+ "metric":"rpc.rpc.isStopped_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isStopped_num_ops":{
+ "metric":"rpc.rpc.isStopped_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/lockRow/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.lockRow.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/lockRow/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.lockRow.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/lockRow_avg_time":{
+ "metric":"rpc.rpc.lockRow_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/lockRow_num_ops":{
+ "metric":"rpc.rpc.lockRow_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyColumn_avg_time":{
+ "metric":"rpc.rpc.modifyColumn_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyColumn_num_ops":{
+ "metric":"rpc.rpc.modifyColumn_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyTable_avg_time":{
+ "metric":"rpc.rpc.modifyTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyTable_num_ops":{
+ "metric":"rpc.rpc.modifyTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/move_avg_time":{
+ "metric":"rpc.rpc.move_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/move_num_ops":{
+ "metric":"rpc.rpc.move_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/multi/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.multi.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/multi/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.multi.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/multi_avg_time":{
+ "metric":"rpc.rpc.multi_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/multi_num_ops":{
+ "metric":"rpc.rpc.multi_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/next/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.next.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/next/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.next.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/next_avg_time":{
+ "metric":"rpc.rpc.next_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/next_num_ops":{
+ "metric":"rpc.rpc.next_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/offline_avg_time":{
+ "metric":"rpc.rpc.offline_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/offline_num_ops":{
+ "metric":"rpc.rpc.offline_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegion/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.openRegion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegion/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.openRegion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegion_avg_time":{
+ "metric":"rpc.rpc.openRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegion_num_ops":{
+ "metric":"rpc.rpc.openRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegions/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.openRegions.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegions/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.openRegions.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegions_avg_time":{
+ "metric":"rpc.rpc.openRegions_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegions_num_ops":{
+ "metric":"rpc.rpc.openRegions_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openScanner/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.openScanner.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openScanner/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.openScanner.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openScanner_avg_time":{
+ "metric":"rpc.rpc.openScanner_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openScanner_num_ops":{
+ "metric":"rpc.rpc.openScanner_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/put/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.put.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/put/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.put.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/put_avg_time":{
+ "metric":"rpc.rpc.put_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/put_num_ops":{
+ "metric":"rpc.rpc.put_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerReport_avg_time":{
+ "metric":"rpc.rpc.regionServerReport_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerReport_num_ops":{
+ "metric":"rpc.rpc.regionServerReport_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerStartup_avg_time":{
+ "metric":"rpc.rpc.regionServerStartup_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerStartup_num_ops":{
+ "metric":"rpc.rpc.regionServerStartup_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.removeFromOnlineRegions.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.removeFromOnlineRegions.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/removeFromOnlineRegions_avg_time":{
+ "metric":"rpc.rpc.removeFromOnlineRegions_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/removeFromOnlineRegions_num_ops":{
+ "metric":"rpc.rpc.removeFromOnlineRegions_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/replicateLogEntries/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.replicateLogEntries.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/replicateLogEntries/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.replicateLogEntries.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/replicateLogEntries_avg_time":{
+ "metric":"rpc.rpc.replicateLogEntries_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/replicateLogEntries_num_ops":{
+ "metric":"rpc.rpc.replicateLogEntries_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/reportRSFatalError_avg_time":{
+ "metric":"rpc.rpc.reportRSFatalError_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/reportRSFatalError_num_ops":{
+ "metric":"rpc.rpc.reportRSFatalError_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rollHLogWriter/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.rollHLogWriter.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rollHLogWriter/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.rollHLogWriter.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rollHLogWriter_avg_time":{
+ "metric":"rpc.rpc.rollHLogWriter_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rollHLogWriter_num_ops":{
+ "metric":"rpc.rpc.rollHLogWriter_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthenticationFailures":{
+ "metric":"rpc.rpc.rpcAuthenticationFailures",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthenticationSuccesses":{
+ "metric":"rpc.rpc.rpcAuthenticationSuccesses",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthorizationFailures":{
+ "metric":"rpc.rpc.rpcAuthorizationFailures",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthorizationSuccesses":{
+ "metric":"rpc.rpc.rpcAuthorizationSuccesses",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/shutdown_avg_time":{
+ "metric":"rpc.rpc.shutdown_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/shutdown_num_ops":{
+ "metric":"rpc.rpc.shutdown_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/splitRegion/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.splitRegion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/splitRegion/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.splitRegion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/splitRegion_avg_time":{
+ "metric":"rpc.rpc.splitRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/splitRegion_num_ops":{
+ "metric":"rpc.rpc.splitRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stop/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.stop.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stop/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.stop.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stopMaster_avg_time":{
+ "metric":"rpc.rpc.stopMaster_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stopMaster_num_ops":{
+ "metric":"rpc.rpc.stopMaster_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stop_avg_time":{
+ "metric":"rpc.rpc.stop_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stop_num_ops":{
+ "metric":"rpc.rpc.stop_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unassign_avg_time":{
+ "metric":"rpc.rpc.unassign_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unassign_num_ops":{
+ "metric":"rpc.rpc.unassign_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unlockRow/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.unlockRow.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unlockRow/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.unlockRow.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unlockRow_avg_time":{
+ "metric":"rpc.rpc.unlockRow_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unlockRow_num_ops":{
+ "metric":"rpc.rpc.unlockRow_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/ugi/loginFailure_avg_time":{
+ "metric":"ugi.ugi.loginFailure_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/ugi/loginFailure_num_ops":{
+ "metric":"ugi.ugi.loginFailure_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/ugi/loginSuccess_avg_time":{
+ "metric":"ugi.ugi.loginSuccess_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/ugi/loginSuccess_num_ops":{
+ "metric":"ugi.ugi.loginSuccess_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ }
+ },
+ "HBASE_REGIONSERVER":{
+ "metrics/hbase/regionserver/mutationsWithoutWALSize": {
+ "metric": "regionserver.Server.mutationsWithoutWALSize",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/hbase/regionserver/slowAppendCount": {
+ "metric": "regionserver.Server.slowAppendCount",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/hbase/regionserver/slowIncrementCount": {
+ "metric": "regionserver.Server.slowIncrementCount",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/hbase/regionserver/slowPutCount": {
+ "metric": "regionserver.Server.slowPutCount",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/hbase/regionserver/mutationsWithoutWALCount": {
+ "metric": "regionserver.Server.mutationsWithoutWALCount",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/hbase/regionserver/percentFilesLocal": {
+ "metric": "regionserver.Server.percentFilesLocal",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/hbase/regionserver/slowGetCount": {
+ "metric": "regionserver.Server.slowGetCount",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/hbase/regionserver/slowDeleteCount": {
+ "metric": "regionserver.Server.slowDeleteCount",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/boottime":{
+ "metric":"boottime",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_aidle":{
+ "metric":"cpu_aidle",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_idle":{
+ "metric":"cpu_idle",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_nice":{
+ "metric":"cpu_nice",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_num":{
+ "metric":"cpu_num",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_speed":{
+ "metric":"cpu_speed",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_system":{
+ "metric":"cpu_system",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_user":{
+ "metric":"cpu_user",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_wio":{
+ "metric":"cpu_wio",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/disk/disk_free":{
+ "metric":"disk_free",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/disk/disk_total":{
+ "metric":"disk_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/disk/part_max_used":{
+ "metric":"part_max_used",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/blockCacheCount":{
+ "metric":"regionserver.Server.blockCacheCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/blockCacheEvictedCount":{
+ "metric":"regionserver.Server.blockCacheEvictedCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/blockCacheFree":{
+ "metric":"regionserver.Server.blockCacheFree",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/blockCacheHitCachingRatio":{
+ "metric":"regionserver.Server.blockCacheHitCachingRatio",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/blockCacheHitCount":{
+ "metric":"regionserver.Server.blockCacheHitCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/blockCacheHitRatio":{
+ "metric":"regionserver.Server.blockCacheHitRatio",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/blockCacheMissCount":{
+ "metric":"regionserver.Server.blockCacheMissCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/blockCacheSize":{
+ "metric":"regionserver.Server.blockCacheSize",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/compactionQueueSize":{
+ "metric":"regionserver.Server.compactionQueueLength",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/compactionSize_avg_time":{
+ "metric":"regionserver.Server.compactionSize_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/compactionSize_num_ops":{
+ "metric":"regionserver.Server.compactionSize_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/compactionTime_avg_time":{
+ "metric":"regionserver.Server.compactionTime_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/compactionTime_num_ops":{
+ "metric":"regionserver.Server.compactionTime_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/deleteRequestLatency_75th_percentile":{
+ "metric":"regionserver.Server.deleteRequestLatency_75th_percentile",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/deleteRequestLatency_95th_percentile":{
+ "metric":"regionserver.Server.deleteRequestLatency_95th_percentile",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/deleteRequestLatency_99th_percentile":{
+ "metric":"regionserver.Server.deleteRequestLatency_99th_percentile",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/deleteRequestLatency_max":{
+ "metric":"regionserver.Server.deleteRequestLatency_max",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/deleteRequestLatency_mean":{
+ "metric":"regionserver.Server.deleteRequestLatency_mean",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/deleteRequestLatency_median":{
+ "metric":"regionserver.Server.deleteRequestLatency_median",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/deleteRequestLatency_min":{
+ "metric":"regionserver.Server.deleteRequestLatency_min",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/deleteRequestLatency_num_ops":{
+ "metric":"regionserver.Server.deleteRequestLatency_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/deleteRequestLatency_std_dev":{
+ "metric":"regionserver.Server.deleteRequestLatency_std_dev",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/flushQueueSize":{
+ "metric":"regionserver.Server.flushQueueLength",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/flushSize_avg_time":{
+ "metric":"regionserver.Server.flushSize_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/flushSize_num_ops":{
+ "metric":"regionserver.Server.flushSize_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/flushTime_avg_time":{
+ "metric":"regionserver.Server.flushTime_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/flushTime_num_ops":{
+ "metric":"regionserver.Server.flushTime_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsReadLatencyHistogram_75th_percentile":{
+ "metric":"regionserver.Server.fsReadLatencyHistogram_75th_percentile",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsReadLatencyHistogram_95th_percentile":{
+ "metric":"regionserver.Server.fsReadLatencyHistogram_95th_percentile",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsReadLatencyHistogram_99th_percentile":{
+ "metric":"regionserver.Server.fsReadLatencyHistogram_99th_percentile",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsReadLatencyHistogram_max":{
+ "metric":"regionserver.Server.fsReadLatencyHistogram_max",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsReadLatencyHistogram_mean":{
+ "metric":"regionserver.Server.fsReadLatencyHistogram_mean",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsReadLatencyHistogram_median":{
+ "metric":"regionserver.Server.fsReadLatencyHistogram_median",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsReadLatencyHistogram_min":{
+ "metric":"regionserver.Server.fsReadLatencyHistogram_min",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsReadLatencyHistogram_num_ops":{
+ "metric":"regionserver.Server.fsReadLatencyHistogram_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsReadLatencyHistogram_std_dev":{
+ "metric":"regionserver.Server.fsReadLatencyHistogram_std_dev",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsReadLatency_avg_time":{
+ "metric":"regionserver.Server.fsReadLatency_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsReadLatency_num_ops":{
+ "metric":"regionserver.Server.fsReadLatency_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsSyncLatency_avg_time":{
+ "metric":"regionserver.Server.fsSyncLatency_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsSyncLatency_num_ops":{
+ "metric":"regionserver.Server.fsSyncLatency_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsWriteLatencyHistogram_75th_percentile":{
+ "metric":"regionserver.Server.fsWriteLatencyHistogram_75th_percentile",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsWriteLatencyHistogram_95th_percentile":{
+ "metric":"regionserver.Server.fsWriteLatencyHistogram_95th_percentile",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsWriteLatencyHistogram_99th_percentile":{
+ "metric":"regionserver.Server.fsWriteLatencyHistogram_99th_percentile",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsWriteLatencyHistogram_max":{
+ "metric":"regionserver.Server.fsWriteLatencyHistogram_max",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsWriteLatencyHistogram_mean":{
+ "metric":"regionserver.Server.fsWriteLatencyHistogram_mean",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsWriteLatencyHistogram_median":{
+ "metric":"regionserver.Server.fsWriteLatencyHistogram_median",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsWriteLatencyHistogram_min":{
+ "metric":"regionserver.Server.fsWriteLatencyHistogram_min",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsWriteLatencyHistogram_num_ops":{
+ "metric":"regionserver.Server.fsWriteLatencyHistogram_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsWriteLatencyHistogram_std_dev":{
+ "metric":"regionserver.Server.fsWriteLatencyHistogram_std_dev",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsWriteLatency_avg_time":{
+ "metric":"regionserver.Server.fsWriteLatency_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/fsWriteLatency_num_ops":{
+ "metric":"regionserver.Server.fsWriteLatency_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/getRequestLatency_75th_percentile":{
+ "metric":"regionserver.Server.getRequestLatency_75th_percentile",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/getRequestLatency_95th_percentile":{
+ "metric":"regionserver.Server.getRequestLatency_95th_percentile",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/getRequestLatency_99th_percentile":{
+ "metric":"regionserver.Server.getRequestLatency_99th_percentile",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/getRequestLatency_max":{
+ "metric":"regionserver.Server.getRequestLatency_max",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/getRequestLatency_mean":{
+ "metric":"regionserver.Server.getRequestLatency_mean",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/getRequestLatency_median":{
+ "metric":"regionserver.Server.getRequestLatency_median",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/getRequestLatency_min":{
+ "metric":"regionserver.Server.getRequestLatency_min",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/getRequestLatency_num_ops":{
+ "metric":"regionserver.Server.getRequestLatency_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/getRequestLatency_std_dev":{
+ "metric":"regionserver.Server.getRequestLatency_std_dev",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/hdfsBlocksLocalityIndex":{
+ "metric":"regionserver.Server.hdfsBlocksLocalityIndex",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/hlogFileCount":{
+ "metric":"regionserver.Server.hlogFileCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/memstoreSizeMB":{
+ "metric":"regionserver.Server.memstoreSizeMB",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/putRequestLatency_75th_percentile":{
+ "metric":"regionserver.Server.putRequestLatency_75th_percentile",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/putRequestLatency_95th_percentile":{
+ "metric":"regionserver.Server.putRequestLatency_95th_percentile",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/putRequestLatency_99th_percentile":{
+ "metric":"regionserver.Server.putRequestLatency_99th_percentile",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/putRequestLatency_max":{
+ "metric":"regionserver.Server.putRequestLatency_max",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/putRequestLatency_mean":{
+ "metric":"regionserver.Server.putRequestLatency_mean",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/putRequestLatency_median":{
+ "metric":"regionserver.Server.putRequestLatency_median",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/putRequestLatency_min":{
+ "metric":"regionserver.Server.putRequestLatency_min",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/putRequestLatency_num_ops":{
+ "metric":"regionserver.Server.putRequestLatency_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/putRequestLatency_std_dev":{
+ "metric":"regionserver.Server.putRequestLatency_std_dev",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/readRequestsCount":{
+ "metric":"regionserver.Server.readRequestCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/regions":{
+ "metric":"regionserver.Server.regionCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/requests":{
+ "metric":"regionserver.Server.requests",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/rootIndexSizeKB":{
+ "metric":"regionserver.Server.rootIndexSizeKB",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/storefileIndexSizeMB":{
+ "metric":"regionserver.Server.storefileIndexSizeMB",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/storefiles":{
+ "metric":"regionserver.Server.storefiles",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/stores":{
+ "metric":"regionserver.Server.stores",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/totalStaticBloomSizeKB":{
+ "metric":"regionserver.Server.totalStaticBloomSizeKB",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/totalStaticIndexSizeKB":{
+ "metric":"regionserver.Server.totalStaticIndexSizeKB",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/hbase/regionserver/writeRequestsCount":{
+ "metric":"regionserver.Server.writeRequestCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/gcCount":{
+ "metric":"jvm.JvmMetrics.gcCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/gcTimeMillis":{
+ "metric":"jvm.JvmMetrics.gcTimeMillis",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/logError":{
+ "metric":"jvm.JvmMetrics.logError",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/logFatal":{
+ "metric":"jvm.JvmMetrics.logFatal",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/logInfo":{
+ "metric":"jvm.JvmMetrics.logInfo",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/logWarn":{
+ "metric":"jvm.JvmMetrics.logWarn",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/maxMemoryM":{
+ "metric":"jvm.JvmMetrics.maxMemoryM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memHeapCommittedM":{
+ "metric":"jvm.JvmMetrics.memHeapCommittedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memHeapUsedM":{
+ "metric":"jvm.JvmMetrics.memHeapUsedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memNonHeapCommittedM":{
+ "metric":"jvm.JvmMetrics.memNonHeapCommittedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memNonHeapUsedM":{
+ "metric":"jvm.JvmMetrics.memNonHeapUsedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsBlocked":{
+ "metric":"jvm.JvmMetrics.threadsBlocked",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsNew":{
+ "metric":"jvm.JvmMetrics.threadsNew",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsRunnable":{
+ "metric":"jvm.JvmMetrics.threadsRunnable",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsTerminated":{
+ "metric":"jvm.JvmMetrics.threadsTerminated",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsTimedWaiting":{
+ "metric":"jvm.JvmMetrics.threadsTimedWaiting",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsWaiting":{
+ "metric":"jvm.JvmMetrics.threadsWaiting",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/load/load_fifteen":{
+ "metric":"load_fifteen",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/load/load_five":{
+ "metric":"load_five",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/load/load_one":{
+ "metric":"load_one",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_buffers":{
+ "metric":"mem_buffers",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_cached":{
+ "metric":"mem_cached",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_free":{
+ "metric":"mem_free",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_shared":{
+ "metric":"mem_shared",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/mem_total":{
+ "metric":"mem_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/swap_free":{
+ "metric":"swap_free",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/memory/swap_total":{
+ "metric":"swap_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/bytes_in":{
+ "metric":"bytes_in",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/bytes_out":{
+ "metric":"bytes_out",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/pkts_in":{
+ "metric":"pkts_in",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/network/pkts_out":{
+ "metric":"pkts_out",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/process/proc_run":{
+ "metric":"proc_run",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/process/proc_total":{
+ "metric":"proc_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/NumOpenConnections":{
+ "metric":"rpc.rpc.NumOpenConnections",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/ReceivedBytes":{
+ "metric":"rpc.rpc.ReceivedBytes",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcProcessingTime_avg_time":{
+ "metric":"rpc.rpc.RpcProcessingTime_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcProcessingTime_num_ops":{
+ "metric":"rpc.rpc.RpcProcessingTime_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcQueueTime_avg_time":{
+ "metric":"rpc.rpc.RpcQueueTime_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcQueueTime_num_ops":{
+ "metric":"rpc.rpc.RpcQueueTime_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcSlowResponse_avg_time":{
+ "metric":"rpc.rpc.RpcSlowResponse_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/RpcSlowResponse_num_ops":{
+ "metric":"rpc.rpc.RpcSlowResponse_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/SentBytes":{
+ "metric":"rpc.rpc.SentBytes",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/abort/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.abort.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/abort/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.abort.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/abort_avg_time":{
+ "metric":"rpc.rpc.abort_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/abort_num_ops":{
+ "metric":"rpc.rpc.abort_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addColumn_avg_time":{
+ "metric":"rpc.rpc.addColumn_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addColumn_num_ops":{
+ "metric":"rpc.rpc.addColumn_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addToOnlineRegions/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.addToOnlineRegions.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addToOnlineRegions/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.addToOnlineRegions.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addToOnlineRegions_avg_time":{
+ "metric":"rpc.rpc.addToOnlineRegions_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/addToOnlineRegions_num_ops":{
+ "metric":"rpc.rpc.addToOnlineRegions_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/assign_avg_time":{
+ "metric":"rpc.rpc.assign_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/assign_num_ops":{
+ "metric":"rpc.rpc.assign_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balanceSwitch_avg_time":{
+ "metric":"rpc.rpc.balanceSwitch_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balanceSwitch_num_ops":{
+ "metric":"rpc.rpc.balanceSwitch_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balance_avg_time":{
+ "metric":"rpc.rpc.balance_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/balance_num_ops":{
+ "metric":"rpc.rpc.balance_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/bulkLoadHFiles/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.bulkLoadHFiles.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/bulkLoadHFiles/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.bulkLoadHFiles.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/bulkLoadHFiles_avg_time":{
+ "metric":"rpc.rpc.bulkLoadHFiles_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/bulkLoadHFiles_num_ops":{
+ "metric":"rpc.rpc.bulkLoadHFiles_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/callQueueLen":{
+ "metric":"rpc.rpc.callQueueLen",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndDelete/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.checkAndDelete.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndDelete/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.checkAndDelete.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndDelete_avg_time":{
+ "metric":"rpc.rpc.checkAndDelete_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndDelete_num_ops":{
+ "metric":"rpc.rpc.checkAndDelete_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndPut/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.checkAndPut.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndPut/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.checkAndPut.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndPut_avg_time":{
+ "metric":"rpc.rpc.checkAndPut_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkAndPut_num_ops":{
+ "metric":"rpc.rpc.checkAndPut_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkOOME/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.checkOOME.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkOOME/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.checkOOME.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkOOME_avg_time":{
+ "metric":"rpc.rpc.checkOOME_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/checkOOME_num_ops":{
+ "metric":"rpc.rpc.checkOOME_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/close/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.close.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/close/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.close.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/closeRegion/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.closeRegion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/closeRegion/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.closeRegion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/closeRegion_avg_time":{
+ "metric":"rpc.rpc.closeRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/closeRegion_num_ops":{
+ "metric":"rpc.rpc.closeRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/close_avg_time":{
+ "metric":"rpc.rpc.close_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/close_num_ops":{
+ "metric":"rpc.rpc.close_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/compactRegion/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.compactRegion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/compactRegion/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.compactRegion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/compactRegion_avg_time":{
+ "metric":"rpc.rpc.compactRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/compactRegion_num_ops":{
+ "metric":"rpc.rpc.compactRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/createTable_avg_time":{
+ "metric":"rpc.rpc.createTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/createTable_num_ops":{
+ "metric":"rpc.rpc.createTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/delete/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.delete.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/delete/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.delete.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteColumn_avg_time":{
+ "metric":"rpc.rpc.deleteColumn_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteColumn_num_ops":{
+ "metric":"rpc.rpc.deleteColumn_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteTable_avg_time":{
+ "metric":"rpc.rpc.deleteTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/deleteTable_num_ops":{
+ "metric":"rpc.rpc.deleteTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/delete_avg_time":{
+ "metric":"rpc.rpc.delete_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/delete_num_ops":{
+ "metric":"rpc.rpc.delete_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/disableTable_avg_time":{
+ "metric":"rpc.rpc.disableTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/disableTable_num_ops":{
+ "metric":"rpc.rpc.disableTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/enableTable_avg_time":{
+ "metric":"rpc.rpc.enableTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/enableTable_num_ops":{
+ "metric":"rpc.rpc.enableTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/execCoprocessor/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.execCoprocessor.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/execCoprocessor/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.execCoprocessor.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/execCoprocessor_avg_time":{
+ "metric":"rpc.rpc.execCoprocessor_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/execCoprocessor_num_ops":{
+ "metric":"rpc.rpc.execCoprocessor_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/exists/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.exists.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/exists/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.exists.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/exists_avg_time":{
+ "metric":"rpc.rpc.exists_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/exists_num_ops":{
+ "metric":"rpc.rpc.exists_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/flushRegion/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.flushRegion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/flushRegion/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.flushRegion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/flushRegion_avg_time":{
+ "metric":"rpc.rpc.flushRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/flushRegion_num_ops":{
+ "metric":"rpc.rpc.flushRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/get/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.get.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/get/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.get.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getAlterStatus_avg_time":{
+ "metric":"rpc.rpc.getAlterStatus_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getAlterStatus_num_ops":{
+ "metric":"rpc.rpc.getAlterStatus_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries_avg_time":{
+ "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummaries_num_ops":{
+ "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getCatalogTracker/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getCatalogTracker.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getCatalogTracker/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getCatalogTracker.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getCatalogTracker_avg_time":{
+ "metric":"rpc.rpc.getCatalogTracker_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getCatalogTracker_num_ops":{
+ "metric":"rpc.rpc.getCatalogTracker_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClosestRowBefore/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getClosestRowBefore.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClosestRowBefore/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getClosestRowBefore.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClosestRowBefore_avg_time":{
+ "metric":"rpc.rpc.getClosestRowBefore_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClosestRowBefore_num_ops":{
+ "metric":"rpc.rpc.getClosestRowBefore_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClusterStatus_avg_time":{
+ "metric":"rpc.rpc.getClusterStatus_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getClusterStatus_num_ops":{
+ "metric":"rpc.rpc.getClusterStatus_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getConfiguration/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getConfiguration.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getConfiguration/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getConfiguration.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getConfiguration_avg_time":{
+ "metric":"rpc.rpc.getConfiguration_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getConfiguration_num_ops":{
+ "metric":"rpc.rpc.getConfiguration_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getFromOnlineRegions/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getFromOnlineRegions.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getFromOnlineRegions/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getFromOnlineRegions.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getFromOnlineRegions_avg_time":{
+ "metric":"rpc.rpc.getFromOnlineRegions_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getFromOnlineRegions_num_ops":{
+ "metric":"rpc.rpc.getFromOnlineRegions_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHServerInfo/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getHServerInfo.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHServerInfo/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getHServerInfo.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHServerInfo_avg_time":{
+ "metric":"rpc.rpc.getHServerInfo_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHServerInfo_num_ops":{
+ "metric":"rpc.rpc.getHServerInfo_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHTableDescriptors_avg_time":{
+ "metric":"rpc.rpc.getHTableDescriptors_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getHTableDescriptors_num_ops":{
+ "metric":"rpc.rpc.getHTableDescriptors_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getOnlineRegions/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getOnlineRegions.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getOnlineRegions/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getOnlineRegions.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getOnlineRegions_avg_time":{
+ "metric":"rpc.rpc.getOnlineRegions_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getOnlineRegions_num_ops":{
+ "metric":"rpc.rpc.getOnlineRegions_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolSignature/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getProtocolSignature.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolSignature/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getProtocolSignature.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolSignature_avg_time":{
+ "metric":"rpc.rpc.getProtocolSignature_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolSignature_num_ops":{
+ "metric":"rpc.rpc.getProtocolSignature_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolVersion/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getProtocolVersion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolVersion/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getProtocolVersion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolVersion_avg_time":{
+ "metric":"rpc.rpc.getProtocolVersion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getProtocolVersion_num_ops":{
+ "metric":"rpc.rpc.getProtocolVersion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getRegionInfo/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getRegionInfo.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getRegionInfo/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getRegionInfo.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getRegionInfo_avg_time":{
+ "metric":"rpc.rpc.getRegionInfo_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getRegionInfo_num_ops":{
+ "metric":"rpc.rpc.getRegionInfo_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getServerName/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getServerName.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getServerName/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getServerName.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getServerName_avg_time":{
+ "metric":"rpc.rpc.getServerName_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getServerName_num_ops":{
+ "metric":"rpc.rpc.getServerName_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getZooKeeper/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.getZooKeeper.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getZooKeeper/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.getZooKeeper.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getZooKeeper_avg_time":{
+ "metric":"rpc.rpc.getZooKeeper_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/getZooKeeper_num_ops":{
+ "metric":"rpc.rpc.getZooKeeper_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/get_avg_time":{
+ "metric":"rpc.rpc.get_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/get_num_ops":{
+ "metric":"rpc.rpc.get_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/increment/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.increment.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/increment/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.increment.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/incrementColumnValue/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.incrementColumnValue.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/incrementColumnValue/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.incrementColumnValue.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/incrementColumnValue_avg_time":{
+ "metric":"rpc.rpc.incrementColumnValue_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/incrementColumnValue_num_ops":{
+ "metric":"rpc.rpc.incrementColumnValue_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/increment_avg_time":{
+ "metric":"rpc.rpc.increment_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/increment_num_ops":{
+ "metric":"rpc.rpc.increment_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isAborted/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.isAborted.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isAborted/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.isAborted.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isAborted_avg_time":{
+ "metric":"rpc.rpc.isAborted_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isAborted_num_ops":{
+ "metric":"rpc.rpc.isAborted_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isMasterRunning_avg_time":{
+ "metric":"rpc.rpc.isMasterRunning_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isMasterRunning_num_ops":{
+ "metric":"rpc.rpc.isMasterRunning_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isStopped/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.isStopped.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isStopped/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.isStopped.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isStopped_avg_time":{
+ "metric":"rpc.rpc.isStopped_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/isStopped_num_ops":{
+ "metric":"rpc.rpc.isStopped_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/lockRow/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.lockRow.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/lockRow/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.lockRow.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/lockRow_avg_time":{
+ "metric":"rpc.rpc.lockRow_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/lockRow_num_ops":{
+ "metric":"rpc.rpc.lockRow_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyColumn_avg_time":{
+ "metric":"rpc.rpc.modifyColumn_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyColumn_num_ops":{
+ "metric":"rpc.rpc.modifyColumn_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyTable_avg_time":{
+ "metric":"rpc.rpc.modifyTable_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/modifyTable_num_ops":{
+ "metric":"rpc.rpc.modifyTable_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/move_avg_time":{
+ "metric":"rpc.rpc.move_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/move_num_ops":{
+ "metric":"rpc.rpc.move_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/multi/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.multi.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/multi/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.multi.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/multi_avg_time":{
+ "metric":"rpc.rpc.multi_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/multi_num_ops":{
+ "metric":"rpc.rpc.multi_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/next/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.next.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/next/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.next.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/next_avg_time":{
+ "metric":"rpc.rpc.next_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/next_num_ops":{
+ "metric":"rpc.rpc.next_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/offline_avg_time":{
+ "metric":"rpc.rpc.offline_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/offline_num_ops":{
+ "metric":"rpc.rpc.offline_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegion/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.openRegion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegion/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.openRegion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegion_avg_time":{
+ "metric":"rpc.rpc.openRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegion_num_ops":{
+ "metric":"rpc.rpc.openRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegions/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.openRegions.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegions/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.openRegions.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegions_avg_time":{
+ "metric":"rpc.rpc.openRegions_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openRegions_num_ops":{
+ "metric":"rpc.rpc.openRegions_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openScanner/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.openScanner.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openScanner/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.openScanner.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openScanner_avg_time":{
+ "metric":"rpc.rpc.openScanner_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/openScanner_num_ops":{
+ "metric":"rpc.rpc.openScanner_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/put/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.put.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/put/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.put.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/put_avg_time":{
+ "metric":"rpc.rpc.put_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/put_num_ops":{
+ "metric":"rpc.rpc.put_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerReport_avg_time":{
+ "metric":"rpc.rpc.regionServerReport_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerReport_num_ops":{
+ "metric":"rpc.rpc.regionServerReport_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerStartup_avg_time":{
+ "metric":"rpc.rpc.regionServerStartup_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/regionServerStartup_num_ops":{
+ "metric":"rpc.rpc.regionServerStartup_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.removeFromOnlineRegions.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.removeFromOnlineRegions.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/removeFromOnlineRegions_avg_time":{
+ "metric":"rpc.rpc.removeFromOnlineRegions_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/removeFromOnlineRegions_num_ops":{
+ "metric":"rpc.rpc.removeFromOnlineRegions_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/replicateLogEntries/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.replicateLogEntries.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/replicateLogEntries/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.replicateLogEntries.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/replicateLogEntries_avg_time":{
+ "metric":"rpc.rpc.replicateLogEntries_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/replicateLogEntries_num_ops":{
+ "metric":"rpc.rpc.replicateLogEntries_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/reportRSFatalError_avg_time":{
+ "metric":"rpc.rpc.reportRSFatalError_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/reportRSFatalError_num_ops":{
+ "metric":"rpc.rpc.reportRSFatalError_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rollHLogWriter/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.rollHLogWriter.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rollHLogWriter/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.rollHLogWriter.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rollHLogWriter_avg_time":{
+ "metric":"rpc.rpc.rollHLogWriter_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rollHLogWriter_num_ops":{
+ "metric":"rpc.rpc.rollHLogWriter_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthenticationFailures":{
+ "metric":"rpc.rpc.rpcAuthenticationFailures",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthenticationSuccesses":{
+ "metric":"rpc.rpc.rpcAuthenticationSuccesses",
+ "pointInTime":false,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthorizationFailures":{
+ "metric":"rpc.rpc.rpcAuthorizationFailures",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/rpcAuthorizationSuccesses":{
+ "metric":"rpc.rpc.rpcAuthorizationSuccesses",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/shutdown_avg_time":{
+ "metric":"rpc.rpc.shutdown_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/shutdown_num_ops":{
+ "metric":"rpc.rpc.shutdown_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/splitRegion/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.splitRegion.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/splitRegion/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.splitRegion.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/splitRegion_avg_time":{
+ "metric":"rpc.rpc.splitRegion_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/splitRegion_num_ops":{
+ "metric":"rpc.rpc.splitRegion_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stop/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.stop.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stop/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.stop.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stopMaster_avg_time":{
+ "metric":"rpc.rpc.stopMaster_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stopMaster_num_ops":{
+ "metric":"rpc.rpc.stopMaster_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stop_avg_time":{
+ "metric":"rpc.rpc.stop_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/stop_num_ops":{
+ "metric":"rpc.rpc.stop_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unassign_avg_time":{
+ "metric":"rpc.rpc.unassign_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unassign_num_ops":{
+ "metric":"rpc.rpc.unassign_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unlockRow/aboveOneSec/_avg_time":{
+ "metric":"rpc.rpc.unlockRow.aboveOneSec._avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unlockRow/aboveOneSec/_num_ops":{
+ "metric":"rpc.rpc.unlockRow.aboveOneSec._num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unlockRow_avg_time":{
+ "metric":"rpc.rpc.unlockRow_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/rpc/unlockRow_num_ops":{
+ "metric":"rpc.rpc.unlockRow_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/ugi/loginFailure_avg_time":{
+ "metric":"ugi.ugi.loginFailure_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/ugi/loginFailure_num_ops":{
+ "metric":"ugi.ugi.loginFailure_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/ugi/loginSuccess_avg_time":{
+ "metric":"ugi.ugi.loginSuccess_avg_time",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/ugi/loginSuccess_num_ops":{
+ "metric":"ugi.ugi.loginSuccess_num_ops",
+ "pointInTime":true,
+ "temporal":true
+ }
+ },
+ "FLUME_SERVER":{
+ "metrics/boottime":{
+ "metric":"boottime",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_aidle":{
+ "metric":"cpu_aidle",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_idle":{
+ "metric":"cpu_idle",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_nice":{
+ "metric":"cpu_nice",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_num":{
+ "metric":"cpu_num",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_speed":{
+ "metric":"cpu_speed",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_system":{
+ "metric":"cpu_system",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_user":{
+ "metric":"cpu_user",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/cpu/cpu_wio":{
+ "metric":"cpu_wio",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/disk/disk_free":{
+ "metric":"disk_free",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/disk/disk_total":{
+ "metric":"disk_total",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/disk/part_max_used":{
+ "metric":"part_max_used",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/gcCount":{
+ "metric":"jvm.JvmMetrics.gcCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/gcTimeMillis":{
+ "metric":"jvm.JvmMetrics.gcTimeMillis",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/logError":{
+ "metric":"jvm.JvmMetrics.logError",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/logFatal":{
+ "metric":"jvm.JvmMetrics.logFatal",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/logInfo":{
+ "metric":"jvm.JvmMetrics.logInfo",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/logWarn":{
+ "metric":"jvm.JvmMetrics.logWarn",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/maxMemoryM":{
+ "metric":"jvm.JvmMetrics.maxMemoryM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memHeapCommittedM":{
+ "metric":"jvm.JvmMetrics.memHeapCommittedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memHeapUsedM":{
+ "metric":"jvm.JvmMetrics.memHeapUsedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memNonHeapCommittedM":{
+ "metric":"jvm.JvmMetrics.memNonHeapCommittedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/memNonHeapUsedM":{
+ "metric":"jvm.JvmMetrics.memNonHeapUsedM",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsBlocked":{
+ "metric":"jvm.JvmMetrics.threadsBlocked",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsNew":{
+ "metric":"jvm.JvmMetrics.threadsNew",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsRunnable":{
+ "metric":"jvm.JvmMetrics.threadsRunnable",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsTerminated":{
+ "metric":"jvm.JvmMetrics.threadsTerminated",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsTimedWaiting":{
+ "metric":"jvm.JvmMetrics.threadsTimedWaiting",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/jvm/threadsWaiting":{
+ "metric":"jvm.JvmMetrics.threadsWaiting",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/CHANNEL/$2/ChannelCapacity":{
+ "metric":"(\\w+).CHANNEL.(\\w+).ChannelCapacity",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/CHANNEL/$2/StartTime":{
+ "metric":"(\\w+).CHANNEL.(\\w+).StartTime",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/CHANNEL/$2/EventTakeAttemptCount":{
+ "metric":"(\\w+).CHANNEL.(\\w+).EventTakeAttemptCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/CHANNEL/$2/EventTakeSuccessCount":{
+ "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/CHANNEL/$2/EventPutAttemptCount":{
+ "metric":"(\\w+).CHANNEL.(\\w+).EventPutAttemptCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/CHANNEL/$2/StopTime":{
+ "metric":"(\\w+).CHANNEL.(\\w+).StopTime",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/CHANNEL/$2/ChannelFillPercentage":{
+ "metric":"(\\w+).CHANNEL.(\\w+).ChannelFillPercentage",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/CHANNEL/$2/ChannelSize":{
+ "metric":"(\\w+).CHANNEL.(\\w+).ChannelSize",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/CHANNEL/$2/EventPutSuccessCount":{
+ "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/SINK/$2/ConnectionCreatedCount":{
+ "metric":"(\\w+).SINK.(\\w+).ConnectionCreatedCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/SINK/$2/BatchCompleteCount":{
+ "metric":"(\\w+).SINK.(\\w+).BatchCompleteCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/SINK/$2/EventDrainSuccessCount":{
+ "metric":"(\\w+).SINK.(\\w+).EventDrainSuccessCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/SINK/$2/StartTime":{
+ "metric":"(\\w+).SINK.(\\w+).StartTime",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/SINK/$2/EventDrainAttemptCount":{
+ "metric":"(\\w+).SINK.(\\w+).EventDrainAttemptCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/SINK/$2/ConnectionFailedCount":{
+ "metric":"(\\w+).SINK.(\\w+).ConnectionFailedCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/SINK/$2/BatchUnderflowCount":{
+ "metric":"(\\w+).SINK.(\\w+).BatchUnderflowCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/SINK/$2/ConnectionClosedCount":{
+ "metric":"(\\w+).SINK.(\\w+).ConnectionClosedCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/SINK/$2/StopTime":{
+ "metric":"(\\w+).SINK.(\\w+).StopTime",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/SINK/$2/BatchEmptyCount":{
+ "metric":"(\\w+).SINK.(\\w+).BatchEmptyCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/SOURCE/$2/AppendBatchReceivedCount":{
+ "metric":"(\\w+).SOURCE.(\\w+).AppendBatchReceivedCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/SOURCE/$2/AppendAcceptedCount":{
+ "metric":"(\\w+).SOURCE.(\\w+).AppendAcceptedCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/SOURCE/$2/StartTime":{
+ "metric":"(\\w+).SOURCE.(\\w+).StartTime",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/SOURCE/$2/OpenConnectionCount":{
+ "metric":"(\\w+).SOURCE.(\\w+).OpenConnectionCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/SOURCE/$2/AppendBatchAcceptedCount":{
+ "metric":"(\\w+).SOURCE.(\\w+).AppendBatchAcceptedCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/SOURCE/$2/AppendReceivedCount":{
+ "metric":"(\\w+).SOURCE.(\\w+).AppendReceivedCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/SOURCE/$2/EventReceivedCount":{
+ "metric":"(\\w+).SOURCE.(\\w+).EventReceivedCount",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/SOURCE/$2/StopTime":{
+ "metric":"(\\w+).SOURCE.(\\w+).StopTime",
+ "pointInTime":true,
+ "temporal":true
+ },
+ "metrics/flume/$1/SOURCE/$2/EventAcceptedCount":{
+ "metric":"(\\w+).SOURCE.(\\w+).EventAcceptedCount",
+ "pointInTime":true,
+ "temporal":true
+ }
+ },
+ "JOURNALNODE":{
+ "metrics/dfs/journalNode/syncs300s75thPercentileLatencyMicros": {
+ "metric": "dfs.JournalNode.Syncs300s75thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/lastWrittenTxId": {
+ "metric": "dfs.JournalNode.LastWrittenTxId",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/syncs60s90thPercentileLatencyMicros": {
+ "metric": "dfs.JournalNode.Syncs60s90thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/finalizeLogSegment_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.FinalizeLogSegmentNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/ugi/loginSuccess_avg_time": {
+ "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/startLogSegment_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.StartLogSegmentAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/load/load_one": {
+ "metric": "load_one",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/currentLagTxns": {
+ "metric": "dfs.JournalNode.CurrentLagTxns",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/memNonHeapUsedM": {
+ "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/bytesWritten": {
+ "metric": "dfs.JournalNode.BytesWritten",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/process/proc_run": {
+ "metric": "proc_run",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/acceptRecovery_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.AcceptRecoveryNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/memory/swap_total": {
+ "metric": "swap_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/txnsWritten": {
+ "metric": "dfs.JournalNode.TxnsWritten",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/threadsBlocked": {
+ "metric": "jvm.JvmMetrics.ThreadsBlocked",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/RpcQueueTime_num_ops": {
+ "metric": "rpc.rpc.RpcQueueTimeNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/process/proc_total": {
+ "metric": "proc_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/disk/part_max_used": {
+ "metric": "part_max_used",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/newEpoch_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.NewEpochNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/syncs3600s_num_ops": {
+ "metric": "dfs.JournalNode.Syncs3600sNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/syncs3600s99thPercentileLatencyMicros": {
+ "metric": "dfs.JournalNode.Syncs3600s99thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/ugi/loginSuccess_num_ops": {
+ "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/syncs60s95thPercentileLatencyMicros": {
+ "metric": "dfs.JournalNode.Syncs60s95thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_idle": {
+ "metric": "cpu_idle",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/syncs60s99thPercentileLatencyMicros": {
+ "metric": "dfs.JournalNode.Syncs60s99thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_aidle": {
+ "metric": "cpu_aidle",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/mem_free": {
+ "metric": "mem_free",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/syncs300s50thPercentileLatencyMicros": {
+ "metric": "dfs.JournalNode.Syncs300s50thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/network/bytes_in": {
+ "metric": "bytes_in",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/syncs300s_num_ops": {
+ "metric": "dfs.JournalNode.Syncs300sNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/syncs3600s90thPercentileLatencyMicros": {
+ "metric": "dfs.JournalNode.Syncs3600s90thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/network/pkts_out": {
+ "metric": "pkts_out",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/memNonHeapCommittedM": {
+ "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/memory/mem_cached": {
+ "metric": "mem_cached",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/disk/disk_total": {
+ "metric": "disk_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/lastPromisedEpoch": {
+ "metric": "dfs.JournalNode.LastPromisedEpoch",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/prepareRecovery_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.PrepareRecoveryAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/lastWriterEpoch": {
+ "metric": "dfs.JournalNode.LastWriterEpoch",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/ugi/loginFailure_num_ops": {
+ "metric": "ugi.UgiMetrics.LoginFailureNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/getEditLogManifest_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_wio": {
+ "metric": "cpu_wio",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/acceptRecovery_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.AcceptRecoveryAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_speed": {
+ "metric": "cpu_speed",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/RpcProcessingTime_avg_time": {
+ "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthenticationSuccesses": {
+ "metric": "rpc.rpc.RpcAuthenticationSuccesses",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthenticationFailures": {
+ "metric": "rpc.rpc.RpcAuthenticationFailures",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/network/pkts_in": {
+ "metric": "pkts_in",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/mem_total": {
+ "metric": "mem_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/memHeapCommittedM": {
+ "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/threadsRunnable": {
+ "metric": "jvm.JvmMetrics.ThreadsRunnable",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/syncs60s75thPercentileLatencyMicros": {
+ "metric": "dfs.JournalNode.Syncs60s75thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/threadsNew": {
+ "metric": "jvm.JvmMetrics.ThreadsNew",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthorizationFailures": {
+ "metric": "rpc.rpc.RpcAuthorizationFailures",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/syncs300s95thPercentileLatencyMicros": {
+ "metric": "dfs.JournalNode.Syncs300s95thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/RpcQueueTime_avg_time": {
+ "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/syncs3600s95thPercentileLatencyMicros": {
+ "metric": "dfs.JournalNode.Syncs3600s95thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/getEditLogManifest_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/SentBytes": {
+ "metric": "rpc.rpc.SentBytes",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/logWarn": {
+ "metric": "jvm.JvmMetrics.LogWarn",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/threadsTimedWaiting": {
+ "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/gcCount": {
+ "metric": "jvm.JvmMetrics.GcCount",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/ReceivedBytes": {
+ "metric": "rpc.rpc.ReceivedBytes",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_nice": {
+ "metric": "cpu_nice",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/batchesWritten": {
+ "metric": "dfs.JournalNode.BatchesWritten",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/NumOpenConnections": {
+ "metric": "rpc.rpc.NumOpenConnections",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/batchesWrittenWhileLagging": {
+ "metric": "dfs.JournalNode.BatchesWrittenWhileLagging",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/syncs300s99thPercentileLatencyMicros": {
+ "metric": "dfs.JournalNode.Syncs300s99thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/memHeapUsedM": {
+ "metric": "jvm.JvmMetrics.MemHeapUsedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/prepareRecovery_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.PrepareRecoveryNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/threadsWaiting": {
+ "metric": "jvm.JvmMetrics.ThreadsWaiting",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/disk/disk_free": {
+ "metric": "disk_free",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/newEpoch_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.NewEpochAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/memory/mem_buffers": {
+ "metric": "mem_buffers",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/syncs60s_num_ops": {
+ "metric": "dfs.JournalNode.Syncs60sNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/gcTimeMillis": {
+ "metric": "jvm.JvmMetrics.GcTimeMillis",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/threadsTerminated": {
+ "metric": "jvm.JvmMetrics.ThreadsTerminated",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/memMaxM": {
+ "metric": "jvm.JvmMetrics.MemMaxM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/network/bytes_out": {
+ "metric": "bytes_out",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_user": {
+ "metric": "cpu_user",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/swap_free": {
+ "metric": "swap_free",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/load/load_five": {
+ "metric": "load_five",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_system": {
+ "metric": "cpu_system",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/getJournalState_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.GetJournalStateAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/boottime": {
+ "metric": "boottime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/syncs300s90thPercentileLatencyMicros": {
+ "metric": "dfs.JournalNode.Syncs300s90thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/callQueueLen": {
+ "metric": "rpc.rpc.CallQueueLength",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/journal_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.JournalAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/finalizeLogSegment_avg_time": {
+ "metric": "rpcdetailed.rpcdetailed.FinalizeLogSegmentAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/load/load_fifteen": {
+ "metric": "load_fifteen",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/startLogSegment_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.StartLogSegmentNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/syncs60s50thPercentileLatencyMicros": {
+ "metric": "dfs.JournalNode.Syncs60s50thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/logInfo": {
+ "metric": "jvm.JvmMetrics.LogInfo",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/getJournalState_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.GetJournalStateNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/RpcProcessingTime_num_ops": {
+ "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/memory/mem_shared": {
+ "metric": "mem_shared",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/syncs3600s50thPercentileLatencyMicros": {
+ "metric": "dfs.JournalNode.Syncs3600s50thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/logError": {
+ "metric": "jvm.JvmMetrics.LogError",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/ugi/loginFailure_avg_time": {
+ "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_num": {
+ "metric": "cpu_num",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/journal_num_ops": {
+ "metric": "rpcdetailed.rpcdetailed.JournalNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthorizationSuccesses": {
+ "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/logFatal": {
+ "metric": "jvm.JvmMetrics.LogFatal",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/dfs/journalNode/syncs3600s75thPercentileLatencyMicros": {
+ "metric": "dfs.JournalNode.Syncs3600s75thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": true
+ }
+ },
+ "NODEMANAGER":{
+ "metrics/memory/mem_total": {
+ "metric": "mem_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/memHeapCommittedM": {
+ "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/mapred/ShuffleOutputsFailed": {
+ "metric": "mapred.ShuffleOutputsFailed",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/threadsRunnable": {
+ "metric": "jvm.JvmMetrics.ThreadsRunnable",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/threadsNew": {
+ "metric": "jvm.JvmMetrics.ThreadsNew",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthorizationFailures": {
+ "metric": "rpc.metrics.RpcAuthorizationFailures",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/ugi/loginSuccess_avg_time": {
+ "metric": "ugi.ugi.LoginSuccessAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/RpcQueueTime_avg_time": {
+ "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/yarn/ContainersCompleted": {
+ "metric": "yarn.NodeManagerMetrics.ContainersCompleted",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/SentBytes": {
+ "metric": "rpc.rpc.SentBytes",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/memNonHeapUsedM": {
+ "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/yarn/ContainersKilled": {
+ "metric": "yarn.NodeManagerMetrics.ContainersKilled",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/logWarn": {
+ "metric": "jvm.JvmMetrics.LogWarn",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/threadsTimedWaiting": {
+ "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/gcCount": {
+ "metric": "jvm.JvmMetrics.GcCount",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/process/proc_run": {
+ "metric": "proc_run",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/swap_total": {
+ "metric": "swap_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/ReceivedBytes": {
+ "metric": "rpc.rpc.ReceivedBytes",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_nice": {
+ "metric": "cpu_nice",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/threadsBlocked": {
+ "metric": "jvm.JvmMetrics.ThreadsBlocked",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/RpcQueueTime_num_ops": {
+ "metric": "rpc.rpc.RpcQueueTimeNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/process/proc_total": {
+ "metric": "proc_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/yarn/AllocatedGB": {
+ "metric": "yarn.NodeManagerMetrics.AllocatedGB",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/disk/part_max_used": {
+ "metric": "part_max_used",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/NumOpenConnections": {
+ "metric": "rpc.rpc.NumOpenConnections",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/memHeapUsedM": {
+ "metric": "jvm.JvmMetrics.MemHeapUsedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/threadsWaiting": {
+ "metric": "jvm.JvmMetrics.ThreadsWaiting",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/disk/disk_free": {
+ "metric": "disk_free",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/mapred/ShuffleOutputsOK": {
+ "metric": "mapred.ShuffleOutputsOK",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/yarn/ContainersFailed": {
+ "metric": "yarn.NodeManagerMetrics.ContainersFailed",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/mem_buffers": {
+ "metric": "mem_buffers",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/ugi/loginSuccess_num_ops": {
+ "metric": "ugi.ugi.LoginSuccessNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/gcTimeMillis": {
+ "metric": "jvm.JvmMetrics.GcTimeMillis",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_idle": {
+ "metric": "cpu_idle",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/yarn/AllocatedContainers": {
+ "metric": "yarn.NodeManagerMetrics.AllocatedContainers",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/threadsTerminated": {
+ "metric": "jvm.JvmMetrics.ThreadsTerminated",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/network/bytes_out": {
+ "metric": "bytes_out",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_aidle": {
+ "metric": "cpu_aidle",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/mem_free": {
+ "metric": "mem_free",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_user": {
+ "metric": "cpu_user",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/swap_free": {
+ "metric": "swap_free",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_system": {
+ "metric": "cpu_system",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/network/bytes_in": {
+ "metric": "bytes_in",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/boottime": {
+ "metric": "boottime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/network/pkts_out": {
+ "metric": "pkts_out",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/memNonHeapCommittedM": {
+ "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/callQueueLen": {
+ "metric": "rpc.rpc.CallQueueLength",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/memory/mem_cached": {
+ "metric": "mem_cached",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/yarn/ContainersRunning": {
+ "metric": "yarn.NodeManagerMetrics.ContainersRunning",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/disk/disk_total": {
+ "metric": "disk_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/logInfo": {
+ "metric": "jvm.JvmMetrics.LogInfo",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/yarn/ContainersLaunched": {
+ "metric": "yarn.NodeManagerMetrics.ContainersLaunched",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/RpcProcessingTime_num_ops": {
+ "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/ugi/loginFailure_num_ops": {
+ "metric": "ugi.ugi.LoginFailureNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/memory/mem_shared": {
+ "metric": "mem_shared",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/yarn/AvailableGB": {
+ "metric": "yarn.NodeManagerMetrics.AvailableGB",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/mapred/ShuffleConnections": {
+ "metric": "mapred.ShuffleConnections",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_wio": {
+ "metric": "cpu_wio",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/yarn/ContainersIniting": {
+ "metric": "yarn.NodeManagerMetrics.ContainersIniting",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/logError": {
+ "metric": "jvm.JvmMetrics.LogError",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/ugi/loginFailure_avg_time": {
+ "metric": "ugi.ugi.LoginFailureAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_num": {
+ "metric": "cpu_num",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_speed": {
+ "metric": "cpu_speed",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthorizationSuccesses": {
+ "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/logFatal": {
+ "metric": "jvm.JvmMetrics.LogFatal",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/RpcProcessingTime_avg_time": {
+ "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/mapred/ShuffleOutputBytes": {
+ "metric": "mapred.ShuffleOutputBytes",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthenticationSuccesses": {
+ "metric": "rpc.metrics.RpcAuthenticationSuccesses",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthenticationFailures": {
+ "metric": "rpc.metrics.RpcAuthenticationFailures",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/network/pkts_in": {
+ "metric": "pkts_in",
+ "pointInTime": true,
+ "temporal": true
+ }
+ },
+ "RESOURCEMANAGER":{
+ "metrics/rpcdetailed/FinishApplicationMasterNumOps": {
+ "metric": "rpcdetailed.rpcdetailed.FinishApplicationMasterNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/mem_total": {
+ "metric": "mem_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/yarn/Queue/$1/AppsCompleted": {
+ "metric": "yarn.QueueMetrics.Queue:(.+),.AppsCompleted",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/yarn/ClusterMetrics/NumUnhealthyNMs": {
+ "metric": "yarn.ClusterMetrics.NumUnhealthyNMs",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/memHeapCommittedM": {
+ "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/threadsRunnable": {
+ "metric": "jvm.JvmMetrics.ThreadsRunnable",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/yarn/ClusterMetrics/NumRebootedNMs": {
+ "metric": "yarn.ClusterMetrics.NumRebootedNMs",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/threadsNew": {
+ "metric": "jvm.JvmMetrics.ThreadsNew",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/yarn/Queue/$1/AppsSubmitted": {
+ "metric": "yarn.QueueMetrics.Queue:(.+),.AppsSubmitted",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/yarn/ClusterMetrics/NumLostNMs": {
+ "metric": "yarn.ClusterMetrics.NumLostNMs",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/ugi/loginSuccess_avg_time": {
+ "metric": "ugi.ugi.LoginSuccessAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/RpcQueueTime_avg_time": {
+ "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/yarn/Queue/$1/AllocatedContainers": {
+ "metric": "yarn.QueueMetrics.Queue:(.+),.AllocatedContainers",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/SentBytes": {
+ "metric": "rpc.rpc.SentBytes",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/yarn/Queue/$1/AppsKilled": {
+ "metric": "yarn.QueueMetrics.Queue:(.+),.AppsKilled",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/yarn/ClusterMetrics/NumActiveNMs": {
+ "metric": "yarn.ClusterMetrics.NumActiveNMs",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/memNonHeapUsedM": {
+ "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/logWarn": {
+ "metric": "jvm.JvmMetrics.LogWarn",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/threadsTimedWaiting": {
+ "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/yarn/Queue/$1/AppsFailed": {
+ "metric": "yarn.QueueMetrics.Queue:(.+),.AppsFailed",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/gcCount": {
+ "metric": "jvm.JvmMetrics.GcCount",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/process/proc_run": {
+ "metric": "proc_run",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/swap_total": {
+ "metric": "swap_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/ReceivedBytes": {
+ "metric": "rpc.rpc.ReceivedBytes",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/AllocateNumOps": {
+ "metric": "rpcdetailed.rpcdetailed.AllocateNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_nice": {
+ "metric": "cpu_nice",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/gcCountMarkSweepCompact": {
+ "metric": "jvm.JvmMetrics.GcCountMarkSweepCompact",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/threadsBlocked": {
+ "metric": "jvm.JvmMetrics.ThreadsBlocked",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/yarn/Queue/$1/AppsRunning": {
+ "metric": "yarn.QueueMetrics.Queue:(.+),.AppsRunning",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/yarn/ClusterMetrics/NumDecommissionedNMs": {
+ "metric": "yarn.ClusterMetrics.NumDecommissionedNMs",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/RpcQueueTime_num_ops": {
+ "metric": "rpc.rpc.RpcQueueTimeNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/process/proc_total": {
+ "metric": "proc_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/disk/part_max_used": {
+ "metric": "part_max_used",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/NumOpenConnections": {
+ "metric": "rpc.rpc.NumOpenConnections",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/memHeapUsedM": {
+ "metric": "jvm.JvmMetrics.MemHeapUsedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/disk/disk_free": {
+ "metric": "disk_free",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/threadsWaiting": {
+ "metric": "jvm.JvmMetrics.ThreadsWaiting",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/load_one": {
+ "metric": "load_one",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/mem_buffers": {
+ "metric": "mem_buffers",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/ugi/loginSuccess_num_ops": {
+ "metric": "ugi.ugi.LoginSuccessNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/gcTimeMillisCopy": {
+ "metric": "jvm.JvmMetrics.GcTimeMillisCopy",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/gcTimeMillis": {
+ "metric": "jvm.JvmMetrics.GcTimeMillis",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_idle": {
+ "metric": "cpu_idle",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/yarn/Queue/$1/PendingContainers": {
+ "metric": "yarn.QueueMetrics.Queue:(.+),.PendingContainers",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/memMaxM": {
+ "metric": "jvm.JvmMetrics.MemMaxM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/threadsTerminated": {
+ "metric": "jvm.JvmMetrics.ThreadsTerminated",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/network/bytes_out": {
+ "metric": "bytes_out",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_aidle": {
+ "metric": "cpu_aidle",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/mem_free": {
+ "metric": "mem_free",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/AllocateAvgTime": {
+ "metric": "rpcdetailed.rpcdetailed.AllocateAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_user": {
+ "metric": "cpu_user",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/swap_free": {
+ "metric": "swap_free",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_system": {
+ "metric": "cpu_system",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/load_five": {
+ "metric": "load_five",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/network/bytes_in": {
+ "metric": "bytes_in",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/boottime": {
+ "metric": "boottime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/network/pkts_out": {
+ "metric": "pkts_out",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/memNonHeapCommittedM": {
+ "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/GetApplicationReportNumOps": {
+ "metric": "rpcdetailed.rpcdetailed.GetApplicationReportNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/FinishApplicationMasterAvgTime": {
+ "metric": "rpcdetailed.rpcdetailed.FinishApplicationMasterAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/callQueueLen": {
+ "metric": "rpc.rpc.CallQueueLength",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/memory/mem_cached": {
+ "metric": "mem_cached",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/RegisterApplicationMasterNumOps": {
+ "metric": "rpcdetailed.rpcdetailed.RegisterApplicationMasterNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/disk/disk_total": {
+ "metric": "disk_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/yarn/Queue/$1/AvailableMB": {
+ "metric": "yarn.QueueMetrics.Queue:(.+),.AvailableMB",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/yarn/Queue/$1/PendingMB": {
+ "metric": "yarn.QueueMetrics.Queue:(.+),.PendingMB",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/logInfo": {
+ "metric": "jvm.JvmMetrics.LogInfo",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/RpcProcessingTime_num_ops": {
+ "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/ugi/loginFailure_num_ops": {
+ "metric": "ugi.ugi.LoginFailureNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/memory/mem_shared": {
+ "metric": "mem_shared",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/SubmitApplicationAvgTime": {
+ "metric": "rpcdetailed.rpcdetailed.SubmitApplicationAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_wio": {
+ "metric": "cpu_wio",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/GetNewApplicationNumOps": {
+ "metric": "rpcdetailed.rpcdetailed.GetNewApplicationNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/yarn/Queue/$1/AppsPending": {
+ "metric": "yarn.QueueMetrics.Queue:(.+),.AppsPending",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/gcCountCopy": {
+ "metric": "jvm.JvmMetrics.GcCountCopy",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/load_fifteen": {
+ "metric": "load_fifteen",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/logError": {
+ "metric": "jvm.JvmMetrics.LogError",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/ugi/loginFailure_avg_time": {
+ "metric": "ugi.ugi.LoginFailureAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_num": {
+ "metric": "cpu_num",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/SubmitApplicationNumOps": {
+ "metric": "rpcdetailed.rpcdetailed.SubmitApplicationNumOps",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/gcTimeMillisMarkSweepCompact": {
+ "metric": "jvm.JvmMetrics.GcTimeMillisMarkSweepCompact",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_speed": {
+ "metric": "cpu_speed",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthorizationSuccesses": {
+ "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/yarn/Queue/$1/AllocatedMB": {
+ "metric": "yarn.QueueMetrics.Queue:(.+),.AllocatedMB",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/logFatal": {
+ "metric": "jvm.JvmMetrics.LogFatal",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/RpcProcessingTime_avg_time": {
+ "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/GetApplicationReportAvgTime": {
+ "metric": "rpcdetailed.rpcdetailed.GetApplicationReportAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/NodeHeartbeatAvgTime": {
+ "metric": "rpcdetailed.rpcdetailed.NodeHeartbeatAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/GetNewApplicationAvgTime": {
+ "metric": "rpcdetailed.rpcdetailed.GetNewApplicationAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/network/pkts_in": {
+ "metric": "pkts_in",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpcdetailed/RegisterApplicationMasterAvgTime": {
+ "metric": "rpcdetailed.rpcdetailed.RegisterApplicationMasterAvgTime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/yarn/Queue/$1/ReservedContainers": {
+ "metric": "yarn.QueueMetrics.Queue:(.+),.ReservedContainers",
+ "pointInTime": false,
+ "temporal": true
+ }
+ },
+ "HISTORYSERVER":{
+ "metrics/memory/mem_total": {
+ "metric": "mem_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/memHeapCommittedM": {
+ "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/threadsRunnable": {
+ "metric": "jvm.JvmMetrics.ThreadsRunnable",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/threadsNew": {
+ "metric": "jvm.JvmMetrics.ThreadsNew",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthorizationFailures": {
+ "metric": "rpc.metrics.RpcAuthorizationFailures",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/ugi/loginSuccess_avg_time": {
+ "metric": "ugi.ugi.LoginSuccessAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/RpcQueueTime_avg_time": {
+ "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/SentBytes": {
+ "metric": "rpc.rpc.SentBytes",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/memNonHeapUsedM": {
+ "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/logWarn": {
+ "metric": "jvm.JvmMetrics.LogWarn",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/threadsTimedWaiting": {
+ "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/process/proc_run": {
+ "metric": "proc_run",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/gcCount": {
+ "metric": "jvm.JvmMetrics.GcCount",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/ReceivedBytes": {
+ "metric": "rpc.rpc.ReceivedBytes",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/memory/swap_total": {
+ "metric": "swap_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_nice": {
+ "metric": "cpu_nice",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/threadsBlocked": {
+ "metric": "jvm.JvmMetrics.ThreadsBlocked",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/RpcQueueTime_num_ops": {
+ "metric": "rpc.rpc.RpcQueueTimeNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/process/proc_total": {
+ "metric": "proc_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/disk/part_max_used": {
+ "metric": "part_max_used",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/NumOpenConnections": {
+ "metric": "rpc.rpc.NumOpenConnections",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/memHeapUsedM": {
+ "metric": "jvm.JvmMetrics.MemHeapUsedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/threadsWaiting": {
+ "metric": "jvm.JvmMetrics.ThreadsWaiting",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/disk/disk_free": {
+ "metric": "disk_free",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/mem_buffers": {
+ "metric": "mem_buffers",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/ugi/loginSuccess_num_ops": {
+ "metric": "ugi.ugi.LoginSuccessNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/gcTimeMillis": {
+ "metric": "jvm.JvmMetrics.GcTimeMillis",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_idle": {
+ "metric": "cpu_idle",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/threadsTerminated": {
+ "metric": "jvm.JvmMetrics.ThreadsTerminated",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/network/bytes_out": {
+ "metric": "bytes_out",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_aidle": {
+ "metric": "cpu_aidle",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/mem_free": {
+ "metric": "mem_free",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_user": {
+ "metric": "cpu_user",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/memory/swap_free": {
+ "metric": "swap_free",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_system": {
+ "metric": "cpu_system",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/network/bytes_in": {
+ "metric": "bytes_in",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/boottime": {
+ "metric": "boottime",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/network/pkts_out": {
+ "metric": "pkts_out",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/memNonHeapCommittedM": {
+ "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/callQueueLen": {
+ "metric": "rpc.rpc.CallQueueLength",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/memory/mem_cached": {
+ "metric": "mem_cached",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/disk/disk_total": {
+ "metric": "disk_total",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/logInfo": {
+ "metric": "jvm.JvmMetrics.LogInfo",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/ugi/loginFailure_num_ops": {
+ "metric": "ugi.ugi.LoginFailureNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/RpcProcessingTime_num_ops": {
+ "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/memory/mem_shared": {
+ "metric": "mem_shared",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_wio": {
+ "metric": "cpu_wio",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/jvm/logError": {
+ "metric": "jvm.JvmMetrics.LogError",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/ugi/loginFailure_avg_time": {
+ "metric": "ugi.ugi.LoginFailureAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_num": {
+ "metric": "cpu_num",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/cpu/cpu_speed": {
+ "metric": "cpu_speed",
+ "pointInTime": true,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthorizationSuccesses": {
+ "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/jvm/logFatal": {
+ "metric": "jvm.JvmMetrics.LogFatal",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/RpcProcessingTime_avg_time": {
+ "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthenticationSuccesses": {
+ "metric": "rpc.metrics.RpcAuthenticationSuccesses",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/rpc/rpcAuthenticationFailures": {
+ "metric": "rpc.metrics.RpcAuthenticationFailures",
+ "pointInTime": false,
+ "temporal": true
+ },
+ "metrics/network/pkts_in": {
+ "metric": "pkts_in",
+ "pointInTime": true,
+ "temporal": true
+ }
+ }
+ }
+}
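
The map above ties each Ambari property path to the metric name emitted by the Hadoop metrics system; "pointInTime" appears to mark values that can be served as a current reading, while "temporal" marks values that can be charted from the time-series store. Entries such as "metrics/yarn/Queue/$1/AvailableMB" pair a $1 placeholder in the property path with a capture group in the metric pattern. A minimal Python sketch of how such an entry could be resolved (the escaped regex and the emitted-name format are assumptions for illustration; the real resolution happens in Ambari's Java property providers):

    import re

    # One queue entry from the JSON above, with dots escaped so it works
    # as a real regular expression (an assumption for this illustration).
    PATH = "metrics/yarn/Queue/$1/AvailableMB"
    PATTERN = r"yarn\.QueueMetrics\.Queue:(.+),\.AvailableMB"

    def property_path(emitted_name):
        # Substitute the captured queue name for the $1 placeholder.
        match = re.match(PATTERN, emitted_name)
        return PATH.replace("$1", match.group(1)) if match else None

    # Hypothetical emitted name for the root.default queue:
    print(property_path("yarn.QueueMetrics.Queue:root.default,.AvailableMB"))
    # -> metrics/yarn/Queue/root.default/AvailableMB
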
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/configuration/cluster-env.xml
new file mode 100644
index 0000000000..de389310e6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/configuration/cluster-env.xml
@@ -0,0 +1,111 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+ <!-- metrics sink properties -->
+ <property>
+ <name>sink_database</name>
+ <value>Existing MSSQL Server database with sql auth</value>
+ </property>
+
+ <property>
+ <name>sink.dbservername</name>
+ <value/>
+ </property>
+
+ <property>
+ <name>sink.dblogin</name>
+ <value>hadoop</value>
+ <description>
+ DB user name.
+ </description>
+ </property>
+
+ <property>
+ <name>sink.dbpassword</name>
+ <value/>
+ <description>
+ DB password.
+ </description>
+ </property>
+
+ <property>
+ <name>sink.db.schema.name</name>
+ <value>HadoopMetrics</value>
+ </property>
+
+ <property>
+ <name>sink.jdbc.driver</name>
+ <value>com.microsoft.sqlserver.jdbc.SQLServerDriver</value>
+ <description>
+ JDBC driver class.
+ </description>
+ </property>
+
+ <property>
+ <name>sink.jdbc.url</name>
+ <value/>
+ <description>
+ JDBC URL.
+ </description>
+ </property>
+
+ <property>
+ <name>hadoop.user.name</name>
+ <value>hadoop</value>
+ </property>
+
+
+
+ <property>
+ <name>security_enabled</name>
+ <value>false</value>
+ <description>Hadoop Security</description>
+ </property>
+ <property>
+ <name>kerberos_domain</name>
+ <value>EXAMPLE.COM</value>
+ <description>Kerberos realm.</description>
+ </property>
+ <property>
+ <name>ignore_groupsusers_create</name>
+ <value>false</value>
+    <description>Whether to ignore failures during user and group creation</description>
+ </property>
+ <property>
+ <name>smokeuser</name>
+ <value>ambari-qa</value>
+ <property-type>USER</property-type>
+ <description>User executing service checks</description>
+ </property>
+ <property>
+ <name>smokeuser_keytab</name>
+ <value>/etc/security/keytabs/smokeuser.headless.keytab</value>
+ <description>Path to smoke test user keytab file</description>
+ </property>
+ <property>
+ <name>user_group</name>
+ <value>hadoop</value>
+ <property-type>GROUP</property-type>
+ <description>Hadoop user group.</description>
+ </property>
+</configuration>
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/after-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/after-INSTALL/scripts/hook.py
new file mode 100644
index 0000000000..6b3ca8fed6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/after-INSTALL/scripts/hook.py
@@ -0,0 +1,66 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from ambari_commons.inet_utils import download_file
+from resource_management import *
+from resource_management.libraries import Hook
+
+
+# Hook for hosts that have only client components, with no other components installed
+class AfterInstallHook(Hook):
+
+ def hook(self, env):
+ import params
+ env.set_params(params)
+
+ XmlConfig("core-site.xml",
+ conf_dir=params.hadoop_conf_dir,
+ configurations=params.config['configurations']['core-site'],
+ owner=params.hdfs_user,
+ mode="f"
+ )
+ download_file(os.path.join(params.config['hostLevelParams']['jdk_location'], "sqljdbc4.jar"),
+ os.path.join(params.hadoop_common_dir, "sqljdbc4.jar")
+ )
+ download_file(os.path.join(params.config['hostLevelParams']['jdk_location'], "sqljdbc_auth.dll"),
+ os.path.join(params.hadoop_common_dir, "sqljdbc_auth.dll")
+ )
+ download_file(os.path.join(params.config['hostLevelParams']['jdk_location'], "sqljdbc4.jar"),
+ os.path.join(params.hbase_lib_dir, "sqljdbc4.jar")
+ )
+ download_file(os.path.join(params.config['hostLevelParams']['jdk_location'], "sqljdbc_auth.dll"),
+ os.path.join(params.hadoop_common_bin, "sqljdbc_auth.dll")
+ )
+ download_file(os.path.join(params.config['hostLevelParams']['jdk_location'], "metrics-sink-1.0.0.jar"),
+ os.path.join(params.hadoop_common_dir, "metrics-sink-1.0.0.jar")
+ )
+ download_file(os.path.join(params.config['hostLevelParams']['jdk_location'], "metrics-sink-1.0.0.jar"),
+ os.path.join(params.hbase_lib_dir, "metrics-sink-1.0.0.jar")
+ )
+
+ File(format("{params.hadoop_install_root}/cluster.properties"),
+ content=Template("cluster.properties.j2"),
+ owner=params.hdfs_user,
+ mode="f"
+ )
+
+if __name__ == "__main__":
+ AfterInstallHook().execute()
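
The hook pulls the MSSQL JDBC driver and the metrics sink jar from the server's jdk_location URL and drops them into the Hadoop and HBase library directories. The six download_file calls differ only in artifact name and destination; a table-driven equivalent (a sketch only, reusing the hook's own params and download_file objects, not part of the patch) makes the artifact/destination pairs easier to audit:

    import os

    jdk_location = params.config['hostLevelParams']['jdk_location']
    artifacts = [
        ("sqljdbc4.jar",           params.hadoop_common_dir),
        ("sqljdbc_auth.dll",       params.hadoop_common_dir),
        ("sqljdbc4.jar",           params.hbase_lib_dir),
        ("sqljdbc_auth.dll",       params.hadoop_common_bin),
        ("metrics-sink-1.0.0.jar", params.hadoop_common_dir),
        ("metrics-sink-1.0.0.jar", params.hbase_lib_dir),
    ]
    for name, dest_dir in artifacts:
        # Same URL-join convention as the hook itself.
        download_file(os.path.join(jdk_location, name),
                      os.path.join(dest_dir, name))
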
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/after-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/after-INSTALL/scripts/params.py
new file mode 100644
index 0000000000..7d48ce6269
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/after-INSTALL/scripts/params.py
@@ -0,0 +1,101 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management import *
+import os
+from urlparse import urlparse
+
+config = Script.get_config()
+hadoop_conf_dir = None
+hadoop_common_dir = os.path.join("share", "hadoop", "common", "lib")
+hdfs_user = "hadoop"
+hadoop_common_bin = "bin"
+
+if os.environ.has_key("HADOOP_CONF_DIR"):
+ hadoop_conf_dir = os.environ["HADOOP_CONF_DIR"]
+if os.environ.has_key("HADOOP_COMMON_HOME"):
+ hadoop_common_dir = os.path.join(os.environ["HADOOP_COMMON_HOME"], "share", "hadoop", "common", "lib")
+ hadoop_common_bin = os.path.join(os.environ["HADOOP_COMMON_HOME"], "bin")
+if os.environ.has_key("HBASE_HOME"):
+ hbase_lib_dir = os.path.join(os.environ["HBASE_HOME"], "lib")
+
+if os.environ.has_key("HADOOP_NODE_INSTALL_ROOT"):
+ hadoop_install_root = os.environ["HADOOP_NODE_INSTALL_ROOT"]
+
+
+hdp_log_dir = "c:\\hadoop\\logs"
+hdp_data_dir = "c:\\hadoop"
+db_flavor = "MSSQL"
+db_hostname = "localhost"
+db_port = "1433"
+
+hive_db_flavor = default("/configurations/hive-env/hive_database_type", None)
+hive_jdbc_url = default("/configurations/hive-site/javax.jdo.option.ConnectionURL", None)
+hive_db_hostname = None
+hive_db_port = None
+if hive_jdbc_url:
+ hive_db_hostname = urlparse(hive_jdbc_url.split(";")[0].replace('jdbc:', '')).hostname
+ hive_db_port = urlparse(hive_jdbc_url.split(";")[0].replace('jdbc:', '')).port
+
+oozie_db_flavor = default("/configurations/oozie-env/oozie_ambari_database", None)
+oozie_jdbc_url = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.url", None)
+oozie_db_hostname = None
+oozie_db_port = None
+if oozie_jdbc_url:
+ oozie_db_hostname = urlparse(oozie_jdbc_url.split(";")[0].replace('jdbc:', '')).hostname
+ oozie_db_port = urlparse(oozie_jdbc_url.split(";")[0].replace('jdbc:', '')).port
+
+if hive_db_hostname:
+ db_hostname = hive_db_hostname
+ if hive_db_port:
+ db_port = hive_db_port
+ if hive_db_flavor:
+ db_flavor = hive_db_flavor
+elif oozie_db_hostname:
+ db_hostname = oozie_db_hostname
+ if oozie_db_port:
+ db_port = oozie_db_port
+ if oozie_db_flavor:
+ db_flavor = oozie_db_flavor
+
+hive_db_name = default("/configurations/hive-site/ambari.hive.db.schema.name", "hive")
+hive_db_username = default("/configurations/hive-site/javax.jdo.option.ConnectionUserName", None)
+hive_db_password = default("/configurations/hive-site/javax.jdo.option.ConnectionPassword", None)
+
+oozie_db_name = default("/configurations/oozie-site/oozie.db.schema.name", "oozie")
+oozie_db_username = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.username", None)
+oozie_db_password = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.password", None)
+
+delimiter = ','
+namenode_host = default_string("/clusterHostInfo/namenode_host", [], delimiter)
+secondary_namenode_host = default_string("/clusterHostInfo/snamenode_host", [], delimiter)
+resourcemanager_host = default_string("/clusterHostInfo/rm_host", [], delimiter)
+hive_server_host = default_string("/clusterHostInfo/hive_server_host", [], delimiter)
+oozie_server_host = default_string("/clusterHostInfo/oozie_server", [], delimiter)
+webhcat_host = default_string("/clusterHostInfo/webhcat_server_host", [], delimiter)
+slave_hosts = default_string("/clusterHostInfo/slave_hosts", [], delimiter)
+zookeeper_hosts = default_string("/clusterHostInfo/zookeeper_hosts", [], delimiter)
+client_hosts = default_string("/clusterHostInfo/client_hosts", [], delimiter)
+hbase_master = default_string("/clusterHostInfo/hbase_master_hosts", [], delimiter)
+hbase_regionservers = default_string("/clusterHostInfo/hbase_rs_hosts", [], delimiter)
+flume_hosts = default_string("/clusterHostInfo/flume_hosts", [], delimiter)
+falcon_host = default_string("/clusterHostInfo/falcon_server_hosts", [], delimiter)
+knox_host = default_string("/clusterHostInfo/knox_host", [], delimiter)
+storm_nimbus = default_string("/clusterHostInfo/nimbus_hosts", [], delimiter)
+storm_supervisors = default_string("/clusterHostInfo/supervisor_hosts", [], delimiter)
+ambari_server_host = default_string("/clusterHostInfo/ambari_server_host", [], delimiter)
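
default_string is not a stock resource_management function; judging from the call sites above, it reads a host list out of the command JSON and flattens it into one delimiter-joined value for the cluster.properties template. A sketch of a helper with that assumed behaviour, built on the default() function already used in this file:

    def default_string(path, default_value, delimiter):
        # Assumed semantics, inferred from the call sites above: fetch the
        # host list at `path` (falling back to default_value) and join it
        # into a single delimiter-separated string.
        hosts = default(path, default_value)
        return delimiter.join(hosts)
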
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/after-INSTALL/templates/cluster.properties.j2 b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/after-INSTALL/templates/cluster.properties.j2
new file mode 100644
index 0000000000..187a52ee2d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/after-INSTALL/templates/cluster.properties.j2
@@ -0,0 +1,38 @@
+#Log directory
+HDP_LOG_DIR={{hdp_log_dir}}
+
+#Data directory
+HDP_DATA_DIR={{hdp_data_dir}}
+
+#hosts
+NAMENODE_HOST={{namenode_host}}
+SECONDARY_NAMENODE_HOST={{secondary_namenode_host}}
+RESOURCEMANAGER_HOST={{resourcemanager_host}}
+HIVE_SERVER_HOST={{hive_server_host}}
+OOZIE_SERVER_HOST={{oozie_server_host}}
+WEBHCAT_HOST={{webhcat_host}}
+SLAVE_HOSTS={{slave_hosts}}
+ZOOKEEPER_HOSTS={{zookeeper_hosts}}
+CLIENT_HOSTS={{client_hosts}}
+HBASE_MASTER={{hbase_master}}
+HBASE_REGIONSERVERS={{hbase_regionservers}}
+FLUME_HOSTS={{flume_hosts}}
+FALCON_HOST={{falcon_host}}
+KNOX_HOST={{knox_host}}
+STORM_NIMBUS={{storm_nimbus}}
+STORM_SUPERVISORS={{storm_supervisors}}
+
+#Database host
+DB_FLAVOR={{db_flavor}}
+DB_HOSTNAME={{db_hostname}}
+DB_PORT={{db_port}}
+
+#Hive properties
+HIVE_DB_NAME={{hive_db_name}}
+HIVE_DB_USERNAME={{hive_db_username}}
+HIVE_DB_PASSWORD={{hive_db_password}}
+
+#Oozie properties
+OOZIE_DB_NAME={{oozie_db_name}}
+OOZIE_DB_USERNAME={{oozie_db_username}}
+OOZIE_DB_PASSWORD={{oozie_db_password}}
\ No newline at end of file
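
Rendered against the parameters above, each host variable becomes a comma-joined list and the database values fall back to the MSSQL defaults from params.py. A fragment of plausible output (hostnames are hypothetical):

    NAMENODE_HOST=nn01.example.com
    SLAVE_HOSTS=dn01.example.com,dn02.example.com
    ZOOKEEPER_HOSTS=zk01.example.com,zk02.example.com,zk03.example.com
    DB_FLAVOR=MSSQL
    DB_HOSTNAME=localhost
    DB_PORT=1433
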
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-ANY/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-ANY/scripts/hook.py
new file mode 100644
index 0000000000..002051b6a6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-ANY/scripts/hook.py
@@ -0,0 +1,30 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from setup_jdk import setup_jdk
+class BeforeAnyHook(Hook):
+
+ def hook(self, env):
+ import params
+ env.set_params(params)
+ setup_jdk()
+
+if __name__ == "__main__":
+ BeforeAnyHook().execute()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-ANY/scripts/params.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-ANY/scripts/params.py
new file mode 100644
index 0000000000..70dd73886c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-ANY/scripts/params.py
@@ -0,0 +1,27 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+java_home = config['hostLevelParams']['java_home']
+jdk_name = default("/hostLevelParams/jdk_name", None) # None when jdk is already installed by user
+jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
+jce_location = config['hostLevelParams']['jdk_location']
+jdk_location = config['hostLevelParams']['jdk_location']
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-ANY/scripts/setup_jdk.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-ANY/scripts/setup_jdk.py
new file mode 100644
index 0000000000..d48bb523e9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-ANY/scripts/setup_jdk.py
@@ -0,0 +1,49 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import urllib2
+
+from ambari_commons.inet_utils import download_file
+from resource_management import *
+
+
+_install_cmd = '{} /s INSTALLDIR={} ADDLOCAL="ToolsFeature,SourceFeature"'
+
+
+def _check_installed():
+ import params
+ return os.path.exists(os.path.join(params.java_home, 'bin', 'java.exe'))
+
+
+def setup_jdk():
+ import params
+ if not params.jdk_name:
+ return
+ if _check_installed():
+ return
+
+ if not os.path.exists(params.java_home):
+ os.makedirs(params.java_home)
+ jdk_setup_savepath = os.path.join(params.java_home, params.jdk_name)
+ jdk_download_url = "{}/{}".format(params.jdk_location, params.jdk_name)
+ download_file(jdk_download_url, jdk_setup_savepath)
+ Execute(_install_cmd.format(jdk_setup_savepath, params.java_home))
+ if not _check_installed():
+    raise Fail("Error when installing jdk")
\ No newline at end of file
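
setup_jdk saves the installer next to the target JAVA_HOME and runs it silently through _install_cmd. With a hypothetical jdk_name of jdk-7u67-windows-x64.exe and a java_home of C:\java\jdk1.7.0_67, the executed command would be:

    C:\java\jdk1.7.0_67\jdk-7u67-windows-x64.exe /s INSTALLDIR=C:\java\jdk1.7.0_67 ADDLOCAL="ToolsFeature,SourceFeature"

Note that the format string adds no quoting, so a java_home containing spaces would need the arguments quoted.
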
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-INSTALL/scripts/hook.py
new file mode 100644
index 0000000000..81cfd78706
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-INSTALL/scripts/hook.py
@@ -0,0 +1,32 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+class BeforeInstallHook(Hook):
+
+ def hook(self, env):
+ import params
+
+ self.run_custom_hook('before-ANY')
+ env.set_params(params)
+
+if __name__ == "__main__":
+ BeforeInstallHook().execute()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-INSTALL/scripts/params.py
new file mode 100644
index 0000000000..eee2c2e323
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-INSTALL/scripts/params.py
@@ -0,0 +1,22 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-RESTART/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-RESTART/scripts/hook.py
new file mode 100644
index 0000000000..2731dd8821
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-RESTART/scripts/hook.py
@@ -0,0 +1,28 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+class BeforeRestartHook(Hook):
+
+ def hook(self, env):
+ self.run_custom_hook('before-START')
+
+if __name__ == "__main__":
+ BeforeRestartHook().execute()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-START/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-START/scripts/hook.py
new file mode 100644
index 0000000000..a81a09a146
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-START/scripts/hook.py
@@ -0,0 +1,33 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+class BeforeStartHook(Hook):
+
+ def hook(self, env):
+ import params
+
+ self.run_custom_hook('before-ANY')
+ self.run_custom_hook('after-INSTALL')
+ env.set_params(params)
+
+if __name__ == "__main__":
+ BeforeStartHook().execute()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-START/scripts/params.py
new file mode 100644
index 0000000000..eee2c2e323
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-START/scripts/params.py
@@ -0,0 +1,22 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/metainfo.xml
new file mode 100644
index 0000000000..ca45822604
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/metainfo.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <versions>
+ <active>true</active>
+ </versions>
+</metainfo>
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/repos/repoinfo.xml
new file mode 100644
index 0000000000..7f9e075ce9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/repos/repoinfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<reposinfo>
+ <os family="winsrv6">
+ <repo>
+ <baseurl>http://dummy_repo</baseurl>
+ <repoid>HDPWIN-2.1</repoid>
+ <reponame>HDPWIN</reponame>
+ </repo>
+ </os>
+</reposinfo>
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/role_command_order.json b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/role_command_order.json
new file mode 100644
index 0000000000..47cd50d448
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/role_command_order.json
@@ -0,0 +1,101 @@
+{
+ "_comment" : "Record format:",
+ "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
+ "general_deps" : {
+ "_comment" : "dependencies for all cases",
+ "NAGIOS_SERVER-INSTALL" : ["HIVE_CLIENT-INSTALL", "HCAT-INSTALL",
+ "MAPREDUCE_CLIENT-INSTALL", "OOZIE_CLIENT-INSTALL"],
+ "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"],
+ "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
+ "OOZIE_SERVER-START": ["JOBTRACKER-START", "TASKTRACKER-START"],
+ "WEBHCAT_SERVER-START": ["TASKTRACKER-START", "HIVE_SERVER-START"],
+ "HIVE_METASTORE-START": ["MYSQL_SERVER-START", "NAMENODE-START", "DATANODE-START", "SECONDARY_NAMENODE-START"],
+ "HIVE_SERVER-START": ["TASKTRACKER-START", "MYSQL_SERVER-START", "NAMENODE-START", "DATANODE-START", "SECONDARY_NAMENODE-START"],
+ "HUE_SERVER-START": ["HIVE_SERVER-START", "HCAT-START", "OOZIE_SERVER-START"],
+ "FLUME_HANDLER-START": ["OOZIE_SERVER-START"],
+ "NAGIOS_SERVER-START": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START",
+ "GANGLIA_SERVER-START", "GANGLIA_MONITOR-START", "HCAT-START",
+ "HIVE_SERVER-START", "HIVE_METASTORE-START", "HUE_SERVER-START",
+ "JOBTRACKER-START", "TASKTRACKER-START", "ZOOKEEPER_SERVER-START",
+ "MYSQL_SERVER-START", "OOZIE_SERVER-START", "PIG-START", "SQOOP-START",
+ "WEBHCAT_SERVER-START", "FLUME_HANDLER-START"],
+ "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
+ "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START", "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK"],
+ "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
+ "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", "HIVE_METASTORE-START", "WEBHCAT_SERVER-START"],
+ "PIG_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
+ "SQOOP_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
+ "ZOOKEEPER_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
+ "ZOOKEEPER_QUORUM_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
+ "ZOOKEEPER_SERVER-STOP" : ["HBASE_MASTER-STOP", "HBASE_REGIONSERVER-STOP"],
+ "HBASE_MASTER-STOP": ["HBASE_REGIONSERVER-STOP"],
+ "TASKTRACKER-UPGRADE": ["JOBTRACKER-UPGRADE"],
+ "MAPREDUCE_CLIENT-UPGRADE": ["TASKTRACKER-UPGRADE", "JOBTRACKER-UPGRADE"],
+ "ZOOKEEPER_SERVER-UPGRADE": ["MAPREDUCE_CLIENT-UPGRADE"],
+ "ZOOKEEPER_CLIENT-UPGRADE": ["ZOOKEEPER_SERVER-UPGRADE"],
+ "HBASE_MASTER-UPGRADE": ["ZOOKEEPER_CLIENT-UPGRADE"],
+ "HBASE_REGIONSERVER-UPGRADE": ["HBASE_MASTER-UPGRADE"],
+ "HBASE_CLIENT-UPGRADE": ["HBASE_REGIONSERVER-UPGRADE"],
+ "HIVE_SERVER-UPGRADE" : ["HBASE_CLIENT-UPGRADE"],
+ "HIVE_METASTORE-UPGRADE" : ["HIVE_SERVER-UPGRADE"],
+ "MYSQL_SERVER-UPGRADE": ["HIVE_METASTORE-UPGRADE"],
+ "HIVE_CLIENT-UPGRADE": ["MYSQL_SERVER-UPGRADE"],
+ "HCAT-UPGRADE": ["HIVE_CLIENT-UPGRADE"],
+ "OOZIE_SERVER-UPGRADE" : ["HCAT-UPGRADE"],
+ "OOZIE_CLIENT-UPGRADE" : ["OOZIE_SERVER-UPGRADE"],
+ "WEBHCAT_SERVER-UPGRADE" : ["OOZIE_CLIENT-UPGRADE"],
+ "PIG-UPGRADE" : ["WEBHCAT_SERVER-UPGRADE"],
+ "SQOOP-UPGRADE" : ["PIG-UPGRADE"],
+ "NAGIOS_SERVER-UPGRADE" : ["SQOOP-UPGRADE"],
+ "GANGLIA_SERVER-UPGRADE" : ["NAGIOS_SERVER-UPGRADE"],
+ "GANGLIA_MONITOR-UPGRADE" : ["GANGLIA_SERVER-UPGRADE"]
+ },
+ "_comment" : "GLUSTERFS-specific dependencies",
+ "optional_glusterfs": {
+ "HBASE_MASTER-START": ["PEERSTATUS-START"],
+ "JOBTRACKER-START": ["PEERSTATUS-START"],
+ "TASKTRACKER-START": ["PEERSTATUS-START"],
+ "GLUSTERFS_SERVICE_CHECK-SERVICE_CHECK": ["PEERSTATUS-START"],
+ "JOBTRACKER-UPGRADE": ["GLUSTERFS_CLIENT-UPGRADE"]
+ },
+ "_comment" : "Dependencies that are used when GLUSTERFS is not present in cluster",
+ "optional_no_glusterfs": {
+ "SECONDARY_NAMENODE-START": ["NAMENODE-START"],
+ "RESOURCEMANAGER-START": ["NAMENODE-START", "DATANODE-START"],
+ "NODEMANAGER-START": ["NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START"],
+ "HISTORYSERVER-START": ["NAMENODE-START", "DATANODE-START"],
+ "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START"],
+ "JOBTRACKER-START": ["NAMENODE-START", "DATANODE-START"],
+ "TASKTRACKER-START": ["NAMENODE-START", "DATANODE-START"],
+ "HIVE_METASTORE-START": ["NAMENODE-START", "DATANODE-START", "SECONDARY_NAMENODE-START"],
+ "HIVE_SERVER-START": ["NAMENODE-START", "DATANODE-START", "SECONDARY_NAMENODE-START"],
+ "WEBHCAT_SERVER-START": ["DATANODE-START"],
+ "NAGIOS_SERVER-START": ["NAMENODE-START", "SECONDARY_NAMENODE-START",
+ "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START", "HISTORYSERVER-START"],
+ "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START",
+ "SECONDARY_NAMENODE-START"],
+ "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START",
+ "RESOURCEMANAGER-START", "HISTORYSERVER-START", "YARN_SERVICE_CHECK-SERVICE_CHECK"],
+ "YARN_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
+ "RESOURCEMANAGER_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START"],
+ "PIG_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START", "NODEMANAGER-START"],
+ "NAMENODE-STOP": ["JOBTRACKER-STOP", "TASKTRACKER-STOP", "RESOURCEMANAGER-STOP",
+ "NODEMANAGER-STOP", "HISTORYSERVER-STOP", "HBASE_MASTER-STOP"],
+ "DATANODE-STOP": ["JOBTRACKER-STOP", "TASKTRACKER-STOP", "RESOURCEMANAGER-STOP",
+ "NODEMANAGER-STOP", "HISTORYSERVER-STOP", "HBASE_MASTER-STOP"],
+ "SECONDARY_NAMENODE-UPGRADE": ["NAMENODE-UPGRADE"],
+ "DATANODE-UPGRADE": ["SECONDARY_NAMENODE-UPGRADE"],
+ "HDFS_CLIENT-UPGRADE": ["DATANODE-UPGRADE"],
+ "JOBTRACKER-UPGRADE": ["HDFS_CLIENT-UPGRADE"]
+ },
+ "_comment" : "Dependencies that are used in HA NameNode cluster",
+ "namenode_optional_ha": {
+ "NAMENODE-START": ["ZKFC-START", "JOURNALNODE-START", "ZOOKEEPER_SERVER-START"],
+ "ZKFC-START": ["ZOOKEEPER_SERVER-START"],
+ "NAGIOS_SERVER-START": ["ZKFC-START", "JOURNALNODE-START"]
+ },
+ "_comment" : "Dependencies that are used in ResourceManager HA cluster",
+ "resourcemanager_optional_ha" : {
+ "RESOURCEMANAGER-START": ["ZOOKEEPER_SERVER-START"]
+ }
+}
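
As the _comment records spell out, each key names a blocked role-command and its value lists the role-commands that must complete first. A small sketch of collecting the transitive blockers for one command (merging sections with a plain dict update is an assumption made for illustration; Ambari's real merge logic lives in the Java server):

    import json

    def blockers(ordering, role_command, seen=None):
        # Transitively collect everything that must finish first.
        if seen is None:
            seen = set()
        for dep in ordering.get(role_command, []):
            if dep not in seen:
                seen.add(dep)
                blockers(ordering, dep, seen)
        return seen

    with open("role_command_order.json") as f:
        doc = json.load(f)
    ordering = dict(doc["general_deps"])
    ordering.update(doc["optional_no_glusterfs"])  # assuming no GLUSTERFS
    print(sorted(blockers(ordering, "HIVE_SERVER-START")))
    # -> ['DATANODE-START', 'NAMENODE-START', 'SECONDARY_NAMENODE-START']
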
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/falcon-env.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/falcon-env.xml
new file mode 100644
index 0000000000..0a120514a1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/falcon-env.xml
@@ -0,0 +1,109 @@
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+ <property>
+ <name>falcon_user</name>
+ <value>falcon</value>
+ <description>Falcon user.</description>
+ </property>
+ <property>
+ <name>falcon_port</name>
+ <value>15000</value>
+ <description>Port the Falcon Server listens on.</description>
+ </property>
+ <property>
+ <name>falcon_log_dir</name>
+ <value>/var/log/falcon</value>
+ <description>Falcon log directory.</description>
+ </property>
+ <property>
+ <name>falcon_pid_dir</name>
+ <value>/var/run/falcon</value>
+ <description>Falcon pid-file directory.</description>
+ </property>
+ <property>
+ <name>falcon_local_dir</name>
+ <value>/hadoop/falcon</value>
+ <description>Directory where Falcon data, such as activemq data, is stored.</description>
+ </property>
+ <!--embeddedmq properties-->
+ <property>
+ <name>falcon.embeddedmq.data</name>
+ <value>/hadoop/falcon/embeddedmq/data</value>
+ <description>Directory in which embeddedmq data is stored.</description>
+ </property>
+ <property>
+ <name>falcon.embeddedmq</name>
+ <value>true</value>
+ <description>Whether embeddedmq is enabled or not.</description>
+ </property>
+ <property>
+ <name>falcon.emeddedmq.port</name>
+ <value>61616</value>
+ <description>Port that embeddedmq will listen on.</description>
+ </property>
+
+ <!-- falcon-env.sh -->
+ <property>
+ <name>content</name>
+ <description>falcon-env.sh content</description>
+ <value>
+# The java implementation to use. If JAVA_HOME is not found we expect java and jar to be in the path
+export JAVA_HOME={{java_home}}
+
+# any additional java opts you want to set. This will apply to both client and server operations
+#export FALCON_OPTS=
+
+# any additional java opts that you want to set for client only
+#export FALCON_CLIENT_OPTS=
+
+# java heap size we want to set for the client. Default is 1024MB
+#export FALCON_CLIENT_HEAP=
+
+# any additional opts you want to set for the prism service.
+#export FALCON_PRISM_OPTS=
+
+# java heap size we want to set for the prism service. Default is 1024MB
+#export FALCON_PRISM_HEAP=
+
+# any additional opts you want to set for falcon service.
+export FALCON_SERVER_OPTS="-Dfalcon.embeddedmq={{falcon_embeddedmq_enabled}} -Dfalcon.emeddedmq.port={{falcon_emeddedmq_port}}"
+
+# java heap size we want to set for the falcon server. Default is 1024MB
+#export FALCON_SERVER_HEAP=
+
+# What is considered the falcon home dir. Default is the base location of the installed software
+#export FALCON_HOME_DIR=
+
+# Where log files are stored. Default is the logs directory under the base install location
+export FALCON_LOG_DIR={{falcon_log_dir}}
+
+# Where pid files are stored. Default is the logs directory under the base install location
+export FALCON_PID_DIR={{falcon_pid_dir}}
+
+# Where the falcon ActiveMQ data is stored. Default is the logs/data directory under the base install location
+export FALCON_DATA_DIR={{falcon_embeddedmq_data}}
+
+# Where do you want to expand the war file. By default it is in the /server/webapp dir under the base install dir.
+#export FALCON_EXPANDED_WEBAPP_DIR=
+ </value>
+ </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/falcon-runtime.properties.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/falcon-runtime.properties.xml
new file mode 100644
index 0000000000..94c875520d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/falcon-runtime.properties.xml
@@ -0,0 +1,47 @@
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false">
+ <property>
+ <name>*.domain</name>
+ <value>${falcon.app.type}</value>
+ <description></description>
+ </property>
+ <property>
+ <name>*.log.cleanup.frequency.minutes.retention</name>
+ <value>hours(6)</value>
+ <description></description>
+ </property>
+ <property>
+ <name>*.log.cleanup.frequency.hours.retention</name>
+ <value>minutes(1)</value>
+ <description></description>
+ </property>
+ <property>
+ <name>*.log.cleanup.frequency.days.retention</name>
+ <value>days(7)</value>
+ <description></description>
+ </property>
+ <property>
+ <name>*.log.cleanup.frequency.months.retention</name>
+ <value>months(3)</value>
+ <description></description>
+ </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/falcon-startup.properties.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/falcon-startup.properties.xml
new file mode 100644
index 0000000000..7459429ae9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/falcon-startup.properties.xml
@@ -0,0 +1,207 @@
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false">
+ <!--advanced properties-->
+ <property>
+ <name>*.workflow.engine.impl</name>
+ <value>org.apache.falcon.workflow.engine.OozieWorkflowEngine</value>
+ <description></description>
+ </property>
+ <property>
+ <name>*.oozie.process.workflow.builder</name>
+ <value>org.apache.falcon.workflow.OozieProcessWorkflowBuilder</value>
+ <description></description>
+ </property>
+ <property>
+ <name>*.oozie.feed.workflow.builder</name>
+ <value>org.apache.falcon.workflow.OozieFeedWorkflowBuilder</value>
+ <description></description>
+ </property>
+ <property>
+ <name>*.SchedulableEntityManager.impl</name>
+ <value>org.apache.falcon.resource.SchedulableEntityManager</value>
+ <description></description>
+ </property>
+ <property>
+ <name>*.ConfigSyncService.impl</name>
+ <value>org.apache.falcon.resource.ConfigSyncService</value>
+ <description></description>
+ </property>
+ <property>
+ <name>*.ProcessInstanceManager.impl</name>
+ <value>org.apache.falcon.resource.InstanceManager</value>
+ <description></description>
+ </property>
+ <property>
+ <name>*.catalog.service.impl</name>
+ <value>org.apache.falcon.catalog.HiveCatalogService</value>
+ <description></description>
+ </property>
+ <property>
+ <name>*.application.services</name>
+ <value>org.apache.falcon.security.AuthenticationInitializationService,\
+ org.apache.falcon.service.ProcessSubscriberService,\
+ org.apache.falcon.entity.store.ConfigurationStore,\
+ org.apache.falcon.rerun.service.RetryService,\
+ org.apache.falcon.rerun.service.LateRunService,\
+ org.apache.falcon.service.LogCleanupService
+ </value>
+ <description></description>
+ </property>
+ <property>
+ <name>*.configstore.listeners</name>
+ <value>org.apache.falcon.entity.v0.EntityGraph,\
+ org.apache.falcon.entity.ColoClusterRelation,\
+ org.apache.falcon.group.FeedGroupMap,\
+ org.apache.falcon.service.SharedLibraryHostingService
+ </value>
+ <description></description>
+ </property>
+ <property>
+ <name>*.broker.impl.class</name>
+ <value>org.apache.activemq.ActiveMQConnectionFactory</value>
+ <description></description>
+ </property>
+ <property>
+ <name>*.shared.libs</name>
+ <value>activemq-core,ant,geronimo-j2ee-management,hadoop-distcp,jms,json-simple,oozie-client,spring-jms</value>
+ <description></description>
+ </property>
+ <!--common properties-->
+ <property>
+ <name>*.domain</name>
+ <value>${falcon.app.type}</value>
+ <description></description>
+ </property>
+ <property>
+ <name>*.config.store.uri</name>
+ <value>file:///hadoop/falcon/store</value>
+ <description>Location to store user entity configurations</description>
+ </property>
+ <property>
+ <name>*.system.lib.location</name>
+ <value>${falcon.home}/server/webapp/${falcon.app.type}/WEB-INF/lib</value>
+    <description>Location of libraries that are shipped to Hadoop</description>
+ </property>
+ <property>
+ <name>*.retry.recorder.path</name>
+ <value>${falcon.log.dir}/retry</value>
+ <description></description>
+ </property>
+ <property>
+ <name>*.falcon.cleanup.service.frequency</name>
+ <value>days(1)</value>
+ <description></description>
+ </property>
+ <property>
+ <name>*.broker.url</name>
+ <value>tcp://localhost:61616</value>
+ <description>Default Active MQ url</description>
+ </property>
+ <property>
+ <name>*.broker.ttlInMins</name>
+ <value>4320</value>
+ <description></description>
+ </property>
+ <property>
+ <name>*.entity.topic</name>
+ <value>FALCON.ENTITY.TOPIC</value>
+ <description></description>
+ </property>
+ <property>
+ <name>*.max.retry.failure.count</name>
+ <value>1</value>
+ <description></description>
+ </property>
+ <property>
+ <name>*.internal.queue.size</name>
+ <value>1000</value>
+ <description></description>
+ </property>
+ <!--properties without default values-->
+ <property>
+ <name>*.falcon.http.authentication.cookie.domain</name>
+ <value>EXAMPLE.COM</value>
+ <description></description>
+ </property>
+ <property>
+ <name>*.falcon.http.authentication.blacklisted.users</name>
+ <value></value>
+    <description>Comma-separated list of blacklisted users</description>
+ </property>
+ <!--authentication properties-->
+ <property>
+ <name>*.falcon.authentication.type</name>
+ <value>simple</value>
+ <description></description>
+ </property>
+ <property>
+ <name>*.falcon.http.authentication.type</name>
+ <value>simple</value>
+ <description></description>
+ </property>
+ <property>
+ <name>*.falcon.http.authentication.token.validity</name>
+ <value>36000</value>
+ <description></description>
+ </property>
+ <property>
+ <name>*.falcon.http.authentication.signature.secret</name>
+ <value>falcon</value>
+ <description></description>
+ </property>
+ <property>
+ <name>*.falcon.http.authentication.simple.anonymous.allowed</name>
+ <value>true</value>
+ <description>Indicates if anonymous requests are allowed when using 'simple' authentication</description>
+ </property>
+ <property>
+ <name>*.falcon.http.authentication.kerberos.name.rules</name>
+ <value>DEFAULT</value>
+    <description>The kerberos name rules are used to resolve kerberos principal names; refer to Hadoop's KerberosName for more details.</description>
+ </property>
+ <!--kerberos params, must be set during security enabling-->
+ <property>
+ <name>*.falcon.service.authentication.kerberos.principal</name>
+ <value>falcon/_HOST@EXAMPLE.COM</value>
+ <description></description>
+ </property>
+ <property>
+ <name>*.falcon.service.authentication.kerberos.keytab</name>
+ <value>/etc/security/keytabs/falcon.service.keytab</value>
+ <description></description>
+ </property>
+ <property>
+ <name>*.dfs.namenode.kerberos.principal</name>
+ <value>nn/_HOST@EXAMPLE.COM</value>
+ <description>name node principal to talk to config store</description>
+ </property>
+ <property>
+ <name>*.falcon.http.authentication.kerberos.principal</name>
+ <value>HTTP/_HOST@EXAMPLE.COM</value>
+ <description>Indicates the Kerberos principal to be used for HTTP endpoint</description>
+ </property>
+ <property>
+ <name>*.falcon.http.authentication.kerberos.keytab</name>
+ <value>/etc/security/keytabs/spnego.service.keytab</value>
+ <description>Location of the keytab file with the credentials for the HTTP principal</description>
+ </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/oozie-site.xml
new file mode 100644
index 0000000000..4b0bf70846
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/oozie-site.xml
@@ -0,0 +1,167 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements. See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership. The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<configuration supports_final="true">
+ <property>
+ <name>oozie.service.ELService.ext.functions.coord-job-submit-instances</name>
+ <value>
+ now=org.apache.oozie.extensions.OozieELExtensions#ph1_now_echo,
+ today=org.apache.oozie.extensions.OozieELExtensions#ph1_today_echo,
+ yesterday=org.apache.oozie.extensions.OozieELExtensions#ph1_yesterday_echo,
+ currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_currentMonth_echo,
+ lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_lastMonth_echo,
+ currentYear=org.apache.oozie.extensions.OozieELExtensions#ph1_currentYear_echo,
+ lastYear=org.apache.oozie.extensions.OozieELExtensions#ph1_lastYear_echo,
+ formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo,
+ latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,
+ future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo
+ </value>
+ <description>
+ EL function declarations, separated by commas; the format is [PREFIX:]NAME=CLASS#METHOD.
+ This property is a convenience property to add extensions to the built-in executors without having to
+ include all the built-in ones.
+ </description>
+ </property>
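+ <!--For example, the entry now=org.apache.oozie.extensions.OozieELExtensions#ph1_now_echo
+ above registers an EL function named now() that Oozie resolves by invoking the
+ static method ph1_now_echo on the class org.apache.oozie.extensions.OozieELExtensions.-->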
+
+ <property>
+ <name>oozie.service.ELService.ext.functions.coord-action-create-inst</name>
+ <value>
+ now=org.apache.oozie.extensions.OozieELExtensions#ph2_now_inst,
+ today=org.apache.oozie.extensions.OozieELExtensions#ph2_today_inst,
+ yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday_inst,
+ currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth_inst,
+ lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth_inst,
+ currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear_inst,
+ lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear_inst,
+ latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,
+ future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo,
+ formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime,
+ user=org.apache.oozie.coord.CoordELFunctions#coord_user
+ </value>
+ <description>
+ EL function declarations, separated by commas; the format is [PREFIX:]NAME=CLASS#METHOD.
+ This property is a convenience property to add extensions to the built-in executors without having to
+ include all the built-in ones.
+ </description>
+ </property>
+
+ <property>
+ <name>oozie.service.ELService.ext.functions.coord-action-create</name>
+ <value>
+ now=org.apache.oozie.extensions.OozieELExtensions#ph2_now,
+ today=org.apache.oozie.extensions.OozieELExtensions#ph2_today,
+ yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday,
+ currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth,
+ lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth,
+ currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear,
+ lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear,
+ latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,
+ future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo,
+ formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime,
+ user=org.apache.oozie.coord.CoordELFunctions#coord_user
+ </value>
+ <description>
+ EL function declarations, separated by commas; the format is [PREFIX:]NAME=CLASS#METHOD.
+ This property is a convenience property to add extensions to the built-in executors without having to
+ include all the built-in ones.
+ </description>
+ </property>
+
+ <property>
+ <name>oozie.service.ELService.ext.functions.coord-job-submit-data</name>
+ <value>
+ now=org.apache.oozie.extensions.OozieELExtensions#ph1_now_echo,
+ today=org.apache.oozie.extensions.OozieELExtensions#ph1_today_echo,
+ yesterday=org.apache.oozie.extensions.OozieELExtensions#ph1_yesterday_echo,
+ currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_currentMonth_echo,
+ lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_lastMonth_echo,
+ currentYear=org.apache.oozie.extensions.OozieELExtensions#ph1_currentYear_echo,
+ lastYear=org.apache.oozie.extensions.OozieELExtensions#ph1_lastYear_echo,
+ dataIn=org.apache.oozie.extensions.OozieELExtensions#ph1_dataIn_echo,
+ instanceTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_wrap,
+ formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo,
+ dateOffset=org.apache.oozie.coord.CoordELFunctions#ph1_coord_dateOffset_echo,
+ user=org.apache.oozie.coord.CoordELFunctions#coord_user
+ </value>
+ <description>
+ EL function declarations, separated by commas; the format is [PREFIX:]NAME=CLASS#METHOD.
+ This property is a convenience property to add extensions to the built-in executors without having to
+ include all the built-in ones.
+ </description>
+ </property>
+
+ <property>
+ <name>oozie.service.ELService.ext.functions.coord-action-start</name>
+ <value>
+ now=org.apache.oozie.extensions.OozieELExtensions#ph2_now,
+ today=org.apache.oozie.extensions.OozieELExtensions#ph2_today,
+ yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday,
+ currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth,
+ lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth,
+ currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear,
+ lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear,
+ latest=org.apache.oozie.coord.CoordELFunctions#ph3_coord_latest,
+ future=org.apache.oozie.coord.CoordELFunctions#ph3_coord_future,
+ dataIn=org.apache.oozie.extensions.OozieELExtensions#ph3_dataIn,
+ instanceTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_nominalTime,
+ dateOffset=org.apache.oozie.coord.CoordELFunctions#ph3_coord_dateOffset,
+ formatTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_formatTime,
+ user=org.apache.oozie.coord.CoordELFunctions#coord_user
+ </value>
+ <description>
+ EL function declarations, separated by commas; the format is [PREFIX:]NAME=CLASS#METHOD.
+ This property is a convenience property to add extensions to the built-in executors without having to
+ include all the built-in ones.
+ </description>
+ </property>
+
+ <property>
+ <name>oozie.service.ELService.ext.functions.coord-sla-submit</name>
+ <value>
+ instanceTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_fixed,
+ user=org.apache.oozie.coord.CoordELFunctions#coord_user
+ </value>
+ <description>
+ EL function declarations, separated by commas; the format is [PREFIX:]NAME=CLASS#METHOD.
+ </description>
+ </property>
+
+ <property>
+ <name>oozie.service.ELService.ext.functions.coord-sla-create</name>
+ <value>
+ instanceTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_nominalTime,
+ user=org.apache.oozie.coord.CoordELFunctions#coord_user
+ </value>
+ <description>
+ EL function declarations, separated by commas; the format is [PREFIX:]NAME=CLASS#METHOD.
+ </description>
+ </property>
+ <!--The web UI should add the following properties to oozie-site according to FALCON_USER-->
+ <!--<property>-->
+ <!--<name>oozie.service.ProxyUserService.proxyuser.#FALCON_USER#.hosts</name>-->
+ <!--<value>*</value>-->
+ <!--<description>Falcon proxyuser hosts</description>-->
+ <!--</property>-->
+
+ <!--<property>-->
+ <!--<name>oozie.service.ProxyUserService.proxyuser.#FALCON_USER#.groups</name>-->
+ <!--<value>*</value>-->
+ <!--<description>Falcon proxyuser groups</description>-->
+ <!--</property>-->
+</configuration>
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/metainfo.xml
new file mode 100644
index 0000000000..7938777037
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/metainfo.xml
@@ -0,0 +1,106 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>FALCON</name>
+ <displayName>Falcon</displayName>
+ <comment>Data management and processing platform</comment>
+ <version>0.5.0.2.1</version>
+ <components>
+ <component>
+ <name>FALCON_CLIENT</name>
+ <displayName>Falcon Client</displayName>
+ <category>CLIENT</category>
+ <cardinality>1+</cardinality>
+ <commandScript>
+ <script>scripts/falcon_client.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ <configFiles>
+ <configFile>
+ <type>env</type>
+ <fileName>falcon-env.cmd</fileName>
+ <dictionaryName>falcon-env</dictionaryName>
+ </configFile>
+ <configFile>
+ <type>xml</type>
+ <fileName>runtime.properties</fileName>
+ <dictionaryName>falcon-runtime.properties</dictionaryName>
+ </configFile>
+ <configFile>
+ <type>xml</type>
+ <fileName>startup.properties</fileName>
+ <dictionaryName>falcon-startup.properties</dictionaryName>
+ </configFile>
+ </configFiles>
+ </component>
+ <component>
+ <name>FALCON_SERVER</name>
+ <displayName>Falcon Server</displayName>
+ <category>MASTER</category>
+ <cardinality>1</cardinality>
+ <dependencies>
+ <dependency>
+ <name>OOZIE/OOZIE_SERVER</name>
+ <scope>cluster</scope>
+ <auto-deploy>
+ <enabled>true</enabled>
+ </auto-deploy>
+ </dependency>
+ <dependency>
+ <name>OOZIE/OOZIE_CLIENT</name>
+ <scope>cluster</scope>
+ <auto-deploy>
+ <enabled>true</enabled>
+ </auto-deploy>
+ </dependency>
+ </dependencies>
+ <commandScript>
+ <script>scripts/falcon_server.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ </component>
+ </components>
+
+ <commandScript>
+ <script>scripts/service_check.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>300</timeout>
+ </commandScript>
+
+ <requiredServices>
+ <service>OOZIE</service>
+ </requiredServices>
+
+ <configuration-dependencies>
+ <config-type>falcon-env</config-type>
+ <config-type>falcon-startup.properties</config-type>
+ <config-type>falcon-runtime.properties</config-type>
+ </configuration-dependencies>
+
+ <excluded-config-types>
+ <config-type>oozie-site</config-type>
+ </excluded-config-types>
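+ <!--oozie-site is excluded above so that Ambari does not surface it as a
+ FALCON-managed config type; the Falcon-related Oozie EL extensions are
+ defined here, but the config type itself belongs to the OOZIE service.-->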
+
+ </service>
+ </services>
+</metainfo>
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/package/scripts/falcon.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/package/scripts/falcon.py
new file mode 100644
index 0000000000..a0b182cfa8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/package/scripts/falcon.py
@@ -0,0 +1,41 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management import *
+
+def falcon():
+ import params
+
+ env = Environment.get_instance()
+ # These 2 parameters are used in ../templates/client.properties.j2
+ env.config.params["falcon_host"] = params.falcon_host
+ env.config.params["falcon_port"] = params.falcon_port
+
+ File(os.path.join(params.falcon_conf_dir, 'falcon-env.sh'),
+ content=InlineTemplate(params.falcon_env_sh_template)
+ )
+ File(os.path.join(params.falcon_conf_dir, 'client.properties'),
+ content=Template('client.properties.j2')
+ )
+ PropertiesFile(os.path.join(params.falcon_conf_dir, 'runtime.properties'),
+ properties=params.falcon_runtime_properties
+ )
+ PropertiesFile(os.path.join(params.falcon_conf_dir, 'startup.properties'),
+ properties=params.falcon_startup_properties
+ )
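+
+# A minimal usage sketch (assuming it runs inside an Ambari command context
+# where Script.get_config() is populated), e.g. from falcon_server.py:
+#
+#   from falcon import falcon
+#   falcon()  # renders falcon-env.sh, client.properties,
+#             # runtime.properties and startup.properties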
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/package/scripts/falcon_client.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/package/scripts/falcon_client.py
new file mode 100644
index 0000000000..a5d47daf13
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/package/scripts/falcon_client.py
@@ -0,0 +1,37 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+class FalconClient(Script):
+ def install(self, env):
+ import params
+ if params.falcon_home is None:
+ self.install_packages(env)
+ self.configure(env)
+
+ def configure(self, env):
+ import params
+ env.set_params(params)
+
+ def status(self, env):
+ raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+ FalconClient().execute()
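+
+# Script.execute() reads the command name passed on the command line by the
+# Ambari agent and dispatches to the matching method above, e.g. an INSTALL
+# command ends up calling FalconClient.install(env).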
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/package/scripts/falcon_server.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/package/scripts/falcon_server.py
new file mode 100644
index 0000000000..225456b466
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/package/scripts/falcon_server.py
@@ -0,0 +1,44 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from falcon import falcon
+import service_mapping
+
+class FalconServer(Script):
+ def install(self, env):
+ if not check_windows_service_exists(service_mapping.falcon_win_service_name):
+ self.install_packages(env)
+
+ def start(self, env):
+ import params
+ self.configure(env)
+ Service(service_mapping.falcon_win_service_name, action="start")
+
+ def stop(self, env):
+ Service(service_mapping.falcon_win_service_name, action="stop")
+
+ def configure(self, env):
+ falcon()
+
+ def status(self, env):
+ check_windows_service_status(service_mapping.falcon_win_service_name)
+
+if __name__ == "__main__":
+ FalconServer().execute()
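+
+# On Windows the Falcon daemon runs as the "falcon" Windows service (see
+# service_mapping.py), so start/stop/status above delegate to the Windows
+# service control manager instead of launching the process directly.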
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/package/scripts/params.py
new file mode 100644
index 0000000000..815f45b188
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/package/scripts/params.py
@@ -0,0 +1,45 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import os
+
+config = Script.get_config()
+falcon_runtime_properties = config['configurations']['falcon-runtime.properties']
+falcon_startup_properties = config['configurations']['falcon-startup.properties']
+falcon_env_sh_template = config['configurations']['falcon-env']['content']
+
+falcon_host = config['clusterHostInfo']['falcon_server_hosts'][0]
+falcon_port = config['configurations']['falcon-env']['falcon_port']
+
+falcon_conf_dir = "."
+falcon_data_dir = "."
+falcon_home = None
+falcon_log_dir = "."
+
+hdp_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"], ".."))
+
+if os.environ.has_key("FALCON_CONF_DIR"):
+ falcon_conf_dir = os.environ["FALCON_CONF_DIR"]
+if os.environ.has_key("FALCON_DATA_DIR"):
+ falcon_data_dir = os.environ["FALCON_DATA_DIR"]
+if os.environ.has_key("FALCON_HOME"):
+ falcon_home = os.environ["FALCON_HOME"]
+if os.environ.has_key("FALCON_LOG_DIR"):
+ falcon_log_dir = os.environ["FALCON_LOG_DIR"]
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/package/scripts/service_check.py
new file mode 100644
index 0000000000..3dcfb5bce4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/package/scripts/service_check.py
@@ -0,0 +1,33 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management import *
+
+
+class FalconServiceCheck(Script):
+
+ def service_check(self, env):
+ import params
+ env.set_params(params)
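+ # HDP for Windows ships a Run-SmokeTests.cmd driver at the HDP root;
+ # format() below interpolates the local variables smoke_cmd and service
+ # into the command string before Execute runs it.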
+ smoke_cmd = os.path.join(params.hdp_root, "Run-SmokeTests.cmd")
+ service = "FALCON"
+ Execute(format("cmd /C {smoke_cmd} {service}"), logoutput=True)
+
+if __name__ == "__main__":
+ FalconServiceCheck().execute()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/package/scripts/service_mapping.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/package/scripts/service_mapping.py
new file mode 100644
index 0000000000..fc4d9f92ec
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/package/scripts/service_mapping.py
@@ -0,0 +1,20 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+falcon_win_service_name = "falcon"
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/package/templates/client.properties.j2 b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/package/templates/client.properties.j2
new file mode 100644
index 0000000000..63749db702
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/package/templates/client.properties.j2
@@ -0,0 +1,42 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#########################################################################
+########## This is used for falcon packaging only. ###################
+## Uses default port. Please change if configured for non-default port ##
+#########################################################################
+
+falcon.url=http://{{falcon_host}}:{{falcon_port}}/
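+
+# Example rendered output (hypothetical host, Falcon's default port 15000):
+#   falcon.url=http://falconhost.example.com:15000/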
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-env.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-env.xml
new file mode 100644
index 0000000000..5638505044
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-env.xml
@@ -0,0 +1,127 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+ <property>
+ <name>hbase_log_dir</name>
+ <value>/var/log/hbase</value>
+ <description>Log Directories for HBase.</description>
+ </property>
+ <property>
+ <name>hbase_pid_dir</name>
+ <value>/var/run/hbase</value>
+ <description>Pid Directory for HBase.</description>
+ </property>
+ <property>
+ <name>hbase_regionserver_heapsize</name>
+ <value>1024</value>
+ <description>HBase RegionServer Heap Size.</description>
+ </property>
+ <property>
+ <name>hbase_regionserver_xmn_max</name>
+ <value>512</value>
+ <description>Maximum value for the HBase RegionServer young generation (-Xmn) size.</description>
+ </property>
+ <property>
+ <name>hbase_regionserver_xmn_ratio</name>
+ <value>0.2</value>
+ <description>The HBase RegionServer young generation (-Xmn) size is calculated as this ratio of the maximum heap size.</description>
+ </property>
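+ <!--Example: with the defaults above, a 1024 MB RegionServer heap gets a
+ young generation of about 0.2 * 1024 = 205 MB, well below the 512 MB cap
+ set by hbase_regionserver_xmn_max.-->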
+ <property>
+ <name>hbase_master_heapsize</name>
+ <value>1024</value>
+ <description>HBase Master Heap Size</description>
+ </property>
+ <property>
+ <name>hbase_user</name>
+ <value>hbase</value>
+ <property-type>USER</property-type>
+ <description>HBase User Name.</description>
+ </property>
+
+ <!-- hbase-env.cmd -->
+ <property>
+ <name>content</name>
+ <description>This is the Jinja template for the hbase-env.cmd file</description>
+ <value>
+@rem Set environment variables here.
+
+@rem The java implementation to use. Java 1.6 required.
+set JAVA_HOME=%JAVA_HOME%
+
+@rem Extra Java CLASSPATH elements. Optional.
+if defined HADOOP_CONF_DIR (
+ set HBASE_CLASSPATH=%HBASE_CLASSPATH%;%HADOOP_CONF_DIR%
+) else if defined HADOOP_HOME (
+ set HBASE_CLASSPATH=%HBASE_CLASSPATH%;%HADOOP_HOME%\conf;%HADOOP_HOME%\etc\hadoop
+)
+
+@rem The maximum amount of heap to use, in MB. Default is 1000.
+@rem set HBASE_HEAPSIZE=1000
+
+@rem Extra Java runtime options.
+@rem Below are what we set by default. May only work with SUN JVM.
+@rem For more on why as well as other possible settings,
+@rem see http://wiki.apache.org/hadoop/PerformanceTuning
+@rem JDK6 on Windows has a known bug for IPv6, use preferIPv4Stack unless JDK7.
+@rem See TestIPv6NIOServerSocketChannel.
+set HBASE_OPTS="-XX:+UseConcMarkSweepGC" "-XX:CMSInitiatingOccupancyFraction=70" "-Djava.net.preferIPv4Stack=true" "-XX:+ForceTimeHighResolution"
+
+@rem Uncomment below to enable java garbage collection logging for the server-side processes
+@rem this enables basic gc logging for the server processes to the .out file
+set SERVER_GC_OPTS="-verbose:gc" "-XX:+PrintGCDetails" "-XX:+PrintGCDateStamps" %HBASE_GC_OPTS%
+
+@rem this enables gc logging using automatic GC log rolling. Only applies to jdk 1.6.0_34+ and 1.7.0_2+. Either use this set of options or the one above
+@rem set SERVER_GC_OPTS="-verbose:gc" "-XX:+PrintGCDetails" "-XX:+PrintGCDateStamps" "-XX:+UseGCLogFileRotation" "-XX:NumberOfGCLogFiles=1" "-XX:GCLogFileSize=512M" %HBASE_GC_OPTS%
+
+@rem Uncomment below to enable java garbage collection logging for the client processes in the .out file.
+@rem set CLIENT_GC_OPTS="-verbose:gc" "-XX:+PrintGCDetails" "-XX:+PrintGCDateStamps" %HBASE_GC_OPTS%
+
+@rem Uncomment below (along with above GC logging) to put GC information in its own logfile (will set HBASE_GC_OPTS)
+set HBASE_USE_GC_LOGFILE=true
+
+@rem Uncomment and adjust to enable JMX exporting
+@rem See jmxremote.password and jmxremote.access in %JRE_HOME%\lib\management to configure remote password access.
+@rem More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
+@rem
+@rem set HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false" "-Dcom.sun.management.jmxremote.authenticate=false"
+@rem set HBASE_MASTER_OPTS=%HBASE_JMX_BASE% "-Dcom.sun.management.jmxremote.port=10101"
+@rem set HBASE_REGIONSERVER_OPTS=%HBASE_JMX_BASE% "-Dcom.sun.management.jmxremote.port=10102"
+@rem set HBASE_THRIFT_OPTS=%HBASE_JMX_BASE% "-Dcom.sun.management.jmxremote.port=10103"
+@rem set HBASE_ZOOKEEPER_OPTS=%HBASE_JMX_BASE% "-Dcom.sun.management.jmxremote.port=10104"
+
+@rem File naming hosts on which HRegionServers will run. %HBASE_HOME%\conf\regionservers by default.
+@rem set HBASE_REGIONSERVERS=%HBASE_HOME%\conf\regionservers
+
+@rem Where log files are stored. $HBASE_HOME/logs by default.
+@rem set HBASE_LOG_DIR=%HBASE_HOME%\logs
+
+@rem A string representing this instance of hbase. $USER by default.
+@rem set HBASE_IDENT_STRING=%USERNAME%
+
+@rem Seconds to sleep between slave commands. Unset by default. This
+@rem can be useful in large clusters, where, e.g., slave rsyncs can
+@rem otherwise arrive faster than the master can service them.
+@rem set HBASE_SLAVE_SLEEP=0.1
+
+@rem Tell HBase whether it should manage its own instance of ZooKeeper or not.
+@rem set HBASE_MANAGES_ZK=true
+ </value>
+ </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-policy.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-policy.xml
new file mode 100644
index 0000000000..b0807b6cf2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-policy.xml
@@ -0,0 +1,53 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="true">
+ <property>
+ <name>security.client.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for HRegionInterface protocol implementations (i.e.,
+ clients talking to HRegionServers).
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank, e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.admin.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for HMasterInterface protocol implementation (i.e.,
+ clients talking to HMaster for admin operations).
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank, e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.masterregion.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for HMasterRegionInterface protocol implementations
+ (for HRegionServers communicating with HMaster).
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank, e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-site.xml
new file mode 100644
index 0000000000..cbaaacdc38
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-site.xml
@@ -0,0 +1,318 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+ <property>
+ <name>hbase.rootdir</name>
+ <value>hdfs://localhost:8020/apps/hbase/data</value>
+ <description>The directory shared by region servers and into
+ which HBase persists. The URL should be 'fully-qualified'
+ to include the filesystem scheme. For example, to specify the
+ HDFS directory '/hbase' where the HDFS instance's namenode is
+ running at namenode.example.org on port 9000, set this value to:
+ hdfs://namenode.example.org:9000/hbase. By default HBase writes
+ into /tmp. Change this configuration, or all data will be lost
+ on machine restart.
+ </description>
+ </property>
+ <property>
+ <name>hbase.cluster.distributed</name>
+ <value>true</value>
+ <description>The mode the cluster will be in. Possible values are
+ false for standalone mode and true for distributed mode. If
+ false, startup will run all HBase and ZooKeeper daemons together
+ in the one JVM.
+ </description>
+ </property>
+ <property>
+ <name>hbase.master.port</name>
+ <value>60000</value>
+ <description>The port the HBase Master should bind to.</description>
+ </property>
+ <property >
+ <name>hbase.tmp.dir</name>
+ <value>${java.io.tmpdir}/hbase-${user.name}</value>
+ <description>Temporary directory on the local filesystem.
+ Change this setting to point to a location more permanent
+ than '/tmp', the usual value of java.io.tmpdir, as the
+ '/tmp' directory is cleared on machine restart.</description>
+ </property>
+ <property>
+ <name>hbase.local.dir</name>
+ <value>${hbase.tmp.dir}/local</value>
+ <description>Directory on the local filesystem to be used as a local storage
+ </description>
+ </property>
+ <property>
+ <name>hbase.master.info.bindAddress</name>
+ <value>0.0.0.0</value>
+ <description>The bind address for the HBase Master web UI
+ </description>
+ </property>
+ <property>
+ <name>hbase.master.info.port</name>
+ <value>60010</value>
+ <description>The port for the HBase Master web UI.</description>
+ </property>
+ <property>
+ <name>hbase.regionserver.info.port</name>
+ <value>60030</value>
+ <description>The port for the HBase RegionServer web UI.</description>
+ </property>
+ <property>
+ <name>hbase.regionserver.global.memstore.upperLimit</name>
+ <value>0.4</value>
+ <description>Maximum size of all memstores in a region server before new
+ updates are blocked and flushes are forced. Defaults to 40% of heap
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.handler.count</name>
+ <value>30</value>
+ <description>Count of RPC Listener instances spun up on RegionServers.
+ Same property is used by the Master for count of master handlers.
+ </description>
+ </property>
+ <property>
+ <name>hbase.hregion.majorcompaction</name>
+ <value>604800000</value>
+ <description>The time (in milliseconds) between 'major' compactions of all
+ HStoreFiles in a region. Default: set to 7 days. Major compactions tend to
+ happen exactly when you need them least, so schedule them to run at
+ off-peak times for your deployment; or, since this setting is on a periodicity that is
+ unlikely to match your loading, run the compactions via an external
+ invocation out of a cron job or some such.
+ </description>
+ </property>
+
+ <property>
+ <name>hbase.regionserver.global.memstore.lowerLimit</name>
+ <value>0.38</value>
+ <description>When memstores are being forced to flush to make room in
+ memory, keep flushing until we hit this mark. Defaults to 38% of heap.
+ Setting this value equal to hbase.regionserver.global.memstore.upperLimit causes
+ the minimum possible flushing to occur when updates are blocked due to
+ memstore limiting.
+ </description>
+ </property>
+ <property>
+ <name>hbase.hregion.memstore.block.multiplier</name>
+ <value>2</value>
+ <description>Block updates if the memstore reaches hbase.hregion.memstore.block.multiplier
+ times hbase.hregion.memstore.flush.size bytes. Useful for preventing a
+ runaway memstore during spikes in update traffic. Without an
+ upper-bound, memstore fills such that when it flushes the
+ resultant flush files take a long time to compact or split, or
+ worse, we OOME
+ </description>
+ </property>
+ <property>
+ <name>hbase.hregion.memstore.flush.size</name>
+ <value>134217728</value>
+ <description>
+ Memstore will be flushed to disk if size of the memstore
+ exceeds this number of bytes. Value is checked by a thread that runs
+ every hbase.server.thread.wakefrequency.
+ </description>
+ </property>
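+ <!--Example: with the values above, updates to a region are blocked once its
+ memstore reaches 2 * 134217728 = 268435456 bytes (256 MB).-->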
+ <property>
+ <name>hbase.hregion.memstore.mslab.enabled</name>
+ <value>true</value>
+ <description>
+ Enables the MemStore-Local Allocation Buffer,
+ a feature which works to prevent heap fragmentation under
+ heavy write loads. This can reduce the frequency of stop-the-world
+ GC pauses on large heaps.
+ </description>
+ </property>
+ <property>
+ <name>hbase.hregion.max.filesize</name>
+ <value>10737418240</value>
+ <description>
+ Maximum HStoreFile size. If any one of a column family's HStoreFiles has
+ grown to exceed this value, the hosting HRegion is split in two.
+ Default: 10G.
+ </description>
+ </property>
+ <property>
+ <name>hbase.client.scanner.caching</name>
+ <value>100</value>
+ <description>Number of rows that will be fetched when calling next
+ on a scanner if it is not served from (local, client) memory. Higher
+ caching values will enable faster scanners but will eat up more memory
+ and some calls of next may take longer and longer times when the cache is empty.
+ Do not set this value such that the time between invocations is greater
+ than the scanner timeout; i.e. hbase.regionserver.lease.period
+ </description>
+ </property>
+ <property>
+ <name>zookeeper.session.timeout</name>
+ <value>90000</value>
+ <description>ZooKeeper session timeout.
+ HBase passes this to the zk quorum as suggested maximum time for a
+ session (This setting becomes zookeeper's 'maxSessionTimeout'). See
+ http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
+ "The client sends a requested timeout, the server responds with the
+ timeout that it can give the client. " In milliseconds.
+ </description>
+ </property>
+ <property>
+ <name>hbase.client.keyvalue.maxsize</name>
+ <value>10485760</value>
+ <description>Specifies the combined maximum allowed size of a KeyValue
+ instance. This is to set an upper boundary for a single entry saved in a
+ storage file. Since entries cannot be split, this helps avoid a region that
+ cannot be split any further because the data is too large. It seems wise
+ to set this to a fraction of the maximum region size. Setting it to zero
+ or less disables the check.
+ </description>
+ </property>
+ <property>
+ <name>hbase.hstore.compactionThreshold</name>
+ <value>3</value>
+ <description>
+ If more than this number of HStoreFiles in any one HStore
+ (one HStoreFile is written per flush of memstore) then a compaction
+ is run to rewrite all HStoreFiles files as one. Larger numbers
+ put off compaction but when it runs, it takes longer to complete.
+ </description>
+ </property>
+ <property>
+ <name>hbase.hstore.flush.retries.number</name>
+ <value>120</value>
+ <description>
+ The number of times the region flush operation will be retried.
+ </description>
+ </property>
+
+ <property>
+ <name>hbase.hstore.blockingStoreFiles</name>
+ <value>10</value>
+ <description>
+ If more than this number of StoreFiles in any one Store
+ (one StoreFile is written per flush of MemStore) then updates are
+ blocked for this HRegion until a compaction is completed, or
+ until hbase.hstore.blockingWaitTime has been exceeded.
+ </description>
+ </property>
+ <property>
+ <name>hfile.block.cache.size</name>
+ <value>0.4</value>
+ <description>
+ Percentage of maximum heap (-Xmx setting) to allocate to block cache
+ used by HFile/StoreFile. Default of 0.25 means allocate 25%.
+ Set to 0 to disable, but this is not recommended.
+ </description>
+ </property>
+
+
+ <!-- Additional configuration specific to HBase security -->
+ <property>
+ <name>hbase.security.authentication</name>
+ <value>simple</value>
+ <description> Controls whether or not secure authentication is enabled for HBase. Possible values are 'simple'
+ (no authentication), and 'kerberos'.
+ </description>
+ </property>
+
+ <property>
+ <name>hbase.security.authorization</name>
+ <value>false</value>
+ <description>Enables HBase authorization. Set the value of this property to false to disable HBase authorization.
+ </description>
+ </property>
+
+ <property>
+ <name>hbase.coprocessor.region.classes</name>
+ <value></value>
+ <description>A comma-separated list of Coprocessors that are loaded by
+ default on all tables. For any override coprocessor method, these classes
+ will be called in order. After implementing your own Coprocessor, just put
+ it in HBase's classpath and add the fully qualified class name here.
+ A coprocessor can also be loaded on demand by setting HTableDescriptor.
+ </description>
+ </property>
+
+ <property>
+ <name>hbase.coprocessor.master.classes</name>
+ <value></value>
+ <description>A comma-separated list of
+ org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
+ loaded by default on the active HMaster process. For any implemented
+ coprocessor methods, the listed classes will be called in order. After
+ implementing your own MasterObserver, just put it in HBase's classpath
+ and add the fully qualified class name here.
+ </description>
+ </property>
+
+ <property>
+ <name>hbase.zookeeper.property.clientPort</name>
+ <value>2181</value>
+ <description>Property from ZooKeeper's config zoo.cfg.
+ The port at which the clients will connect.
+ </description>
+ </property>
+
+ <!--
+ The following three properties are used together to create the list of
+ host:peer_port:leader_port quorum servers for ZooKeeper.
+ -->
+ <property>
+ <name>hbase.zookeeper.quorum</name>
+ <value>localhost</value>
+ <description>Comma separated list of servers in the ZooKeeper Quorum.
+ For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
+ By default this is set to localhost for local and pseudo-distributed modes
+ of operation. For a fully-distributed setup, this should be set to a full
+ list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
+ this is the list of servers which we will start/stop ZooKeeper on.
+ </description>
+ </property>
+ <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
+
+ <property>
+ <name>hbase.zookeeper.useMulti</name>
+ <value>false</value>
+ <description>Instructs HBase to make use of ZooKeeper's multi-update functionality.
+ This allows certain ZooKeeper operations to complete more quickly and prevents some issues
+ with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).
+ IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+
+ and will not be downgraded. ZooKeeper versions before 3.4 do not support multi-update and will
+ not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
+ </description>
+ </property>
+ <property>
+ <name>zookeeper.znode.parent</name>
+ <value>/hbase</value>
+ <description>Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper
+ files that are configured with a relative path will go under this node.
+ By default, all of HBase's ZooKeeper file path are configured with a
+ relative path, so they will all go under this directory unless changed.
+ </description>
+ </property>
+
+ <property>
+ <name>hbase.defaults.for.version.skip</name>
+ <value>false</value>
+ <description>Disables version verification.</description>
+ </property>
+
+</configuration>
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/metainfo.xml
new file mode 100644
index 0000000000..afd756bf0b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/metainfo.xml
@@ -0,0 +1,128 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>HBASE</name>
+ <displayName>HBase</displayName>
+ <comment>Non-relational distributed database and centralized service for configuration management &amp;
+ synchronization
+ </comment>
+ <version>0.96.0.2.0</version>
+ <components>
+ <component>
+ <name>HBASE_MASTER</name>
+ <displayName>HBase Master</displayName>
+ <category>MASTER</category>
+ <cardinality>1+</cardinality>
+ <dependencies>
+ <dependency>
+ <name>HDFS/HDFS_CLIENT</name>
+ <scope>host</scope>
+ <auto-deploy>
+ <enabled>true</enabled>
+ </auto-deploy>
+ </dependency>
+ <dependency>
+ <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+ <scope>cluster</scope>
+ <auto-deploy>
+ <enabled>true</enabled>
+ <co-locate>HBASE/HBASE_MASTER</co-locate>
+ </auto-deploy>
+ </dependency>
+ </dependencies>
+ <commandScript>
+ <script>scripts/hbase_master.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ <customCommands>
+ <customCommand>
+ <name>DECOMMISSION</name>
+ <commandScript>
+ <script>scripts/hbase_master.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ </customCommand>
+ </customCommands>
+ </component>
+
+ <component>
+ <name>HBASE_REGIONSERVER</name>
+ <displayName>RegionServer</displayName>
+ <category>SLAVE</category>
+ <cardinality>1+</cardinality>
+ <commandScript>
+ <script>scripts/hbase_regionserver.py</script>
+ <scriptType>PYTHON</scriptType>
+ </commandScript>
+ </component>
+
+ <component>
+ <name>HBASE_CLIENT</name>
+ <displayName>HBase Client</displayName>
+ <category>CLIENT</category>
+ <cardinality>1+</cardinality>
+ <commandScript>
+ <script>scripts/hbase_client.py</script>
+ <scriptType>PYTHON</scriptType>
+ </commandScript>
+ <configFiles>
+ <configFile>
+ <type>xml</type>
+ <fileName>hbase-site.xml</fileName>
+ <dictionaryName>hbase-site</dictionaryName>
+ </configFile>
+ <configFile>
+ <type>env</type>
+ <fileName>hbase-env.cmd</fileName>
+ <dictionaryName>hbase-env</dictionaryName>
+ </configFile>
+ <configFile>
+ <type>env</type>
+ <fileName>log4j.properties</fileName>
+ <dictionaryName>hbase-log4j</dictionaryName>
+ </configFile>
+ </configFiles>
+ </component>
+ </components>
+
+ <commandScript>
+ <script>scripts/service_check.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>300</timeout>
+ </commandScript>
+
+ <requiredServices>
+ <service>ZOOKEEPER</service>
+ <service>HDFS</service>
+ </requiredServices>
+
+ <configuration-dependencies>
+ <config-type>hbase-policy</config-type>
+ <config-type>hbase-site</config-type>
+ <config-type>hbase-env</config-type>
+ <config-type>hbase-log4j</config-type>
+ </configuration-dependencies>
+
+ </service>
+ </services>
+</metainfo>
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/metrics.json b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/metrics.json
new file mode 100644
index 0000000000..d04235e80f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/metrics.json
@@ -0,0 +1,4659 @@
+{
+ "HBASE_REGIONSERVER": {
+ "Component": [
+ {
+ "type": "jmx",
+ "metrics": {
+ "metrics/hbase/regionserver/slowPutCount": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowPutCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/percentFilesLocal": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.percentFilesLocal",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/deleteRequestLatency_min": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_min",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/blockCacheFree": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheFreeSize",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/mutationsWithoutWALSize": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.mutationsWithoutWALSize",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/blockCacheMissCount": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheMissCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/flushQueueSize": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.flushQueueLength",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/deleteRequestLatency_99th_percentile": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_99th_percentile",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/getRequestLatency_num_ops": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/slowAppendCount": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowAppendCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/blockCacheSize": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheSize",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/putRequestLatency_num_ops": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/slowIncrementCount": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowIncrementCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/blockCacheEvictedCount": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheEvictionCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/putRequestLatency_95th_percentile": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_95th_percentile",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/compactionQueueSize": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.compactionQueueLength",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/putRequestLatency_median": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_median",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/deleteRequestLatency_mean": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_mean",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/slowGetCount": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowGetCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/blockCacheCount": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/getRequestLatency_75th_percentile": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_75th_percentile",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/readRequestsCount": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.readRequestCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/putRequestLatency_min": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_min",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/storefileIndexSizeMB": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeFileIndexSize",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/deleteRequestLatency_median": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_median",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/putRequestLatency_max": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_max",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/totalStaticIndexSizeKB": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.staticIndexSize",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/deleteRequestLatency_num_ops": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/putRequestLatency_mean": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_mean",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/requests": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.totalRequestCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/storefiles": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeFileCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/mutationsWithoutWALCount": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.mutationsWithoutWALCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/writeRequestsCount": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.writeRequestCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/getRequestLatency_median": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_median",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/slowDeleteCount": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowDeleteCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/putRequestLatency_99th_percentile": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_99th_percentile",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/stores": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/getRequestLatency_min": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_min",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/getRequestLatency_95th_percentile": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_95th_percentile",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/deleteRequestLatency_95th_percentile": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_95th_percentile",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/memstoreSizeMB": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.memStoreSize",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/getRequestLatency_max": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_max",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/getRequestLatency_mean": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_mean",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/deleteRequestLatency_75th_percentile": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_75th_percentile",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/deleteRequestLatency_max": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_max",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/putRequestLatency_75th_percentile": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_75th_percentile",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/regions": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.regionCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/totalStaticBloomSizeKB": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.staticBloomSize",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/blockCacheHitCount": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheHitCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/getRequestLatency_99th_percentile": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_99th_percentile",
+ "pointInTime": true,
+ "temporal": false
+ }
+ }
+ }
+ ],
+ "HostComponent": [
+ {
+ "type": "jmx",
+ "metrics": {
+ "metrics/hbase/regionserver/slowPutCount": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowPutCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/percentFilesLocal": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.percentFilesLocal",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/deleteRequestLatency_min": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_min",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/blockCacheFree": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheFreeSize",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/mutationsWithoutWALSize": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.mutationsWithoutWALSize",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/blockCacheMissCount": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheMissCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/flushQueueSize": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.flushQueueLength",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/deleteRequestLatency_99th_percentile": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_99th_percentile",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/getRequestLatency_num_ops": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/slowAppendCount": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowAppendCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/blockCacheSize": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheSize",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/putRequestLatency_num_ops": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/slowIncrementCount": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowIncrementCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/blockCacheEvictedCount": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheEvictionCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/putRequestLatency_95th_percentile": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_95th_percentile",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/compactionQueueSize": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.compactionQueueLength",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/putRequestLatency_median": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_median",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/deleteRequestLatency_mean": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_mean",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/slowGetCount": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowGetCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/blockCacheCount": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/getRequestLatency_75th_percentile": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_75th_percentile",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/readRequestsCount": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.readRequestCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/putRequestLatency_min": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_min",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/storefileIndexSizeMB": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeFileIndexSize",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/deleteRequestLatency_median": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_median",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/putRequestLatency_max": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_max",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/totalStaticIndexSizeKB": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.staticIndexSize",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/deleteRequestLatency_num_ops": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/putRequestLatency_mean": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_mean",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/requests": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.totalRequestCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/storefiles": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeFileCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/mutationsWithoutWALCount": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.mutationsWithoutWALCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/writeRequestsCount": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.writeRequestCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/getRequestLatency_median": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_median",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/slowDeleteCount": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowDeleteCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/putRequestLatency_99th_percentile": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_99th_percentile",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/stores": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/getRequestLatency_min": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_min",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/getRequestLatency_95th_percentile": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_95th_percentile",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/deleteRequestLatency_95th_percentile": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_95th_percentile",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/memstoreSizeMB": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.memStoreSize",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/getRequestLatency_max": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_max",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/getRequestLatency_mean": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_mean",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/deleteRequestLatency_75th_percentile": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_75th_percentile",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/deleteRequestLatency_max": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_max",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/putRequestLatency_75th_percentile": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_75th_percentile",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/regions": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.regionCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/totalStaticBloomSizeKB": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.staticBloomSize",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/blockCacheHitCount": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheHitCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/regionserver/getRequestLatency_99th_percentile": {
+ "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_99th_percentile",
+ "pointInTime": true,
+ "temporal": false
+ }
+ }
+ }
+ ]
+ },
+ "HBASE_MASTER": {
+ "Component": [
+ {
+ "type": "jmx",
+ "metrics": {
+ "metrics/rpc/regionServerReport.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerReport.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/memMaxM":{
+ "metric" : "Hadoop:service=HBase,name=JvmMetrics.MemMaxM",
+ "pointInTime" : true,
+ "temporal" : false
+ },
+ "metrics/rpc/reportRSFatalError.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.reportRSFatalError.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/closeRegionMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.closeRegionMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/isMasterRunningAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.isMasterRunningAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcQueueTimeAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.RpcQueueTimeAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteTableNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteTableNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "ServiceComponentInfo/Revision": {
+ "metric": "hadoop:service=HBase,name=Info.revision",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/splitRegionAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.splitRegionAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getHTableDescriptorsMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getHTableDescriptorsMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getClusterStatus.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getClusterStatus.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/splitRegionNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.splitRegionNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getProtocolVersionAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolVersionAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummariesMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getBlockCacheColumnFamilySummariesMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/modifyColumnMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.modifyColumnMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getClosestRowBeforeMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getClosestRowBeforeMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getProtocolSignatureNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolSignatureNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcSlowResponseMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.RpcSlowResponseMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/AverageLoad": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.averageLoad",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/openScannerNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.openScannerNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteTable.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteTable.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteColumn.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteColumn.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getCompactionStateMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getCompactionStateMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/regionServerReport.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerReport.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/stopMaster.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.stopMaster.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/modifyColumn.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.modifyColumn.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getHServerInfoAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getHServerInfoAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rollHLogWriterMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.rollHLogWriterMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/offlineMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.offlineMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getProtocolSignatureMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolSignatureMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/ServerName": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.tag.serverName",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getHServerInfoMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getHServerInfoMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/splitSizeMaxTime": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.HlogSplitSize_max",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/execCoprocessorAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.execCoprocessorAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcProcessingTimeMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.RpcProcessingTimeMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getClusterStatusMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getClusterStatusMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/offline.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.offline.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/ZookeeperQuorum": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.tag.zookeeperQuorum",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/hdfsDate": {
+ "metric": "hadoop:service=HBase,name=Info.hdfsDate",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/createTable.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.createTable.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/offlineMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.offlineMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/offline.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.offline.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/balanceNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.balanceNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getClosestRowBeforeNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getClosestRowBeforeNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/modifyColumnNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.modifyColumnNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getLastFlushTimeNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getLastFlushTimeNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/stopMaster.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.stopMaster.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteTable.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteTable.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/hdfsUrl": {
+ "metric": "hadoop:service=HBase,name=Info.hdfsUrl",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/multiMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.multiMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/revision": {
+ "metric": "hadoop:service=HBase,name=Info.revision",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/modifyColumnMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.modifyColumnMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/addColumnMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.addColumnMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getHTableDescriptorsNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getHTableDescriptorsNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcQueueTimeMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.RpcQueueTimeMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/multiNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.multiNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getProtocolVersion.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolVersion.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/offline.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.offline.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteTable.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteTable.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/enableTableMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.enableTableMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/regionServerReportMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerReportMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/reportRSFatalErrorNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.reportRSFatalErrorNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/addColumn.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.addColumn.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/unassign.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.unassign.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/reportRSFatalErrorMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.reportRSFatalErrorMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/disableTable.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.disableTable.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/existsAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.existsAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/MasterActiveTime": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.masterActiveTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummariesNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getBlockCacheColumnFamilySummariesNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/createTable.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.createTable.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rpcAuthorizationFailures": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.rpcAuthorizationFailures",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/hdfsUser": {
+ "metric": "hadoop:service=HBase,name=Info.hdfsUser",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/regionServerStartupAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerStartupAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/regionServerStartupNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerStartupNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteColumnNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteColumnNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/version": {
+ "metric": "hadoop:service=HBase,name=Info.version",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/splitTimeMaxTime": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.HlogSplitTime_max",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/synchronousBalanceSwitchNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitchNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/stopMasterNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.stopMasterNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/splitTimeNumOps": {
+ "metric": "hadoop:service=Master,name=MasterStatistics.splitTimeNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/reportRSFatalErrorAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.reportRSFatalErrorAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/replicateLogEntriesNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.replicateLogEntriesNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/multiMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.multiMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteColumnMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteColumnMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getProtocolSignature.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolSignature.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getLastFlushTimeMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getLastFlushTimeMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/disableTable.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.disableTable.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/NumOpenConnections": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.NumOpenConnections",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcQueueTimeNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.RpcQueueTimeNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/regionServerReportMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerReportMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/IsActiveMaster": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.tag.isActiveMaster",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/bulkLoadHFilesMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.bulkLoadHFilesMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/synchronousBalanceSwitch.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitch.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/MasterStartTime": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.masterStartTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/balanceSwitchMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.balanceSwitchMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/unlockRowMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.unlockRowMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/incrementNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.incrementNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/reportRSFatalError.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.reportRSFatalError.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/balanceSwitch.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.balanceSwitch.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/execCoprocessorMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.execCoprocessorMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/enableTable.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.enableTable.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/putMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.putMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/flushRegionMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.flushRegionMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/nextNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.nextNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getOnlineRegionsAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getOnlineRegionsAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/createTableAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.createTableAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/modifyTable.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.modifyTable.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getAlterStatusAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getAlterStatusAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/assign.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.assign.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/regionServerStartup.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerStartup.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/balanceSwitch.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.balanceSwitch.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/isMasterRunningMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.isMasterRunningMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/existsNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.existsNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/compactRegionMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.compactRegionMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/bulkLoadHFilesMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.bulkLoadHFilesMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rollHLogWriterNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.rollHLogWriterNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/unlockRowAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.unlockRowAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/openRegionsNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.openRegionsNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/checkAndDeleteMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.checkAndDeleteMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/stopMaster.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.stopMaster.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/splitRegionMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.splitRegionMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getHTableDescriptorsMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getHTableDescriptorsMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteColumn.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteColumn.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/moveMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.moveMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/shutdown.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.shutdown.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/appendNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.appendNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/appendAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.appendAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getClusterStatusNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getClusterStatusNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcSlowResponseNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.RpcSlowResponseNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/splitSize_num_ops": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.HlogSplitSize_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "ServiceComponentInfo/MasterActiveTime": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.masterActiveTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getLastFlushTimeMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getLastFlushTimeMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/checkAndPutNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.checkAndPutNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/splitTime_avg_time": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.HlogSplitTime_mean",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/createTableMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.createTableMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/incrementMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.incrementMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getAlterStatus.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getAlterStatus.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getCompactionStateMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getCompactionStateMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/splitTimeAvgTime": {
+ "metric": "hadoop:service=Master,name=MasterStatistics.splitTimeAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getStoreFileListMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getStoreFileListMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getProtocolSignature.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolSignature.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcProcessingTimeAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.RpcProcessingTimeAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/incrementColumnValueNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.incrementColumnValueNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/enableTable.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.enableTable.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/stopMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.stopMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/multiAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.multiAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/createTable.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.createTable.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/shutdownAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.shutdownAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/openRegionMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.openRegionMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummariesMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getBlockCacheColumnFamilySummariesMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteColumn.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteColumn.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/modifyColumn.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.modifyColumn.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/disableTableNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.disableTableNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getProtocolVersion.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolVersion.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/replicateLogEntriesAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.replicateLogEntriesAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/cluster_requests": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.clusterRequests",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getHServerInfoMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getHServerInfoMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getClusterStatusMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getClusterStatusMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rpcAuthenticationFailures": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.rpcAuthenticationFailures",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/Coprocessors": {
+ "metric": "hadoop:service=Master,name=Master.Coprocessors",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/unlockRowNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.unlockRowNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getAlterStatus.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getAlterStatus.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/regionServerStartup.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerStartup.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/balanceMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.balanceMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/incrementColumnValueMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.incrementColumnValueMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/modifyColumn.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.modifyColumn.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getProtocolVersionMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolVersionMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/RegionsInTransition": {
+ "metric": "hadoop:service=Master,name=Master.RegionsInTransition",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/balanceSwitchAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.balanceSwitchAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getAlterStatusMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getAlterStatusMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/unassignMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.unassignMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/nextAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.nextAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rollHLogWriterMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.rollHLogWriterMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getAlterStatus.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getAlterStatus.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/stopAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.stopAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/hdfsVersion": {
+ "metric": "hadoop:service=HBase,name=Info.hdfsVersion",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/unassignMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.unassignMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcSlowResponseAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.RpcSlowResponseAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/assignNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.assignNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getLastFlushTimeAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getLastFlushTimeAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getClusterStatusAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getClusterStatusAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/mutateRowNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.mutateRowNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getClosestRowBeforeMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getClosestRowBeforeMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/regionServerReport.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerReport.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getClusterStatus.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getClusterStatus.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/balanceAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.balanceAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/balanceSwitchNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.balanceSwitchNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/RegionServers": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.numRegionServers",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/bulkLoadHFilesAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.bulkLoadHFilesAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/compactRegionAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.compactRegionAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/disableTableMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.disableTableMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/closeMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.closeMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/enableTableNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.enableTableNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/openScannerMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.openScannerMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/moveMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.moveMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/checkAndPutMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.checkAndPutMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteTableAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteTableAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/assign.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.assign.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/ClusterId": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.tag.clusterId",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getProtocolVersionMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolVersionMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/regionServerStartupMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerStartupMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcQueueTimeMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.RpcQueueTimeMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/createTableNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.createTableNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcProcessingTimeMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.RpcProcessingTimeMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getAlterStatus.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getAlterStatus.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/addColumnNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.addColumnNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/splitSizeNumOps": {
+ "metric": "hadoop:service=Master,name=MasterStatistics.splitSizeNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/unassignNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.unassignNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/balance.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.balance.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/closeRegionNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.closeRegionNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getProtocolSignature.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolSignature.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/synchronousBalanceSwitchAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitchAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/appendMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.appendMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/unlockRowMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.unlockRowMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getStoreFileListMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getStoreFileListMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/moveAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.moveAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/mutateRowMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.mutateRowMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getHTableDescriptors.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getHTableDescriptors.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getCompactionStateAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getCompactionStateAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/closeRegionMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.closeRegionMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/reportRSFatalError.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.reportRSFatalError.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcSlowResponseMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.RpcSlowResponseMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/balanceSwitchMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.balanceSwitchMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/balanceMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.balanceMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/lockRowMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.lockRowMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/openScannerMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.openScannerMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getStoreFileListAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getStoreFileListAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/openRegionsAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.openRegionsAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/disableTableMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.disableTableMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getOnlineRegionsNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getOnlineRegionsNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteColumnAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteColumnAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/putMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.putMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/replicationCallQueueLen": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.replicationCallQueueLen",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/modifyTableMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.modifyTableMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/reportRSFatalErrorMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.reportRSFatalErrorMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/flushRegionAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.flushRegionAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteColumn.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteColumn.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/offline.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.offline.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/splitTimeMinTime": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.HlogSplitTime_min",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/createTable.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.createTable.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/lockRowAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.lockRowAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/lockRowMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.lockRowMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/synchronousBalanceSwitch.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitch.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getClusterStatus.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getClusterStatus.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/addColumn.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.addColumn.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/isMasterRunningNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.isMasterRunningNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/closeAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.closeAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/addColumn.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.addColumn.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/closeMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.closeMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/replicateLogEntriesMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.replicateLogEntriesMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/openRegionsMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.openRegionsMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/balance.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.balance.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/replicateLogEntriesMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.replicateLogEntriesMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/execCoprocessorMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.execCoprocessorMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/splitTime_num_ops": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.HlogSplitTime_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "ServiceComponentInfo/RegionsInTransition": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.ritCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/modifyTableAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.modifyTableAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/assignMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.assignMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/isMasterRunning.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.isMasterRunning.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/regionServerReport.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerReport.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rpcAuthenticationSuccesses": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.rpcAuthenticationSuccesses",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/enableTable.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.enableTable.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/execCoprocessorNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.execCoprocessorNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/move.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.move.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/shutdownMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.shutdownMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/assign.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.assign.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/regionServerReportNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerReportNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/addColumnAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.addColumnAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/isMasterRunning.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.isMasterRunning.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getProtocolVersion.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolVersion.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/modifyTable.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.modifyTable.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "ServiceComponentInfo/HeapMemoryUsed": {
+ "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rollHLogWriterAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.rollHLogWriterAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getRegionInfoNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getRegionInfoNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/ReceivedBytes": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.ReceivedBytes",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/move.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.move.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getProtocolSignatureAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolSignatureAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "ServiceComponentInfo/NonHeapMemoryMax": {
+ "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/modifyTableMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.modifyTableMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/incrementColumnValueAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.incrementColumnValueAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/modifyTableNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.modifyTableNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/disableTable.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.disableTable.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getClusterStatus.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getClusterStatus.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/modifyColumnAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.modifyColumnAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/reportRSFatalError.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.reportRSFatalError.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/DeadRegionServers": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.numDeadRegionServers",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/closeNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.closeNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/unassign.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.unassign.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/balance.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.balance.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/nextMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.nextMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "ServiceComponentInfo/AverageLoad": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.averageLoad",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "ServiceComponentInfo/MasterStartTime": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.masterStartTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/appendMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.appendMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/priorityCallQueueLen": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.priorityCallQueueLen",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getAlterStatusMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getAlterStatusMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/bulkLoadHFilesNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.bulkLoadHFilesNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/callQueueLen": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.callQueueLen",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/synchronousBalanceSwitchMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitchMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/flushRegionNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.flushRegionNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/regionServerStartup.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerStartup.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/unassignAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.unassignAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getProtocolVersionNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolVersionNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/shutdown.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.shutdown.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/checkAndPutMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.checkAndPutMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/assignAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.assignAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/checkAndDeleteMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.checkAndDeleteMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/shutdownNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.shutdownNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getAlterStatusNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getAlterStatusNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/disableTable.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.disableTable.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/openRegionNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.openRegionNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/modifyColumn.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.modifyColumn.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/synchronousBalanceSwitch.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitch.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getCompactionStateNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getCompactionStateNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getHTableDescriptorsAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getHTableDescriptorsAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/openRegionAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.openRegionAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/stopMaster.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.stopMaster.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getRegionInfoMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getRegionInfoMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/putNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.putNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/hdfsRevision": {
+ "metric": "hadoop:service=HBase,name=Info.hdfsRevision",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/url": {
+ "metric": "hadoop:service=HBase,name=Info.url",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getProtocolSignature.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolSignature.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/openRegionsMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.openRegionsMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/compactRegionNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.compactRegionNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/nextMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.nextMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getOnlineRegionsMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getOnlineRegionsMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/checkAndDeleteNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.checkAndDeleteNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/isMasterRunning.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.isMasterRunning.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteTable.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteTable.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/unassign.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.unassign.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getRegionInfoAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getRegionInfoAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/balance.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.balance.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getHServerInfoNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getHServerInfoNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/shutdown.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.shutdown.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "ServiceComponentInfo/NonHeapMemoryUsed": {
+ "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteColumnMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteColumnMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getStoreFileListNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getStoreFileListNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteTableMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteTableMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/addColumn.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.addColumn.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/synchronousBalanceSwitchMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitchMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/isMasterRunningMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.isMasterRunningMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/closeRegionAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.closeRegionAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/disableTableAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.disableTableAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/assign.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.assign.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/offlineAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.offlineAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "ServiceComponentInfo/HeapMemoryMax": {
+ "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/moveNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.moveNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/incrementColumnValueMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.incrementColumnValueMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/splitSize_avg_time": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.HlogSplitSize_mean",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/checkAndDeleteAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.checkAndDeleteAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/mutateRowAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.mutateRowAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/existsMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.existsMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/balanceSwitch.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.balanceSwitch.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/modifyTable.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.modifyTable.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/synchronousBalanceSwitch.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitch.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/date": {
+ "metric": "hadoop:service=HBase,name=Info.date",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/incrementAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.incrementAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/flushRegionMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.flushRegionMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getOnlineRegionsMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getOnlineRegionsMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/stopNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.stopNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/user": {
+ "metric": "java.lang:type=Runtime.SystemProperties.user.name",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/stopMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.stopMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getHTableDescriptors.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getHTableDescriptors.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getClosestRowBeforeAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getClosestRowBeforeAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/offlineNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.offlineNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/SentBytes": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.SentBytes",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/incrementMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.incrementMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteTableMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteTableMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/checkAndPutAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.checkAndPutAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/openScannerAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.openScannerAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/stopMasterAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.stopMasterAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/assignMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.assignMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/compactRegionMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.compactRegionMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/openRegionMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.openRegionMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/addColumnMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.addColumnMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcProcessingTimeNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.RpcProcessingTimeNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/existsMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.existsMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/enableTableMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.enableTableMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/splitSizeMinTime": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.HlogSplitSize_min",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/shutdown.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.shutdown.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/enableTableAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.enableTableAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/regionServerStartup.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerStartup.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/lockRowNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.lockRowNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/stopMasterMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.stopMasterMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getProtocolVersion.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolVersion.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/balanceSwitch.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.balanceSwitch.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/move.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.move.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getProtocolSignatureMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolSignatureMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/modifyTable.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.modifyTable.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/splitRegionMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.splitRegionMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/mutateRowMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.mutateRowMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/splitSizeAvgTime": {
+ "metric": "hadoop:service=Master,name=MasterStatistics.splitSizeAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/regionServerReportAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerReportAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/putAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.putAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/shutdownMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.shutdownMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummariesAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getBlockCacheColumnFamilySummariesAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getHTableDescriptors.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getHTableDescriptors.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/regionServerStartupMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerStartupMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/createTableMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.createTableMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getHTableDescriptors.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getHTableDescriptors.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rpcAuthorizationSuccesses": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.rpcAuthorizationSuccesses",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/isMasterRunning.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.isMasterRunning.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/enableTable.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.enableTable.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "ServiceComponentInfo/Version": {
+ "metric": "hadoop:service=HBase,name=Info.version",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getRegionInfoMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getRegionInfoMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/unassign.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.unassign.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/stopMasterMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.stopMasterMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/move.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.move.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ }
+ }
+ }
+ ],
+ "HostComponent": [
+ {
+ "type": "jmx",
+ "metrics": {
+ "metrics/rpc/regionServerReport.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerReport.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/memMaxM":{
+ "metric" : "Hadoop:service=HBase,name=JvmMetrics.MemMaxM",
+ "pointInTime" : true,
+ "temporal" : false
+ },
+ "metrics/rpc/reportRSFatalError.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.reportRSFatalError.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/closeRegionMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.closeRegionMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/isMasterRunningAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.isMasterRunningAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcQueueTimeAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.RpcQueueTimeAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteTableNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteTableNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/splitRegionAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.splitRegionAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getHTableDescriptorsMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getHTableDescriptorsMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getClusterStatus.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getClusterStatus.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/splitRegionNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.splitRegionNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getProtocolVersionAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolVersionAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/modifyColumnMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.modifyColumnMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummariesMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getBlockCacheColumnFamilySummariesMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getClosestRowBeforeMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getClosestRowBeforeMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getProtocolSignatureNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolSignatureNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcSlowResponseMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.RpcSlowResponseMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/AverageLoad": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.averageLoad",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/openScannerNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.openScannerNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteTable.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteTable.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteColumn.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteColumn.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getCompactionStateMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getCompactionStateMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/stopMaster.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.stopMaster.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/regionServerReport.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerReport.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getHServerInfoAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getHServerInfoAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/modifyColumn.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.modifyColumn.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rollHLogWriterMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.rollHLogWriterMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/offlineMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.offlineMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getProtocolSignatureMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolSignatureMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/ServerName": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.tag.serverName",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getHServerInfoMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getHServerInfoMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/splitSizeMaxTime": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.HlogSplitSize_max",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/execCoprocessorAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.execCoprocessorAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcProcessingTimeMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.RpcProcessingTimeMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getClusterStatusMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getClusterStatusMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/offline.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.offline.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/ZookeeperQuorum": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.tag.zookeeperQuorum",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/hdfsDate": {
+ "metric": "hadoop:service=HBase,name=Info.hdfsDate",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/createTable.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.createTable.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/offlineMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.offlineMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/offline.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.offline.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/balanceNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.balanceNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getClosestRowBeforeNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getClosestRowBeforeNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/modifyColumnNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.modifyColumnNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getLastFlushTimeNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getLastFlushTimeNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/stopMaster.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.stopMaster.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteTable.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteTable.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/hdfsUrl": {
+ "metric": "hadoop:service=HBase,name=Info.hdfsUrl",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/NonHeapMemoryMax": {
+ "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/multiMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.multiMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/revision": {
+ "metric": "hadoop:service=HBase,name=Info.revision",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/modifyColumnMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.modifyColumnMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/addColumnMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.addColumnMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getHTableDescriptorsNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getHTableDescriptorsNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcQueueTimeMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.RpcQueueTimeMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/multiNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.multiNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getProtocolVersion.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolVersion.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/offline.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.offline.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteTable.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteTable.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/enableTableMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.enableTableMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/regionServerReportMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerReportMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/reportRSFatalErrorNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.reportRSFatalErrorNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/addColumn.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.addColumn.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/unassign.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.unassign.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/reportRSFatalErrorMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.reportRSFatalErrorMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/disableTable.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.disableTable.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/existsAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.existsAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/MasterActiveTime": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.masterActiveTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummariesNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getBlockCacheColumnFamilySummariesNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/createTable.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.createTable.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rpcAuthorizationFailures": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.rpcAuthorizationFailures",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/hdfsUser": {
+ "metric": "hadoop:service=HBase,name=Info.hdfsUser",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/regionServerStartupAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerStartupAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/regionServerStartupNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerStartupNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteColumnNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteColumnNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/version": {
+ "metric": "hadoop:service=HBase,name=Info.version",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/splitTimeMaxTime": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.HlogSplitTime_max",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/synchronousBalanceSwitchNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitchNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/stopMasterNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.stopMasterNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/splitTimeNumOps": {
+ "metric": "hadoop:service=Master,name=MasterStatistics.splitTimeNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/reportRSFatalErrorAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.reportRSFatalErrorAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/replicateLogEntriesNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.replicateLogEntriesNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/multiMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.multiMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteColumnMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteColumnMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getProtocolSignature.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolSignature.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getLastFlushTimeMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getLastFlushTimeMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/disableTable.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.disableTable.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/NumOpenConnections": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.NumOpenConnections",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcQueueTimeNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.RpcQueueTimeNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/regionServerReportMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerReportMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/IsActiveMaster": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.tag.isActiveMaster",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/bulkLoadHFilesMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.bulkLoadHFilesMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/synchronousBalanceSwitch.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitch.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/MasterStartTime": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.masterStartTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/balanceSwitchMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.balanceSwitchMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/unlockRowMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.unlockRowMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/incrementNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.incrementNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/reportRSFatalError.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.reportRSFatalError.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/balanceSwitch.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.balanceSwitch.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/execCoprocessorMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.execCoprocessorMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/enableTable.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.enableTable.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/putMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.putMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/flushRegionMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.flushRegionMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/nextNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.nextNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getOnlineRegionsAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getOnlineRegionsAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/createTableAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.createTableAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/modifyTable.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.modifyTable.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getAlterStatusAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getAlterStatusAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/assign.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.assign.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/regionServerStartup.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerStartup.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/balanceSwitch.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.balanceSwitch.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/isMasterRunningMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.isMasterRunningMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/existsNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.existsNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/compactRegionMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.compactRegionMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/bulkLoadHFilesMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.bulkLoadHFilesMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rollHLogWriterNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.rollHLogWriterNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/unlockRowAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.unlockRowAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/openRegionsNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.openRegionsNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/checkAndDeleteMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.checkAndDeleteMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/stopMaster.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.stopMaster.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/splitRegionMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.splitRegionMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getHTableDescriptorsMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getHTableDescriptorsMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteColumn.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteColumn.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/moveMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.moveMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/shutdown.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.shutdown.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/appendNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.appendNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/appendAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.appendAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getClusterStatusNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getClusterStatusNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcSlowResponseNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.RpcSlowResponseNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/splitSize_num_ops": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.HlogSplitSize_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getLastFlushTimeMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getLastFlushTimeMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/checkAndPutNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.checkAndPutNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/splitTime_avg_time": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.HlogSplitTime_mean",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/createTableMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.createTableMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/incrementMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.incrementMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getAlterStatus.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getAlterStatus.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getCompactionStateMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getCompactionStateMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/splitTimeAvgTime": {
+ "metric": "hadoop:service=Master,name=MasterStatistics.splitTimeAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getStoreFileListMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getStoreFileListMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getProtocolSignature.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolSignature.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcProcessingTimeAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.RpcProcessingTimeAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/incrementColumnValueNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.incrementColumnValueNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/enableTable.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.enableTable.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/stopMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.stopMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/multiAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.multiAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/createTable.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.createTable.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/shutdownAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.shutdownAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/openRegionMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.openRegionMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummariesMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getBlockCacheColumnFamilySummariesMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteColumn.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteColumn.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/modifyColumn.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.modifyColumn.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/disableTableNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.disableTableNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getProtocolVersion.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolVersion.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/replicateLogEntriesAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.replicateLogEntriesAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/cluster_requests": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.clusterRequests",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getHServerInfoMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getHServerInfoMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getClusterStatusMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getClusterStatusMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rpcAuthenticationFailures": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.rpcAuthenticationFailures",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/Coprocessors": {
+ "metric": "hadoop:service=Master,name=Master.Coprocessors",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/unlockRowNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.unlockRowNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getAlterStatus.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getAlterStatus.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/regionServerStartup.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerStartup.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/balanceMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.balanceMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/modifyColumn.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.modifyColumn.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/incrementColumnValueMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.incrementColumnValueMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getProtocolVersionMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolVersionMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/RegionsInTransition": {
+ "metric": "hadoop:service=Master,name=Master.RegionsInTransition",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/balanceSwitchAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.balanceSwitchAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getAlterStatusMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getAlterStatusMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/unassignMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.unassignMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/nextAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.nextAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rollHLogWriterMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.rollHLogWriterMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getAlterStatus.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getAlterStatus.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/stopAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.stopAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/hdfsVersion": {
+ "metric": "hadoop:service=HBase,name=Info.hdfsVersion",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/unassignMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.unassignMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcSlowResponseAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.RpcSlowResponseAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/assignNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.assignNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getLastFlushTimeAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getLastFlushTimeAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getClusterStatusAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getClusterStatusAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/mutateRowNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.mutateRowNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getClosestRowBeforeMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getClosestRowBeforeMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/regionServerReport.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerReport.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getClusterStatus.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getClusterStatus.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/balanceAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.balanceAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/balanceSwitchNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.balanceSwitchNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/RegionServers": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.numRegionServers",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/bulkLoadHFilesAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.bulkLoadHFilesAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/compactRegionAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.compactRegionAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/disableTableMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.disableTableMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/closeMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.closeMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/enableTableNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.enableTableNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/openScannerMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.openScannerMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/moveMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.moveMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/checkAndPutMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.checkAndPutMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteTableAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteTableAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/assign.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.assign.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/ClusterId": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.tag.clusterId",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getProtocolVersionMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolVersionMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/regionServerStartupMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerStartupMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcQueueTimeMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.RpcQueueTimeMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/createTableNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.createTableNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcProcessingTimeMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.RpcProcessingTimeMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getAlterStatus.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getAlterStatus.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/splitSizeNumOps": {
+ "metric": "hadoop:service=Master,name=MasterStatistics.splitSizeNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/addColumnNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.addColumnNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/unassignNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.unassignNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/balance.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.balance.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/closeRegionNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.closeRegionNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getProtocolSignature.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolSignature.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/synchronousBalanceSwitchAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitchAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/appendMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.appendMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/unlockRowMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.unlockRowMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getStoreFileListMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getStoreFileListMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/moveAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.moveAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/mutateRowMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.mutateRowMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getHTableDescriptors.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getHTableDescriptors.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getCompactionStateAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getCompactionStateAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/closeRegionMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.closeRegionMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/reportRSFatalError.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.reportRSFatalError.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/HeapMemoryMax": {
+ "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcSlowResponseMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.RpcSlowResponseMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/balanceSwitchMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.balanceSwitchMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/HeapMemoryUsed": {
+ "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/balanceMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.balanceMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/lockRowMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.lockRowMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/openScannerMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.openScannerMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getStoreFileListAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getStoreFileListAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/openRegionsAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.openRegionsAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/disableTableMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.disableTableMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getOnlineRegionsNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getOnlineRegionsNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteColumnAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteColumnAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/replicationCallQueueLen": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.replicationCallQueueLen",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/putMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.putMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/modifyTableMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.modifyTableMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/reportRSFatalErrorMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.reportRSFatalErrorMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/flushRegionAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.flushRegionAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteColumn.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteColumn.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/offline.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.offline.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/splitTimeMinTime": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.HlogSplitTime_min",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/createTable.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.createTable.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/lockRowAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.lockRowAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/lockRowMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.lockRowMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/synchronousBalanceSwitch.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitch.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getClusterStatus.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getClusterStatus.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/addColumn.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.addColumn.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/isMasterRunningNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.isMasterRunningNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/closeAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.closeAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/addColumn.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.addColumn.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/closeMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.closeMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/replicateLogEntriesMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.replicateLogEntriesMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/openRegionsMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.openRegionsMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/balance.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.balance.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/replicateLogEntriesMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.replicateLogEntriesMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/execCoprocessorMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.execCoprocessorMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/splitTime_num_ops": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.HlogSplitTime_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/modifyTableAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.modifyTableAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/assignMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.assignMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/isMasterRunning.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.isMasterRunning.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/regionServerReport.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerReport.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rpcAuthenticationSuccesses": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.rpcAuthenticationSuccesses",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/enableTable.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.enableTable.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/execCoprocessorNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.execCoprocessorNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/NonHeapMemoryUsed": {
+ "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/move.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.move.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/shutdownMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.shutdownMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/assign.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.assign.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/regionServerReportNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerReportNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/addColumnAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.addColumnAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/isMasterRunning.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.isMasterRunning.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getProtocolVersion.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolVersion.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/modifyTable.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.modifyTable.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rollHLogWriterAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.rollHLogWriterAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getRegionInfoNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getRegionInfoNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/ReceivedBytes": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.ReceivedBytes",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/move.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.move.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getProtocolSignatureAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolSignatureAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/modifyTableMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.modifyTableMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/incrementColumnValueAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.incrementColumnValueAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/modifyTableNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.modifyTableNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/disableTable.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.disableTable.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getClusterStatus.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getClusterStatus.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/modifyColumnAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.modifyColumnAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/reportRSFatalError.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.reportRSFatalError.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/DeadRegionServers": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.numDeadRegionServers",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/closeNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.closeNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/unassign.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.unassign.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/balance.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.balance.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/nextMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.nextMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/appendMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.appendMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/priorityCallQueueLen": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.priorityCallQueueLen",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getAlterStatusMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getAlterStatusMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/bulkLoadHFilesNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.bulkLoadHFilesNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/callQueueLen": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.callQueueLen",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/synchronousBalanceSwitchMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitchMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/flushRegionNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.flushRegionNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/regionServerStartup.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerStartup.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/unassignAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.unassignAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getProtocolVersionNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolVersionNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/shutdown.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.shutdown.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/checkAndPutMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.checkAndPutMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/assignAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.assignAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/checkAndDeleteMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.checkAndDeleteMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/shutdownNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.shutdownNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getAlterStatusNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getAlterStatusNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/disableTable.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.disableTable.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/openRegionNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.openRegionNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/modifyColumn.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.modifyColumn.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/synchronousBalanceSwitch.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitch.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getCompactionStateNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getCompactionStateNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getHTableDescriptorsAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getHTableDescriptorsAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/openRegionAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.openRegionAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/stopMaster.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.stopMaster.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getRegionInfoMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getRegionInfoMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/putNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.putNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/hdfsRevision": {
+ "metric": "hadoop:service=HBase,name=Info.hdfsRevision",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/url": {
+ "metric": "hadoop:service=HBase,name=Info.url",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getProtocolSignature.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolSignature.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/openRegionsMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.openRegionsMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/compactRegionNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.compactRegionNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/nextMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.nextMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getOnlineRegionsMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getOnlineRegionsMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/checkAndDeleteNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.checkAndDeleteNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/isMasterRunning.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.isMasterRunning.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteTable.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteTable.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/unassign.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.unassign.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getRegionInfoAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getRegionInfoAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/balance.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.balance.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getHServerInfoNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getHServerInfoNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/shutdown.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.shutdown.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteColumnMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteColumnMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getStoreFileListNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getStoreFileListNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteTableMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteTableMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/addColumn.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.addColumn.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/synchronousBalanceSwitchMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitchMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/isMasterRunningMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.isMasterRunningMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/closeRegionAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.closeRegionAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/disableTableAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.disableTableAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/assign.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.assign.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/offlineAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.offlineAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/moveNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.moveNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/incrementColumnValueMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.incrementColumnValueMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/splitSize_avg_time": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.HlogSplitSize_mean",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/load/AverageLoad": {
+ "metric": "hadoop:service=Master,name=Master.AverageLoad",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/checkAndDeleteAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.checkAndDeleteAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/mutateRowAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.mutateRowAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/existsMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.existsMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/balanceSwitch.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.balanceSwitch.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/modifyTable.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.modifyTable.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/synchronousBalanceSwitch.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitch.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/date": {
+ "metric": "hadoop:service=HBase,name=Info.date",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/incrementAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.incrementAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/flushRegionMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.flushRegionMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getOnlineRegionsMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getOnlineRegionsMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/stopNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.stopNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/user": {
+ "metric": "java.lang:type=Runtime.SystemProperties.user.name",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/stopMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.stopMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getHTableDescriptors.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getHTableDescriptors.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getClosestRowBeforeAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getClosestRowBeforeAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/offlineNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.offlineNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/SentBytes": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.SentBytes",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/incrementMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.incrementMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/deleteTableMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.deleteTableMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/checkAndPutAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.checkAndPutAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/openScannerAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.openScannerAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/stopMasterAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.stopMasterAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/assignMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.assignMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/compactRegionMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.compactRegionMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/openRegionMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.openRegionMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/addColumnMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.addColumnMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcProcessingTimeNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.RpcProcessingTimeNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/existsMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.existsMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/enableTableMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.enableTableMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/splitSizeMinTime": {
+ "metric": "Hadoop:service=HBase,name=Master,sub=Server.HlogSplitSize_min",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/shutdown.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.shutdown.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/enableTableAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.enableTableAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/regionServerStartup.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerStartup.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/lockRowNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.lockRowNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getProtocolVersion.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolVersion.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/stopMasterMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.stopMasterMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/balanceSwitch.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.balanceSwitch.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/move.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.move.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getProtocolSignatureMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolSignatureMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/modifyTable.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.modifyTable.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/splitRegionMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.splitRegionMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/mutateRowMinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.mutateRowMinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/hbase/master/splitSizeAvgTime": {
+ "metric": "hadoop:service=Master,name=MasterStatistics.splitSizeAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/regionServerReportAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerReportAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/putAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.putAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getNumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/shutdownMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.shutdownMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getBlockCacheColumnFamilySummariesAvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getBlockCacheColumnFamilySummariesAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getHTableDescriptors.aboveOneSec.MaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getHTableDescriptors.aboveOneSec.MaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/regionServerStartupMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerStartupMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/createTableMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.createTableMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getHTableDescriptors.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getHTableDescriptors.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rpcAuthorizationSuccesses": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.rpcAuthorizationSuccesses",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/isMasterRunning.aboveOneSec.NumOps": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.isMasterRunning.aboveOneSec.NumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/enableTable.aboveOneSec.AvgTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.enableTable.aboveOneSec.AvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/getRegionInfoMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.getRegionInfoMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/unassign.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.unassign.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/stopMasterMaxTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.stopMasterMaxTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/move.aboveOneSec.MinTime": {
+ "metric": "hadoop:service=HBase,name=RPCStatistics.move.aboveOneSec.MinTime",
+ "pointInTime": true,
+ "temporal": false
+ }
+ }
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/files/draining_servers.rb b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/files/draining_servers.rb
new file mode 100644
index 0000000000..a3958a6206
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/files/draining_servers.rb
@@ -0,0 +1,167 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Add or remove servers from draining mode via zookeeper
+
+require 'optparse'
+include Java
+
+import org.apache.hadoop.hbase.HBaseConfiguration
+import org.apache.hadoop.hbase.client.HBaseAdmin
+import org.apache.hadoop.hbase.zookeeper.ZKUtil
+import org.apache.commons.logging.Log
+import org.apache.commons.logging.LogFactory
+
+# Name of this script
+NAME = "draining_servers"
+
+# Do command-line parsing
+options = {}
+optparse = OptionParser.new do |opts|
+ opts.banner = "Usage: ./hbase org.jruby.Main #{NAME}.rb [options] add|remove|list <hostname>|<host:port>|<servername> ..."
+  opts.separator 'Add, remove, or list servers in draining mode. Accepts either a hostname (to drain all region servers ' +
+    'on that host), a host:port pair, or a host,port,startCode triplet. More than one server can be given, separated by spaces.'
+ opts.on('-h', '--help', 'Display usage information') do
+ puts opts
+ exit
+ end
+ options[:debug] = false
+ opts.on('-d', '--debug', 'Display extra debug logging') do
+ options[:debug] = true
+ end
+end
+optparse.parse!
+
+# Return array of servernames where servername is hostname+port+startcode
+# comma-delimited
+def getServers(admin)
+ serverInfos = admin.getClusterStatus().getServerInfo()
+ servers = []
+ for server in serverInfos
+ servers << server.getServerName()
+ end
+ return servers
+end
+
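+# Expand each hostname or host:port into full server names (host,port,startCode)
+# by matching against the live cluster; a full triplet is passed through as-is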
+def getServerNames(hostOrServers, config)
+ ret = []
+
+ for hostOrServer in hostOrServers
+ # check whether it is already serverName. No need to connect to cluster
+ parts = hostOrServer.split(',')
+ if parts.size() == 3
+ ret << hostOrServer
+ else
+ admin = HBaseAdmin.new(config) if not admin
+ servers = getServers(admin)
+
+ hostOrServer = hostOrServer.gsub(/:/, ",")
+ for server in servers
+ ret << server if server.start_with?(hostOrServer)
+ end
+ end
+ end
+
+ admin.close() if admin
+ return ret
+end
+
+def addServers(options, hostOrServers)
+ config = HBaseConfiguration.create()
+ servers = getServerNames(hostOrServers, config)
+
+ zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
+ parentZnode = zkw.drainingZNode
+
+ begin
+ for server in servers
+ node = ZKUtil.joinZNode(parentZnode, server)
+ ZKUtil.createAndFailSilent(zkw, node)
+ end
+ ensure
+ zkw.close()
+ end
+end
+
+def removeServers(options, hostOrServers)
+ config = HBaseConfiguration.create()
+ servers = getServerNames(hostOrServers, config)
+
+ zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
+ parentZnode = zkw.drainingZNode
+
+ begin
+ for server in servers
+ node = ZKUtil.joinZNode(parentZnode, server)
+ ZKUtil.deleteNodeFailSilent(zkw, node)
+ end
+ ensure
+ zkw.close()
+ end
+end
+
+# list servers in draining mode
+def listServers(options)
+ config = HBaseConfiguration.create()
+
+ zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
+ parentZnode = zkw.drainingZNode
+
+ servers = ZKUtil.listChildrenNoWatch(zkw, parentZnode)
+ servers.each {|server| puts server}
+end
+
+hostOrServers = ARGV[1..ARGV.size()]
+
+# Create a logger and disable the DEBUG-level annoying client logging
+def configureLogging(options)
+ apacheLogger = LogFactory.getLog(NAME)
+ # Configure log4j to not spew so much
+ unless (options[:debug])
+ logger = org.apache.log4j.Logger.getLogger("org.apache.hadoop.hbase")
+ logger.setLevel(org.apache.log4j.Level::WARN)
+ logger = org.apache.log4j.Logger.getLogger("org.apache.zookeeper")
+ logger.setLevel(org.apache.log4j.Level::WARN)
+ end
+ return apacheLogger
+end
+
+# Create a logger and save it to ruby global
+$LOG = configureLogging(options)
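+# Dispatch on the first command-line argument; remaining arguments are hosts/servers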
+case ARGV[0]
+ when 'add'
+ if ARGV.length < 2
+ puts optparse
+ exit 1
+ end
+ addServers(options, hostOrServers)
+ when 'remove'
+ if ARGV.length < 2
+ puts optparse
+ exit 1
+ end
+ removeServers(options, hostOrServers)
+ when 'list'
+ listServers(options)
+ else
+ puts optparse
+ exit 3
+end
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase.py
new file mode 100644
index 0000000000..a427fb3856
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase.py
@@ -0,0 +1,30 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
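+# Writes hbase-site.xml into the HBase conf dir from the cluster's desired configurations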
+def hbase():
+ import params
+ XmlConfig("hbase-site.xml",
+ conf_dir = params.hbase_conf_dir,
+ configurations = params.config['configurations']['hbase-site']
+ ) \ No newline at end of file
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_client.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_client.py
new file mode 100644
index 0000000000..bf2aad0e80
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_client.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+class HbaseClient(Script):
+ def install(self, env):
+ self.configure(env)
+
+ def configure(self, env):
+ import params
+ env.set_params(params)
+
+ def status(self, env):
+ raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+ HbaseClient().execute()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_decommission.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_decommission.py
new file mode 100644
index 0000000000..f2a2ad6611
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_decommission.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+def hbase_decommission():
+ import params
+
+ File(params.region_drainer,
+ content=StaticFile("draining_servers.rb"),
+ owner=params.hbase_user,
+ mode="f"
+ )
+ if params.hbase_drain_only == True:
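+    # Drain-only mode: run draining_servers.rb "remove" for each excluded host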
+ hosts = params.hbase_excluded_hosts.split(",")
+ for host in hosts:
+ if host:
+ regiondrainer_cmd = format(
+ "cmd /c {hbase_executable} org.jruby.Main {region_drainer} remove {host}")
+ Execute(regiondrainer_cmd,
+ user=params.hbase_user,
+ logoutput=True
+ )
+ pass
+ pass
+
+ else:
+
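+    # Full decommission: mark each excluded host as draining, then unload its
+    # regions with region_mover.rb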
+ hosts = params.hbase_excluded_hosts.split(",")
+ for host in hosts:
+ if host:
+ regiondrainer_cmd = format(
+ "cmd /c {hbase_executable} org.jruby.Main {region_drainer} add {host}")
+ regionmover_cmd = format(
+ "cmd /c {hbase_executable} org.jruby.Main {region_mover} unload {host}")
+
+ Execute(regiondrainer_cmd,
+ user=params.hbase_user,
+ logoutput=True
+ )
+
+ Execute(regionmover_cmd,
+ user=params.hbase_user,
+ logoutput=True
+ )
+ pass
+ pass
+ pass \ No newline at end of file
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_master.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_master.py
new file mode 100644
index 0000000000..d3720d561c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_master.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from hbase import hbase
+import service_mapping
+from hbase_decommission import *
+
+class HbaseMaster(Script):
+ def install(self, env):
+ if not check_windows_service_exists(service_mapping.hbase_master_win_service_name):
+ self.install_packages(env)
+
+ def configure(self, env):
+ hbase()
+
+ def start(self, env):
+ import params
+ self.configure(env) # for security
+ Service(service_mapping.hbase_master_win_service_name, action="start")
+
+ def stop(self, env):
+ Service(service_mapping.hbase_master_win_service_name, action="stop")
+
+ def status(self, env):
+ check_windows_service_status(service_mapping.hbase_master_win_service_name)
+
+ def decommission(self, env):
+ import params
+ env.set_params(params)
+    hbase_decommission()
+
+if __name__ == "__main__":
+ HbaseMaster().execute()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_regionserver.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_regionserver.py
new file mode 100644
index 0000000000..d69d402c24
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_regionserver.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from hbase import hbase
+import service_mapping
+
+class HbaseRegionServer(Script):
+ def install(self, env):
+ if not check_windows_service_exists(service_mapping.hbase_regionserver_win_service_name):
+ self.install_packages(env)
+
+ def configure(self, env):
+ hbase()
+
+ def start(self, env):
+ import params
+ Service(service_mapping.hbase_regionserver_win_service_name, action="start")
+
+ def stop(self, env):
+ Service(service_mapping.hbase_regionserver_win_service_name, action="stop")
+
+ def status(self, env):
+ check_windows_service_status(service_mapping.hbase_regionserver_win_service_name)
+
+ def decommission(self, env):
+ print "Decommission not yet implemented!"
+
+if __name__ == "__main__":
+ HbaseRegionServer().execute()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/params.py
new file mode 100644
index 0000000000..cd9a74a195
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/params.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import os
+
+# server configurations
+config = Script.get_config()
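+# HBASE_CONF_DIR, HBASE_HOME and HADOOP_HOME are expected in the process environment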
+hbase_conf_dir = os.environ["HBASE_CONF_DIR"]
+hbase_bin_dir = os.path.join(os.environ["HBASE_HOME"],'bin')
+hbase_executable = os.path.join(hbase_bin_dir,"hbase.cmd")
+hdp_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"],".."))
+hbase_user = "hadoop"
+
+#decomm params
+region_drainer = os.path.join(hbase_bin_dir,"draining_servers.rb")
+region_mover = os.path.join(hbase_bin_dir,"region_mover.rb")
+hbase_excluded_hosts = config['commandParams']['excluded_hosts']
+hbase_drain_only = config['commandParams']['mark_draining_only'] \ No newline at end of file
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/service_check.py
new file mode 100644
index 0000000000..c12967b236
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/service_check.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+from resource_management import *
+from resource_management.libraries.functions.format import format
+
+
+class HbaseServiceCheck(Script):
+ def service_check(self, env):
+ import params
+ env.set_params(params)
+ smoke_cmd = os.path.join(params.hdp_root,"Run-SmokeTests.cmd")
+ service = "HBASE"
+ Execute(format("cmd /C {smoke_cmd} {service}"), logoutput=True)
+
+if __name__ == "__main__":
+ HbaseServiceCheck().execute()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/service_mapping.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/service_mapping.py
new file mode 100644
index 0000000000..186b64e034
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/service_mapping.py
@@ -0,0 +1,21 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+hbase_master_win_service_name = "master"
+hbase_regionserver_win_service_name = "regionserver"
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/core-site.xml
new file mode 100644
index 0000000000..2dbda14833
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/core-site.xml
@@ -0,0 +1,203 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!-- Put site-specific property overrides in this file. -->
+<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
+ <!-- i/o properties -->
+ <property>
+ <name>io.file.buffer.size</name>
+ <value>131072</value>
+ <description>The size of buffer for use in sequence files.
+ The size of this buffer should probably be a multiple of hardware
+ page size (4096 on Intel x86), and it determines how much data is
+ buffered during read and write operations.</description>
+ </property>
+ <property>
+ <name>io.serializations</name>
+ <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
+ </property>
+ <property>
+ <name>io.compression.codecs</name>
+ <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec</value>
+ <description>A list of the compression codec classes that can be used
+ for compression/decompression.</description>
+ </property>
+ <!-- file system properties -->
+ <property>
+ <name>fs.defaultFS</name>
+ <value>hdfs://localhost:8020</value>
+ <description>The name of the default file system. Either the
+ literal string "local" or a host:port for NDFS.</description>
+ <final>true</final>
+ </property>
+ <property>
+ <name>fs.trash.interval</name>
+ <value>360</value>
+ <description>Number of minutes after which the checkpoint
+ gets deleted. If zero, the trash feature is disabled.
+ </description>
+ </property>
+ <property>
+ <name>fs.trash.checkpoint.interval</name>
+ <value>0</value>
+ <description>Number of minutes between trash checkpoints.
+ Should be smaller or equal to fs.trash.interval. If zero,
+ the value is set to the value of fs.trash.interval.
+ </description>
+ </property>
+ <property>
+ <name>fs.permissions.umask-mode</name>
+ <value>022</value>
+ <description>The umask used when creating files and directories.
+    It can be in octal or symbolic. Examples are: "022" (octal for
+ u=rwx,g=r-x,o=r-x in symbolic), or "u=rwx,g=rwx,o=" (symbolic
+ for 007 in octal).</description>
+ </property>
+ <property>
+ <name>ipc.client.idlethreshold</name>
+ <value>8000</value>
+ <description>Defines the threshold number of connections after which
+ connections will be inspected for idleness.
+ </description>
+ </property>
+ <property>
+ <name>ipc.client.connection.maxidletime</name>
+ <value>30000</value>
+ <description>The maximum time after which a client will bring down the
+ connection to the server.
+ </description>
+ </property>
+ <property>
+ <name>ipc.client.connect.max.retries</name>
+ <value>50</value>
+ <description>Defines the maximum number of retries for IPC connections.</description>
+ </property>
+ <!-- Web Interface Configuration -->
+ <property>
+ <name>hadoop.http.staticuser.user</name>
+ <value>gopher</value>
+ <description>
+ The user name to filter as, on static web filters
+ while rendering content. An example use is the HDFS
+ web UI (user to be used for browsing files).
+ </description>
+ </property>
+ <property>
+ <name>webinterface.private.actions</name>
+ <value>false</value>
+ <description> If set to true, the web interfaces of RM and NN may contain
+ actions, such as kill job, delete file, etc., that should
+      not be exposed to the public. Enable this option if the interfaces
+ are only reachable by those who have the right authorization.
+ </description>
+ </property>
+ <property>
+ <name>hadoop.security.authentication</name>
+ <value>simple</value>
+ <description>
+ Set the authentication for the cluster. Valid values are: simple or
+ kerberos.
+ </description>
+ </property>
+ <property>
+ <name>hadoop.security.authorization</name>
+ <value>false</value>
+ <description>
+ Enable authorization for different protocols.
+ </description>
+ </property>
+ <property>
+ <name>hadoop.proxyuser.hadoop.groups</name>
+ <value>HadoopUsers</value>
+ <description>
+ Proxy group for Hadoop.
+ </description>
+ </property>
+ <property>
+ <name>hadoop.ssl.enabled</name>
+ <value>false</value>
+ </property>
+ <property>
+ <name>hadoop.ssl.require.client.cert</name>
+ <value>false</value>
+ </property>
+ <property>
+ <name>hadoop.ssl.hostname.verifier</name>
+ <value>DEFAULT</value>
+ </property>
+ <property>
+ <name>hadoop.ssl.keystores.factory.class</name>
+ <value>org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory</value>
+ </property>
+ <property>
+ <name>hadoop.ssl.server.conf</name>
+ <value>ssl-server.xml</value>
+ </property>
+ <property>
+ <name>hadoop.ssl.client.conf</name>
+ <value>ssl-client.xml</value>
+ </property>
+ <property>
+ <name>hadoop.rpc.protection</name>
+ <value>authentication</value>
+ </property>
+ <property>
+ <name>hadoop.tmp.dir</name>
+ <value>c:\hdp\temp\hadoop</value>
+ </property>
+
+ <property>
+ <name>hadoop.proxyuser.hadoop.hosts</name>
+ <value>192.168.145.128</value>
+ </property>
+
+ <property>
+ <name>hadoop.security.auth_to_local</name>
+ <value>
+ RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/
+ RULE:[2:$1@$0](jhs@.*)s/.*/mapred/
+ RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/
+ RULE:[2:$1@$0](hm@.*)s/.*/hbase/
+ RULE:[2:$1@$0](rs@.*)s/.*/hbase/
+ DEFAULT
+ </value>
+<description>The mapping from kerberos principal names to local OS user names.
+  The default rule is just "DEFAULT", which takes all principals in your default domain to their first component:
+  it translates "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" to "omalley" if your default domain is APACHE.ORG.
+The translation rules have 3 sections:
+      base, filter, and substitution
+The base consists of a number that represents the number of components in the principal name excluding the realm, and the pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
+
+[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
+[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
+[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
+
+The filter is a regex in parens that must match the generated string for the rule to apply.
+
+"(.*%admin)" will take any string that ends in "%admin"
+"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
+
+Finally, the substitution is a sed rule to translate a regex into a fixed string.
+
+"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
+"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
+"s/X/Y/g" replaces all of the "X" in the name with "Y"
+
+So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
+
+RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
+DEFAULT
+
+To also translate the names with a second component, you'd make the rules:
+
+RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
+RULE:[2:$1@$0](.*@ACME.COM)s/@.*//
+DEFAULT
+
+If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
+
+RULE:[2:$1%$2@$0](.*%admin@APACHE.ORG)s/.*/admin/
+DEFAULT
+ </description>
+ </property>
+</configuration> \ No newline at end of file
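
The auth_to_local rules above are easiest to follow with one rule worked by hand. Below is a toy sketch, not Hadoop's actual implementation (the real logic is Java, in KerberosName), showing how the base [2:$1@$0], the filter ([rn]m@.*) and the substitution (s/.*/yarn/) combine to map a ResourceManager principal to the yarn user:

```python
# Toy walk-through of RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/ for illustration;
# Hadoop's authoritative implementation is its KerberosName class.
import re

def apply_rule(principal):
    name, realm = principal.split("@")
    components = name.split("/")
    if len(components) != 2:                  # base "2": two components required
        return None
    candidate = f"{components[0]}@{realm}"    # build string from pattern $1@$0
    if not re.match(r"[rn]m@.*", candidate):  # filter in parentheses must match
        return None
    return re.sub(r".*", "yarn", candidate, count=1)  # sed-style s/.*/yarn/

print(apply_rule("rm/host1.example.com@EXAMPLE.COM"))    # -> yarn
print(apply_rule("hdfs/host1.example.com@EXAMPLE.COM"))  # -> None (filter fails)
```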
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-env.xml
new file mode 100644
index 0000000000..85ca907c06
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-env.xml
@@ -0,0 +1,35 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+
+ Copyright 2011 The Apache Software Foundation
+
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements. See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership. The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+ <property>
+ <name>dfs.datanode.data.dir.mount.file</name>
+ <value>c:\hadoop\hdfs\dfs_data_dir_mount.hist</value>
+ <description>File path that contains the last known mount point for each data dir. This file is used to avoid creating a DFS data dir on the root drive (and filling it up) if a path was previously mounted on a drive.</description>
+ </property>
+
+</configuration> \ No newline at end of file
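
The point of dfs.datanode.data.dir.mount.file is a simple guard: remember which mount each data dir lived on, and refuse to recreate a dir whose disk has disappeared (which would silently fill the root drive). A rough sketch of that check, with an assumed one-pair-per-line file format; the real on-disk format is Hadoop's and is not shown in this patch:

```python
# Sketch of the guard the mount-history file enables. The "dir,mount"
# line format is an assumption for illustration only.
import os

def usable_data_dirs(data_dirs, hist_path):
    last_known = {}
    if os.path.exists(hist_path):
        with open(hist_path) as f:
            for line in f:
                d, _, mount = line.strip().partition(",")
                last_known[d] = mount
    usable = []
    for d in data_dirs:
        # On Windows the drive letter stands in for the mount point.
        current = os.path.splitdrive(os.path.abspath(d))[0] or "/"
        if d in last_known and last_known[d] != current:
            continue  # disk went away; skip rather than recreate on the root drive
        usable.append(d)
    return usable
```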
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-policy.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-policy.xml
new file mode 100644
index 0000000000..1549b41b45
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-policy.xml
@@ -0,0 +1,219 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+
+ Copyright 2011 The Apache Software Foundation
+
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements. See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership. The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration supports_final="true">
+ <property>
+ <name>security.client.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for ClientProtocol, which is used by user code
+ via the DistributedFileSystem.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.client.datanode.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
+ for block recovery.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.datanode.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for DatanodeProtocol, which is used by datanodes to
+ communicate with the namenode.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.inter.datanode.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
+ for updating generation timestamp.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.namenode.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for NamenodeProtocol, the protocol used by the secondary
+ namenode to communicate with the namenode.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.admin.operations.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for AdminOperationsProtocol. Used for admin commands.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.refresh.usertogroups.mappings.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for RefreshUserMappingsProtocol. Used to refresh
+ users mappings. The ACL is a comma-separated list of user and
+ group names. The user and group list is separated by a blank. For
+ e.g. "alice,bob users,wheel". A special value of "*" means all
+ users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.refresh.policy.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
+ dfsadmin and mradmin commands to refresh the security policy in-effect.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.ha.service.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for HAService protocol used by HAAdmin to manage the
+ active and stand-by states of namenode.</description>
+ </property>
+
+ <property>
+ <name>security.zkfc.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for access to the ZK Failover Controller
+ </description>
+ </property>
+
+ <property>
+ <name>security.qjournal.service.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for QJournalProtocol, used by the NN to communicate with
+ JNs when using the QuorumJournalManager for edit logs.</description>
+ </property>
+
+ <property>
+ <name>security.mrhs.client.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for HSClientProtocol, used by job clients to
+    communicate with the MR History Server to query job status etc.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <!-- YARN Protocols -->
+
+ <property>
+ <name>security.resourcetracker.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for ResourceTrackerProtocol, used by the
+ ResourceManager and NodeManager to communicate with each other.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.resourcemanager-administration.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for ResourceManagerAdministrationProtocol, for admin commands.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.applicationclient.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for ApplicationClientProtocol, used by the ResourceManager
+ and applications submission clients to communicate with each other.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.applicationmaster.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for ApplicationMasterProtocol, used by the ResourceManager
+ and ApplicationMasters to communicate with each other.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.containermanagement.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for ContainerManagementProtocol protocol, used by the NodeManager
+ and ApplicationMasters to communicate with each other.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.resourcelocalizer.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for ResourceLocalizer protocol, used by the NodeManager
+ and ResourceLocalizer to communicate with each other.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.job.task.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
+ tasks to communicate with the parent tasktracker.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.job.client.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for MRClientProtocol, used by job clients to
+    communicate with the MR ApplicationMaster to query job status etc.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+</configuration>
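
Every ACL in this file uses the same format spelled out in the descriptions: a comma-separated user list and a comma-separated group list, separated by a single blank, with "*" meaning everyone. A minimal parser sketch of that convention (illustrative; Hadoop's own AccessControlList class is the authoritative parser):

```python
# Sketch: parsing the 'alice,bob users,wheel' ACL format described above.
def parse_acl(acl):
    if acl.strip() == "*":
        return None                     # None stands for "all users allowed"
    users_part, _, groups_part = acl.partition(" ")
    users = [u for u in users_part.split(",") if u]
    groups = [g for g in groups_part.split(",") if g]
    return users, groups

print(parse_acl("alice,bob users,wheel"))  # (['alice', 'bob'], ['users', 'wheel'])
print(parse_acl("*"))                      # None -> everyone
```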
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hdfs-site.xml
new file mode 100644
index 0000000000..f267e51db1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hdfs-site.xml
@@ -0,0 +1,272 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!-- Put site-specific property overrides in this file. -->
+<configuration supports_final="true">
+ <!-- file system properties -->
+ <property>
+ <name>dfs.namenode.name.dir</name>
+ <value>file:///c:/hdpdata/hdfs/nn</value>
+ <description>Determines where on the local filesystem the DFS name node
+ should store the name table. If this is a comma-delimited list
+ of directories then the name table is replicated in all of the
+ directories, for redundancy. </description>
+ <final>true</final>
+ </property>
+ <property>
+ <name>dfs.support.append</name>
+ <value>true</value>
+ <description>to enable dfs append</description>
+ <final>true</final>
+ </property>
+ <property>
+ <name>dfs.webhdfs.enabled</name>
+ <value>true</value>
+ <description>to enable webhdfs</description>
+ <final>true</final>
+ </property>
+ <property>
+ <name>dfs.datanode.failed.volumes.tolerated</name>
+ <value>0</value>
+    <description>Number of failed disks a DataNode would tolerate.</description>
+ <final>true</final>
+ </property>
+ <property>
+ <name>dfs.datanode.data.dir</name>
+ <value>file:///c:/hdpdata/hdfs/dn</value>
+ <description>Determines where on the local filesystem an DFS data node
+ should store its blocks. If this is a comma-delimited
+ list of directories, then data will be stored in all named
+ directories, typically on different devices.
+ Directories that do not exist are ignored.
+ </description>
+ <final>true</final>
+ </property>
+ <property>
+ <name>dfs.checksum.type</name>
+ <value>CRC32</value>
+ <description>The checksum method to be used by default. To maintain
+ compatibility, it is being set to CRC32. Once all migration steps
+ are complete, we can change it to CRC32C and take advantage of the
+ additional performance benefit.</description>
+ </property>
+ <property>
+ <name>dfs.replication.max</name>
+ <value>50</value>
+ <description>Maximal block replication.
+ </description>
+ </property>
+ <property>
+ <name>dfs.heartbeat.interval</name>
+ <value>3</value>
+ <description>Determines datanode heartbeat interval in seconds.</description>
+ </property>
+ <property>
+ <name>dfs.namenode.safemode.threshold-pct</name>
+ <value>1.0f</value>
+ <description>
+ Specifies the percentage of blocks that should satisfy
+ the minimal replication requirement defined by dfs.replication.min.
+ Values less than or equal to 0 mean not to start in safe mode.
+ Values greater than 1 will make safe mode permanent.
+ </description>
+ </property>
+ <property>
+ <name>dfs.datanode.balance.bandwidthPerSec</name>
+ <value>6250000</value>
+ <description>
+ Specifies the maximum amount of bandwidth that each datanode
+      can utilize for balancing, in terms of
+ the number of bytes per second.
+ </description>
+ </property>
+ <property>
+ <name>dfs.datanode.address</name>
+ <value>0.0.0.0:50010</value>
+ </property>
+ <property>
+ <name>dfs.datanode.http.address</name>
+ <value>0.0.0.0:50075</value>
+ </property>
+ <property>
+ <name>dfs.datanode.https.address</name>
+ <value>0.0.0.0:50076</value>
+ </property>
+ <property>
+ <name>dfs.blocksize</name>
+ <value>134217728</value>
+ <description>The default block size for new files, in bytes.
+      You can use the following suffixes (case insensitive): k(kilo),
+      m(mega), g(giga), t(tera), p(peta), e(exa) to specify the
+      size (such as 128k, 512m, 1g, etc.), or provide the complete size
+      in bytes (such as 134217728 for 128 MB).</description>
+ </property>
+ <property>
+ <name>dfs.namenode.http-address</name>
+ <value>localhost:50070</value>
+    <description>The address and the base port on which the dfs namenode
+      web ui will listen. If the port is 0 then the server will
+ start on a free port.</description>
+ <final>true</final>
+ </property>
+ <property>
+ <name>dfs.https.port</name>
+ <value>50070</value>
+ <final>true</final>
+ </property>
+ <property>
+ <name>dfs.datanode.du.reserved</name>
+ <value>1073741824</value>
+    <description>Reserved space in bytes per volume. Always leave this much space free for non-DFS use.
+ </description>
+ </property>
+ <property>
+ <name>dfs.datanode.ipc.address</name>
+ <value>0.0.0.0:8010</value>
+ <description>The datanode ipc server address and port.
+ If the port is 0 then the server will start on a free port.
+ </description>
+ </property>
+ <property>
+ <name>dfs.blockreport.initialDelay</name>
+ <value>120</value>
+ <description>Delay for first block report in seconds.</description>
+ </property>
+ <property>
+ <name>dfs.datanode.du.pct</name>
+ <value>0.85f</value>
+ <description>When calculating remaining space, only use this percentage of the real available space
+ </description>
+ </property>
+ <property>
+ <name>dfs.namenode.handler.count</name>
+ <value>40</value>
+ <description>The number of server threads for the namenode.</description>
+ </property>
+ <property>
+ <name>dfs.namenode.checkpoint.dir</name>
+ <value>file:///c:/hdpdata/hdfs/snn</value>
+ <description>Determines where on the local filesystem the DFS secondary
+ name node should store the temporary images to merge.
+ If this is a comma-delimited list of directories then the image is
+ replicated in all of the directories for redundancy.
+ </description>
+ </property>
+ <property>
+ <name>dfs.namenode.checkpoint.edits.dir</name>
+ <value>file:///c:/hadoop/hdfs/namesecondary</value>
+ <description>Determines where on the local filesystem the DFS secondary
+ name node should store the temporary edits to merge.
+      If this is a comma-delimited list of directories then the edits are
+      replicated in all of the directories for redundancy.
+      The default value is the same as dfs.namenode.checkpoint.dir
+ </description>
+ </property>
+ <property>
+ <name>dfs.namenode.checkpoint.period</name>
+ <value>86400</value>
+ <description>The number of seconds between two periodic checkpoints.
+ </description>
+ </property>
+ <property>
+ <name>dfs.datanode.max.transfer.threads</name>
+ <value>1024</value>
+ <description>Specifies the maximum number of threads to use for
+ transferring data in and out of the DN.</description>
+ </property>
+ <!-- Permissions configuration -->
+ <property>
+ <name>dfs.permissions.enabled</name>
+ <value>true</value>
+ <description>
+ If "true", enable permission checking in HDFS.
+ If "false", permission checking is turned off,
+ but all other behavior is unchanged.
+ Switching from one parameter value to the other does not change the mode,
+ owner or group of files or directories.
+ </description>
+ </property>
+ <property>
+ <name>dfs.permissions.superusergroup</name>
+ <value>hdfs</value>
+ <description>The name of the group of super-users.</description>
+ </property>
+ <property>
+ <name>ipc.server.max.response.size</name>
+ <value>5242880</value>
+ </property>
+ <property>
+ <name>dfs.block.access.token.enable</name>
+ <value>false</value>
+ <description>
+ If "true", access tokens are used as capabilities for accessing datanodes.
+ If "false", no access tokens are checked on accessing datanodes.
+ </description>
+ </property>
+ <property>
+ <name>dfs.namenode.secondary.http-address</name>
+ <value>localhost:50090</value>
+ <description>Address of secondary namenode web server</description>
+ </property>
+ <property>
+ <name>dfs.secondary.https.port</name>
+ <value>50091</value>
+ <description>The https port where secondary-namenode binds</description>
+ </property>
+ <property>
+ <name>dfs.namenode.https-address</name>
+ <value>localhost:50701</value>
+ <description>The https address where namenode binds</description>
+ </property>
+ <property>
+ <name>dfs.datanode.data.dir.perm</name>
+ <value>755</value>
+ <description>The permissions that should be there on dfs.data.dir
+ directories. The datanode will not come up if the permissions are
+ different on existing dfs.data.dir directories. If the directories
+ don't exist, they will be created with this permission.</description>
+ </property>
+ <property>
+ <name>dfs.namenode.accesstime.precision</name>
+ <value>0</value>
+    <description>The access time for an HDFS file is precise up to this value.
+ The default value is 1 hour. Setting a value of 0 disables
+ access times for HDFS.
+ </description>
+ </property>
+ <property>
+ <name>dfs.cluster.administrators</name>
+ <value>hdfs</value>
+    <description>ACL governing who can view the default servlets in HDFS</description>
+ </property>
+ <property>
+ <name>ipc.server.read.threadpool.size</name>
+ <value>5</value>
+ <description />
+ </property>
+ <property>
+ <name>dfs.encrypt.data.transfer</name>
+ <value>false</value>
+ </property>
+ <property>
+ <name>dfs.encrypt.data.transfer.algorithm</name>
+ <value>3des</value>
+ </property>
+ <property>
+ <name>dfs.https.enable</name>
+ <value>false</value>
+ </property>
+ <property>
+ <name>dfs.replication</name>
+ <value>1</value>
+ </property>
+
+ <property>
+ <name>dfs.hosts.exclude</name>
+ <value>c:\hdp\hadoop\etc\hadoop\dfs.exclude</value>
+ </property>
+
+ <property>
+ <name>dfs.hosts</name>
+ <value>c:\hdp\hadoop\etc\hadoop\dfs.include</value>
+ </property>
+</configuration> \ No newline at end of file
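
The dfs.blocksize description allows suffixed sizes (128k, 512m, 1g, ...) as well as raw byte counts. A small sketch of that conversion, assuming the binary multiples (1k = 1024) that Hadoop uses:

```python
# Sketch: expanding the k/m/g/t/p/e suffixes allowed by dfs.blocksize
# into bytes. Binary multiples assumed, per Hadoop convention.
_SUFFIXES = {"k": 1, "m": 2, "g": 3, "t": 4, "p": 5, "e": 6}

def to_bytes(size):
    size = size.strip().lower()
    if size and size[-1] in _SUFFIXES:
        return int(size[:-1]) * 1024 ** _SUFFIXES[size[-1]]
    return int(size)

print(to_bytes("128m"))       # 134217728, the default above
print(to_bytes("134217728"))  # raw byte counts pass through unchanged
```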
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/metainfo.xml
new file mode 100644
index 0000000000..127e20522a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/metainfo.xml
@@ -0,0 +1,162 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>HDFS</name>
+ <displayName>HDFS</displayName>
+ <comment>Apache Hadoop Distributed File System</comment>
+ <version>2.1.0.2.0</version>
+
+ <components>
+ <component>
+ <name>NAMENODE</name>
+ <displayName>NameNode</displayName>
+ <category>MASTER</category>
+ <cardinality>1-2</cardinality>
+ <commandScript>
+ <script>scripts/namenode.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ <customCommands>
+ <customCommand>
+ <name>DECOMMISSION</name>
+ <commandScript>
+ <script>scripts/namenode.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ </customCommand>
+ <customCommand>
+ <name>REBALANCEHDFS</name>
+ <background>true</background>
+ <commandScript>
+ <script>scripts/namenode.py</script>
+ <scriptType>PYTHON</scriptType>
+ </commandScript>
+ </customCommand>
+ </customCommands>
+ </component>
+
+ <component>
+ <name>DATANODE</name>
+ <displayName>DataNode</displayName>
+ <category>SLAVE</category>
+ <cardinality>1+</cardinality>
+ <commandScript>
+ <script>scripts/datanode.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ </component>
+
+ <component>
+ <name>SECONDARY_NAMENODE</name>
+ <displayName>SNameNode</displayName>
+ <!-- TODO: cardinality is conditional on HA usage -->
+ <cardinality>1</cardinality>
+ <category>MASTER</category>
+ <commandScript>
+ <script>scripts/snamenode.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ </component>
+
+ <component>
+ <name>HDFS_CLIENT</name>
+ <displayName>HDFS Client</displayName>
+ <category>CLIENT</category>
+ <cardinality>1+</cardinality>
+ <commandScript>
+ <script>scripts/hdfs_client.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ <configFiles>
+ <configFile>
+ <type>xml</type>
+ <fileName>hdfs-site.xml</fileName>
+ <dictionaryName>hdfs-site</dictionaryName>
+ </configFile>
+ <configFile>
+ <type>xml</type>
+ <fileName>core-site.xml</fileName>
+ <dictionaryName>core-site</dictionaryName>
+ </configFile>
+ <configFile>
+ <type>env</type>
+ <fileName>log4j.properties</fileName>
+ <dictionaryName>hdfs-log4j,yarn-log4j</dictionaryName>
+ </configFile>
+ <configFile>
+ <type>env</type>
+              <fileName>hadoop-env.cmd</fileName>
+ <dictionaryName>hadoop-env</dictionaryName>
+ </configFile>
+ </configFiles>
+ </component>
+
+ <component>
+ <name>JOURNALNODE</name>
+ <displayName>JournalNode</displayName>
+ <category>SLAVE</category>
+ <cardinality>0+</cardinality>
+ <commandScript>
+ <script>scripts/journalnode.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ </component>
+
+ <component>
+ <name>ZKFC</name>
+ <displayName>ZKFailoverController</displayName>
+ <category>SLAVE</category>
+ <!-- TODO: cardinality is conditional on HA topology -->
+ <cardinality>0+</cardinality>
+ <commandScript>
+ <script>scripts/zkfc_slave.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ </component>
+ </components>
+
+ <commandScript>
+ <script>scripts/service_check.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>300</timeout>
+ </commandScript>
+
+ <requiredServices>
+ <service>ZOOKEEPER</service>
+ </requiredServices>
+
+ <configuration-dependencies>
+ <config-type>core-site</config-type>
+ <config-type>hdfs-site</config-type>
+ <config-type>hadoop-env</config-type>
+ <config-type>hadoop-policy</config-type>
+ <config-type>hdfs-log4j</config-type>
+ </configuration-dependencies>
+ </service>
+ </services>
+</metainfo>
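
Each &lt;commandScript&gt; above names a Python class built on resource_management's Script; Ambari invokes the method matching the command it dispatches (install, start, stop, status, plus custom commands such as DECOMMISSION). A skeleton of that contract, hedged as a sketch; the method bodies are placeholders, not the real datanode.py from this patch:

```python
# Sketch of the lifecycle contract behind entries like scripts/datanode.py.
# Method bodies are illustrative placeholders.
from resource_management import Script

class DataNode(Script):
    def install(self, env):
        self.install_packages(env)      # standard Script helper

    def start(self, env):
        import params
        env.set_params(params)
        # a Windows stack script would start the mapped NT service here

    def stop(self, env):
        pass                            # stop the service

    def status(self, env):
        pass                            # raise ComponentIsNotRunning if down

if __name__ == "__main__":
    DataNode().execute()
```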
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/metrics.json b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/metrics.json
new file mode 100644
index 0000000000..70b551cb24
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/metrics.json
@@ -0,0 +1,2126 @@
+{
+ "NAMENODE": {
+ "Component": [
+ {
+ "type": "jmx",
+ "metrics": {
+ "metrics/dfs/namenode/Used": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Used",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/TotalLoad": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystem.TotalLoad",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/memMaxM":{
+ "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemMaxM",
+ "pointInTime" : true,
+ "temporal" : false
+ },
+ "metrics/dfs/FSNamesystem/BlockCapacity": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystem.BlockCapacity",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/TotalFiles": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalFiles",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/HostName": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.HostName",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/GetListingOps": {
+ "metric": "Hadoop:service=NameNode,name=NameNode.GetListingOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/UpgradeFinalized": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.UpgradeFinalized",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/getProtocolVersion_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getProtocolVersion_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/fsync_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.fsync_avg_time",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/ugi/loginSuccess_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=ugi.loginSuccess_avg_time",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "ServiceComponentInfo/Safemode": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Safemode",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "ServiceComponentInfo/CorruptBlocks": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CorruptBlocks",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "ServiceComponentInfo/LiveNodes": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.LiveNodes",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/renewLease_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/getFileInfo_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getFileInfo_avg_time",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/CapacityRemaining": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityRemaining",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/PercentRemaining": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentRemaining",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/memNonHeapUsedM": {
+ "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemNonHeapUsedM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/complete_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.complete_avg_time",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/CapacityTotalGB": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityTotalGB",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/getBlockLocations_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getBlockLocations_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/AddBlockOps": {
+ "metric": "Hadoop:service=NameNode,name=NameNode.AddBlockOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/CapacityUsedGB": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityUsedGB",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/Syncs_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=NameNode.Syncs_avg_time",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsBlocked": {
+ "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsBlocked",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcQueueTime_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTime_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/PercentUsed": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentUsed",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "ServiceComponentInfo/DecomNodes": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DecomNodes",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/blockReport_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=NameNode.blockReport_avg_time",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "ServiceComponentInfo/NonDfsUsedSpace": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NonDfsUsedSpace",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "ServiceComponentInfo/UpgradeFinalized": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.UpgradeFinalized",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/getFileInfo_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getFileInfo_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/getEditLogSize_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getEditLogSize_avg_time",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/ugi/loginSuccess_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=ugi.loginSuccess_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/blockReceived_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReceived_avg_time",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/Safemode": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Safemode",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/FilesCreated": {
+ "metric": "Hadoop:service=NameNode,name=NameNode.FilesCreated",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/addBlock_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.addBlock_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/DecomNodes": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DecomNodes",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/CapacityUsed": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityUsed",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "ServiceComponentInfo/NonHeapMemoryUsed": {
+ "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/memNonHeapCommittedM": {
+ "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemNonHeapCommittedM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "ServiceComponentInfo/DeadNodes": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DeadNodes",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "ServiceComponentInfo/PercentUsed": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentUsed",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/Free": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Free",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/Total": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Total",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/GetBlockLocations": {
+ "metric": "Hadoop:service=NameNode,name=NameNode.GetBlockLocations",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/ugi/loginFailure_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=ugi.loginFailure_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/fsync_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.fsync_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "ServiceComponentInfo/HeapMemoryMax": {
+ "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/create_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.create_avg_time",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/PendingReplicationBlocks": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystem.PendingReplicationBlocks",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "ServiceComponentInfo/UnderReplicatedBlocks": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.UnderReplicatedBlocks",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/FileInfoOps": {
+ "metric": "Hadoop:service=NameNode,name=NameNode.FileInfoOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "ServiceComponentInfo/MissingBlocks": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.MissingBlocks",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/sendHeartbeat_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.sendHeartbeat_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcProcessingTime_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcProcessingTime_avg_time",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/blockReport_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReport_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "ServiceComponentInfo/CapacityRemaining": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystemState.CapacityRemaining",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rpcAuthenticationSuccesses": {
+ "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthenticationSuccesses",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/PendingDeletionBlocks": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystem.PendingDeletionBlocks",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rpcAuthenticationFailures": {
+ "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthenticationFailures",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/getEditLogSize_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getEditLogSize_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/memHeapCommittedM": {
+ "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemHeapCommittedM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/FilesInGetListingOps": {
+ "metric": "Hadoop:service=NameNode,name=NameNode.FilesInGetListingOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsRunnable": {
+ "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsRunnable",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "ServiceComponentInfo/BlocksTotal": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.BlocksTotal",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/CapacityTotal": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityTotal",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/complete_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.complete_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/LiveNodes": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.LiveNodes",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsNew": {
+ "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsNew",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/rollFsImage_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollFsImage_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rpcAuthorizationFailures": {
+ "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthorizationFailures",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/Syncs_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=NameNode.Syncs_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "ServiceComponentInfo/StartTime": {
+ "metric": "java.lang:type=Runtime.StartTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcQueueTime_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTime_avg_time",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/blockReceived_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReceived_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/rollEditLog_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollEditLog_avg_time",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/DeadNodes": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DeadNodes",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/SentBytes": {
+ "metric": "Hadoop:service=NameNode,name=RpcActivity.SentBytes",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "ServiceComponentInfo/HeapMemoryUsed": {
+ "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/FilesTotal": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystem.FilesTotal",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/Version": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Version",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/logWarn": {
+ "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogWarn",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/ExcessBlocks": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystem.ExcessBlocks",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsTimedWaiting": {
+ "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsTimedWaiting",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/gcCount": {
+ "metric": "Hadoop:service=NameNode,name=JvmMetrics.GcCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "ServiceComponentInfo/PercentRemaining": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentRemaining",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/ReceivedBytes": {
+ "metric": "Hadoop:service=NameNode,name=RpcActivity.ReceivedBytes",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/blockReport_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=NameNode.blockReport_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "ServiceComponentInfo/NonHeapMemoryMax": {
+ "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/rollFsImage_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollFsImage_avg_time",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/NumOpenConnections": {
+ "metric": "Hadoop:service=NameNode,name=RpcActivity.NumOpenConnections",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/memHeapUsedM": {
+ "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemHeapUsedM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystem.ScheduledReplicationBlocks",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsWaiting": {
+ "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsWaiting",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/BlocksTotal": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystem.BlocksTotal",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/gcTimeMillis": {
+ "metric": "Hadoop:service=NameNode,name=JvmMetrics.GcTimeMillis",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/getBlockLocations_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getBlockLocations_avg_time",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/Transactions_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=NameNode.Transactions_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/create_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.create_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "ServiceComponentInfo/CapacityTotal": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Total",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsTerminated": {
+ "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsTerminated",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/CapacityRemainingGB": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityRemainingGB",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/Transactions_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=NameNode.Transactions_avg_time",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/MissingBlocks": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystem.MissingBlocks",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/Threads": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Threads",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/callQueueLen": {
+ "metric": "Hadoop:service=NameNode,name=RpcActivity.callQueueLen",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/CorruptBlocks": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystem.CorruptBlocks",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/blockReport_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReport_avg_time",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "ServiceComponentInfo/TotalFiles": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalFiles",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/logInfo": {
+ "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogInfo",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/NameDirStatuses": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NameDirStatuses",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/getListing_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getListing_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/rollEditLog_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollEditLog_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/addBlock_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.addBlock_avg_time",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcProcessingTime_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcProcessingTime_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "ServiceComponentInfo/CapacityUsed": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Used",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/UnderReplicatedBlocks": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystem.UnderReplicatedBlocks",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/sendHeartbeat_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.sendHeartbeat_avg_time",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/CreateFileOps": {
+ "metric": "Hadoop:service=NameNode,name=NameNode.CreateFileOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/logError": {
+ "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogError",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/ugi/loginFailure_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=ugi.loginFailure_avg_time",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/getProtocolVersion_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getProtocolVersion_avg_time",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rpcAuthorizationSuccesses": {
+ "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthorizationSuccesses",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "ServiceComponentInfo/Version": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Version",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/getListing_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getListing_avg_time",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/logFatal": {
+ "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogFatal",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/NonDfsUsedSpace": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NonDfsUsedSpace",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/renewLease_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_avg_time",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/TotalBlocks": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalBlocks",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/CapacityNonDFSUsed": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityNonDFSUsed",
+ "pointInTime": true,
+ "temporal": false
+ }
+ }
+ }
+ ],
+ "HostComponent": [
+ {
+ "type": "jmx",
+ "metrics": {
+ "metrics/dfs/namenode/Used": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Used",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/TotalLoad": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystem.TotalLoad",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/memMaxM":{
+ "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemMaxM",
+ "pointInTime" : true,
+ "temporal" : false
+ },
+ "metrics/dfs/FSNamesystem/BlockCapacity": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystem.BlockCapacity",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/TotalFiles": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalFiles",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/GetListingOps": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeActivity.GetListingOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/HostName": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeActivity.tag.Hostname",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/runtime/StartTime": {
+ "metric": "java.lang:type=Runtime.StartTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/UpgradeFinalized": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.UpgradeFinalized",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/getProtocolVersion_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.VersionRequestNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/fsync_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.FsyncAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/ugi/loginSuccess_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=UgiMetrics.LoginSuccessAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/renewLease_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/CapacityRemaining": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityRemaining",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/getFileInfo_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.GetFileInfoAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/PercentRemaining": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentRemaining",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/memNonHeapUsedM": {
+ "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemNonHeapUsedM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/complete_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.CompleteAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/CapacityTotalGB": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityTotalGB",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/getBlockLocations_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.GetBlockLocationsNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/CapacityUsedGB": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityUsedGB",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/AddBlockOps": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeActivity.AddBlockOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/Syncs_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeActivity.SyncsAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsBlocked": {
+ "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsBlocked",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/PercentUsed": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentUsed",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcQueueTime_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTimeNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/blockReport_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeActivity.BlockReportAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/HeapMemoryMax": {
+ "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/HeapMemoryUsed": {
+ "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/getFileInfo_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.GetFileInfoNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/getEditLogSize_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.GetEditLogManifestAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/ugi/loginSuccess_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=UgiMetrics.LoginSuccessNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/blockReceived_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.BlockReceivedAndDeletedAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/addBlock_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.AddBlockNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/FilesCreated": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeActivity.FilesCreated",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/Safemode": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Safemode",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/CapacityUsed": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityUsed",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/DecomNodes": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DecomNodes",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/memNonHeapCommittedM": {
+ "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemNonHeapCommittedM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/Free": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Free",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/Total": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Total",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/HAState": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystem.tag#HAState",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/GetBlockLocations": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeActivity.GetBlockLocations",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/ugi/loginFailure_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=UgiMetrics.LoginFailureNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/fsync_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.FsyncNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/NonHeapMemoryMax": {
+ "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/create_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.CreateAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/PendingReplicationBlocks": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystem.PendingReplicationBlocks",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/FileInfoOps": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeActivity.FileInfoOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/sendHeartbeat_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.SendHeartbeatNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcProcessingTime_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcProcessingTimeAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/blockReport_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.BlockReportNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rpcAuthenticationSuccesses": {
+ "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcAuthenticationSuccesses",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/PendingDeletionBlocks": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystem.PendingDeletionBlocks",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rpcAuthenticationFailures": {
+ "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcAuthenticationFailures",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/memHeapCommittedM": {
+ "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemHeapCommittedM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/getEditLogSize_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.GetEditLogManifestNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/NonHeapMemoryUsed": {
+ "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/FilesInGetListingOps": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeActivity.FilesInGetListingOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsRunnable": {
+ "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsRunnable",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/CapacityTotal": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityTotal",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/LiveNodes": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.LiveNodes",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/complete_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.CompleteNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsNew": {
+ "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsNew",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/rollFsImage_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollFsImage_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rpcAuthorizationFailures": {
+ "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcAuthorizationFailures",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/Syncs_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeActivity.SyncsNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcQueueTime_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTimeAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/blockReceived_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.BlockReceivedAndDeletedNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/rollEditLog_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.RollEditLogAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/DeadNodes": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DeadNodes",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/SentBytes": {
+ "metric": "Hadoop:service=NameNode,name=RpcActivity.SentBytes",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/FilesTotal": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystem.FilesTotal",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/Version": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Version",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/logWarn": {
+ "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogWarn",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/ExcessBlocks": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystem.ExcessBlocks",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsTimedWaiting": {
+ "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsTimedWaiting",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/gcCount": {
+ "metric": "Hadoop:service=NameNode,name=JvmMetrics.GcCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/ReceivedBytes": {
+ "metric": "Hadoop:service=NameNode,name=RpcActivity.ReceivedBytes",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/blockReport_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeActivity.BlockReportNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/rollFsImage_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollFsImage_avg_time",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/NumOpenConnections": {
+ "metric": "Hadoop:service=NameNode,name=RpcActivity.NumOpenConnections",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/memHeapUsedM": {
+ "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemHeapUsedM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystem.ScheduledReplicationBlocks",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsWaiting": {
+ "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsWaiting",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/BlocksTotal": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystem.BlocksTotal",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/gcTimeMillis": {
+ "metric": "Hadoop:service=NameNode,name=JvmMetrics.GcTimeMillis",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/getBlockLocations_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.GetBlockLocationsAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/Transactions_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeActivity.TransactionsNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/create_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.CreateNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsTerminated": {
+ "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsTerminated",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/CapacityRemainingGB": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityRemainingGB",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/Transactions_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeActivity.TransactionsAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/MissingBlocks": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystem.MissingBlocks",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/Threads": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Threads",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/callQueueLen": {
+ "metric": "Hadoop:service=NameNode,name=RpcActivity.CallQueueLength",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/CorruptBlocks": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystem.CorruptBlocks",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/blockReport_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.BlockReportAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/logInfo": {
+ "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogInfo",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/NameDirStatuses": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NameDirStatuses",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/getListing_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.GetListingNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/rollEditLog_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.RollEditLogNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/addBlock_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.AddBlockAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcProcessingTime_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcProcessingTimeNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/UnderReplicatedBlocks": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystem.UnderReplicatedBlocks",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/sendHeartbeat_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.SendHeartbeatAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/CreateFileOps": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeActivity.CreateFileOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/logError": {
+ "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogError",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/ugi/loginFailure_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=UgiMetrics.LoginFailureAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/getProtocolVersion_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.VersionRequestAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rpcAuthorizationSuccesses": {
+ "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcAuthorizationSuccesses",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/getListing_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.GetListingAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/logFatal": {
+ "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogFatal",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/NonDfsUsedSpace": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NonDfsUsedSpace",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/renewLease_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_avg_time",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/JournalTransactionInfo": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.JournalTransactionInfo",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/CapacityNonDFSUsed": {
+ "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityUsedNonDFS",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/namenode/TotalBlocks": {
+ "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalBlocks",
+ "pointInTime": true,
+ "temporal": false
+ }
+ }
+ }
+ ]
+ },
+ "DATANODE": {
+ "HostComponent": [
+ {
+ "type": "jmx",
+ "metrics": {
+ "metrics/rpcdetailed/getEditLogSize_num_ops": {
+ "metric": "Hadoop:service=DataNode,name=RpcDetailedActivity.getEditLogSize_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/TotalLoad": {
+ "metric": "Hadoop:service=DataNode,name=DataNode.TotalLoad",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/memHeapCommittedM": {
+ "metric": "Hadoop:service=DataNode,name=JvmMetrics.MemHeapCommittedM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/NonHeapMemoryUsed": {
+ "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/BlockCapacity": {
+ "metric": "Hadoop:service=DataNode,name=DataNode.BlockCapacity",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsRunnable": {
+ "metric": "Hadoop:service=DataNode,name=JvmMetrics.ThreadsRunnable",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/complete_num_ops": {
+ "metric": "Hadoop:service=DataNode,name=RpcDetailedActivity.complete_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsNew": {
+ "metric": "Hadoop:service=DataNode,name=JvmMetrics.ThreadsNew",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/rollFsImage_num_ops": {
+ "metric": "Hadoop:service=DataNode,name=RpcDetailedActivity.rollFsImage_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rpcAuthorizationFailures": {
+ "metric": "Hadoop:service=DataNode,name=RpcActivity.RpcAuthorizationFailures",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/RpcPort": {
+ "metric": "Hadoop:service=DataNode,name=DataNodeInfo.RpcPort",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/getProtocolVersion_num_ops": {
+ "metric": "Hadoop:service=DataNode,name=RpcDetailedActivity.getProtocolVersion_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcQueueTime_avg_time": {
+ "metric": "Hadoop:service=DataNode,name=RpcActivity.RpcQueueTimeAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/fsync_avg_time": {
+ "metric": "Hadoop:service=DataNode,name=RpcDetailedActivity.fsync_avg_time",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/ugi/loginSuccess_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=UgiMetrics.LoginSuccessAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/blockReceived_num_ops": {
+ "metric": "Hadoop:service=DataNode,name=RpcDetailedActivity.blockReceived_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/rollEditLog_avg_time": {
+ "metric": "Hadoop:service=DataNode,name=RpcDetailedActivity.rollEditLog_avg_time",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/SentBytes": {
+ "metric": "Hadoop:service=DataNode,name=RpcActivity.SentBytes",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/renewLease_num_ops": {
+ "metric": "Hadoop:service=DataNode,name=RpcDetailedActivity.renewLease_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/getFileInfo_avg_time": {
+ "metric": "Hadoop:service=DataNode,name=RpcDetailedActivity.getFileInfo_avg_time",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/FilesTotal": {
+ "metric": "Hadoop:service=DataNode,name=DataNode.FilesTotal",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/memNonHeapUsedM": {
+ "metric": "Hadoop:service=DataNode,name=JvmMetrics.MemNonHeapUsedM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/VolumeInfo": {
+ "metric": "Hadoop:service=DataNode,name=DataNodeInfo.VolumeInfo",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/complete_avg_time": {
+ "metric": "Hadoop:service=DataNode,name=RpcDetailedActivity.complete_avg_time",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/logWarn": {
+ "metric": "Hadoop:service=DataNode,name=JvmMetrics.LogWarn",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/CapacityTotalGB": {
+ "metric": "Hadoop:service=DataNode,name=DataNode.CapacityTotalGB",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/ExcessBlocks": {
+ "metric": "Hadoop:service=DataNode,name=DataNode.ExcessBlocks",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/getBlockLocations_num_ops": {
+ "metric": "Hadoop:service=DataNode,name=RpcDetailedActivity.getBlockLocations_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/HttpPort": {
+ "metric": "Hadoop:service=DataNode,name=DataNodeInfo.HttpPort",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsTimedWaiting": {
+ "metric": "Hadoop:service=DataNode,name=JvmMetrics.ThreadsTimedWaiting",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/gcCount": {
+ "metric": "Hadoop:service=DataNode,name=JvmMetrics.GcCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/CapacityUsedGB": {
+ "metric": "Hadoop:service=DataNode,name=DataNode.CapacityUsedGB",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/ReceivedBytes": {
+ "metric": "Hadoop:service=DataNode,name=RpcActivity.ReceivedBytes",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/HostName": {
+ "metric": "Hadoop:service=DataNode,name=MetricsSystem,sub=Stats.tag.Hostname",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsBlocked": {
+ "metric": "Hadoop:service=DataNode,name=JvmMetrics.ThreadsBlocked",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcQueueTime_num_ops": {
+ "metric": "Hadoop:service=DataNode,name=RpcActivity.RpcQueueTimeNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/HeapMemoryMax": {
+ "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/HeapMemoryUsed": {
+ "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/rollFsImage_avg_time": {
+ "metric": "Hadoop:service=DataNode,name=RpcDetailedActivity.rollFsImage_avg_time",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/NumOpenConnections": {
+ "metric": "Hadoop:service=DataNode,name=RpcActivity.NumOpenConnections",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/getFileInfo_num_ops": {
+ "metric": "Hadoop:service=DataNode,name=RpcDetailedActivity.getFileInfo_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/memHeapUsedM": {
+ "metric": "Hadoop:service=DataNode,name=JvmMetrics.MemHeapUsedM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks": {
+ "metric": "Hadoop:service=DataNode,name=DataNode.ScheduledReplicationBlocks",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsWaiting": {
+ "metric": "Hadoop:service=DataNode,name=JvmMetrics.ThreadsWaiting",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/BlocksTotal": {
+ "metric": "Hadoop:service=DataNode,name=DataNode.BlocksTotal",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/ugi/loginSuccess_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=UgiMetrics.LoginSuccessNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/getEditLogSize_avg_time": {
+ "metric": "Hadoop:service=DataNode,name=RpcDetailedActivity.getEditLogSize_avg_time",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/getBlockLocations_avg_time": {
+ "metric": "Hadoop:service=DataNode,name=RpcDetailedActivity.getBlockLocations_avg_time",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/gcTimeMillis": {
+ "metric": "Hadoop:service=DataNode,name=JvmMetrics.GcTimeMillis",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/blockReceived_avg_time": {
+ "metric": "Hadoop:service=DataNode,name=RpcDetailedActivity.blockReceived_avg_time",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/create_num_ops": {
+ "metric": "Hadoop:service=DataNode,name=RpcDetailedActivity.create_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsTerminated": {
+ "metric": "Hadoop:service=DataNode,name=JvmMetrics.ThreadsTerminated",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/addBlock_num_ops": {
+ "metric": "Hadoop:service=DataNode,name=RpcDetailedActivity.addBlock_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/CapacityRemainingGB": {
+ "metric": "Hadoop:service=DataNode,name=DataNode.CapacityRemainingGB",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/MissingBlocks": {
+ "metric": "Hadoop:service=DataNode,name=DataNode.MissingBlocks",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/memNonHeapCommittedM": {
+ "metric": "Hadoop:service=DataNode,name=JvmMetrics.MemNonHeapCommittedM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/callQueueLen": {
+ "metric": "Hadoop:service=DataNode,name=RpcActivity.CallQueueLength",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/CorruptBlocks": {
+ "metric": "Hadoop:service=DataNode,name=DataNode.CorruptBlocks",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/Version": {
+ "metric": "Hadoop:service=DataNode,name=DataNodeInfo.Version",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/blockReport_avg_time": {
+ "metric": "Hadoop:service=DataNode,name=RpcDetailedActivity.blockReport_avg_time",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/logInfo": {
+ "metric": "Hadoop:service=DataNode,name=JvmMetrics.LogInfo",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/getListing_num_ops": {
+ "metric": "Hadoop:service=DataNode,name=RpcDetailedActivity.getListing_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/rollEditLog_num_ops": {
+ "metric": "Hadoop:service=DataNode,name=RpcDetailedActivity.rollEditLog_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/addBlock_avg_time": {
+ "metric": "Hadoop:service=DataNode,name=RpcDetailedActivity.addBlock_avg_time",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/NamenodeAddress": {
+ "metric": "Hadoop:service=DataNode,name=DataNodeInfo.NamenodeAddresses",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcProcessingTime_num_ops": {
+ "metric": "Hadoop:service=DataNode,name=RpcActivity.RpcProcessingTimeNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/ugi/loginFailure_num_ops": {
+ "metric": "Hadoop:service=NameNode,name=UgiMetrics.LoginFailureNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/fsync_num_ops": {
+ "metric": "Hadoop:service=DataNode,name=RpcDetailedActivity.fsync_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/UnderReplicatedBlocks": {
+ "metric": "Hadoop:service=DataNode,name=DataNode.UnderReplicatedBlocks",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/NonHeapMemoryMax": {
+ "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/PendingReplicationBlocks": {
+ "metric": "Hadoop:service=DataNode,name=DataNode.PendingReplicationBlocks",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/create_avg_time": {
+ "metric": "Hadoop:service=DataNode,name=RpcDetailedActivity.create_avg_time",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/sendHeartbeat_avg_time": {
+ "metric": "Hadoop:service=DataNode,name=RpcDetailedActivity.sendHeartbeat_avg_time",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/logError": {
+ "metric": "Hadoop:service=DataNode,name=JvmMetrics.LogError",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/ugi/loginFailure_avg_time": {
+ "metric": "Hadoop:service=NameNode,name=UgiMetrics.LoginFailureAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/getProtocolVersion_avg_time": {
+ "metric": "Hadoop:service=DataNode,name=RpcDetailedActivity.getProtocolVersion_avg_time",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rpcAuthorizationSuccesses": {
+ "metric": "Hadoop:service=DataNode,name=RpcActivity.RpcAuthorizationSuccesses",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/getListing_avg_time": {
+ "metric": "Hadoop:service=DataNode,name=RpcDetailedActivity.getListing_avg_time",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/sendHeartbeat_num_ops": {
+ "metric": "Hadoop:service=DataNode,name=RpcDetailedActivity.sendHeartbeat_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/logFatal": {
+ "metric": "Hadoop:service=DataNode,name=JvmMetrics.LogFatal",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/renewLease_avg_time": {
+ "metric": "Hadoop:service=DataNode,name=RpcDetailedActivity.renewLease_avg_time",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcProcessingTime_avg_time": {
+ "metric": "Hadoop:service=DataNode,name=RpcActivity.RpcProcessingTimeAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/blockReport_num_ops": {
+ "metric": "Hadoop:service=DataNode,name=RpcDetailedActivity.blockReport_num_ops",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rpcAuthenticationSuccesses": {
+ "metric": "Hadoop:service=DataNode,name=RpcActivity.RpcAuthenticationSuccesses",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rpcAuthenticationFailures": {
+ "metric": "Hadoop:service=DataNode,name=RpcActivity.RpcAuthenticationFailures",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/FSNamesystem/PendingDeletionBlocks": {
+ "metric": "Hadoop:service=DataNode,name=DataNode.PendingDeletionBlocks",
+ "pointInTime": true,
+ "temporal": false
+ }
+ }
+ }
+ ]
+ },
+ "JOURNALNODE": {
+ "HostComponent": [
+ {
+ "type": "jmx",
+ "metrics": {
+ "metrics/jvm/memHeapCommittedM": {
+ "metric": "Hadoop:service=JournalNode,name=JvmMetrics.MemHeapCommittedM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/NonHeapMemoryUsed": {
+ "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsRunnable": {
+ "metric": "Hadoop:service=JournalNode,name=JvmMetrics.ThreadsRunnable",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/journalnode/cluster/$1.replaceAll(\"^-\",\"\")/syncs300s95thPercentileLatencyMicros": {
+ "metric": "Hadoop:service=JournalNode,name=Journal(-.+).Syncs300s95thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsNew": {
+ "metric": "Hadoop:service=JournalNode,name=JvmMetrics.ThreadsNew",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/finalizeLogSegment_num_ops": {
+ "metric": "Hadoop:service=JournalNode,name=RpcDetailedActivity.FinalizeLogSegmentNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rpcAuthorizationFailures": {
+ "metric": "Hadoop:service=JournalNode,name=RpcActivity.RpcAuthorizationFailures",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/ugi/loginSuccess_avg_time": {
+ "metric": "Hadoop:service=JournalNode,name=UgiMetrics.LoginSuccessAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcQueueTime_avg_time": {
+ "metric": "Hadoop:service=JournalNode,name=RpcActivity.RpcQueueTimeAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/startLogSegment_avg_time": {
+ "metric": "Hadoop:service=JournalNode,name=RpcDetailedActivity.StartLogSegmentAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/journalnode/cluster/$1.replaceAll(\"^-\",\"\")/lastWriterEpoch": {
+ "metric": "Hadoop:service=JournalNode,name=Journal(-.+).LastWriterEpoch",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/getEditLogManifest_avg_time": {
+ "metric": "Hadoop:service=JournalNode,name=RpcDetailedActivity.GetEditLogManifestAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/SentBytes": {
+ "metric": "Hadoop:service=JournalNode,name=RpcActivity.SentBytes",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/memNonHeapUsedM": {
+ "metric": "Hadoop:service=JournalNode,name=JvmMetrics.MemNonHeapUsedM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/journalnode/cluster/$1.replaceAll(\"^-\",\"\")/syncs60s_num_ops": {
+ "metric": "Hadoop:service=JournalNode,name=Journal(-.+).Syncs60sNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/logWarn": {
+ "metric": "Hadoop:service=JournalNode,name=JvmMetrics.LogWarn",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/journalnode/cluster/$1.replaceAll(\"^-\",\"\")/syncs3600s50thPercentileLatencyMicros": {
+ "metric": "Hadoop:service=JournalNode,name=Journal(-.+).Syncs3600s50thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsTimedWaiting": {
+ "metric": "Hadoop:service=JournalNode,name=JvmMetrics.ThreadsTimedWaiting",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/gcCount": {
+ "metric": "Hadoop:service=JournalNode,name=JvmMetrics.GcCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/journalnode/cluster/$1.replaceAll(\"^-\",\"\")/syncs3600s95thPercentileLatencyMicros": {
+ "metric": "Hadoop:service=JournalNode,name=Journal(-.+).Syncs3600s95thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/acceptRecovery_num_ops": {
+ "metric": "Hadoop:service=JournalNode,name=RpcDetailedActivity.AcceptRecoveryNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/ReceivedBytes": {
+ "metric": "Hadoop:service=JournalNode,name=RpcActivity.ReceivedBytes",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/journalnode/cluster/$1.replaceAll(\"^-\",\"\")/syncs3600s99thPercentileLatencyMicros": {
+ "metric": "Hadoop:service=JournalNode,name=Journal(-.+).Syncs3600s99thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsBlocked": {
+ "metric": "Hadoop:service=JournalNode,name=JvmMetrics.ThreadsBlocked",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/journalnode/cluster/$1.replaceAll(\"^-\",\"\")/lastWrittenTxId": {
+ "metric": "Hadoop:service=JournalNode,name=Journal(-.+).LastWrittenTxId",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcQueueTime_num_ops": {
+ "metric": "Hadoop:service=JournalNode,name=RpcActivity.RpcQueueTimeNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/HeapMemoryMax": {
+ "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/HeapMemoryUsed": {
+ "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/newEpoch_num_ops": {
+ "metric": "Hadoop:service=JournalNode,name=RpcDetailedActivity.NewEpochNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/journalnode/cluster/$1.replaceAll(\"^-\",\"\")/syncs60s75thPercentileLatencyMicros": {
+ "metric": "Hadoop:service=JournalNode,name=Journal(-.+).Syncs60s75thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/NumOpenConnections": {
+ "metric": "Hadoop:service=JournalNode,name=RpcActivity.NumOpenConnections",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/memHeapUsedM": {
+ "metric": "Hadoop:service=JournalNode,name=JvmMetrics.MemHeapUsedM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/prepareRecovery_num_ops": {
+ "metric": "Hadoop:service=JournalNode,name=RpcDetailedActivity.PrepareRecoveryNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/journalnode/cluster/$1.replaceAll(\"^-\",\"\")/batchesWrittenWhileLagging": {
+ "metric": "Hadoop:service=JournalNode,name=Journal(-.+).BatchesWrittenWhileLagging",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsWaiting": {
+ "metric": "Hadoop:service=JournalNode,name=JvmMetrics.ThreadsWaiting",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/newEpoch_avg_time": {
+ "metric": "Hadoop:service=JournalNode,name=RpcDetailedActivity.NewEpochAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/ugi/loginSuccess_num_ops": {
+ "metric": "Hadoop:service=JournalNode,name=UgiMetrics.LoginSuccessNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/gcTimeMillis": {
+ "metric": "Hadoop:service=JournalNode,name=JvmMetrics.GcTimeMillis",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/journalnode/cluster/$1.replaceAll(\"^-\",\"\")/syncs300s75thPercentileLatencyMicros": {
+ "metric": "Hadoop:service=JournalNode,name=Journal(-.+).Syncs300s75thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsTerminated": {
+ "metric": "Hadoop:service=JournalNode,name=JvmMetrics.ThreadsTerminated",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/memMaxM": {
+ "metric": "Hadoop:service=JournalNode,name=JvmMetrics.MemMaxM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/journalnode/cluster/$1.replaceAll(\"^-\",\"\")/syncs300s_num_ops": {
+ "metric": "Hadoop:service=JournalNode,name=Journal(-.+).Syncs300sNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/journalnode/cluster/$1.replaceAll(\"^-\",\"\")/syncs300s50thPercentileLatencyMicros": {
+ "metric": "Hadoop:service=JournalNode,name=Journal(-.+).Syncs300s50thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/getJournalState_avg_time": {
+ "metric": "Hadoop:service=JournalNode,name=RpcDetailedActivity.GetJournalStateAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/journalnode/cluster/$1.replaceAll(\"^-\",\"\")/lastPromisedEpoch": {
+ "metric": "Hadoop:service=JournalNode,name=Journal(-.+).LastPromisedEpoch",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/startTime": {
+ "metric": "java.lang:type=Runtime.StartTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/journalnode/cluster/$1.replaceAll(\"^-\",\"\")/syncs60s95thPercentileLatencyMicros": {
+ "metric": "Hadoop:service=JournalNode,name=Journal(-.+).Syncs60s95thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/journalnode/journalsStatus": {
+ "metric": "Hadoop:service=JournalNode,name=JournalNodeInfo.JournalsStatus",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/memNonHeapCommittedM": {
+ "metric": "Hadoop:service=JournalNode,name=JvmMetrics.MemNonHeapCommittedM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/journalnode/cluster/$1.replaceAll(\"^-\",\"\")/syncs60s50thPercentileLatencyMicros": {
+ "metric": "Hadoop:service=JournalNode,name=Journal(-.+).Syncs60s50thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/callQueueLen": {
+ "metric": "Hadoop:service=JournalNode,name=RpcActivity.CallQueueLength",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/journal_avg_time": {
+ "metric": "Hadoop:service=JournalNode,name=RpcDetailedActivity.JournalAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/journalnode/cluster/$1.replaceAll(\"^-\",\"\")/syncs3600s75thPercentileLatencyMicros": {
+ "metric": "Hadoop:service=JournalNode,name=Journal(-.+).Syncs3600s75thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/finalizeLogSegment_avg_time": {
+ "metric": "Hadoop:service=JournalNode,name=RpcDetailedActivity.FinalizeLogSegmentAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/journalnode/cluster/$1.replaceAll(\"^-\",\"\")/syncs3600s90thPercentileLatencyMicros": {
+ "metric": "Hadoop:service=JournalNode,name=Journal(-.+).Syncs3600s90thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/journalnode/cluster/$1.replaceAll(\"^-\",\"\")/batchesWritten": {
+ "metric": "Hadoop:service=JournalNode,name=Journal(-.+).BatchesWritten",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/startLogSegment_num_ops": {
+ "metric": "Hadoop:service=JournalNode,name=RpcDetailedActivity.StartLogSegmentNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/journalnode/cluster/$1.replaceAll(\"^-\",\"\")/syncs60s90thPercentileLatencyMicros": {
+ "metric": "Hadoop:service=JournalNode,name=Journal(-.+).Syncs60s90thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/journalnode/cluster/$1.replaceAll(\"^-\",\"\")/syncs3600s_num_ops": {
+ "metric": "Hadoop:service=JournalNode,name=Journal(-.+).Syncs3600sNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/prepareRecovery_avg_time": {
+ "metric": "Hadoop:service=JournalNode,name=RpcDetailedActivity.PrepareRecoveryAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/logInfo": {
+ "metric": "Hadoop:service=JournalNode,name=JvmMetrics.LogInfo",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/getJournalState_num_ops": {
+ "metric": "Hadoop:service=JournalNode,name=RpcDetailedActivity.GetJournalStateNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/journalnode/cluster/$1.replaceAll(\"^-\",\"\")/currentLagTxns": {
+ "metric": "Hadoop:service=JournalNode,name=Journal(-.+).CurrentLagTxns",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcProcessingTime_num_ops": {
+ "metric": "Hadoop:service=JournalNode,name=RpcActivity.RpcProcessingTimeNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/ugi/loginFailure_num_ops": {
+ "metric": "Hadoop:service=JournalNode,name=UgiMetrics.LoginFailureNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/getEditLogManifest_num_ops": {
+ "metric": "Hadoop:service=JournalNode,name=RpcDetailedActivity.GetEditLogManifestNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/journalnode/cluster/$1.replaceAll(\"^-\",\"\")/syncs300s99thPercentileLatencyMicros": {
+ "metric": "Hadoop:service=JournalNode,name=Journal(-.+).Syncs300s99thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/NonHeapMemoryMax": {
+ "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/journalnode/cluster/$1.replaceAll(\"^-\",\"\")/syncs300s90thPercentileLatencyMicros": {
+ "metric": "Hadoop:service=JournalNode,name=Journal(-.+).Syncs300s90thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/acceptRecovery_avg_time": {
+ "metric": "Hadoop:service=JournalNode,name=RpcDetailedActivity.AcceptRecoveryAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/logError": {
+ "metric": "Hadoop:service=JournalNode,name=JvmMetrics.LogError",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/ugi/loginFailure_avg_time": {
+ "metric": "Hadoop:service=JournalNode,name=UgiMetrics.LoginFailureAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/journalnode/cluster/$1.replaceAll(\"^-\",\"\")/syncs60s99thPercentileLatencyMicros": {
+ "metric": "Hadoop:service=JournalNode,name=Journal(-.+).Syncs60s99thPercentileLatencyMicros",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpcdetailed/journal_num_ops": {
+ "metric": "Hadoop:service=JournalNode,name=RpcDetailedActivity.JournalNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rpcAuthorizationSuccesses": {
+ "metric": "Hadoop:service=JournalNode,name=RpcActivity.RpcAuthorizationSuccesses",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/journalnode/cluster/$1.replaceAll(\"^-\",\"\")/txnsWritten": {
+ "metric": "Hadoop:service=JournalNode,name=Journal(-.+).TxnsWritten",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/logFatal": {
+ "metric": "Hadoop:service=JournalNode,name=JvmMetrics.LogFatal",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcProcessingTime_avg_time": {
+ "metric": "Hadoop:service=JournalNode,name=RpcActivity.RpcProcessingTimeAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rpcAuthenticationSuccesses": {
+ "metric": "Hadoop:service=JournalNode,name=RpcActivity.RpcAuthenticationSuccesses",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rpcAuthenticationFailures": {
+ "metric": "Hadoop:service=JournalNode,name=RpcActivity.RpcAuthenticationFailures",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/dfs/journalnode/cluster/$1.replaceAll(\"^-\",\"\")/bytesWritten": {
+ "metric": "Hadoop:service=JournalNode,name=Journal(-.+).BytesWritten",
+ "pointInTime": true,
+ "temporal": false
+ }
+ }
+ }
+ ]
+ }
+}
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/datanode.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/datanode.py
new file mode 100644
index 0000000000..3d8ed5c681
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/datanode.py
@@ -0,0 +1,52 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from hdfs import hdfs
+import service_mapping
+
+class DataNode(Script):
+ def install(self, env):
+ if not check_windows_service_exists(service_mapping.datanode_win_service_name):
+ self.install_packages(env)
+
+ def start(self, env):
+ import params
+ self.configure(env)
+ Service(service_mapping.datanode_win_service_name, action="start")
+
+ def stop(self, env):
+ import params
+ env.set_params(params)
+ Service(service_mapping.datanode_win_service_name, action="stop")
+
+ def configure(self, env):
+ import params
+ env.set_params(params)
+ hdfs("datanode")
+
+ def status(self, env):
+ check_windows_service_status(service_mapping.datanode_win_service_name)
+
+if __name__ == "__main__":
+ DataNode().execute()
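+
+# Note: each lifecycle method above delegates to the Windows service set up
+# at install time; service_mapping.datanode_win_service_name is resolved from
+# the stack-level service_mapping module imported at the top of this script.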
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/hdfs.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/hdfs.py
new file mode 100644
index 0000000000..a37ea24ec3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/hdfs.py
@@ -0,0 +1,64 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+def hdfs(component=None):
+ import params
+ if component == "namenode":
+ Directory(params.dfs_name_dir,
+ owner=params.hdfs_user,
+ mode="(OI)(CI)F",
+ recursive=True
+ )
+ File(params.exclude_file_path,
+ content=Template("exclude_hosts_list.j2"),
+ owner=params.hdfs_user,
+ mode="f",
+ )
+ if "hadoop-policy" in params.config['configurations']:
+ XmlConfig("hadoop-policy.xml",
+ conf_dir=params.hadoop_conf_dir,
+ configurations=params.config['configurations']['hadoop-policy'],
+ owner=params.hdfs_user,
+ mode="f"
+ )
+
+ XmlConfig("hdfs-site.xml",
+ conf_dir=params.hadoop_conf_dir,
+ configurations=params.config['configurations']['hdfs-site'],
+ owner=params.hdfs_user,
+ mode="f"
+ )
+ File(format("{params.hadoop_conf_dir}/hadoop-metrics2.properties"),
+ content=Template("hadoop-metrics2.properties.j2"),
+ owner=params.hdfs_user,
+ mode="f"
+ )
+ File(format("{params.hbase_conf_dir}/hadoop-metrics2-hbase.properties"),
+ content=Template("hadoop-metrics2.properties.j2"),
+ owner=params.hdfs_user,
+ mode="f"
+ )
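+
+# Usage note: hdfs("namenode") additionally lays down the name dir and the
+# exclude-hosts file, while every caller (datanode, client, journalnode) gets
+# hdfs-site.xml and the hadoop-metrics2 properties files rendered.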
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/hdfs_client.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/hdfs_client.py
new file mode 100644
index 0000000000..0227c4bfd7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/hdfs_client.py
@@ -0,0 +1,42 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+from resource_management import *
+from hdfs import hdfs
+
+
+class HdfsClient(Script):
+ def install(self, env):
+ # client checks env var to determine if it is installed
+ if "HADOOP_CONF_DIR" not in os.environ:
+ self.install_packages(env)
+ self.configure(env)
+
+ def status(self, env):
+ raise ClientComponentHasNoStatus()
+
+ def configure(self, env):
+ import params
+ env.set_params(params)
+ hdfs()
+
+
+if __name__ == "__main__":
+ HdfsClient().execute()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/hdfs_rebalance.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/hdfs_rebalance.py
new file mode 100644
index 0000000000..aea6fce725
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/hdfs_rebalance.py
@@ -0,0 +1,145 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import re
+
+class HdfsParser():
+ def __init__(self):
+ self.initialLine = None
+ self.state = None
+
+ def parseLine(self, line):
+ hdfsLine = HdfsLine()
+ type, matcher = hdfsLine.recognizeType(line)
+ if(type == HdfsLine.LineType.HeaderStart):
+ self.state = 'PROCESS_STARTED'
+ elif (type == HdfsLine.LineType.Progress):
+ self.state = 'PROGRESS'
+ hdfsLine.parseProgressLog(line, matcher)
+ if self.initialLine is None: self.initialLine = hdfsLine
+
+ return hdfsLine
+ elif (type == HdfsLine.LineType.ProgressEnd):
+ self.state = 'PROCESS_FINISHED'
+ return None
+
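+# A minimal usage sketch (hypothetical driver; the real caller feeds the
+# stdout of 'hdfs balancer' to parseLine one line at a time):
+#   parser = HdfsParser()
+#   for line in balancer_stdout.splitlines():
+#     parsed = parser.parseLine(line)
+#     if parsed is not None:
+#       print(parsed.toJson())
+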
+class HdfsLine():
+
+ class LineType:
+ HeaderStart, Progress, ProgressEnd, Unknown = range(4)
+
+
+ MEMORY_SUFFIX = ['B','KB','MB','GB','TB','PB','EB']
+ MEMORY_PATTERN = '(?P<memmult_%d>(?P<memory_%d>(\d+)(.|,)?(\d+)?) (?P<mult_%d>'+"|".join(MEMORY_SUFFIX)+'))'
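+ # MEMORY_PATTERN is instantiated per column below via the % operator, e.g.
+ # MEMORY_PATTERN % (1,1,1) yields the named groups memmult_1, memory_1 and
+ # mult_1, which together match a value such as "5.74 GB".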
+
+ HEADER_BEGIN_PATTERN = re.compile('Time Stamp\w+Iteration#\w+Bytes Already Moved\w+Bytes Left To Move\w+Bytes Being Moved')
+ PROGRESS_PATTERN = re.compile(
+ "(?P<date>.*?)\s+" +
+ "(?P<iteration>\d+)\s+" +
+ MEMORY_PATTERN % (1,1,1) + "\s+" +
+ MEMORY_PATTERN % (2,2,2) + "\s+" +
+ MEMORY_PATTERN % (3,3,3)
+ )
+ PROGRESS_END_PATTERN = re.compile('The cluster is balanced. Exiting...')
+
+ def __init__(self):
+ self.date = None
+ self.iteration = None
+ self.bytesAlreadyMoved = None
+ self.bytesLeftToMove = None
+ self.bytesBeingMoved = None
+ self.bytesAlreadyMovedStr = None
+ self.bytesLeftToMoveStr = None
+ self.bytesBeingMovedStr = None
+
+ def recognizeType(self, line):
+ for (type, pattern) in (
+ (HdfsLine.LineType.HeaderStart, self.HEADER_BEGIN_PATTERN),
+ (HdfsLine.LineType.Progress, self.PROGRESS_PATTERN),
+ (HdfsLine.LineType.ProgressEnd, self.PROGRESS_END_PATTERN)
+ ):
+ m = re.match(pattern, line)
+ if m:
+ return type, m
+ return HdfsLine.LineType.Unknown, None
+
+ def parseProgressLog(self, line, m):
+ '''
+ Parse the line of 'hdfs rebalancer' output. The example output being parsed:
+
+ Time Stamp Iteration# Bytes Already Moved Bytes Left To Move Bytes Being Moved
+ Jul 28, 2014 5:01:49 PM 0 0 B 5.74 GB 9.79 GB
+ Jul 28, 2014 5:03:00 PM 1 0 B 5.58 GB 9.79 GB
+
+ Raises AmbariException in case of parsing errors.
+
+ '''
+ m = re.match(self.PROGRESS_PATTERN, line)
+ if m:
+ self.date = m.group('date')
+ self.iteration = int(m.group('iteration'))
+
+ self.bytesAlreadyMoved = self.parseMemory(m.group('memory_1'), m.group('mult_1'))
+ self.bytesLeftToMove = self.parseMemory(m.group('memory_2'), m.group('mult_2'))
+ self.bytesBeingMoved = self.parseMemory(m.group('memory_3'), m.group('mult_3'))
+
+ self.bytesAlreadyMovedStr = m.group('memmult_1')
+ self.bytesLeftToMoveStr = m.group('memmult_2')
+ self.bytesBeingMovedStr = m.group('memmult_3')
+ else:
+ raise AmbariException("Failed to parse line [%s]")
+
+ def parseMemory(self, memorySize, multiplier_type):
+ try:
+ factor = self.MEMORY_SUFFIX.index(multiplier_type)
+ except ValueError:
+ raise AmbariException("Failed to memory value [%s %s]" % (memorySize, multiplier_type))
+
+    return float(memorySize) * (1024 ** factor)
+
+  def toJson(self):
+ return {
+ 'timeStamp' : self.date,
+ 'iteration' : self.iteration,
+
+ 'dataMoved': self.bytesAlreadyMovedStr,
+ 'dataLeft' : self.bytesLeftToMoveStr,
+ 'dataBeingMoved': self.bytesBeingMovedStr,
+
+ 'bytesMoved': self.bytesAlreadyMoved,
+ 'bytesLeft' : self.bytesLeftToMove,
+ 'bytesBeingMoved': self.bytesBeingMoved,
+ }
+
+  def __str__(self):
+ return "[ date=%s,iteration=%d, bytesAlreadyMoved=%d, bytesLeftToMove=%d, bytesBeingMoved=%d]"%(self.date, self.iteration, self.bytesAlreadyMoved, self.bytesLeftToMove, self.bytesBeingMoved) \ No newline at end of file
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/journalnode.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/journalnode.py
new file mode 100644
index 0000000000..9b56ae7769
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/journalnode.py
@@ -0,0 +1,47 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from hdfs import hdfs
+import service_mapping
+
+class JournalNode(Script):
+ def install(self, env):
+ if not check_windows_service_exists(service_mapping.journalnode_win_service_name):
+ self.install_packages(env)
+
+ def start(self, env):
+ import params
+ self.configure(env)
+ Service(service_mapping.journalnode_win_service_name, action="start")
+
+ def stop(self, env):
+ import params
+ Service(service_mapping.journalnode_win_service_name, action="stop")
+
+ def configure(self, env):
+ import params
+ env.set_params(params)
+ hdfs()
+
+ def status(self, env):
+ check_windows_service_status(service_mapping.journalnode_win_service_name)
+
+if __name__ == "__main__":
+ JournalNode().execute()
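
Each of these service scripts ends with ClassName().execute(). Roughly speaking, Script.execute() reads the command Ambari passes on the command line (install, start, stop, status, ...) and dispatches to the method of the same name. A simplified model of that dispatch, for illustration only (the real resource_management implementation also parses the command JSON, configures logging, and reports structured output):

    class Script(object):
        def execute(self, command, env=None):
            # Dispatch to the method named after the command, e.g. self.start.
            getattr(self, command)(env)

    class JournalNode(Script):
        def start(self, env):
            print('starting the journalnode service')

    JournalNode().execute('start')
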
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/namenode.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/namenode.py
new file mode 100644
index 0000000000..32fc6813b4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/namenode.py
@@ -0,0 +1,127 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from hdfs import hdfs
+import service_mapping
+import hdfs_rebalance
+import time
+import json
+import subprocess
+import sys
+import os
+from datetime import datetime
+from ambari_commons.os_windows import *
+
+class NameNode(Script):
+ def install(self, env):
+ if not check_windows_service_exists(service_mapping.namenode_win_service_name):
+ self.install_packages(env)
+
+ import params
+ self.configure(env)
+ namenode_format_marker = os.path.join(params.hadoop_conf_dir,"NN_FORMATTED")
+ if not os.path.exists(namenode_format_marker):
+ hadoop_cmd = "cmd /C %s" % (os.path.join(params.hadoop_home, "bin", "hadoop.cmd"))
+ Execute("%s namenode -format" % (hadoop_cmd))
+ open(namenode_format_marker, 'a').close()
+
+ def start(self, env):
+ self.configure(env)
+ Service(service_mapping.namenode_win_service_name, action="start")
+
+ def stop(self, env):
+ Service(service_mapping.namenode_win_service_name, action="stop")
+
+ def configure(self, env):
+ import params
+ env.set_params(params)
+ hdfs("namenode")
+
+ def status(self, env):
+ check_windows_service_status(service_mapping.namenode_win_service_name)
+
+ def decommission(self, env):
+ import params
+
+ env.set_params(params)
+ hdfs_user = params.hdfs_user
+ conf_dir = params.hadoop_conf_dir
+
+ File(params.exclude_file_path,
+ content=Template("exclude_hosts_list.j2"),
+ owner=hdfs_user
+ )
+
+ if params.dfs_ha_enabled:
+      # Due to a bug in HDFS, refreshNodes will not run on both namenodes,
+      # so we need to execute the command scoped to a particular namenode.
+ nn_refresh_cmd = format('cmd /c hadoop dfsadmin -fs hdfs://{namenode_rpc} -refreshNodes')
+ else:
+ nn_refresh_cmd = format('cmd /c hadoop dfsadmin -refreshNodes')
+ Execute(nn_refresh_cmd, user=hdfs_user)
+
+
+ def rebalancehdfs(self, env):
+ import params
+ env.set_params(params)
+
+ hdfs_user = params.hdfs_user
+
+ name_node_parameters = json.loads( params.name_node_params )
+ threshold = name_node_parameters['threshold']
+ _print("Starting balancer with threshold = %s\n" % threshold)
+
+ def calculateCompletePercent(first, current):
+ return 1.0 - current.bytesLeftToMove/first.bytesLeftToMove
+
+ def startRebalancingProcess(threshold):
+ rebalanceCommand = 'hdfs balancer -threshold %s' % threshold
+ return ['cmd', '/C', rebalanceCommand]
+
+ command = startRebalancingProcess(threshold)
+ basedir = os.path.join(env.config.basedir, 'scripts')
+
+ _print("Executing command %s\n" % command)
+
+ parser = hdfs_rebalance.HdfsParser()
+ returncode, stdout, err = run_os_command_impersonated(' '.join(command), hdfs_user, Script.get_password(hdfs_user))
+
+ for line in stdout.split('\n'):
+ _print('[balancer] %s %s' % (str(datetime.now()), line ))
+ pl = parser.parseLine(line)
+ if pl:
+ res = pl.toJson()
+ res['completePercent'] = calculateCompletePercent(parser.initialLine, pl)
+
+ self.put_structured_out(res)
+      elif parser.state == 'PROCESS_FINISHED':
+ _print('[balancer] %s %s' % (str(datetime.now()), 'Process is finished' ))
+ self.put_structured_out({'completePercent' : 1})
+ break
+
+    if returncode is not None and returncode != 0:
+ raise Fail('Hdfs rebalance process exited with error. See the log output')
+
+def _print(line):
+ sys.stdout.write(line)
+ sys.stdout.flush()
+
+if __name__ == "__main__":
+ NameNode().execute()
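
The completePercent figure that rebalancehdfs writes to structured output is just the fraction of the initial backlog that has been cleared: 1 - bytesLeftToMove(now) / bytesLeftToMove(first progress line). A worked example with invented byte counts:

    first_left = 5.74 * 1024 ** 3    # backlog on the first progress line (illustrative)
    current_left = 1.43 * 1024 ** 3  # backlog on a later progress line (illustrative)

    complete = 1.0 - current_left / first_left
    print('%.0f%% complete' % (complete * 100))  # -> 75% complete
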
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/params.py
new file mode 100644
index 0000000000..1abad5c167
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/params.py
@@ -0,0 +1,65 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import os
+
+config = Script.get_config()
+hadoop_conf_dir = os.environ["HADOOP_CONF_DIR"]
+hbase_conf_dir = os.environ["HBASE_CONF_DIR"]
+hadoop_home = os.environ["HADOOP_HOME"]
+# directories & files
+dfs_name_dir = config['configurations']['hdfs-site']['dfs.namenode.name.dir']
+fs_checkpoint_dir = config['configurations']['hdfs-site']['dfs.namenode.checkpoint.dir']
+dfs_data_dir = config['configurations']['hdfs-site']['dfs.datanode.data.dir']
+# decommission
+hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
+exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
+# HDFS High Availability properties
+dfs_ha_enabled = False
+dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
+dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
+
+namenode_id = None
+namenode_rpc = None
+hostname = config["hostname"]
+if dfs_ha_namenode_ids:
+  dfs_ha_namenodes_ids_list = dfs_ha_namenode_ids.split(",")
+  dfs_ha_namenode_ids_array_len = len(dfs_ha_namenodes_ids_list)
+ if dfs_ha_namenode_ids_array_len > 1:
+ dfs_ha_enabled = True
+if dfs_ha_enabled:
+  for nn_id in dfs_ha_namenodes_ids_list:
+ nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
+ if hostname in nn_host:
+ namenode_id = nn_id
+ namenode_rpc = nn_host
+dbserver = config['configurations']['cluster-env']['sink.dbservername'].replace('\\', '\\\\')
+dblogin = config['configurations']['cluster-env']['sink.dblogin']
+dbpassword = config['configurations']['cluster-env']['sink.dbpassword']
+dburl = config['configurations']['cluster-env']['sink.jdbc.url'].replace('\\', '\\\\')
+
+if 'integratedSecurity=true' not in dburl:
+  dburl = dburl + ';user=' + dblogin + ';password=' + dbpassword
+
+hdfs_user = "hadoop"
+
+grep_exe = "findstr"
+
+name_node_params = default("/commandParams/namenode", None) \ No newline at end of file
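
The sink URL handling above appends SQL credentials inline whenever the JDBC URL does not request Windows integrated security. A sketch of just that string logic, with placeholder server name and credentials:

    dburl = 'jdbc:sqlserver://dbhost:1433;databaseName=HadoopMetrics'  # placeholder
    dblogin, dbpassword = 'hadoop', 'secret'                           # placeholders

    if 'integratedSecurity=true' not in dburl:
        dburl = dburl + ';user=' + dblogin + ';password=' + dbpassword

    print(dburl)
    # jdbc:sqlserver://dbhost:1433;databaseName=HadoopMetrics;user=hadoop;password=secret
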
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/service_check.py
new file mode 100644
index 0000000000..e5cbaab63b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/service_check.py
@@ -0,0 +1,56 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.libraries import functions
+import os
+
+class HdfsServiceCheck(Script):
+ def service_check(self, env):
+ import params
+ env.set_params(params)
+
+ unique = functions.get_unique_id_and_date()
+
+ #Hadoop uses POSIX-style paths, separator is always /
+ dir = '/tmp'
+ tmp_file = dir + '/' + unique
+
+ #commands for execution
+ hadoop_cmd = "cmd /C %s" % (os.path.join(params.hadoop_home, "bin", "hadoop.cmd"))
+ create_dir_cmd = "%s fs -mkdir %s" % (hadoop_cmd, dir)
+ own_dir = "%s fs -chmod 777 %s" % (hadoop_cmd, dir)
+ test_dir_exists = "%s fs -test -e %s" % (hadoop_cmd, dir)
+ cleanup_cmd = "%s fs -rm %s" % (hadoop_cmd, tmp_file)
+ create_file_cmd = "%s fs -put %s %s" % (hadoop_cmd, os.path.join(params.hadoop_conf_dir, "core-site.xml"), tmp_file)
+ test_cmd = "%s fs -test -e %s" % (hadoop_cmd, tmp_file)
+
+ hdfs_cmd = "cmd /C %s" % (os.path.join(params.hadoop_home, "bin", "hdfs.cmd"))
+ safemode_command = "%s dfsadmin -safemode get | %s OFF" % (hdfs_cmd, params.grep_exe)
+
+    Execute(safemode_command, logoutput=True, try_sleep=3, tries=20)
+    Execute(create_dir_cmd, user=params.hdfs_user, logoutput=True, ignore_failures=True)
+    Execute(own_dir, user=params.hdfs_user, logoutput=True)
+    Execute(test_dir_exists, user=params.hdfs_user, logoutput=True)
+    Execute(create_file_cmd, user=params.hdfs_user, logoutput=True)
+    Execute(test_cmd, user=params.hdfs_user, logoutput=True)
+    Execute(cleanup_cmd, user=params.hdfs_user, logoutput=True)
+
+if __name__ == "__main__":
+ HdfsServiceCheck().execute()
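
Every step of the check shells out through the same "cmd /C <tool>.cmd" wrapper. A small sketch of the strings that convention produces, with a placeholder HADOOP_HOME (the real value comes from params):

    import os

    hadoop_home = 'c:\\hdp\\hadoop'  # placeholder
    hadoop_cmd = "cmd /C %s" % (os.path.join(hadoop_home, "bin", "hadoop.cmd"))
    print("%s fs -mkdir %s" % (hadoop_cmd, "/tmp"))
    # on Windows: cmd /C c:\hdp\hadoop\bin\hadoop.cmd fs -mkdir /tmp
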
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/service_mapping.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/service_mapping.py
new file mode 100644
index 0000000000..d76ce07db5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/service_mapping.py
@@ -0,0 +1,24 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+namenode_win_service_name = "namenode"
+datanode_win_service_name = "datanode"
+snamenode_win_service_name = "secondarynamenode"
+journalnode_win_service_name = "journalnode"
+zkfc_win_service_name = "zkfc" \ No newline at end of file
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/snamenode.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/snamenode.py
new file mode 100644
index 0000000000..a3f880a367
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/snamenode.py
@@ -0,0 +1,48 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from hdfs import hdfs
+import service_mapping
+
+class SNameNode(Script):
+ def install(self, env):
+ if not check_windows_service_exists(service_mapping.snamenode_win_service_name):
+ self.install_packages(env)
+
+ def start(self, env):
+ import params
+ self.configure(env)
+ Service(service_mapping.snamenode_win_service_name, action="start")
+
+ def stop(self, env):
+ import params
+ Service(service_mapping.snamenode_win_service_name, action="stop")
+
+ def configure(self, env):
+ import params
+ env.set_params(params)
+ hdfs("secondarynamenode")
+
+ def status(self, env):
+ import params
+ check_windows_service_status(service_mapping.snamenode_win_service_name)
+
+if __name__ == "__main__":
+ SNameNode().execute()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/zkfc_slave.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/zkfc_slave.py
new file mode 100644
index 0000000000..5fadce0603
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/scripts/zkfc_slave.py
@@ -0,0 +1,50 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from hdfs import hdfs
+import service_mapping
+
+class ZkfcSlave(Script):
+ def install(self, env):
+ if not check_windows_service_exists(service_mapping.zkfc_win_service_name):
+ import params
+ env.set_params(params)
+ self.install_packages(env)
+
+ def start(self, env):
+ import params
+ self.configure(env)
+ Service(service_mapping.zkfc_win_service_name, action="start")
+
+ def stop(self, env):
+ import params
+ Service(service_mapping.zkfc_win_service_name, action="stop")
+
+ def configure(self, env):
+ import params
+ env.set_params(params)
+ hdfs()
+
+ def status(self, env):
+ check_windows_service_status(service_mapping.zkfc_win_service_name)
+
+
+if __name__ == "__main__":
+ ZkfcSlave().execute()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/templates/exclude_hosts_list.j2
new file mode 100644
index 0000000000..a92cdc1221
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/templates/exclude_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in hdfs_exclude_file %}
+{{host}}
+{% endfor %} \ No newline at end of file
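
The File resource in namenode.py's decommission step renders this template with params in scope, producing one excluded host per line. A plain-jinja2 approximation, with the template inlined (whitespace slightly simplified) and placeholder host names:

    from jinja2 import Template

    source = "{% for host in hdfs_exclude_file %}{{ host }}\n{% endfor %}"
    print(Template(source).render(hdfs_exclude_file=['dn1.example.com', 'dn2.example.com']))
    # dn1.example.com
    # dn2.example.com
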
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/templates/hadoop-metrics2.properties.j2 b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/templates/hadoop-metrics2.properties.j2
new file mode 100644
index 0000000000..ee5b60eb31
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/package/templates/hadoop-metrics2.properties.j2
@@ -0,0 +1,53 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# syntax: [prefix].[source|sink].[instance].[options]
+# See javadoc of package-info.java for org.apache.hadoop.metrics2 for details
+
+*.sink.sql.class=org.apache.hadoop.metrics2.sink.SqlServerSinkHadoop2
+
+namenode.sink.sql.databaseUrl={{dburl}}
+datanode.sink.sql.databaseUrl={{dburl}}
+jobtracker.sink.sql.databaseUrl={{dburl}}
+tasktracker.sink.sql.databaseUrl={{dburl}}
+maptask.sink.sql.databaseUrl={{dburl}}
+reducetask.sink.sql.databaseUrl={{dburl}}
+resourcemanager.sink.sql.databaseUrl={{dburl}}
+nodemanager.sink.sql.databaseUrl={{dburl}}
+historyserver.sink.sql.databaseUrl={{dburl}}
+journalnode.sink.sql.databaseUrl={{dburl}}
+nimbus.sink.sql.databaseUrl={{dburl}}
+supervisor.sink.sql.databaseUrl={{dburl}}
+hbase.sink.sql.databaseUrl={{dburl}}
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/hive-env.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/hive-env.xml
new file mode 100644
index 0000000000..57144be045
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/hive-env.xml
@@ -0,0 +1,105 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+ <property>
+ <name>hive_database_type</name>
+ <value>mssql</value>
+ <description>Default HIVE DB type.</description>
+ </property>
+ <property>
+ <name>hive_hostname</name>
+ <value></value>
+ <description>
+ Specify the host on which the HIVE database is hosted.
+ </description>
+ </property>
+ <property>
+ <name>hive_database</name>
+ <value>Existing MSSQL Server database with sql auth</value>
+ <description>
+ Property that determines whether the HIVE DB is managed by Ambari.
+ </description>
+ </property>
+ <property>
+ <name>hive_ambari_database</name>
+ <value>MySQL</value>
+ <description>Database type.</description>
+ </property>
+ <property>
+ <name>hive_database_name</name>
+ <value>hive</value>
+ <description>Database name.</description>
+ </property>
+ <property>
+ <name>hive_dbroot</name>
+ <value>/usr/lib/hive/lib/</value>
+ <description>Hive DB Directory.</description>
+ </property>
+ <property>
+ <name>hive_log_dir</name>
+ <value>/var/log/hive</value>
+ <description>Directory for Hive Log files.</description>
+ </property>
+ <property>
+ <name>hive_pid_dir</name>
+ <value>/var/run/hive</value>
+ <description>Hive PID Dir.</description>
+ </property>
+ <property>
+ <name>hive_user</name>
+ <value>hive</value>
+ <description>Hive User.</description>
+ </property>
+
+ <!--HCAT-->
+
+ <!--<property>
+ <name>hcat_log_dir</name>
+ <value>/var/log/webhcat</value>
+ <description>WebHCat Log Dir.</description>
+ </property>
+ <property>
+ <name>hcat_pid_dir</name>
+ <value>/var/run/webhcat</value>
+ <description>WebHCat Pid Dir.</description>
+ </property>
+ <property>
+ <name>hcat_user</name>
+ <value>hcat</value>
+ <description>HCat User.</description>
+ </property>
+ <property>
+ <name>webhcat_user</name>
+ <value>hcat</value>
+ <description>WebHCat User.</description>
+ </property>-->
+
+ <!-- hive-env.cmd -->
+ <property>
+ <name>content</name>
+ <description>hive-env.cmd content</description>
+ <value>
+ </value>
+ </property>
+
+</configuration>
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/hive-site.xml
new file mode 100644
index 0000000000..3f90c769f0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/hive-site.xml
@@ -0,0 +1,291 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<configuration supports_final="true">
+
+ <!-- Hive Configuration can either be stored in this file or in the hadoop configuration files -->
+ <!-- that are implied by Hadoop setup variables. -->
+ <!-- Aside from Hadoop setup variables - this file is provided as a convenience so that Hive -->
+ <!-- users do not have to edit hadoop configuration files (that may be managed as a centralized -->
+ <!-- resource). -->
+
+ <!-- Hive Execution Parameters -->
+
+ <property>
+ <name>hive.metastore.uris</name>
+ <value>thrift://localhost:9083</value>
+ </property>
+
+ <property>
+ <name>hive.metastore.connect.retries</name>
+ <value>5</value>
+ <description>Number of retries while opening a connection to metastore</description>
+ </property>
+
+ <property>
+ <name>hive.metastore.ds.retry.attempts</name>
+ <value>0</value>
+    <description>The number of times to retry a metastore call if there was a connection error</description>
+ </property>
+
+ <property>
+ <name>hive.metastore.ds.retry.interval</name>
+ <value>1000</value>
+    <description>The number of milliseconds between metastore retry attempts</description>
+ </property>
+
+ <property>
+ <name>hive.metastore.execute.setugi</name>
+ <value>true</value>
+ </property>
+
+ <property>
+ <name>hive.hmshandler.retry.attempts</name>
+ <value>5</value>
+    <description>The number of times to retry an HMSHandler call if there was a connection error</description>
+ </property>
+
+ <property>
+ <name>hive.hmshandler.retry.interval</name>
+ <value>1000</value>
+    <description>The number of milliseconds between HMSHandler retry attempts</description>
+ </property>
+
+ <property>
+ <name>javax.jdo.option.ConnectionURL</name>
+ <value></value>
+ <description>JDBC connect string for a JDBC metastore</description>
+ </property>
+
+ <property>
+ <name>javax.jdo.option.ConnectionDriverName</name>
+ <value>com.microsoft.sqlserver.jdbc.SQLServerDriver</value>
+ <description>Driver class name for a JDBC metastore</description>
+ </property>
+
+ <property>
+ <name>ambari.hive.db.schema.name</name>
+ <value>hive</value>
+ <description>Database name used as the Hive Metastore</description>
+ </property>
+
+ <property>
+ <name>javax.jdo.option.ConnectionUserName</name>
+ <value>hive</value>
+ <description>username to use against metastore database</description>
+ </property>
+
+ <property require-input="true">
+ <name>javax.jdo.option.ConnectionPassword</name>
+ <value></value>
+ <type>PASSWORD</type>
+ <description>password to use against metastore database</description>
+ </property>
+
+ <property>
+ <name>hive.metastore.warehouse.dir</name>
+ <value>/hive/warehouse</value>
+ <description>location of default database for the warehouse</description>
+ </property>
+
+ <property>
+ <name>hive.hwi.listen.host</name>
+ <value>0.0.0.0</value>
+ <description>This is the host address the Hive Web Interface will listen on</description>
+ </property>
+
+ <property>
+ <name>hive.hwi.listen.port</name>
+ <value>9999</value>
+ <description>This is the port the Hive Web Interface will listen on</description>
+ </property>
+
+ <property>
+ <name>hive.hwi.war.file</name>
+ <value>lib\hive-hwi-@hive.version@.war</value>
+ <description>This is the WAR file with the jsp content for Hive Web Interface</description>
+ </property>
+
+ <property>
+ <name>hive.server2.transport.mode</name>
+ <value>binary</value>
+ <description>Server transport mode. "binary" or "http".</description>
+ </property>
+
+ <property>
+ <name>hive.server2.thrift.http.port</name>
+ <value>10001</value>
+ <description>Port number when in HTTP mode.</description>
+ </property>
+
+ <property>
+ <name>hive.server2.thrift.http.path</name>
+ <value>/</value>
+ <description>Path component of URL endpoint when in HTTP mode.</description>
+ </property>
+
+ <property>
+ <name>hive.server2.thrift.http.min.worker.threads</name>
+ <value>5</value>
+ <description>Minimum number of worker threads when in HTTP mode.</description>
+ </property>
+
+ <property>
+ <name>hive.server2.thrift.http.max.worker.threads</name>
+ <value>100</value>
+ <description>Maximum number of worker threads when in HTTP mode.</description>
+ </property>
+
+ <property>
+ <name>hive.server2.thrift.port</name>
+ <value>10001</value>
+ <description>HiveServer2 thrift port</description>
+ </property>
+
+ <property>
+ <name>hive.server2.enable.doAs</name>
+ <value>false</value>
+ </property>
+
+ <property>
+ <name>hive.security.authorization.enabled</name>
+ <value>true</value>
+ <description>enable or disable the hive client authorization</description>
+ </property>
+
+ <property>
+ <name>hive.security.authorization.manager</name>
+ <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
+ </property>
+
+ <property>
+ <name>hive.optimize.mapjoin.mapreduce</name>
+ <value>true</value>
+ </property>
+
+ <property>
+ <name>hive.enforce.bucketing</name>
+ <value>true</value>
+ </property>
+
+ <property>
+ <name>hive.enforce.sorting</name>
+ <value>true</value>
+ </property>
+
+ <property>
+ <name>hive.optimize.index.filter</name>
+ <value>true</value>
+ </property>
+
+ <property>
+ <name>hive.mapred.reduce.tasks.speculative.execution</name>
+ <value>false</value>
+ </property>
+
+ <property>
+ <name>hive.orc.splits.include.file.footer</name>
+ <value>false</value>
+ </property>
+
+ <property>
+ <name>hive.exec.local.cache</name>
+ <value>true</value>
+ </property>
+
+ <property>
+ <name>hive.vectorized.execution.enabled</name>
+ <value>true</value>
+ </property>
+
+ <property>
+ <name>hive.vectorized.groupby.flush.percent</name>
+ <value>1.0</value>
+ </property>
+
+ <property>
+ <name>hive.vectorized.groupby.checkinterval</name>
+ <value>1024</value>
+ </property>
+
+ <property>
+ <name>hive.vectorized.groupby.maxentries</name>
+ <value>1024</value>
+ </property>
+
+ <property>
+ <name>hive.optimize.bucketmapjoin.sortedmerge</name>
+ <value>false</value>
+ </property>
+
+ <property>
+ <name>hive.optimize.bucketmapjoin</name>
+ <value>true</value>
+ </property>
+
+ <property>
+ <name>hive.enforce.sortmergebucketmapjoin</name>
+ <value>true</value>
+ </property>
+
+ <property>
+ <name>hive.convert.join.bucket.mapjoin.tez</name>
+ <value>false</value>
+ </property>
+
+ <property>
+ <name>hive.auto.convert.sortmerge.join</name>
+ <value>true</value>
+ </property>
+
+ <property>
+ <name>hive.auto.convert.sortmerge.join.noconditionaltask</name>
+ <value>true</value>
+ </property>
+
+ <property>
+ <name>hive.server2.tez.sessions.per.default.queue</name>
+ <value>1</value>
+ </property>
+
+ <property>
+ <name>hive.server2.tez.initialize.default.sessions</name>
+ <value>false</value>
+ </property>
+
+ <property>
+ <name>hive.server2.tez.default.queues</name>
+ <value>default</value>
+ </property>
+
+ <property>
+ <name>hive.stats.dbclass</name>
+ <value>fs</value>
+ </property>
+
+ <property>
+ <name>hive.compute.query.using.stats</name>
+ <value>true</value>
+ </property>
+
+
+ <property>
+ <name>hive.querylog.location</name>
+ <value>c:\hadoop\logs\hive</value>
+ </property>
+
+ <property>
+ <name>hive.log.dir</name>
+ <value>c:\hadoop\logs\hive</value>
+ </property>
+
+ <property>
+ <name>hive.stats.autogather</name>
+ <value>true</value>
+ </property>
+
+ <property>
+ <name>hive.execution.engine</name>
+ <value>mr</value>
+ </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/webhcat-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/webhcat-site.xml
new file mode 100644
index 0000000000..bae9712322
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/webhcat-site.xml
@@ -0,0 +1,109 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+<!-- The default settings for Templeton. -->
+<!-- Edit templeton-site.xml to change settings for your local -->
+<!-- install. -->
+
+<configuration>
+
+ <property>
+ <name>templeton.port</name>
+ <value>50111</value>
+ <description>The HTTP port for the main server.</description>
+ </property>
+
+ <property>
+ <name>templeton.jar</name>
+ <value>c:\hdp\hive\hcatalog\share\webhcat\svr\lib\hive-webhca.jar</value>
+ <description>The path to the Templeton jar file.</description>
+ </property>
+
+ <property>
+ <name>templeton.override.enabled</name>
+ <value>false</value>
+ <description>
+ Enable the override path in templeton.override.jars
+ </description>
+ </property>
+
+ <property>
+ <name>templeton.hcat</name>
+ <value>${env.HCAT_HOME}/bin/hcat.py</value>
+ <description>The path to the hcatalog executable.</description>
+ </property>
+
+ <property>
+ <name>templeton.hadoop</name>
+ <value>${env.HADOOP_HOME}/bin/hadoop.cmd</value>
+ <description>The path to the Hadoop executable.</description>
+ </property>
+
+ <property>
+ <name>templeton.exec.envs</name>
+ <value>HADOOP_HOME,JAVA_HOME,HIVE_HOME,TEMP,HADOOP_BIN_PATH,PATH,SystemRoot,TEZ_CLASSPATH</value>
+ <description>The environment variables passed through to exec.</description>
+ </property>
+
+ <property>
+ <name>templeton.streaming.jar</name>
+ <value>file:///c:/hdp/hadoop/share/hadoop/tools/lib/hadoop-streaming.jar</value>
+ <description>The hdfs path to the Hadoop streaming jar file.</description>
+ </property>
+
+ <property>
+ <name>templeton.hive.properties</name>
+ <value>hive.metastore.local=false,hive.metastore.uris=thrift://WIN-QS1HDPKHRAM:9083</value>
+ <description>Properties to set when running hive.</description>
+ </property>
+
+ <property>
+ <name>templeton.libjars</name>
+ <value>file:///c:/hdp/hive/lib/zookeeper.jar</value>
+ <description>Jars to add to the classpath.</description>
+ </property>
+
+ <property>
+ <name>templeton.pig.path</name>
+ <value>${env.PIG_HOME}/bin/pig.cmd</value>
+ <description>The path to the Pig executable.</description>
+ </property>
+
+ <property>
+ <name>templeton.hive.path</name>
+ <value>${env.HIVE_HOME}/bin/hive.cmd</value>
+ <description>The path to the Hive executable.</description>
+ </property>
+
+
+ <property>
+ <name>templeton.hadoop.queue.name</name>
+ <value>joblauncher</value>
+ </property>
+
+ <property>
+ <name>templeton.zookeeper.hosts</name>
+ <value>localhost:2181</value>
+ </property>
+
+ <property>
+ <name>templeton.storage.class</name>
+ <value>org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage</value>
+ </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/etc/hive-schema-0.12.0.mysql.sql b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/etc/hive-schema-0.12.0.mysql.sql
new file mode 100644
index 0000000000..bacee9ed59
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/etc/hive-schema-0.12.0.mysql.sql
@@ -0,0 +1,777 @@
+-- MySQL dump 10.13 Distrib 5.5.25, for osx10.6 (i386)
+--
+-- Host: localhost Database: test
+-- ------------------------------------------------------
+-- Server version 5.5.25
+
+/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
+/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
+/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
+/*!40101 SET NAMES utf8 */;
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+
+--
+-- Table structure for table `BUCKETING_COLS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `BUCKETING_COLS` (
+ `SD_ID` bigint(20) NOT NULL,
+ `BUCKET_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `INTEGER_IDX` int(11) NOT NULL,
+ PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+ KEY `BUCKETING_COLS_N49` (`SD_ID`),
+ CONSTRAINT `BUCKETING_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `CDS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `CDS` (
+ `CD_ID` bigint(20) NOT NULL,
+ PRIMARY KEY (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `COLUMNS_V2`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `COLUMNS_V2` (
+ `CD_ID` bigint(20) NOT NULL,
+ `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TYPE_NAME` varchar(4000) DEFAULT NULL,
+ `INTEGER_IDX` int(11) NOT NULL,
+ PRIMARY KEY (`CD_ID`,`COLUMN_NAME`),
+ KEY `COLUMNS_V2_N49` (`CD_ID`),
+ CONSTRAINT `COLUMNS_V2_FK1` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DATABASE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DATABASE_PARAMS` (
+ `DB_ID` bigint(20) NOT NULL,
+ `PARAM_KEY` varchar(180) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ PRIMARY KEY (`DB_ID`,`PARAM_KEY`),
+ KEY `DATABASE_PARAMS_N49` (`DB_ID`),
+ CONSTRAINT `DATABASE_PARAMS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DBS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DBS` (
+ `DB_ID` bigint(20) NOT NULL,
+ `DESC` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `DB_LOCATION_URI` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ PRIMARY KEY (`DB_ID`),
+ UNIQUE KEY `UNIQUE_DATABASE` (`NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DB_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DB_PRIVS` (
+ `DB_GRANT_ID` bigint(20) NOT NULL,
+ `CREATE_TIME` int(11) NOT NULL,
+ `DB_ID` bigint(20) DEFAULT NULL,
+ `GRANT_OPTION` smallint(6) NOT NULL,
+ `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `DB_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ PRIMARY KEY (`DB_GRANT_ID`),
+ UNIQUE KEY `DBPRIVILEGEINDEX` (`DB_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`DB_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+ KEY `DB_PRIVS_N49` (`DB_ID`),
+ CONSTRAINT `DB_PRIVS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `GLOBAL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `GLOBAL_PRIVS` (
+ `USER_GRANT_ID` bigint(20) NOT NULL,
+ `CREATE_TIME` int(11) NOT NULL,
+ `GRANT_OPTION` smallint(6) NOT NULL,
+ `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `USER_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ PRIMARY KEY (`USER_GRANT_ID`),
+ UNIQUE KEY `GLOBALPRIVILEGEINDEX` (`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`USER_PRIV`,`GRANTOR`,`GRANTOR_TYPE`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `IDXS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `IDXS` (
+ `INDEX_ID` bigint(20) NOT NULL,
+ `CREATE_TIME` int(11) NOT NULL,
+ `DEFERRED_REBUILD` bit(1) NOT NULL,
+ `INDEX_HANDLER_CLASS` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `INDEX_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `INDEX_TBL_ID` bigint(20) DEFAULT NULL,
+ `LAST_ACCESS_TIME` int(11) NOT NULL,
+ `ORIG_TBL_ID` bigint(20) DEFAULT NULL,
+ `SD_ID` bigint(20) DEFAULT NULL,
+ PRIMARY KEY (`INDEX_ID`),
+ UNIQUE KEY `UNIQUEINDEX` (`INDEX_NAME`,`ORIG_TBL_ID`),
+ KEY `IDXS_N51` (`SD_ID`),
+ KEY `IDXS_N50` (`INDEX_TBL_ID`),
+ KEY `IDXS_N49` (`ORIG_TBL_ID`),
+ CONSTRAINT `IDXS_FK1` FOREIGN KEY (`ORIG_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
+ CONSTRAINT `IDXS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+ CONSTRAINT `IDXS_FK3` FOREIGN KEY (`INDEX_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `INDEX_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `INDEX_PARAMS` (
+ `INDEX_ID` bigint(20) NOT NULL,
+ `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ PRIMARY KEY (`INDEX_ID`,`PARAM_KEY`),
+ KEY `INDEX_PARAMS_N49` (`INDEX_ID`),
+ CONSTRAINT `INDEX_PARAMS_FK1` FOREIGN KEY (`INDEX_ID`) REFERENCES `IDXS` (`INDEX_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `NUCLEUS_TABLES`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `NUCLEUS_TABLES` (
+ `CLASS_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TYPE` varchar(4) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `OWNER` varchar(2) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `VERSION` varchar(20) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `INTERFACE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ PRIMARY KEY (`CLASS_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITIONS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITIONS` (
+ `PART_ID` bigint(20) NOT NULL,
+ `CREATE_TIME` int(11) NOT NULL,
+ `LAST_ACCESS_TIME` int(11) NOT NULL,
+ `PART_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `SD_ID` bigint(20) DEFAULT NULL,
+ `TBL_ID` bigint(20) DEFAULT NULL,
+ `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
+ PRIMARY KEY (`PART_ID`),
+ UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`),
+ KEY `PARTITIONS_N49` (`TBL_ID`),
+ KEY `PARTITIONS_N50` (`SD_ID`),
+ KEY `PARTITIONS_N51` (`LINK_TARGET_ID`),
+ CONSTRAINT `PARTITIONS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
+ CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+ CONSTRAINT `PARTITIONS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_EVENTS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_EVENTS` (
+ `PART_NAME_ID` bigint(20) NOT NULL,
+ `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `EVENT_TIME` bigint(20) NOT NULL,
+ `EVENT_TYPE` int(11) NOT NULL,
+ `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ PRIMARY KEY (`PART_NAME_ID`),
+ KEY `PARTITIONEVENTINDEX` (`PARTITION_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_KEYS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_KEYS` (
+ `TBL_ID` bigint(20) NOT NULL,
+ `PKEY_COMMENT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `PKEY_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PKEY_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `INTEGER_IDX` int(11) NOT NULL,
+ PRIMARY KEY (`TBL_ID`,`PKEY_NAME`),
+ KEY `PARTITION_KEYS_N49` (`TBL_ID`),
+ CONSTRAINT `PARTITION_KEYS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_KEY_VALS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_KEY_VALS` (
+ `PART_ID` bigint(20) NOT NULL,
+ `PART_KEY_VAL` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `INTEGER_IDX` int(11) NOT NULL,
+ PRIMARY KEY (`PART_ID`,`INTEGER_IDX`),
+ KEY `PARTITION_KEY_VALS_N49` (`PART_ID`),
+ CONSTRAINT `PARTITION_KEY_VALS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_PARAMS` (
+ `PART_ID` bigint(20) NOT NULL,
+ `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ PRIMARY KEY (`PART_ID`,`PARAM_KEY`),
+ KEY `PARTITION_PARAMS_N49` (`PART_ID`),
+ CONSTRAINT `PARTITION_PARAMS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PART_COL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PART_COL_PRIVS` (
+ `PART_COLUMN_GRANT_ID` bigint(20) NOT NULL,
+ `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `CREATE_TIME` int(11) NOT NULL,
+ `GRANT_OPTION` smallint(6) NOT NULL,
+ `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `PART_ID` bigint(20) DEFAULT NULL,
+ `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `PART_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ PRIMARY KEY (`PART_COLUMN_GRANT_ID`),
+ KEY `PART_COL_PRIVS_N49` (`PART_ID`),
+ KEY `PARTITIONCOLUMNPRIVILEGEINDEX` (`PART_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+ CONSTRAINT `PART_COL_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PART_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PART_PRIVS` (
+ `PART_GRANT_ID` bigint(20) NOT NULL,
+ `CREATE_TIME` int(11) NOT NULL,
+ `GRANT_OPTION` smallint(6) NOT NULL,
+ `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `PART_ID` bigint(20) DEFAULT NULL,
+ `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `PART_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ PRIMARY KEY (`PART_GRANT_ID`),
+ KEY `PARTPRIVILEGEINDEX` (`PART_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+ KEY `PART_PRIVS_N49` (`PART_ID`),
+ CONSTRAINT `PART_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `ROLES`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `ROLES` (
+ `ROLE_ID` bigint(20) NOT NULL,
+ `CREATE_TIME` int(11) NOT NULL,
+ `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `ROLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ PRIMARY KEY (`ROLE_ID`),
+ UNIQUE KEY `ROLEENTITYINDEX` (`ROLE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `ROLE_MAP`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `ROLE_MAP` (
+ `ROLE_GRANT_ID` bigint(20) NOT NULL,
+ `ADD_TIME` int(11) NOT NULL,
+ `GRANT_OPTION` smallint(6) NOT NULL,
+ `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `ROLE_ID` bigint(20) DEFAULT NULL,
+ PRIMARY KEY (`ROLE_GRANT_ID`),
+ UNIQUE KEY `USERROLEMAPINDEX` (`PRINCIPAL_NAME`,`ROLE_ID`,`GRANTOR`,`GRANTOR_TYPE`),
+ KEY `ROLE_MAP_N49` (`ROLE_ID`),
+ CONSTRAINT `ROLE_MAP_FK1` FOREIGN KEY (`ROLE_ID`) REFERENCES `ROLES` (`ROLE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SDS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SDS` (
+ `SD_ID` bigint(20) NOT NULL,
+ `CD_ID` bigint(20) DEFAULT NULL,
+ `INPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `IS_COMPRESSED` bit(1) NOT NULL,
+ `IS_STOREDASSUBDIRECTORIES` bit(1) NOT NULL,
+ `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `NUM_BUCKETS` int(11) NOT NULL,
+ `OUTPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `SERDE_ID` bigint(20) DEFAULT NULL,
+ PRIMARY KEY (`SD_ID`),
+ KEY `SDS_N49` (`SERDE_ID`),
+ KEY `SDS_N50` (`CD_ID`),
+ CONSTRAINT `SDS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`),
+ CONSTRAINT `SDS_FK2` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SD_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SD_PARAMS` (
+ `SD_ID` bigint(20) NOT NULL,
+ `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ PRIMARY KEY (`SD_ID`,`PARAM_KEY`),
+ KEY `SD_PARAMS_N49` (`SD_ID`),
+ CONSTRAINT `SD_PARAMS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SEQUENCE_TABLE`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SEQUENCE_TABLE` (
+ `SEQUENCE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `NEXT_VAL` bigint(20) NOT NULL,
+ PRIMARY KEY (`SEQUENCE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SERDES`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SERDES` (
+ `SERDE_ID` bigint(20) NOT NULL,
+ `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `SLIB` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ PRIMARY KEY (`SERDE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SERDE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SERDE_PARAMS` (
+ `SERDE_ID` bigint(20) NOT NULL,
+ `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ PRIMARY KEY (`SERDE_ID`,`PARAM_KEY`),
+ KEY `SERDE_PARAMS_N49` (`SERDE_ID`),
+ CONSTRAINT `SERDE_PARAMS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_COL_NAMES`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_COL_NAMES` (
+ `SD_ID` bigint(20) NOT NULL,
+ `SKEWED_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `INTEGER_IDX` int(11) NOT NULL,
+ PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+ KEY `SKEWED_COL_NAMES_N49` (`SD_ID`),
+ CONSTRAINT `SKEWED_COL_NAMES_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_COL_VALUE_LOC_MAP`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_COL_VALUE_LOC_MAP` (
+ `SD_ID` bigint(20) NOT NULL,
+ `STRING_LIST_ID_KID` bigint(20) NOT NULL,
+ `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ PRIMARY KEY (`SD_ID`,`STRING_LIST_ID_KID`),
+ KEY `SKEWED_COL_VALUE_LOC_MAP_N49` (`STRING_LIST_ID_KID`),
+ KEY `SKEWED_COL_VALUE_LOC_MAP_N50` (`SD_ID`),
+ CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK2` FOREIGN KEY (`STRING_LIST_ID_KID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
+ CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_STRING_LIST`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST` (
+ `STRING_LIST_ID` bigint(20) NOT NULL,
+ PRIMARY KEY (`STRING_LIST_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_STRING_LIST_VALUES`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST_VALUES` (
+ `STRING_LIST_ID` bigint(20) NOT NULL,
+ `STRING_LIST_VALUE` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `INTEGER_IDX` int(11) NOT NULL,
+ PRIMARY KEY (`STRING_LIST_ID`,`INTEGER_IDX`),
+ KEY `SKEWED_STRING_LIST_VALUES_N49` (`STRING_LIST_ID`),
+ CONSTRAINT `SKEWED_STRING_LIST_VALUES_FK1` FOREIGN KEY (`STRING_LIST_ID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_VALUES`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_VALUES` (
+ `SD_ID_OID` bigint(20) NOT NULL,
+ `STRING_LIST_ID_EID` bigint(20) NOT NULL,
+ `INTEGER_IDX` int(11) NOT NULL,
+ PRIMARY KEY (`SD_ID_OID`,`INTEGER_IDX`),
+ KEY `SKEWED_VALUES_N50` (`SD_ID_OID`),
+ KEY `SKEWED_VALUES_N49` (`STRING_LIST_ID_EID`),
+ CONSTRAINT `SKEWED_VALUES_FK2` FOREIGN KEY (`STRING_LIST_ID_EID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
+ CONSTRAINT `SKEWED_VALUES_FK1` FOREIGN KEY (`SD_ID_OID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
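+
+-- Example (illustrative; not generated by mysqldump): the SKEWED_* tables
+-- together store list-bucketing metadata. SKEWED_COL_NAMES lists the skewed
+-- columns of a storage descriptor, SKEWED_STRING_LIST(_VALUES) holds the
+-- skewed value tuples, and SKEWED_COL_VALUE_LOC_MAP maps each tuple to a
+-- dedicated directory LOCATION. Hive DDL along these lines populates them:
+--   CREATE TABLE t (k STRING, v STRING)
+--   SKEWED BY (k) ON ('a', 'b') STORED AS DIRECTORIES;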
+
+--
+-- Table structure for table `SORT_COLS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SORT_COLS` (
+ `SD_ID` bigint(20) NOT NULL,
+ `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `ORDER` int(11) NOT NULL,
+ `INTEGER_IDX` int(11) NOT NULL,
+ PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+ KEY `SORT_COLS_N49` (`SD_ID`),
+ CONSTRAINT `SORT_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TABLE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TABLE_PARAMS` (
+ `TBL_ID` bigint(20) NOT NULL,
+ `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ PRIMARY KEY (`TBL_ID`,`PARAM_KEY`),
+ KEY `TABLE_PARAMS_N49` (`TBL_ID`),
+ CONSTRAINT `TABLE_PARAMS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBLS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBLS` (
+ `TBL_ID` bigint(20) NOT NULL,
+ `CREATE_TIME` int(11) NOT NULL,
+ `DB_ID` bigint(20) DEFAULT NULL,
+ `LAST_ACCESS_TIME` int(11) NOT NULL,
+ `OWNER` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `RETENTION` int(11) NOT NULL,
+ `SD_ID` bigint(20) DEFAULT NULL,
+ `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `TBL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `VIEW_EXPANDED_TEXT` mediumtext,
+ `VIEW_ORIGINAL_TEXT` mediumtext,
+ `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
+ PRIMARY KEY (`TBL_ID`),
+ UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`),
+ KEY `TBLS_N50` (`SD_ID`),
+ KEY `TBLS_N49` (`DB_ID`),
+ KEY `TBLS_N51` (`LINK_TARGET_ID`),
+ CONSTRAINT `TBLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+ CONSTRAINT `TBLS_FK2` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`),
+ CONSTRAINT `TBLS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBL_COL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBL_COL_PRIVS` (
+ `TBL_COLUMN_GRANT_ID` bigint(20) NOT NULL,
+ `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `CREATE_TIME` int(11) NOT NULL,
+ `GRANT_OPTION` smallint(6) NOT NULL,
+ `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `TBL_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `TBL_ID` bigint(20) DEFAULT NULL,
+ PRIMARY KEY (`TBL_COLUMN_GRANT_ID`),
+ KEY `TABLECOLUMNPRIVILEGEINDEX` (`TBL_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+ KEY `TBL_COL_PRIVS_N49` (`TBL_ID`),
+ CONSTRAINT `TBL_COL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBL_PRIVS` (
+ `TBL_GRANT_ID` bigint(20) NOT NULL,
+ `CREATE_TIME` int(11) NOT NULL,
+ `GRANT_OPTION` smallint(6) NOT NULL,
+ `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `TBL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `TBL_ID` bigint(20) DEFAULT NULL,
+ PRIMARY KEY (`TBL_GRANT_ID`),
+ KEY `TBL_PRIVS_N49` (`TBL_ID`),
+ KEY `TABLEPRIVILEGEINDEX` (`TBL_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+ CONSTRAINT `TBL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TAB_COL_STATS`
+--
+CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` (
+ `CS_ID` bigint(20) NOT NULL,
+ `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TBL_ID` bigint(20) NOT NULL,
+ `LONG_LOW_VALUE` bigint(20),
+ `LONG_HIGH_VALUE` bigint(20),
+ `DOUBLE_HIGH_VALUE` double(53,4),
+ `DOUBLE_LOW_VALUE` double(53,4),
+ `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `NUM_NULLS` bigint(20) NOT NULL,
+ `NUM_DISTINCTS` bigint(20),
+ `AVG_COL_LEN` double(53,4),
+ `MAX_COL_LEN` bigint(20),
+ `NUM_TRUES` bigint(20),
+ `NUM_FALSES` bigint(20),
+ `LAST_ANALYZED` bigint(20) NOT NULL,
+ PRIMARY KEY (`CS_ID`),
+ CONSTRAINT `TAB_COL_STATS_FK` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
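+
+-- Example (illustrative): rows in TAB_COL_STATS and PART_COL_STATS are
+-- written by the metastore when column statistics are computed, e.g. from
+-- the Hive CLI:
+--   ANALYZE TABLE t COMPUTE STATISTICS FOR COLUMNS k, v;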
+
+--
+-- Table structure for table `PART_COL_STATS`
+--
+CREATE TABLE IF NOT EXISTS `PART_COL_STATS` (
+ `CS_ID` bigint(20) NOT NULL,
+ `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PART_ID` bigint(20) NOT NULL,
+ `LONG_LOW_VALUE` bigint(20),
+ `LONG_HIGH_VALUE` bigint(20),
+ `DOUBLE_HIGH_VALUE` double(53,4),
+ `DOUBLE_LOW_VALUE` double(53,4),
+ `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `NUM_NULLS` bigint(20) NOT NULL,
+ `NUM_DISTINCTS` bigint(20),
+ `AVG_COL_LEN` double(53,4),
+ `MAX_COL_LEN` bigint(20),
+ `NUM_TRUES` bigint(20),
+ `NUM_FALSES` bigint(20),
+ `LAST_ANALYZED` bigint(20) NOT NULL,
+ PRIMARY KEY (`CS_ID`),
+ CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for table `TYPES`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TYPES` (
+ `TYPES_ID` bigint(20) NOT NULL,
+ `TYPE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `TYPE1` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `TYPE2` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ PRIMARY KEY (`TYPES_ID`),
+ UNIQUE KEY `UNIQUE_TYPE` (`TYPE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TYPE_FIELDS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TYPE_FIELDS` (
+ `TYPE_NAME` bigint(20) NOT NULL,
+ `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `FIELD_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `FIELD_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `INTEGER_IDX` int(11) NOT NULL,
+ PRIMARY KEY (`TYPE_NAME`,`FIELD_NAME`),
+ KEY `TYPE_FIELDS_N49` (`TYPE_NAME`),
+ CONSTRAINT `TYPE_FIELDS_FK1` FOREIGN KEY (`TYPE_NAME`) REFERENCES `TYPES` (`TYPES_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+-- Table `MASTER_KEYS` for classes [org.apache.hadoop.hive.metastore.model.MMasterKey]
+CREATE TABLE IF NOT EXISTS `MASTER_KEYS`
+(
+ `KEY_ID` INTEGER NOT NULL AUTO_INCREMENT,
+ `MASTER_KEY` VARCHAR(767) BINARY NULL,
+ PRIMARY KEY (`KEY_ID`)
+) ENGINE=INNODB DEFAULT CHARSET=latin1;
+
+-- Table `DELEGATION_TOKENS` for classes [org.apache.hadoop.hive.metastore.model.MDelegationToken]
+CREATE TABLE IF NOT EXISTS `DELEGATION_TOKENS`
+(
+ `TOKEN_IDENT` VARCHAR(767) BINARY NOT NULL,
+ `TOKEN` VARCHAR(767) BINARY NULL,
+ PRIMARY KEY (`TOKEN_IDENT`)
+) ENGINE=INNODB DEFAULT CHARSET=latin1;
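+
+-- Note (illustrative): MASTER_KEYS and DELEGATION_TOKENS back the metastore's
+-- delegation-token store when it runs in secure (Kerberos) mode; both are
+-- managed entirely by the metastore service and are typically empty otherwise.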
+
+--
+-- Table structure for VERSION
+--
+CREATE TABLE IF NOT EXISTS `VERSION` (
+ `VER_ID` BIGINT NOT NULL,
+ `SCHEMA_VERSION` VARCHAR(127) NOT NULL,
+ `VERSION_COMMENT` VARCHAR(255),
+ PRIMARY KEY (`VER_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '0.12.0', 'Hive release version 0.12.0');
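+
+-- Example (illustrative): on startup the metastore verifies the recorded
+-- schema version with a query along the lines of:
+--   SELECT SCHEMA_VERSION FROM VERSION;  -- must return exactly one row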
+
+/*!40101 SET character_set_client = @saved_cs_client */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
+/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
+/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2012-08-23 0:56:31
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/etc/hive-schema-0.12.0.oracle.sql b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/etc/hive-schema-0.12.0.oracle.sql
new file mode 100644
index 0000000000..a2cbfa24de
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/etc/hive-schema-0.12.0.oracle.sql
@@ -0,0 +1,717 @@
+-- Table SEQUENCE_TABLE is an internal table required by DataNucleus.
+-- NOTE: Some versions of SchemaTool do not automatically generate this table.
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+CREATE TABLE SEQUENCE_TABLE
+(
+ SEQUENCE_NAME VARCHAR2(255) NOT NULL,
+ NEXT_VAL NUMBER NOT NULL
+);
+
+ALTER TABLE SEQUENCE_TABLE ADD CONSTRAINT PART_TABLE_PK PRIMARY KEY (SEQUENCE_NAME);
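+
+-- Example (illustrative; the exact SQL DataNucleus emits may differ):
+-- identifiers are allocated by incrementing NEXT_VAL and reading it back,
+-- one row per persistent class, e.g.:
+--   UPDATE SEQUENCE_TABLE SET NEXT_VAL = NEXT_VAL + 5
+--    WHERE SEQUENCE_NAME = 'org.apache.hadoop.hive.metastore.model.MTable';
+--   SELECT NEXT_VAL FROM SEQUENCE_TABLE
+--    WHERE SEQUENCE_NAME = 'org.apache.hadoop.hive.metastore.model.MTable';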
+
+-- Table NUCLEUS_TABLES is an internal table required by DataNucleus.
+-- This table is required if datanucleus.autoStartMechanism=SchemaTable
+-- NOTE: Some versions of SchemaTool do not automatically generate this table.
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+CREATE TABLE NUCLEUS_TABLES
+(
+ CLASS_NAME VARCHAR2(128) NOT NULL,
+ TABLE_NAME VARCHAR2(128) NOT NULL,
+ TYPE VARCHAR2(4) NOT NULL,
+ OWNER VARCHAR2(2) NOT NULL,
+ VERSION VARCHAR2(20) NOT NULL,
+ INTERFACE_NAME VARCHAR2(255) NULL
+);
+
+ALTER TABLE NUCLEUS_TABLES ADD CONSTRAINT NUCLEUS_TABLES_PK PRIMARY KEY (CLASS_NAME);
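+
+-- Example (illustrative; the column values below are assumptions): with
+-- datanucleus.autoStartMechanism=SchemaTable, DataNucleus records one row
+-- per persisted class, e.g.:
+--   INSERT INTO NUCLEUS_TABLES (CLASS_NAME, TABLE_NAME, TYPE, OWNER, VERSION, INTERFACE_NAME)
+--   VALUES ('org.apache.hadoop.hive.metastore.model.MTable', 'TBLS', 'FCO', '1', '3.0.1', NULL);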
+
+-- Table PART_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+CREATE TABLE PART_COL_PRIVS
+(
+ PART_COLUMN_GRANT_ID NUMBER NOT NULL,
+ "COLUMN_NAME" VARCHAR2(128) NULL,
+ CREATE_TIME NUMBER (10) NOT NULL,
+ GRANT_OPTION NUMBER (5) NOT NULL,
+ GRANTOR VARCHAR2(128) NULL,
+ GRANTOR_TYPE VARCHAR2(128) NULL,
+ PART_ID NUMBER NULL,
+ PRINCIPAL_NAME VARCHAR2(128) NULL,
+ PRINCIPAL_TYPE VARCHAR2(128) NULL,
+ PART_COL_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID);
+
+-- Table CDS.
+CREATE TABLE CDS
+(
+ CD_ID NUMBER NOT NULL
+);
+
+ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID);
+
+-- Table COLUMNS_V2 for join relationship
+CREATE TABLE COLUMNS_V2
+(
+ CD_ID NUMBER NOT NULL,
+ "COMMENT" VARCHAR2(256) NULL,
+ "COLUMN_NAME" VARCHAR2(128) NOT NULL,
+ TYPE_NAME VARCHAR2(4000) NOT NULL,
+ INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_PK PRIMARY KEY (CD_ID,"COLUMN_NAME");
+
+-- Table PARTITION_KEY_VALS for join relationship
+CREATE TABLE PARTITION_KEY_VALS
+(
+ PART_ID NUMBER NOT NULL,
+ PART_KEY_VAL VARCHAR2(256) NULL,
+ INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX);
+
+-- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase]
+CREATE TABLE DBS
+(
+ DB_ID NUMBER NOT NULL,
+ "DESC" VARCHAR2(4000) NULL,
+ DB_LOCATION_URI VARCHAR2(4000) NOT NULL,
+ "NAME" VARCHAR2(128) NULL
+);
+
+ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
+
+-- Table PARTITION_PARAMS for join relationship
+CREATE TABLE PARTITION_PARAMS
+(
+ PART_ID NUMBER NOT NULL,
+ PARAM_KEY VARCHAR2(256) NOT NULL,
+ PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY);
+
+-- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+CREATE TABLE SERDES
+(
+ SERDE_ID NUMBER NOT NULL,
+ "NAME" VARCHAR2(128) NULL,
+ SLIB VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
+
+-- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType]
+CREATE TABLE TYPES
+(
+ TYPES_ID NUMBER NOT NULL,
+ TYPE_NAME VARCHAR2(128) NULL,
+ TYPE1 VARCHAR2(767) NULL,
+ TYPE2 VARCHAR2(767) NULL
+);
+
+ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID);
+
+-- Table PARTITION_KEYS for join relationship
+CREATE TABLE PARTITION_KEYS
+(
+ TBL_ID NUMBER NOT NULL,
+ PKEY_COMMENT VARCHAR2(4000) NULL,
+ PKEY_NAME VARCHAR2(128) NOT NULL,
+ PKEY_TYPE VARCHAR2(767) NOT NULL,
+ INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME);
+
+-- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole]
+CREATE TABLE ROLES
+(
+ ROLE_ID NUMBER NOT NULL,
+ CREATE_TIME NUMBER (10) NOT NULL,
+ OWNER_NAME VARCHAR2(128) NULL,
+ ROLE_NAME VARCHAR2(128) NULL
+);
+
+ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID);
+
+-- Table PARTITIONS for classes [org.apache.hadoop.hive.metastore.model.MPartition]
+CREATE TABLE PARTITIONS
+(
+ PART_ID NUMBER NOT NULL,
+ CREATE_TIME NUMBER (10) NOT NULL,
+ LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+ PART_NAME VARCHAR2(767) NULL,
+ SD_ID NUMBER NULL,
+ TBL_ID NUMBER NULL
+);
+
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
+
+-- Table INDEX_PARAMS for join relationship
+CREATE TABLE INDEX_PARAMS
+(
+ INDEX_ID NUMBER NOT NULL,
+ PARAM_KEY VARCHAR2(256) NOT NULL,
+ PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PARAM_KEY);
+
+-- Table TBL_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+CREATE TABLE TBL_COL_PRIVS
+(
+ TBL_COLUMN_GRANT_ID NUMBER NOT NULL,
+ "COLUMN_NAME" VARCHAR2(128) NULL,
+ CREATE_TIME NUMBER (10) NOT NULL,
+ GRANT_OPTION NUMBER (5) NOT NULL,
+ GRANTOR VARCHAR2(128) NULL,
+ GRANTOR_TYPE VARCHAR2(128) NULL,
+ PRINCIPAL_NAME VARCHAR2(128) NULL,
+ PRINCIPAL_TYPE VARCHAR2(128) NULL,
+ TBL_COL_PRIV VARCHAR2(128) NULL,
+ TBL_ID NUMBER NULL
+);
+
+ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUMN_GRANT_ID);
+
+-- Table IDXS for classes [org.apache.hadoop.hive.metastore.model.MIndex]
+CREATE TABLE IDXS
+(
+ INDEX_ID NUMBER NOT NULL,
+ CREATE_TIME NUMBER (10) NOT NULL,
+ DEFERRED_REBUILD NUMBER(1) NOT NULL CHECK (DEFERRED_REBUILD IN (1,0)),
+ INDEX_HANDLER_CLASS VARCHAR2(4000) NULL,
+ INDEX_NAME VARCHAR2(128) NULL,
+ INDEX_TBL_ID NUMBER NULL,
+ LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+ ORIG_TBL_ID NUMBER NULL,
+ SD_ID NUMBER NULL
+);
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_PK PRIMARY KEY (INDEX_ID);
+
+-- Table BUCKETING_COLS for join relationship
+CREATE TABLE BUCKETING_COLS
+(
+ SD_ID NUMBER NOT NULL,
+ BUCKET_COL_NAME VARCHAR2(256) NULL,
+ INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+-- Table TYPE_FIELDS for join relationship
+CREATE TABLE TYPE_FIELDS
+(
+ TYPE_NAME NUMBER NOT NULL,
+ "COMMENT" VARCHAR2(256) NULL,
+ FIELD_NAME VARCHAR2(128) NOT NULL,
+ FIELD_TYPE VARCHAR2(767) NOT NULL,
+ INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIELD_NAME);
+
+-- Table SD_PARAMS for join relationship
+CREATE TABLE SD_PARAMS
+(
+ SD_ID NUMBER NOT NULL,
+ PARAM_KEY VARCHAR2(256) NOT NULL,
+ PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY);
+
+-- Table GLOBAL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+CREATE TABLE GLOBAL_PRIVS
+(
+ USER_GRANT_ID NUMBER NOT NULL,
+ CREATE_TIME NUMBER (10) NOT NULL,
+ GRANT_OPTION NUMBER (5) NOT NULL,
+ GRANTOR VARCHAR2(128) NULL,
+ GRANTOR_TYPE VARCHAR2(128) NULL,
+ PRINCIPAL_NAME VARCHAR2(128) NULL,
+ PRINCIPAL_TYPE VARCHAR2(128) NULL,
+ USER_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_ID);
+
+-- Table SDS for classes [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+CREATE TABLE SDS
+(
+ SD_ID NUMBER NOT NULL,
+ CD_ID NUMBER NULL,
+ INPUT_FORMAT VARCHAR2(4000) NULL,
+ IS_COMPRESSED NUMBER(1) NOT NULL CHECK (IS_COMPRESSED IN (1,0)),
+ LOCATION VARCHAR2(4000) NULL,
+ NUM_BUCKETS NUMBER (10) NOT NULL,
+ OUTPUT_FORMAT VARCHAR2(4000) NULL,
+ SERDE_ID NUMBER NULL,
+ IS_STOREDASSUBDIRECTORIES NUMBER(1) NOT NULL CHECK (IS_STOREDASSUBDIRECTORIES IN (1,0))
+);
+
+ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID);
+
+-- Table TABLE_PARAMS for join relationship
+CREATE TABLE TABLE_PARAMS
+(
+ TBL_ID NUMBER NOT NULL,
+ PARAM_KEY VARCHAR2(256) NOT NULL,
+ PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY);
+
+-- Table SORT_COLS for join relationship
+CREATE TABLE SORT_COLS
+(
+ SD_ID NUMBER NOT NULL,
+ "COLUMN_NAME" VARCHAR2(128) NULL,
+ "ORDER" NUMBER (10) NOT NULL,
+ INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+-- Table TBL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+CREATE TABLE TBL_PRIVS
+(
+ TBL_GRANT_ID NUMBER NOT NULL,
+ CREATE_TIME NUMBER (10) NOT NULL,
+ GRANT_OPTION NUMBER (5) NOT NULL,
+ GRANTOR VARCHAR2(128) NULL,
+ GRANTOR_TYPE VARCHAR2(128) NULL,
+ PRINCIPAL_NAME VARCHAR2(128) NULL,
+ PRINCIPAL_TYPE VARCHAR2(128) NULL,
+ TBL_PRIV VARCHAR2(128) NULL,
+ TBL_ID NUMBER NULL
+);
+
+ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID);
+
+-- Table DATABASE_PARAMS for join relationship
+CREATE TABLE DATABASE_PARAMS
+(
+ DB_ID NUMBER NOT NULL,
+ PARAM_KEY VARCHAR2(180) NOT NULL,
+ PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY);
+
+-- Table ROLE_MAP for classes [org.apache.hadoop.hive.metastore.model.MRoleMap]
+CREATE TABLE ROLE_MAP
+(
+ ROLE_GRANT_ID NUMBER NOT NULL,
+ ADD_TIME NUMBER (10) NOT NULL,
+ GRANT_OPTION NUMBER (5) NOT NULL,
+ GRANTOR VARCHAR2(128) NULL,
+ GRANTOR_TYPE VARCHAR2(128) NULL,
+ PRINCIPAL_NAME VARCHAR2(128) NULL,
+ PRINCIPAL_TYPE VARCHAR2(128) NULL,
+ ROLE_ID NUMBER NULL
+);
+
+ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_PK PRIMARY KEY (ROLE_GRANT_ID);
+
+-- Table SERDE_PARAMS for join relationship
+CREATE TABLE SERDE_PARAMS
+(
+ SERDE_ID NUMBER NOT NULL,
+ PARAM_KEY VARCHAR2(256) NOT NULL,
+ PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY);
+
+-- Table PART_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+CREATE TABLE PART_PRIVS
+(
+ PART_GRANT_ID NUMBER NOT NULL,
+ CREATE_TIME NUMBER (10) NOT NULL,
+ GRANT_OPTION NUMBER (5) NOT NULL,
+ GRANTOR VARCHAR2(128) NULL,
+ GRANTOR_TYPE VARCHAR2(128) NULL,
+ PART_ID NUMBER NULL,
+ PRINCIPAL_NAME VARCHAR2(128) NULL,
+ PRINCIPAL_TYPE VARCHAR2(128) NULL,
+ PART_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID);
+
+-- Table DB_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+CREATE TABLE DB_PRIVS
+(
+ DB_GRANT_ID NUMBER NOT NULL,
+ CREATE_TIME NUMBER (10) NOT NULL,
+ DB_ID NUMBER NULL,
+ GRANT_OPTION NUMBER (5) NOT NULL,
+ GRANTOR VARCHAR2(128) NULL,
+ GRANTOR_TYPE VARCHAR2(128) NULL,
+ PRINCIPAL_NAME VARCHAR2(128) NULL,
+ PRINCIPAL_TYPE VARCHAR2(128) NULL,
+ DB_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID);
+
+-- Table TBLS for classes [org.apache.hadoop.hive.metastore.model.MTable]
+CREATE TABLE TBLS
+(
+ TBL_ID NUMBER NOT NULL,
+ CREATE_TIME NUMBER (10) NOT NULL,
+ DB_ID NUMBER NULL,
+ LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+ OWNER VARCHAR2(767) NULL,
+ RETENTION NUMBER (10) NOT NULL,
+ SD_ID NUMBER NULL,
+ TBL_NAME VARCHAR2(128) NULL,
+ TBL_TYPE VARCHAR2(128) NULL,
+ VIEW_EXPANDED_TEXT CLOB NULL,
+ VIEW_ORIGINAL_TEXT CLOB NULL
+);
+
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
+
+-- Table PARTITION_EVENTS for classes [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+CREATE TABLE PARTITION_EVENTS
+(
+ PART_NAME_ID NUMBER NOT NULL,
+ DB_NAME VARCHAR2(128) NULL,
+ EVENT_TIME NUMBER NOT NULL,
+ EVENT_TYPE NUMBER (10) NOT NULL,
+ PARTITION_NAME VARCHAR2(767) NULL,
+ TBL_NAME VARCHAR2(128) NULL
+);
+
+ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID);
+
+-- Table SKEWED_STRING_LIST for classes [org.apache.hadoop.hive.metastore.model.MStringList]
+CREATE TABLE SKEWED_STRING_LIST
+(
+ STRING_LIST_ID NUMBER NOT NULL
+);
+
+ALTER TABLE SKEWED_STRING_LIST ADD CONSTRAINT SKEWED_STRING_LIST_PK PRIMARY KEY (STRING_LIST_ID);
+
+CREATE TABLE SKEWED_STRING_LIST_VALUES
+(
+ STRING_LIST_ID NUMBER NOT NULL,
+ "STRING_LIST_VALUE" VARCHAR2(256) NULL,
+ INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_PK PRIMARY KEY (STRING_LIST_ID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
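+
+-- Note (illustrative): INITIALLY DEFERRED foreign keys let DataNucleus insert
+-- parent and child rows in either order within a transaction; Oracle only
+-- validates the constraint at COMMIT time, e.g.:
+--   INSERT INTO SKEWED_STRING_LIST_VALUES VALUES (1, 'x', 0); -- child first
+--   INSERT INTO SKEWED_STRING_LIST VALUES (1);                -- parent second
+--   COMMIT;                                                   -- FK checked here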
+
+CREATE TABLE SKEWED_COL_NAMES
+(
+ SD_ID NUMBER NOT NULL,
+ "SKEWED_COL_NAME" VARCHAR2(256) NULL,
+ INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_COL_VALUE_LOC_MAP
+(
+ SD_ID NUMBER NOT NULL,
+ STRING_LIST_ID_KID NUMBER NOT NULL,
+ "LOCATION" VARCHAR2(4000) NULL
+);
+
+CREATE TABLE MASTER_KEYS
+(
+ KEY_ID NUMBER (10) NOT NULL,
+ MASTER_KEY VARCHAR2(767) NULL
+);
+
+CREATE TABLE DELEGATION_TOKENS
+(
+ TOKEN_IDENT VARCHAR2(767) NOT NULL,
+ TOKEN VARCHAR2(767) NULL
+);
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID);
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK1 FOREIGN KEY (STRING_LIST_ID_KID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_VALUES
+(
+ SD_ID_OID NUMBER NOT NULL,
+ STRING_LIST_ID_EID NUMBER NOT NULL,
+ INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY (SD_ID_OID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID_EID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+-- column statistics
+
+CREATE TABLE TAB_COL_STATS (
+ CS_ID NUMBER NOT NULL,
+ DB_NAME VARCHAR2(128) NOT NULL,
+ TABLE_NAME VARCHAR2(128) NOT NULL,
+ COLUMN_NAME VARCHAR2(128) NOT NULL,
+ COLUMN_TYPE VARCHAR2(128) NOT NULL,
+ TBL_ID NUMBER NOT NULL,
+ LONG_LOW_VALUE NUMBER,
+ LONG_HIGH_VALUE NUMBER,
+ DOUBLE_LOW_VALUE NUMBER,
+ DOUBLE_HIGH_VALUE NUMBER,
+ BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
+ BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
+ NUM_NULLS NUMBER NOT NULL,
+ NUM_DISTINCTS NUMBER,
+ AVG_COL_LEN NUMBER,
+ MAX_COL_LEN NUMBER,
+ NUM_TRUES NUMBER,
+ NUM_FALSES NUMBER,
+ LAST_ANALYZED NUMBER NOT NULL
+);
+
+CREATE TABLE VERSION (
+ VER_ID NUMBER NOT NULL,
+ SCHEMA_VERSION VARCHAR(127) NOT NULL,
+ VERSION_COMMENT VARCHAR(255)
+);
+ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID);
+
+ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PKEY PRIMARY KEY (CS_ID);
+
+ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS(TBL_ID);
+
+CREATE TABLE PART_COL_STATS (
+ CS_ID NUMBER NOT NULL,
+ DB_NAME VARCHAR2(128) NOT NULL,
+ TABLE_NAME VARCHAR2(128) NOT NULL,
+ PARTITION_NAME VARCHAR2(767) NOT NULL,
+ COLUMN_NAME VARCHAR2(128) NOT NULL,
+ COLUMN_TYPE VARCHAR2(128) NOT NULL,
+ PART_ID NUMBER NOT NULL,
+ LONG_LOW_VALUE NUMBER,
+ LONG_HIGH_VALUE NUMBER,
+ DOUBLE_LOW_VALUE NUMBER,
+ DOUBLE_HIGH_VALUE NUMBER,
+ BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
+ BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
+ NUM_NULLS NUMBER NOT NULL,
+ NUM_DISTINCTS NUMBER,
+ AVG_COL_LEN NUMBER,
+ MAX_COL_LEN NUMBER,
+ NUM_TRUES NUMBER,
+ NUM_FALSES NUMBER,
+ LAST_ANALYZED NUMBER NOT NULL
+);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PKEY PRIMARY KEY (CS_ID);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED;
+
+CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);
+
+-- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PART_COL_PRIVS_N49 ON PART_COL_PRIVS (PART_ID);
+
+CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table COLUMNS_V2
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_FK1 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX COLUMNS_V2_N49 ON COLUMNS_V2 (CD_ID);
+
+
+-- Constraints for table PARTITION_KEY_VALS
+ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID);
+
+
+-- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase]
+CREATE UNIQUE INDEX UNIQUE_DATABASE ON DBS ("NAME");
+
+
+-- Constraints for table PARTITION_PARAMS
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_PARAMS_N49 ON PARTITION_PARAMS (PART_ID);
+
+
+-- Constraints for table SERDES for class(es) [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+
+-- Constraints for table TYPES for class(es) [org.apache.hadoop.hive.metastore.model.MType]
+CREATE UNIQUE INDEX UNIQUE_TYPE ON TYPES (TYPE_NAME);
+
+
+-- Constraints for table PARTITION_KEYS
+ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEYS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_KEYS_N49 ON PARTITION_KEYS (TBL_ID);
+
+
+-- Constraints for table ROLES for class(es) [org.apache.hadoop.hive.metastore.model.MRole]
+CREATE UNIQUE INDEX ROLEENTITYINDEX ON ROLES (ROLE_NAME);
+
+
+-- Constraints for table PARTITIONS for class(es) [org.apache.hadoop.hive.metastore.model.MPartition]
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITIONS_N49 ON PARTITIONS (SD_ID);
+
+CREATE INDEX PARTITIONS_N50 ON PARTITIONS (TBL_ID);
+
+CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (PART_NAME,TBL_ID);
+
+
+-- Constraints for table INDEX_PARAMS
+ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_FK1 FOREIGN KEY (INDEX_ID) REFERENCES IDXS (INDEX_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX INDEX_PARAMS_N49 ON INDEX_PARAMS (INDEX_ID);
+
+
+-- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX TBL_COL_PRIVS_N49 ON TBL_COL_PRIVS (TBL_ID);
+
+
+-- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex]
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK1 FOREIGN KEY (ORIG_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK3 FOREIGN KEY (INDEX_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE UNIQUE INDEX UNIQUEINDEX ON IDXS (INDEX_NAME,ORIG_TBL_ID);
+
+CREATE INDEX IDXS_N50 ON IDXS (INDEX_TBL_ID);
+
+CREATE INDEX IDXS_N51 ON IDXS (SD_ID);
+
+CREATE INDEX IDXS_N49 ON IDXS (ORIG_TBL_ID);
+
+
+-- Constraints for table BUCKETING_COLS
+ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX BUCKETING_COLS_N49 ON BUCKETING_COLS (SD_ID);
+
+
+-- Constraints for table TYPE_FIELDS
+ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_FK1 FOREIGN KEY (TYPE_NAME) REFERENCES TYPES (TYPES_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TYPE_FIELDS_N49 ON TYPE_FIELDS (TYPE_NAME);
+
+
+-- Constraints for table SD_PARAMS
+ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SD_PARAMS_N49 ON SD_PARAMS (SD_ID);
+
+
+-- Constraints for table GLOBAL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+CREATE UNIQUE INDEX GLOBALPRIVILEGEINDEX ON GLOBAL_PRIVS (PRINCIPAL_NAME,PRINCIPAL_TYPE,USER_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table SDS for class(es) [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+ALTER TABLE SDS ADD CONSTRAINT SDS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
+ALTER TABLE SDS ADD CONSTRAINT SDS_FK2 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SDS_N49 ON SDS (SERDE_ID);
+CREATE INDEX SDS_N50 ON SDS (CD_ID);
+
+
+-- Constraints for table TABLE_PARAMS
+ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID);
+
+
+-- Constraints for table SORT_COLS
+ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SORT_COLS_N49 ON SORT_COLS (SD_ID);
+
+
+-- Constraints for table TBL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TBL_PRIVS_N49 ON TBL_PRIVS (TBL_ID);
+
+CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (TBL_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table DATABASE_PARAMS
+ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX DATABASE_PARAMS_N49 ON DATABASE_PARAMS (DB_ID);
+
+
+-- Constraints for table ROLE_MAP for class(es) [org.apache.hadoop.hive.metastore.model.MRoleMap]
+ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES ROLES (ROLE_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX ROLE_MAP_N49 ON ROLE_MAP (ROLE_ID);
+
+CREATE UNIQUE INDEX USERROLEMAPINDEX ON ROLE_MAP (PRINCIPAL_NAME,ROLE_ID,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table SERDE_PARAMS
+ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SERDE_PARAMS_N49 ON SERDE_PARAMS (SERDE_ID);
+
+
+-- Constraints for table PART_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTPRIVILEGEINDEX ON PART_PRIVS (PART_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX PART_PRIVS_N49 ON PART_PRIVS (PART_ID);
+
+
+-- Constraints for table DB_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+CREATE UNIQUE INDEX DBPRIVILEGEINDEX ON DB_PRIVS (DB_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,DB_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX DB_PRIVS_N49 ON DB_PRIVS (DB_ID);
+
+
+-- Constraints for table TBLS for class(es) [org.apache.hadoop.hive.metastore.model.MTable]
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK2 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TBLS_N49 ON TBLS (DB_ID);
+
+CREATE UNIQUE INDEX UNIQUETABLE ON TBLS (TBL_NAME,DB_ID);
+
+CREATE INDEX TBLS_N50 ON TBLS (SD_ID);
+
+
+-- Constraints for table PARTITION_EVENTS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME);
+
+INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '0.12.0', 'Hive release version 0.12.0');
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/etc/hive-schema-0.12.0.postgres.sql b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/etc/hive-schema-0.12.0.postgres.sql
new file mode 100644
index 0000000000..61769f6653
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/etc/hive-schema-0.12.0.postgres.sql
@@ -0,0 +1,1405 @@
+--
+-- PostgreSQL database dump
+--
+
+SET statement_timeout = 0;
+SET client_encoding = 'UTF8';
+SET standard_conforming_strings = off;
+SET check_function_bodies = false;
+SET client_min_messages = warning;
+SET escape_string_warning = off;
+
+SET search_path = public, pg_catalog;
+
+SET default_tablespace = '';
+
+SET default_with_oids = false;
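+
+-- Note (illustrative): the SET statements above are standard pg_dump session
+-- settings; they affect only the session that restores this script and leave
+-- no persistent state behind.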
+
+--
+-- Name: BUCKETING_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "BUCKETING_COLS" (
+ "SD_ID" bigint NOT NULL,
+ "BUCKET_COL_NAME" character varying(256) DEFAULT NULL::character varying,
+ "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: CDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "CDS" (
+ "CD_ID" bigint NOT NULL
+);
+
+
+--
+-- Name: COLUMNS_OLD; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "COLUMNS_OLD" (
+ "SD_ID" bigint NOT NULL,
+ "COMMENT" character varying(256) DEFAULT NULL::character varying,
+ "COLUMN_NAME" character varying(128) NOT NULL,
+ "TYPE_NAME" character varying(4000) NOT NULL,
+ "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: COLUMNS_V2; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "COLUMNS_V2" (
+ "CD_ID" bigint NOT NULL,
+ "COMMENT" character varying(4000),
+ "COLUMN_NAME" character varying(128) NOT NULL,
+ "TYPE_NAME" character varying(4000),
+ "INTEGER_IDX" integer NOT NULL
+);
+
+
+--
+-- Name: DATABASE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DATABASE_PARAMS" (
+ "DB_ID" bigint NOT NULL,
+ "PARAM_KEY" character varying(180) NOT NULL,
+ "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: DBS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DBS" (
+ "DB_ID" bigint NOT NULL,
+ "DESC" character varying(4000) DEFAULT NULL::character varying,
+ "DB_LOCATION_URI" character varying(4000) NOT NULL,
+ "NAME" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: DB_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DB_PRIVS" (
+ "DB_GRANT_ID" bigint NOT NULL,
+ "CREATE_TIME" bigint NOT NULL,
+ "DB_ID" bigint,
+ "GRANT_OPTION" smallint NOT NULL,
+ "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+ "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+ "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "DB_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: GLOBAL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "GLOBAL_PRIVS" (
+ "USER_GRANT_ID" bigint NOT NULL,
+ "CREATE_TIME" bigint NOT NULL,
+ "GRANT_OPTION" smallint NOT NULL,
+ "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+ "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+ "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "USER_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: IDXS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "IDXS" (
+ "INDEX_ID" bigint NOT NULL,
+ "CREATE_TIME" bigint NOT NULL,
+ "DEFERRED_REBUILD" boolean NOT NULL,
+ "INDEX_HANDLER_CLASS" character varying(4000) DEFAULT NULL::character varying,
+ "INDEX_NAME" character varying(128) DEFAULT NULL::character varying,
+ "INDEX_TBL_ID" bigint,
+ "LAST_ACCESS_TIME" bigint NOT NULL,
+ "ORIG_TBL_ID" bigint,
+ "SD_ID" bigint
+);
+
+
+--
+-- Name: INDEX_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "INDEX_PARAMS" (
+ "INDEX_ID" bigint NOT NULL,
+ "PARAM_KEY" character varying(256) NOT NULL,
+ "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: NUCLEUS_TABLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "NUCLEUS_TABLES" (
+ "CLASS_NAME" character varying(128) NOT NULL,
+ "TABLE_NAME" character varying(128) NOT NULL,
+ "TYPE" character varying(4) NOT NULL,
+ "OWNER" character varying(2) NOT NULL,
+ "VERSION" character varying(20) NOT NULL,
+ "INTERFACE_NAME" character varying(255) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PARTITIONS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITIONS" (
+ "PART_ID" bigint NOT NULL,
+ "CREATE_TIME" bigint NOT NULL,
+ "LAST_ACCESS_TIME" bigint NOT NULL,
+ "PART_NAME" character varying(767) DEFAULT NULL::character varying,
+ "SD_ID" bigint,
+ "TBL_ID" bigint
+);
+
+
+--
+-- Name: PARTITION_EVENTS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_EVENTS" (
+ "PART_NAME_ID" bigint NOT NULL,
+ "DB_NAME" character varying(128),
+ "EVENT_TIME" bigint NOT NULL,
+ "EVENT_TYPE" integer NOT NULL,
+ "PARTITION_NAME" character varying(767),
+ "TBL_NAME" character varying(128)
+);
+
+
+--
+-- Name: PARTITION_KEYS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_KEYS" (
+ "TBL_ID" bigint NOT NULL,
+ "PKEY_COMMENT" character varying(4000) DEFAULT NULL::character varying,
+ "PKEY_NAME" character varying(128) NOT NULL,
+ "PKEY_TYPE" character varying(767) NOT NULL,
+ "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: PARTITION_KEY_VALS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_KEY_VALS" (
+ "PART_ID" bigint NOT NULL,
+ "PART_KEY_VAL" character varying(256) DEFAULT NULL::character varying,
+ "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: PARTITION_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_PARAMS" (
+ "PART_ID" bigint NOT NULL,
+ "PARAM_KEY" character varying(256) NOT NULL,
+ "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PART_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_COL_PRIVS" (
+ "PART_COLUMN_GRANT_ID" bigint NOT NULL,
+ "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+ "CREATE_TIME" bigint NOT NULL,
+ "GRANT_OPTION" smallint NOT NULL,
+ "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+ "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "PART_ID" bigint,
+ "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+ "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "PART_COL_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PART_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_PRIVS" (
+ "PART_GRANT_ID" bigint NOT NULL,
+ "CREATE_TIME" bigint NOT NULL,
+ "GRANT_OPTION" smallint NOT NULL,
+ "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+ "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "PART_ID" bigint,
+ "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+ "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "PART_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: ROLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "ROLES" (
+ "ROLE_ID" bigint NOT NULL,
+ "CREATE_TIME" bigint NOT NULL,
+ "OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
+ "ROLE_NAME" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: ROLE_MAP; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "ROLE_MAP" (
+ "ROLE_GRANT_ID" bigint NOT NULL,
+ "ADD_TIME" bigint NOT NULL,
+ "GRANT_OPTION" smallint NOT NULL,
+ "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+ "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+ "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "ROLE_ID" bigint
+);
+
+
+--
+-- Name: SDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SDS" (
+ "SD_ID" bigint NOT NULL,
+ "INPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
+ "IS_COMPRESSED" boolean NOT NULL,
+ "LOCATION" character varying(4000) DEFAULT NULL::character varying,
+ "NUM_BUCKETS" bigint NOT NULL,
+ "OUTPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
+ "SERDE_ID" bigint,
+ "CD_ID" bigint,
+ "IS_STOREDASSUBDIRECTORIES" boolean NOT NULL
+);
+
+
+--
+-- Name: SD_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SD_PARAMS" (
+ "SD_ID" bigint NOT NULL,
+ "PARAM_KEY" character varying(256) NOT NULL,
+ "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: SEQUENCE_TABLE; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SEQUENCE_TABLE" (
+ "SEQUENCE_NAME" character varying(255) NOT NULL,
+ "NEXT_VAL" bigint NOT NULL
+);
+
+
+--
+-- Name: SERDES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SERDES" (
+ "SERDE_ID" bigint NOT NULL,
+ "NAME" character varying(128) DEFAULT NULL::character varying,
+ "SLIB" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: SERDE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SERDE_PARAMS" (
+ "SERDE_ID" bigint NOT NULL,
+ "PARAM_KEY" character varying(256) NOT NULL,
+ "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: SORT_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SORT_COLS" (
+ "SD_ID" bigint NOT NULL,
+ "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+ "ORDER" bigint NOT NULL,
+ "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: TABLE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TABLE_PARAMS" (
+ "TBL_ID" bigint NOT NULL,
+ "PARAM_KEY" character varying(256) NOT NULL,
+ "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: TBLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBLS" (
+ "TBL_ID" bigint NOT NULL,
+ "CREATE_TIME" bigint NOT NULL,
+ "DB_ID" bigint,
+ "LAST_ACCESS_TIME" bigint NOT NULL,
+ "OWNER" character varying(767) DEFAULT NULL::character varying,
+ "RETENTION" bigint NOT NULL,
+ "SD_ID" bigint,
+ "TBL_NAME" character varying(128) DEFAULT NULL::character varying,
+ "TBL_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "VIEW_EXPANDED_TEXT" text,
+ "VIEW_ORIGINAL_TEXT" text
+);
+
+
+--
+-- Name: TBL_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBL_COL_PRIVS" (
+ "TBL_COLUMN_GRANT_ID" bigint NOT NULL,
+ "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+ "CREATE_TIME" bigint NOT NULL,
+ "GRANT_OPTION" smallint NOT NULL,
+ "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+ "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+ "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "TBL_COL_PRIV" character varying(128) DEFAULT NULL::character varying,
+ "TBL_ID" bigint
+);
+
+
+--
+-- Name: TBL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBL_PRIVS" (
+ "TBL_GRANT_ID" bigint NOT NULL,
+ "CREATE_TIME" bigint NOT NULL,
+ "GRANT_OPTION" smallint NOT NULL,
+ "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+ "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+ "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "TBL_PRIV" character varying(128) DEFAULT NULL::character varying,
+ "TBL_ID" bigint
+);
+
+
+--
+-- Name: TYPES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TYPES" (
+ "TYPES_ID" bigint NOT NULL,
+ "TYPE_NAME" character varying(128) DEFAULT NULL::character varying,
+ "TYPE1" character varying(767) DEFAULT NULL::character varying,
+ "TYPE2" character varying(767) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: TYPE_FIELDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TYPE_FIELDS" (
+ "TYPE_NAME" bigint NOT NULL,
+ "COMMENT" character varying(256) DEFAULT NULL::character varying,
+ "FIELD_NAME" character varying(128) NOT NULL,
+ "FIELD_TYPE" character varying(767) NOT NULL,
+ "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_STRING_LIST" (
+ "STRING_LIST_ID" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_STRING_LIST_VALUES" (
+ "STRING_LIST_ID" bigint NOT NULL,
+ "STRING_LIST_VALUE" character varying(256) DEFAULT NULL::character varying,
+ "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_COL_NAMES" (
+ "SD_ID" bigint NOT NULL,
+ "SKEWED_COL_NAME" character varying(256) DEFAULT NULL::character varying,
+ "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_COL_VALUE_LOC_MAP" (
+ "SD_ID" bigint NOT NULL,
+ "STRING_LIST_ID_KID" bigint NOT NULL,
+ "LOCATION" character varying(4000) DEFAULT NULL::character varying
+);
+
+CREATE TABLE "SKEWED_VALUES" (
+ "SD_ID_OID" bigint NOT NULL,
+ "STRING_LIST_ID_EID" bigint NOT NULL,
+ "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: TAB_COL_STATS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "MASTER_KEYS"
+(
+ "KEY_ID" SERIAL,
+ "MASTER_KEY" varchar(767) NULL,
+ PRIMARY KEY ("KEY_ID")
+);
+
+CREATE TABLE "DELEGATION_TOKENS"
+(
+ "TOKEN_IDENT" varchar(767) NOT NULL,
+ "TOKEN" varchar(767) NULL,
+ PRIMARY KEY ("TOKEN_IDENT")
+);
+
+CREATE TABLE "TAB_COL_STATS" (
+ "CS_ID" bigint NOT NULL,
+ "DB_NAME" character varying(128) DEFAULT NULL::character varying,
+ "TABLE_NAME" character varying(128) DEFAULT NULL::character varying,
+ "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+ "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "TBL_ID" bigint NOT NULL,
+ "LONG_LOW_VALUE" bigint,
+ "LONG_HIGH_VALUE" bigint,
+ "DOUBLE_LOW_VALUE" double precision,
+ "DOUBLE_HIGH_VALUE" double precision,
+ "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "NUM_NULLS" bigint NOT NULL,
+ "NUM_DISTINCTS" bigint,
+ "AVG_COL_LEN" double precision,
+ "MAX_COL_LEN" bigint,
+ "NUM_TRUES" bigint,
+ "NUM_FALSES" bigint,
+ "LAST_ANALYZED" bigint NOT NULL
+);
+
+--
+-- Table structure for VERSION
+--
+CREATE TABLE "VERSION" (
+ "VER_ID" bigint,
+ "SCHEMA_VERSION" character varying(127) NOT NULL,
+ "VERSION_COMMENT" character varying(255) NOT NULL
+);
+
+--
+-- Name: PART_COL_STATS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_COL_STATS" (
+ "CS_ID" bigint NOT NULL,
+ "DB_NAME" character varying(128) DEFAULT NULL::character varying,
+ "TABLE_NAME" character varying(128) DEFAULT NULL::character varying,
+ "PARTITION_NAME" character varying(767) DEFAULT NULL::character varying,
+ "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+ "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "PART_ID" bigint NOT NULL,
+ "LONG_LOW_VALUE" bigint,
+ "LONG_HIGH_VALUE" bigint,
+ "DOUBLE_LOW_VALUE" double precision,
+ "DOUBLE_HIGH_VALUE" double precision,
+ "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "NUM_NULLS" bigint NOT NULL,
+ "NUM_DISTINCTS" bigint,
+ "AVG_COL_LEN" double precision,
+ "MAX_COL_LEN" bigint,
+ "NUM_TRUES" bigint,
+ "NUM_FALSES" bigint,
+ "LAST_ANALYZED" bigint NOT NULL
+);
+
+--
+-- Name: BUCKETING_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "BUCKETING_COLS"
+ ADD CONSTRAINT "BUCKETING_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+
+--
+-- Name: CDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "CDS"
+ ADD CONSTRAINT "CDS_pkey" PRIMARY KEY ("CD_ID");
+
+
+--
+-- Name: COLUMNS_V2_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "COLUMNS_V2"
+ ADD CONSTRAINT "COLUMNS_V2_pkey" PRIMARY KEY ("CD_ID", "COLUMN_NAME");
+
+
+--
+-- Name: COLUMNS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "COLUMNS_OLD"
+ ADD CONSTRAINT "COLUMNS_pkey" PRIMARY KEY ("SD_ID", "COLUMN_NAME");
+
+
+--
+-- Name: DATABASE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DATABASE_PARAMS"
+ ADD CONSTRAINT "DATABASE_PARAMS_pkey" PRIMARY KEY ("DB_ID", "PARAM_KEY");
+
+
+--
+-- Name: DBPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+ ADD CONSTRAINT "DBPRIVILEGEINDEX" UNIQUE ("DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: DBS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DBS"
+ ADD CONSTRAINT "DBS_pkey" PRIMARY KEY ("DB_ID");
+
+
+--
+-- Name: DB_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+ ADD CONSTRAINT "DB_PRIVS_pkey" PRIMARY KEY ("DB_GRANT_ID");
+
+
+--
+-- Name: GLOBALPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "GLOBAL_PRIVS"
+ ADD CONSTRAINT "GLOBALPRIVILEGEINDEX" UNIQUE ("PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: GLOBAL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "GLOBAL_PRIVS"
+ ADD CONSTRAINT "GLOBAL_PRIVS_pkey" PRIMARY KEY ("USER_GRANT_ID");
+
+
+--
+-- Name: IDXS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "IDXS"
+ ADD CONSTRAINT "IDXS_pkey" PRIMARY KEY ("INDEX_ID");
+
+
+--
+-- Name: INDEX_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "INDEX_PARAMS"
+ ADD CONSTRAINT "INDEX_PARAMS_pkey" PRIMARY KEY ("INDEX_ID", "PARAM_KEY");
+
+
+--
+-- Name: NUCLEUS_TABLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "NUCLEUS_TABLES"
+ ADD CONSTRAINT "NUCLEUS_TABLES_pkey" PRIMARY KEY ("CLASS_NAME");
+
+
+--
+-- Name: PARTITIONS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+ ADD CONSTRAINT "PARTITIONS_pkey" PRIMARY KEY ("PART_ID");
+
+
+--
+-- Name: PARTITION_EVENTS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_EVENTS"
+ ADD CONSTRAINT "PARTITION_EVENTS_pkey" PRIMARY KEY ("PART_NAME_ID");
+
+
+--
+-- Name: PARTITION_KEYS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_KEYS"
+ ADD CONSTRAINT "PARTITION_KEYS_pkey" PRIMARY KEY ("TBL_ID", "PKEY_NAME");
+
+
+--
+-- Name: PARTITION_KEY_VALS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_KEY_VALS"
+ ADD CONSTRAINT "PARTITION_KEY_VALS_pkey" PRIMARY KEY ("PART_ID", "INTEGER_IDX");
+
+
+--
+-- Name: PARTITION_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_PARAMS"
+ ADD CONSTRAINT "PARTITION_PARAMS_pkey" PRIMARY KEY ("PART_ID", "PARAM_KEY");
+
+
+--
+-- Name: PART_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PART_COL_PRIVS"
+ ADD CONSTRAINT "PART_COL_PRIVS_pkey" PRIMARY KEY ("PART_COLUMN_GRANT_ID");
+
+
+--
+-- Name: PART_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PART_PRIVS"
+ ADD CONSTRAINT "PART_PRIVS_pkey" PRIMARY KEY ("PART_GRANT_ID");
+
+
+--
+-- Name: ROLEENTITYINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLES"
+ ADD CONSTRAINT "ROLEENTITYINDEX" UNIQUE ("ROLE_NAME");
+
+
+--
+-- Name: ROLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLES"
+ ADD CONSTRAINT "ROLES_pkey" PRIMARY KEY ("ROLE_ID");
+
+
+--
+-- Name: ROLE_MAP_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+ ADD CONSTRAINT "ROLE_MAP_pkey" PRIMARY KEY ("ROLE_GRANT_ID");
+
+
+--
+-- Name: SDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SDS"
+ ADD CONSTRAINT "SDS_pkey" PRIMARY KEY ("SD_ID");
+
+
+--
+-- Name: SD_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SD_PARAMS"
+ ADD CONSTRAINT "SD_PARAMS_pkey" PRIMARY KEY ("SD_ID", "PARAM_KEY");
+
+
+--
+-- Name: SEQUENCE_TABLE_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SEQUENCE_TABLE"
+ ADD CONSTRAINT "SEQUENCE_TABLE_pkey" PRIMARY KEY ("SEQUENCE_NAME");
+
+
+--
+-- Name: SERDES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SERDES"
+ ADD CONSTRAINT "SERDES_pkey" PRIMARY KEY ("SERDE_ID");
+
+
+--
+-- Name: SERDE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SERDE_PARAMS"
+ ADD CONSTRAINT "SERDE_PARAMS_pkey" PRIMARY KEY ("SERDE_ID", "PARAM_KEY");
+
+
+--
+-- Name: SORT_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SORT_COLS"
+ ADD CONSTRAINT "SORT_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+
+--
+-- Name: TABLE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TABLE_PARAMS"
+ ADD CONSTRAINT "TABLE_PARAMS_pkey" PRIMARY KEY ("TBL_ID", "PARAM_KEY");
+
+
+--
+-- Name: TBLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBLS"
+ ADD CONSTRAINT "TBLS_pkey" PRIMARY KEY ("TBL_ID");
+
+
+--
+-- Name: TBL_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBL_COL_PRIVS"
+ ADD CONSTRAINT "TBL_COL_PRIVS_pkey" PRIMARY KEY ("TBL_COLUMN_GRANT_ID");
+
+
+--
+-- Name: TBL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBL_PRIVS"
+ ADD CONSTRAINT "TBL_PRIVS_pkey" PRIMARY KEY ("TBL_GRANT_ID");
+
+
+--
+-- Name: TYPES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPES"
+ ADD CONSTRAINT "TYPES_pkey" PRIMARY KEY ("TYPES_ID");
+
+
+--
+-- Name: TYPE_FIELDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPE_FIELDS"
+ ADD CONSTRAINT "TYPE_FIELDS_pkey" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME");
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST"
+ ADD CONSTRAINT "SKEWED_STRING_LIST_pkey" PRIMARY KEY ("STRING_LIST_ID");
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
+ ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_pkey" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX");
+
+
+ALTER TABLE ONLY "SKEWED_COL_NAMES"
+ ADD CONSTRAINT "SKEWED_COL_NAMES_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+ ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_pkey" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID");
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+ ADD CONSTRAINT "SKEWED_VALUES_pkey" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX");
+
+--
+-- Name: TAB_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_pkey" PRIMARY KEY("CS_ID");
+
+--
+-- Name: PART_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_pkey" PRIMARY KEY("CS_ID");
+
+--
+-- Name: UNIQUEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "IDXS"
+ ADD CONSTRAINT "UNIQUEINDEX" UNIQUE ("INDEX_NAME", "ORIG_TBL_ID");
+
+
+--
+-- Name: UNIQUEPARTITION; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+ ADD CONSTRAINT "UNIQUEPARTITION" UNIQUE ("PART_NAME", "TBL_ID");
+
+
+--
+-- Name: UNIQUETABLE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBLS"
+ ADD CONSTRAINT "UNIQUETABLE" UNIQUE ("TBL_NAME", "DB_ID");
+
+
+--
+-- Name: UNIQUE_DATABASE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DBS"
+ ADD CONSTRAINT "UNIQUE_DATABASE" UNIQUE ("NAME");
+
+
+--
+-- Name: UNIQUE_TYPE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPES"
+ ADD CONSTRAINT "UNIQUE_TYPE" UNIQUE ("TYPE_NAME");
+
+
+--
+-- Name: USERROLEMAPINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+ ADD CONSTRAINT "USERROLEMAPINDEX" UNIQUE ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: BUCKETING_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "BUCKETING_COLS_N49" ON "BUCKETING_COLS" USING btree ("SD_ID");
+
+
+--
+-- Name: COLUMNS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "COLUMNS_N49" ON "COLUMNS_OLD" USING btree ("SD_ID");
+
+
+--
+-- Name: DATABASE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "DATABASE_PARAMS_N49" ON "DATABASE_PARAMS" USING btree ("DB_ID");
+
+
+--
+-- Name: DB_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "DB_PRIVS_N49" ON "DB_PRIVS" USING btree ("DB_ID");
+
+
+--
+-- Name: IDXS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N49" ON "IDXS" USING btree ("ORIG_TBL_ID");
+
+
+--
+-- Name: IDXS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N50" ON "IDXS" USING btree ("INDEX_TBL_ID");
+
+
+--
+-- Name: IDXS_N51; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N51" ON "IDXS" USING btree ("SD_ID");
+
+
+--
+-- Name: INDEX_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "INDEX_PARAMS_N49" ON "INDEX_PARAMS" USING btree ("INDEX_ID");
+
+
+--
+-- Name: PARTITIONCOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONCOLUMNPRIVILEGEINDEX" ON "PART_COL_PRIVS" USING btree ("PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: PARTITIONEVENTINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONEVENTINDEX" ON "PARTITION_EVENTS" USING btree ("PARTITION_NAME");
+
+
+--
+-- Name: PARTITIONS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONS_N49" ON "PARTITIONS" USING btree ("TBL_ID");
+
+
+--
+-- Name: PARTITIONS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONS_N50" ON "PARTITIONS" USING btree ("SD_ID");
+
+
+--
+-- Name: PARTITION_KEYS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_KEYS_N49" ON "PARTITION_KEYS" USING btree ("TBL_ID");
+
+
+--
+-- Name: PARTITION_KEY_VALS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_KEY_VALS_N49" ON "PARTITION_KEY_VALS" USING btree ("PART_ID");
+
+
+--
+-- Name: PARTITION_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_PARAMS_N49" ON "PARTITION_PARAMS" USING btree ("PART_ID");
+
+
+--
+-- Name: PARTPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTPRIVILEGEINDEX" ON "PART_PRIVS" USING btree ("PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: PART_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_COL_PRIVS_N49" ON "PART_COL_PRIVS" USING btree ("PART_ID");
+
+
+--
+-- Name: PART_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_PRIVS_N49" ON "PART_PRIVS" USING btree ("PART_ID");
+
+
+--
+-- Name: ROLE_MAP_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "ROLE_MAP_N49" ON "ROLE_MAP" USING btree ("ROLE_ID");
+
+
+--
+-- Name: SDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SDS_N49" ON "SDS" USING btree ("SERDE_ID");
+
+
+--
+-- Name: SD_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SD_PARAMS_N49" ON "SD_PARAMS" USING btree ("SD_ID");
+
+
+--
+-- Name: SERDE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SERDE_PARAMS_N49" ON "SERDE_PARAMS" USING btree ("SERDE_ID");
+
+
+--
+-- Name: SORT_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SORT_COLS_N49" ON "SORT_COLS" USING btree ("SD_ID");
+
+
+--
+-- Name: TABLECOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLECOLUMNPRIVILEGEINDEX" ON "TBL_COL_PRIVS" USING btree ("TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: TABLEPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLEPRIVILEGEINDEX" ON "TBL_PRIVS" USING btree ("TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: TABLE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLE_PARAMS_N49" ON "TABLE_PARAMS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TBLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBLS_N49" ON "TBLS" USING btree ("DB_ID");
+
+
+--
+-- Name: TBLS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBLS_N50" ON "TBLS" USING btree ("SD_ID");
+
+
+--
+-- Name: TBL_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBL_COL_PRIVS_N49" ON "TBL_COL_PRIVS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TBL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBL_PRIVS_N49" ON "TBL_PRIVS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TYPE_FIELDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TYPE_FIELDS_N49" ON "TYPE_FIELDS" USING btree ("TYPE_NAME");
+
+--
+-- Name: TAB_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TAB_COL_STATS_N49" ON "TAB_COL_STATS" USING btree ("TBL_ID");
+
+--
+-- Name: PART_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_COL_STATS_N49" ON "PART_COL_STATS" USING btree ("PART_ID");
+
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
+ ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_fkey" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "SKEWED_COL_NAMES"
+ ADD CONSTRAINT "SKEWED_COL_NAMES_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+ ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey1" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+ ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+ ADD CONSTRAINT "SKEWED_VALUES_fkey1" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+ ADD CONSTRAINT "SKEWED_VALUES_fkey2" FOREIGN KEY ("SD_ID_OID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: BUCKETING_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "BUCKETING_COLS"
+ ADD CONSTRAINT "BUCKETING_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: COLUMNS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "COLUMNS_OLD"
+ ADD CONSTRAINT "COLUMNS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: COLUMNS_V2_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "COLUMNS_V2"
+ ADD CONSTRAINT "COLUMNS_V2_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
+
+
+--
+-- Name: DATABASE_PARAMS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "DATABASE_PARAMS"
+ ADD CONSTRAINT "DATABASE_PARAMS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: DB_PRIVS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+ ADD CONSTRAINT "DB_PRIVS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_INDEX_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+ ADD CONSTRAINT "IDXS_INDEX_TBL_ID_fkey" FOREIGN KEY ("INDEX_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_ORIG_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+ ADD CONSTRAINT "IDXS_ORIG_TBL_ID_fkey" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+ ADD CONSTRAINT "IDXS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: INDEX_PARAMS_INDEX_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "INDEX_PARAMS"
+ ADD CONSTRAINT "INDEX_PARAMS_INDEX_ID_fkey" FOREIGN KEY ("INDEX_ID") REFERENCES "IDXS"("INDEX_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITIONS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+ ADD CONSTRAINT "PARTITIONS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITIONS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+ ADD CONSTRAINT "PARTITIONS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_KEYS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_KEYS"
+ ADD CONSTRAINT "PARTITION_KEYS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_KEY_VALS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_KEY_VALS"
+ ADD CONSTRAINT "PARTITION_KEY_VALS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_PARAMS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_PARAMS"
+ ADD CONSTRAINT "PARTITION_PARAMS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_COL_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PART_COL_PRIVS"
+ ADD CONSTRAINT "PART_COL_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PART_PRIVS"
+ ADD CONSTRAINT "PART_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: ROLE_MAP_ROLE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+ ADD CONSTRAINT "ROLE_MAP_ROLE_ID_fkey" FOREIGN KEY ("ROLE_ID") REFERENCES "ROLES"("ROLE_ID") DEFERRABLE;
+
+
+--
+-- Name: SDS_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SDS"
+ ADD CONSTRAINT "SDS_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
+
+
+--
+-- Name: SDS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SDS"
+ ADD CONSTRAINT "SDS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
+
+
+--
+-- Name: SD_PARAMS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SD_PARAMS"
+ ADD CONSTRAINT "SD_PARAMS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: SERDE_PARAMS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SERDE_PARAMS"
+ ADD CONSTRAINT "SERDE_PARAMS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
+
+
+--
+-- Name: SORT_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SORT_COLS"
+ ADD CONSTRAINT "SORT_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: TABLE_PARAMS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TABLE_PARAMS"
+ ADD CONSTRAINT "TABLE_PARAMS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TBLS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBLS"
+ ADD CONSTRAINT "TBLS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: TBLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBLS"
+ ADD CONSTRAINT "TBLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: TBL_COL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBL_COL_PRIVS"
+ ADD CONSTRAINT "TBL_COL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TBL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBL_PRIVS"
+ ADD CONSTRAINT "TBL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TYPE_FIELDS_TYPE_NAME_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TYPE_FIELDS"
+ ADD CONSTRAINT "TYPE_FIELDS_TYPE_NAME_fkey" FOREIGN KEY ("TYPE_NAME") REFERENCES "TYPES"("TYPES_ID") DEFERRABLE;
+
+--
+-- Name: TAB_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_fkey" FOREIGN KEY("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_fkey" FOREIGN KEY("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "VERSION" ADD CONSTRAINT "VERSION_pkey" PRIMARY KEY ("VER_ID");
+
+--
+-- Name: public; Type: ACL; Schema: -; Owner: hiveuser
+--
+
+REVOKE ALL ON SCHEMA public FROM PUBLIC;
+GRANT ALL ON SCHEMA public TO PUBLIC;
+
+
+INSERT INTO "VERSION" ("VER_ID", "SCHEMA_VERSION", "VERSION_COMMENT") VALUES (1, '0.12.0', 'Hive release version 0.12.0');
+--
+-- PostgreSQL database dump complete
+--
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/metainfo.xml
new file mode 100644
index 0000000000..af45930c26
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/metainfo.xml
@@ -0,0 +1,210 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>HIVE</name>
+ <displayName>Hive</displayName>
+ <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
+ <version>0.12.0.2.0</version>
+ <components>
+
+ <component>
+ <name>HIVE_METASTORE</name>
+ <displayName>Hive Metastore</displayName>
+ <category>MASTER</category>
+ <cardinality>1</cardinality>
+ <auto-deploy>
+ <enabled>true</enabled>
+ <co-locate>HIVE/HIVE_SERVER</co-locate>
+ </auto-deploy>
+ <commandScript>
+ <script>scripts/hive_metastore.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>1200</timeout>
+ </commandScript>
+ </component>
+
+ <component>
+ <name>HIVE_SERVER</name>
+ <displayName>HiveServer2</displayName>
+ <category>MASTER</category>
+ <cardinality>1</cardinality>
+ <dependencies>
+ <dependency>
+ <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+ <scope>cluster</scope>
+ <auto-deploy>
+ <enabled>true</enabled>
+ <co-locate>HIVE/HIVE_SERVER</co-locate>
+ </auto-deploy>
+ </dependency>
+ <dependency>
+ <name>TEZ/TEZ_CLIENT</name>
+ <scope>host</scope>
+ <auto-deploy>
+ <enabled>true</enabled>
+ </auto-deploy>
+ </dependency>
+ <dependency>
+ <name>YARN/YARN_CLIENT</name>
+ <scope>host</scope>
+ <auto-deploy>
+ <enabled>true</enabled>
+ </auto-deploy>
+ </dependency>
+ <dependency>
+ <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+ <scope>host</scope>
+ <auto-deploy>
+ <enabled>true</enabled>
+ </auto-deploy>
+ </dependency>
+ </dependencies>
+ <commandScript>
+ <script>scripts/hive_server.py</script>
+ <scriptType>PYTHON</scriptType>
+ </commandScript>
+ </component>
+ <component>
+ <name>WEBHCAT_SERVER</name>
+ <displayName>WebHCat Server</displayName>
+ <category>MASTER</category>
+ <cardinality>1</cardinality>
+ <clientsToUpdateConfigs>
+ <client>HCAT</client>
+ </clientsToUpdateConfigs>
+ <dependencies>
+ <dependency>
+ <name>HDFS/HDFS_CLIENT</name>
+ <scope>host</scope>
+ <auto-deploy>
+ <enabled>true</enabled>
+ </auto-deploy>
+ </dependency>
+ <dependency>
+ <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+ <scope>host</scope>
+ <auto-deploy>
+ <enabled>true</enabled>
+ </auto-deploy>
+ </dependency>
+ <dependency>
+ <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+ <scope>cluster</scope>
+ <auto-deploy>
+ <enabled>true</enabled>
+ <co-locate>HIVE/WEBHCAT_SERVER</co-locate>
+ </auto-deploy>
+ </dependency>
+ <dependency>
+ <name>ZOOKEEPER/ZOOKEEPER_CLIENT</name>
+ <scope>host</scope>
+ <auto-deploy>
+ <enabled>true</enabled>
+ </auto-deploy>
+ </dependency>
+ <dependency>
+ <name>YARN/YARN_CLIENT</name>
+ <scope>host</scope>
+ <auto-deploy>
+ <enabled>true</enabled>
+ </auto-deploy>
+ </dependency>
+ </dependencies>
+ <commandScript>
+ <script>scripts/webhcat_server.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>1200</timeout>
+ </commandScript>
+ </component>
+ <component>
+ <name>MYSQL_SERVER</name>
+ <displayName>MySQL Server</displayName>
+ <category>MASTER</category>
+ <cardinality>1</cardinality>
+ <commandScript>
+ <script>scripts/mysql_server.py</script>
+ <scriptType>PYTHON</scriptType>
+ </commandScript>
+ </component>
+
+ <component>
+ <name>HIVE_CLIENT</name>
+ <displayName>Hive Client</displayName>
+ <category>CLIENT</category>
+ <cardinality>0+</cardinality>
+ <commandScript>
+ <script>scripts/hive_client.py</script>
+ <scriptType>PYTHON</scriptType>
+ </commandScript>
+ <configFiles>
+ <configFile>
+ <type>xml</type>
+ <fileName>hive-site.xml</fileName>
+ <dictionaryName>hive-site</dictionaryName>
+ </configFile>
+ <configFile>
+ <type>env</type>
+ <fileName>hive-env.cmd</fileName>
+ <dictionaryName>hive-env</dictionaryName>
+ </configFile>
+ </configFiles>
+ </component>
+ <component>
+ <name>HCAT</name>
+ <displayName>HCat Client</displayName>
+ <category>CLIENT</category>
+ <cardinality>0+</cardinality>
+ <commandScript>
+ <script>scripts/hcat_client.py</script>
+ <scriptType>PYTHON</scriptType>
+ </commandScript>
+ <configFiles>
+ <configFile>
+ <type>env</type>
+ <fileName>hcat-env.cmd</fileName>
+ <dictionaryName>hcat-env</dictionaryName>
+ </configFile>
+ </configFiles>
+ </component>
+
+ </components>
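+ <!--
+ Illustrative sketch (not part of the upstream file): a client-only
+ component entry follows the same shape as those above. EXAMPLE_CLIENT
+ is a hypothetical name used purely for illustration.
+ <component>
+ <name>EXAMPLE_CLIENT</name>
+ <displayName>Example Client</displayName>
+ <category>CLIENT</category>
+ <cardinality>0+</cardinality>
+ </component>
+ -->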
+
+ <commandScript>
+ <script>scripts/service_check.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>1200</timeout>
+ </commandScript>
+
+ <requiredServices>
+ <service>ZOOKEEPER</service>
+ <service>YARN</service>
+ <service>TEZ</service>
+ </requiredServices>
+
+ <configuration-dependencies>
+ <config-type>hive-site</config-type>
+ <config-type>hive-env</config-type>
+ <config-type>webhcat-site</config-type>
+ </configuration-dependencies>
+ </service>
+
+ </services>
+</metainfo>
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/files/templetonSmoke.sh b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/files/templetonSmoke.sh
new file mode 100644
index 0000000000..2d07b8b813
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/files/templetonSmoke.sh
@@ -0,0 +1,96 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export ttonhost=$1
+export smoke_test_user=$2
+export smoke_user_keytab=$3
+export security_enabled=$4
+export kinit_path_local=$5
+export ttonurl="http://${ttonhost}:50111/templeton/v1"
+
+if [[ $security_enabled == "true" ]]; then
+ kinitcmd="${kinit_path_local} -kt ${smoke_user_keytab} ${smoke_test_user}; "
+else
+ kinitcmd=""
+fi
+
+export no_proxy=$ttonhost
+cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>' $ttonurl/status 2>&1"
+retVal=`su - ${smoke_test_user} -c "$cmd"`
+httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
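+# retVal ends with "http_code <NNN>"; the sed expression strips everything
+# but the numeric status code.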
+
+if [[ "$httpExitCode" -ne "200" ]] ; then
+ echo "Templeton Smoke Test (status cmd): Failed. : $retVal"
+ export TEMPLETON_EXIT_CODE=1
+ exit 1
+fi
+
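+# NOTE: this early exit means the hcat ddl and pig checks below are
+# currently skipped on this stack; remove it to enable them.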
+exit 0
+
+#try hcat ddl command
+echo "user.name=${smoke_test_user}&exec=show databases;" /tmp/show_db.post.txt
+cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>' -d \@${destdir}/show_db.post.txt $ttonurl/ddl 2>&1"
+retVal=`su - ${smoke_test_user} -c "$cmd"`
+httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
+
+if [[ "$httpExitCode" -ne "200" ]] ; then
+ echo "Templeton Smoke Test (ddl cmd): Failed. : $retVal"
+ export TEMPLETON_EXIT_CODE=1
+ exit 1
+fi
+
+# Skip the Pig smoke test when security is enabled
+if [[ $security_enabled == "true" ]]; then
+ echo "Templeton Pig Smoke Tests not run in secure mode"
+ exit 0
+fi
+
+#try pig query
+outname=${smoke_test_user}.`date +"%M%d%y"`.$$;
+ttonTestOutput="/tmp/idtest.${outname}.out";
+ttonTestInput="/tmp/idtest.${outname}.in";
+ttonTestScript="idtest.${outname}.pig"
+
+echo "A = load '$ttonTestInput' using PigStorage(':');" > /tmp/$ttonTestScript
+echo "B = foreach A generate \$0 as id; " >> /tmp/$ttonTestScript
+echo "store B into '$ttonTestOutput';" >> /tmp/$ttonTestScript
+
+#copy pig script to hdfs
+su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal /tmp/$ttonTestScript /tmp/$ttonTestScript"
+
+#copy input file to hdfs
+su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal /etc/passwd $ttonTestInput"
+
+#create, copy post args file
+echo -n "user.name=${smoke_test_user}&file=/tmp/$ttonTestScript" > /tmp/pig_post.txt
+
+#submit pig query
+cmd="curl -s -w 'http_code <%{http_code}>' -d \@${destdir}/pig_post.txt $ttonurl/pig 2>&1"
+retVal=`su - ${smoke_test_user} -c "$cmd"`
+httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
+if [[ "$httpExitCode" -ne "200" ]] ; then
+ echo "Templeton Smoke Test (pig cmd): Failed. : $retVal"
+ export TEMPLETON_EXIT_CODE=1
+ exit 1
+fi
+
+exit 0
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/hcat_client.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/hcat_client.py
new file mode 100644
index 0000000000..33c1fdf7ef
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/hcat_client.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+class HCatClient(Script):
+ def install(self, env):
+ import params
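+ # params.hcat_home is read via os.environ["HCAT_HOME"], which raises
+ # KeyError rather than returning None when the variable is missing; this
+ # check only fires if the lookup is changed to os.environ.get("HCAT_HOME").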
+ if params.hcat_home is None:
+ self.install_packages(env)
+ self.configure(env)
+
+ def configure(self, env):
+ import params
+ env.set_params(params)
+
+ def status(self, env):
+ raise ClientComponentHasNoStatus()
+
+
+if __name__ == "__main__":
+ HCatClient().execute()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/hcat_service_check.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/hcat_service_check.py
new file mode 100644
index 0000000000..69535d5770
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/hcat_service_check.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.libraries.functions import get_unique_id_and_date
+
+def hcat_service_check():
+ import params
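+ # Currently a no-op stub; service_check.py imports this function, and the
+ # HCat path is presumably exercised by the HIVE smoke test it runs.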
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/hive.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/hive.py
new file mode 100644
index 0000000000..f5548e026a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/hive.py
@@ -0,0 +1,33 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+from resource_management.libraries import functions
+
+def hive(name=None):
+ import params
+ XmlConfig("hive-site.xml",
+ conf_dir = params.hive_conf_dir,
+ configurations = params.config['configurations']['hive-site'],
+ owner=params.hive_user
+ )
+ if name in ["hiveserver2","metastore"]:
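+ # resource_management's format() interpolates {hive_warehouse_dir} from
+ # the surrounding scope before the command is executed.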
+ Execute(format("cmd /c hadoop fs -mkdir -p {hive_warehouse_dir}"), logoutput=True, user=params.hadoop_user) \ No newline at end of file
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/hive_client.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/hive_client.py
new file mode 100644
index 0000000000..5f1f507b38
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/hive_client.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import sys
+from resource_management import *
+from hive import hive
+import service_mapping
+
+class HiveClient(Script):
+ def install(self, env):
+ if not check_windows_service_exists(service_mapping.hive_client_win_service_name):
+ self.install_packages(env)
+ self.configure(env)
+
+ def configure(self, env):
+ import params
+ env.set_params(params)
+
+ def status(self, env):
+ import params
+ check_windows_service_status(service_mapping.hive_client_win_service_name)
+
+if __name__ == "__main__":
+ HiveClient().execute()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/hive_metastore.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/hive_metastore.py
new file mode 100644
index 0000000000..6772e8b865
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/hive_metastore.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from hive import hive
+import service_mapping
+
+class HiveMetastore(Script):
+
+ def install(self, env):
+ if not check_windows_service_exists(service_mapping.hive_metastore_win_service_name):
+ self.install_packages(env)
+ self.configure(env)
+
+ def configure(self, env):
+ import params
+ env.set_params(params)
+ hive(name='metastore')
+
+ def start(self, env):
+ import params
+ env.set_params(params)
+ self.configure(env) # FOR SECURITY
+ Service(service_mapping.hive_metastore_win_service_name, action="start")
+
+ def stop(self, env):
+ import params
+ Service(service_mapping.hive_metastore_win_service_name, action="stop")
+
+ def status(self, env):
+ import params
+ check_windows_service_status(service_mapping.hive_metastore_win_service_name)
+
+if __name__ == "__main__":
+ HiveMetastore().execute()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/hive_server.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/hive_server.py
new file mode 100644
index 0000000000..f43084b162
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/hive_server.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from hive import hive
+import service_mapping
+
+class HiveServer(Script):
+
+ def install(self, env):
+ if not check_windows_service_exists(service_mapping.hive_server_win_service_name):
+ self.install_packages(env)
+ self.configure(env)
+
+ def configure(self, env):
+ import params
+ env.set_params(params)
+ hive(name='hiveserver2')
+
+ def start(self, env):
+ import params
+ env.set_params(params)
+ self.configure(env) # FOR SECURITY
+ Service(service_mapping.hive_server_win_service_name, action="start")
+
+ def stop(self, env):
+ import params
+ Service(service_mapping.hive_server_win_service_name, action="stop")
+
+ def status(self, env):
+ import params
+ check_windows_service_status(service_mapping.hive_server_win_service_name)
+
+if __name__ == "__main__":
+ HiveServer().execute()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/mysql_server.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/mysql_server.py
new file mode 100644
index 0000000000..7fd06ded74
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/mysql_server.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+class MysqlServer(Script):
+
+ def install(self, env):
+ self.install_packages(env)
+ self.configure(env)
+
+ def configure(self, env):
+ import params
+ env.set_params(params)
+
+ def start(self, env):
+ import params
+ env.set_params(params)
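+ # Currently a no-op beyond loading params; no service action is performed
+ # for the database on this stack.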
+
+ def stop(self, env):
+ import params
+ env.set_params(params)
+
+ def status(self, env):
+ import status_params
+
+if __name__ == "__main__":
+ MysqlServer().execute()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/params.py
new file mode 100644
index 0000000000..041669faf8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/params.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+from resource_management import *
+
+# server configurations
+config = Script.get_config()
+
+hdp_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"],".."))
+hive_conf_dir = os.environ["HIVE_CONF_DIR"]
+hive_home = os.environ["HIVE_HOME"]
+hive_lib_dir = os.environ["HIVE_LIB_DIR"]
+hive_log_dir = os.environ["HIVE_LOG_DIR"]
+hive_opts = os.environ["HIVE_OPTS"]
+hcat_home = os.environ["HCAT_HOME"]
+hcat_config_dir = os.environ["WEBHCAT_CONF_DIR"]
+
+hive_env_sh_template = config['configurations']['hive-env']['content']
+hive_warehouse_dir = config['configurations']['hive-site']['hive.metastore.warehouse.dir']
+hive_user = "hadoop"
+hadoop_user = "hadoop"
+hcat_user = "hadoop"
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/service_check.py
new file mode 100644
index 0000000000..619be7ac6a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/service_check.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import os
+import socket
+import sys
+
+from hcat_service_check import hcat_service_check
+from webhcat_service_check import webhcat_service_check
+
+class HiveServiceCheck(Script):
+ def service_check(self, env):
+ import params
+ env.set_params(params)
+ smoke_cmd = os.path.join(params.hdp_root,"Run-SmokeTests.cmd")
+ service = "HIVE"
+ Execute(format("cmd /C {smoke_cmd} {service}"), user=params.hive_user, logoutput=True)
+
+ webhcat_service_check()
+
+if __name__ == "__main__":
+ HiveServiceCheck().execute()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/service_mapping.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/service_mapping.py
new file mode 100644
index 0000000000..84dd8973a8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/service_mapping.py
@@ -0,0 +1,23 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+hive_metastore_win_service_name = "metastore"
+hive_client_win_service_name = "hwi"
+hive_server_win_service_name = "hiveserver2"
+webhcat_server_win_service_name = "templeton" \ No newline at end of file
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/webhcat.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/webhcat.py
new file mode 100644
index 0000000000..032ca79e82
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/webhcat.py
@@ -0,0 +1,30 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from resource_management import *
+import sys
+
+
+def webhcat():
+ import params
+ XmlConfig("webhcat-site.xml",
+ conf_dir=params.hcat_config_dir,  # params defines hcat_config_dir (WEBHCAT_CONF_DIR); config_dir does not exist
+ configurations=params.config['configurations']['webhcat-site']
+ )
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/webhcat_server.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/webhcat_server.py
new file mode 100644
index 0000000000..527efb8d18
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/webhcat_server.py
@@ -0,0 +1,48 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import sys
+from resource_management import *
+from webhcat import webhcat
+import service_mapping
+
+class WebHCatServer(Script):
+ def install(self, env):
+ if not check_windows_service_exists(service_mapping.webhcat_server_win_service_name):
+ self.install_packages(env)
+
+ def configure(self, env):
+ import params
+ env.set_params(params)
+ webhcat()
+
+ def start(self, env):
+ import params
+ self.configure(env) # FOR SECURITY
+ Service(service_mapping.webhcat_server_win_service_name, action="start")
+
+ def stop(self, env):
+ Service(service_mapping.webhcat_server_win_service_name, action="stop")
+
+ def status(self, env):
+ check_windows_service_status(service_mapping.webhcat_server_win_service_name)
+
+if __name__ == "__main__":
+ WebHCatServer().execute()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/webhcat_service_check.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/webhcat_service_check.py
new file mode 100644
index 0000000000..1fa08a313f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/webhcat_service_check.py
@@ -0,0 +1,27 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import os
+from resource_management import *
+
+def webhcat_service_check():
+ import params
+ smoke_cmd = os.path.join(params.hdp_root,"Run-SmokeTests.cmd")
+ service = "WEBHCAT"
+ Execute(format("cmd /C {smoke_cmd} {service}"), user=params.hcat_user, logoutput=True) \ No newline at end of file
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/templates/webhcat-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/templates/webhcat-env.sh.j2
new file mode 100644
index 0000000000..220f42029e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/templates/webhcat-env.sh.j2
@@ -0,0 +1,62 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+# The file containing the running pid
+PID_FILE={{pid_file}}
+
+TEMPLETON_LOG_DIR={{templeton_log_dir}}/
+
+
+WEBHCAT_LOG_DIR={{templeton_log_dir}}/
+
+# The console error log
+ERROR_LOG={{templeton_log_dir}}/webhcat-console-error.log
+
+# The console log
+CONSOLE_LOG={{templeton_log_dir}}/webhcat-console.log
+
+#TEMPLETON_JAR=templeton_jar_name
+
+#HADOOP_PREFIX=hadoop_prefix
+
+#HCAT_PREFIX=hive_prefix
+
+# Set HADOOP_HOME to point to a specific hadoop install directory
+export HADOOP_HOME=/usr/lib/hadoop
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-env.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-env.xml
new file mode 100644
index 0000000000..423db7315e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-env.xml
@@ -0,0 +1,140 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+ <property>
+ <name>oozie_user</name>
+ <value>oozie</value>
+ <description>Oozie User.</description>
+ </property>
+ <property>
+ <name>oozie_hostname</name>
+ <value></value>
+ <description>
+ Specify the host on which the OOZIE database is hosted.
+ </description>
+ </property>
+ <property>
+ <name>oozie_database</name>
+ <value>Existing MSSQL Server database with sql auth</value>
+ <description>Oozie Server Database.</description>
+ </property>
+ <property>
+ <name>oozie_derby_database</name>
+ <value>Derby</value>
+ <description>Oozie Derby Database</description>
+ </property>
+ <property>
+ <name>oozie_data_dir</name>
+ <value>/hadoop/oozie/data</value>
+ <description>Data directory in which the Oozie DB exists</description>
+ </property>
+ <property>
+ <name>oozie_log_dir</name>
+ <value>/var/log/oozie</value>
+ <description>Directory for oozie logs</description>
+ </property>
+ <property>
+ <name>oozie_pid_dir</name>
+ <value>/var/run/oozie</value>
+ <description>Directory in which the pid files for oozie reside.</description>
+ </property>
+ <property>
+ <name>oozie_admin_port</name>
+ <value>11001</value>
+ <description>The admin port Oozie server runs.</description>
+ </property>
+
+ <!-- oozie-env.cmd -->
+ <property>
+ <name>content</name>
+ <description>oozie-env.cmd content</description>
+ <value>
+@rem Licensed to the Apache Software Foundation (ASF) under one
+@rem or more contributor license agreements. See the NOTICE file
+@rem distributed with this work for additional information
+@rem regarding copyright ownership. The ASF licenses this file
+@rem to you under the Apache License, Version 2.0 (the
+@rem "License"); you may not use this file except in compliance
+@rem with the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+@rem
+
+@rem Set Oozie specific environment variables here.
+
+@rem Settings for the Embedded Tomcat that runs Oozie
+@rem Java System properties for Oozie should be specified in this variable
+@rem
+set CATALINA_OPTS=%CATALINA_OPTS% -Xmx1024m
+
+@rem Oozie configuration file to load from Oozie configuration directory
+@rem
+@rem set OOZIE_CONFIG_FILE=oozie-site.xml
+
+@rem Oozie logs directory
+@rem
+@rem set OOZIE_LOG=%OOZIE_HOME%\logs
+
+@rem Oozie Log4J configuration file to load from Oozie configuration directory
+@rem
+@rem set OOZIE_LOG4J_FILE=oozie-log4j.properties
+
+@rem Reload interval of the Log4J configuration file, in seconds
+@rem
+@rem set OOZIE_LOG4J_RELOAD=10
+
+@rem The port Oozie server runs
+@rem
+@rem set OOZIE_HTTP_PORT=11000
+
+@rem The port Oozie server runs if using SSL (HTTPS)
+@rem
+@rem set OOZIE_HTTPS_PORT=11443
+
+@rem The host name Oozie server runs on
+@rem
+@rem set OOZIE_HTTP_HOSTNAME=%COMPUTERNAME%
+
+@rem The base URL for callback URLs to Oozie
+@rem
+@rem set OOZIE_BASE_URL="http://%OOZIE_HTTP_HOSTNAME%:%OOZIE_HTTP_PORT%/oozie"
+
+@rem The location of the keystore for the Oozie server if using SSL (HTTPS)
+@rem
+@rem set OOZIE_HTTPS_KEYSTORE_FILE=%HOME%/.keystore
+
+@rem The password of the keystore for the Oozie server if using SSL (HTTPS)
+@rem
+@rem set OOZIE_HTTPS_KEYSTORE_PASS=password
+
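+@rem Point the JVM native library path at the Hadoop native binaries under
+@rem %HADOOP_COMMON_HOME%\bin so the Oozie server process can load them.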
+set JAVA_LIBRARY_PATH=%HADOOP_COMMON_HOME%\bin
+ </value>
+ </property>
+
+</configuration>
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-log4j.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-log4j.xml
new file mode 100644
index 0000000000..cb77566e97
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-log4j.xml
@@ -0,0 +1,96 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+ <property>
+ <name>content</name>
+ <value>
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License. See accompanying LICENSE file.
+#
+
+# If the Java System property 'oozie.log.dir' is not defined at Oozie start up time
+# XLogService sets its value to '${oozie.home}/logs'
+
+log4j.appender.oozie=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozie.DatePattern='.'yyyy-MM-dd-HH
+log4j.appender.oozie.File=${oozie.log.dir}/oozie.log
+log4j.appender.oozie.Append=true
+log4j.appender.oozie.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozie.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.oozieops=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieops.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieops.File=${oozie.log.dir}/oozie-ops.log
+log4j.appender.oozieops.Append=true
+log4j.appender.oozieops.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieops.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.oozieinstrumentation=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieinstrumentation.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieinstrumentation.File=${oozie.log.dir}/oozie-instrumentation.log
+log4j.appender.oozieinstrumentation.Append=true
+log4j.appender.oozieinstrumentation.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieinstrumentation.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.oozieaudit=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieaudit.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieaudit.File=${oozie.log.dir}/oozie-audit.log
+log4j.appender.oozieaudit.Append=true
+log4j.appender.oozieaudit.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieaudit.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.openjpa=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.openjpa.DatePattern='.'yyyy-MM-dd
+log4j.appender.openjpa.File=${oozie.log.dir}/oozie-jpa.log
+log4j.appender.openjpa.Append=true
+log4j.appender.openjpa.layout=org.apache.log4j.PatternLayout
+log4j.appender.openjpa.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.logger.openjpa=INFO, openjpa
+log4j.logger.oozieops=INFO, oozieops
+log4j.logger.oozieinstrumentation=ALL, oozieinstrumentation
+log4j.logger.oozieaudit=ALL, oozieaudit
+log4j.logger.org.apache.oozie=INFO, oozie
+log4j.logger.org.apache.hadoop=WARN, oozie
+log4j.logger.org.mortbay=WARN, oozie
+log4j.logger.org.hsqldb=WARN, oozie
+log4j.logger.org.apache.hadoop.security.authentication.server=INFO, oozie
+ </value>
+ </property>
+
+</configuration>
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-site.xml
new file mode 100644
index 0000000000..2051d011e1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-site.xml
@@ -0,0 +1,502 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements. See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership. The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration supports_final="true">
+
+ <!--
+ Refer to the oozie-default.xml file for the complete list of
+ Oozie configuration properties and their default values.
+ -->
+
+ <property>
+ <name>oozie.service.ActionService.executor.ext.classes</name>
+ <value>
+ org.apache.oozie.action.email.EmailActionExecutor,
+ org.apache.oozie.action.hadoop.HiveActionExecutor,
+ org.apache.oozie.action.hadoop.ShellActionExecutor,
+ org.apache.oozie.action.hadoop.SqoopActionExecutor,
+ org.apache.oozie.action.hadoop.DistcpActionExecutor
+ </value>
+ </property>
+
+ <property>
+ <name>oozie.service.SchemaService.wf.ext.schemas</name>
+ <value>
+ shell-action-0.1.xsd,shell-action-0.2.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,
+ hive-action-0.3.xsd,hive-action-0.4.xsd,hive-action-0.5.xsd,sqoop-action-0.2.xsd,
+ sqoop-action-0.3.xsd,sqoop-action-0.4.xsd,ssh-action-0.1.xsd,ssh-action-0.2.xsd,distcp-action-0.1.xsd,
+ oozie-sla-0.1.xsd,oozie-sla-0.2.xsd
+ </value>
+ </property>
+
+ <property>
+ <name>oozie.system.id</name>
+ <value>oozie-${user.name}</value>
+ <description>
+ The Oozie system ID.
+ </description>
+ </property>
+
+ <property>
+ <name>oozie.systemmode</name>
+ <value>NORMAL</value>
+ <description>
+ System mode for Oozie at startup.
+ </description>
+ </property>
+
+ <property>
+ <name>oozie.service.AuthorizationService.security.enabled</name>
+ <value>true</value>
+ <description>
+ Specifies whether security (user name/admin role) is enabled or not.
+ If disabled, any user can manage the Oozie system and manage any job.
+ </description>
+ </property>
+
+ <property>
+ <name>oozie.service.PurgeService.older.than</name>
+ <value>30</value>
+ <description>
+ Jobs older than this value, in days, will be purged by the PurgeService.
+ </description>
+ </property>
+
+ <property>
+ <name>oozie.service.PurgeService.purge.interval</name>
+ <value>3600</value>
+ <description>
+ Interval at which the purge service will run, in seconds.
+ </description>
+ </property>
+
+ <property>
+ <name>oozie.service.CallableQueueService.queue.size</name>
+ <value>10000</value>
+ <description>Max callable queue size</description>
+ </property>
+
+ <property>
+ <name>oozie.service.CallableQueueService.threads</name>
+ <value>10</value>
+ <description>Number of threads used for executing callables</description>
+ </property>
+
+ <property>
+ <name>oozie.service.CallableQueueService.callable.concurrency</name>
+ <value>3</value>
+ <description>
+ Maximum concurrency for a given callable type.
+ Each command is a callable type (submit, start, run, signal, job, jobs, suspend, resume, etc).
+ Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
+ All commands that use action executors (action-start, action-end, action-kill and action-check) use
+ the action type as the callable type.
+ </description>
+ </property>
+
+ <property>
+ <name>oozie.service.coord.normal.default.timeout</name>
+ <value>120</value>
+ <description>Default timeout for a coordinator action input check (in minutes) for a normal job.
+ -1 means infinite timeout.</description>
+ </property>
+
+ <property>
+ <name>oozie.db.schema.name</name>
+ <value>oozie</value>
+ <description>
+ Oozie DataBase Name
+ </description>
+ </property>
+
+ <property>
+ <name>oozie.service.JPAService.create.db.schema</name>
+ <value>true</value>
+ <description>
+ Creates the Oozie DB.
+
+ If set to true, it creates the DB schema if it does not exist; if the DB schema exists, it is a NOP.
+ If set to false, it does not create the DB schema; if the DB schema does not exist, startup fails.
+ </description>
+ </property>
+
+ <property>
+ <name>oozie.service.JPAService.jdbc.driver</name>
+ <value>com.microsoft.sqlserver.jdbc.SQLServerDriver</value>
+ <description>
+ JDBC driver class.
+ </description>
+ </property>
+
+ <property>
+ <name>oozie.service.JPAService.jdbc.url</name>
+ <value></value>
+ <description>
+ JDBC URL.
+ </description>
+ </property>
+
+ <property>
+ <name>oozie.service.JPAService.jdbc.username</name>
+ <value>oozie</value>
+ <description>
+ DB user name.
+ </description>
+ </property>
+
+ <property>
+ <name>oozie.service.JPAService.jdbc.password</name>
+ <value>oozie</value>
+ <description>
+ DB user password.
+
+ IMPORTANT: if the password is empty, leave a 1-space string; the service trims the value,
+ and if it is empty, Configuration assumes it is NULL.
+ </description>
+ </property>
+
+ <property>
+ <name>oozie.service.JPAService.pool.max.active.conn</name>
+ <value>10</value>
+ <description>
+ Max number of connections.
+ </description>
+ </property>
+
+ <property>
+ <name>oozie.service.HadoopAccessorService.kerberos.enabled</name>
+ <value>false</value>
+ <description>
+ Indicates if Oozie is configured to use Kerberos.
+ </description>
+ </property>
+
+ <property>
+ <name>local.realm</name>
+ <value>LOCALHOST</value>
+ <description>
+ Kerberos Realm used by Oozie and Hadoop. Using 'local.realm' to be aligned with Hadoop configuration
+ </description>
+ </property>
+
+ <property>
+ <name>oozie.service.HadoopAccessorService.keytab.file</name>
+ <value>${user.home}/oozie.keytab</value>
+ <description>
+ Location of the Oozie user keytab file.
+ </description>
+ </property>
+
+ <property>
+ <name>oozie.service.HadoopAccessorService.kerberos.principal</name>
+ <value>${user.name}/localhost@${local.realm}</value>
+ <description>
+ Kerberos principal for Oozie service.
+ </description>
+ </property>
+
+ <property>
+ <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
+ <value> </value>
+ <description>
+ Whitelisted job tracker for Oozie service.
+ </description>
+ </property>
+
+ <property>
+ <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
+ <value> </value>
+ <description>
+ Whitelisted NameNode for Oozie service.
+ </description>
+ </property>
+
+ <property>
+ <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
+ <value>*=c:\hdp\hadoop\etc\hadoop</value>
+ <description>
+ Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
+ the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
+ used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
+ the relevant Hadoop *-site.xml files. If the path is relative, it is looked up within
+ the Oozie configuration directory; the path can also be absolute (i.e. pointing
+ to Hadoop client conf/ directories in the local filesystem).
+ </description>
+ </property>
+
+ <property>
+ <name>oozie.service.WorkflowAppService.system.libpath</name>
+ <value>/user/${user.name}/share/lib</value>
+ <description>
+ System library path to use for workflow applications.
+ This path is added to workflow applications if their job properties set
+ the property 'oozie.use.system.libpath' to true.
+ </description>
+ </property>
+
+ <property>
+ <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
+ <value>false</value>
+ <description>
+ If set to true, submissions of MapReduce and Pig jobs will automatically
+ include the system library path, thus not requiring users to
+ specify where the Pig JAR files are. Instead, the ones from the system
+ library path are used.
+ </description>
+ </property>
+
+ <property>
+ <name>oozie.authentication.type</name>
+ <value>simple</value>
+ <description>
+ Defines authentication used for Oozie HTTP endpoint.
+ Supported values are: simple | basic | kerberos | #AUTHENTICATION_HANDLER_CLASSNAME#
+ </description>
+ </property>
+
+ <property>
+ <name>oozie.authentication.token.validity</name>
+ <value>36000</value>
+ <description>
+ Indicates how long (in seconds) an authentication token is valid before it has
+ to be renewed.
+ </description>
+ </property>
+
+ <property>
+ <name>oozie.authentication.signature.secret</name>
+ <value>oozie</value>
+ <description>
+ The signature secret for signing the authentication tokens.
+ If not set a random secret is generated at startup time.
+ In order for authentication to work correctly across multiple hosts
+ the secret must be the same across all the hosts.
+ </description>
+ </property>
+
+ <!--<property>
+ <name>oozie.authentication.cookie.domain</name>
+ <value></value>
+ <description>
+ The domain to use for the HTTP cookie that stores the authentication token.
+ In order for authentication to work correctly across multiple hosts
+ the domain must be correctly set.
+ </description>
+ </property>-->
+
+ <property>
+ <name>oozie.authentication.simple.anonymous.allowed</name>
+ <value>true</value>
+ <description>
+ Indicates if anonymous requests are allowed.
+ This setting is meaningful only when using 'simple' authentication.
+ </description>
+ </property>
+
+ <property>
+ <name>oozie.authentication.kerberos.principal</name>
+ <value>HTTP/localhost@${local.realm}</value>
+ <description>
+ Indicates the Kerberos principal to be used for the HTTP endpoint.
+ The principal MUST start with 'HTTP/' as per the Kerberos HTTP SPNEGO specification.
+ </description>
+ </property>
+
+ <property>
+ <name>oozie.authentication.kerberos.keytab</name>
+ <value>${oozie.service.HadoopAccessorService.keytab.file}</value>
+ <description>
+ Location of the keytab file with the credentials for the principal.
+ Referring to the same keytab file Oozie uses for its Kerberos credentials for Hadoop.
+ </description>
+ </property>
+
+ <property>
+ <name>oozie.authentication.kerberos.name.rules</name>
+ <value>DEFAULT</value>
+ <description>
+ The kerberos name rules are used to resolve kerberos principal names; refer to Hadoop's
+ KerberosName for more details.
+ </description>
+ </property>
+
+ <!-- Proxyuser Configuration -->
+
+ <!--
+
+ <property>
+ <name>oozie.service.ProxyUserService.proxyuser.#USER#.hosts</name>
+ <value>*</value>
+ <description>
+ List of hosts the '#USER#' user is allowed to perform 'doAs'
+ operations.
+
+ The '#USER#' must be replaced with the username of the user who is
+ allowed to perform 'doAs' operations.
+
+ The value can be the '*' wildcard or a list of hostnames.
+
+ For multiple users copy this property and replace the user name
+ in the property name.
+ </description>
+ </property>
+
+ <property>
+ <name>oozie.service.ProxyUserService.proxyuser.#USER#.groups</name>
+ <value>*</value>
+ <description>
+ List of groups the '#USER#' user is allowed to impersonate users
+ from to perform 'doAs' operations.
+
+ The '#USER#' must be replaced with the username of the user who is
+ allowed to perform 'doAs' operations.
+
+ The value can be the '*' wildcard or a list of groups.
+
+ For multiple users copy this property and replace the user name
+ in the property name.
+ </description>
+ </property>
+
+ -->
+
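+ <!-- The oozie.service.ProxyUserService.proxyuser.hadoop.* properties below
+ instantiate the commented pattern above for the 'hadoop' user. -->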
+
+ <property>
+ <name>oozie.service.coord.push.check.requeue.interval</name>
+ <value>30000</value>
+ </property>
+
+ <property>
+ <name>oozie.services.ext</name>
+ <value>org.apache.oozie.service.JMSAccessorService,
+ org.apache.oozie.service.PartitionDependencyManagerService,
+ org.apache.oozie.service.HCatAccessorService</value>
+ </property>
+
+ <property>
+ <name>oozie.credentials.credentialclasses</name>
+ <value>hcat=org.apache.oozie.action.hadoop.HCatCredentials</value>
+ </property>
+
+ <property>
+ <name>oozie.service.URIHandlerService.uri.handlers</name>
+ <value>org.apache.oozie.dependency.FSURIHandler,
+ org.apache.oozie.dependency.HCatURIHandler</value>
+ </property>
+
+ <property>
+ <name>oozie.service.ELService.ext.functions.coord-job-submit-data</name>
+ <value>now=org.apache.oozie.extensions.OozieELExtensions#ph1_now_echo,
+ today=org.apache.oozie.extensions.OozieELExtensions#ph1_today_echo,
+ yesterday=org.apache.oozie.extensions.OozieELExtensions#ph1_yesterday_echo,
+ currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_currentMonth_echo,
+ lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_lastMonth_echo,
+ currentYear=org.apache.oozie.extensions.OozieELExtensions#ph1_currentYear_echo,
+ lastYear=org.apache.oozie.extensions.OozieELExtensions#ph1_lastYear_echo,
+ dataIn=org.apache.oozie.extensions.OozieELExtensions#ph1_dataIn_echo,
+ instanceTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_wrap,
+ formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo,
+ dateOffset=org.apache.oozie.coord.CoordELFunctions#ph1_coord_dateOffset_echo,
+ user=org.apache.oozie.coord.CoordELFunctions#coord_user</value>
+ </property>
+
+ <property>
+ <name>oozie.service.ProxyUserService.proxyuser.hadoop.hosts</name>
+ <value>*</value>
+ </property>
+
+ <property>
+ <name>oozie.service.ELService.ext.functions.coord-sla-submit</name>
+ <value>instanceTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_fixed,
+ user=org.apache.oozie.coord.CoordELFunctions#coord_user</value>
+ </property>
+
+ <property>
+ <name>oozie.service.ELService.ext.functions.coord-sla-create</name>
+ <value>instanceTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_nominalTime,
+ user=org.apache.oozie.coord.CoordELFunctions#coord_user</value>
+ </property>
+
+ <property>
+ <name>oozie.service.ProxyUserService.proxyuser.hadoop.groups</name>
+ <value>*</value>
+ </property>
+
+ <property>
+ <name>oozie.service.ELService.ext.functions.coord-action-create</name>
+ <value>now=org.apache.oozie.extensions.OozieELExtensions#ph2_now,
+ today=org.apache.oozie.extensions.OozieELExtensions#ph2_today,
+ yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday,
+ currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth,
+ lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth,
+ currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear,
+ lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear,
+ latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,
+ future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo,
+ formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime,
+ user=org.apache.oozie.coord.CoordELFunctions#coord_user</value>
+ </property>
+
+ <property>
+ <name>oozie.service.ELService.ext.functions.coord-job-submit-instances</name>
+ <value>now=org.apache.oozie.extensions.OozieELExtensions#ph1_now_echo,
+ today=org.apache.oozie.extensions.OozieELExtensions#ph1_today_echo,
+ yesterday=org.apache.oozie.extensions.OozieELExtensions#ph1_yesterday_echo,
+ currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_currentMonth_echo,
+ lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_lastMonth_echo,
+ currentYear=org.apache.oozie.extensions.OozieELExtensions#ph1_currentYear_echo,
+ lastYear=org.apache.oozie.extensions.OozieELExtensions#ph1_lastYear_echo,
+ formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo,
+ latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,
+ future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo</value>
+ </property>
+
+ <property>
+ <name>oozie.service.ELService.ext.functions.coord-action-start</name>
+ <value>now=org.apache.oozie.extensions.OozieELExtensions#ph2_now,
+ today=org.apache.oozie.extensions.OozieELExtensions#ph2_today,
+ yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday,
+ currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth,
+ lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth,
+ currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear,
+ lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear,
+ latest=org.apache.oozie.coord.CoordELFunctions#ph3_coord_latest,
+ future=org.apache.oozie.coord.CoordELFunctions#ph3_coord_future,
+ dataIn=org.apache.oozie.extensions.OozieELExtensions#ph3_dataIn,
+ instanceTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_nominalTime,
+ dateOffset=org.apache.oozie.coord.CoordELFunctions#ph3_coord_dateOffset,
+ formatTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_formatTime,
+ user=org.apache.oozie.coord.CoordELFunctions#coord_user</value>
+ </property>
+
+ <property>
+ <name>oozie.service.ELService.ext.functions.coord-action-create-inst</name>
+ <value>now=org.apache.oozie.extensions.OozieELExtensions#ph2_now_inst,
+ today=org.apache.oozie.extensions.OozieELExtensions#ph2_today_inst,
+ yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday_inst,
+ currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth_inst,
+ lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth_inst,
+ currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear_inst,
+ lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear_inst,
+ latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,
+ future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo,
+ formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime,
+ user=org.apache.oozie.coord.CoordELFunctions#coord_user</value>
+ </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/metainfo.xml
new file mode 100644
index 0000000000..347954b9de
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/metainfo.xml
@@ -0,0 +1,126 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>OOZIE</name>
+ <displayName>Oozie</displayName>
+ <comment>System for workflow coordination and execution of Apache Hadoop jobs. This also includes the installation of the optional Oozie Web Console which relies on and will install the &lt;a target="_blank" href="http://www.sencha.com/legal/open-source-faq/"&gt;ExtJS&lt;/a&gt; Library.
+ </comment>
+ <version>4.0.0.2.0</version>
+ <components>
+ <component>
+ <name>OOZIE_SERVER</name>
+ <displayName>Oozie Server</displayName>
+ <category>MASTER</category>
+ <cardinality>1</cardinality>
+ <dependencies>
+ <dependency>
+ <name>HDFS/HDFS_CLIENT</name>
+ <scope>host</scope>
+ <auto-deploy>
+ <enabled>true</enabled>
+ </auto-deploy>
+ </dependency>
+ <dependency>
+ <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+ <scope>host</scope>
+ <auto-deploy>
+ <enabled>true</enabled>
+ </auto-deploy>
+ </dependency>
+ <dependency>
+ <name>YARN/YARN_CLIENT</name>
+ <scope>host</scope>
+ <auto-deploy>
+ <enabled>true</enabled>
+ </auto-deploy>
+ </dependency>
+ </dependencies>
+ <commandScript>
+ <script>scripts/oozie_server.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ </component>
+
+ <component>
+ <name>OOZIE_CLIENT</name>
+ <displayName>Oozie Client</displayName>
+ <category>CLIENT</category>
+ <cardinality>1+</cardinality>
+ <dependencies>
+ <dependency>
+ <name>HDFS/HDFS_CLIENT</name>
+ <scope>host</scope>
+ <auto-deploy>
+ <enabled>true</enabled>
+ </auto-deploy>
+ </dependency>
+ <dependency>
+ <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+ <scope>host</scope>
+ <auto-deploy>
+ <enabled>true</enabled>
+ </auto-deploy>
+ </dependency>
+ </dependencies>
+ <commandScript>
+ <script>scripts/oozie_client.py</script>
+ <scriptType>PYTHON</scriptType>
+ </commandScript>
+ <configFiles>
+ <configFile>
+ <type>xml</type>
+ <fileName>oozie-site.xml</fileName>
+ <dictionaryName>oozie-site</dictionaryName>
+ </configFile>
+ <configFile>
+ <type>env</type>
+ <fileName>oozie-env.cmd</fileName>
+ <dictionaryName>oozie-env</dictionaryName>
+ </configFile>
+ <configFile>
+ <type>env</type>
+ <fileName>oozie-log4j.properties</fileName>
+ <dictionaryName>oozie-log4j</dictionaryName>
+ </configFile>
+ </configFiles>
+ </component>
+ </components>
+
+ <commandScript>
+ <script>scripts/service_check.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>300</timeout>
+ </commandScript>
+
+ <requiredServices>
+ <service>YARN</service>
+ </requiredServices>
+
+ <configuration-dependencies>
+ <config-type>oozie-site</config-type>
+ <config-type>oozie-env</config-type>
+ <config-type>oozie-log4j</config-type>
+ <config-type>yarn-site</config-type>
+ </configuration-dependencies>
+ </service>
+ </services>
+</metainfo>
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/package/scripts/oozie.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/package/scripts/oozie.py
new file mode 100644
index 0000000000..7778a75cbb
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/package/scripts/oozie.py
@@ -0,0 +1,36 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+def oozie():
+ import params
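+ # Materialize oozie-site.xml from the 'oozie-site' configuration dictionary,
+ # then render oozie-env.cmd from the inline template carried in oozie-env 'content'.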
+ XmlConfig("oozie-site.xml",
+ conf_dir = params.oozie_conf_dir,
+ configurations = params.config['configurations']['oozie-site'],
+ owner = params.oozie_user,
+ mode = 'f'
+ )
+
+ File(format("{oozie_conf_dir}/oozie-env.cmd"),
+ owner=params.oozie_user,
+ content=InlineTemplate(params.oozie_env_cmd_template)
+ ) \ No newline at end of file
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/package/scripts/oozie_client.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/package/scripts/oozie_client.py
new file mode 100644
index 0000000000..164c940ac5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/package/scripts/oozie_client.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+from resource_management import *
+from oozie import oozie
+
+class OozieClient(Script):
+ def install(self, env):
+ # client checks env var to determine if it is installed
+ if "OOZIE_HOME" not in os.environ:
+ self.install_packages(env)
+ self.configure(env)
+
+ def configure(self, env):
+ import params
+ env.set_params(params)
+ oozie()
+
+ def status(self, env):
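+ # Client components have no running process, hence no status to report.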
+ raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+ OozieClient().execute()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/package/scripts/oozie_server.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/package/scripts/oozie_server.py
new file mode 100644
index 0000000000..261bc8f9b6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/package/scripts/oozie_server.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import sys
+import service_mapping
+from resource_management import *
+from oozie import oozie
+from ambari_commons.inet_utils import force_download_file
+
+class OozieServer(Script):
+ def install(self, env):
+ import params
+ if not check_windows_service_exists(service_mapping.oozie_server_win_service_name):
+ self.install_packages(env)
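+ # Stage the MS SQL Server JDBC driver (sqljdbc4.jar) from the server's JDK
+ # resource location into each place the Oozie server loads it from: the
+ # extra_libs dir, the exploded web app (if present), the sharelib, and temp.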
+ force_download_file(os.path.join(params.config['hostLevelParams']['jdk_location'], "sqljdbc4.jar"),
+ os.path.join(params.oozie_root, "extra_libs", "sqljdbc4.jar")
+ )
+ webapps_sqljdbc_path = os.path.join(params.oozie_home, "oozie-server", "webapps", "oozie", "WEB-INF", "lib", "sqljdbc4.jar")
+ if os.path.isfile(webapps_sqljdbc_path):
+ force_download_file(os.path.join(params.config['hostLevelParams']['jdk_location'], "sqljdbc4.jar"),
+ webapps_sqljdbc_path
+ )
+ force_download_file(os.path.join(params.config['hostLevelParams']['jdk_location'], "sqljdbc4.jar"),
+ os.path.join(params.oozie_home, "share", "lib", "oozie", "sqljdbc4.jar")
+ )
+ force_download_file(os.path.join(params.config['hostLevelParams']['jdk_location'], "sqljdbc4.jar"),
+ os.path.join(params.oozie_home, "temp", "WEB-INF", "lib", "sqljdbc4.jar")
+ )
+
+ def configure(self, env):
+ oozie()
+
+ def start(self, env):
+ import params
+ env.set_params(params)
+ self.configure(env)
+ Service(service_mapping.oozie_server_win_service_name, action="start")
+
+ def stop(self, env):
+ import params
+ env.set_params(params)
+ Service(service_mapping.oozie_server_win_service_name, action="stop")
+
+ def status(self, env):
+ import params
+ check_windows_service_status(service_mapping.oozie_server_win_service_name)
+
+if __name__ == "__main__":
+ OozieServer().execute()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/package/scripts/params.py
new file mode 100644
index 0000000000..37405de2ae
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/package/scripts/params.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import os
+
+# server configurations
+config = Script.get_config()
+
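+# Install locations are derived from environment variables set up by the
+# Windows installer; HADOOP_HOME, OOZIE_ROOT and OOZIE_HOME must be defined.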
+hdp_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"], ".."))
+oozie_root = os.environ['OOZIE_ROOT']
+oozie_home = os.environ['OOZIE_HOME']
+oozie_conf_dir = os.path.join(oozie_home,'conf')
+oozie_user = "hadoop"
+
+oozie_env_cmd_template = config['configurations']['oozie-env']['content'] \ No newline at end of file
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/package/scripts/service_check.py
new file mode 100644
index 0000000000..386781990f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/package/scripts/service_check.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+from resource_management import *
+
+class OozieServiceCheck(Script):
+ def service_check(self, env):
+ import params
+ env.set_params(params)
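+ # Delegate to the stack's Run-SmokeTests.cmd wrapper for the OOZIE service.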
+ smoke_cmd = os.path.join(params.hdp_root,"Run-SmokeTests.cmd")
+ service = "OOZIE"
+ Execute(format("cmd /C {smoke_cmd} {service}"), logoutput=True)
+
+if __name__ == "__main__":
+ OozieServiceCheck().execute()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/package/scripts/service_mapping.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/package/scripts/service_mapping.py
new file mode 100644
index 0000000000..b71b25c55b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/package/scripts/service_mapping.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
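+# Name of the Windows service under which the Oozie server is registered.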
+oozie_server_win_service_name = "oozieservice" \ No newline at end of file
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/configuration/pig-log4j.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/configuration/pig-log4j.xml
new file mode 100644
index 0000000000..cbdd452b41
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/configuration/pig-log4j.xml
@@ -0,0 +1,61 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+ <property>
+ <name>content</name>
+ <value>
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+# ***** Set the Pig logger level to INFO and its only appender to A.
+log4j.logger.org.apache.pig=info, A
+
+# ***** A is set to be a ConsoleAppender.
+log4j.appender.A=org.apache.log4j.ConsoleAppender
+# ***** A uses PatternLayout.
+log4j.appender.A.layout=org.apache.log4j.PatternLayout
+log4j.appender.A.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n
+ </value>
+ </property>
+
+</configuration>
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/configuration/pig-properties.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/configuration/pig-properties.xml
new file mode 100644
index 0000000000..88e2fea5bf
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/configuration/pig-properties.xml
@@ -0,0 +1,262 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+ <property>
+ <name>pig-content</name>
+ <value>
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Pig configuration file. All values can be overwritten by command line arguments.
+
+# Use the "-h properties" command to see description of the properties
+
+# log4jconf log4j configuration file
+# log4jconf=./conf/log4j.properties
+
+# a file that contains pig script
+#file=
+
+# load jarfile, colon separated
+#jar=
+
+#verbose print all log messages to screen (default to print only INFO and above to screen)
+#verbose=true
+
+#exectype local|mapreduce, mapreduce is default
+#exectype=local
+
+#the default timezone: if it is not set, the default timezone for this host is used.
+#the correct timezone format is the UTC offset: e.g., +08:00.
+#pig.datetime.default.tz=
+
+#pig.logfile=
+
+#Do not spill temp files smaller than this size (bytes)
+#pig.spill.size.threshold=5000000
+
+#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
+#This should help reduce the number of files being spilled.
+#pig.spill.gc.activation.size=40000000
+
+#the following two parameters are to help estimate the reducer number
+#pig.exec.reducers.bytes.per.reducer=1000000000
+#pig.exec.reducers.max=999
+
+#Logging properties
+#verbose=false
+#brief=false
+#debug=INFO
+#aggregate.warning=true
+
+#Performance tuning properties
+#pig.cachedbag.memusage=0.2
+#pig.skewedjoin.reduce.memusage=0.3
+#pig.exec.nocombiner=false
+#opt.multiquery=true
+
+#Following parameters are for configuring intermediate storage format
+#Supported storage types are seqfile and tfile
+#Supported codec types: tfile supports gz(gzip) and lzo, seqfile support gz(gzip), lzo, snappy, bzip2
+#pig.tmpfilecompression=false
+#pig.tmpfilecompression.storage=seqfile
+#pig.tmpfilecompression.codec=gz
+
+#pig.noSplitCombination=true
+
+#pig.exec.mapPartAgg=false
+#pig.exec.mapPartAgg.minReduction=10
+
+#exectype=mapreduce
+#pig.additional.jars=&lt;comma separated list of jars&gt;
+#udf.import.list=&lt;comma separated list of imports&gt;
+#stop.on.failure=false
+
+#Use this option only when your Pig job will otherwise die because of
+#using more counters than hadoop configured limit
+#pig.disable.counter=true
+
+# By default, pig will allow 1GB of data to be replicated using
+# the distributed cache when doing fragment-replicated join.
+# pig.join.replicated.max.bytes=1000000000
+
+# Use this option to turn on UDF timers. This will cause two
+# counters to be tracked for every UDF and LoadFunc in your script:
+# approx_microsecs measures approximate time spent inside a UDF
+# approx_invocations reports the approximate number of times the UDF was invoked
+# pig.udf.profile=false
+
+#When enabled, 'describe' prints a multi-line formatted schema
+#(similar to an indented json) rather than on a single line.
+#pig.pretty.print.schema=true
+
+#pig.sql.type=hcat
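+# Path to the hcat launcher script used by Pig's HCatalog integration.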
+hcat.bin=c:\hdp\hcatalog-@hcat.version@\\bin\\hcat.py
+
+############################ SchemaTuple ############################
+
+# Setting this value will turn on the SchemaTuple feature (PIG-2632)
+# This will attempt to use code generation for more efficient processing within
+# the pig code. This can lead to CPU, serialization, and memory
+# benefits (currently, the potential memory benefits are the largest).
+
+# This parameter will enable the optimization in all available cases
+#pig.schematuple=true
+
+# Certain cases can be turned off by uncommenting the following. These will
+# all be off by default, but will all be turned on if pig.schematuple is set
+# to true.
+
+# This will disable SchemaTuples in the case of udfs. Currently,
+# the input to UDF's will be SchemaTuples.
+
+#pig.schematuple.udf=false
+
+# This is currently not implemented. In the future, LoadFuncs with known
+# schemas should output SchemaTuples
+
+#pig.schematuple.load=false
+
+# This will use SchemaTuples in replicated joins. The potential memory saving
+# here is significant. It will use SchemaTuples when it builds the HashMap of
+# the join key to related values.
+
+#pig.schematuple.fr_join=false
+
+# In the current implementation of merge join, all of the Tuples in the left relation
+# that share a given key will be stored in a List in memory. This will use SchemaTuples
+# instead in that List.
+
+#pig.schematuple.merge_join=false
+
+#####################################################################
+
+##### Set up optional Pig Progress Notification Listener ############
+
+# Note that only one PPNL can be set up. If you need several, write a PPNL that will chain them.
+# pig.notification.listener = &lt;fully qualified class name of a PPNL implementation&gt;
+
+# Optionally, you can supply a single String argument to pass to your PPNL.
+# pig.notification.listener.arg = &lt;somevalue&gt;
+
+#####################################################################
+
+########## Override the default Reducer Estimator logic #############
+
+# By default, the logic to estimate the number of reducers to use for a given job lives in:
+# org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.InputSizeReducerEstimator
+# This logic can be replaced by implementing the following interface:
+# org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigReducerEstimator
+
+# This class will be invoked to estimate the number of reducers to use.
+# pig.exec.reducer.estimator = &lt;fully qualified class name of a PigReducerEstimator implementation&gt;
+
+# Optionally, you can supply a single String argument to pass to your PigReducerEstimator.
+# pig.exec.reducer.estimator.arg = &lt;somevalue&gt;
+
+#####################################################################
+
+###### Override the default Pig Stats Output Size Reader logic ######
+
+# By default, the size of reducers output is computed as the total size of
+# output files. But since not every storage is file-based, this logic is not
+# always applicable. If that is the case, the logic can be replaced by
+# implementing the following interface:
+# org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigStatsOutputSizeReader
+
+# This class will be invoked to compute the size of reducers output.
+# pig.stats.output.size.reader = &lt;fully qualified class name of a PigStatsOutputSizeReader implementation&gt;
+
+# If you need to register more than one reader, you can register them as a comma
+# separated list. Every reader implements a boolean supports(POStore sto) method.
+# When there are more than one reader, they are consulted in order, and the
+# first one whose supports() method returns true will be used.
+#
+#####################################################################
+
+#pig.load.default.statements=
+
+#####################################################################
+
+########### Override hadoop configs programmatically #################
+
+# By default, Pig expects hadoop configs (hadoop-site.xml and core-site.xml)
+# to be present on the classpath. There are cases when these configs
+# need to be passed programmatically, such as while using the PigServer API.
+# In such cases, you can override hadoop configs by setting the property
+# "pig.use.overriden.hadoop.configs".
+#
+# When this property is set to true, Pig does not look for hadoop configs
+# in the classpath and instead picks them up from the Properties/Configuration
+# object passed to it.
+
+# pig.use.overriden.hadoop.configs=false
+#
+######################################################################
+
+# Check if the script needs to check multiple stores writing
+# to the same location. When set to true, it stops the execution
+# of the script right away.
+pig.location.check.strict=false
+
+######################################################################
+
+# This key is used to define the default load func. Pig will fallback
+# on PigStorage as default in case this is undefined.
+
+# pig.default.load.func=&lt;fully qualified class name of a LoadFunc implementation&gt;
+# E.g., pig.default.load.func=org.apache.pig.custom.MyCustomStorage
+
+# This key is used to define the default store func. Pig will fallback
+# on PigStorage as default in case this is undefined.
+
+# pig.default.store.func=&lt;fully qualified class name of a StoreFunc implementation&gt;
+# E.g., pig.default.store.func=org.apache.pig.custom.MyCustomStorage
+
+# This option is used to define whether to support recovery to handle the
+# application master getting restarted.
+# pig.output.committer.recovery.support=true
+
+# Set this option to true if you need to use the old partition filter optimizer.
+# Note: Old filter optimizer PColFilterOptimizer will be deprecated in the future.
+# pig.exec.useOldPartitionFilterOptimizer=true
+
+ </value>
+ </property>
+
+</configuration>
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/metainfo.xml
new file mode 100644
index 0000000000..68f7566cbe
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/metainfo.xml
@@ -0,0 +1,75 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>PIG</name>
+ <displayName>Pig</displayName>
+ <comment>Scripting platform for analyzing large datasets</comment>
+ <version>0.12.0.2.0</version>
+ <components>
+ <component>
+ <name>PIG</name>
+ <displayName>Pig</displayName>
+ <category>CLIENT</category>
+ <cardinality>0+</cardinality>
+ <commandScript>
+ <script>scripts/pig_client.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ <configFiles>
+ <configFile>
+ <type>env</type>
+ <fileName>pig-env.cmd</fileName>
+ <dictionaryName>pig-env</dictionaryName>
+ </configFile>
+ <configFile>
+ <type>env</type>
+ <fileName>log4j.properties</fileName>
+ <dictionaryName>pig-log4j</dictionaryName>
+ </configFile>
+ <configFile>
+ <type>env</type>
+ <fileName>pig.properties</fileName>
+ <dictionaryName>pig-properties</dictionaryName>
+ </configFile>
+ </configFiles>
+ </component>
+ </components>
+
+ <commandScript>
+ <script>scripts/service_check.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>300</timeout>
+ </commandScript>
+
+ <requiredServices>
+ <service>YARN</service>
+ </requiredServices>
+
+ <configuration-dependencies>
+ <config-type>pig-env</config-type>
+ <config-type>pig-log4j</config-type>
+ <config-type>pig-properties</config-type>
+ </configuration-dependencies>
+
+ </service>
+ </services>
+</metainfo>
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/package/scripts/params.py
new file mode 100644
index 0000000000..854d5774fe
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/package/scripts/params.py
@@ -0,0 +1,39 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import os
+from resource_management import *
+
+# server configurations
+config = Script.get_config()
+hdp_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"],".."))
+# PIG_HOME is unset until the Pig package has been installed; keep these None then
+pig_home = os.environ.get('PIG_HOME')
+pig_conf_dir = os.path.join(pig_home, 'conf') if pig_home else None
+pig_properties = config['configurations']['pig-properties']['pig-content']
+
+if 'pig-log4j' in config['configurations'] and 'content' in config['configurations']['pig-log4j']:
+ log4j_props = config['configurations']['pig-log4j']['content']
+else:
+ log4j_props = None
+
+pig_user = "hadoop"
+hdfs_user = "hadoop"
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/package/scripts/pig.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/package/scripts/pig.py
new file mode 100644
index 0000000000..8e677efe07
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/package/scripts/pig.py
@@ -0,0 +1,49 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import os
+from resource_management import *
+
+def pig():
+ import params
+ File(format("{params.pig_conf_dir}/pig.properties"),
+ mode="f",
+ owner=params.pig_user,
+ content=params.pig_properties
+ )
+
+  if params.log4j_props is not None:
+    File(format("{params.pig_conf_dir}/log4j.properties"),
+         mode="f",
+         owner=params.pig_user,
+         content=params.log4j_props
+    )
+
+def pig_TemplateConfig(name):
+ import params
+
+ if not isinstance(name, list):
+ name = [name]
+
+  for x in name:
+    TemplateConfig(format("{params.pig_conf_dir}/{x}"),
+                   owner=params.hdfs_user
+    )
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/package/scripts/pig_client.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/package/scripts/pig_client.py
new file mode 100644
index 0000000000..381bfe9a39
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/package/scripts/pig_client.py
@@ -0,0 +1,43 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import os
+from resource_management import *
+from pig import pig
+
+
+class PigClient(Script):
+ def install(self, env):
+ import params
+ if params.pig_home is None:
+ self.install_packages(env)
+ self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    pig()
+
+ def status(self, env):
+ raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+ PigClient().execute()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/package/scripts/service_check.py
new file mode 100644
index 0000000000..9e519c5dea
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/package/scripts/service_check.py
@@ -0,0 +1,35 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import os
+from resource_management import *
+from resource_management.libraries import functions
+
+class PigServiceCheck(Script):
+ def service_check(self, env):
+ import params
+ env.set_params(params)
+ smoke_cmd = os.path.join(params.hdp_root,"Run-SmokeTests.cmd")
+ service = "PIG"
+ Execute(format("cmd /C {smoke_cmd} {service}"), logoutput=True, user=params.hdfs_user)
+
+if __name__ == "__main__":
+ PigServiceCheck().execute()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/configuration/sqoop-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/configuration/sqoop-site.xml
new file mode 100644
index 0000000000..eccd168e56
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/configuration/sqoop-site.xml
@@ -0,0 +1,166 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied. See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+
+<!-- Put Sqoop-specific properties in this file. -->
+
+<configuration>
+
+ <!--
+ Set the value of this property to explicitly enable third-party
+ ManagerFactory plugins.
+
+ If this is not used, you can alternately specify a set of ManagerFactories
+ in the $SQOOP_CONF_DIR/managers.d/ subdirectory. Each file should contain
+ one or more lines like:
+ manager.class.name[=/path/to/containing.jar]
+
+ Files will be consulted in lexicographical order only if this property
+ is unset.
+ -->
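+  <!--
+   For illustration only (hypothetical factory class and jar path), a file in
+   $SQOOP_CONF_DIR/managers.d/ could contain the single line:
+     com.example.sqoop.MyManagerFactory=/usr/lib/sqoop/lib/my-manager.jar
+  -->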
+ <!--
+ <property>
+ <name>sqoop.connection.factories</name>
+ <value>com.cloudera.sqoop.manager.DefaultManagerFactory</value>
+ <description>A comma-delimited list of ManagerFactory implementations
+ which are consulted, in order, to instantiate ConnManager instances
+ used to drive connections to databases.
+ </description>
+ </property>
+ -->
+
+ <!--
+ Set the value of this property to enable third-party tools.
+
+ If this is not used, you can alternately specify a set of ToolPlugins
+ in the $SQOOP_CONF_DIR/tools.d/ subdirectory. Each file should contain
+ one or more lines like:
+ plugin.class.name[=/path/to/containing.jar]
+
+ Files will be consulted in lexicographical order only if this property
+ is unset.
+ -->
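+  <!--
+   Likewise (hypothetical plugin class and jar path), a file in
+   $SQOOP_CONF_DIR/tools.d/ could contain the single line:
+     com.example.sqoop.MyToolPlugin=/usr/lib/sqoop/lib/my-tool.jar
+  -->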
+ <!--
+ <property>
+ <name>sqoop.tool.plugins</name>
+ <value></value>
+ <description>A comma-delimited list of ToolPlugin implementations
+ which are consulted, in order, to register SqoopTool instances which
+ allow third-party tools to be used.
+ </description>
+ </property>
+ -->
+
+ <!--
+ By default, the Sqoop metastore will auto-connect to a local embedded
+ database stored in ~/.sqoop/. To disable metastore auto-connect, uncomment
+ this next property.
+ -->
+ <!--
+ <property>
+ <name>sqoop.metastore.client.enable.autoconnect</name>
+ <value>false</value>
+ <description>If true, Sqoop will connect to a local metastore
+ for job management when no other metastore arguments are
+ provided.
+ </description>
+ </property>
+ -->
+
+ <!--
+ The auto-connect metastore is stored in ~/.sqoop/. Uncomment
+ these next arguments to control the auto-connect process with
+ greater precision.
+ -->
+ <!--
+ <property>
+ <name>sqoop.metastore.client.autoconnect.url</name>
+ <value>jdbc:hsqldb:file:/tmp/sqoop-meta/meta.db;shutdown=true</value>
+ <description>The connect string to use when connecting to a
+ job-management metastore. If unspecified, uses ~/.sqoop/.
+ You can specify a different path here.
+ </description>
+ </property>
+ <property>
+ <name>sqoop.metastore.client.autoconnect.username</name>
+ <value>SA</value>
+ <description>The username to bind to the metastore.
+ </description>
+ </property>
+ <property>
+ <name>sqoop.metastore.client.autoconnect.password</name>
+ <value></value>
+ <description>The password to bind to the metastore.
+ </description>
+ </property>
+ -->
+
+ <!--
+ For security reasons, by default your database password will not be stored in
+ the Sqoop metastore. When executing a saved job, you will need to
+ reenter the database password. Uncomment this setting to enable saved
+ password storage. (INSECURE!)
+ -->
+ <!--
+ <property>
+ <name>sqoop.metastore.client.record.password</name>
+ <value>true</value>
+ <description>If true, allow saved passwords in the metastore.
+ </description>
+ </property>
+ -->
+
+
+ <!--
+ SERVER CONFIGURATION: If you plan to run a Sqoop metastore on this machine,
+ you should uncomment and set these parameters appropriately.
+
+ You should then configure clients with:
+ sqoop.metastore.client.autoconnect.url =
+ jdbc:hsqldb:hsql://&lt;server-name&gt;:&lt;port&gt;/sqoop
+ -->
+ <!--
+ <property>
+ <name>sqoop.metastore.server.location</name>
+ <value>/tmp/sqoop-metastore/shared.db</value>
+ <description>Path to the shared metastore database files.
+ If this is not set, it will be placed in ~/.sqoop/.
+ </description>
+ </property>
+
+ <property>
+ <name>sqoop.metastore.server.port</name>
+ <value>16000</value>
+ <description>Port that this metastore should listen on.
+ </description>
+ </property>
+ -->
+
+</configuration>
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/metainfo.xml
new file mode 100644
index 0000000000..13f96301a1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/metainfo.xml
@@ -0,0 +1,80 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>SQOOP</name>
+ <displayName>Sqoop</displayName>
+ <comment>Tool for transferring bulk data between Apache Hadoop and
+ structured data stores such as relational databases
+ </comment>
+ <version>1.4.4.2.0</version>
+
+ <components>
+ <component>
+ <name>SQOOP</name>
+ <displayName>Sqoop</displayName>
+ <category>CLIENT</category>
+ <cardinality>1+</cardinality>
+ <dependencies>
+ <dependency>
+ <name>HDFS/HDFS_CLIENT</name>
+ <scope>host</scope>
+ <auto-deploy>
+ <enabled>true</enabled>
+ </auto-deploy>
+ </dependency>
+ <dependency>
+ <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+ <scope>host</scope>
+ <auto-deploy>
+ <enabled>true</enabled>
+ </auto-deploy>
+ </dependency>
+ </dependencies>
+ <commandScript>
+ <script>scripts/sqoop_client.py</script>
+ <scriptType>PYTHON</scriptType>
+ </commandScript>
+ <configFiles>
+ <configFile>
+ <type>env</type>
+ <fileName>sqoop-env.cmd</fileName>
+ <dictionaryName>sqoop-env</dictionaryName>
+ </configFile>
+ </configFiles>
+ </component>
+ </components>
+
+ <commandScript>
+ <script>scripts/service_check.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>300</timeout>
+ </commandScript>
+
+ <requiredServices>
+ <service>HDFS</service>
+ </requiredServices>
+
+ <configuration-dependencies>
+ <config-type>sqoop-env</config-type>
+ </configuration-dependencies>
+ </service>
+ </services>
+</metainfo>
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/package/scripts/params.py
new file mode 100644
index 0000000000..7197be391b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/package/scripts/params.py
@@ -0,0 +1,32 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import os
+
+config = Script.get_config()
+sqoop_user = "sqoop"
+sqoop_home_dir = None
+sqoop_conf_dir = "conf"
+
+hdp_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"], ".."))
+
+if "SQOOP_HOME" in os.environ:
+ sqoop_home_dir = os.environ["SQOOP_HOME"]
+ sqoop_conf_dir = os.path.join(sqoop_home_dir, "conf")
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/package/scripts/service_check.py
new file mode 100644
index 0000000000..18c0f15ac3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/package/scripts/service_check.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+
+import os
+from resource_management import *
+
+
+class SqoopServiceCheck(Script):
+ def service_check(self, env):
+ import params
+ env.set_params(params)
+ smoke_cmd = os.path.join(params.hdp_root,"Run-SmokeTests.cmd")
+ service = "SQOOP"
+ Execute(format("cmd /C {smoke_cmd} {service}"), logoutput=True)
+
+if __name__ == "__main__":
+ SqoopServiceCheck().execute()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/package/scripts/sqoop.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/package/scripts/sqoop.py
new file mode 100644
index 0000000000..a906dd2f73
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/package/scripts/sqoop.py
@@ -0,0 +1,30 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import sys
+
+def sqoop():
+ import params
+ XmlConfig("sqoop-site.xml",
+ conf_dir=params.sqoop_conf_dir,
+ configurations=params.config['configurations']['sqoop-site'],
+ owner=params.sqoop_user,
+ mode="f"
+ )
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/package/scripts/sqoop_client.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/package/scripts/sqoop_client.py
new file mode 100644
index 0000000000..34673fcb02
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/package/scripts/sqoop_client.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from sqoop import sqoop
+
+
+class SqoopClient(Script):
+  def install(self, env):
+    import params
+    if params.sqoop_home_dir is None:
+      self.install_packages(env)
+    self.configure(env)
+
+ def configure(self, env):
+ import params
+ env.set_params(params)
+ sqoop()
+
+ def status(self, env):
+ raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+ SqoopClient().execute()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/configuration/storm-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/configuration/storm-site.xml
new file mode 100644
index 0000000000..5a23314232
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/configuration/storm-site.xml
@@ -0,0 +1,605 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="true">
+ <property>
+ <name>storm.zookeeper.servers</name>
+ <value>['localhost']</value>
+ <description>A list of hosts of ZooKeeper servers used to manage the cluster.</description>
+ </property>
+ <property>
+ <name>nimbus.host</name>
+ <value>localhost</value>
+ <description>The host that the master server is running on.</description>
+ </property>
+ <property>
+ <name>storm.local.dir</name>
+ <value>c:\\hdpdata</value>
+ <description>A directory on the local filesystem used by Storm for any local
+ filesystem usage it needs. The directory must exist and the Storm daemons must
+ have permission to read/write from this location.</description>
+ </property>
+ <property>
+ <name>logviewer.port</name>
+ <value>8081</value>
+ <description>HTTP UI port for log viewer.</description>
+ </property>
+ <property>
+ <name>storm.messaging.transport</name>
+ <value>backtype.storm.messaging.netty.Context</value>
+ <description>The transporter for communication among Storm tasks.</description>
+ </property>
+ <property>
+ <name>storm.messaging.netty.buffer_size</name>
+ <value>16384</value>
+ <description>Netty based messaging: The buffer size for send/recv buffer.</description>
+ </property>
+ <property>
+ <name>storm.messaging.netty.max_retries</name>
+ <value>10</value>
+ <description>Netty based messaging: The max # of retries that a peer will perform when a remote is not accessible.</description>
+ </property>
+ <property>
+ <name>storm.messaging.netty.max_wait_ms</name>
+ <value>5000</value>
+ <description>Netty based messaging: The max # of milliseconds that a peer will wait.</description>
+ </property>
+ <property>
+ <name>storm.messaging.netty.min_wait_ms</name>
+ <value>1000</value>
+ <description>Netty based messaging: The min # of milliseconds that a peer will wait.</description>
+ </property>
+ <property>
+ <name>ui.port</name>
+ <value>8772</value>
+ <description>Storm UI binds to this port.</description>
+ </property>
+ <property>
+ <name>java.library.path</name>
+ <value>/usr/local/lib:/opt/local/lib:/usr/lib</value>
+ <description>This value is passed to spawned JVMs (e.g., Nimbus, Supervisor, and Workers)
+ for the java.library.path value. java.library.path tells the JVM where
+ to look for native libraries. It is necessary to set this config correctly since
+ Storm uses the ZeroMQ and JZMQ native libs.
+ </description>
+ </property>
+ <property>
+ <name>storm.zookeeper.port</name>
+ <value>2181</value>
+ <description>The port Storm will use to connect to each of the ZooKeeper servers.</description>
+ </property>
+ <property>
+ <name>storm.zookeeper.root</name>
+ <value>/storm</value>
+ <description>The root location at which Storm stores data in ZooKeeper.</description>
+ </property>
+ <property>
+ <name>storm.zookeeper.session.timeout</name>
+ <value>20000</value>
+ <description>The session timeout for clients to ZooKeeper.</description>
+ </property>
+ <property>
+ <name>storm.zookeeper.connection.timeout</name>
+ <value>15000</value>
+ <description>The connection timeout for clients to ZooKeeper.</description>
+ </property>
+ <property>
+ <name>storm.zookeeper.retry.times</name>
+ <value>5</value>
+ <description>The number of times to retry a Zookeeper operation.</description>
+ </property>
+ <property>
+ <name>storm.zookeeper.retry.interval</name>
+ <value>1000</value>
+ <description>The interval between retries of a Zookeeper operation.</description>
+ </property>
+ <property>
+ <name>storm.zookeeper.retry.intervalceiling.millis</name>
+ <value>30000</value>
+ <description>The ceiling of the interval between retries of a Zookeeper operation.</description>
+ </property>
+ <property>
+ <name>storm.cluster.mode</name>
+ <value>distributed</value>
+ <description>The mode this Storm cluster is running in. Either "distributed" or "local".</description>
+ </property>
+ <property>
+ <name>storm.local.mode.zmq</name>
+ <value>false</value>
+ <description>Whether or not to use ZeroMQ for messaging in local mode. If this is set
+ to false, then Storm will use a pure-Java messaging system. The purpose
+ of this flag is to make it easy to run Storm in local mode by eliminating
+ the need for native dependencies, which can be difficult to install.
+ </description>
+ </property>
+ <property>
+ <name>storm.thrift.transport</name>
+ <value>backtype.storm.security.auth.SimpleTransportPlugin</value>
+ <description>The transport plug-in for Thrift client/server communication.</description>
+ </property>
+ <property>
+ <name>nimbus.thrift.port</name>
+ <value>6627</value>
+ <description>Which port the Thrift interface of Nimbus should run on. Clients should
+ connect to this port to upload jars and submit topologies.
+ </description>
+ </property>
+ <property>
+ <name>nimbus.thrift.max_buffer_size</name>
+ <value>1048576</value>
+ <description>The maximum buffer size thrift should use when reading messages.</description>
+ </property>
+ <property>
+ <name>nimbus.childopts</name>
+ <value>-Xmx1024m</value>
+ <description>This parameter is used by the storm-deploy project to configure the jvm options for the nimbus
+ daemon.
+ </description>
+ </property>
+ <property>
+ <name>nimbus.task.timeout.secs</name>
+ <value>30</value>
+ <description>How long without heartbeating a task can go before nimbus will consider the task dead and reassign it
+ to another location.
+ </description>
+ </property>
+ <property>
+ <name>nimbus.supervisor.timeout.secs</name>
+ <value>60</value>
+ <description>How long before a supervisor can go without heartbeating before nimbus considers it dead and stops
+ assigning new work to it.
+ </description>
+ </property>
+ <property>
+ <name>nimbus.monitor.freq.secs</name>
+ <value>10</value>
+ <description>
+ How often nimbus should wake up to check heartbeats and do reassignments. Note
+ that if a machine ever goes down Nimbus will immediately wake up and take action.
+      This parameter is for checking for failures when there's no explicit event like that occurring.
+ </description>
+ </property>
+ <property>
+ <name>nimbus.cleanup.inbox.freq.secs</name>
+ <value>600</value>
+ <description>How often nimbus should wake the cleanup thread to clean the inbox.</description>
+ </property>
+ <property>
+ <name>nimbus.inbox.jar.expiration.secs</name>
+ <value>3600</value>
+ <description>
+ The length of time a jar file lives in the inbox before being deleted by the cleanup thread.
+
+      Probably keep this value greater than or equal to NIMBUS_CLEANUP_INBOX_FREQ_SECS.
+      Note that the time it takes to delete an inbox jar file is going to be somewhat more than
+      NIMBUS_CLEANUP_INBOX_JAR_EXPIRATION_SECS (depending on what NIMBUS_CLEANUP_FREQ_SECS is set to).
+ </description>
+ </property>
+ <property>
+ <name>nimbus.task.launch.secs</name>
+ <value>120</value>
+ <description>A special timeout used when a task is initially launched. During launch, this is the timeout
+ used until the first heartbeat, overriding nimbus.task.timeout.secs.
+ </description>
+ </property>
+ <property>
+ <name>nimbus.reassign</name>
+ <value>true</value>
+ <description>Whether or not nimbus should reassign tasks if it detects that a task goes down.
+ Defaults to true, and it's not recommended to change this value.
+ </description>
+ </property>
+ <property>
+ <name>nimbus.file.copy.expiration.secs</name>
+ <value>600</value>
+ <description>During upload/download with the master, how long an upload or download connection is idle
+ before nimbus considers it dead and drops the connection.
+ </description>
+ </property>
+ <property>
+ <name>nimbus.topology.validator</name>
+ <value>backtype.storm.nimbus.DefaultTopologyValidator</value>
+ <description>A custom class that implements ITopologyValidator that is run whenever a
+ topology is submitted. Can be used to provide business-specific logic for
+ whether topologies are allowed to run or not.
+ </description>
+ </property>
+ <property>
+ <name>ui.childopts</name>
+ <value>-Xmx768m -Djava.security.auth.login.config=/etc/storm/conf/storm_jaas.conf</value>
+ <description>Childopts for Storm UI Java process.</description>
+ </property>
+ <property>
+ <name>logviewer.childopts</name>
+ <value>-Xmx128m</value>
+ <description>Childopts for log viewer java process.</description>
+ </property>
+ <property>
+ <name>logviewer.appender.name</name>
+ <value>A1</value>
+ <description>Appender name used by log viewer to determine log directory.</description>
+ </property>
+ <property>
+ <name>drpc.port</name>
+ <value>3772</value>
+    <description>This port is used by Storm DRPC for receiving DRPC requests from clients.</description>
+ </property>
+ <property>
+ <name>drpc.worker.threads</name>
+ <value>64</value>
+ <description>DRPC thrift server worker threads.</description>
+ </property>
+ <property>
+ <name>drpc.queue.size</name>
+ <value>128</value>
+ <description>DRPC thrift server queue size.</description>
+ </property>
+ <property>
+ <name>drpc.invocations.port</name>
+ <value>3773</value>
+ <description>This port on Storm DRPC is used by DRPC topologies to receive function invocations and send results
+ back.
+ </description>
+ </property>
+ <property>
+ <name>drpc.request.timeout.secs</name>
+ <value>600</value>
+    <description>The timeout on DRPC requests within the DRPC server. Defaults to 10 minutes.
+      Note that requests can also time out based on the socket timeout on the DRPC client,
+      and separately based on the topology message timeout for the topology implementing
+      the DRPC function.
+    </description>
+ </property>
+ <property>
+ <name>drpc.childopts</name>
+ <value>-Xmx768m</value>
+ <description>Childopts for Storm DRPC Java process.</description>
+ </property>
+ <property>
+ <name>transactional.zookeeper.root</name>
+ <value>/transactional</value>
+ <description>The root directory in ZooKeeper for metadata about TransactionalSpouts.</description>
+ </property>
+ <property>
+ <name>transactional.zookeeper.servers</name>
+ <value>null</value>
+ <description>The list of zookeeper servers in which to keep the transactional state. If null (which is default),
+ will use storm.zookeeper.servers
+ </description>
+ </property>
+ <property>
+ <name>transactional.zookeeper.port</name>
+ <value>null</value>
+ <description>The port to use to connect to the transactional zookeeper servers. If null (which is default),
+ will use storm.zookeeper.port
+ </description>
+ </property>
+ <property>
+ <name>supervisor.slots.ports</name>
+ <value>[6700, 6701]</value>
+ <description>A list of ports that can run workers on this supervisor. Each worker uses one port, and
+ the supervisor will only run one worker per port. Use this configuration to tune
+ how many workers run on each machine.
+ </description>
+ </property>
+ <property>
+ <name>supervisor.childopts</name>
+ <value>-Xmx256m</value>
+ <description>This parameter is used by the storm-deploy project to configure the jvm options for the supervisor
+ daemon.
+ </description>
+ </property>
+ <property>
+ <name>supervisor.worker.start.timeout.secs</name>
+ <value>120</value>
+ <description>How long a worker can go without heartbeating during the initial launch before
+      the supervisor tries to restart the worker process. This value overrides
+ supervisor.worker.timeout.secs during launch because there is additional
+ overhead to starting and configuring the JVM on launch.
+ </description>
+ </property>
+ <property>
+ <name>supervisor.worker.timeout.secs</name>
+ <value>30</value>
+ <description>How long a worker can go without heartbeating before the supervisor tries to restart the worker
+ process.
+ </description>
+ </property>
+ <property>
+ <name>supervisor.monitor.frequency.secs</name>
+ <value>3</value>
+ <description>How often the supervisor checks the worker heartbeats to see if any of them need to be restarted.
+ </description>
+ </property>
+ <property>
+ <name>supervisor.heartbeat.frequency.secs</name>
+ <value>5</value>
+ <description>How often the supervisor sends a heartbeat to the master.</description>
+ </property>
+ <property>
+ <name>worker.childopts</name>
+ <value>-Xmx768m</value>
+    <description>The jvm opts provided to workers launched by this supervisor. All "%ID%" substrings are replaced with
+ an identifier for this worker.
+ </description>
+ </property>
+ <property>
+ <name>worker.heartbeat.frequency.secs</name>
+ <value>1</value>
+ <description>How often this worker should heartbeat to the supervisor.</description>
+ </property>
+ <property>
+ <name>task.heartbeat.frequency.secs</name>
+ <value>3</value>
+ <description>How often a task should heartbeat its status to the master.</description>
+ </property>
+ <property>
+ <name>task.refresh.poll.secs</name>
+ <value>10</value>
+ <description>How often a task should sync its connections with other tasks (if a task is
+ reassigned, the other tasks sending messages to it need to refresh their connections).
+ In general though, when a reassignment happens other tasks will be notified
+ almost immediately. This configuration is here just in case that notification doesn't
+ come through.
+ </description>
+ </property>
+ <property>
+ <name>zmq.threads</name>
+ <value>1</value>
+ <description>The number of threads that should be used by the zeromq context in each worker process.</description>
+ </property>
+ <property>
+ <name>zmq.linger.millis</name>
+ <value>5000</value>
+ <description>How long a connection should retry sending messages to a target host when
+ the connection is closed. This is an advanced configuration and can almost
+ certainly be ignored.
+ </description>
+ </property>
+ <property>
+ <name>zmq.hwm</name>
+ <value>0</value>
+    <description>The high water mark for the ZeroMQ push sockets used for networking. Use this config to prevent buffer
+ explosion
+ on the networking layer.
+ </description>
+ </property>
+ <property>
+ <name>storm.messaging.netty.server_worker_threads</name>
+ <value>1</value>
+ <description>Netty based messaging: The # of worker threads for the server.</description>
+ </property>
+ <property>
+ <name>storm.messaging.netty.client_worker_threads</name>
+ <value>1</value>
+ <description>Netty based messaging: The # of worker threads for the client.</description>
+ </property>
+ <property>
+ <name>topology.enable.message.timeouts</name>
+ <value>true</value>
+ <description>True if Storm should timeout messages or not. Defaults to true. This is meant to be used
+ in unit tests to prevent tuples from being accidentally timed out during the test.
+ </description>
+ </property>
+ <property>
+ <name>topology.debug</name>
+ <value>false</value>
+ <description>When set to true, Storm will log every message that's emitted.</description>
+ </property>
+ <property>
+ <name>topology.optimize</name>
+ <value>true</value>
+ <description>Whether or not the master should optimize topologies by running multiple tasks in a single thread where
+ appropriate.
+ </description>
+ </property>
+ <property>
+ <name>topology.workers</name>
+ <value>1</value>
+ <description>How many processes should be spawned around the cluster to execute this
+ topology. Each process will execute some number of tasks as threads within
+ them. This parameter should be used in conjunction with the parallelism hints
+ on each component in the topology to tune the performance of a topology.
+ </description>
+ </property>
+ <property>
+ <name>topology.acker.executors</name>
+ <value>null</value>
+ <description>How many executors to spawn for ackers.
+
+ If this is set to 0, then Storm will immediately ack tuples as soon
+ as they come off the spout, effectively disabling reliability.
+ </description>
+ </property>
+ <property>
+ <name>topology.message.timeout.secs</name>
+ <value>30</value>
+ <description>The maximum amount of time given to the topology to fully process a message
+ emitted by a spout. If the message is not acked within this time frame, Storm
+      will fail the message on the spout. Some spout implementations will then replay
+ the message at a later time.
+ </description>
+ </property>
+ <property>
+ <name>topology.skip.missing.kryo.registrations</name>
+ <value>false</value>
+ <description>Whether or not Storm should skip the loading of kryo registrations for which it
+ does not know the class or have the serializer implementation. Otherwise, the task will
+ fail to load and will throw an error at runtime. The use case of this is if you want to
+ declare your serializations on the storm.yaml files on the cluster rather than every single
+ time you submit a topology. Different applications may use different serializations and so
+ a single application may not have the code for the other serializers used by other apps.
+ By setting this config to true, Storm will ignore that it doesn't have those other serializations
+ rather than throw an error.
+ </description>
+ </property>
+ <property>
+ <name>topology.max.task.parallelism</name>
+ <value>null</value>
+ <description>The maximum parallelism allowed for a component in this topology. This configuration is
+ typically used in testing to limit the number of threads spawned in local mode.
+ </description>
+ </property>
+ <property>
+ <name>topology.max.spout.pending</name>
+ <value>null</value>
+ <description>The maximum number of tuples that can be pending on a spout task at any given time.
+ This config applies to individual tasks, not to spouts or topologies as a whole.
+
+ A pending tuple is one that has been emitted from a spout but has not been acked or failed yet.
+ Note that this config parameter has no effect for unreliable spouts that don't tag
+ their tuples with a message id.
+ </description>
+ </property>
+ <property>
+ <name>topology.state.synchronization.timeout.secs</name>
+ <value>60</value>
+ <description>The maximum amount of time a component gives a source of state to synchronize before it requests
+ synchronization again.
+ </description>
+ </property>
+ <property>
+ <name>topology.stats.sample.rate</name>
+ <value>0.05</value>
+ <description>The percentage of tuples to sample to produce stats for a task.</description>
+ </property>
+ <property>
+ <name>topology.builtin.metrics.bucket.size.secs</name>
+ <value>60</value>
+    <description>The time period that built-in metrics data is bucketed into.</description>
+ </property>
+ <property>
+ <name>topology.fall.back.on.java.serialization</name>
+ <value>true</value>
+ <description>Whether or not to use Java serialization in a topology.</description>
+ </property>
+ <property>
+ <name>topology.worker.childopts</name>
+ <value>null</value>
+ <description>Topology-specific options for the worker child process. This is used in addition to WORKER_CHILDOPTS.
+ </description>
+ </property>
+ <property>
+ <name>topology.executor.receive.buffer.size</name>
+ <value>1024</value>
+ <description>The size of the Disruptor receive queue for each executor. Must be a power of 2.</description>
+ </property>
+ <property>
+ <name>topology.executor.send.buffer.size</name>
+ <value>1024</value>
+ <description>The size of the Disruptor send queue for each executor. Must be a power of 2.</description>
+ </property>
+ <property>
+ <name>topology.receiver.buffer.size</name>
+ <value>8</value>
+ <description>The maximum number of messages to batch from the thread receiving off the network to the
+ executor queues. Must be a power of 2.
+ </description>
+ </property>
+ <property>
+ <name>topology.transfer.buffer.size</name>
+ <value>1024</value>
+ <description>The size of the Disruptor transfer queue for each worker.</description>
+ </property>
+ <property>
+ <name>topology.tick.tuple.freq.secs</name>
+ <value>null</value>
+ <description>How often a tick tuple from the "__system" component and "__tick" stream should be sent
+ to tasks. Meant to be used as a component-specific configuration.
+ </description>
+ </property>
+ <property>
+ <name>topology.worker.shared.thread.pool.size</name>
+ <value>4</value>
+ <description>The size of the shared thread pool for worker tasks to make use of. The thread pool can be accessed
+ via the TopologyContext.
+ </description>
+ </property>
+ <property>
+ <name>topology.disruptor.wait.strategy</name>
+ <value>com.lmax.disruptor.BlockingWaitStrategy</value>
+ <description>Configure the wait strategy used for internal queuing. Can be used to tradeoff latency
+ vs. throughput.
+ </description>
+ </property>
+ <property>
+ <name>topology.spout.wait.strategy</name>
+ <value>backtype.storm.spout.SleepSpoutWaitStrategy</value>
+ <description>A class that implements a strategy for what to do when a spout needs to wait. Waiting is
+ triggered in one of two conditions:
+
+ 1. nextTuple emits no tuples
+ 2. The spout has hit maxSpoutPending and can't emit any more tuples
+ </description>
+ </property>
+ <property>
+ <name>topology.sleep.spout.wait.strategy.time.ms</name>
+ <value>1</value>
+ <description>The amount of milliseconds the SleepEmptyEmitStrategy should sleep for.</description>
+ </property>
+ <property>
+ <name>topology.error.throttle.interval.secs</name>
+ <value>10</value>
+    <description>The interval in seconds to use for determining whether to throttle errors reported to Zookeeper.
+      For example, an interval of 10 seconds with topology.max.error.report.per.interval set to 5
+      will only allow 5 errors to be reported to Zookeeper per task
+      for every 10 second interval of time.
+    </description>
+ </property>
+ <property>
+ <name>topology.max.error.report.per.interval</name>
+ <value>5</value>
+    <description>The maximum number of errors a task is allowed to report to Zookeeper per
+      topology.error.throttle.interval.secs interval. For example, an interval of 10 seconds
+      with this value set to 5 will only allow 5 errors to be reported to Zookeeper
+      per task for every 10 second interval of time.
+    </description>
+ </property>
+ <property>
+ <name>topology.kryo.factory</name>
+ <value>backtype.storm.serialization.DefaultKryoFactory</value>
+ <description>Class that specifies how to create a Kryo instance for serialization. Storm will then apply
+ topology.kryo.register and topology.kryo.decorators on top of this. The default implementation
+ implements topology.fall.back.on.java.serialization and turns references off.
+ </description>
+ </property>
+ <property>
+ <name>topology.tuple.serializer</name>
+ <value>backtype.storm.serialization.types.ListDelegateSerializer</value>
+ <description>The serializer class for ListDelegate (tuple payload).
+ The default serializer will be ListDelegateSerializer
+ </description>
+ </property>
+ <property>
+ <name>topology.trident.batch.emit.interval.millis</name>
+ <value>500</value>
+ <description>How often a batch can be emitted in a Trident topology.</description>
+ </property>
+ <property>
+ <name>dev.zookeeper.path</name>
+ <value>/tmp/dev-storm-zookeeper</value>
+ <description>The path to use as the zookeeper dir when running a zookeeper server via
+ "storm dev-zookeeper". This zookeeper instance is only intended for development;
+ it is not a production grade zookeeper setup.
+ </description>
+ </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/metainfo.xml
new file mode 100644
index 0000000000..9df2aa8128
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/metainfo.xml
@@ -0,0 +1,92 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>STORM</name>
+ <displayName>Storm</displayName>
+ <comment>Apache Hadoop Stream processing framework</comment>
+ <version>0.9.1.2.1</version>
+ <components>
+
+ <component>
+ <name>NIMBUS</name>
+ <displayName>Nimbus</displayName>
+ <category>MASTER</category>
+ <cardinality>1</cardinality>
+ <dependencies>
+ <dependency>
+ <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+ <scope>cluster</scope>
+ <auto-deploy>
+ <enabled>true</enabled>
+ </auto-deploy>
+ </dependency>
+ </dependencies>
+ <commandScript>
+ <script>scripts/nimbus.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ </component>
+
+ <component>
+ <name>SUPERVISOR</name>
+ <displayName>Supervisor</displayName>
+ <category>SLAVE</category>
+ <cardinality>1+</cardinality>
+ <commandScript>
+ <script>scripts/supervisor.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ </component>
+
+ <component>
+ <name>STORM_UI_SERVER</name>
+ <displayName>Storm UI Server</displayName>
+ <category>MASTER</category>
+ <cardinality>1</cardinality>
+ <commandScript>
+ <script>scripts/ui_server.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ </component>
+
+ </components>
+
+ <commandScript>
+ <script>scripts/service_check.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>300</timeout>
+ </commandScript>
+
+ <requiredServices>
+ <service>ZOOKEEPER</service>
+ </requiredServices>
+
+ <configuration-dependencies>
+ <config-type>storm-site</config-type>
+ <config-type>storm-env</config-type>
+ </configuration-dependencies>
+ </service>
+ </services>
+</metainfo>
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/metrics.json b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/metrics.json
new file mode 100644
index 0000000000..a1451dd89c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/metrics.json
@@ -0,0 +1,99 @@
+{
+ "STORM_REST_API": {
+ "Component": [
+ {
+ "type": "jmx",
+ "metrics": {
+ "metrics/api/cluster/summary/tasks.total": {
+ "metric": "tasks.total",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/api/cluster/summary/slots.total": {
+ "metric": "slots.total",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/api/cluster/summary/slots.free": {
+ "metric": "slots.free",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/api/cluster/summary/supervisors": {
+ "metric": "supervisors",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/api/cluster/summary/executors.total": {
+ "metric": "executors.total",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/api/cluster/summary/slots.used": {
+ "metric": "slots.used",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/api/cluster/summary/topologies": {
+ "metric": "topologies",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/api/cluster/summary/nimbus.uptime": {
+ "metric": "nimbus.uptime",
+ "pointInTime": true,
+ "temporal": false
+ }
+ }
+ }
+ ],
+ "HostComponent": [
+ {
+ "type": "jmx",
+ "metrics": {
+ "metrics/api/cluster/summary/tasks.total": {
+ "metric": "tasks.total",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/api/cluster/summary/slots.total": {
+ "metric": "slots.total",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/api/cluster/summary/slots.free": {
+ "metric": "slots.free",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/api/cluster/summary/supervisors": {
+ "metric": "supervisors",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/api/cluster/summary/executors.total": {
+ "metric": "executors.total",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/api/cluster/summary/slots.used": {
+ "metric": "slots.used",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/api/cluster/summary/topologies": {
+ "metric": "topologies",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/api/cluster/summary/nimbus.uptime": {
+ "metric": "nimbus.uptime",
+ "pointInTime": true,
+ "temporal": false
+ }
+ }
+ }
+
+ ]
+ }
+}
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/package/scripts/nimbus.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/package/scripts/nimbus.py
new file mode 100644
index 0000000000..d9ca8578f1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/package/scripts/nimbus.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+import service_mapping
+from storm import storm
+
+
+class Nimbus(Script):
+ def install(self, env):
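+    # Only install packages when the Nimbus Windows service is not registered yet.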
+ if not check_windows_service_exists(service_mapping.nimbus_win_service_name):
+ self.install_packages(env)
+ self.configure(env)
+
+ def start(self, env):
+ self.configure(env)
+ Service(service_mapping.nimbus_win_service_name, action="start")
+
+ def stop(self, env):
+ Service(service_mapping.nimbus_win_service_name, action="stop")
+
+ def configure(self, env):
+ import params
+ env.set_params(params)
+ storm()
+
+ def status(self, env):
+ check_windows_service_status(service_mapping.nimbus_win_service_name)
+
+if __name__ == "__main__":
+ Nimbus().execute()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/package/scripts/params.py
new file mode 100644
index 0000000000..849572007d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/package/scripts/params.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+from resource_management import *
+import status_params
+
+# server configurations
+config = Script.get_config()
+
+hdp_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"],".."))
+conf_dir = os.environ["STORM_CONF_DIR"]
+# storm_user = config['configurations']['storm-env']['storm_user']
+storm_user = "hadoop"
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/package/scripts/service_check.py
new file mode 100644
index 0000000000..788044af6c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/package/scripts/service_check.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+from resource_management import *
+
+class ServiceCheck(Script):
+ def service_check(self, env):
+ import params
+ env.set_params(params)
+ smoke_cmd = os.path.join(params.hdp_root,"Run-SmokeTests.cmd")
+ service = "STORM"
+ Execute(format("cmd /C {smoke_cmd} {service}"), logoutput=True)
+
+if __name__ == "__main__":
+ ServiceCheck().execute()
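
One detail makes the Execute line above readable: resource_management's format() resolves {smoke_cmd} and {service} from the caller's local variables rather than from explicit keyword arguments. Below is a simplified, illustrative reimplementation of that lookup (the real helper also handles quoting and configuration attributes); format_like and demo are hypothetical names.

import inspect

def format_like(template):
    # pull the calling frame's globals and locals, locals winning
    caller = inspect.currentframe().f_back
    variables = dict(caller.f_globals)
    variables.update(caller.f_locals)
    return template.format(**variables)

def demo():
    smoke_cmd = "C:\\hdp\\Run-SmokeTests.cmd"
    service = "STORM"
    return format_like("cmd /C {smoke_cmd} {service}")

# demo() -> 'cmd /C C:\\hdp\\Run-SmokeTests.cmd STORM'
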
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/package/scripts/service_mapping.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/package/scripts/service_mapping.py
new file mode 100644
index 0000000000..254abd2197
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/package/scripts/service_mapping.py
@@ -0,0 +1,22 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+nimbus_win_service_name = "nimbus"
+supervisor_win_service_name = "supervisor"
+ui_win_service_name = "ui"
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/package/scripts/status_params.py
new file mode 100644
index 0000000000..bd97604c5d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/package/scripts/status_params.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management import *
+
+config = Script.get_config()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/package/scripts/storm.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/package/scripts/storm.py
new file mode 100644
index 0000000000..e0bc25b21d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/package/scripts/storm.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from yaml_config import yaml_config
+import sys
+
+
+def storm():
+ import params
+
+ yaml_config("storm.yaml",
+ conf_dir=params.conf_dir,
+ configurations=params.config['configurations']['storm-site'],
+ owner=params.storm_user
+ )
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/package/scripts/supervisor.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/package/scripts/supervisor.py
new file mode 100644
index 0000000000..827b3ba970
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/package/scripts/supervisor.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+import service_mapping
+from storm import storm
+
+
+class Supervisor(Script):
+ def install(self, env):
+ if not check_windows_service_exists(service_mapping.supervisor_win_service_name):
+ self.install_packages(env)
+ self.configure(env)
+
+ def start(self, env):
+ self.configure(env)
+ Service(service_mapping.supervisor_win_service_name, action="start")
+
+ def stop(self, env):
+ Service(service_mapping.supervisor_win_service_name, action="stop")
+
+ def configure(self, env):
+ import params
+ env.set_params(params)
+ storm()
+
+  def status(self, env):
+    check_windows_service_status(service_mapping.supervisor_win_service_name)
+
+if __name__ == "__main__":
+ Supervisor().execute()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/package/scripts/ui_server.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/package/scripts/ui_server.py
new file mode 100644
index 0000000000..ebd28309c3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/package/scripts/ui_server.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+import service_mapping
+from storm import storm
+
+
+
+class UiServer(Script):
+ def install(self, env):
+ if not check_windows_service_exists(service_mapping.ui_win_service_name):
+ self.install_packages(env)
+ self.configure(env)
+
+ def start(self, env):
+ self.configure(env)
+ Service(service_mapping.ui_win_service_name, action="start")
+
+ def stop(self, env):
+ Service(service_mapping.ui_win_service_name, action="stop")
+
+ def configure(self, env):
+ import params
+ env.set_params(params)
+ storm()
+
+  def status(self, env):
+    check_windows_service_status(service_mapping.ui_win_service_name)
+
+if __name__ == "__main__":
+ UiServer().execute()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/package/scripts/yaml_config.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/package/scripts/yaml_config.py
new file mode 100644
index 0000000000..55aa7776f6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/package/scripts/yaml_config.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+
+import re
+from resource_management import *
+
+def escape_yaml_property(value):
+  unquoted = False
+  unquoted_values = ["null","Null","NULL","true","True","TRUE","false","False","FALSE","YES","Yes","yes","NO","No","no","ON","On","on","OFF","Off","off"]
+
+  if value in unquoted_values:
+    unquoted = True
+
+  # list literals such as [a,b,c] are also left unquoted
+  if re.match(r'^\w*\[.+\]\w*$', value):
+    unquoted = True
+
+  try:
+    int(value)
+    unquoted = True
+  except ValueError:
+    pass
+
+  try:
+    float(value)
+    unquoted = True
+  except ValueError:
+    pass
+
+  if not unquoted:
+    value = value.replace("'","''")
+    value = "'"+value+"'"
+
+  return value
+
+def yaml_config(
+  filename,
+  configurations = None,
+  conf_dir = None,
+  owner = None,
+  group = None
+):
+  config_content = source.InlineTemplate('''{% for key, value in configurations_dict.items() %}{{ key }}: {{ escape_yaml_property(value) }}
+{% endfor %}''', configurations_dict=configurations, extra_imports=[escape_yaml_property])
+
+  File (format("{conf_dir}/{filename}"),
+    content = config_content,
+    owner = owner,
+    mode = "f"
+  )
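
To make the quoting rules above concrete, here is how escape_yaml_property behaves on representative storm.yaml values (expected results shown in comments):

escape_yaml_property("true")             # -> true              (YAML boolean, left unquoted)
escape_yaml_property("8080")             # -> 8080              (parses as int, left unquoted)
escape_yaml_property("0.75")             # -> 0.75              (parses as float, left unquoted)
escape_yaml_property("[6700, 6701]")     # -> [6700, 6701]      (list literal, left unquoted)
escape_yaml_property("storm.local.dir")  # -> 'storm.local.dir' (plain string, quoted)
escape_yaml_property("it's")             # -> 'it''s'           (embedded quote doubled)
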
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/TEZ/configuration/tez-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/TEZ/configuration/tez-site.xml
new file mode 100644
index 0000000000..42eaa456fa
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/TEZ/configuration/tez-site.xml
@@ -0,0 +1,218 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements. See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership. The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<configuration supports_final="true">
+
+ <property>
+ <name>tez.lib.uris</name>
+ <value>hdfs:///apps/tez/,hdfs:///apps/tez/lib/</value>
+ <!-- TODO:Remove this value>${fs.default.name}/apps/tez/,${fs.default.name}/apps/tez/lib</value-->
+ <description>The location of the Tez libraries which will be localized for DAGs</description>
+ </property>
+
+ <property>
+ <name>tez.am.log.level</name>
+ <value>INFO</value>
+ <description>Root Logging level passed to the Tez app master</description>
+ </property>
+
+ <property>
+ <name>tez.staging-dir</name>
+ <value>/tmp/${user.name}/staging</value>
+ <description>The staging dir used while submitting DAGs</description>
+ </property>
+
+ <property>
+ <name>tez.am.resource.memory.mb</name>
+ <value>1536</value>
+ <!-- TODO: Value needs to be set based on YARN configuration - similar to the way the MR AM size is set, 1.5 times the MR AM size -->
+ <description>The amount of memory to be used by the AppMaster</description>
+ </property>
+
+ <property>
+ <name>tez.am.java.opts</name>
+ <value>-server -Xmx1024m -Djava.net.preferIPv4Stack=true -XX:+UseNUMA -XX:+UseParallelGC</value>
+ <!-- TODO: Xmx value needs to be set based on tez.am.resource.memory.mb - typically at least 512MB less. Also this could have some additional hardware specific settings if Ambari supports it -->
+ <description>Java options for the Tez AppMaster process</description>
+ </property>
+ <!-- tez picks the java opts from yarn.app.mapreduce.am.command-opts for MR tasks. Likewise for the AM memory MB -->
+
+ <property>
+ <name>tez.am.shuffle-vertex-manager.min-src-fraction</name>
+ <value>0.2</value>
+ <description>In case of a ScatterGather connection, the fraction of source tasks which should
+      complete before tasks for the current vertex are scheduled
+ </description>
+ </property>
+
+ <property>
+ <name>tez.am.shuffle-vertex-manager.max-src-fraction</name>
+ <value>0.4</value>
+ <description>In case of a ScatterGather connection, once this fraction of source tasks have
+ completed, all tasks on the current vertex can be scheduled. Number of tasks ready for
+ scheduling on the current vertex scales linearly between min-fraction and max-fraction
+ </description>
+ </property>
+
+ <property>
+ <name>tez.am.am-rm.heartbeat.interval-ms.max</name>
+ <value>250</value>
+ <description>The maximum heartbeat interval between the AM and RM in milliseconds</description>
+ </property>
+
+ <property>
+ <name>tez.am.grouping.split-waves</name>
+ <value>1.4</value>
+ <description>The multiplier for available queue capacity when determining number of tasks for
+ a Vertex. 1.4 with 100% queue available implies generating a number of tasks roughly equal
+ to 140% of the available containers on the queue
+ </description>
+ </property>
+
+ <property>
+ <name>tez.am.grouping.min-size</name>
+ <value>16777216</value>
+ <description>Lower bound on the size (in bytes) of a grouped split, to avoid generating
+ too many splits
+ </description>
+ </property>
+
+ <property>
+ <name>tez.am.grouping.max-size</name>
+ <value>1073741824</value>
+ <description>Upper bound on the size (in bytes) of a grouped split, to avoid generating
+      excessively large splits
+ </description>
+ </property>
+
+ <property>
+ <name>tez.am.container.reuse.enabled</name>
+ <value>true</value>
+ <description>Configuration to specify whether container should be reused</description>
+ </property>
+
+ <property>
+ <name>tez.am.container.reuse.rack-fallback.enabled</name>
+ <value>true</value>
+ <description>Whether to reuse containers for rack local tasks. Active only if reuse is enabled
+ </description>
+ </property>
+
+ <property>
+ <name>tez.am.container.reuse.non-local-fallback.enabled</name>
+ <value>true</value>
+ <description>Whether to reuse containers for non-local tasks. Active only if reuse is enabled
+ </description>
+ </property>
+
+ <property>
+ <name>tez.am.container.session.delay-allocation-millis</name>
+ <value>10000</value>
+ <!-- TODO This value may change -->
+ <description>The amount of time to hold on to a container if no task can be assigned to
+ it immediately. Only active when reuse is enabled. Set to -1 to never release a container
+ in a session
+ </description>
+ </property>
+
+ <property>
+ <name>tez.am.container.reuse.locality.delay-allocation-millis</name>
+ <value>250</value>
+ <description>The amount of time to wait before assigning a container to the next level of
+ locality. NODE -&gt; RACK -&gt; NON_LOCAL
+ </description>
+ </property>
+
+ <property>
+ <name>tez.task.get-task.sleep.interval-ms.max</name>
+ <value>200</value>
+    <description>The maximum amount of time, in milliseconds, to wait before a task asks an AM for
+ another task
+ </description>
+ </property>
+
+ <!-- Client Submission timeout value when submitting DAGs to a session -->
+ <property>
+ <name>tez.session.client.timeout.secs</name>
+ <value>180</value>
+ <description>Time (in seconds) to wait for AM to come up when trying to submit a DAG from
+ the client
+ </description>
+ </property>
+
+ <property>
+ <name>tez.session.am.dag.submit.timeout.secs</name>
+ <value>300</value>
+ <description>Time (in seconds) for which the Tez AM should wait for a DAG to be submitted
+ before shutting down
+ </description>
+ </property>
+
+
+ <!-- Configuration for runtime components -->
+
+ <!-- These properties can be set on a per edge basis by configuring the payload for each
+ edge independently. -->
+
+ <property>
+ <name>tez.runtime.intermediate-output.should-compress</name>
+ <value>false</value>
+ <description>Whether intermediate output should be compressed or not</description>
+ </property>
+
+ <property>
+ <name>tez.runtime.intermediate-output.compress.codec</name>
+ <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+    <description>The codec to be used when compressing intermediate output. Only
+ applicable if tez.runtime.intermediate-output.should-compress is enabled.
+ </description>
+ </property>
+
+ <property>
+ <name>tez.runtime.intermediate-input.is-compressed</name>
+ <value>false</value>
+ <description>Whether intermediate input is compressed</description>
+ </property>
+
+ <property>
+ <name>tez.runtime.intermediate-input.compress.codec</name>
+ <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+    <description>The codec to be used when reading intermediate compressed input.
+ Only applicable if tez.runtime.intermediate-input.is-compressed is enabled.
+ </description>
+ </property>
+
+ <property>
+ <name>tez.runtime.job.counters.max</name>
+ <value>10000</value>
+ </property>
+ <property>
+ <name>tez.runtime.job.counters.groups.max</name>
+ <value>10000</value>
+ </property>
+
+ <!-- Configuration for ATS integration -->
+
+ <property>
+ <name>tez.yarn.ats.enabled</name>
+ <value>true</value>
+ <description>Whether to send history events to YARN Application Timeline Server</description>
+ </property>
+
+</configuration>
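
The grouping properties above interact: split-waves scales the task count to queue headroom, while grouping.min-size and grouping.max-size clamp each grouped split between 16 MB and 1 GB. A quick illustration of the "roughly 140%" figure from the split-waves description (the container count is hypothetical):

# tasks generated ~= waves * containers currently available to the DAG
split_waves = 1.4           # tez.am.grouping.split-waves
available_containers = 100  # hypothetical queue headroom
print(int(split_waves * available_containers))  # 140, i.e. ~140% of the containers
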
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/TEZ/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/TEZ/metainfo.xml
new file mode 100644
index 0000000000..1ca4d56911
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/TEZ/metainfo.xml
@@ -0,0 +1,63 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>TEZ</name>
+ <displayName>Tez</displayName>
+ <comment>Tez is the next generation Hadoop Query Processing framework written on top of YARN.</comment>
+ <version>0.4.0.2.1</version>
+ <components>
+ <component>
+ <name>TEZ_CLIENT</name>
+ <displayName>Tez Client</displayName>
+ <cardinality>1+</cardinality>
+ <category>CLIENT</category>
+ <commandScript>
+ <script>scripts/tez_client.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ <configFiles>
+ <configFile>
+ <type>xml</type>
+ <fileName>tez-site.xml</fileName>
+ <dictionaryName>tez-site</dictionaryName>
+ </configFile>
+ <configFile>
+ <type>env</type>
+ <fileName>tez-env.cmd</fileName>
+ <dictionaryName>tez-env</dictionaryName>
+ </configFile>
+ </configFiles>
+ </component>
+ </components>
+
+ <requiredServices>
+ <service>YARN</service>
+ </requiredServices>
+
+ <configuration-dependencies>
+ <config-type>tez-site</config-type>
+ <config-type>tez-env</config-type>
+ </configuration-dependencies>
+
+ </service>
+ </services>
+</metainfo>
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/TEZ/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/TEZ/package/scripts/params.py
new file mode 100644
index 0000000000..db34c9fa09
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/TEZ/package/scripts/params.py
@@ -0,0 +1,30 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import os
+
+config = Script.get_config()
+tez_user = "hadoop"
+tez_home_dir = None
+tez_conf_dir = "conf"
+
+if "TEZ_HOME" in os.environ:
+ tez_home_dir = os.environ["TEZ_HOME"]
+ tez_conf_dir = os.path.join(tez_home_dir, "conf")
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/TEZ/package/scripts/tez.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/TEZ/package/scripts/tez.py
new file mode 100644
index 0000000000..38cdb626e9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/TEZ/package/scripts/tez.py
@@ -0,0 +1,30 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import sys
+
+def tez():
+ import params
+ XmlConfig("tez-site.xml",
+ conf_dir=params.tez_conf_dir,
+ configurations=params.config['configurations']['tez-site'],
+ owner=params.tez_user,
+ mode="f"
+ )
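
XmlConfig above writes the tez-site dictionary out in the standard Hadoop *-site.xml shape. As a rough illustration of that serialization (this is not resource_management's actual provider, only the shape of its output; render_site_xml is a hypothetical name):

from xml.sax.saxutils import escape

def render_site_xml(configurations):
    # emit one <property> block per key, in the Hadoop *-site.xml layout
    lines = ["<configuration>"]
    for name, value in sorted(configurations.items()):
        lines.append("  <property>")
        lines.append("    <name>%s</name>" % escape(name))
        lines.append("    <value>%s</value>" % escape(str(value)))
        lines.append("  </property>")
    lines.append("</configuration>")
    return "\n".join(lines)

# render_site_xml({"tez.am.log.level": "INFO"}) yields a one-property tez-site.xml
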
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/TEZ/package/scripts/tez_client.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/TEZ/package/scripts/tez_client.py
new file mode 100644
index 0000000000..6b4a5aacf9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/TEZ/package/scripts/tez_client.py
@@ -0,0 +1,42 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+from tez import tez
+
+class TezClient(Script):
+ def install(self, env):
+ import params
+ if params.tez_home_dir is None:
+ self.install_packages(env)
+
+ def configure(self, env):
+ import params
+ env.set_params(params)
+ tez()
+
+ def status(self, env):
+ raise ClientComponentHasNoStatus()
+
+
+if __name__ == "__main__":
+ TezClient().execute()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/configuration-mapred/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/configuration-mapred/mapred-site.xml
new file mode 100644
index 0000000000..3fcb7e7507
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/configuration-mapred/mapred-site.xml
@@ -0,0 +1,239 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!-- Put site-specific property overrides in this file. -->
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
+ <!-- MR AM properties -->
+ <property>
+ <name>mapreduce.framework.name</name>
+ <value>yarn</value>
+ </property>
+ <property>
+ <name>yarn.app.mapreduce.am.staging-dir</name>
+ <value>/user</value>
+ </property>
+ <property>
+ <name>mapreduce.job.hdfs-servers</name>
+ <value>${fs.defaultFS}</value>
+ </property>
+ <property>
+ <name>mapreduce.map.speculative</name>
+ <value>false</value>
+ <description>If true, then multiple instances of some map tasks
+ may be executed in parallel.</description>
+ </property>
+ <property>
+ <name>mapreduce.reduce.speculative</name>
+ <value>false</value>
+ <description>If true, then multiple instances of some reduce tasks
+ may be executed in parallel.</description>
+ </property>
+ <property>
+ <name>mapreduce.job.reduce.slowstart.completedmaps</name>
+ <value>0.05</value>
+ <description>Fraction of the number of maps in the job which should be
+ complete before reduces are scheduled for the job.
+ </description>
+ </property>
+ <property>
+ <name>mapreduce.task.timeout</name>
+ <value>600000</value>
+ <description>The number of milliseconds before a task will be
+ terminated if it neither reads an input, writes an output, nor
+ updates its status string. A value of 0 disables the timeout.
+ </description>
+ </property>
+ <property>
+ <name>jetty.connector</name>
+ <value>org.mortbay.jetty.nio.SelectChannelConnector</value>
+ <description>No description</description>
+ </property>
+ <property>
+ <name>mapred.child.root.logger</name>
+ <value>INFO,TLA</value>
+ </property>
+ <property>
+ <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
+ <value>true</value>
+ </property>
+ <property>
+ <name>mapreduce.job.acl-view-job</name>
+ <value>*</value>
+ </property>
+ <!-- i/o properties -->
+ <property>
+ <name>io.sort.mb</name>
+ <value>200</value>
+ <description>No description</description>
+ </property>
+ <property>
+ <name>io.sort.spill.percent</name>
+ <value>0.9</value>
+ <description>No description</description>
+ </property>
+ <property>
+ <name>io.sort.factor</name>
+ <value>100</value>
+ <description>No description</description>
+ </property>
+ <!-- map tasks' properties -->
+ <property>
+ <name>mapreduce.map.output.compress</name>
+ <value>true</value>
+ <description>Should the outputs of the maps be compressed before being
+ sent across the network. Uses SequenceFile compression.
+ </description>
+ </property>
+ <property>
+ <name>mapreduce.map.output.compress.codec</name>
+ <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+ <description>If the map outputs are compressed, how should they be
+ compressed?
+ </description>
+ </property>
+ <!-- reduce tasks' properties -->
+ <property>
+ <name>mapreduce.reduce.shuffle.parallelcopies</name>
+ <value>30</value>
+ <description>The default number of parallel transfers run by reduce
+ during the copy(shuffle) phase.
+ </description>
+ </property>
+ <property>
+ <name>mapreduce.reduce.merge.inmem.threshold</name>
+ <value>1000</value>
+    <description>The threshold, in terms of the number of files,
+    for the in-memory merge process. When we accumulate the threshold number of
+    files we initiate the in-memory merge and spill to disk. A value of 0 or
+    less indicates that no threshold is applied and the merge is triggered
+    solely by the ramfs's memory consumption.
+ </description>
+ </property>
+ <property>
+ <name>mapreduce.reduce.shuffle.merge.percent</name>
+ <value>0.66</value>
+ <description>The usage threshold at which an in-memory merge will be
+ initiated, expressed as a percentage of the total memory allocated to
+ storing in-memory map outputs, as defined by
+ mapreduce.reduce.shuffle.input.buffer.percent.
+ </description>
+ </property>
+ <property>
+ <name>mapreduce.reduce.shuffle.input.buffer.percent</name>
+ <value>0.70</value>
+ <description>The percentage of memory to be allocated from the maximum heap
+ size to storing map outputs during the shuffle.
+ </description>
+ </property>
+ <!-- JobHistory Server -->
+ <property>
+ <name>mapreduce.jobhistory.intermediate-done-dir</name>
+ <value>/mapred/history/done_intermediate</value>
+ </property>
+ <property>
+ <name>mapreduce.jobhistory.done-dir</name>
+ <value>/mapred/history/done</value>
+ </property>
+ <property>
+ <name>mapreduce.jobhistory.address</name>
+ <value>localhost:10020</value>
+ </property>
+ <property>
+ <name>mapreduce.jobhistory.webapp.address</name>
+ <value>localhost:19888</value>
+ </property>
+ <property>
+ <name>mapreduce.jobhistory.webapp.https.address</name>
+ <value>localhost:19888</value>
+ </property>
+ <property>
+ <name>yarn.app.mapreduce.am.create-intermediate-jh-base-dir</name>
+ <value>false</value>
+ </property>
+ <!-- JobHistory Security Settings -->
+ <property>
+ <name>mapreduce.application.classpath</name>
+ <value>%HADOOP_CONF_DIR%,%HADOOP_COMMON_HOME%/share/hadoop/common/*,%HADOOP_COMMON_HOME%/share/hadoop/common/lib/*,%HADOOP_HDFS_HOME%/share/hadoop/hdfs/*,%HADOOP_HDFS_HOME%/share/hadoop/hdfs/lib/*,%HADOOP_MAPRED_HOME%/share/hadoop/mapreduce/*,%HADOOP_MAPRED_HOME%/share/hadoop/mapreduce/lib/*,%HADOOP_YARN_HOME%/share/hadoop/yarn/*,%HADOOP_YARN_HOME%/share/hadoop/yarn/lib/*</value>
+ <description>CLASSPATH for MR applications. A comma-separated list
+ of CLASSPATH entries</description>
+ </property>
+ <property>
+ <name>mapreduce.shuffle.ssl.enabled</name>
+ <value>false</value>
+ </property>
+ <property>
+ <name>mapreduce.ssl.enabled</name>
+ <value>false</value>
+ </property>
+ <property>
+ <name>mapreduce.job.counters.max</name>
+ <value>20000</value>
+ </property>
+ <property>
+ <name>mapreduce.job.counters.groups.max</name>
+ <value>10000</value>
+ </property>
+ <property>
+ <name>mapreduce.job.counters.group.name.max</name>
+ <value>1000</value>
+ </property>
+ <property>
+ <name>mapreduce.job.counters.counter.name.max</name>
+ <value>1000</value>
+ </property>
+ <property>
+ <name>mapreduce.cluster.local.dir</name>
+ <value>c:\hdpdata\hadoop\local</value>
+ </property>
+
+ <property>
+ <name>mapred.job.tracker.history.completed.location</name>
+ <value>/mapred/history/done</value>
+ </property>
+
+ <property>
+ <name>mapred.local.dir</name>
+ <value>c:\hdpdata\hdfs\mapred\local</value>
+ </property>
+
+ <property>
+ <name>mapreduce.map.java.opts</name>
+ <value>-Xmx756m</value>
+ </property>
+
+ <property>
+ <name>mapred.child.tmp</name>
+ <value>c:\hdp\temp\hadoop</value>
+ </property>
+
+ <property>
+ <name>mapreduce.reduce.java.opts</name>
+ <value>-Xmx756m</value>
+ </property>
+
+ <property>
+ <name>mapreduce.task.io.sort.mb</name>
+ <value>200</value>
+ <description>
+ The total amount of buffer memory to use while sorting files, in megabytes.
+ By default, gives each merge stream 1MB, which should minimize seeks.
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.app.mapreduce.am.resource.mb</name>
+ <value>512</value>
+ <description>The amount of memory the MR AppMaster needs.</description>
+ </property>
+
+ <property>
+ <name>mapreduce.map.memory.mb</name>
+ <value>1024</value>
+ <description>Virtual memory for single Map task</description>
+ </property>
+
+ <property>
+ <name>mapreduce.reduce.memory.mb</name>
+ <value>1024</value>
+ <description>Virtual memory for single Reduce task</description>
+ </property>
+</configuration> \ No newline at end of file
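
One sizing convention is visible above: the task JVM heap (-Xmx756m) is deliberately smaller than the container allocation (mapreduce.map.memory.mb and mapreduce.reduce.memory.mb, both 1024 MB), leaving headroom for non-heap JVM overhead. A quick illustrative check of that ratio:

container_mb = 1024  # mapreduce.map.memory.mb / mapreduce.reduce.memory.mb
heap_mb = 756        # -Xmx from mapreduce.{map,reduce}.java.opts
print(round(float(heap_mb) / container_mb, 2))  # 0.74, i.e. heap ~= 75% of container
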
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/configuration/capacity-scheduler.xml
new file mode 100644
index 0000000000..c2be95ba65
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/configuration/capacity-scheduler.xml
@@ -0,0 +1,114 @@
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+<configuration supports_final="false" supports_adding_forbidden="true">
+ <property>
+ <name>yarn.scheduler.capacity.maximum-applications</name>
+ <value>10000</value>
+ <description>
+ Maximum number of applications that can be pending and running.
+ </description>
+ </property>
+ <property>
+ <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
+ <value>0.1</value>
+ <description>
+ Maximum percent of resources in the cluster which can be used to run
+ application masters i.e. controls number of concurrent running
+ applications.
+ </description>
+ </property>
+ <property>
+ <name>yarn.scheduler.capacity.resource-calculator</name>
+ <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
+ <description>
+ The ResourceCalculator implementation to be used to compare
+ Resources in the scheduler.
+ The default i.e. DefaultResourceCalculator only uses Memory while
+ DominantResourceCalculator uses dominant-resource to compare
+ multi-dimensional resources such as Memory, CPU etc.
+ </description>
+ </property>
+ <property>
+ <name>yarn.scheduler.capacity.root.queues</name>
+ <value>default,joblauncher</value>
+ <description>
+      The queues at this level (root is the root queue).
+ </description>
+ </property>
+ <property>
+ <name>yarn.scheduler.capacity.root.default.capacity</name>
+ <value>95</value>
+ <description>Default queue target capacity.</description>
+ </property>
+ <property>
+ <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
+ <value>10</value>
+ <description>
+      Default queue user limit, a percentage from 0.0 to 1.0.
+ </description>
+ </property>
+ <property>
+ <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
+ <value>100</value>
+ <description>
+ The maximum capacity of the default queue.
+ </description>
+ </property>
+ <property>
+ <name>yarn.scheduler.capacity.root.default.state</name>
+ <value>RUNNING</value>
+ <description>
+ The state of the default queue. State can be one of RUNNING or STOPPED.
+ </description>
+ </property>
+ <property>
+ <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
+ <value>*</value>
+ <description>
+ The ACL of who can submit jobs to the default queue.
+ </description>
+ </property>
+ <property>
+ <name>yarn.scheduler.capacity.root.default.acl_administer_queue</name>
+ <value>*</value>
+ <description>
+ The ACL of who can administer jobs on the default queue.
+ </description>
+ </property>
+ <property>
+ <name>yarn.scheduler.capacity.node-locality-delay</name>
+ <value>40</value>
+ <description>
+ Number of missed scheduling opportunities after which the CapacityScheduler
+ attempts to schedule rack-local containers.
+      Typically this should be set to the number of nodes in the cluster. By default
+      it is set to approximately the number of nodes in one rack, which is 40.
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.capacity.root.joblauncher.capacity</name>
+ <value>5</value>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.capacity.root.joblauncher.user-limit-factor</name>
+ <value>10</value>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.capacity.root.joblauncher.maximum-capacity</name>
+ <value>50</value>
+ </property>
+</configuration> \ No newline at end of file
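
The CapacityScheduler requires sibling queue capacities under a parent to total 100, which the values above satisfy for root (default 95 + joblauncher 5). A one-line sanity check of that invariant:

# sibling capacities under the root queue must sum to exactly 100
queues = {"default": 95, "joblauncher": 5}
assert sum(queues.values()) == 100, "root queue capacities must total 100"
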
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/configuration/yarn-site.xml
new file mode 100644
index 0000000000..b22bb5a6cc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/configuration/yarn-site.xml
@@ -0,0 +1,188 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!-- Put site-specific property overrides in this file. -->
+<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
+ <property>
+ <name>yarn.nodemanager.resource.memory-mb</name>
+ <value>5120</value>
+ <description>Amount of physical memory, in MB, that can be allocated
+ for containers.</description>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.minimum-allocation-mb</name>
+ <value>512</value>
+ <description>
+ The minimum allocation for every container request at the RM,
+ in MBs. Memory requests lower than this won't take effect,
+ and the specified value will get allocated at minimum.
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.maximum-allocation-mb</name>
+ <value>2048</value>
+ <description>
+ The maximum allocation for every container request at the RM,
+ in MBs. Memory requests higher than this won't take effect,
+ and will get capped to this value.
+ </description>
+ </property>
+ <property>
+ <name>yarn.nodemanager.pmem-check-enabled</name>
+ <value>true</value>
+ </property>
+ <property>
+ <name>yarn.nodemanager.vmem-check-enabled</name>
+ <value>true</value>
+ </property>
+ <!-- NodeManager -->
+ <property>
+ <name>yarn.nodemanager.address</name>
+ <value>0.0.0.0:45454</value>
+ <description>The address of the container manager in the NM.</description>
+ </property>
+ <property>
+ <name>yarn.nodemanager.webapp.address</name>
+ <value>0.0.0.0:50060</value>
+ </property>
+ <property>
+ <name>yarn.nodemanager.vmem-pmem-ratio</name>
+ <value>2.1</value>
+ <description>Ratio between virtual memory to physical memory when
+ setting memory limits for containers. Container allocations are
+ expressed in terms of physical memory, and virtual memory usage
+ is allowed to exceed this allocation by this ratio.
+ </description>
+ </property>
+ <property>
+ <name>yarn.nodemanager.container-executor.class</name>
+ <value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value>
+ <description>ContainerExecutor for launching containers</description>
+ </property>
+ <property>
+ <name>yarn.nodemanager.aux-services</name>
+ <value>mapreduce_shuffle</value>
+    <description>Auxiliary services of NodeManager</description>
+ </property>
+ <property>
+ <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
+ <value>org.apache.hadoop.mapred.ShuffleHandler</value>
+ </property>
+ <property>
+ <name>yarn.nodemanager.container-monitor.interval-ms</name>
+ <value>3000</value>
+ <description>The interval, in milliseconds, for which the node manager
+ waits between two cycles of monitoring its containers' memory usage.
+ </description>
+ </property>
+ <property>
+ <name>yarn.nodemanager.linux-container-executor.group</name>
+ <value>hadoop</value>
+ <description>Unix group of the NodeManager</description>
+ </property>
+ <property>
+    <name>yarn.nodemanager.log.retain-seconds</name>
+ <value>604800</value>
+ </property>
+ <property>
+ <name>yarn.log-aggregation-enable</name>
+ <value>true</value>
+ </property>
+ <property>
+ <name>yarn.nodemanager.remote-app-log-dir</name>
+ <value>/app-logs</value>
+ </property>
+ <property>
+ <name>yarn.nodemanager.remote-app-log-dir-suffix</name>
+ <value>logs</value>
+ </property>
+ <property>
+ <name>yarn.nodemanager.log-aggregation.compression-type</name>
+ <value>gz</value>
+ </property>
+ <property>
+ <name>yarn.nodemanager.delete.debug-delay-sec</name>
+ <value>36000</value>
+ </property>
+ <property>
+ <description>Store class name for history store, defaulting to file system store</description>
+ <name>yarn.timeline-service.generic-application-history.store-class</name>
+ <value>org.apache.hadoop.yarn.server.applicationhistoryservice.FileSystemApplicationHistoryStore</value>
+ </property>
+ <!-- Use a directory that is set up on HDFS to store generic history -->
+ <property>
+ <description>URI pointing to the location of the FileSystem path where the history will be persisted. This must be
+ supplied when using org.apache.hadoop.yarn.server.applicationhistoryservice.FileSystemApplicationHistoryStore as
+ the value for yarn.timeline-service.generic-application-history.store-class
+ </description>
+ <name>yarn.timeline-service.generic-application-history.fs-history-store.uri</name>
+ <value>/yarn/generic-history/</value>
+ </property>
+ <property>
+ <description>T-file compression types used to compress history data.</description>
+ <name>yarn.timeline-service.generic-application-history.fs-history-store.compression-type</name>
+ <value>none</value>
+ </property>
+ <property>
+ <description>Indicate to ResourceManager as well as clients whether
+ history-service is enabled or not. If enabled, ResourceManager starts
+ recording historical data that ApplicationHistory service can consume.
+ Similarly, clients can redirect to the history service when applications
+ finish if this is enabled.
+ </description>
+ <name>yarn.timeline-service.generic-application-history.enabled</name>
+ <value>false</value>
+ </property>
+ <property>
+ <description>Indicate to clients whether timeline service is enabled or not.
+ If enabled, clients will put entities and events to the timeline server.
+ </description>
+ <name>yarn.timeline-service.enabled</name>
+ <value>false</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.scheduler.class</name>
+ <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
+ <description>The class to use as the resource scheduler.</description>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.hostname</name>
+ <value>localhost</value>
+ </property>
+
+ <property>
+ <name>yarn.nodemanager.local-dirs</name>
+ <value>c:\hdpdata\hadoop\local</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.webapp.https.address</name>
+ <value>localhost:8088</value>
+ </property>
+
+ <property>
+ <name>yarn.nodemanager.log-dirs</name>
+ <value>c:\hdpdata\hadoop\logs</value>
+ </property>
+
+ <property>
+ <name>yarn.log.server.url</name>
+ <value>http://localhost:19888/jobhistory/logs</value>
+ <description>
+ URI for the HistoryServer's log resource
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.timeline-service.hostname</name>
+ <value>localhost</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.webapp.address</name>
+ <value>localhost:8088</value>
+ </property>
+</configuration> \ No newline at end of file
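
A few numbers fall out of the yarn-site values above: each NodeManager advertises 5120 MB, so at the 512 MB minimum allocation it can host at most ten minimum-size containers, and the 2.1 vmem-pmem ratio allows such a container about 1075 MB of virtual memory before the vmem check kills it. Illustrative arithmetic only:

nm_memory_mb = 5120  # yarn.nodemanager.resource.memory-mb
min_alloc_mb = 512   # yarn.scheduler.minimum-allocation-mb
vmem_ratio = 2.1     # yarn.nodemanager.vmem-pmem-ratio

print(nm_memory_mb // min_alloc_mb)  # 10 minimum-size containers per NodeManager
print(min_alloc_mb * vmem_ratio)     # 1075.2 MB virtual memory cap per such container
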
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/metainfo.xml
new file mode 100644
index 0000000000..a1a4804b64
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/metainfo.xml
@@ -0,0 +1,224 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>YARN</name>
+ <displayName>YARN</displayName>
+ <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
+ <version>2.1.0.2.0</version>
+ <components>
+
+ <component>
+ <name>RESOURCEMANAGER</name>
+ <displayName>ResourceManager</displayName>
+ <category>MASTER</category>
+ <cardinality>1</cardinality>
+ <commandScript>
+ <script>scripts/resourcemanager.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ <customCommands>
+ <customCommand>
+ <name>DECOMMISSION</name>
+ <commandScript>
+ <script>scripts/resourcemanager.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ </customCommand>
+ <customCommand>
+ <name>REFRESHQUEUES</name>
+ <commandScript>
+ <script>scripts/resourcemanager.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ </customCommand>
+ </customCommands>
+ <configuration-dependencies>
+ <config-type>capacity-scheduler</config-type>
+ </configuration-dependencies>
+ </component>
+
+ <component>
+ <name>NODEMANAGER</name>
+ <displayName>NodeManager</displayName>
+ <category>SLAVE</category>
+ <cardinality>1+</cardinality>
+ <commandScript>
+ <script>scripts/nodemanager.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ </component>
+
+ <component>
+ <name>YARN_CLIENT</name>
+ <displayName>Yarn Client</displayName>
+ <category>CLIENT</category>
+ <cardinality>1+</cardinality>
+ <commandScript>
+ <script>scripts/yarn_client.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ <configFiles>
+ <configFile>
+ <type>xml</type>
+ <fileName>yarn-site.xml</fileName>
+ <dictionaryName>yarn-site</dictionaryName>
+ </configFile>
+ <configFile>
+ <type>xml</type>
+ <fileName>core-site.xml</fileName>
+ <dictionaryName>core-site</dictionaryName>
+ </configFile>
+ <configFile>
+ <type>env</type>
+ <fileName>yarn-env.cmd</fileName>
+ <dictionaryName>yarn-env</dictionaryName>
+ </configFile>
+ <configFile>
+ <type>env</type>
+ <fileName>log4j.properties</fileName>
+ <dictionaryName>hdfs-log4j,yarn-log4j</dictionaryName>
+ </configFile>
+ <configFile>
+ <type>xml</type>
+ <fileName>capacity-scheduler.xml</fileName>
+ <dictionaryName>capacity-scheduler</dictionaryName>
+ </configFile>
+ </configFiles>
+ </component>
+ </components>
+
+ <commandScript>
+ <script>scripts/service_check.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>300</timeout>
+ </commandScript>
+
+ <requiredServices>
+ <service>HDFS</service>
+ </requiredServices>
+
+ <configuration-dependencies>
+ <config-type>yarn-site</config-type>
+ <config-type>yarn-env</config-type>
+ <config-type>core-site</config-type>
+ <config-type>yarn-log4j</config-type>
+ </configuration-dependencies>
+ </service>
+
+ <service>
+ <name>MAPREDUCE2</name>
+ <displayName>MapReduce2</displayName>
+ <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
+ <version>2.1.0.2.0.6.0</version>
+ <components>
+ <component>
+ <name>HISTORYSERVER</name>
+ <displayName>History Server</displayName>
+ <category>MASTER</category>
+ <cardinality>1</cardinality>
+ <auto-deploy>
+ <enabled>true</enabled>
+ <co-locate>YARN/RESOURCEMANAGER</co-locate>
+ </auto-deploy>
+ <dependencies>
+ <dependency>
+ <name>HDFS/HDFS_CLIENT</name>
+ <scope>host</scope>
+ <auto-deploy>
+ <enabled>true</enabled>
+ </auto-deploy>
+ </dependency>
+ </dependencies>
+ <commandScript>
+ <script>scripts/historyserver.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ </component>
+
+ <component>
+ <name>MAPREDUCE2_CLIENT</name>
+ <displayName>MapReduce2 Client</displayName>
+ <category>CLIENT</category>
+ <cardinality>0+</cardinality>
+ <commandScript>
+ <script>scripts/mapreduce2_client.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ <configFiles>
+ <configFile>
+ <type>xml</type>
+ <fileName>mapred-site.xml</fileName>
+ <dictionaryName>mapred-site</dictionaryName>
+ </configFile>
+ <configFile>
+ <type>xml</type>
+ <fileName>core-site.xml</fileName>
+ <dictionaryName>core-site</dictionaryName>
+ </configFile>
+ <configFile>
+ <type>env</type>
+ <fileName>mapred-env.cmd</fileName>
+ <dictionaryName>mapred-env</dictionaryName>
+ </configFile>
+ </configFiles>
+ </component>
+ </components>
+
+ <osSpecifics>
+ <osSpecific>
+ <osFamily>any</osFamily>
+ <packages>
+ <package>
+ <name>hadoop-mapreduce</name>
+ </package>
+ </packages>
+ </osSpecific>
+ </osSpecifics>
+
+ <commandScript>
+ <script>scripts/mapred_service_check.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>300</timeout>
+ </commandScript>
+
+ <requiredServices>
+ <service>YARN</service>
+ </requiredServices>
+
+ <configuration-dir>configuration-mapred</configuration-dir>
+
+ <configuration-dependencies>
+ <config-type>core-site</config-type>
+ <config-type>mapred-site</config-type>
+ <config-type>mapred-env</config-type>
+ </configuration-dependencies>
+ </service>
+
+ </services>
+</metainfo>
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/metrics.json b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/metrics.json
new file mode 100644
index 0000000000..24cf6a38b0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/metrics.json
@@ -0,0 +1,3138 @@
+{
+ "NODEMANAGER": {
+ "HostComponent": [
+ {
+ "type": "jmx",
+ "metrics": {
+ "metrics/jvm/memHeapCommittedM": {
+ "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemHeapCommittedM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsRunnable": {
+ "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsRunnable",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsNew": {
+ "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsNew",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/memNonHeapCommittedM": {
+ "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemNonHeapCommittedM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/callQueueLen": {
+ "metric": "Hadoop:service=NodeManager,name=RpcActivity.CallQueueLength",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rpcAuthorizationFailures": {
+ "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthorizationFailures",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcQueueTime_avg_time": {
+ "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcQueueTimeAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/ugi/loginSuccess_avg_time": {
+ "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginSuccessAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/SentBytes": {
+ "metric": "Hadoop:service=NodeManager,name=RpcActivity.SentBytes",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/logInfo": {
+ "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogInfo",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/memNonHeapUsedM": {
+ "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemNonHeapUsedM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/logWarn": {
+ "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogWarn",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/ugi/loginFailure_num_ops": {
+ "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginFailureNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsTimedWaiting": {
+ "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsTimedWaiting",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcProcessingTime_num_ops": {
+ "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcProcessingTimeNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/gcCount": {
+ "metric": "Hadoop:service=NodeManager,name=JvmMetrics.GcCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/ReceivedBytes": {
+ "metric": "Hadoop:service=NodeManager,name=RpcActivity.ReceivedBytes",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsBlocked": {
+ "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsBlocked",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcQueueTime_num_ops": {
+ "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcQueueTimeNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/logError": {
+ "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogError",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/ugi/loginFailure_avg_time": {
+ "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginFailureAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rpcAuthorizationSuccesses": {
+ "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthorizationSuccesses",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/NumOpenConnections": {
+ "metric": "Hadoop:service=NodeManager,name=RpcActivity.NumOpenConnections",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/memHeapUsedM": {
+ "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemHeapUsedM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/logFatal": {
+ "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogFatal",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcProcessingTime_avg_time": {
+ "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcProcessingTimeAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsWaiting": {
+ "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsWaiting",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/gcTimeMillis": {
+ "metric": "Hadoop:service=NodeManager,name=JvmMetrics.GcTimeMillis",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/ugi/loginSuccess_num_ops": {
+ "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginSuccessNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rpcAuthenticationSuccesses": {
+ "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthenticationSuccesses",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rpcAuthenticationFailures": {
+ "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthenticationFailures",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsTerminated": {
+ "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsTerminated",
+ "pointInTime": true,
+ "temporal": false
+ }
+ }
+ }
+ ]
+ },
+ "RESOURCEMANAGER": {
+ "Component": [
+ {
+ "type": "jmx",
+ "metrics": {
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsFailed": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsFailed",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "ServiceComponentInfo/rm_metrics/cluster/rebootedNMcount": {
+ "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumRebootedNMs",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/memHeapCommittedM": {
+ "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemHeapCommittedM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/ClusterMetrics/NumUnhealthyNMs": {
+ "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumUnhealthyNMs",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/HeapMemoryMax":{
+ "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
+ "pointInTime" : true,
+ "temporal" : false
+ },
+ "metrics/jvm/HeapMemoryUsed":{
+ "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
+ "pointInTime" : true,
+ "temporal" : false
+ },
+ "metrics/jvm/NonHeapMemoryMax":{
+ "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
+ "pointInTime" : true,
+ "temporal" : false
+ },
+ "metrics/jvm/NonHeapMemoryUsed":{
+ "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
+ "pointInTime" : true,
+ "temporal" : false
+ },
+ "metrics/jvm/threadsRunnable": {
+ "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsRunnable",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsNew": {
+ "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsNew",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/ClusterMetrics/NumRebootedNMs": {
+ "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumRebootedNMs",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/runtime/StartTime": {
+ "metric": "java.lang:type=Runtime.StartTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsKilled": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsKilled",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rpcAuthorizationFailures": {
+ "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcAuthorizationFailures",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AggregateContainersAllocated": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AggregateContainersAllocated",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/ClusterMetrics/NumLostNMs": {
+ "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumLostNMs",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/ugi/loginSuccess_avg_time": {
+ "metric": "Hadoop:service=ResourceManager,name=UgiMetrics.LoginSuccessAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "ServiceComponentInfo/StartTime": {
+ "metric": "java.lang:type=Runtime.StartTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcQueueTime_avg_time": {
+ "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcQueueTimeAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ReservedContainers": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ReservedContainers",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsSubmitted": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsSubmitted",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/SentBytes": {
+ "metric": "Hadoop:service=ResourceManager,name=RpcActivity.SentBytes",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/ClusterMetrics/NumActiveNMs": {
+ "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumActiveNMs",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/running_300": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).running_300",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/memNonHeapUsedM": {
+ "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemNonHeapUsedM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/logWarn": {
+ "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.LogWarn",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsTimedWaiting": {
+ "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsTimedWaiting",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/gcCount": {
+ "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.GcCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/ReceivedBytes": {
+ "metric": "Hadoop:service=ResourceManager,name=RpcActivity.ReceivedBytes",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsBlocked": {
+ "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsBlocked",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/running_60": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).running_60",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcQueueTime_num_ops": {
+ "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcQueueTimeNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/ClusterMetrics/NumDecommissionedNMs": {
+ "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumDecommissionedNMs",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AllocatedContainers": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AllocatedContainers",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/PendingContainers": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).PendingContainers",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/NumOpenConnections": {
+ "metric": "Hadoop:service=ResourceManager,name=RpcActivity.NumOpenConnections",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/memHeapUsedM": {
+ "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemHeapUsedM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsWaiting": {
+ "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsWaiting",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/gcTimeMillis": {
+ "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.GcTimeMillis",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/ugi/loginSuccess_num_ops": {
+ "metric": "Hadoop:service=ResourceManager,name=UgiMetrics.LoginSuccessNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsTerminated": {
+ "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsTerminated",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/memMaxM": {
+ "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemMaxM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "ServiceComponentInfo/rm_metrics/cluster/unhealthyNMcount": {
+ "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumUnhealthyNMs",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ReservedVCores": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ReservedVCores",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "ServiceComponentInfo/rm_metrics/cluster/decommissionedNMcount": {
+ "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumDecommissionedNMs",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/startTime": {
+ "metric": "java.lang:type=Runtime.StartTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ActiveApplications": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ActiveApplications",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AvailableMB": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AvailableMB",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/memNonHeapCommittedM": {
+ "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemNonHeapCommittedM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "ServiceComponentInfo/rm_metrics/cluster/nodeManagers": {
+ "metric": "Hadoop:service=ResourceManager,name=RMNMInfo.LiveNodeManagers",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/callQueueLen": {
+ "metric": "Hadoop:service=ResourceManager,name=RpcActivity.CallQueueLength",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AllocatedVCores": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AllocatedVCores",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsPending": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsPending",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsCompleted": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsCompleted",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ActiveUsers": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ActiveUsers",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/logInfo": {
+ "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.LogInfo",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsRunning": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsRunning",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/running_1440": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).running_1440",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AvailableVCores": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AvailableVCores",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/ugi/loginFailure_num_ops": {
+ "metric": "Hadoop:service=ResourceManager,name=UgiMetrics.LoginFailureNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcProcessingTime_num_ops": {
+ "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcProcessingTimeNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ReservedMB": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ReservedMB",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/logError": {
+ "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.LogError",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/PendingMB": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).PendingMB",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/ugi/loginFailure_avg_time": {
+ "metric": "Hadoop:service=ResourceManager,name=UgiMetrics.LoginFailureAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rpcAuthorizationSuccesses": {
+ "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcAuthorizationSuccesses",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/logFatal": {
+ "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.LogFatal",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcProcessingTime_avg_time": {
+ "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcProcessingTimeAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "ServiceComponentInfo/rm_metrics/cluster/activeNMcount": {
+ "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumActiveNMs",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rpcAuthenticationSuccesses": {
+ "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcAuthenticationSuccesses",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AggregateContainersReleased": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AggregateContainersReleased",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rpcAuthenticationFailures": {
+ "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcAuthenticationFailures",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "ServiceComponentInfo/rm_metrics/cluster/lostNMcount": {
+ "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumLostNMs",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AllocatedMB": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AllocatedMB",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/PendingVCores": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).PendingVCores",
+ "pointInTime": true,
+ "temporal": false
+ }
+ }
+ }
+ ],
+ "HostComponent": [
+ {
+ "type": "jmx",
+ "metrics": {
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsFailed": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsFailed",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/memHeapCommittedM": {
+ "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemHeapCommittedM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/ClusterMetrics/NumUnhealthyNMs": {
+ "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumUnhealthyNMs",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsRunnable": {
+ "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsRunnable",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsNew": {
+ "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsNew",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/ClusterMetrics/NumRebootedNMs": {
+ "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumRebootedNMs",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsKilled": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsKilled",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rpcAuthorizationFailures": {
+ "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcAuthorizationFailures",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AggregateContainersAllocated": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AggregateContainersAllocated",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/ClusterMetrics/NumLostNMs": {
+ "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumLostNMs",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcQueueTime_avg_time": {
+ "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcQueueTimeAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/ugi/loginSuccess_avg_time": {
+ "metric": "Hadoop:service=ResourceManager,name=UgiMetrics.LoginSuccessAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ReservedContainers": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ReservedContainers",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsSubmitted": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsSubmitted",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/SentBytes": {
+ "metric": "Hadoop:service=ResourceManager,name=RpcActivity.SentBytes",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/ClusterMetrics/NumActiveNMs": {
+ "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumActiveNMs",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/running_300": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).running_300",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/HeapMemoryMax":{
+ "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
+ "pointInTime" : true,
+ "temporal" : false
+ },
+ "metrics/jvm/HeapMemoryUsed":{
+ "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
+ "pointInTime" : true,
+ "temporal" : false
+ },
+ "metrics/jvm/NonHeapMemoryMax":{
+ "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
+ "pointInTime" : true,
+ "temporal" : false
+ },
+ "metrics/jvm/NonHeapMemoryUsed":{
+ "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
+ "pointInTime" : true,
+ "temporal" : false
+ },
+ "metrics/jvm/memNonHeapUsedM": {
+ "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemNonHeapUsedM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/logWarn": {
+ "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.LogWarn",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsTimedWaiting": {
+ "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsTimedWaiting",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/gcCount": {
+ "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.GcCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/ReceivedBytes": {
+ "metric": "Hadoop:service=ResourceManager,name=RpcActivity.ReceivedBytes",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsBlocked": {
+ "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsBlocked",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/running_60": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).running_60",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcQueueTime_num_ops": {
+ "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcQueueTimeNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/ClusterMetrics/NumDecommissionedNMs": {
+ "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumDecommissionedNMs",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AllocatedContainers": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AllocatedContainers",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/PendingContainers": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).PendingContainers",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/NumOpenConnections": {
+ "metric": "Hadoop:service=ResourceManager,name=RpcActivity.NumOpenConnections",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/memHeapUsedM": {
+ "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemHeapUsedM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsWaiting": {
+ "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsWaiting",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/gcTimeMillis": {
+ "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.GcTimeMillis",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/ugi/loginSuccess_num_ops": {
+ "metric": "Hadoop:service=ResourceManager,name=UgiMetrics.LoginSuccessNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/threadsTerminated": {
+ "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsTerminated",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/memMaxM": {
+ "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemMaxM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ReservedVCores": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ReservedVCores",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ActiveApplications": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ActiveApplications",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AvailableMB": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AvailableMB",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/memNonHeapCommittedM": {
+ "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemNonHeapCommittedM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/callQueueLen": {
+ "metric": "Hadoop:service=ResourceManager,name=RpcActivity.CallQueueLength",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsPending": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsPending",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AllocatedVCores": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AllocatedVCores",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsCompleted": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsCompleted",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ActiveUsers": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ActiveUsers",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/logInfo": {
+ "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.LogInfo",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsRunning": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsRunning",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/running_1440": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).running_1440",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AvailableVCores": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AvailableVCores",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/ugi/loginFailure_num_ops": {
+ "metric": "Hadoop:service=ResourceManager,name=UgiMetrics.LoginFailureNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcProcessingTime_num_ops": {
+ "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcProcessingTimeNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ReservedMB": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ReservedMB",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/logError": {
+ "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.LogError",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/PendingMB": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).PendingMB",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/ugi/loginFailure_avg_time": {
+ "metric": "Hadoop:service=ResourceManager,name=UgiMetrics.LoginFailureAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rpcAuthorizationSuccesses": {
+ "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcAuthorizationSuccesses",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/logFatal": {
+ "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.LogFatal",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcProcessingTime_avg_time": {
+ "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcProcessingTimeAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rpcAuthenticationSuccesses": {
+ "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcAuthenticationSuccesses",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AggregateContainersReleased": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AggregateContainersReleased",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/rpcAuthenticationFailures": {
+ "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcAuthenticationFailures",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AllocatedMB": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AllocatedMB",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/PendingVCores": {
+ "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).PendingVCores",
+ "pointInTime": true,
+ "temporal": false
+ }
+ }
+ }
+ ]
+ },
+ "HISTORYSERVER": {
+ "Component": [
+ {
+ "type": "jmx",
+ "metrics": {
+ "metrics/rpc/ReceivedBytes": {
+ "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.ReceivedBytes",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/SentBytes": {
+ "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.SentBytes",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcQueueTimeNumOps": {
+ "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcQueueTimeNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcQueueTimeAvgTime": {
+ "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcQueueTimeAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcProcessingTimeNumOps": {
+ "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcProcessingTimeNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcProcessingTimeAvgTime": {
+ "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcProcessingTimeAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcAuthenticationFailures": {
+ "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthenticationFailures",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcAuthenticationSuccesses": {
+ "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthenticationSuccesses",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcAuthorizationFailures": {
+ "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthorizationFailures",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcAuthorizationSuccesses": {
+ "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthorizationSuccesses",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/NumOpenConnections": {
+ "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.NumOpenConnections",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/CallQueueLength": {
+ "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.CallQueueLength",
+ "pointInTime": true,
+ "temporal": false
+ },
+
+ "metrics/jvm/MemNonHeapUsedM": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemNonHeapUsedM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/MemNonHeapCommittedM": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemNonHeapCommittedM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/MemHeapUsedM": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemHeapUsedM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/MemHeapCommittedM": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemHeapCommittedM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/MemMaxM": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemMaxM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/GcCountCopy": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcCountCopy",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/GcTimeMillisCopy": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcTimeMillisCopy",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/GcCountMarkSweepCompact": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcCountMarkSweepCompact",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/GcTimeMillisMarkSweepCompact": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcTimeMillisMarkSweepCompact",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/GcCount": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/GcTimeMillis": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcTimeMillis",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/ThreadsNew": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsNew",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/ThreadsRunnable": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsRunnable",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/ThreadsBlocked": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsBlocked",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/ThreadsWaiting": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsWaiting",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/ThreadsTimedWaiting": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsTimedWaiting",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/ThreadsTerminated": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsTerminated",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/LogFatal": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogFatal",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/LogError": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogError",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/LogWarn": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogWarn",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/LogInfo": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogInfo",
+ "pointInTime": true,
+ "temporal": false
+ },
+
+ "metrics/Memory/HeapMemoryMax":{
+ "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
+ "pointInTime" : true,
+ "temporal" : false
+ },
+ "metrics/Memory/HeapMemoryUsed":{
+ "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
+ "pointInTime" : true,
+ "temporal" : false
+ },
+ "metrics/Memory/HeapMemoryCommitted":{
+ "metric": "java.lang:type=Memory.HeapMemoryUsage[committed]",
+ "pointInTime" : true,
+ "temporal" : false
+ },
+ "metrics/Memory/HeapMemoryInit":{
+ "metric": "java.lang:type=Memory.HeapMemoryUsage[init]",
+ "pointInTime" : true,
+ "temporal" : false
+ },
+ "metrics/Memory/NonHeapMemoryMax":{
+ "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
+ "pointInTime" : true,
+ "temporal" : false
+ },
+ "metrics/Memory/NonHeapMemoryUsed":{
+ "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
+ "pointInTime" : true,
+ "temporal" : false
+ },
+ "metrics/Memory/NonHeapMemoryCommitted":{
+ "metric": "java.lang:type=Memory.NonHeapMemoryUsage[committed]",
+ "pointInTime" : true,
+ "temporal" : false
+ },
+ "metrics/Memory/NonHeapMemoryInit":{
+ "metric": "java.lang:type=Memory.NonHeapMemoryUsage[init]",
+ "pointInTime" : true,
+ "temporal" : false
+ },
+
+ "metrics/MBeanServerDelegate/MBeanServerId": {
+ "metric": "JMImplementation:type=MBeanServerDelegate.MBeanServerId",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MBeanServerDelegate/SpecificationName": {
+ "metric": "JMImplementation:type=MBeanServerDelegate.SpecificationName",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MBeanServerDelegate/SpecificationVersion": {
+ "metric": "JMImplementation:type=MBeanServerDelegate.SpecificationVersion",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MBeanServerDelegate/SpecificationVendor": {
+ "metric": "JMImplementation:type=MBeanServerDelegate.SpecificationVendor",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MBeanServerDelegate/ImplementationName": {
+ "metric": "JMImplementation:type=MBeanServerDelegate.ImplementationName",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MBeanServerDelegate/ImplementationVersion": {
+ "metric": "JMImplementation:type=MBeanServerDelegate.ImplementationVersion",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MBeanServerDelegate/ImplementationVendor": {
+ "metric": "JMImplementation:type=MBeanServerDelegate.ImplementationVendor",
+ "pointInTime": true,
+ "temporal": false
+ },
+
+ "metrics/StartupProgress/ElapsedTime": {
+ "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.ElapsedTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/StartupProgress/PercentComplete": {
+ "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.PercentComplete",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/StartupProgress/LoadingFsImageCount": {
+ "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImageCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/StartupProgress/LoadingFsImageElapsedTime": {
+ "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImageElapsedTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/StartupProgress/LoadingFsImageTotal": {
+ "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImageTotal",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/StartupProgress/LoadingFsImagePercentComplete": {
+ "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImagePercentComplete",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/StartupProgress/LoadingEditsCount": {
+ "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/StartupProgress/LoadingEditsElapsedTime": {
+ "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsElapsedTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/StartupProgress/LoadingEditsTotal": {
+ "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsTotal",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/StartupProgress/LoadingEditsPercentComplete": {
+ "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsPercentComplete",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/StartupProgress/SavingCheckpointCount": {
+ "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/StartupProgress/SavingCheckpointElapsedTime": {
+ "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointElapsedTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/StartupProgress/SavingCheckpointTotal": {
+ "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointTotal",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/StartupProgress/SavingCheckpointPercentComplete": {
+ "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointPercentComplete",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/StartupProgress/SafeModeCount": {
+ "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModeCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/StartupProgress/SafeModeElapsedTime": {
+ "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModeElapsedTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/StartupProgress/SafeModeTotal": {
+ "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModeTotal",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/StartupProgress/SafeModePercentComplete": {
+ "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModePercentComplete",
+ "pointInTime": true,
+ "temporal": false
+ },
+
+ "metrics/HotSpotDiagnostic/DiagnosticOptions": {
+ "metric": "com.sun.management:type=HotSpotDiagnostic.DiagnosticOptions",
+ "pointInTime": true,
+ "temporal": false
+ },
+
+ "metrics/MemoryManager/MemoryPoolNames": {
+ "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.MemoryPoolNames",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryManager/Name": {
+ "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.Name",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryManager/Valid": {
+ "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.Valid",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryManager/ObjectName": {
+ "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.ObjectName",
+ "pointInTime": true,
+ "temporal": false
+ },
+
+ "metrics/Logging/LoggerNames": {
+ "metric": "java.util.logging:type=Logging.LoggerNames",
+ "pointInTime": true,
+ "temporal": false
+ },
+
+ "metrics/UgiMetrics/LoginSuccessNumOps": {
+ "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginSuccessNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/UgiMetrics/LoginSuccessAvgTime": {
+ "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginSuccessAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/UgiMetrics/LoginFailureNumOps": {
+ "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginFailureNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/UgiMetrics/LoginFailureAvgTime": {
+ "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginFailureAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+
+ "metrics/MemoryPool/SurvivorSpace/CollectionUsage": {
+ "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsage",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/SurvivorSpace/CollectionUsageThreshold": {
+ "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThreshold",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/SurvivorSpace/CollectionUsageThresholdCount": {
+ "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThresholdCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/SurvivorSpace/MemoryManagerNames": {
+ "metric": "java.lang:type=MemoryPool,name=Survivor Space.MemoryManagerNames",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/SurvivorSpace/PeakUsage": {
+ "metric": "java.lang:type=MemoryPool,name=Survivor Space.PeakUsage",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/SurvivorSpace/Usage": {
+ "metric": "java.lang:type=MemoryPool,name=Survivor Space.Usage",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/SurvivorSpace/CollectionUsageThresholdExceeded": {
+ "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThresholdExceeded",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/SurvivorSpace/CollectionUsageThresholdSupported": {
+ "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThresholdSupported",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/SurvivorSpace/UsageThresholdSupported": {
+ "metric": "java.lang:type=MemoryPool,name=Survivor Space.UsageThresholdSupported",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/SurvivorSpace/Name": {
+ "metric": "java.lang:type=MemoryPool,name=Survivor Space.Name",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/SurvivorSpace/Type": {
+ "metric": "java.lang:type=MemoryPool,name=Survivor Space.Type",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/SurvivorSpace/Valid": {
+ "metric": "java.lang:type=MemoryPool,name=Survivor Space.Valid",
+ "pointInTime": true,
+ "temporal": false
+ },
+
+ "metrics/Threading/ThreadAllocatedMemoryEnabled": {
+ "metric": "java.lang:type=Threading.ThreadAllocatedMemoryEnabled",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Threading/ThreadAllocatedMemorySupported": {
+ "metric": "java.lang:type=Threading.ThreadAllocatedMemorySupported",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Threading/DaemonThreadCount": {
+ "metric": "java.lang:type=Threading.DaemonThreadCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Threading/PeakThreadCount": {
+ "metric": "java.lang:type=Threading.PeakThreadCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Threading/CurrentThreadCpuTimeSupported": {
+ "metric": "java.lang:type=Threading.CurrentThreadCpuTimeSupported",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Threading/ObjectMonitorUsageSupported": {
+ "metric": "java.lang:type=Threading.ObjectMonitorUsageSupported",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Threading/SynchronizerUsageSupported": {
+ "metric": "java.lang:type=Threading.SynchronizerUsageSupported",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Threading/ThreadContentionMonitoringSupported": {
+ "metric": "java.lang:type=Threading.ThreadContentionMonitoringSupported",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Threading/ThreadCpuTimeEnabled": {
+ "metric": "java.lang:type=Threading.ThreadCpuTimeEnabled",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Threading/CurrentThreadCpuTime": {
+ "metric": "java.lang:type=Threading.CurrentThreadCpuTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Threading/CurrentThreadUserTime": {
+ "metric": "java.lang:type=Threading.CurrentThreadUserTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Threading/ThreadCount": {
+ "metric": "java.lang:type=Threading.ThreadCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Threading/TotalStartedThreadCount": {
+ "metric": "java.lang:type=Threading.TotalStartedThreadCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Threading/ThreadCpuTimeSupported": {
+ "metric": "java.lang:type=Threading.ThreadCpuTimeSupported",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Threading/ThreadContentionMonitoringEnabled": {
+ "metric": "java.lang:type=Threading.ThreadContentionMonitoringEnabled",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Threading/AllThreadIds": {
+ "metric": "java.lang:type=Threading.AllThreadIds",
+ "pointInTime": true,
+ "temporal": false
+ },
+
+ "metrics/ClassLoading/LoadedClassCount": {
+ "metric": "java.lang:type=ClassLoading.LoadedClassCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/ClassLoading/UnloadedClassCount": {
+ "metric": "java.lang:type=ClassLoading.UnloadedClassCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/ClassLoading/TotalLoadedClassCount": {
+ "metric": "java.lang:type=ClassLoading.TotalLoadedClassCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/ClassLoading/Verbose": {
+ "metric": "java.lang:type=ClassLoading.Verbose",
+ "pointInTime": true,
+ "temporal": false
+ },
+
+ "metrics/Runtime/BootClassPath": {
+ "metric": "java.lang:type=Runtime.BootClassPath",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Runtime/LibraryPath": {
+ "metric": "java.lang:type=Runtime.LibraryPath",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Runtime/VmName": {
+ "metric": "java.lang:type=Runtime.VmName",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Runtime/VmVendor": {
+ "metric": "java.lang:type=Runtime.VmVendor",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Runtime/VmVersion": {
+ "metric": "java.lang:type=Runtime.VmVersion",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Runtime/BootClassPathSupported": {
+ "metric": "java.lang:type=Runtime.BootClassPathSupported",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Runtime/StartTime": {
+ "metric": "java.lang:type=Runtime.StartTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Runtime/InputArguments": {
+ "metric": "java.lang:type=Runtime.InputArguments",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Runtime/ManagementSpecVersion": {
+ "metric": "java.lang:type=Runtime.ManagementSpecVersion",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Runtime/SpecName": {
+ "metric": "java.lang:type=Runtime.SpecName",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Runtime/SpecVendor": {
+ "metric": "java.lang:type=Runtime.SpecVendor",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Runtime/SpecVersion": {
+ "metric": "java.lang:type=Runtime.SpecVersion",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Runtime/SystemProperties": {
+ "metric": "java.lang:type=Runtime.SystemProperties",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Runtime/Uptime": {
+ "metric": "java.lang:type=Runtime.Uptime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Runtime/Name": {
+ "metric": "java.lang:type=Runtime.Name",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Runtime/ClassPath": {
+ "metric": "java.lang:type=Runtime.ClassPath",
+ "pointInTime": true,
+ "temporal": false
+ },
+
+ "metrics/OperatingSystem/MaxFileDescriptorCount": {
+ "metric": "java.lang:type=OperatingSystem.MaxFileDescriptorCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/OperatingSystem/OpenFileDescriptorCount": {
+ "metric": "java.lang:type=OperatingSystem.OpenFileDescriptorCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/OperatingSystem/CommittedVirtualMemorySize": {
+ "metric": "java.lang:type=OperatingSystem.CommittedVirtualMemorySize",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/OperatingSystem/FreePhysicalMemorySize": {
+ "metric": "java.lang:type=OperatingSystem.FreePhysicalMemorySize",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/OperatingSystem/FreeSwapSpaceSize": {
+ "metric": "java.lang:type=OperatingSystem.FreeSwapSpaceSize",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/OperatingSystem/ProcessCpuLoad": {
+ "metric": "java.lang:type=OperatingSystem.ProcessCpuLoad",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/OperatingSystem/ProcessCpuTime": {
+ "metric": "java.lang:type=OperatingSystem.ProcessCpuTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/OperatingSystem/SystemCpuLoad": {
+ "metric": "java.lang:type=OperatingSystem.SystemCpuLoad",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/OperatingSystem/TotalPhysicalMemorySize": {
+ "metric": "java.lang:type=OperatingSystem.TotalPhysicalMemorySize",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/OperatingSystem/TotalSwapSpaceSize": {
+ "metric": "java.lang:type=OperatingSystem.TotalSwapSpaceSize",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/OperatingSystem/AvailableProcessors": {
+ "metric": "java.lang:type=OperatingSystem.AvailableProcessors",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/OperatingSystem/Version": {
+ "metric": "java.lang:type=OperatingSystem.Version",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/OperatingSystem/Arch": {
+ "metric": "java.lang:type=OperatingSystem.Arch",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/OperatingSystem/SystemLoadAverage": {
+ "metric": "java.lang:type=OperatingSystem.SystemLoadAverage",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/OperatingSystem/Name": {
+ "metric": "java.lang:type=OperatingSystem.Name",
+ "pointInTime": true,
+ "temporal": false
+ },
+
+ "metrics/MemoryPool/PermGen/CollectionUsage": {
+ "metric": "java.lang:type=MemoryPool,name=Perm Gen.CollectionUsage",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/PermGen/CollectionUsageThreshold": {
+ "metric": "java.lang:type=MemoryPool,name=Perm Gen.CollectionUsageThreshold",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/PermGen/CollectionUsageThresholdCount": {
+ "metric": "java.lang:type=MemoryPool,name=Perm Gen.CollectionUsageThresholdCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/PermGen/MemoryManagerNames": {
+ "metric": "java.lang:type=MemoryPool,name=Perm Gen.MemoryManagerNames",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/PermGen/PeakUsage": {
+ "metric": "java.lang:type=MemoryPool,name=Perm Gen.PeakUsage",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/PermGen/Usage": {
+ "metric": "java.lang:type=MemoryPool,name=Perm Gen.Usage",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/PermGen/UsageThreshold": {
+ "metric": "java.lang:type=MemoryPool,name=Perm Gen.UsageThreshold",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/PermGen/UsageThresholdCount": {
+ "metric": "java.lang:type=MemoryPool,name=Perm Gen.UsageThresholdCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/PermGen/CollectionUsageThresholdExceeded": {
+ "metric": "java.lang:type=MemoryPool,name=Perm Gen.CollectionUsageThresholdExceeded",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/PermGen/CollectionUsageThresholdSupported": {
+ "metric": "java.lang:type=MemoryPool,name=Perm Gen.CollectionUsageThresholdSupported",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/PermGen/UsageThresholdExceeded": {
+ "metric": "java.lang:type=MemoryPool,name=Perm Gen.UsageThresholdExceeded",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/PermGen/UsageThresholdSupported": {
+ "metric": "java.lang:type=MemoryPool,name=Perm Gen.UsageThresholdSupported",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/PermGen/Name": {
+ "metric": "java.lang:type=MemoryPool,name=Perm Gen.Name",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/PermGen/Type": {
+ "metric": "java.lang:type=MemoryPool,name=Perm Gen.Type",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/PermGen/Valid": {
+ "metric": "java.lang:type=MemoryPool,name=Perm Gen.Valid",
+ "pointInTime": true,
+ "temporal": false
+ },
+
+ "metrics/BufferPool/mapred/Count": {
+ "metric": "java.nio:type=BufferPool,name=mapped.Count",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/BufferPool/mapred/MemoryUsed": {
+ "metric": "java.nio:type=BufferPool,name=mapped.MemoryUsed",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/BufferPool/mapred/TotalCapacity": {
+ "metric": "java.nio:type=BufferPool,name=mapped.TotalCapacity",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/BufferPool/mapred/Name": {
+ "metric": "java.nio:type=BufferPool,name=mapped.Name",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/BufferPool/direct/Count": {
+ "metric": "java.nio:type=BufferPool,name=direct.Count",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/BufferPool/direct/MemoryUsed": {
+ "metric": "java.nio:type=BufferPool,name=direct.MemoryUsed",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/BufferPool/direct/TotalCapacity": {
+ "metric": "java.nio:type=BufferPool,name=direct.TotalCapacity",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/BufferPool/direct/Name": {
+ "metric": "java.nio:type=BufferPool,name=direct.Name",
+ "pointInTime": true,
+ "temporal": false
+ },
+
+ "metrics/GarbageCollector/MarkSweepCompact/LastGcInfo": {
+ "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.LastGcInfo",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/GarbageCollector/MarkSweepCompact/CollectionCount": {
+ "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.CollectionCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/GarbageCollector/MarkSweepCompact/CollectionTime": {
+ "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.CollectionTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/GarbageCollector/MarkSweepCompact/MemoryPoolNames": {
+ "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.MemoryPoolNames",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/GarbageCollector/MarkSweepCompact/Name": {
+ "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.Name",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/GarbageCollector/MarkSweepCompact/Valid": {
+ "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.Valid",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/GarbageCollector/Copy/LastGcInfo": {
+ "metric": "java.lang:type=GarbageCollector,name=Copy.LastGcInfo",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/GarbageCollector/Copy/CollectionCount": {
+ "metric": "java.lang:type=GarbageCollector,name=Copy.CollectionCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/GarbageCollector/Copy/CollectionTime": {
+ "metric": "java.lang:type=GarbageCollector,name=Copy.CollectionTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/GarbageCollector/Copy/MemoryPoolNames": {
+ "metric": "java.lang:type=GarbageCollector,name=Copy.MemoryPoolNames",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/GarbageCollector/Copy/Name": {
+ "metric": "java.lang:type=GarbageCollector,name=Copy.Name",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/GarbageCollector/Copy/Valid": {
+ "metric": "java.lang:type=GarbageCollector,name=Copy.Valid",
+ "pointInTime": true,
+ "temporal": false
+ },
+
+ "metrics/MemoryPool/CodeCache/CollectionUsage": {
+ "metric": "java.lang:type=MemoryPool,name=Code Cache.CollectionUsage",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/CodeCache/MemoryManagerNames": {
+ "metric": "java.lang:type=MemoryPool,name=Code Cache.MemoryManagerNames",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/CodeCache/PeakUsage": {
+ "metric": "java.lang:type=MemoryPool,name=Code Cache.PeakUsage",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/CodeCache/Usage": {
+ "metric": "java.lang:type=MemoryPool,name=Code Cache.Usage",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/CodeCache/UsageThreshold": {
+ "metric": "java.lang:type=MemoryPool,name=Code Cache.UsageThreshold",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/CodeCache/UsageThresholdCount": {
+ "metric": "java.lang:type=MemoryPool,name=Code Cache.UsageThresholdCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/CodeCache/CollectionUsageThresholdSupported": {
+ "metric": "java.lang:type=MemoryPool,name=Code Cache.CollectionUsageThresholdSupported",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/CodeCache/UsageThresholdExceeded": {
+ "metric": "java.lang:type=MemoryPool,name=Code Cache.UsageThresholdExceeded",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/CodeCache/UsageThresholdSupported": {
+ "metric": "java.lang:type=MemoryPool,name=Code Cache.UsageThresholdSupported",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/CodeCache/Name": {
+ "metric": "java.lang:type=MemoryPool,name=Code Cache.Name",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/CodeCache/Type": {
+ "metric": "java.lang:type=MemoryPool,name=Code Cache.Type",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/CodeCache/Valid": {
+ "metric": "java.lang:type=MemoryPool,name=Code Cache.Valid",
+ "pointInTime": true,
+ "temporal": false
+ },
+
+ "metrics/MemoryPool/EdenSpace/CollectionUsage": {
+ "metric": "java.lang:type=MemoryPool,name=Eden Space.CollectionUsage",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/EdenSpace/CollectionUsageThreshold": {
+ "metric": "java.lang:type=MemoryPool,name=Eden Space.CollectionUsageThreshold",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/EdenSpace/CollectionUsageThresholdCount": {
+ "metric": "java.lang:type=MemoryPool,name=Eden Space.CollectionUsageThresholdCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/EdenSpace/MemoryManagerNames": {
+ "metric": "java.lang:type=MemoryPool,name=Eden Space.MemoryManagerNames",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/EdenSpace/PeakUsage": {
+ "metric": "java.lang:type=MemoryPool,name=Eden Space.PeakUsage",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/EdenSpace/Usage": {
+ "metric": "java.lang:type=MemoryPool,name=Eden Space.Usage",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/EdenSpace/CollectionUsageThresholdSupported": {
+ "metric": "java.lang:type=MemoryPool,name=Eden Space.CollectionUsageThresholdSupported",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/EdenSpace/CollectionUsageThresholdExceeded": {
+ "metric": "java.lang:type=MemoryPool,name=Eden Space.CollectionUsageThresholdExceeded",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/EdenSpace/UsageThresholdSupported": {
+ "metric": "java.lang:type=MemoryPool,name=Eden Space.UsageThresholdSupported",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/EdenSpace/Name": {
+ "metric": "java.lang:type=MemoryPool,name=Eden Space.Name",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/EdenSpace/Type": {
+ "metric": "java.lang:type=MemoryPool,name=Eden Space.Type",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/EdenSpace/Valid": {
+ "metric": "java.lang:type=MemoryPool,name=Eden Space.Valid",
+ "pointInTime": true,
+ "temporal": false
+ },
+
+ "metrics/MemoryPool/TenuredGen/CollectionUsage": {
+ "metric": "java.lang:type=MemoryPool,name=Tenured Gen.CollectionUsage",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/TenuredGen/CollectionUsageThreshold": {
+ "metric": "java.lang:type=MemoryPool,name=Tenured Gen.CollectionUsageThreshold",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/TenuredGen/CollectionUsageThresholdCount": {
+ "metric": "java.lang:type=MemoryPool,name=Tenured Gen.CollectionUsageThresholdCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/TenuredGen/MemoryManagerNames": {
+ "metric": "java.lang:type=MemoryPool,name=Tenured Gen.MemoryManagerNames",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/TenuredGen/PeakUsage": {
+ "metric": "java.lang:type=MemoryPool,name=Tenured Gen.PeakUsage",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/TenuredGen/Usage": {
+ "metric": "java.lang:type=MemoryPool,name=Tenured Gen.Usage",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/TenuredGen/UsageThreshold": {
+ "metric": "java.lang:type=MemoryPool,name=Tenured Gen.UsageThreshold",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/TenuredGen/UsageThresholdCount": {
+ "metric": "java.lang:type=MemoryPool,name=Tenured Gen.UsageThresholdCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/TenuredGen/CollectionUsageThresholdSupported": {
+ "metric": "java.lang:type=MemoryPool,name=Tenured Gen.CollectionUsageThresholdSupported",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/TenuredGen/CollectionUsageThresholdExceeded": {
+ "metric": "java.lang:type=MemoryPool,name=Tenured Gen.CollectionUsageThresholdExceeded",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/TenuredGen/UsageThresholdSupported": {
+ "metric": "java.lang:type=MemoryPool,name=Tenured Gen.UsageThresholdSupported",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/TenuredGen/UsageThresholdExceeded": {
+ "metric": "java.lang:type=MemoryPool,name=Tenured Gen.UsageThresholdExceeded",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/TenuredGen/Name": {
+ "metric": "java.lang:type=MemoryPool,name=Tenured Gen.Name",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/TenuredGen/Type": {
+ "metric": "java.lang:type=MemoryPool,name=Tenured Gen.Type",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/TenuredGen/Valid": {
+ "metric": "java.lang:type=MemoryPool,name=Tenured Gen.Valid",
+ "pointInTime": true,
+ "temporal": false
+ }
+ }
+ }
+ ],
+ "HostComponent": [
+ {
+ "type": "jmx",
+ "metrics": {
+ "metrics/rpc/ReceivedBytes": {
+ "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.ReceivedBytes",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/SentBytes": {
+ "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.SentBytes",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcQueueTimeNumOps": {
+ "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcQueueTimeNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcQueueTimeAvgTime": {
+ "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcQueueTimeAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcProcessingTimeNumOps": {
+ "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcProcessingTimeNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcProcessingTimeAvgTime": {
+ "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcProcessingTimeAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcAuthenticationFailures": {
+ "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthenticationFailures",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcAuthenticationSuccesses": {
+ "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthenticationSuccesses",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcAuthorizationFailures": {
+ "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthorizationFailures",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/RpcAuthorizationSuccesses": {
+ "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthorizationSuccesses",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/NumOpenConnections": {
+ "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.NumOpenConnections",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/rpc/CallQueueLength": {
+ "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.CallQueueLength",
+ "pointInTime": true,
+ "temporal": false
+ },
+
+
+
+
+ "metrics/jvm/MemNonHeapUsedM": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemNonHeapUsedM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/MemNonHeapCommittedM": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemNonHeapCommittedM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/MemHeapUsedM": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemHeapUsedM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/MemHeapCommittedM": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemHeapCommittedM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/MemMaxM": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemMaxM",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/GcCountCopy": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcCountCopy",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/GcTimeMillisCopy": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcTimeMillisCopy",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/GcCountMarkSweepCompact": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcCountMarkSweepCompact",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/GcTimeMillisMarkSweepCompact": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcTimeMillisMarkSweepCompact",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/GcCount": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/GcTimeMillis": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcTimeMillis",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/ThreadsNew": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsNew",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/ThreadsRunnable": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsRunnable",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/ThreadsBlocked": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsBlocked",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/ThreadsWaiting": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsWaiting",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/ThreadsTimedWaiting": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsTimedWaiting",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/ThreadsTerminated": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsTerminated",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/LogFatal": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogFatal",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/LogError": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogError",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/LogWarn": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogWarn",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/jvm/LogInfo": {
+ "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogInfo",
+ "pointInTime": true,
+ "temporal": false
+ },
+
+
+
+
+ "metrics/Memory/HeapMemoryMax":{
+ "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
+ "pointInTime" : true,
+ "temporal" : false
+ },
+ "metrics/Memory/HeapMemoryUsed":{
+ "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
+ "pointInTime" : true,
+ "temporal" : false
+ },
+ "metrics/Memory/HeapMemoryCommitted":{
+ "metric": "java.lang:type=Memory.HeapMemoryUsage[committed]",
+ "pointInTime" : true,
+ "temporal" : false
+ },
+ "metrics/Memory/HeapMemoryInit":{
+ "metric": "java.lang:type=Memory.HeapMemoryUsage[init]",
+ "pointInTime" : true,
+ "temporal" : false
+ },
+ "metrics/Memory/NonHeapMemoryMax":{
+ "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
+ "pointInTime" : true,
+ "temporal" : false
+ },
+ "metrics/Memory/NonHeapMemoryUsed":{
+ "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
+ "pointInTime" : true,
+ "temporal" : false
+ },
+ "metrics/Memory/NonHeapMemoryCommitted":{
+ "metric": "java.lang:type=Memory.NonHeapMemoryUsage[committed]",
+ "pointInTime" : true,
+ "temporal" : false
+ },
+ "metrics/Memory/NonHeapMemoryInit":{
+ "metric": "java.lang:type=Memory.NonHeapMemoryUsage[init]",
+ "pointInTime" : true,
+ "temporal" : false
+ },
+
+
+
+ "metrics/MBeanServerDelegate/MBeanServerId": {
+ "metric": "JMImplementation:type=MBeanServerDelegate.MBeanServerId",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MBeanServerDelegate/SpecificationName": {
+ "metric": "JMImplementation:type=MBeanServerDelegate.SpecificationName",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MBeanServerDelegate/SpecificationVersion": {
+ "metric": "JMImplementation:type=MBeanServerDelegate.SpecificationVersion",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MBeanServerDelegate/SpecificationVendor": {
+ "metric": "JMImplementation:type=MBeanServerDelegate.SpecificationVendor",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MBeanServerDelegate/ImplementationName": {
+ "metric": "JMImplementation:type=MBeanServerDelegate.ImplementationName",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MBeanServerDelegate/ImplementationVersion": {
+ "metric": "JMImplementation:type=MBeanServerDelegate.ImplementationVersion",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MBeanServerDelegate/ImplementationVendor": {
+ "metric": "JMImplementation:type=MBeanServerDelegate.ImplementationVendor",
+ "pointInTime": true,
+ "temporal": false
+ },
+
+
+
+ "metrics/StartupProgress/ElapsedTime": {
+ "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.ElapsedTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/StartupProgress/PercentComplete": {
+ "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.PercentComplete",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/StartupProgress/LoadingFsImageCount": {
+ "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImageCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/StartupProgress/LoadingFsImageElapsedTime": {
+ "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImageElapsedTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/StartupProgress/LoadingFsImageTotal": {
+ "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImageTotal",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/StartupProgress/LoadingFsImagePercentComplete": {
+ "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImagePercentComplete",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/StartupProgress/LoadingEditsCount": {
+ "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/StartupProgress/LoadingEditsElapsedTime": {
+ "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsElapsedTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/StartupProgress/LoadingEditsTotal": {
+ "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsTotal",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/StartupProgress/LoadingEditsPercentComplete": {
+ "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsPercentComplete",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/StartupProgress/SavingCheckpointCount": {
+ "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/StartupProgress/SavingCheckpointElapsedTime": {
+ "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointElapsedTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/StartupProgress/SavingCheckpointTotal": {
+ "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointTotal",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/StartupProgress/SavingCheckpointPercentComplete": {
+ "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointPercentComplete",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/StartupProgress/SafeModeCount": {
+ "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModeCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/StartupProgress/SafeModeElapsedTime": {
+ "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModeElapsedTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/StartupProgress/SafeModeTotal": {
+ "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModeTotal",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/StartupProgress/SafeModePercentComplete": {
+ "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModePercentComplete",
+ "pointInTime": true,
+ "temporal": false
+ },
+
+
+ "metrics/HotSpotDiagnostic/DiagnosticOptions": {
+ "metric": "com.sun.management:type=HotSpotDiagnostic.DiagnosticOptions",
+ "pointInTime": true,
+ "temporal": false
+ },
+
+
+
+ "metrics/MemoryManager/MemoryPoolNames": {
+ "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.MemoryPoolNames",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryManager/Name": {
+ "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.Name",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryManager/Valid": {
+ "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.Valid",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryManager/ObjectName": {
+ "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.ObjectName",
+ "pointInTime": true,
+ "temporal": false
+ },
+
+
+ "metrics/Logging/LoggerNames": {
+ "metric": "java.util.logging:type=Logging.LoggerNames",
+ "pointInTime": true,
+ "temporal": false
+ },
+
+
+ "metrics/UgiMetrics/LoginSuccessNumOps": {
+ "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginSuccessNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/UgiMetrics/LoginSuccessAvgTime": {
+ "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginSuccessAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/UgiMetrics/LoginFailureNumOps": {
+ "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginFailureNumOps",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/UgiMetrics/LoginFailureAvgTime": {
+ "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginFailureAvgTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+
+
+
+ "metrics/MemoryPool/SurvivorSpace/CollectionUsage": {
+ "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsage",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/SurvivorSpace/CollectionUsageThreshold": {
+ "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThreshold",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/SurvivorSpace/CollectionUsageThresholdCount": {
+ "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThresholdCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/SurvivorSpace/MemoryManagerNames": {
+ "metric": "java.lang:type=MemoryPool,name=Survivor Space.MemoryManagerNames",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/SurvivorSpace/PeakUsage": {
+ "metric": "java.lang:type=MemoryPool,name=Survivor Space.PeakUsage",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/SurvivorSpace/Usage": {
+ "metric": "java.lang:type=MemoryPool,name=Survivor Space.Usage",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/SurvivorSpace/CollectionUsageThresholdExceeded": {
+ "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThresholdExceeded",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/SurvivorSpace/CollectionUsageThresholdSupported": {
+ "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThresholdSupported",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/SurvivorSpace/UsageThresholdSupported": {
+ "metric": "java.lang:type=MemoryPool,name=Survivor Space.UsageThresholdSupported",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/SurvivorSpace/Name": {
+ "metric": "java.lang:type=MemoryPool,name=Survivor Space.Name",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/SurvivorSpace/Type": {
+ "metric": "java.lang:type=MemoryPool,name=Survivor Space.Type",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/SurvivorSpace/Valid": {
+ "metric": "java.lang:type=MemoryPool,name=Survivor Space.Valid",
+ "pointInTime": true,
+ "temporal": false
+ },
+
+
+
+ "metrics/Threading/ThreadAllocatedMemoryEnabled": {
+ "metric": "java.lang:type=Threading.ThreadAllocatedMemoryEnabled",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Threading/ThreadAllocatedMemorySupported": {
+ "metric": "java.lang:type=Threading.ThreadAllocatedMemorySupported",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Threading/DaemonThreadCount": {
+ "metric": "java.lang:type=Threading.DaemonThreadCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Threading/PeakThreadCount": {
+ "metric": "java.lang:type=Threading.PeakThreadCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Threading/CurrentThreadCpuTimeSupported": {
+ "metric": "java.lang:type=Threading.CurrentThreadCpuTimeSupported",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Threading/ObjectMonitorUsageSupported": {
+ "metric": "java.lang:type=Threading.ObjectMonitorUsageSupported",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Threading/SynchronizerUsageSupported": {
+ "metric": "java.lang:type=Threading.SynchronizerUsageSupported",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Threading/ThreadContentionMonitoringSupported": {
+ "metric": "java.lang:type=Threading.ThreadContentionMonitoringSupported",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Threading/ThreadCpuTimeEnabled": {
+ "metric": "java.lang:type=Threading.ThreadCpuTimeEnabled",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Threading/CurrentThreadCpuTime": {
+ "metric": "java.lang:type=Threading.CurrentThreadCpuTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Threading/CurrentThreadUserTime": {
+ "metric": "java.lang:type=Threading.CurrentThreadUserTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Threading/ThreadCount": {
+ "metric": "java.lang:type=Threading.ThreadCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Threading/TotalStartedThreadCount": {
+ "metric": "java.lang:type=Threading.TotalStartedThreadCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Threading/ThreadCpuTimeSupported": {
+ "metric": "java.lang:type=Threading.ThreadCpuTimeSupported",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Threading/ThreadContentionMonitoringEnabled": {
+ "metric": "java.lang:type=Threading.ThreadContentionMonitoringEnabled",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Threading/AllThreadIds": {
+ "metric": "java.lang:type=Threading.AllThreadIds",
+ "pointInTime": true,
+ "temporal": false
+ },
+
+
+ "metrics/ClassLoading/LoadedClassCount": {
+ "metric": "java.lang:type=ClassLoading.LoadedClassCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/ClassLoading/UnloadedClassCount": {
+ "metric": "java.lang:type=ClassLoading.UnloadedClassCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/ClassLoading/TotalLoadedClassCount": {
+ "metric": "java.lang:type=ClassLoading.TotalLoadedClassCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/ClassLoading/Verbose": {
+ "metric": "java.lang:type=ClassLoading.Verbose",
+ "pointInTime": true,
+ "temporal": false
+ },
+
+
+
+ "metrics/Runtime/BootClassPath": {
+ "metric": "java.lang:type=Runtime.BootClassPath",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Runtime/LibraryPath": {
+ "metric": "java.lang:type=Runtime.LibraryPath",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Runtime/VmName": {
+ "metric": "java.lang:type=Runtime.VmName",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Runtime/VmVendor": {
+ "metric": "java.lang:type=Runtime.VmVendor",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Runtime/VmVersion": {
+ "metric": "java.lang:type=Runtime.VmVersion",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Runtime/BootClassPathSupported": {
+ "metric": "java.lang:type=Runtime.BootClassPathSupported",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Runtime/StartTime": {
+ "metric": "java.lang:type=Runtime.StartTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Runtime/InputArguments": {
+ "metric": "java.lang:type=Runtime.InputArguments",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Runtime/ManagementSpecVersion": {
+ "metric": "java.lang:type=Runtime.ManagementSpecVersion",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Runtime/SpecName": {
+ "metric": "java.lang:type=Runtime.SpecName",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Runtime/SpecVendor": {
+ "metric": "java.lang:type=Runtime.SpecVendor",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Runtime/SpecVersion": {
+ "metric": "java.lang:type=Runtime.SpecVersion",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Runtime/SystemProperties": {
+ "metric": "java.lang:type=Runtime.SystemProperties",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Runtime/Uptime": {
+ "metric": "java.lang:type=Runtime.Uptime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Runtime/Name": {
+ "metric": "java.lang:type=Runtime.Name",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/Runtime/ClassPath": {
+ "metric": "java.lang:type=Runtime.ClassPath",
+ "pointInTime": true,
+ "temporal": false
+ },
+
+
+ "metrics/OperatingSystem/MaxFileDescriptorCount": {
+ "metric": "java.lang:type=OperatingSystem.MaxFileDescriptorCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/OperatingSystem/OpenFileDescriptorCount": {
+ "metric": "java.lang:type=OperatingSystem.OpenFileDescriptorCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/OperatingSystem/CommittedVirtualMemorySize": {
+ "metric": "java.lang:type=OperatingSystem.CommittedVirtualMemorySize",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/OperatingSystem/FreePhysicalMemorySize": {
+ "metric": "java.lang:type=OperatingSystem.FreePhysicalMemorySize",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/OperatingSystem/FreeSwapSpaceSize": {
+ "metric": "java.lang:type=OperatingSystem.FreeSwapSpaceSize",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/OperatingSystem/ProcessCpuLoad": {
+ "metric": "java.lang:type=OperatingSystem.ProcessCpuLoad",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/OperatingSystem/ProcessCpuTime": {
+ "metric": "java.lang:type=OperatingSystem.ProcessCpuTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/OperatingSystem/SystemCpuLoad": {
+ "metric": "java.lang:type=OperatingSystem.SystemCpuLoad",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/OperatingSystem/TotalPhysicalMemorySize": {
+ "metric": "java.lang:type=OperatingSystem.TotalPhysicalMemorySize",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/OperatingSystem/TotalSwapSpaceSize": {
+ "metric": "java.lang:type=OperatingSystem.TotalSwapSpaceSize",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/OperatingSystem/AvailableProcessors": {
+ "metric": "java.lang:type=OperatingSystem.AvailableProcessors",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/OperatingSystem/Version": {
+ "metric": "java.lang:type=OperatingSystem.Version",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/OperatingSystem/Arch": {
+ "metric": "java.lang:type=OperatingSystem.Arch",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/OperatingSystem/SystemLoadAverage": {
+ "metric": "java.lang:type=OperatingSystem.SystemLoadAverage",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/OperatingSystem/Name": {
+ "metric": "java.lang:type=OperatingSystem.Name",
+ "pointInTime": true,
+ "temporal": false
+ },
+
+
+
+ "metrics/MemoryPool/PermGen/CollectionUsage": {
+ "metric": "java.lang:type=MemoryPool,name=Perm Gen.CollectionUsage",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/PermGen/CollectionUsageThreshold": {
+ "metric": "java.lang:type=MemoryPool,name=Perm Gen.CollectionUsageThreshold",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/PermGen/CollectionUsageThresholdCount": {
+ "metric": "java.lang:type=MemoryPool,name=Perm Gen.CollectionUsageThresholdCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/PermGen/MemoryManagerNames": {
+ "metric": "java.lang:type=MemoryPool,name=Perm Gen.MemoryManagerNames",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/PermGen/PeakUsage": {
+ "metric": "java.lang:type=MemoryPool,name=Perm Gen.PeakUsage",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/PermGen/Usage": {
+ "metric": "java.lang:type=MemoryPool,name=Perm Gen.Usage",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/PermGen/UsageThreshold": {
+ "metric": "java.lang:type=MemoryPool,name=Perm Gen.UsageThreshold",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/PermGen/UsageThresholdCount": {
+ "metric": "java.lang:type=MemoryPool,name=Perm Gen.UsageThresholdCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/PermGen/CollectionUsageThresholdExceeded": {
+ "metric": "java.lang:type=MemoryPool,name=Perm Gen.CollectionUsageThresholdExceeded",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/PermGen/CollectionUsageThresholdSupported": {
+ "metric": "java.lang:type=MemoryPool,name=Perm Gen.CollectionUsageThresholdSupported",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/PermGen/UsageThresholdExceeded": {
+ "metric": "java.lang:type=MemoryPool,name=Perm Gen.UsageThresholdExceeded",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/PermGen/UsageThresholdSupported": {
+ "metric": "java.lang:type=MemoryPool,name=Perm Gen.UsageThresholdSupported",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/PermGen/Name": {
+ "metric": "java.lang:type=MemoryPool,name=Perm Gen.Name",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/PermGen/Type": {
+ "metric": "java.lang:type=MemoryPool,name=Perm Gen.Type",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/PermGen/Valid": {
+ "metric": "java.lang:type=MemoryPool,name=Perm Gen.Valid",
+ "pointInTime": true,
+ "temporal": false
+ },
+
+
+
+ "metrics/BufferPool/mapred/Count": {
+ "metric": "java.nio:type=BufferPool,name=mapped.Count",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/BufferPool/mapred/MemoryUsed": {
+ "metric": "java.nio:type=BufferPool,name=mapped.MemoryUsed",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/BufferPool/mapred/TotalCapacity": {
+ "metric": "java.nio:type=BufferPool,name=mapped.TotalCapacity",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/BufferPool/mapred/Name": {
+ "metric": "java.nio:type=BufferPool,name=mapped.Name",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/BufferPool/direct/Count": {
+ "metric": "java.nio:type=BufferPool,name=direct.Count",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/BufferPool/direct/MemoryUsed": {
+ "metric": "java.nio:type=BufferPool,name=direct.MemoryUsed",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/BufferPool/direct/TotalCapacity": {
+ "metric": "java.nio:type=BufferPool,name=direct.TotalCapacity",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/BufferPool/direct/Name": {
+ "metric": "java.nio:type=BufferPool,name=direct.Name",
+ "pointInTime": true,
+ "temporal": false
+ },
+
+
+
+ "metrics/GarbageCollector/MarkSweepCompact/LastGcInfo": {
+ "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.LastGcInfo",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/GarbageCollector/MarkSweepCompact/CollectionCount": {
+ "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.CollectionCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/GarbageCollector/MarkSweepCompact/CollectionTime": {
+ "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.CollectionTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/GarbageCollector/MarkSweepCompact/MemoryPoolNames": {
+ "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.MemoryPoolNames",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/GarbageCollector/MarkSweepCompact/Name": {
+ "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.Name",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/GarbageCollector/MarkSweepCompact/Valid": {
+ "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.Valid",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/GarbageCollector/Copy/LastGcInfo": {
+ "metric": "java.lang:type=GarbageCollector,name=Copy.LastGcInfo",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/GarbageCollector/Copy/CollectionCount": {
+ "metric": "java.lang:type=GarbageCollector,name=Copy.CollectionCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/GarbageCollector/Copy/CollectionTime": {
+ "metric": "java.lang:type=GarbageCollector,name=Copy.CollectionTime",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/GarbageCollector/Copy/MemoryPoolNames": {
+ "metric": "java.lang:type=GarbageCollector,name=Copy.MemoryPoolNames",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/GarbageCollector/Copy/Name": {
+ "metric": "java.lang:type=GarbageCollector,name=Copy.Name",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/GarbageCollector/Copy/Valid": {
+ "metric": "java.lang:type=GarbageCollector,name=Copy.Valid",
+ "pointInTime": true,
+ "temporal": false
+ },
+
+
+
+ "metrics/MemoryPool/CodeCache/CollectionUsage": {
+ "metric": "java.lang:type=MemoryPool,name=Code Cache.CollectionUsage",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/CodeCache/MemoryManagerNames": {
+ "metric": "java.lang:type=MemoryPool,name=Code Cache.MemoryManagerNames",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/CodeCache/PeakUsage": {
+ "metric": "java.lang:type=MemoryPool,name=Code Cache.PeakUsage",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/CodeCache/Usage": {
+ "metric": "java.lang:type=MemoryPool,name=Code Cache.Usage",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/CodeCache/UsageThreshold": {
+ "metric": "java.lang:type=MemoryPool,name=Code Cache.UsageThreshold",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/CodeCache/UsageThresholdCount": {
+ "metric": "java.lang:type=MemoryPool,name=Code Cache.UsageThresholdCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/CodeCache/CollectionUsageThresholdSupported": {
+ "metric": "java.lang:type=MemoryPool,name=Code Cache.CollectionUsageThresholdSupported",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/CodeCache/UsageThresholdExceeded": {
+ "metric": "java.lang:type=MemoryPool,name=Code Cache.UsageThresholdExceeded",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/CodeCache/UsageThresholdSupported": {
+ "metric": "java.lang:type=MemoryPool,name=Code Cache.UsageThresholdSupported",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/CodeCache/Name": {
+ "metric": "java.lang:type=MemoryPool,name=Code Cache.Name",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/CodeCache/Type": {
+ "metric": "java.lang:type=MemoryPool,name=Code Cache.Type",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/CodeCache/Valid": {
+ "metric": "java.lang:type=MemoryPool,name=Code Cache.Valid",
+ "pointInTime": true,
+ "temporal": false
+ },
+
+ "metrics/MemoryPool/EdenSpace/CollectionUsage": {
+ "metric": "java.lang:type=MemoryPool,name=Eden Space.CollectionUsage",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/EdenSpace/CollectionUsageThreshold": {
+ "metric": "java.lang:type=MemoryPool,name=Eden Space.CollectionUsageThreshold",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/EdenSpace/CollectionUsageThresholdCount": {
+ "metric": "java.lang:type=MemoryPool,name=Eden Space.CollectionUsageThresholdCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/EdenSpace/MemoryManagerNames": {
+ "metric": "java.lang:type=MemoryPool,name=Eden Space.MemoryManagerNames",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/EdenSpace/PeakUsage": {
+ "metric": "java.lang:type=MemoryPool,name=Eden Space.PeakUsage",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/EdenSpace/Usage": {
+ "metric": "java.lang:type=MemoryPool,name=Eden Space.Usage",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/EdenSpace/CollectionUsageThresholdSupported": {
+ "metric": "java.lang:type=MemoryPool,name=Eden Space.CollectionUsageThresholdSupported",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/EdenSpace/CollectionUsageThresholdExceeded": {
+ "metric": "java.lang:type=MemoryPool,name=Eden Space.CollectionUsageThresholdExceeded",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/EdenSpace/UsageThresholdSupported": {
+ "metric": "java.lang:type=MemoryPool,name=Eden Space.UsageThresholdSupported",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/EdenSpace/Name": {
+ "metric": "java.lang:type=MemoryPool,name=Eden Space.Name",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/EdenSpace/Type": {
+ "metric": "java.lang:type=MemoryPool,name=Eden Space.Type",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/EdenSpace/Valid": {
+ "metric": "java.lang:type=MemoryPool,name=Eden Space.Valid",
+ "pointInTime": true,
+ "temporal": false
+ },
+
+ "metrics/MemoryPool/TenuredGen/CollectionUsage": {
+ "metric": "java.lang:type=MemoryPool,name=Tenured Gen.CollectionUsage",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/TenuredGen/CollectionUsageThreshold": {
+ "metric": "java.lang:type=MemoryPool,name=Tenured Gen.CollectionUsageThreshold",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/TenuredGen/CollectionUsageThresholdCount": {
+ "metric": "java.lang:type=MemoryPool,name=Tenured Gen.CollectionUsageThresholdCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/TenuredGen/MemoryManagerNames": {
+ "metric": "java.lang:type=MemoryPool,name=Tenured Gen.MemoryManagerNames",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/TenuredGen/PeakUsage": {
+ "metric": "java.lang:type=MemoryPool,name=Tenured Gen.PeakUsage",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/TenuredGen/Usage": {
+ "metric": "java.lang:type=MemoryPool,name=Tenured Gen.Usage",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/TenuredGen/UsageThreshold": {
+ "metric": "java.lang:type=MemoryPool,name=Tenured Gen.UsageThreshold",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/TenuredGen/UsageThresholdCount": {
+ "metric": "java.lang:type=MemoryPool,name=Tenured Gen.UsageThresholdCount",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/TenuredGen/CollectionUsageThresholdSupported": {
+ "metric": "java.lang:type=MemoryPool,name=Tenured Gen.CollectionUsageThresholdSupported",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/TenuredGen/CollectionUsageThresholdExceeded": {
+ "metric": "java.lang:type=MemoryPool,name=Tenured Gen.CollectionUsageThresholdExceeded",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/TenuredGen/UsageThresholdSupported": {
+ "metric": "java.lang:type=MemoryPool,name=Tenured Gen.UsageThresholdSupported",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/TenuredGen/UsageThresholdExceeded": {
+ "metric": "java.lang:type=MemoryPool,name=Tenured Gen.UsageThresholdExceeded",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/TenuredGen/Name": {
+ "metric": "java.lang:type=MemoryPool,name=Tenured Gen.Name",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/TenuredGen/Type": {
+ "metric": "java.lang:type=MemoryPool,name=Tenured Gen.Type",
+ "pointInTime": true,
+ "temporal": false
+ },
+ "metrics/MemoryPool/TenuredGen/Valid": {
+ "metric": "java.lang:type=MemoryPool,name=Tenured Gen.Valid",
+ "pointInTime": true,
+ "temporal": false
+ }
+ }
+ }
+ ]
+ }
+}
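Each entry in the metrics.json above follows one shape: an Ambari metric path on the left, a JMX MBean attribute on the right, with a bracket suffix such as HeapMemoryUsage[used] selecting a single key of a composite attribute. As a hedged sketch (not part of the patch), this is how such a mapping can be resolved against the standard /jmx servlet that Hadoop daemons expose; the host, port and helper name are illustrative assumptions.

import json
import urllib2

def fetch_jmx_value(host_port, metric):
  # Split e.g. "java.lang:type=Memory.HeapMemoryUsage[used]" into the MBean
  # query ("java.lang:type=Memory"), the attribute ("HeapMemoryUsage") and
  # the optional composite key ("used").
  bean_query, _, attr_expr = metric.rpartition('.')
  key = None
  if attr_expr.endswith(']'):
    attr_expr, _, key = attr_expr[:-1].partition('[')
  raw = urllib2.urlopen('http://' + host_port + '/jmx?qry=' + bean_query).read()
  value = json.loads(raw)['beans'][0][attr_expr]
  return value[key] if key is not None else value

# fetch_jmx_value('historyserver.example.com:19888',
#                 'java.lang:type=Memory.HeapMemoryUsage[used]')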
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/files/validateYarnComponentStatus.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/files/validateYarnComponentStatus.py
new file mode 100644
index 0000000000..073371a38c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/files/validateYarnComponentStatus.py
@@ -0,0 +1,161 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import optparse
+import subprocess
+import json
+import urllib2
+
+RESOURCEMANAGER = 'rm'
+NODEMANAGER = 'nm'
+HISTORYSERVER = 'hs'
+
+STARTED_STATE = 'STARTED'
+RUNNING_STATE = 'RUNNING'
+
+# Return the response for a given path and address
+def getResponse(path, address, ssl_enabled):
+ if ssl_enabled:
+ url = 'https://' + address + path
+ else:
+ url = 'http://' + address + path
+
+ try:
+ handle = urllib2.urlopen(url)
+ output = handle.read()
+ handle.close()
+ response = json.loads(output)
+ if response is None:
+ print 'There is no response for url: ' + str(url)
+ exit(1)
+ return response
+ except Exception as e:
+ print 'Error getting response for url: ' + str(url), e
+ exit(1)
+
+# Verify that the REST API is available for the given component
+def validateAvailability(component, path, address, ssl_enabled):
+
+ try:
+ response = getResponse(path, address, ssl_enabled)
+ is_valid = validateAvailabilityResponse(component, response)
+ if not is_valid:
+ exit(1)
+ except Exception as e:
+ print 'Error checking availability status of component', e
+ exit(1)
+
+# Validate the component-specific availability response
+def validateAvailabilityResponse(component, response):
+ try:
+ if component == RESOURCEMANAGER:
+ rm_state = response['clusterInfo']['state']
+ if rm_state == STARTED_STATE:
+ return True
+ else:
+ print 'Resourcemanager is not started'
+ return False
+
+ elif component == NODEMANAGER:
+ node_healthy = bool(response['nodeInfo']['nodeHealthy'])
+ if node_healthy:
+ return True
+ else:
+ return False
+ elif component == HISTORYSERVER:
+ hs_start_time = response['historyInfo']['startedOn']
+ if hs_start_time > 0:
+ return True
+ else:
+ return False
+ else:
+ return False
+ except Exception as e:
+ print 'Error validating availability response for ' + str(component), e
+ return False
+
+# Verify that the component has the resources it needs to work
+def validateAbility(component, path, address, ssl_enabled):
+
+ try:
+ response = getResponse(path, address, ssl_enabled)
+ is_valid = validateAbilityResponse(component, response)
+ if not is_valid:
+ exit(1)
+ except Exception as e:
+ print 'Error checking ability of component', e
+ exit(1)
+
+# Validate the component-specific response confirming it has the resources it needs
+def validateAbilityResponse(component, response):
+ try:
+ if component == RESOURCEMANAGER:
+ nodes = []
+ if 'nodes' in response and response['nodes'] is not None and 'node' in response['nodes']:
+ nodes = response['nodes']['node']
+ connected_nodes_count = len(nodes)
+ if connected_nodes_count == 0:
+ print 'There are no nodemanagers connected to the resourcemanager'
+ return False
+ active_nodes = filter(lambda x: x['state'] == RUNNING_STATE, nodes)
+ active_nodes_count = len(active_nodes)
+
+ if active_nodes_count == 0:
+ print 'There are no active nodemanagers connected to the resourcemanager'
+ return False
+ else:
+ return True
+ else:
+ return False
+ except Exception as e:
+ print 'Error validating ability response', e
+ return False
+
+#
+# Main.
+#
+def main():
+ parser = optparse.OptionParser(usage="usage: %prog [options] component ")
+ parser.add_option("-p", "--port", dest="address", help="Host:Port for REST API of a desired component")
+ parser.add_option("-s", "--ssl", dest="ssl_enabled", help="Is SSL enabled for UI of component")
+
+ (options, args) = parser.parse_args()
+
+ component = args[0]
+
+ address = options.address
+ ssl_enabled = str(options.ssl_enabled).lower() == 'true'
+ if component == RESOURCEMANAGER:
+ path = '/ws/v1/cluster/info'
+ elif component == NODEMANAGER:
+ path = '/ws/v1/node/info'
+ elif component == HISTORYSERVER:
+ path = '/ws/v1/history/info'
+ else:
+ parser.error("Invalid component")
+
+ validateAvailability(component, path, address, ssl_enabled)
+
+ if component == RESOURCEMANAGER:
+ path = '/ws/v1/cluster/nodes'
+ validateAbility(component, path, address, ssl_enabled)
+
+if __name__ == "__main__":
+ main()
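As a usage sketch for the checker above: the component is a positional argument ('rm', 'nm' or 'hs'), -p supplies host:port and -s the SSL flag, e.g. "python validateYarnComponentStatus.py rm -p rm-host.example.com:8088 -s false" (host hypothetical). The sample payloads below mirror the fields the two validators read, as if evaluated in the module's namespace.

sample_rm_info = {'clusterInfo': {'state': 'STARTED'}}
assert validateAvailabilityResponse(RESOURCEMANAGER, sample_rm_info)

sample_rm_nodes = {'nodes': {'node': [{'state': 'RUNNING'}, {'state': 'LOST'}]}}
assert validateAbilityResponse(RESOURCEMANAGER, sample_rm_nodes)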
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/application_timeline_server.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/application_timeline_server.py
new file mode 100644
index 0000000000..09a8f6d6bc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/application_timeline_server.py
@@ -0,0 +1,54 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+from yarn import yarn
+import service_mapping
+
+class ApplicationTimelineServer(Script):
+
+ def install(self, env):
+ if not check_windows_service_exists(service_mapping.apptimelineserver_win_service_name):
+ self.install_packages(env)
+ self.configure(env)
+
+ def configure(self, env):
+ import params
+ env.set_params(params)
+ yarn()
+
+ def start(self, env):
+ import params
+ env.set_params(params)
+ self.configure(env)
+ Service(service_mapping.apptimelineserver_win_service_name, action="start")
+
+ def stop(self, env):
+ import params
+ env.set_params(params)
+ Service(service_mapping.apptimelineserver_win_service_name, action="stop")
+
+ def status(self, env):
+ check_windows_service_status(service_mapping.apptimelineserver_win_service_name)
+
+if __name__ == "__main__":
+ ApplicationTimelineServer().execute()
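The wrapper above, like the historyserver and nodemanager wrappers later in this patch, implements one lifecycle contract: Script.execute() dispatches the Ambari command name to the matching method, and start/stop reduce to actions on the named Windows service. A minimal sketch under that assumption, reusing only names the patch itself defines (the restart helper is hypothetical):

from resource_management import Service
import service_mapping

def restart_ats():
  # Stop, then start, the same Windows service the wrapper manages.
  Service(service_mapping.apptimelineserver_win_service_name, action="stop")
  Service(service_mapping.apptimelineserver_win_service_name, action="start")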
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/historyserver.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/historyserver.py
new file mode 100644
index 0000000000..969963d107
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/historyserver.py
@@ -0,0 +1,53 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+from yarn import yarn
+import service_mapping
+
+class Historyserver(Script):
+
+ def install(self, env):
+ if not check_windows_service_exists(service_mapping.historyserver_win_service_name):
+ self.install_packages(env)
+ self.configure(env)
+
+ def configure(self, env):
+ import params
+ env.set_params(params)
+ yarn()
+
+ def start(self, env):
+ import params
+ env.set_params(params)
+ self.configure(env)
+ Service(service_mapping.historyserver_win_service_name, action="start")
+
+ def stop(self, env):
+ import params
+ env.set_params(params)
+ Service(service_mapping.historyserver_win_service_name, action="stop")
+
+ def status(self, env):
+ check_windows_service_status(service_mapping.historyserver_win_service_name)
+
+if __name__ == "__main__":
+ Historyserver().execute()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/mapred_service_check.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/mapred_service_check.py
new file mode 100644
index 0000000000..f81408f546
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/mapred_service_check.py
@@ -0,0 +1,105 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+from resource_management.libraries import functions
+import sys
+import os
+
+
+class MapReduce2ServiceCheck(Script):
+ def service_check(self, env):
+ import params
+
+ env.set_params(params)
+
+ component_type = 'hs'
+ # The same history server web UI address is used whether or not SSL is
+ # enabled; the hadoop_ssl_enabled flag is passed to the validation script
+ # separately via its -s option.
+ component_address = params.hs_webui_address
+
+ validateStatusFileName = "validateYarnComponentStatus.py"
+ validateStatusFilePath = os.path.join(os.path.dirname(params.hadoop_home), "temp", validateStatusFileName)
+ python_executable = sys.executable
+ validateStatusCmd = "{} {} {} -p {} -s {}".format(
+ python_executable, validateStatusFilePath, component_type, component_address, params.hadoop_ssl_enabled)
+
+ if params.security_enabled:
+ kinit_cmd = "{} -kt {} {};".format(params.kinit_path_local, params.smoke_user_keytab, params.smokeuser)
+ smoke_cmd = kinit_cmd + validateStatusCmd
+ else:
+ smoke_cmd = validateStatusCmd
+
+ File(validateStatusFilePath,
+ content=StaticFile(validateStatusFileName)
+ )
+
+ Execute(smoke_cmd,
+ tries=3,
+ try_sleep=5,
+ logoutput=True
+ )
+
+ # hadoop_exe = os.path.join(params.hadoop_home, "bin", "hadoop")
+ #
+ # tested_file = os.path.join(params.hadoop_home, "bin", "hadoop.cmd")
+ # jar_path = os.path.join(params.hadoop_mapred2_jar_location, params.hadoopMapredExamplesJarName)
+ # input_file = format("/user/hadoop/mapredsmokeinput")
+ # output_file = format("/user/hadoop/mapredsmokeoutput")
+ # cleanup_cmd = format("cmd /C {hadoop_exe} fs -rm -r -f {output_file} {input_file}")
+ # create_file_cmd = format("cmd /C {hadoop_exe} fs -put {tested_file} {input_file}")
+ # run_wordcount_job = format("cmd /C {hadoop_exe} jar {jar_path} wordcount {input_file} {output_file}")
+ # test_cmd = format("cmd /C {hadoop_exe} fs -test -e {output_file}")
+ #
+ # if params.security_enabled:
+ # kinit_cmd = "{0} -kt {1} {2};".format(kinit_path_local, smoke_user_keytab, smokeuser)
+ # Execute(kinit_cmd)
+ #
+ # Execute(cleanup_cmd,
+ # tries=1,
+ # try_sleep=5,
+ # logoutput=True,
+ # user=params.hdfs_user
+ # )
+ #
+ # Execute(create_file_cmd,
+ # tries=1,
+ # try_sleep=5,
+ # logoutput=True,
+ # user=params.hdfs_user
+ # )
+ #
+ # Execute(run_wordcount_job,
+ # tries=1,
+ # try_sleep=5,
+ # logoutput=True,
+ # user=params.hdfs_user
+ # )
+ #
+ # Execute(test_cmd,
+ # logoutput=True,
+ # user=params.hdfs_user
+ # )
+
+
+if __name__ == "__main__":
+ MapReduce2ServiceCheck().execute()
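To make the command assembly above concrete, a hedged illustration with hypothetical values; at runtime every value comes from params:

python_executable = r"C:\Python27\python.exe"
validateStatusFilePath = r"C:\hadoop\temp\validateYarnComponentStatus.py"
smoke_cmd = "{} {} {} -p {} -s {}".format(
  python_executable, validateStatusFilePath, 'hs',
  'hs-host.example.com:19888', False)
# -> C:\Python27\python.exe C:\hadoop\temp\validateYarnComponentStatus.py hs
#    -p hs-host.example.com:19888 -s False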
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/mapreduce2_client.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/mapreduce2_client.py
new file mode 100644
index 0000000000..d324e9571d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/mapreduce2_client.py
@@ -0,0 +1,43 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+from yarn import yarn
+import os
+
+class MapReduce2Client(Script):
+
+ def install(self, env):
+ # client checks env var to determine if it is installed
+ if not os.environ.has_key("HADOOP_CONF_DIR"):
+ self.install_packages(env)
+ self.configure(env)
+
+ def configure(self, env):
+ import params
+ env.set_params(params)
+ yarn()
+
+ def status(self, env):
+ raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+ MapReduce2Client().execute()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/nodemanager.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/nodemanager.py
new file mode 100644
index 0000000000..4b767b129e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/nodemanager.py
@@ -0,0 +1,53 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+from yarn import yarn
+import service_mapping
+
+class Nodemanager(Script):
+
+ def install(self, env):
+ if not check_windows_service_exists(service_mapping.nodemanager_win_service_name):
+ self.install_packages(env)
+ self.configure(env)
+
+ def configure(self, env):
+ import params
+ env.set_params(params)
+ yarn()
+
+ def start(self, env):
+ import params
+ env.set_params(params)
+ self.configure(env)
+ Service(service_mapping.nodemanager_win_service_name, action="start")
+
+ def stop(self, env):
+ import params
+ env.set_params(params)
+ Service(service_mapping.nodemanager_win_service_name, action="stop")
+
+ def status(self, env):
+ check_windows_service_status(service_mapping.nodemanager_win_service_name)
+
+if __name__ == "__main__":
+ Nodemanager().execute()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/params.py
new file mode 100644
index 0000000000..a548f98765
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/params.py
@@ -0,0 +1,57 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+from resource_management.libraries import functions
+import os
+
+# server configurations
+config = Script.get_config()
+
+yarn_user = "hadoop"
+hdfs_user = "hadoop"
+smokeuser = "hadoop"
+config_dir = os.environ["HADOOP_CONF_DIR"]
+hadoop_home = os.environ["HADOOP_HOME"]
+
+yarn_home = os.environ["HADOOP_YARN_HOME"]
+
+hadoop_ssl_enabled = default("/configurations/core-site/hadoop.ssl.enabled", False)
+_authentication = config['configurations']['core-site']['hadoop.security.authentication']
+security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
+smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+rm_host = config['clusterHostInfo']['rm_host'][0]
+rm_port = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'].split(':')[-1]
+rm_https_port = "8090"
+rm_webui_address = format("{rm_host}:{rm_port}")
+rm_webui_https_address = format("{rm_host}:{rm_https_port}")
+
+hs_host = config['clusterHostInfo']['hs_host'][0]
+hs_port = config['configurations']['mapred-site']['mapreduce.jobhistory.webapp.address'].split(':')[-1]
+hs_webui_address = format("{hs_host}:{hs_port}")
+
+hadoop_mapred2_jar_location = os.path.join(os.environ["HADOOP_COMMON_HOME"], "share", "hadoop", "mapreduce")
+hadoopMapredExamplesJarName = "hadoop-mapreduce-examples-2.*.jar"
+
+exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
+exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
+update_exclude_file_only = config['commandParams']['update_exclude_file_only'] \ No newline at end of file
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/resourcemanager.py
new file mode 100644
index 0000000000..29e4efa5b2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/resourcemanager.py
@@ -0,0 +1,77 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+from yarn import yarn
+import service_mapping
+
+class Resourcemanager(Script):
+
+ def install(self, env):
+ import params
+ if not check_windows_service_exists(service_mapping.resourcemanager_win_service_name):
+ self.install_packages(env)
+ self.configure(env)
+
+ def configure(self, env):
+ import params
+ env.set_params(params)
+ yarn()
+
+ def start(self, env):
+ import params
+ env.set_params(params)
+ self.configure(env)
+ Service(service_mapping.resourcemanager_win_service_name, action="start")
+
+ def stop(self, env):
+ import params
+ env.set_params(params)
+ Service(service_mapping.resourcemanager_win_service_name, action="stop")
+
+ def status(self, env):
+ check_windows_service_status(service_mapping.resourcemanager_win_service_name)
+
+ def refreshqueues(self, env):
+ pass
+
+ def decommission(self, env):
+ import params
+
+ env.set_params(params)
+ yarn_user = params.yarn_user
+
+ yarn_refresh_cmd = format("cmd /c yarn rmadmin -refreshNodes")
+
+ File(params.exclude_file_path,
+ content=Template("exclude_hosts_list.j2"),
+ owner=yarn_user,
+ mode="f"
+ )
+
+    if not params.update_exclude_file_only:
+      Execute(yarn_refresh_cmd,
+              user=yarn_user)
+
+if __name__ == "__main__":
+ Resourcemanager().execute()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/service_check.py
new file mode 100644
index 0000000000..8196f2e249
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/service_check.py
@@ -0,0 +1,68 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from ambari_commons import os_utils
+
+from resource_management import *
+import sys
+import os
+
+class ServiceCheck(Script):
+ def service_check(self, env):
+ import params
+ env.set_params(params)
+
+ yarn_exe = os_utils.quote_path(os.path.join(params.yarn_home, "bin", "yarn.cmd"))
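+    # quote_path quotes the yarn.cmd path so that "cmd /C" still treats it as a
+    # single argument if the install path contains spaces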
+
+ run_yarn_check_cmd = "cmd /C %s node -list" % yarn_exe
+
+ component_type = 'rm'
+ if params.hadoop_ssl_enabled:
+ component_address = params.rm_webui_https_address
+ else:
+ component_address = params.rm_webui_address
+
+ temp_dir = os.path.join(os.path.dirname(params.hadoop_home), "temp")
+ validateStatusFileName = "validateYarnComponentStatus.py"
+ validateStatusFilePath = os.path.join(temp_dir, validateStatusFileName)
+ python_executable = sys.executable
+ validateStatusCmd = "%s %s %s -p %s -s %s" % (python_executable, validateStatusFilePath, component_type, component_address, params.hadoop_ssl_enabled)
+
+ if params.security_enabled:
+ kinit_cmd = "%s -kt %s %s;" % (params.kinit_path_local, params.smoke_user_keytab, params.smokeuser)
+ smoke_cmd = kinit_cmd + ' ' + validateStatusCmd
+ else:
+ smoke_cmd = validateStatusCmd
+
+ File(validateStatusFilePath,
+ content=StaticFile(validateStatusFileName)
+ )
+
+ Execute(smoke_cmd,
+ tries=3,
+ try_sleep=5,
+ logoutput=True
+ )
+
+ Execute(run_yarn_check_cmd, logoutput=True)
+
+if __name__ == "__main__":
+ ServiceCheck().execute()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/service_mapping.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/service_mapping.py
new file mode 100644
index 0000000000..1863797fef
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/service_mapping.py
@@ -0,0 +1,26 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+# windows services mapping
+resourcemanager_win_service_name = "resourcemanager"
+nodemanager_win_service_name = "nodemanager"
+historyserver_win_service_name = "jobhistoryserver"
+apptimelineserver_win_service_name = "historyserver" \ No newline at end of file
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/yarn.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/yarn.py
new file mode 100644
index 0000000000..8578d748d7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/yarn.py
@@ -0,0 +1,44 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+
+def yarn():
+ import params
+ XmlConfig("mapred-site.xml",
+ conf_dir=params.config_dir,
+ configurations=params.config['configurations']['mapred-site'],
+ owner=params.yarn_user,
+ mode='f'
+ )
+ XmlConfig("yarn-site.xml",
+ conf_dir=params.config_dir,
+ configurations=params.config['configurations']['yarn-site'],
+ owner=params.yarn_user,
+ mode='f'
+ )
+ XmlConfig("capacity-scheduler.xml",
+ conf_dir=params.config_dir,
+ configurations=params.config['configurations']['capacity-scheduler'],
+ owner=params.yarn_user,
+ mode='f'
+ )
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/yarn_client.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/yarn_client.py
new file mode 100644
index 0000000000..b7037b66b7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/scripts/yarn_client.py
@@ -0,0 +1,44 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+from yarn import yarn
+import os
+
+class YarnClient(Script):
+
+  def install(self, env):
+    # client checks env var to determine if it is installed;
+    # params.config_dir is read straight from HADOOP_CONF_DIR, so it is never None
+    if "HADOOP_CONF_DIR" not in os.environ:
+      self.install_packages(env)
+    self.configure(env)
+
+ def configure(self, env):
+ import params
+ env.set_params(params)
+ yarn()
+
+ def status(self, env):
+ raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+ YarnClient().execute()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/templates/container-executor.cfg.j2 b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/templates/container-executor.cfg.j2
new file mode 100644
index 0000000000..90b12e69c4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/templates/container-executor.cfg.j2
@@ -0,0 +1,40 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+yarn.nodemanager.local-dirs={{nm_local_dirs}}
+yarn.nodemanager.log-dirs={{nm_log_dirs}}
+yarn.nodemanager.linux-container-executor.group={{yarn_executor_container_group}}
+banned.users=hdfs,yarn,mapred,bin
+min.user.id=1000
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/templates/exclude_hosts_list.j2
new file mode 100644
index 0000000000..c7ce4168ae
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/templates/exclude_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in exclude_hosts %}
+{{host}}
+{% endfor %} \ No newline at end of file
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/templates/mapreduce.conf.j2 b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/templates/mapreduce.conf.j2
new file mode 100644
index 0000000000..b996645da1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/templates/mapreduce.conf.j2
@@ -0,0 +1,35 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{{mapred_user}} - nofile 32768
+{{mapred_user}} - nproc 65536
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/templates/taskcontroller.cfg.j2 b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/templates/taskcontroller.cfg.j2
new file mode 100644
index 0000000000..3d5f4f2d4f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/templates/taskcontroller.cfg.j2
@@ -0,0 +1,38 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+mapred.local.dir={{mapred_local_dir}}
+mapreduce.tasktracker.group={{mapred_tt_group}}
+hadoop.log.dir={{hdfs_log_dir_prefix}}/{{mapred_user}}
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/templates/yarn.conf.j2 b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/templates/yarn.conf.j2
new file mode 100644
index 0000000000..3bd7a45764
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/package/templates/yarn.conf.j2
@@ -0,0 +1,35 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{{yarn_user}} - nofile 32768
+{{yarn_user}} - nproc 65536
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/configuration/zookeeper-env.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/configuration/zookeeper-env.xml
new file mode 100644
index 0000000000..e30a165c23
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/configuration/zookeeper-env.xml
@@ -0,0 +1,73 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+ <property>
+ <name>zk_user</name>
+ <value>zookeeper</value>
+ <description>ZooKeeper User.</description>
+ </property>
+ <property>
+ <name>zk_data_dir</name>
+ <value>C:\\\\hadoop\\\\zookeeper</value>
+ <description>Data directory for ZooKeeper.</description>
+ </property>
+ <property>
+ <name>tickTime</name>
+ <value>2000</value>
+ <description>The length of a single tick in milliseconds, which is the basic time unit used by ZooKeeper</description>
+ </property>
+ <property>
+ <name>initLimit</name>
+ <value>10</value>
+ <description>Ticks to allow for sync at Init.</description>
+ </property>
+ <property>
+ <name>syncLimit</name>
+ <value>5</value>
+ <description>Ticks to allow for sync at Runtime.</description>
+ </property>
+ <property>
+ <name>clientPort</name>
+ <value>2181</value>
+ <description>Port for running ZK Server.</description>
+ </property>
+
+ <!-- zookeeper-env.sh -->
+ <property>
+ <name>content</name>
+ <description>zookeeper-env.sh content</description>
+ <value>
+export JAVA_HOME={{java64_home}}
+export ZOO_LOG_DIR={{zk_log_dir}}
+export ZOOPIDFILE={{zk_pid_file}}
+export SERVER_JVMFLAGS={{zk_server_heapsize}}
+export JAVA=$JAVA_HOME/bin/java
+export CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*
+
+{% if security_enabled %}
+export SERVER_JVMFLAGS="$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}"
+export CLIENT_JVMFLAGS="$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}"
+{% endif %}
+ </value>
+ </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/configuration/zookeeper-log4j.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/configuration/zookeeper-log4j.xml
new file mode 100644
index 0000000000..4dce6d1cd8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/configuration/zookeeper-log4j.xml
@@ -0,0 +1,100 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+ <property>
+ <name>content</name>
+ <value>
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+#
+# ZooKeeper Logging Configuration
+#
+
+# DEFAULT: console appender only
+log4j.rootLogger=INFO, CONSOLE
+
+# Example with rolling log file
+#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE
+
+# Example with rolling log file and tracing
+#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE
+
+#
+# Log INFO level and above messages to the console
+#
+log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
+log4j.appender.CONSOLE.Threshold=INFO
+log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
+log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
+
+#
+# Add ROLLINGFILE to rootLogger to get log file output
+# Log DEBUG level and above messages to a log file
+log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender
+log4j.appender.ROLLINGFILE.Threshold=DEBUG
+log4j.appender.ROLLINGFILE.File=zookeeper.log
+
+# Max log file size of 10MB
+log4j.appender.ROLLINGFILE.MaxFileSize=10MB
+# uncomment the next line to limit number of backup files
+#log4j.appender.ROLLINGFILE.MaxBackupIndex=10
+
+log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
+log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
+
+
+#
+# Add TRACEFILE to rootLogger to get log file output
+# Log DEBUG level and above messages to a log file
+log4j.appender.TRACEFILE=org.apache.log4j.FileAppender
+log4j.appender.TRACEFILE.Threshold=TRACE
+log4j.appender.TRACEFILE.File=zookeeper_trace.log
+
+log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout
+### Notice we are including log4j's NDC here (%x)
+log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n
+ </value>
+ </property>
+
+</configuration>
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/metainfo.xml
new file mode 100644
index 0000000000..daae2a757c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/metainfo.xml
@@ -0,0 +1,78 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>ZOOKEEPER</name>
+ <displayName>ZooKeeper</displayName>
+ <comment>Centralized service which provides highly reliable distributed coordination</comment>
+ <version>3.4.5.2.0</version>
+ <components>
+
+ <component>
+ <name>ZOOKEEPER_SERVER</name>
+ <displayName>ZooKeeper Server</displayName>
+ <category>MASTER</category>
+ <cardinality>1+</cardinality>
+ <commandScript>
+ <script>scripts/zookeeper_server.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ </component>
+
+ <component>
+ <name>ZOOKEEPER_CLIENT</name>
+ <displayName>ZooKeeper Client</displayName>
+ <category>CLIENT</category>
+ <cardinality>1+</cardinality>
+ <commandScript>
+ <script>scripts/zookeeper_client.py</script>
+ <scriptType>PYTHON</scriptType>
+ </commandScript>
+ <configFiles>
+ <configFile>
+ <type>env</type>
+ <fileName>zookeeper-env.cmd</fileName>
+ <dictionaryName>zookeeper-env</dictionaryName>
+ </configFile>
+ <configFile>
+ <type>env</type>
+ <fileName>log4j.properties</fileName>
+ <dictionaryName>zookeeper-log4j</dictionaryName>
+ </configFile>
+ </configFiles>
+ </component>
+ </components>
+
+ <commandScript>
+ <script>scripts/service_check.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>300</timeout>
+ </commandScript>
+
+ <configuration-dependencies>
+ <config-type>zookeeper-log4j</config-type>
+ <config-type>zookeeper-env</config-type>
+ <config-type>zoo.cfg</config-type>
+ </configuration-dependencies>
+ <restartRequiredAfterChange>true</restartRequiredAfterChange>
+ </service>
+ </services>
+</metainfo>
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/package/scripts/params.py
new file mode 100644
index 0000000000..0c78c67786
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/package/scripts/params.py
@@ -0,0 +1,59 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import os
+
+from resource_management import *
+
+# server configurations
+config = Script.get_config()
+
+# notused zookeeper_home_dir = os.environ["ZOOKEEPER_HOME"]
+zookeeper_conf_dir = os.environ["ZOOKEEPER_CONF_DIR"]
+hdp_root = os.environ["HADOOP_NODE_INSTALL_ROOT"]
+zk_user = "hadoop"
+
+# notused zk_log_dir = config['configurations']['zookeeper-env']['zk_log_dir']
+#todo zk_data_dir must be dynamically changed
+zk_data_dir = config['configurations']['zookeeper-env']['zk_data_dir']
+
+tickTime = config['configurations']['zookeeper-env']['tickTime']
+initLimit = config['configurations']['zookeeper-env']['initLimit']
+syncLimit = config['configurations']['zookeeper-env']['syncLimit']
+clientPort = config['configurations']['zookeeper-env']['clientPort']
+
+if 'zoo.cfg' in config['configurations']:
+ zoo_cfg_properties_map = config['configurations']['zoo.cfg']
+else:
+ zoo_cfg_properties_map = {}
+zoo_cfg_properties_map_length = len(zoo_cfg_properties_map)
+
+zookeeper_hosts = config['clusterHostInfo']['zookeeper_hosts']
+zookeeper_hosts.sort()
+hostname = config['hostname']
+
+_authentication = config['configurations']['core-site']['hadoop.security.authentication']
+security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
+
+#log4j.properties
+if 'zookeeper-log4j' in config['configurations'] and 'content' in config['configurations']['zookeeper-log4j']:
+ log4j_props = config['configurations']['zookeeper-log4j']['content']
+else:
+ log4j_props = None
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/package/scripts/service_check.py
new file mode 100644
index 0000000000..4cff7c900c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/package/scripts/service_check.py
@@ -0,0 +1,34 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import os
+
+from resource_management import *
+
+class ZookeeperServiceCheck(Script):
+ def service_check(self, env):
+ import params
+ env.set_params(params)
+
+ smoke_cmd = os.path.join(params.hdp_root,"Run-SmokeTests.cmd")
+ service = "Zookeeper"
+ Execute(format("cmd /C {smoke_cmd} {service}"), logoutput=True)
+
+if __name__ == "__main__":
+ ZookeeperServiceCheck().execute()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/package/scripts/service_mapping.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/package/scripts/service_mapping.py
new file mode 100644
index 0000000000..5fc6da2b19
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/package/scripts/service_mapping.py
@@ -0,0 +1,22 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+zookeeper_win_service_name = "zkServer" \ No newline at end of file
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/package/scripts/zookeeper.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/package/scripts/zookeeper.py
new file mode 100644
index 0000000000..15bc881293
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/package/scripts/zookeeper.py
@@ -0,0 +1,63 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import os
+
+from resource_management import *
+
+
+def zookeeper(type=None):
+ import params
+ configFile("zoo.cfg", template_name="zoo.cfg.j2")
+ configFile("configuration.xsl", template_name="configuration.xsl.j2")
+
+ Directory(params.zk_data_dir,
+ owner=params.zk_user,
+ mode="(OI)(CI)F",
+ recursive=True
+ )
+  if params.log4j_props is not None:
+ File(format("{params.zookeeper_conf_dir}/log4j.properties"),
+ mode="f",
+ owner=params.zk_user,
+ content=params.log4j_props
+ )
+ elif (os.path.exists(format("{params.zookeeper_conf_dir}/log4j.properties"))):
+ File(format("{params.zookeeper_conf_dir}/log4j.properties"),
+ mode="f",
+ owner=params.zk_user
+ )
+ if type == 'server':
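+    # the myid file content must match the N of this host's server.N entry in
+    # zoo.cfg; both derive from the same sorted host list, keeping them in sync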
+ myid = str(sorted(params.zookeeper_hosts).index(params.hostname) + 1)
+ File(format("{zk_data_dir}/myid"),
+ owner=params.zk_user,
+ mode = "f",
+ content = myid
+ )
+
+def configFile(name, template_name=None):
+ import params
+
+ File(format("{params.zookeeper_conf_dir}/{name}"),
+ content=Template(template_name),
+ owner=params.zk_user,
+ mode="f"
+ )
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/package/scripts/zookeeper_client.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/package/scripts/zookeeper_client.py
new file mode 100644
index 0000000000..fa79b8e2a1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/package/scripts/zookeeper_client.py
@@ -0,0 +1,42 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+from zookeeper import zookeeper
+import os
+
+class ZookeeperClient(Script):
+ def install(self, env):
+ # client checks env var to determine if it is installed
+ if not os.environ.has_key("ZOOKEEPER_HOME"):
+ self.install_packages(env)
+
+ def configure(self, env):
+ import params
+ env.set_params(params)
+ zookeeper(type='client')
+
+ def status(self, env):
+ raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+ ZookeeperClient().execute()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/package/scripts/zookeeper_server.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/package/scripts/zookeeper_server.py
new file mode 100644
index 0000000000..d2ad45d76f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/package/scripts/zookeeper_server.py
@@ -0,0 +1,51 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+from zookeeper import zookeeper
+import service_mapping
+
+class ZookeeperServer(Script):
+ def install(self, env):
+ if not check_windows_service_exists(service_mapping.zookeeper_win_service_name):
+ self.install_packages(env)
+ self.configure(env)
+
+ def configure(self, env):
+ import params
+ env.set_params(params)
+ zookeeper(type='server')
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env)
+    Service(service_mapping.zookeeper_win_service_name, action="start")
+
+ def stop(self, env):
+ import params
+ Service(service_mapping.zookeeper_win_service_name, action="stop")
+
+ def status(self, env):
+ check_windows_service_status(service_mapping.zookeeper_win_service_name)
+
+if __name__ == "__main__":
+ ZookeeperServer().execute()
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/package/templates/configuration.xsl.j2 b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/package/templates/configuration.xsl.j2
new file mode 100644
index 0000000000..8830c45ea6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/package/templates/configuration.xsl.j2
@@ -0,0 +1,42 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+<?xml version="1.0"?>
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+<xsl:output method="html"/>
+<xsl:template match="configuration">
+<html>
+<body>
+<table border="1">
+<tr>
+ <td>name</td>
+ <td>value</td>
+ <td>description</td>
+</tr>
+<xsl:for-each select="property">
+ <tr>
+ <td><a name="{name}"><xsl:value-of select="name"/></a></td>
+ <td><xsl:value-of select="value"/></td>
+ <td><xsl:value-of select="description"/></td>
+ </tr>
+</xsl:for-each>
+</table>
+</body>
+</html>
+</xsl:template>
+</xsl:stylesheet>
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/package/templates/zoo.cfg.j2 b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/package/templates/zoo.cfg.j2
new file mode 100644
index 0000000000..beb4730995
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/package/templates/zoo.cfg.j2
@@ -0,0 +1,69 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+# The number of milliseconds of each tick
+tickTime={{tickTime}}
+# The number of ticks that the initial
+# synchronization phase can take
+initLimit={{initLimit}}
+# The number of ticks that can pass between
+# sending a request and getting an acknowledgement
+syncLimit={{syncLimit}}
+# the directory where the snapshot is stored.
+dataDir={{zk_data_dir}}
+# the port at which the clients will connect
+clientPort={{clientPort}}
+{% for host in zookeeper_hosts %}
+server.{{loop.index}}={{host}}:2888:3888
+{% endfor %}
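+{# each iteration above renders a quorum entry of the form server.N=<host>:2888:3888 #}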
+
+{% if security_enabled %}
+authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
+jaasLoginRenew=3600000
+kerberos.removeHostFromPrincipal=true
+kerberos.removeRealmFromPrincipal=true
+{% endif %}
+
+{% if zoo_cfg_properties_map_length > 0 %}
+# Custom properties
+{% endif %}
+{% for key, value in zoo_cfg_properties_map.iteritems() %}
+{{key}}={{value}}
+{% endfor %}
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/stack_advisor.py
new file mode 100644
index 0000000000..cf35e47b97
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/stack_advisor.py
@@ -0,0 +1,414 @@
+#!/usr/bin/env ambari-python-wrap
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import re
+import sys
+from math import ceil
+
+from stack_advisor import DefaultStackAdvisor
+
+class HDPWIN21StackAdvisor(DefaultStackAdvisor):
+
+ def getComponentLayoutValidations(self, services, hosts):
+ """Returns array of Validation objects about issues with hostnames components assigned to"""
+ items = []
+
+ # Validating NAMENODE and SECONDARY_NAMENODE are on different hosts if possible
+ hostsList = [host["Hosts"]["host_name"] for host in hosts["items"]]
+ hostsCount = len(hostsList)
+
+ componentsListList = [service["components"] for service in services["services"]]
+ componentsList = [item for sublist in componentsListList for item in sublist]
+ nameNodeHosts = [component["StackServiceComponents"]["hostnames"] for component in componentsList if component["StackServiceComponents"]["component_name"] == "NAMENODE"]
+ secondaryNameNodeHosts = [component["StackServiceComponents"]["hostnames"] for component in componentsList if component["StackServiceComponents"]["component_name"] == "SECONDARY_NAMENODE"]
+
+ # Validating cardinality
+ for component in componentsList:
+ if component["StackServiceComponents"]["cardinality"] is not None:
+ componentName = component["StackServiceComponents"]["component_name"]
+ componentDisplayName = component["StackServiceComponents"]["display_name"]
+ componentHostsCount = 0
+ if component["StackServiceComponents"]["hostnames"] is not None:
+ componentHostsCount = len(component["StackServiceComponents"]["hostnames"])
+ cardinality = str(component["StackServiceComponents"]["cardinality"])
+ # cardinality types: null, 1+, 1-2, 1, ALL
+ message = None
+ if "+" in cardinality:
+ hostsMin = int(cardinality[:-1])
+ if componentHostsCount < hostsMin:
+ message = "At least {0} {1} components should be installed in cluster.".format(hostsMin, componentDisplayName)
+ elif "-" in cardinality:
+ nums = cardinality.split("-")
+ hostsMin = int(nums[0])
+ hostsMax = int(nums[1])
+ if componentHostsCount > hostsMax or componentHostsCount < hostsMin:
+ message = "Between {0} and {1} {2} components should be installed in cluster.".format(hostsMin, hostsMax, componentDisplayName)
+ elif "ALL" == cardinality:
+ if componentHostsCount != hostsCount:
+ message = "{0} component should be installed on all hosts in cluster.".format(componentDisplayName)
+ else:
+ if componentHostsCount != int(cardinality):
+ message = "Exactly {0} {1} components should be installed in cluster.".format(int(cardinality), componentDisplayName)
+
+ if message is not None:
+ items.append({"type": 'host-component', "level": 'ERROR', "message": message, "component-name": componentName})
+
+ # Validating host-usage
+ usedHostsListList = [component["StackServiceComponents"]["hostnames"] for component in componentsList if not self.isComponentNotValuable(component)]
+ usedHostsList = [item for sublist in usedHostsListList for item in sublist]
+ nonUsedHostsList = [item for item in hostsList if item not in usedHostsList]
+ for host in nonUsedHostsList:
+ items.append( { "type": 'host-component', "level": 'ERROR', "message": 'Host is not used', "host": str(host) } )
+
+ return items
+
+ def getServiceConfigurationRecommenderDict(self):
+ return {
+ "YARN": self.recommendYARNConfigurations,
+ "MAPREDUCE2": self.recommendMapReduce2Configurations,
+ "OOZIE": self.recommendOozieConfigurations,
+ "HIVE": self.recommendHiveConfigurations,
+ "TEZ": self.recommendTezConfigurations
+ }
+
+ def putProperty(self, config, configType):
+ config[configType] = {"properties": {}}
+ def appendProperty(key, value):
+ config[configType]["properties"][key] = str(value)
+ return appendProperty
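+  # putProperty returns a setter bound to one config type; the recommenders
+  # below use it as, e.g.:
+  #   putYarnProperty = self.putProperty(configurations, "yarn-site")
+  #   putYarnProperty('yarn.nodemanager.resource.memory-mb', 4096)
+  # note: each putProperty call resets the "properties" dict for that config type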
+
+ def recommendYARNConfigurations(self, configurations, clusterData):
+ putYarnProperty = self.putProperty(configurations, "yarn-site")
+ putYarnProperty('yarn.nodemanager.resource.memory-mb', int(round(clusterData['containers'] * clusterData['ramPerContainer'])))
+ putYarnProperty('yarn.scheduler.minimum-allocation-mb', int(clusterData['ramPerContainer']))
+ putYarnProperty('yarn.scheduler.maximum-allocation-mb', int(round(clusterData['containers'] * clusterData['ramPerContainer'])))
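+    # NM capacity and the scheduler's maximum allocation are both set to
+    # containers * ramPerContainer, so one container may claim a node's full memory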
+
+ def recommendMapReduce2Configurations(self, configurations, clusterData):
+ putMapredProperty = self.putProperty(configurations, "mapred-site")
+ putMapredProperty('yarn.app.mapreduce.am.resource.mb', int(clusterData['amMemory']))
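+    # the -Xmx opts below are sized to ~80% of the matching container memory,
+    # leaving headroom for JVM and native overhead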
+ putMapredProperty('yarn.app.mapreduce.am.command-opts', "-Xmx" + str(int(round(0.8 * clusterData['amMemory']))) + "m")
+ putMapredProperty('mapreduce.map.memory.mb', clusterData['mapMemory'])
+ putMapredProperty('mapreduce.reduce.memory.mb', int(clusterData['reduceMemory']))
+ putMapredProperty('mapreduce.map.java.opts', "-Xmx" + str(int(round(0.8 * clusterData['mapMemory']))) + "m")
+ putMapredProperty('mapreduce.reduce.java.opts', "-Xmx" + str(int(round(0.8 * clusterData['reduceMemory']))) + "m")
+ putMapredProperty('mapreduce.task.io.sort.mb', min(int(round(0.4 * clusterData['mapMemory'])), 1024))
+
+ def recommendOozieConfigurations(self, configurations, clusterData):
+ if "FALCON_SERVER" in clusterData["components"]:
+ putMapredProperty = self.putProperty(configurations, "oozie-site")
+ putMapredProperty("oozie.services.ext",
+ "org.apache.oozie.service.JMSAccessorService," +
+ "org.apache.oozie.service.PartitionDependencyManagerService," +
+ "org.apache.oozie.service.HCatAccessorService")
+
+ def recommendHiveConfigurations(self, configurations, clusterData):
+ containerSize = clusterData['mapMemory'] if clusterData['mapMemory'] > 2048 else int(clusterData['reduceMemory'])
+ containerSize = min(clusterData['containers'] * clusterData['ramPerContainer'], containerSize)
+ putHiveProperty = self.putProperty(configurations, "hive-site")
+ putHiveProperty('hive.auto.convert.join.noconditionaltask.size', int(round(containerSize / 3)) * 1048576)
+ putHiveProperty('hive.tez.java.opts', "-server -Xmx" + str(int(round(0.8 * containerSize)))
+ + "m -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC")
+ putHiveProperty('hive.tez.container.size', containerSize)
+
+ def recommendTezConfigurations(self, configurations, clusterData):
+ putTezProperty = self.putProperty(configurations, "tez-site")
+ putTezProperty("tez.am.resource.memory.mb", int(clusterData['amMemory']))
+ putTezProperty("tez.am.java.opts",
+ "-server -Xmx" + str(int(0.8 * clusterData["amMemory"]))
+ + "m -Djava.net.preferIPv4Stack=true -XX:+UseNUMA -XX:+UseParallelGC")
+
+ def getConfigurationClusterSummary(self, servicesList, hosts, components):
+
+ hBaseInstalled = False
+ if 'HBASE' in servicesList:
+ hBaseInstalled = True
+
+ cluster = {
+ "cpu": 0,
+ "disk": 0,
+ "ram": 0,
+ "hBaseInstalled": hBaseInstalled,
+ "components": components
+ }
+
+ if len(hosts["items"]) > 0:
+ host = hosts["items"][0]["Hosts"]
+ cluster["cpu"] = host["cpu_count"]
+ cluster["disk"] = len(host["disk_info"])
+ cluster["ram"] = int(host["total_mem"] / (1024 * 1024))
+
+ ramRecommendations = [
+ {"os":1, "hbase":1},
+ {"os":2, "hbase":1},
+ {"os":2, "hbase":2},
+ {"os":4, "hbase":4},
+ {"os":6, "hbase":8},
+ {"os":8, "hbase":8},
+ {"os":8, "hbase":8},
+ {"os":12, "hbase":16},
+ {"os":24, "hbase":24},
+ {"os":32, "hbase":32},
+ {"os":64, "hbase":64}
+ ]
+ index = {
+ cluster["ram"] <= 4: 0,
+ 4 < cluster["ram"] <= 8: 1,
+ 8 < cluster["ram"] <= 16: 2,
+ 16 < cluster["ram"] <= 24: 3,
+ 24 < cluster["ram"] <= 48: 4,
+ 48 < cluster["ram"] <= 64: 5,
+ 64 < cluster["ram"] <= 72: 6,
+ 72 < cluster["ram"] <= 96: 7,
+ 96 < cluster["ram"] <= 128: 8,
+ 128 < cluster["ram"] <= 256: 9,
+ 256 < cluster["ram"]: 10
+ }[1]
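+ # The dict above keys each bracket test by its boolean result; exactly one
+ # test is True and True == 1 in Python, so the [1] lookup selects the matching
+ # row (e.g. ram of 10 GB satisfies only 8 < ram <= 16, giving index 2).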
+ cluster["reservedRam"] = ramRecommendations[index]["os"]
+ cluster["hbaseRam"] = ramRecommendations[index]["hbase"]
+
+ cluster["minContainerSize"] = {
+ cluster["ram"] <= 4: 256,
+ 4 < cluster["ram"] <= 8: 512,
+ 8 < cluster["ram"] <= 24: 1024,
+ 24 < cluster["ram"]: 2048
+ }[1]
+
+ totalAvailableRam = cluster["ram"] - cluster["reservedRam"]
+ if cluster["hBaseInstalled"]:
+ totalAvailableRam -= cluster["hbaseRam"]
+ cluster["totalAvailableRam"] = max(2048, totalAvailableRam * 1024)
+ '''containers = max(3, min(2 * CORES, min(1.8 * DISKS, totalAvailableRam / MIN_CONTAINER_SIZE)))'''
+ cluster["containers"] = round(max(3,
+ min(2 * cluster["cpu"],
+ min(ceil(1.8 * cluster["disk"]),
+ cluster["totalAvailableRam"] / cluster["minContainerSize"]))))
+
+ '''ramPerContainer = max(2GB, RAM - reservedRam - hBaseRam) / containers'''
+ cluster["ramPerContainer"] = abs(cluster["totalAvailableRam"] / cluster["containers"])
+ '''If greater than 1 GB, the value is rounded down to a multiple of 512 MB.'''
+ if cluster["ramPerContainer"] > 1024:
+ cluster["ramPerContainer"] = int(cluster["ramPerContainer"] / 512) * 512
+
+ cluster["mapMemory"] = int(cluster["ramPerContainer"])
+ cluster["reduceMemory"] = cluster["ramPerContainer"]
+ cluster["amMemory"] = max(cluster["mapMemory"], cluster["reduceMemory"])
+
+ return cluster
+
+ def getConfigurationsValidationItems(self, services, hosts):
+ """Returns array of Validation objects about issues with configuration values provided in services"""
+ items = []
+
+ recommendations = self.recommendConfigurations(services, hosts)
+ recommendedDefaults = recommendations["recommendations"]["blueprint"]["configurations"]
+
+ configurations = services["configurations"]
+ for service in services["services"]:
+ serviceName = service["StackServices"]["service_name"]
+ validator = self.validateServiceConfigurations(serviceName)
+ if validator is not None:
+ siteName = validator[0]
+ method = validator[1]
+ if siteName in recommendedDefaults:
+ siteProperties = getSiteProperties(configurations, siteName)
+ if siteProperties is not None:
+ resultItems = method(siteProperties, recommendedDefaults[siteName]["properties"], configurations)
+ items.extend(resultItems)
+ return items
+
+ def getServiceConfigurationValidators(self):
+ return {
+ "MAPREDUCE2": ["mapred-site", self.validateMapReduce2Configurations],
+ "YARN": ["yarn-site", self.validateYARNConfigurations]
+ "HIVE": ["hive-site", self.validateHiveConfigurations],
+ "TEZ": ["tez-site", self.validateTezConfigurations]
+ }
+
+ def validateServiceConfigurations(self, serviceName):
+ return self.getServiceConfigurationValidators().get(serviceName, None)
+
+ def toConfigurationValidationProblems(self, validationProblems, siteName):
+ result = []
+ for validationProblem in validationProblems:
+ validationItem = validationProblem.get("item", None)
+ if validationItem is not None:
+ problem = {"type": 'configuration', "level": validationItem["level"], "message": validationItem["message"],
+ "config-type": siteName, "config-name": validationProblem["config-name"] }
+ result.append(problem)
+ return result
+
+ def getWarnItem(self, message):
+ return {"level": "WARN", "message": message}
+
+ def getErrorItem(self, message):
+ return {"level": "ERROR", "message": message}
+
+ def validatorLessThenDefaultValue(self, properties, recommendedDefaults, propertyName):
+ if propertyName not in properties:
+ return self.getErrorItem("Value should be set")
+ value = to_number(properties[propertyName])
+ if value is None:
+ return self.getErrorItem("Value should be integer")
+ defaultValue = to_number(recommendedDefaults[propertyName])
+ if defaultValue is None:
+ return None
+ if value < defaultValue:
+ return self.getWarnItem("Value is less than the recommended default of {0}".format(defaultValue))
+ return None
+
+ def validateXmxValue(self, properties, recommendedDefaults, propertyName):
+ if propertyName not in properties:
+ return self.getErrorItem("Value should be set")
+ value = properties[propertyName]
+ defaultValue = recommendedDefaults[propertyName]
+ if defaultValue is None:
+ return self.getErrorItem("Config's default value can't be null or undefined")
+ if not checkXmxValueFormat(value):
+ return self.getErrorItem('Invalid value format')
+ valueInt = formatXmxSizeToBytes(getXmxSize(value))
+ defaultValueXmx = getXmxSize(defaultValue)
+ defaultValueInt = formatXmxSizeToBytes(defaultValueXmx)
+ if valueInt < defaultValueInt:
+ return self.getWarnItem("Value is less than the recommended default of -Xmx" + defaultValueXmx)
+ return None
+
+ def validateMapReduce2Configurations(self, properties, recommendedDefaults, configurations):
+ validationItems = [ {"config-name": 'mapreduce.map.java.opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'mapreduce.map.java.opts')},
+ {"config-name": 'mapreduce.reduce.java.opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'mapreduce.reduce.java.opts')},
+ {"config-name": 'mapreduce.task.io.sort.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.task.io.sort.mb')},
+ {"config-name": 'mapreduce.map.memory.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.map.memory.mb')},
+ {"config-name": 'mapreduce.reduce.memory.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.reduce.memory.mb')},
+ {"config-name": 'yarn.app.mapreduce.am.resource.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.app.mapreduce.am.resource.mb')},
+ {"config-name": 'yarn.app.mapreduce.am.command-opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'yarn.app.mapreduce.am.command-opts')} ]
+ return self.toConfigurationValidationProblems(validationItems, "mapred-site")
+
+ def validateYARNConfigurations(self, properties, recommendedDefaults, configurations):
+ validationItems = [ {"config-name": 'yarn.nodemanager.resource.memory-mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.nodemanager.resource.memory-mb')},
+ {"config-name": 'yarn.scheduler.minimum-allocation-mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.scheduler.minimum-allocation-mb')},
+ {"config-name": 'yarn.scheduler.maximum-allocation-mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.scheduler.maximum-allocation-mb')} ]
+ return self.toConfigurationValidationProblems(validationItems, "yarn-site")
+
+ def validateHiveConfigurations(self, properties, recommendedDefaults, configurations):
+ validationItems = [ {"config-name": 'hive.tez.container.size', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'hive.tez.container.size')},
+ {"config-name": 'hive.tez.java.opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'hive.tez.java.opts')},
+ {"config-name": 'hive.auto.convert.join.noconditionaltask.size', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'hive.auto.convert.join.noconditionaltask.size')} ]
+ return self.toConfigurationValidationProblems(validationItems, "hive-site")
+
+ def validateTezConfigurations(self, properties, recommendedDefaults, configurations):
+ validationItems = [ {"config-name": 'tez.am.resource.memory.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'tez.am.resource.memory.mb')},
+ {"config-name": 'tez.am.java.opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'tez.am.java.opts')} ]
+ return self.toConfigurationValidationProblems(validationItems, "tez-site")
+
+ def getMastersWithMultipleInstances(self):
+ return ['ZOOKEEPER_SERVER', 'HBASE_MASTER']
+
+ def getNotValuableComponents(self):
+ return ['JOURNALNODE', 'ZKFC', 'APP_TIMELINE_SERVER']
+
+ def getNotPreferableOnServerComponents(self):
+ return ['STORM_UI_SERVER', 'DRPC_SERVER', 'STORM_REST_API', 'NIMBUS']
+
+ def getCardinalitiesDict(self):
+ return {
+ 'ZOOKEEPER_SERVER': {"min": 3},
+ 'HBASE_MASTER': {"min": 1},
+ }
+
+ def getComponentLayoutSchemes(self):
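+ # Scheme format: {cluster-size threshold: host index, ..., "else": fallback host index},
+ # consumed by the component layout logic elsewhere in the advisor.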
+ return {
+ 'NAMENODE': {"else": 0},
+ 'SECONDARY_NAMENODE': {"else": 1},
+ 'HBASE_MASTER': {6: 0, 31: 2, "else": 3},
+
+ 'HISTORYSERVER': {31: 1, "else": 2},
+ 'RESOURCEMANAGER': {31: 1, "else": 2},
+
+ 'OOZIE_SERVER': {6: 1, 31: 2, "else": 3},
+
+ 'HIVE_SERVER': {6: 1, 31: 2, "else": 4},
+ 'HIVE_METASTORE': {6: 1, 31: 2, "else": 4},
+ 'WEBHCAT_SERVER': {6: 1, 31: 2, "else": 4},
+ 'APP_TIMELINE_SERVER': {31: 1, "else": 2},
+ 'FALCON_SERVER': {6: 1, 31: 2, "else": 3}
+ }
+
+# Validation helper methods
+def getSiteProperties(configurations, siteName):
+ siteConfig = configurations.get(siteName)
+ if siteConfig is None:
+ return None
+ return siteConfig.get("properties")
+
+def to_number(s):
+ try:
+ return int(re.sub(r"\D", "", s))
+ except ValueError:
+ return None
+
+def checkXmxValueFormat(value):
+ p = re.compile(r'-Xmx(\d+)(b|k|m|g|p|t|B|K|M|G|P|T)?')
+ matches = p.findall(value)
+ return len(matches) == 1
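+# e.g. checkXmxValueFormat("-server -Xmx2048m -XX:NewRatio=8") -> True (exactly one -Xmx token)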
+
+def getXmxSize(value):
+ p = re.compile("-Xmx(\d+)(.?)")
+ result = p.findall(value)[0]
+ if len(result) > 1:
+ # result[1] is either a space or a size modifier (b|k|m|g, etc.)
+ return result[0] + result[1].lower()
+ return result[0]
+
+def formatXmxSizeToBytes(value):
+ value = value.lower()
+ if len(value) == 0:
+ return 0
+ modifier = value[-1]
+
+ if modifier == ' ' or modifier in "0123456789":
+ modifier = 'b'
+ m = {
+ modifier == 'b': 1,
+ modifier == 'k': 1024,
+ modifier == 'm': 1024 * 1024,
+ modifier == 'g': 1024 * 1024 * 1024,
+ modifier == 't': 1024 * 1024 * 1024 * 1024,
+ modifier == 'p': 1024 * 1024 * 1024 * 1024 * 1024
+ }[1]
+ return to_number(value) * m
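+# e.g. formatXmxSizeToBytes("2048m") == 2048 * 1048576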
+
+def getPort(address):
+ """
+ Extracts the port from an address like 0.0.0.0:1019
+ """
+ if address is None:
+ return None
+ m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
+ if m is not None:
+ return int(m.group(2))
+ else:
+ return None
+
+def isSecurePort(port):
+ """
+ Returns True if the port is root-owned on *nix systems
+ """
+ if port is not None:
+ return port < 1024
+ else:
+ return False \ No newline at end of file
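
To make the sizing heuristic in getConfigurationClusterSummary above concrete, here is a minimal standalone sketch of the same arithmetic (plain Python; the host figures of 16 cores, 8 data disks, and 64 GB RAM with HBase installed are hypothetical):

    from math import ceil

    ram, cpu, disks = 64, 16, 8          # GB, cores, data disks (illustrative)
    reserved_os, reserved_hbase = 8, 8   # ramRecommendations row for 48 < ram <= 64
    min_container_mb = 2048              # minContainerSize bucket for ram > 24

    total_available_mb = max(2048, (ram - reserved_os - reserved_hbase) * 1024)  # 49152
    containers = round(max(3, min(2 * cpu,
                                  min(ceil(1.8 * disks),
                                      total_available_mb / min_container_mb))))  # 15
    # The disk term ceil(1.8 * 8) == 15 is the binding constraint here.
    ram_per_container = total_available_mb / containers                          # 3276.8
    if ram_per_container > 1024:         # round down to a multiple of 512 MB
        ram_per_container = int(ram_per_container / 512) * 512                   # 3072
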
diff --git a/ambari-server/src/main/windows/ambari-server.cmd b/ambari-server/src/main/windows/ambari-server.cmd
new file mode 100644
index 0000000000..34d1ccc9d7
--- /dev/null
+++ b/ambari-server/src/main/windows/ambari-server.cmd
@@ -0,0 +1,2 @@
+@echo off
+powershell -File ambari-server.ps1 %* \ No newline at end of file
diff --git a/ambari-server/src/main/windows/ambari-server.ps1 b/ambari-server/src/main/windows/ambari-server.ps1
new file mode 100644
index 0000000000..9afb341b21
--- /dev/null
+++ b/ambari-server/src/main/windows/ambari-server.ps1
@@ -0,0 +1,303 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+# description: ambari-server service
+# processname: ambari-server
+
+$VERSION="1.3.0-SNAPSHOT"
+$HASH="testhash"
+
+switch ($($args[0])){
+ "--version" {
+ echo "$VERSION"
+ exit 0
+ }
+ "--hash" {
+ echo "$HASH"
+ exit 0
+ }
+}
+
+$AMBARI_SERVER="ambari-server"
+$AMBARI_SVC_NAME = "Ambari Server"
+$current_directory = (Get-Item -Path ".\" -Verbose).FullName
+#Environment variables used in python; check if they exist, otherwise set them to $current_directory
+#and pass to child python process
+$Env:PYTHONPATH="$current_directory\sbin;$($Env:PYTHONPATH)"
+$Env:PYTHON = "python.exe"
+
+$AMBARI_LOG_DIR="\var\log\ambari-server"
+$OUTFILE_STDOUT=Join-Path -path $AMBARI_LOG_DIR -childpath "ambari-server.stdout"
+$OUTFILE_STDERR=Join-Path -path $AMBARI_LOG_DIR -childpath "ambari-server.stderr"
+$LOGFILE=Join-Path -path $AMBARI_LOG_DIR -childpath "ambari-server.log"
+$AMBARI_SERVER_PY_SCRIPT=Join-Path -path $PSScriptRoot -childpath "sbin\ambari-server-windows.py"
+if($AMBARI_SERVER_PY_SCRIPT.Contains(' '))
+{
+ $AMBARI_SERVER_PY_SCRIPT = """" + $AMBARI_SERVER_PY_SCRIPT + """"
+}
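+# (The extra quotes guard script paths containing spaces, e.g. under
+# "C:\Program Files", when the path is later passed as a single process argument.)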
+
+$OK=1
+$NOTOK=0
+
+
+# Reading the environment file
+#if [ -a /var/lib/ambari-server/ambari-env.sh ]; then
+# . /var/lib/ambari-server/ambari-env.sh
+#fi
+
+
+#echo $AMBARI_PASSPHRASE
+
+$retcode=0
+
+function _exit($code)
+{
+ $host.SetShouldExit($code)
+ exit $code
+}
+
+function _detect_python()
+{
+ if(![boolean]$(Get-Command $Env:PYTHON -ErrorAction SilentlyContinue))
+ {
+ echo "ERROR: Can not find python.exe in PATH. Add python executable to PATH and try again."
+ _exit(1)
+ }
+}
+function _detect_local_sql()
+{
+ $services = Get-Service -Include @("MSSQL$*")
+ if($services)
+ {
+ echo "Detected following local SQL Server instances:"
+ foreach ($instance in $services) {
+ echo $instance
+ }
+ } else {
+ echo "WARNING: No local SQL Server instances detected. Make sure you have properly configured SQL Server"
+ }
+}
+
+function _echo([switch]$off)
+{
+ if($off)
+ {
+ try
+ {
+ stop-transcript|out-null
+ }
+ catch [System.InvalidOperationException]
+ {}
+ }
+ else
+ {
+ try
+ {
+ start-transcript|out-null
+ }
+ catch [System.InvalidOperationException]
+ {}
+ }
+}
+
+Function _pstart_brief($cmd_args)
+{
+ #start python with -u to make stdout and stderr unbuffered
+ $arguments = @("-u",$AMBARI_SERVER_PY_SCRIPT) + $cmd_args
+
+ $psi = New-Object System.Diagnostics.ProcessStartInfo
+
+ $psi.RedirectStandardError = $True
+ $psi.RedirectStandardOutput = $True
+
+ $psi.UseShellExecute = $False
+
+ $psi.FileName = $Env:PYTHON
+ $psi.Arguments = $arguments
+ #$psi.WindowStyle = WindowStyle.Hidden
+
+ $process = [Diagnostics.Process]::Start($psi)
+
+ $process.WaitForExit()
+
+ Write-Output $process.StandardOutput.ReadToEnd()
+}
+
+Function _start($cmd_args)
+{
+ echo "Starting $AMBARI_SVC_NAME..."
+ _echo -off
+
+ _pstart_brief($cmd_args)
+
+ $cnt = 0
+ do
+ {
+ Start-Sleep -Milliseconds 250
+ $svc = Get-Service -Name $AMBARI_SVC_NAME
+ $cnt += 1
+ if ($cnt -eq 120)
+ {
+ echo "$AMBARI_SVC_NAME still starting...".
+ return
+ }
+ }
+ until($svc.status -eq "Running")
+
+ echo "$AMBARI_SVC_NAME is running"
+}
+
+Function _pstart($cmd_args)
+{
+ New-Item -ItemType Directory -Force -Path $AMBARI_LOG_DIR | Out-Null
+
+ $arguments = @($AMBARI_SERVER_PY_SCRIPT) + $cmd_args
+
+ $p = New-Object System.Diagnostics.Process
+ $p.StartInfo.UseShellExecute = $false
+ $p.StartInfo.FileName = $Env:PYTHON
+ $p.StartInfo.Arguments = $arguments
+ [void]$p.Start();
+
+ echo "Verifying $AMBARI_SERVER process status..."
+ if (!$p){
+ echo "ERROR: $AMBARI_SERVER start failed"
+ $host.SetShouldExit(-1)
+ exit
+ }
+ echo "Server log at: $LOGFILE"
+
+ $p.WaitForExit()
+}
+
+Function _pstart_ioredir($cmd_args)
+{
+ New-Item -ItemType Directory -Force -Path $AMBARI_LOG_DIR | Out-Null
+
+ #start python with -u to make stdout and stderr unbuffered
+ $arguments = @("-u",$AMBARI_SERVER_PY_SCRIPT) + $cmd_args
+ $process = Start-Process -FilePath $Env:PYTHON -ArgumentList $arguments -WindowStyle Hidden -RedirectStandardError $OUTFILE_STDERR -RedirectStandardOutput $OUTFILE_STDOUT -PassThru
+ echo "Verifying $AMBARI_SERVER process status..."
+ if (!$process){
+ echo "ERROR: $AMBARI_SERVER start failed"
+ $host.SetShouldExit(-1)
+ exit
+ }
+ echo "Server stdout at: $OUTFILE_STDOUT"
+ echo "Server stderr at: $OUTFILE_STDERR"
+ echo "Server log at: $LOGFILE"
+
+ $process.WaitForExit()
+}
+
+Function _upgrade($cmd_args){
+ _pstart($cmd_args)
+}
+
+Function _stop($cmd_args){
+ echo "Stopping $AMBARI_SVC_NAME..."
+ _pstart_brief($cmd_args)
+
+ $cnt = 0
+ do
+ {
+ Start-Sleep -Milliseconds 250
+ $svc = Get-Service -Name $AMBARI_SVC_NAME
+ $cnt += 1
+ if ($cnt -eq 40)
+ {
+ echo "$AMBARI_SVC_NAME still stopping...".
+ return
+ }
+ }
+ until($svc.status -eq "Stopped")
+ echo "$AMBARI_SVC_NAME is stopped"
+}
+
+Function _status($cmd_args){
+ echo "Getting $AMBARI_SVC_NAME status..."
+ _pstart_brief($cmd_args)
+}
+
+# check for python before any action
+_detect_python
+switch ($($args[0])){
+ "start" {_start $args}
+ "pstart"
+ {
+ echo "Starting Ambari Server"
+ _pstart_ioredir $args
+ echo "Ambari Server Start finished"
+ }
+ "stop"
+ {
+ echo "Stopping Ambari Server"
+ _stop $args
+ echo "Ambari Server Stop finished"
+ }
+ "reset"
+ {
+ echo "Reseting Ambari Server"
+ _pstart $args
+ echo "Ambari Server Reset finished"
+ }
+ "restart"
+ {
+ echo "Restarting Ambari Server"
+ _stop @("stop")
+ _start @("start")
+ echo "Ambari Server Restart finished"
+ }
+ "upgrade"
+ {
+ echo "Upgrade Ambari Server"
+ _upgrade $args
+ echo "Ambari Server Upgrade finished"
+ }
+ "status"
+ {
+ echo "Checking Ambari Server status"
+ _status $args
+ }
+# "upgradestack" {_pstart $args}
+ "setup"
+ {
+ echo "Installing Ambari Server"
+ _detect_local_sql
+ _pstart $args
+ echo "Ambari Server Installation finished"
+ }
+ "setup-ldap"
+ {
+ echo "Setting up LDAP for Ambari Server"
+ _pstart $args
+ echo "Ambari Server LDAP setup finished"
+ }
+ "setup-security"
+ {
+ echo "Setting up security for Ambari Server"
+ _pstart $args
+ echo "Ambari Server security setup finished"
+ }
+ default
+ {
+ echo "Usage: ambari-server {start|stop|restart|setup|upgrade|status|upgradestack|setup-ldap|setup-security} [options]"
+ echo "Use ambari-server <action> --help to get details on options available."
+ echo "Or, simply invoke ambari-server.py --help to print the options."
+ $retcode=1
+ }
+}
+
+_exit($retcode)
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
index 6ab35c240c..faaafa631b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
@@ -610,7 +610,10 @@ public class AmbariMetaInfoTest {
Assert.assertTrue(metaInfo.isOsSupported("suse11"));
Assert.assertTrue(metaInfo.isOsSupported("sles11"));
Assert.assertTrue(metaInfo.isOsSupported("ubuntu12"));
- Assert.assertFalse(metaInfo.isOsSupported("windows"));
+ Assert.assertFalse(metaInfo.isOsSupported("win2008server6"));
+ Assert.assertFalse(metaInfo.isOsSupported("win2008serverr26"));
+ Assert.assertFalse(metaInfo.isOsSupported("win2012server6"));
+ Assert.assertFalse(metaInfo.isOsSupported("win2012serverr26"));
}
@Test
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProviderTest.java
index 7ed8459352..1f5d9aa365 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProviderTest.java
@@ -223,7 +223,8 @@ public class ClientConfigResourceProviderTest {
expect(configHelper.getEffectiveConfigProperties(cluster, configTags)).andReturn(properties);
expect(clusterConfig.getType()).andReturn(Configuration.HIVE_CONFIG_TAG).anyTimes();
expect(configHelper.getEffectiveConfigAttributes(cluster, configTags)).andReturn(attributes);
- expect(configuration.getProperty("server.tmp.dir")).andReturn(Configuration.SERVER_TMP_DIR_DEFAULT);
+ expect(configuration.getProperty(Configuration.SERVER_TMP_DIR_KEY)).andReturn(Configuration.SERVER_TMP_DIR_DEFAULT);
+ expect(configuration.getProperty(Configuration.AMBARI_PYTHON_WRAP_KEY)).andReturn(Configuration.AMBARI_PYTHON_WRAP_DEFAULT);
expect(configuration.getExternalScriptTimeout()).andReturn(Integer.parseInt(Configuration.EXTERNAL_SCRIPT_TIMEOUT_DEFAULT));
Map<String,String> props = new HashMap<String, String>();
props.put(Configuration.HIVE_METASTORE_PASSWORD_PROPERTY, "pass");
diff --git a/ambari-server/src/test/python/TestAmbariServer.py b/ambari-server/src/test/python/TestAmbariServer.py
index ebce460c74..caa87cc05c 100644
--- a/ambari-server/src/test/python/TestAmbariServer.py
+++ b/ambari-server/src/test/python/TestAmbariServer.py
@@ -16,6 +16,8 @@ See the License for the specific language governing permissions and
limitations under the License.
'''
+from stacks.utils.RMFTestCase import *
+
import StringIO
import re
from unittest import TestCase
diff --git a/ambari-server/src/test/python/TestBootstrap.py b/ambari-server/src/test/python/TestBootstrap.py
index 6bcc94f427..d0295a213d 100644
--- a/ambari-server/src/test/python/TestBootstrap.py
+++ b/ambari-server/src/test/python/TestBootstrap.py
@@ -16,6 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
'''
+from stacks.utils.RMFTestCase import *
import bootstrap
import time
import subprocess
diff --git a/ambari-server/src/test/python/TestCheckHost.py b/ambari-server/src/test/python/TestCheckHost.py
index 3cc590b351..d68c9036c0 100644
--- a/ambari-server/src/test/python/TestCheckHost.py
+++ b/ambari-server/src/test/python/TestCheckHost.py
@@ -17,9 +17,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
+
+from stacks.utils.RMFTestCase import *
import json
import os
import socket
+import subprocess
+from ambari_commons import inet_utils
from resource_management import Script,ConfigDictionary
from mock.mock import patch
from mock.mock import MagicMock
@@ -60,12 +64,12 @@ class TestCheckHost(TestCase):
@patch.object(Script, 'get_config')
@patch.object(Script, 'get_tmp_dir')
- @patch("check_host.Execute")
+ @patch("check_host.download_file")
@patch("resource_management.libraries.script.Script.put_structured_out")
@patch("subprocess.Popen")
@patch("check_host.format")
@patch("os.path.isfile")
- def testDBConnectionCheck(self, isfile_mock, format_mock, popenMock, structured_out_mock, execute_mock, get_tmp_dir_mock, mock_config):
+ def testDBConnectionCheck(self, isfile_mock, format_mock, popenMock, structured_out_mock, download_file_mock, get_tmp_dir_mock, mock_config):
# test, download DBConnectionVerification.jar failed
mock_config.return_value = {"commandParams" : {"check_execute_list" : "db_connection_check",
"java_home" : "test_java_home",
@@ -75,9 +79,10 @@ class TestCheckHost(TestCase):
"db_connection_url" : "test_db_connection_url",
"user_name" : "test_user_name",
"user_passwd" : "test_user_passwd",
- "jdk_name" : "test_jdk_name"}}
+ "jdk_name" : "test_jdk_name"},
+ "hostLevelParams": { "agentCacheDir": "/nonexistent_tmp" }}
get_tmp_dir_mock.return_value = "/tmp"
- execute_mock.side_effect = Exception("test exception")
+ download_file_mock.side_effect = Exception("test exception")
isfile_mock.return_value = True
checkHost = CheckHost()
checkHost.actionexecute(None)
@@ -85,11 +90,6 @@ class TestCheckHost(TestCase):
self.assertEquals(structured_out_mock.call_args[0][0], {'db_connection_check': {'message': 'Error downloading ' \
'DBConnectionVerification.jar from Ambari Server resources. Check network access to Ambari ' \
'Server.\ntest exception', 'exit_code': 1}})
-
- self.assertEquals(format_mock.call_args_list[2][0][0], "/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf " \
- "--retry 5 {jdk_location}{check_db_connection_jar_name} -o {check_db_connection_jar_name}'")
-
- self.assertEquals(format_mock.call_args_list[3][0][0], "[ -f /usr/lib/ambari-agent/{check_db_connection_jar_name}]")
# test, download jdbc driver failed
mock_config.return_value = {"commandParams" : {"check_execute_list" : "db_connection_check",
@@ -100,11 +100,12 @@ class TestCheckHost(TestCase):
"db_connection_url" : "test_db_connection_url",
"user_name" : "test_user_name",
"user_passwd" : "test_user_passwd",
- "jdk_name" : "test_jdk_name"}}
+ "jdk_name" : "test_jdk_name"},
+ "hostLevelParams": { "agentCacheDir": "/nonexistent_tmp" }}
format_mock.reset_mock()
- execute_mock.reset_mock()
+ download_file_mock.reset_mock()
p = MagicMock()
- execute_mock.side_effect = [p, Exception("test exception")]
+ download_file_mock.side_effect = [p, Exception("test exception")]
checkHost.actionexecute(None)
@@ -114,24 +115,21 @@ class TestCheckHost(TestCase):
'Server host to make the JDBC driver available for download and to enable testing '
'the database connection.\n')
self.assertEquals(structured_out_mock.call_args[0][0]['db_connection_check']['exit_code'], 1)
- self.assertEquals(format_mock.call_args_list[4][0][0], "/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf " \
- "--retry 5 {jdbc_url} -o {jdbc_name}'")
-
- self.assertEquals(format_mock.call_args_list[5][0][0], "[ -f /usr/lib/ambari-agent/{jdbc_name}]")
# test, no connection to remote db
mock_config.return_value = {"commandParams" : {"check_execute_list" : "db_connection_check",
"java_home" : "test_java_home",
"ambari_server_host" : "test_host",
"jdk_location" : "test_jdk_location",
- "db_name" : "postgresql",
+ "db_name" : "postgres",
"db_connection_url" : "test_db_connection_url",
"user_name" : "test_user_name",
"user_passwd" : "test_user_passwd",
- "jdk_name" : "test_jdk_name"}}
+ "jdk_name" : "test_jdk_name"},
+ "hostLevelParams": { "agentCacheDir": "/nonexistent_tmp" }}
format_mock.reset_mock()
- execute_mock.reset_mock()
- execute_mock.side_effect = [p, p]
+ download_file_mock.reset_mock()
+ download_file_mock.side_effect = [p, p]
s = MagicMock()
s.communicate.return_value = ("test message", "")
s.returncode = 1
@@ -141,14 +139,14 @@ class TestCheckHost(TestCase):
self.assertEquals(structured_out_mock.call_args[0][0], {'db_connection_check': {'message': 'test message',
'exit_code': 1}})
- self.assertEquals(format_mock.call_args[0][0],'{java64_home}/bin/java -cp /usr/lib/ambari-agent/{check_db_' \
- 'connection_jar_name}:/usr/lib/ambari-agent/{jdbc_name} org.' \
- 'apache.ambari.server.DBConnectionVerification \'{db_connection_url}\' ' \
- '{user_name} {user_passwd!p} {jdbc_driver}')
+ self.assertEquals(format_mock.call_args[0][0],'{java_exec} -cp '\
+ '{check_db_connection_path}{class_path_delimiter}{jdbc_path} -Djava.library.path={agent_cache_dir} '\
+ 'org.apache.ambari.server.DBConnectionVerification {db_connection_url} '\
+ '{user_name} {user_passwd!p} {jdbc_driver}')
# test, db connection success
- execute_mock.reset_mock()
- execute_mock.side_effect = [p, p]
+ download_file_mock.reset_mock()
+ download_file_mock.side_effect = [p, p]
s.returncode = 0
checkHost.actionexecute(None)
@@ -164,7 +162,8 @@ class TestCheckHost(TestCase):
"db_connection_url" : "test_db_connection_url",
"user_name" : "test_user_name",
"user_passwd" : "test_user_passwd",
- "db_name" : "postgresql"}}
+ "db_name" : "postgres"},
+ "hostLevelParams": { "agentCacheDir": "/nonexistent_tmp" }}
isfile_mock.return_value = False
checkHost.actionexecute(None)
diff --git a/ambari-server/src/test/python/TestOSCheck.py b/ambari-server/src/test/python/TestOSCheck.py
index 1f60964bcd..20b88906fb 100644
--- a/ambari-server/src/test/python/TestOSCheck.py
+++ b/ambari-server/src/test/python/TestOSCheck.py
@@ -28,6 +28,7 @@ from unittest import TestCase
from mock.mock import patch
from ambari_commons import OSCheck
+from ambari_commons.os_check import get_os_distribution
import os_check_type
utils = __import__('ambari_server.utils').utils
@@ -45,11 +46,13 @@ class TestOSCheck(TestCase):
# 1 - Any system
mock_exists.return_value = False
mock_linux_distribution.return_value = ('my_os', '', '')
+ OSCheck._dist = get_os_distribution()
result = OSCheck.get_os_type()
self.assertEquals(result, 'my_os')
# 2 - Negative case
mock_linux_distribution.return_value = ('', 'aaaa', 'bbbbb')
+ OSCheck._dist = get_os_distribution()
try:
result = OSCheck.get_os_type()
self.fail("Should throw exception in OSCheck.get_os_type()")
@@ -61,12 +64,14 @@ class TestOSCheck(TestCase):
# 3 - path exist: '/etc/oracle-release'
mock_exists.return_value = True
mock_linux_distribution.return_value = ('some_os', '', '')
+ OSCheck._dist = get_os_distribution()
result = OSCheck.get_os_type()
self.assertEquals(result, 'oraclelinux')
# 4 - Common system
mock_exists.return_value = False
mock_linux_distribution.return_value = ('CenToS', '', '')
+ OSCheck._dist = get_os_distribution()
result = OSCheck.get_os_type()
self.assertEquals(result, 'centos')
@@ -74,16 +79,19 @@ class TestOSCheck(TestCase):
mock_exists.return_value = False
# Red Hat Enterprise Linux Server release 6.5 (Santiago)
mock_linux_distribution.return_value = ('Red Hat Enterprise Linux Server', '6.5', 'Santiago')
+ OSCheck._dist = get_os_distribution()
result = OSCheck.get_os_type()
self.assertEquals(result, 'redhat')
# Red Hat Enterprise Linux Workstation release 6.4 (Santiago)
mock_linux_distribution.return_value = ('Red Hat Enterprise Linux Workstation', '6.4', 'Santiago')
+ OSCheck._dist = get_os_distribution()
result = OSCheck.get_os_type()
self.assertEquals(result, 'redhat')
# Red Hat Enterprise Linux AS release 4 (Nahant Update 3)
mock_linux_distribution.return_value = ('Red Hat Enterprise Linux AS', '4', 'Nahant Update 3')
+ OSCheck._dist = get_os_distribution()
result = OSCheck.get_os_type()
self.assertEquals(result, 'redhat')
@@ -94,18 +102,21 @@ class TestOSCheck(TestCase):
# 1 - Any system
mock_exists.return_value = False
mock_linux_distribution.return_value = ('MY_os', '', '')
+ OSCheck._dist = get_os_distribution()
result = OSCheck.get_os_family()
self.assertEquals(result, 'my_os')
# 2 - Redhat
mock_exists.return_value = False
mock_linux_distribution.return_value = ('Centos Linux', '', '')
+ OSCheck._dist = get_os_distribution()
result = OSCheck.get_os_family()
self.assertEquals(result, 'redhat')
# 3 - Ubuntu
mock_exists.return_value = False
mock_linux_distribution.return_value = ('Ubuntu', '', '')
+ OSCheck._dist = get_os_distribution()
result = OSCheck.get_os_family()
self.assertEquals(result, 'ubuntu')
@@ -113,16 +124,19 @@ class TestOSCheck(TestCase):
mock_exists.return_value = False
mock_linux_distribution.return_value = (
'suse linux enterprise server', '', '')
+ OSCheck._dist = get_os_distribution()
result = OSCheck.get_os_family()
self.assertEquals(result, 'suse')
mock_exists.return_value = False
mock_linux_distribution.return_value = ('SLED', '', '')
+ OSCheck._dist = get_os_distribution()
result = OSCheck.get_os_family()
self.assertEquals(result, 'suse')
# 5 - Negative case
mock_linux_distribution.return_value = ('', '111', '2222')
+ OSCheck._dist = get_os_distribution()
try:
result = OSCheck.get_os_family()
self.fail("Should throw exception in OSCheck.get_os_family()")
@@ -136,11 +150,13 @@ class TestOSCheck(TestCase):
# 1 - Any system
mock_linux_distribution.return_value = ('', '123.45', '')
+ OSCheck._dist = get_os_distribution()
result = OSCheck.get_os_version()
self.assertEquals(result, '123.45')
# 2 - Negative case
mock_linux_distribution.return_value = ('ssss', '', 'ddddd')
+ OSCheck._dist = get_os_distribution()
try:
result = OSCheck.get_os_version()
self.fail("Should throw exception in OSCheck.get_os_version()")
@@ -154,11 +170,13 @@ class TestOSCheck(TestCase):
# 1
mock_linux_distribution.return_value = ('', '123.45.67', '')
+ OSCheck._dist = get_os_distribution()
result = OSCheck.get_os_major_version()
self.assertEquals(result, '123')
# 2
mock_linux_distribution.return_value = ('Suse', '11', '')
+ OSCheck._dist = get_os_distribution()
result = OSCheck.get_os_major_version()
self.assertEquals(result, '11')
@@ -167,11 +185,13 @@ class TestOSCheck(TestCase):
# 1 - Any system
mock_linux_distribution.return_value = ('', '', 'MY_NEW_RELEASE')
+ OSCheck._dist = get_os_distribution()
result = OSCheck.get_os_release_name()
self.assertEquals(result, 'my_new_release')
# 2 - Negative case
mock_linux_distribution.return_value = ('aaaa', 'bbbb', '')
+ OSCheck._dist = get_os_distribution()
try:
result = OSCheck.get_os_release_name()
self.fail("Should throw exception in OSCheck.get_os_release_name()")
@@ -233,6 +253,7 @@ class TestOSCheck(TestCase):
mock_linux_distribution.return_value = ('aaa', '11', 'bb')
base_args = ["os_check_type.py", "aaa11"]
sys.argv = list(base_args)
+ OSCheck._dist = get_os_distribution()
try:
os_check_type.main()
@@ -244,6 +265,7 @@ class TestOSCheck(TestCase):
mock_linux_distribution.return_value = ('ddd', '33', 'bb')
base_args = ["os_check_type.py", "zzz_x77"]
sys.argv = list(base_args)
+ OSCheck._dist = get_os_distribution()
try:
os_check_type.main()
diff --git a/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_datanode.py b/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_datanode.py
index f7926f784e..086fa2b3d8 100644
--- a/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_datanode.py
+++ b/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_datanode.py
@@ -17,8 +17,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
-from ambari_commons import OSCheck
-from mock.mock import MagicMock, patch
from stacks.utils.RMFTestCase import *
class TestDatanode(RMFTestCase):
diff --git a/ambari-server/src/test/python/stacks/1.3.2/hooks/before-ANY/test_before_any.py b/ambari-server/src/test/python/stacks/1.3.2/hooks/before-ANY/test_before_any.py
index 16aa939b8c..d0f8bbf488 100644
--- a/ambari-server/src/test/python/stacks/1.3.2/hooks/before-ANY/test_before_any.py
+++ b/ambari-server/src/test/python/stacks/1.3.2/hooks/before-ANY/test_before_any.py
@@ -18,9 +18,9 @@ See the License for the specific language governing permissions and
limitations under the License.
'''
-from mock.mock import MagicMock, call, patch
-from resource_management import *
from stacks.utils.RMFTestCase import *
+from mock.mock import MagicMock, call, patch
+from resource_management import Hook
@patch.object(Hook, "run_custom_hook", new = MagicMock())
class TestHookBeforeInstall(RMFTestCase):
diff --git a/ambari-server/src/test/python/stacks/1.3.2/hooks/before-INSTALL/test_before_install.py b/ambari-server/src/test/python/stacks/1.3.2/hooks/before-INSTALL/test_before_install.py
index 1f508c7acc..904c4698ba 100644
--- a/ambari-server/src/test/python/stacks/1.3.2/hooks/before-INSTALL/test_before_install.py
+++ b/ambari-server/src/test/python/stacks/1.3.2/hooks/before-INSTALL/test_before_install.py
@@ -18,9 +18,9 @@ See the License for the specific language governing permissions and
limitations under the License.
'''
+from stacks.utils.RMFTestCase import *
from mock.mock import MagicMock, call, patch
from resource_management import *
-from stacks.utils.RMFTestCase import *
@patch.object(Hook, "run_custom_hook", new = MagicMock())
class TestHookBeforeInstall(RMFTestCase):
diff --git a/ambari-server/src/test/python/stacks/1.3.2/hooks/before-START/test_before_start.py b/ambari-server/src/test/python/stacks/1.3.2/hooks/before-START/test_before_start.py
index d27ed5a00f..7fc033e138 100644
--- a/ambari-server/src/test/python/stacks/1.3.2/hooks/before-START/test_before_start.py
+++ b/ambari-server/src/test/python/stacks/1.3.2/hooks/before-START/test_before_start.py
@@ -18,9 +18,9 @@ See the License for the specific language governing permissions and
limitations under the License.
'''
-from mock.mock import MagicMock, call, patch
-from resource_management import *
from stacks.utils.RMFTestCase import *
+from mock.mock import MagicMock, call, patch
+from resource_management import Hook
import json
@patch("os.path.exists", new = MagicMock(return_value=True))
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
index 5372e2ac63..465f42b386 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
@@ -17,10 +17,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
+from stacks.utils.RMFTestCase import *
from ambari_commons import OSCheck
import json
from mock.mock import MagicMock, patch
-from stacks.utils.RMFTestCase import *
class TestDatanode(RMFTestCase):
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
index ec63d9b42d..3a576eba6c 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
@@ -411,8 +411,8 @@ class TestNamenode(RMFTestCase):
bin_dir = '/usr/bin',
kinit_override = True)
self.assertNoMoreResources()
-
-
+
+
def test_decommission_secured(self):
self.executeScript("2.0.6/services/HDFS/package/scripts/namenode.py",
classname = "NameNode",
@@ -510,7 +510,7 @@ class TestNamenode(RMFTestCase):
recursive = True,
mode = 0755,
)
-
+
@patch("resource_management.libraries.script.Script.put_structured_out")
def test_rebalance_hdfs(self, pso):
Popen_Mock.return_value = 1
@@ -526,7 +526,7 @@ class TestNamenode(RMFTestCase):
self.fail("Exception was not thrown")
except resource_management.core.exceptions.Fail:
pass ##expected
-
+
pso.reset_mock()
Popen_Mock.return_value = 0
ll = subprocess.Popen()
diff --git a/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
index 7b241fb99d..558d37aa49 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
@@ -466,5 +466,3 @@ class TestHDP206StackAdvisor(TestCase):
pass
actualItems.append(next)
self.checkEqual(expectedItems, actualItems)
-
-
diff --git a/ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py b/ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py
index 8b5a69832d..552da628c9 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py
@@ -18,9 +18,9 @@ See the License for the specific language governing permissions and
limitations under the License.
'''
-from mock.mock import MagicMock, call, patch
-from resource_management import *
from stacks.utils.RMFTestCase import *
+from mock.mock import MagicMock, call, patch
+from resource_management import Hook
@patch.object(Hook, "run_custom_hook", new = MagicMock())
class TestHookBeforeInstall(RMFTestCase):
diff --git a/ambari-server/src/test/python/stacks/2.0.6/hooks/before-INSTALL/test_before_install.py b/ambari-server/src/test/python/stacks/2.0.6/hooks/before-INSTALL/test_before_install.py
index 1ae0e60627..cec41b7941 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/hooks/before-INSTALL/test_before_install.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/hooks/before-INSTALL/test_before_install.py
@@ -18,9 +18,9 @@ See the License for the specific language governing permissions and
limitations under the License.
'''
+from stacks.utils.RMFTestCase import *
from mock.mock import MagicMock, call, patch
from resource_management import *
-from stacks.utils.RMFTestCase import *
@patch.object(Hook, "run_custom_hook", new = MagicMock())
class TestHookBeforeInstall(RMFTestCase):
diff --git a/ambari-server/src/test/python/stacks/2.0.6/hooks/before-START/test_before_start.py b/ambari-server/src/test/python/stacks/2.0.6/hooks/before-START/test_before_start.py
index 4b7ffa5f43..5fceb8aacc 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/hooks/before-START/test_before_start.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/hooks/before-START/test_before_start.py
@@ -18,9 +18,9 @@ See the License for the specific language governing permissions and
limitations under the License.
'''
-from mock.mock import MagicMock, call, patch
-from resource_management import *
from stacks.utils.RMFTestCase import *
+from mock.mock import MagicMock, call, patch
+from resource_management import Hook
import json
@patch("os.path.exists", new = MagicMock(return_value=True))
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/default-storm-start.json b/ambari-server/src/test/python/stacks/2.1/configs/default-storm-start.json
index ce2db09274..48219bc643 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/default-storm-start.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/default-storm-start.json
@@ -1,251 +1,251 @@
{
"configuration_attributes": {
- "storm-site": {},
+ "storm-site": {},
"hdfs-site": {
"final": {
- "dfs.support.append": "true",
+ "dfs.support.append": "true",
"dfs.namenode.http-address": "true"
}
- },
- "storm-env": {},
+ },
+ "storm-env": {},
"core-site": {
"final": {
"fs.defaultFS": "true"
}
- },
- "hadoop-policy": {},
- "hdfs-log4j": {},
- "hadoop-env": {},
- "zookeeper-env": {},
- "zookeeper-log4j": {},
+ },
+ "hadoop-policy": {},
+ "hdfs-log4j": {},
+ "hadoop-env": {},
+ "zookeeper-env": {},
+ "zookeeper-log4j": {},
"cluster-env": {}
- },
+ },
"commandParams": {
- "command_timeout": "600",
- "script": "scripts/nimbus.py",
- "script_type": "PYTHON",
- "service_package_folder": "HDP/2.1/services/STORM/package",
+ "command_timeout": "600",
+ "script": "scripts/nimbus.py",
+ "script_type": "PYTHON",
+ "service_package_folder": "HDP/2.1/services/STORM/package",
"hooks_folder": "HDP/2.0.6/hooks"
- },
- "roleCommand": "START",
- "clusterName": "pacan",
- "hostname": "c6402.ambari.apache.org",
+ },
+ "roleCommand": "START",
+ "clusterName": "pacan",
+ "hostname": "c6402.ambari.apache.org",
"hostLevelParams": {
- "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
- "ambari_db_rca_password": "mapred",
- "java_home": "/usr/jdk64/jdk1.7.0_45",
- "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
- "jce_name": "UnlimitedJCEPolicyJDK7.zip",
- "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
- "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.2.0.0\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.2\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.2.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.2.0.0\"},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.20\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\"}]",
- "group_list": "[\"hadoop\",\"users\"]",
- "package_list": "[{\"name\":\"storm_2_2_0_0_*\"}]",
- "stack_version": "2.2",
- "stack_name": "HDP",
- "db_name": "ambari",
- "ambari_db_rca_driver": "org.postgresql.Driver",
- "jdk_name": "jdk-7u45-linux-x64.tar.gz",
- "ambari_db_rca_username": "mapred",
- "db_driver_filename": "mysql-connector-java.jar",
- "user_list": "[\"ambari-qa\",\"zookeeper\",\"hdfs\",\"storm\"]",
+ "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
+ "ambari_db_rca_password": "mapred",
+ "java_home": "/usr/jdk64/jdk1.7.0_45",
+ "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
+ "jce_name": "UnlimitedJCEPolicyJDK7.zip",
+ "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
+ "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.2.0.0\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.2\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.2.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.2.0.0\"},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.20\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\"}]",
+ "group_list": "[\"hadoop\",\"users\"]",
+ "package_list": "[{\"name\":\"storm_2_2_0_0_*\"}]",
+ "stack_version": "2.2",
+ "stack_name": "HDP",
+ "db_name": "ambari",
+ "ambari_db_rca_driver": "org.postgresql.Driver",
+ "jdk_name": "jdk-7u45-linux-x64.tar.gz",
+ "ambari_db_rca_username": "mapred",
+ "db_driver_filename": "mysql-connector-java.jar",
+ "user_list": "[\"ambari-qa\",\"zookeeper\",\"hdfs\",\"storm\"]",
"mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar"
- },
- "commandType": "EXECUTION_COMMAND",
- "roleParams": {},
- "serviceName": "STORM",
- "role": "NIMBUS",
- "forceRefreshConfigTags": [],
- "taskId": 24,
- "public_hostname": "c6402.ambari.apache.org",
+ },
+ "commandType": "EXECUTION_COMMAND",
+ "roleParams": {},
+ "serviceName": "STORM",
+ "role": "NIMBUS",
+ "forceRefreshConfigTags": [],
+ "taskId": 24,
+ "public_hostname": "c6402.ambari.apache.org",
"configurations": {
"storm-site": {
- "topology.tuple.serializer": "backtype.storm.serialization.types.ListDelegateSerializer",
- "topology.workers": "1",
- "drpc.worker.threads": "64",
- "storm.zookeeper.servers": "['c6402.ambari.apache.org']",
- "supervisor.heartbeat.frequency.secs": "5",
- "topology.executor.send.buffer.size": "1024",
- "drpc.childopts": "-Xmx768m",
- "nimbus.thrift.port": "6627",
- "storm.zookeeper.retry.intervalceiling.millis": "30000",
- "storm.local.dir": "/hadoop/storm",
- "topology.receiver.buffer.size": "8",
- "storm.messaging.netty.client_worker_threads": "1",
- "transactional.zookeeper.root": "/transactional",
- "topology.skip.missing.kryo.registrations": "false",
- "worker.heartbeat.frequency.secs": "1",
- "zmq.hwm": "0",
- "storm.zookeeper.connection.timeout": "15000",
- "_storm.thrift.secure.transport": "SECURED_TRANSPORT_CLASS",
- "storm.messaging.netty.server_worker_threads": "1",
- "supervisor.worker.start.timeout.secs": "120",
- "zmq.threads": "1",
- "topology.acker.executors": "null",
- "storm.local.mode.zmq": "false",
- "topology.max.task.parallelism": "null",
- "topology.max.error.report.per.interval": "5",
- "storm.zookeeper.port": "2181",
- "drpc.queue.size": "128",
- "worker.childopts": "-Xmx768m _JAAS_PLACEHOLDER -javaagent:/usr/hdp/current/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Worker_%ID%_JVM",
- "nimbus.childopts": "-Xmx1024m _JAAS_PLACEHOLDER -javaagent:/usr/hdp/current/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8649,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Nimbus_JVM",
- "storm.zookeeper.retry.times": "5",
- "nimbus.monitor.freq.secs": "10",
- "storm.cluster.mode": "distributed",
- "dev.zookeeper.path": "/tmp/dev-storm-zookeeper",
- "drpc.invocations.port": "3773",
- "_storm.thrift.nonsecure.transport": "NON_SECURED_TRANSPORT_CLASS",
- "storm.zookeeper.root": "/storm",
- "logviewer.childopts": "-Xmx128m",
- "transactional.zookeeper.port": "null",
- "topology.worker.childopts": "null",
- "topology.max.spout.pending": "null",
- "nimbus.cleanup.inbox.freq.secs": "600",
- "storm.messaging.netty.min_wait_ms": "100",
- "nimbus.task.timeout.secs": "30",
- "nimbus.thrift.max_buffer_size": "1048576",
- "topology.sleep.spout.wait.strategy.time.ms": "1",
- "topology.optimize": "true",
- "nimbus.reassign": "true",
- "storm.messaging.transport": "backtype.storm.messaging.netty.Context",
- "logviewer.appender.name": "A1",
- "nimbus.host": "c6402.ambari.apache.org",
- "ui.port": "8744",
- "supervisor.slots.ports": "[6700, 6701]",
- "nimbus.file.copy.expiration.secs": "600",
- "supervisor.monitor.frequency.secs": "3",
- "ui.childopts": "-Xmx768m _JAAS_PLACEHOLDER",
- "transactional.zookeeper.servers": "null",
- "zmq.linger.millis": "5000",
- "topology.error.throttle.interval.secs": "10",
- "topology.worker.shared.thread.pool.size": "4",
- "java.library.path": "/usr/local/lib:/opt/local/lib:/usr/lib:/usr/hdp/current/storm/lib",
- "topology.spout.wait.strategy": "backtype.storm.spout.SleepSpoutWaitStrategy",
- "task.heartbeat.frequency.secs": "3",
- "topology.transfer.buffer.size": "1024",
- "storm.zookeeper.session.timeout": "20000",
- "topology.executor.receive.buffer.size": "1024",
- "topology.stats.sample.rate": "0.05",
- "topology.fall.back.on.java.serialization": "true",
- "supervisor.childopts": "-Xmx256m _JAAS_PLACEHOLDER -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.port=56431 -javaagent:/usr/hdp/current/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Supervisor_JVM",
- "topology.enable.message.timeouts": "true",
- "storm.messaging.netty.max_wait_ms": "1000",
- "nimbus.topology.validator": "backtype.storm.nimbus.DefaultTopologyValidator",
- "nimbus.supervisor.timeout.secs": "60",
- "topology.disruptor.wait.strategy": "com.lmax.disruptor.BlockingWaitStrategy",
- "nimbus.inbox.jar.expiration.secs": "3600",
- "drpc.port": "3772",
- "topology.kryo.factory": "backtype.storm.serialization.DefaultKryoFactory",
- "storm.zookeeper.retry.interval": "1000",
- "storm.messaging.netty.max_retries": "30",
- "topology.tick.tuple.freq.secs": "null",
- "drpc.request.timeout.secs": "600",
- "nimbus.task.launch.secs": "120",
- "task.refresh.poll.secs": "10",
- "topology.message.timeout.secs": "30",
- "storm.messaging.netty.buffer_size": "5242880",
- "topology.state.synchronization.timeout.secs": "60",
- "supervisor.worker.timeout.secs": "30",
- "topology.trident.batch.emit.interval.millis": "500",
- "topology.builtin.metrics.bucket.size.secs": "60",
- "logviewer.port": "8000",
+ "topology.tuple.serializer": "backtype.storm.serialization.types.ListDelegateSerializer",
+ "topology.workers": "1",
+ "drpc.worker.threads": "64",
+ "storm.zookeeper.servers": "['c6402.ambari.apache.org']",
+ "supervisor.heartbeat.frequency.secs": "5",
+ "topology.executor.send.buffer.size": "1024",
+ "drpc.childopts": "-Xmx768m",
+ "nimbus.thrift.port": "6627",
+ "storm.zookeeper.retry.intervalceiling.millis": "30000",
+ "storm.local.dir": "/hadoop/storm",
+ "topology.receiver.buffer.size": "8",
+ "storm.messaging.netty.client_worker_threads": "1",
+ "transactional.zookeeper.root": "/transactional",
+ "topology.skip.missing.kryo.registrations": "false",
+ "worker.heartbeat.frequency.secs": "1",
+ "zmq.hwm": "0",
+ "storm.zookeeper.connection.timeout": "15000",
+ "_storm.thrift.secure.transport": "SECURED_TRANSPORT_CLASS",
+ "storm.messaging.netty.server_worker_threads": "1",
+ "supervisor.worker.start.timeout.secs": "120",
+ "zmq.threads": "1",
+ "topology.acker.executors": "null",
+ "storm.local.mode.zmq": "false",
+ "topology.max.task.parallelism": "null",
+ "topology.max.error.report.per.interval": "5",
+ "storm.zookeeper.port": "2181",
+ "drpc.queue.size": "128",
+ "worker.childopts": "-Xmx768m _JAAS_PLACEHOLDER -javaagent:/usr/hdp/current/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Worker_%ID%_JVM",
+ "nimbus.childopts": "-Xmx1024m _JAAS_PLACEHOLDER -javaagent:/usr/hdp/current/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8649,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Nimbus_JVM",
+ "storm.zookeeper.retry.times": "5",
+ "nimbus.monitor.freq.secs": "10",
+ "storm.cluster.mode": "distributed",
+ "dev.zookeeper.path": "/tmp/dev-storm-zookeeper",
+ "drpc.invocations.port": "3773",
+ "_storm.thrift.nonsecure.transport": "NON_SECURED_TRANSPORT_CLASS",
+ "storm.zookeeper.root": "/storm",
+ "logviewer.childopts": "-Xmx128m",
+ "transactional.zookeeper.port": "null",
+ "topology.worker.childopts": "null",
+ "topology.max.spout.pending": "null",
+ "nimbus.cleanup.inbox.freq.secs": "600",
+ "storm.messaging.netty.min_wait_ms": "100",
+ "nimbus.task.timeout.secs": "30",
+ "nimbus.thrift.max_buffer_size": "1048576",
+ "topology.sleep.spout.wait.strategy.time.ms": "1",
+ "topology.optimize": "true",
+ "nimbus.reassign": "true",
+ "storm.messaging.transport": "backtype.storm.messaging.netty.Context",
+ "logviewer.appender.name": "A1",
+ "nimbus.host": "c6402.ambari.apache.org",
+ "ui.port": "8744",
+ "supervisor.slots.ports": "[6700, 6701]",
+ "nimbus.file.copy.expiration.secs": "600",
+ "supervisor.monitor.frequency.secs": "3",
+ "ui.childopts": "-Xmx768m _JAAS_PLACEHOLDER",
+ "transactional.zookeeper.servers": "null",
+ "zmq.linger.millis": "5000",
+ "topology.error.throttle.interval.secs": "10",
+ "topology.worker.shared.thread.pool.size": "4",
+ "java.library.path": "/usr/local/lib:/opt/local/lib:/usr/lib:/usr/hdp/current/storm/lib",
+ "topology.spout.wait.strategy": "backtype.storm.spout.SleepSpoutWaitStrategy",
+ "task.heartbeat.frequency.secs": "3",
+ "topology.transfer.buffer.size": "1024",
+ "storm.zookeeper.session.timeout": "20000",
+ "topology.executor.receive.buffer.size": "1024",
+ "topology.stats.sample.rate": "0.05",
+ "topology.fall.back.on.java.serialization": "true",
+ "supervisor.childopts": "-Xmx256m _JAAS_PLACEHOLDER -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.port=56431 -javaagent:/usr/hdp/current/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Supervisor_JVM",
+ "topology.enable.message.timeouts": "true",
+ "storm.messaging.netty.max_wait_ms": "1000",
+ "nimbus.topology.validator": "backtype.storm.nimbus.DefaultTopologyValidator",
+ "nimbus.supervisor.timeout.secs": "60",
+ "topology.disruptor.wait.strategy": "com.lmax.disruptor.BlockingWaitStrategy",
+ "nimbus.inbox.jar.expiration.secs": "3600",
+ "drpc.port": "3772",
+ "topology.kryo.factory": "backtype.storm.serialization.DefaultKryoFactory",
+ "storm.zookeeper.retry.interval": "1000",
+ "storm.messaging.netty.max_retries": "30",
+ "topology.tick.tuple.freq.secs": "null",
+ "drpc.request.timeout.secs": "600",
+ "nimbus.task.launch.secs": "120",
+ "task.refresh.poll.secs": "10",
+ "topology.message.timeout.secs": "30",
+ "storm.messaging.netty.buffer_size": "5242880",
+ "topology.state.synchronization.timeout.secs": "60",
+ "supervisor.worker.timeout.secs": "30",
+ "topology.trident.batch.emit.interval.millis": "500",
+ "topology.builtin.metrics.bucket.size.secs": "60",
+ "logviewer.port": "8000",
"topology.debug": "false"
- },
+ },
"hdfs-site": {
- "dfs.namenode.avoid.write.stale.datanode": "true",
- "dfs.namenode.checkpoint.txns": "1000000",
- "dfs.block.access.token.enable": "true",
- "dfs.support.append": "true",
- "dfs.datanode.address": "0.0.0.0:50010",
- "dfs.cluster.administrators": " hdfs",
- "dfs.datanode.balance.bandwidthPerSec": "6250000",
- "dfs.namenode.safemode.threshold-pct": "1.0f",
- "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
- "dfs.permissions.enabled": "true",
- "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
- "dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary",
- "dfs.https.port": "50470",
- "dfs.namenode.https-address": "c6402.ambari.apache.org:50470",
- "dfs.secondary.namenode.kerberos.https.principal": "HTTP/_HOST@EXAMPLE.COM",
- "dfs.blockreport.initialDelay": "120",
- "dfs.journalnode.edits.dir": "/grid/0/hdfs/journal",
- "dfs.blocksize": "134217728",
- "dfs.client.read.shortcircuit": "true",
- "dfs.datanode.max.transfer.threads": "1024",
- "dfs.heartbeat.interval": "3",
- "dfs.replication": "3",
- "dfs.namenode.handler.count": "40",
- "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
- "fs.permissions.umask-mode": "022",
- "dfs.namenode.stale.datanode.interval": "30000",
- "dfs.datanode.ipc.address": "0.0.0.0:8010",
- "dfs.namenode.name.dir": "/hadoop/hdfs/namenode",
- "dfs.datanode.data.dir": "/hadoop/hdfs/data",
- "dfs.namenode.http-address": "c6402.ambari.apache.org:50070",
- "dfs.webhdfs.enabled": "true",
- "dfs.datanode.failed.volumes.tolerated": "0",
- "dfs.namenode.accesstime.precision": "0",
- "dfs.namenode.avoid.read.stale.datanode": "true",
- "dfs.namenode.secondary.http-address": "c6402.ambari.apache.org:50090",
- "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM",
- "dfs.datanode.http.address": "0.0.0.0:50075",
- "dfs.datanode.du.reserved": "1073741824",
- "dfs.client.read.shortcircuit.streams.cache.size": "4096",
- "dfs.namenode.kerberos.https.principal": "HTTP/_HOST@EXAMPLE.COM",
- "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
- "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
- "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
- "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab",
- "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
- "dfs.permissions.superusergroup": "hdfs",
- "dfs.journalnode.http-address": "0.0.0.0:8480",
- "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
- "dfs.namenode.write.stale.datanode.ratio": "1.0f",
- "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
- "dfs.datanode.data.dir.perm": "750",
- "dfs.namenode.name.dir.restore": "true",
- "dfs.replication.max": "50",
+ "dfs.namenode.avoid.write.stale.datanode": "true",
+ "dfs.namenode.checkpoint.txns": "1000000",
+ "dfs.block.access.token.enable": "true",
+ "dfs.support.append": "true",
+ "dfs.datanode.address": "0.0.0.0:50010",
+ "dfs.cluster.administrators": " hdfs",
+ "dfs.datanode.balance.bandwidthPerSec": "6250000",
+ "dfs.namenode.safemode.threshold-pct": "1.0f",
+ "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
+ "dfs.permissions.enabled": "true",
+ "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
+ "dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary",
+ "dfs.https.port": "50470",
+ "dfs.namenode.https-address": "c6402.ambari.apache.org:50470",
+ "dfs.secondary.namenode.kerberos.https.principal": "HTTP/_HOST@EXAMPLE.COM",
+ "dfs.blockreport.initialDelay": "120",
+ "dfs.journalnode.edits.dir": "/grid/0/hdfs/journal",
+ "dfs.blocksize": "134217728",
+ "dfs.client.read.shortcircuit": "true",
+ "dfs.datanode.max.transfer.threads": "1024",
+ "dfs.heartbeat.interval": "3",
+ "dfs.replication": "3",
+ "dfs.namenode.handler.count": "40",
+ "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+ "fs.permissions.umask-mode": "022",
+ "dfs.namenode.stale.datanode.interval": "30000",
+ "dfs.datanode.ipc.address": "0.0.0.0:8010",
+ "dfs.namenode.name.dir": "/hadoop/hdfs/namenode",
+ "dfs.datanode.data.dir": "/hadoop/hdfs/data",
+ "dfs.namenode.http-address": "c6402.ambari.apache.org:50070",
+ "dfs.webhdfs.enabled": "true",
+ "dfs.datanode.failed.volumes.tolerated": "0",
+ "dfs.namenode.accesstime.precision": "0",
+ "dfs.namenode.avoid.read.stale.datanode": "true",
+ "dfs.namenode.secondary.http-address": "c6402.ambari.apache.org:50090",
+ "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM",
+ "dfs.datanode.http.address": "0.0.0.0:50075",
+ "dfs.datanode.du.reserved": "1073741824",
+ "dfs.client.read.shortcircuit.streams.cache.size": "4096",
+ "dfs.namenode.kerberos.https.principal": "HTTP/_HOST@EXAMPLE.COM",
+ "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
+ "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
+ "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
+ "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab",
+ "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
+ "dfs.permissions.superusergroup": "hdfs",
+ "dfs.journalnode.http-address": "0.0.0.0:8480",
+ "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
+ "dfs.namenode.write.stale.datanode.ratio": "1.0f",
+ "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
+ "dfs.datanode.data.dir.perm": "750",
+ "dfs.namenode.name.dir.restore": "true",
+ "dfs.replication.max": "50",
"dfs.namenode.checkpoint.period": "21600",
"dfs.http.policy": "HTTP_ONLY"
- },
+ },
"storm-env": {
- "content": "\n#!/bin/bash\n\n# Set Storm specific environment variables here.\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}\n\n# export STORM_CONF_DIR=\"\"",
- "storm_log_dir": "/var/log/storm",
- "storm_pid_dir": "/var/run/storm",
+ "content": "\n#!/bin/bash\n\n# Set Storm specific environment variables here.\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}\n\n# export STORM_CONF_DIR=\"\"",
+ "storm_log_dir": "/var/log/storm",
+ "storm_pid_dir": "/var/run/storm",
"storm_user": "storm"
- },
+ },
"core-site": {
- "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
- "fs.trash.interval": "360",
- "hadoop.security.authentication": "simple",
- "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec",
- "hadoop.proxyuser.falcon.hosts": "*",
- "mapreduce.jobtracker.webinterface.trusted": "false",
- "hadoop.security.authorization": "false",
- "fs.defaultFS": "hdfs://c6402.ambari.apache.org:8020",
- "ipc.server.tcpnodelay": "true",
- "ipc.client.connect.max.retries": "50",
- "ipc.client.idlethreshold": "8000",
- "io.file.buffer.size": "131072",
- "hadoop.security.auth_to_local": "\n RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/\n RULE:[2:$1@$0](jhs@.*)s/.*/mapred/\n RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/\n RULE:[2:$1@$0](hm@.*)s/.*/hbase/\n RULE:[2:$1@$0](rs@.*)s/.*/hbase/\n DEFAULT",
- "ipc.client.connection.maxidletime": "30000",
+ "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
+ "fs.trash.interval": "360",
+ "hadoop.security.authentication": "simple",
+ "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec",
+ "hadoop.proxyuser.falcon.hosts": "*",
+ "mapreduce.jobtracker.webinterface.trusted": "false",
+ "hadoop.security.authorization": "false",
+ "fs.defaultFS": "hdfs://c6402.ambari.apache.org:8020",
+ "ipc.server.tcpnodelay": "true",
+ "ipc.client.connect.max.retries": "50",
+ "ipc.client.idlethreshold": "8000",
+ "io.file.buffer.size": "131072",
+ "hadoop.security.auth_to_local": "\n RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/\n RULE:[2:$1@$0](jhs@.*)s/.*/mapred/\n RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/\n RULE:[2:$1@$0](hm@.*)s/.*/hbase/\n RULE:[2:$1@$0](rs@.*)s/.*/hbase/\n DEFAULT",
+ "ipc.client.connection.maxidletime": "30000",
"hadoop.proxyuser.falcon.groups": "users"
- },
+ },
"hadoop-policy": {
- "security.job.client.protocol.acl": "*",
- "security.job.task.protocol.acl": "*",
- "security.datanode.protocol.acl": "*",
- "security.namenode.protocol.acl": "*",
- "security.client.datanode.protocol.acl": "*",
- "security.inter.tracker.protocol.acl": "*",
- "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
- "security.client.protocol.acl": "*",
- "security.refresh.policy.protocol.acl": "hadoop",
- "security.admin.operations.protocol.acl": "hadoop",
+ "security.job.client.protocol.acl": "*",
+ "security.job.task.protocol.acl": "*",
+ "security.datanode.protocol.acl": "*",
+ "security.namenode.protocol.acl": "*",
+ "security.client.datanode.protocol.acl": "*",
+ "security.inter.tracker.protocol.acl": "*",
+ "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
+ "security.client.protocol.acl": "*",
+ "security.refresh.policy.protocol.acl": "hadoop",
+ "security.admin.operations.protocol.acl": "hadoop",
"security.inter.datanode.protocol.acl": "*"
- },
+ },
"hdfs-log4j": {
"content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.logger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logger}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: 
%m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# mapred audit logging\n#\nmapred.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging levels\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN"
- },
+ },
"hadoop-env": {
"namenode_opt_maxnewsize": "200m",
"hdfs_log_dir_prefix": "/var/log/hadoop",
@@ -258,98 +258,98 @@
"namenode_opt_maxpermsize" : "256m",
"dtnode_heapsize": "1024m",
"hadoop_root_logger": "INFO,RFA",
- "hadoop_heapsize": "1024",
+ "hadoop_heapsize": "1024",
"hadoop_pid_dir_prefix": "/var/run/hadoop"
- },
+ },
"zookeeper-env": {
- "clientPort": "2181",
- "zk_user": "zookeeper",
- "zk_log_dir": "/var/log/zookeeper",
- "syncLimit": "5",
- "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}",
- "initLimit": "10",
- "zk_pid_dir": "/var/run/zookeeper",
- "zk_data_dir": "/hadoop/zookeeper",
+ "clientPort": "2181",
+ "zk_user": "zookeeper",
+ "zk_log_dir": "/var/log/zookeeper",
+ "syncLimit": "5",
+ "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}",
+ "initLimit": "10",
+ "zk_pid_dir": "/var/run/zookeeper",
+ "zk_data_dir": "/hadoop/zookeeper",
"tickTime": "2000"
- },
+ },
"zookeeper-log4j": {
"content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: console appender only\nlog4j.rootLogger=INFO, CONSOLE\n\n# Example with rolling log file\n#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE\n\n# Example with rolling log file and tracing\n#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE\n\n#\n# Log INFO level and above messages to the console\n#\nlog4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\nlog4j.appender.CONSOLE.Threshold=INFO\nlog4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n#\n# Add ROLLINGFILE to rootLogger to get log file output\n# Log DEBUG level and above messages to a log file\nlog4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender\nlog4j.appender.ROLLINGFILE.Threshold=DEBUG\nlog4j.appender.ROLLINGFILE.File=zookeeper.log\n\n# Max log file size of 10MB\nlog4j.appender.ROLLINGFILE.MaxFileSize=10MB\n# uncomment the next line to limit number of backup files\n#log4j.appender.ROLLINGFILE.MaxBackupIndex=10\n\nlog4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n\n#\n# Add TRACEFILE to rootLogger to get log file output\n# Log DEBUG level and above messages to a log file\nlog4j.appender.TRACEFILE=org.apache.log4j.FileAppender\nlog4j.appender.TRACEFILE.Threshold=TRACE\nlog4j.appender.TRACEFILE.File=zookeeper_trace.log\n\nlog4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout\n### Notice we are including log4j's NDC here (%x)\nlog4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n"
- },
+ },
"cluster-env": {
- "security_enabled": "false",
- "ignore_groupsusers_create": "false",
- "smokeuser": "ambari-qa",
- "kerberos_domain": "EXAMPLE.COM",
+ "security_enabled": "false",
+ "ignore_groupsusers_create": "false",
+ "smokeuser": "ambari-qa",
+ "kerberos_domain": "EXAMPLE.COM",
"user_group": "hadoop"
}
- },
+ },
"configurationTags": {
"storm-site": {
"tag": "version1"
- },
+ },
"hdfs-site": {
"tag": "version1411996371868"
- },
+ },
"storm-env": {
"tag": "version1"
- },
+ },
"core-site": {
"tag": "version1411996371868"
- },
+ },
"hadoop-policy": {
"tag": "version1411996371868"
- },
+ },
"hdfs-log4j": {
"tag": "version1411996371868"
- },
+ },
"hadoop-env": {
"tag": "version1411996371868"
- },
+ },
"zookeeper-env": {
"tag": "version1"
- },
+ },
"zookeeper-log4j": {
"tag": "version1"
- },
+ },
"cluster-env": {
"tag": "version1"
}
- },
- "commandId": "6-1",
+ },
+ "commandId": "6-1",
"clusterHostInfo": {
"snamenode_host": [
"c6402.ambari.apache.org"
- ],
+ ],
"drpc_server_hosts": [
"c6402.ambari.apache.org"
- ],
+ ],
"nimbus_hosts": [
"c6402.ambari.apache.org"
- ],
+ ],
"all_ping_ports": [
"8670"
- ],
+ ],
"all_hosts": [
"c6402.ambari.apache.org"
- ],
+ ],
"slave_hosts": [
"c6402.ambari.apache.org"
- ],
+ ],
"namenode_host": [
"c6402.ambari.apache.org"
- ],
+ ],
"storm_ui_server_hosts": [
"c6402.ambari.apache.org"
- ],
+ ],
"storm_rest_api_hosts": [
"c6402.ambari.apache.org"
- ],
+ ],
"ambari_server_host": [
"c6401.ambari.apache.org"
- ],
+ ],
"zookeeper_hosts": [
"c6402.ambari.apache.org"
- ],
+ ],
"supervisor_hosts": [
"c6402.ambari.apache.org"
]
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json.orig b/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json.orig
new file mode 100644
index 0000000000..29b9c83133
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json.orig
@@ -0,0 +1,374 @@
+{
+ "configuration_attributes": {
+ "storm-site": {},
+ "hdfs-site": {
+ "final": {
+ "dfs.support.append": "true",
+ "dfs.namenode.http-address": "true"
+ }
+ },
+ "storm-env": {},
+ "core-site": {
+ "final": {
+ "fs.defaultFS": "true"
+ }
+ },
+ "hadoop-policy": {},
+ "hdfs-log4j": {},
+ "hadoop-env": {},
+ "zookeeper-env": {},
+ "zookeeper-log4j": {},
+ "cluster-env": {}
+ },
+ "commandParams": {
+ "command_timeout": "600",
+ "script": "scripts/nimbus.py",
+ "script_type": "PYTHON",
+ "service_package_folder": "HDP/2.1/services/STORM/package",
+ "hooks_folder": "HDP/2.0.6/hooks"
+ },
+ "roleCommand": "START",
+ "clusterName": "pacan",
+ "hostname": "c6402.ambari.apache.org",
+ "hostLevelParams": {
+ "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
+ "ambari_db_rca_password": "mapred",
+ "java_home": "/usr/jdk64/jdk1.7.0_45",
+ "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
+ "jce_name": "UnlimitedJCEPolicyJDK7.zip",
+ "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
+ "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.2.0.0\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.2\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.2.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.2.0.0\"},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.20\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\"}]",
+ "group_list": "[\"hadoop\",\"users\"]",
+ "package_list": "[{\"name\":\"storm_2_2_0_0_*\"}]",
+ "stack_version": "2.2",
+ "stack_name": "HDP",
+ "db_name": "ambari",
+ "ambari_db_rca_driver": "org.postgresql.Driver",
+ "jdk_name": "jdk-7u45-linux-x64.tar.gz",
+ "ambari_db_rca_username": "mapred",
+ "db_driver_filename": "mysql-connector-java.jar",
+ "user_list": "[\"ambari-qa\",\"zookeeper\",\"hdfs\",\"storm\"]",
+ "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar"
+ },
+ "commandType": "EXECUTION_COMMAND",
+ "roleParams": {},
+ "serviceName": "STORM",
+ "role": "NIMBUS",
+ "forceRefreshConfigTags": [],
+ "taskId": 54,
+ "public_hostname": "c6402.ambari.apache.org",
+ "configurations": {
+ "storm-site": {
+ "topology.tuple.serializer": "backtype.storm.serialization.types.ListDelegateSerializer",
+ "topology.workers": "1",
+ "drpc.worker.threads": "64",
+ "storm.zookeeper.servers": "['c6402.ambari.apache.org']",
+ "supervisor.heartbeat.frequency.secs": "5",
+ "topology.executor.send.buffer.size": "1024",
+ "drpc.childopts": "-Xmx768m",
+ "nimbus.thrift.port": "6627",
+ "storm.zookeeper.retry.intervalceiling.millis": "30000",
+ "storm.local.dir": "/hadoop/storm",
+ "topology.receiver.buffer.size": "8",
+ "storm.messaging.netty.client_worker_threads": "1",
+ "transactional.zookeeper.root": "/transactional",
+ "topology.skip.missing.kryo.registrations": "false",
+ "worker.heartbeat.frequency.secs": "1",
+ "zmq.hwm": "0",
+ "storm.zookeeper.connection.timeout": "15000",
+ "_storm.thrift.secure.transport": "SECURED_TRANSPORT_CLASS",
+ "storm.messaging.netty.server_worker_threads": "1",
+ "supervisor.worker.start.timeout.secs": "120",
+ "zmq.threads": "1",
+ "topology.acker.executors": "null",
+ "storm.local.mode.zmq": "false",
+ "topology.max.task.parallelism": "null",
+ "topology.max.error.report.per.interval": "5",
+ "storm.zookeeper.port": "2181",
+ "drpc.queue.size": "128",
+ "worker.childopts": "-Xmx768m _JAAS_PLACEHOLDER -javaagent:/usr/hdp/current/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Worker_%ID%_JVM",
+ "nimbus.childopts": "-Xmx1024m _JAAS_PLACEHOLDER -javaagent:/usr/hdp/current/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8649,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Nimbus_JVM",
+ "storm.zookeeper.retry.times": "5",
+ "nimbus.monitor.freq.secs": "10",
+ "storm.cluster.mode": "distributed",
+ "dev.zookeeper.path": "/tmp/dev-storm-zookeeper",
+ "drpc.invocations.port": "3773",
+ "_storm.thrift.nonsecure.transport": "NON_SECURED_TRANSPORT_CLASS",
+ "storm.zookeeper.root": "/storm",
+ "logviewer.childopts": "-Xmx128m",
+ "transactional.zookeeper.port": "null",
+ "topology.worker.childopts": "null",
+ "topology.max.spout.pending": "null",
+ "nimbus.cleanup.inbox.freq.secs": "600",
+ "storm.messaging.netty.min_wait_ms": "100",
+ "nimbus.task.timeout.secs": "30",
+ "nimbus.thrift.max_buffer_size": "1048576",
+ "topology.sleep.spout.wait.strategy.time.ms": "1",
+ "topology.optimize": "true",
+ "nimbus.reassign": "true",
+ "storm.messaging.transport": "backtype.storm.messaging.netty.Context",
+ "logviewer.appender.name": "A1",
+ "nimbus.host": "c6402.ambari.apache.org",
+ "ui.port": "8744",
+ "supervisor.slots.ports": "[6700, 6701]",
+ "nimbus.file.copy.expiration.secs": "600",
+ "supervisor.monitor.frequency.secs": "3",
+ "ui.childopts": "-Xmx768m _JAAS_PLACEHOLDER",
+ "transactional.zookeeper.servers": "null",
+ "zmq.linger.millis": "5000",
+ "topology.error.throttle.interval.secs": "10",
+ "topology.worker.shared.thread.pool.size": "4",
+ "java.library.path": "/usr/local/lib:/opt/local/lib:/usr/lib:/usr/hdp/current/storm/lib",
+ "topology.spout.wait.strategy": "backtype.storm.spout.SleepSpoutWaitStrategy",
+ "task.heartbeat.frequency.secs": "3",
+ "topology.transfer.buffer.size": "1024",
+ "storm.zookeeper.session.timeout": "20000",
+ "topology.executor.receive.buffer.size": "1024",
+ "topology.stats.sample.rate": "0.05",
+ "topology.fall.back.on.java.serialization": "true",
+ "supervisor.childopts": "-Xmx256m _JAAS_PLACEHOLDER -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.port=56431 -javaagent:/usr/hdp/current/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Supervisor_JVM",
+ "topology.enable.message.timeouts": "true",
+ "storm.messaging.netty.max_wait_ms": "1000",
+ "nimbus.topology.validator": "backtype.storm.nimbus.DefaultTopologyValidator",
+ "nimbus.supervisor.timeout.secs": "60",
+ "topology.disruptor.wait.strategy": "com.lmax.disruptor.BlockingWaitStrategy",
+ "nimbus.inbox.jar.expiration.secs": "3600",
+ "drpc.port": "3772",
+ "topology.kryo.factory": "backtype.storm.serialization.DefaultKryoFactory",
+ "storm.zookeeper.retry.interval": "1000",
+ "storm.messaging.netty.max_retries": "30",
+ "topology.tick.tuple.freq.secs": "null",
+ "drpc.request.timeout.secs": "600",
+ "nimbus.task.launch.secs": "120",
+ "task.refresh.poll.secs": "10",
+ "topology.message.timeout.secs": "30",
+ "storm.messaging.netty.buffer_size": "5242880",
+ "topology.state.synchronization.timeout.secs": "60",
+ "supervisor.worker.timeout.secs": "30",
+ "topology.trident.batch.emit.interval.millis": "500",
+ "topology.builtin.metrics.bucket.size.secs": "60",
+ "logviewer.port": "8000",
+ "topology.debug": "false"
+ },
+ "hdfs-site": {
+ "dfs.namenode.avoid.write.stale.datanode": "true",
+ "dfs.namenode.kerberos.internal.spnego.principal": "${dfs.web.authentication.kerberos.principal}",
+ "dfs.namenode.checkpoint.txns": "1000000",
+ "dfs.block.access.token.enable": "true",
+ "dfs.support.append": "true",
+ "dfs.datanode.address": "0.0.0.0:1019",
+ "dfs.cluster.administrators": " hdfs",
+ "dfs.journalnode.kerberos.principal": "jn/_HOST@EXAMPLE.COM",
+ "dfs.datanode.balance.bandwidthPerSec": "6250000",
+ "dfs.namenode.safemode.threshold-pct": "1.0f",
+ "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
+ "dfs.permissions.enabled": "true",
+ "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
+ "dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary",
+ "dfs.https.port": "50470",
+ "dfs.namenode.https-address": "c6402.ambari.apache.org:50470",
+ "dfs.secondary.namenode.kerberos.https.principal": "HTTP/_HOST@EXAMPLE.COM",
+ "dfs.blockreport.initialDelay": "120",
+ "dfs.journalnode.edits.dir": "/grid/0/hdfs/journal",
+ "dfs.blocksize": "134217728",
+ "dfs.client.read.shortcircuit": "true",
+ "dfs.datanode.max.transfer.threads": "1024",
+ "dfs.heartbeat.interval": "3",
+ "dfs.replication": "3",
+ "dfs.namenode.handler.count": "40",
+ "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+ "fs.permissions.umask-mode": "022",
+ "dfs.namenode.stale.datanode.interval": "30000",
+ "dfs.datanode.ipc.address": "0.0.0.0:8010",
+ "dfs.namenode.name.dir": "/hadoop/hdfs/namenode",
+ "dfs.secondary.namenode.kerberos.internal.spnego.principal": "${dfs.web.authentication.kerberos.principal}",
+ "dfs.datanode.data.dir": "/hadoop/hdfs/data",
+ "dfs.namenode.http-address": "c6402.ambari.apache.org:50070",
+ "dfs.webhdfs.enabled": "true",
+ "dfs.datanode.failed.volumes.tolerated": "0",
+ "dfs.namenode.accesstime.precision": "0",
+ "dfs.namenode.avoid.read.stale.datanode": "true",
+ "dfs.namenode.secondary.http-address": "c6402.ambari.apache.org:50090",
+ "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM",
+ "dfs.journalnode.keytab.file": "/etc/security/keytabs/jn.service.keytab",
+ "dfs.datanode.http.address": "0.0.0.0:1022",
+ "dfs.datanode.du.reserved": "1073741824",
+ "dfs.client.read.shortcircuit.streams.cache.size": "4096",
+ "dfs.namenode.kerberos.https.principal": "HTTP/_HOST@EXAMPLE.COM",
+ "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
+ "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
+ "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
+ "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab",
+ "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
+ "dfs.permissions.superusergroup": "hdfs",
+ "dfs.journalnode.http-address": "0.0.0.0:8480",
+ "dfs.journalnode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
+ "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
+ "dfs.namenode.write.stale.datanode.ratio": "1.0f",
+ "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
+ "dfs.datanode.data.dir.perm": "750",
+ "dfs.namenode.name.dir.restore": "true",
+ "dfs.replication.max": "50",
+ "dfs.namenode.checkpoint.period": "21600",
+ "dfs.http.policy": "HTTP_ONLY"
+ },
+ "storm-env": {
+ "storm_log_dir": "/var/log/storm",
+ "storm_principal_name": "storm@EXAMPLE.COM",
+ "storm_pid_dir": "/var/run/storm",
+ "storm_user": "storm",
+ "content": "\n#!/bin/bash\n\n# Set Storm specific environment variables here.\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}\n\n# export STORM_CONF_DIR=\"\"",
+ "nimbus_principal_name": "nimbus/_HOST@EXAMPLE.COM",
+ "storm_ui_keytab": "/etc/security/keytabs/http.storm.service.keytab",
+ "nimbus_keytab": "/etc/security/keytabs/nimbus.service.keytab",
+ "storm_keytab": "/etc/security/keytabs/storm.service.keytab",
+ "storm_ui_principal_name": "HTTP/_HOST"
+ },
+ "core-site": {
+ "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
+ "fs.trash.interval": "360",
+ "hadoop.security.authentication": "kerberos",
+ "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec",
+ "hadoop.proxyuser.falcon.hosts": "*",
+ "mapreduce.jobtracker.webinterface.trusted": "false",
+ "hadoop.security.authorization": "true",
+ "fs.defaultFS": "hdfs://c6402.ambari.apache.org:8020",
+ "ipc.server.tcpnodelay": "true",
+ "ipc.client.connect.max.retries": "50",
+ "ipc.client.idlethreshold": "8000",
+ "io.file.buffer.size": "131072",
+ "hadoop.security.auth_to_local": "RULE:[2:$1@$0](rm@.*EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nm@.*EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](dn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](jn/_HOST@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](falcon@.*EXAMPLE.COM)s/.*/falcon/\nDEFAULT",
+ "ipc.client.connection.maxidletime": "30000",
+ "hadoop.proxyuser.falcon.groups": "users"
+ },
+ "hadoop-policy": {
+ "security.job.client.protocol.acl": "*",
+ "security.job.task.protocol.acl": "*",
+ "security.datanode.protocol.acl": "*",
+ "security.namenode.protocol.acl": "*",
+ "security.client.datanode.protocol.acl": "*",
+ "security.inter.tracker.protocol.acl": "*",
+ "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
+ "security.client.protocol.acl": "*",
+ "security.refresh.policy.protocol.acl": "hadoop",
+ "security.admin.operations.protocol.acl": "hadoop",
+ "security.inter.datanode.protocol.acl": "*"
+ },
+ "hdfs-log4j": {
+      "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.logger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logger}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# mapred audit logging\n#\nmapred.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging levels\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN"
+ },
+ "hadoop-env": {
+ "namenode_opt_maxnewsize": "200m",
+ "hdfs_log_dir_prefix": "/var/log/hadoop",
+ "namenode_heapsize": "1024m",
+ "proxyuser_group": "users",
+ "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab",
+      "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME. All others are\n# optional. When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use. Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n# Hadoop Configuration Directory\n#TODO: if env var set that can cause problems\nexport HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options. Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appended to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nHADOOP_DATANODE_OPTS=\"-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER={{hdfs_user}}\n\n# Extra ssh options. Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored. $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from. Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\nif [ -d \"/usr/lib/tez\" ]; then\n export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf\nfi\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n#Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64\n\n#Hadoop logging options\nexport HADOOP_ROOT_LOGGER={{hadoop_root_logger}}",
+ "hdfs_user": "hdfs",
+ "namenode_opt_newsize": "200m",
+ "namenode_opt_permsize" : "128m",
+ "namenode_opt_maxpermsize" : "256m",
+ "dtnode_heapsize": "1024m",
+ "hadoop_root_logger": "INFO,RFA",
+ "hadoop_heapsize": "1024",
+ "hadoop_pid_dir_prefix": "/var/run/hadoop",
+ "hdfs_principal_name": "hdfs"
+ },
+ "zookeeper-env": {
+ "clientPort": "2181",
+ "zookeeper_keytab_path": "/etc/security/keytabs/zk.service.keytab",
+ "zk_user": "zookeeper",
+ "zk_log_dir": "/var/log/zookeeper",
+ "syncLimit": "5",
+ "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}",
+ "initLimit": "10",
+ "zk_pid_dir": "/var/run/zookeeper",
+ "zk_data_dir": "/hadoop/zookeeper",
+ "zookeeper_principal_name": "zookeeper/_HOST@EXAMPLE.COM",
+ "tickTime": "2000"
+ },
+ "zookeeper-log4j": {
+ "content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: console appender only\nlog4j.rootLogger=INFO, CONSOLE\n\n# Example with rolling log file\n#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE\n\n# Example with rolling log file and tracing\n#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE\n\n#\n# Log INFO level and above messages to the console\n#\nlog4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\nlog4j.appender.CONSOLE.Threshold=INFO\nlog4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n#\n# Add ROLLINGFILE to rootLogger to get log file output\n# Log DEBUG level and above messages to a log file\nlog4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender\nlog4j.appender.ROLLINGFILE.Threshold=DEBUG\nlog4j.appender.ROLLINGFILE.File=zookeeper.log\n\n# Max log file size of 10MB\nlog4j.appender.ROLLINGFILE.MaxFileSize=10MB\n# uncomment the next line to limit number of backup files\n#log4j.appender.ROLLINGFILE.MaxBackupIndex=10\n\nlog4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n\n#\n# Add TRACEFILE to rootLogger to get log file output\n# Log DEBUG level and above messages to a log file\nlog4j.appender.TRACEFILE=org.apache.log4j.FileAppender\nlog4j.appender.TRACEFILE.Threshold=TRACE\nlog4j.appender.TRACEFILE.File=zookeeper_trace.log\n\nlog4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout\n### Notice we are including log4j's NDC here (%x)\nlog4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n"
+ },
+ "cluster-env": {
+ "security_enabled": "true",
+ "ignore_groupsusers_create": "false",
+ "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab",
+ "kerberos_domain": "EXAMPLE.COM",
+ "kinit_path_local": "/usr/bin",
+ "user_group": "hadoop",
+ "smokeuser": "ambari-qa"
+ }
+ },
+ "configurationTags": {
+ "storm-site": {
+ "tag": "version1412001710682"
+ },
+ "hdfs-site": {
+ "tag": "version1412001710682"
+ },
+ "storm-env": {
+ "tag": "version1412001710682"
+ },
+ "core-site": {
+ "tag": "version1412001710682"
+ },
+ "hadoop-policy": {
+ "tag": "version1411996371868"
+ },
+ "hdfs-log4j": {
+ "tag": "version1411996371868"
+ },
+ "hadoop-env": {
+ "tag": "version1412001710682"
+ },
+ "zookeeper-env": {
+ "tag": "version1412001710682"
+ },
+ "zookeeper-log4j": {
+ "tag": "version1"
+ },
+ "cluster-env": {
+ "tag": "version1412001710681"
+ }
+ },
+ "commandId": "12-1",
+ "clusterHostInfo": {
+ "snamenode_host": [
+ "c6402.ambari.apache.org"
+ ],
+ "drpc_server_hosts": [
+ "c6402.ambari.apache.org"
+ ],
+ "nimbus_hosts": [
+ "c6402.ambari.apache.org"
+ ],
+ "all_ping_ports": [
+ "8670"
+ ],
+ "all_hosts": [
+ "c6402.ambari.apache.org"
+ ],
+ "slave_hosts": [
+ "c6402.ambari.apache.org"
+ ],
+ "namenode_host": [
+ "c6402.ambari.apache.org"
+ ],
+ "storm_ui_server_hosts": [
+ "c6402.ambari.apache.org"
+ ],
+ "storm_rest_api_hosts": [
+ "c6402.ambari.apache.org"
+ ],
+ "ambari_server_host": [
+ "c6401.ambari.apache.org"
+ ],
+ "zookeeper_hosts": [
+ "c6402.ambari.apache.org"
+ ],
+ "supervisor_hosts": [
+ "c6402.ambari.apache.org"
+ ]
+ }
+}
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json.rej b/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json.rej
new file mode 100644
index 0000000000..93981b5aea
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json.rej
@@ -0,0 +1,527 @@
+***************
+*** 1,262 ****
+ {
+ "configuration_attributes": {
+- "storm-site": {},
+ "hdfs-site": {
+ "final": {
+- "dfs.support.append": "true",
+ "dfs.namenode.http-address": "true"
+ }
+- },
+- "storm-env": {},
+ "core-site": {
+ "final": {
+ "fs.defaultFS": "true"
+ }
+- },
+- "hadoop-policy": {},
+- "hdfs-log4j": {},
+- "hadoop-env": {},
+- "zookeeper-env": {},
+- "zookeeper-log4j": {},
+ "cluster-env": {}
+- },
+ "commandParams": {
+- "command_timeout": "600",
+- "script": "scripts/nimbus.py",
+- "script_type": "PYTHON",
+- "service_package_folder": "HDP/2.1/services/STORM/package",
+ "hooks_folder": "HDP/2.0.6/hooks"
+- },
+- "roleCommand": "START",
+- "clusterName": "pacan",
+- "hostname": "c6402.ambari.apache.org",
+ "hostLevelParams": {
+- "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
+- "ambari_db_rca_password": "mapred",
+- "java_home": "/usr/jdk64/jdk1.7.0_45",
+- "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
+- "jce_name": "UnlimitedJCEPolicyJDK7.zip",
+- "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
+- "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.2.0.0\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.2\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.2.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.2.0.0\"},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.20\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\"}]",
+- "group_list": "[\"hadoop\",\"users\"]",
+- "package_list": "[{\"name\":\"storm_2_2_0_0_*\"}]",
+- "stack_version": "2.2",
+- "stack_name": "HDP",
+- "db_name": "ambari",
+- "ambari_db_rca_driver": "org.postgresql.Driver",
+- "jdk_name": "jdk-7u45-linux-x64.tar.gz",
+- "ambari_db_rca_username": "mapred",
+- "db_driver_filename": "mysql-connector-java.jar",
+- "user_list": "[\"ambari-qa\",\"zookeeper\",\"hdfs\",\"storm\"]",
+ "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar"
+- },
+- "commandType": "EXECUTION_COMMAND",
+- "roleParams": {},
+- "serviceName": "STORM",
+- "role": "NIMBUS",
+- "forceRefreshConfigTags": [],
+- "taskId": 54,
+- "public_hostname": "c6402.ambari.apache.org",
+ "configurations": {
+ "storm-site": {
+- "topology.tuple.serializer": "backtype.storm.serialization.types.ListDelegateSerializer",
+- "topology.workers": "1",
+- "drpc.worker.threads": "64",
+- "storm.zookeeper.servers": "['c6402.ambari.apache.org']",
+- "supervisor.heartbeat.frequency.secs": "5",
+- "topology.executor.send.buffer.size": "1024",
+- "drpc.childopts": "-Xmx768m",
+- "nimbus.thrift.port": "6627",
+- "storm.zookeeper.retry.intervalceiling.millis": "30000",
+- "storm.local.dir": "/hadoop/storm",
+- "topology.receiver.buffer.size": "8",
+- "storm.messaging.netty.client_worker_threads": "1",
+- "transactional.zookeeper.root": "/transactional",
+- "topology.skip.missing.kryo.registrations": "false",
+- "worker.heartbeat.frequency.secs": "1",
+- "zmq.hwm": "0",
+- "storm.zookeeper.connection.timeout": "15000",
+- "_storm.thrift.secure.transport": "SECURED_TRANSPORT_CLASS",
+- "storm.messaging.netty.server_worker_threads": "1",
+- "supervisor.worker.start.timeout.secs": "120",
+- "zmq.threads": "1",
+- "topology.acker.executors": "null",
+- "storm.local.mode.zmq": "false",
+- "topology.max.task.parallelism": "null",
+- "topology.max.error.report.per.interval": "5",
+- "storm.zookeeper.port": "2181",
+- "drpc.queue.size": "128",
+- "worker.childopts": "-Xmx768m _JAAS_PLACEHOLDER -javaagent:/usr/hdp/current/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Worker_%ID%_JVM",
+- "nimbus.childopts": "-Xmx1024m _JAAS_PLACEHOLDER -javaagent:/usr/hdp/current/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8649,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Nimbus_JVM",
+- "storm.zookeeper.retry.times": "5",
+- "nimbus.monitor.freq.secs": "10",
+- "storm.cluster.mode": "distributed",
+- "dev.zookeeper.path": "/tmp/dev-storm-zookeeper",
+- "drpc.invocations.port": "3773",
+- "_storm.thrift.nonsecure.transport": "NON_SECURED_TRANSPORT_CLASS",
+- "storm.zookeeper.root": "/storm",
+- "logviewer.childopts": "-Xmx128m",
+- "transactional.zookeeper.port": "null",
+- "topology.worker.childopts": "null",
+- "topology.max.spout.pending": "null",
+- "nimbus.cleanup.inbox.freq.secs": "600",
+- "storm.messaging.netty.min_wait_ms": "100",
+- "nimbus.task.timeout.secs": "30",
+- "nimbus.thrift.max_buffer_size": "1048576",
+- "topology.sleep.spout.wait.strategy.time.ms": "1",
+- "topology.optimize": "true",
+- "nimbus.reassign": "true",
+- "storm.messaging.transport": "backtype.storm.messaging.netty.Context",
+- "logviewer.appender.name": "A1",
+- "nimbus.host": "c6402.ambari.apache.org",
+- "ui.port": "8744",
+- "supervisor.slots.ports": "[6700, 6701]",
+- "nimbus.file.copy.expiration.secs": "600",
+- "supervisor.monitor.frequency.secs": "3",
+- "ui.childopts": "-Xmx768m _JAAS_PLACEHOLDER",
+- "transactional.zookeeper.servers": "null",
+- "zmq.linger.millis": "5000",
+- "topology.error.throttle.interval.secs": "10",
+- "topology.worker.shared.thread.pool.size": "4",
+- "java.library.path": "/usr/local/lib:/opt/local/lib:/usr/lib:/usr/hdp/current/storm/lib",
+- "topology.spout.wait.strategy": "backtype.storm.spout.SleepSpoutWaitStrategy",
+- "task.heartbeat.frequency.secs": "3",
+- "topology.transfer.buffer.size": "1024",
+- "storm.zookeeper.session.timeout": "20000",
+- "topology.executor.receive.buffer.size": "1024",
+- "topology.stats.sample.rate": "0.05",
+- "topology.fall.back.on.java.serialization": "true",
+- "supervisor.childopts": "-Xmx256m _JAAS_PLACEHOLDER -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.port=56431 -javaagent:/usr/hdp/current/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Supervisor_JVM",
+- "topology.enable.message.timeouts": "true",
+- "storm.messaging.netty.max_wait_ms": "1000",
+- "nimbus.topology.validator": "backtype.storm.nimbus.DefaultTopologyValidator",
+- "nimbus.supervisor.timeout.secs": "60",
+- "topology.disruptor.wait.strategy": "com.lmax.disruptor.BlockingWaitStrategy",
+- "nimbus.inbox.jar.expiration.secs": "3600",
+- "drpc.port": "3772",
+- "topology.kryo.factory": "backtype.storm.serialization.DefaultKryoFactory",
+- "storm.zookeeper.retry.interval": "1000",
+- "storm.messaging.netty.max_retries": "30",
+- "topology.tick.tuple.freq.secs": "null",
+- "drpc.request.timeout.secs": "600",
+- "nimbus.task.launch.secs": "120",
+- "task.refresh.poll.secs": "10",
+- "topology.message.timeout.secs": "30",
+- "storm.messaging.netty.buffer_size": "5242880",
+- "topology.state.synchronization.timeout.secs": "60",
+- "supervisor.worker.timeout.secs": "30",
+- "topology.trident.batch.emit.interval.millis": "500",
+- "topology.builtin.metrics.bucket.size.secs": "60",
+- "logviewer.port": "8000",
+ "topology.debug": "false"
+- },
+ "hdfs-site": {
+- "dfs.namenode.avoid.write.stale.datanode": "true",
+- "dfs.namenode.kerberos.internal.spnego.principal": "${dfs.web.authentication.kerberos.principal}",
+- "dfs.namenode.checkpoint.txns": "1000000",
+- "dfs.block.access.token.enable": "true",
+- "dfs.support.append": "true",
+- "dfs.datanode.address": "0.0.0.0:1019",
+- "dfs.cluster.administrators": " hdfs",
+- "dfs.journalnode.kerberos.principal": "jn/_HOST@EXAMPLE.COM",
+- "dfs.datanode.balance.bandwidthPerSec": "6250000",
+- "dfs.namenode.safemode.threshold-pct": "1.0f",
+- "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
+- "dfs.permissions.enabled": "true",
+- "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
+- "dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary",
+- "dfs.https.port": "50470",
+- "dfs.namenode.https-address": "c6402.ambari.apache.org:50470",
+- "dfs.secondary.namenode.kerberos.https.principal": "HTTP/_HOST@EXAMPLE.COM",
+- "dfs.blockreport.initialDelay": "120",
+- "dfs.journalnode.edits.dir": "/grid/0/hdfs/journal",
+- "dfs.blocksize": "134217728",
+- "dfs.client.read.shortcircuit": "true",
+- "dfs.datanode.max.transfer.threads": "1024",
+- "dfs.heartbeat.interval": "3",
+- "dfs.replication": "3",
+- "dfs.namenode.handler.count": "40",
+- "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+- "fs.permissions.umask-mode": "022",
+- "dfs.namenode.stale.datanode.interval": "30000",
+- "dfs.datanode.ipc.address": "0.0.0.0:8010",
+- "dfs.namenode.name.dir": "/hadoop/hdfs/namenode",
+- "dfs.secondary.namenode.kerberos.internal.spnego.principal": "${dfs.web.authentication.kerberos.principal}",
+- "dfs.datanode.data.dir": "/hadoop/hdfs/data",
+- "dfs.namenode.http-address": "c6402.ambari.apache.org:50070",
+- "dfs.webhdfs.enabled": "true",
+- "dfs.datanode.failed.volumes.tolerated": "0",
+- "dfs.namenode.accesstime.precision": "0",
+- "dfs.namenode.avoid.read.stale.datanode": "true",
+- "dfs.namenode.secondary.http-address": "c6402.ambari.apache.org:50090",
+- "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM",
+- "dfs.journalnode.keytab.file": "/etc/security/keytabs/jn.service.keytab",
+- "dfs.datanode.http.address": "0.0.0.0:1022",
+- "dfs.datanode.du.reserved": "1073741824",
+- "dfs.client.read.shortcircuit.streams.cache.size": "4096",
+- "dfs.namenode.kerberos.https.principal": "HTTP/_HOST@EXAMPLE.COM",
+- "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
+- "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
+- "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
+- "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab",
+- "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
+- "dfs.permissions.superusergroup": "hdfs",
+- "dfs.journalnode.http-address": "0.0.0.0:8480",
+- "dfs.journalnode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
+- "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
+- "dfs.namenode.write.stale.datanode.ratio": "1.0f",
+- "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
+- "dfs.datanode.data.dir.perm": "750",
+- "dfs.namenode.name.dir.restore": "true",
+- "dfs.replication.max": "50",
+ "dfs.namenode.checkpoint.period": "21600",
+ "dfs.http.policy": "HTTP_ONLY"
+- },
+ "storm-env": {
+- "storm_log_dir": "/var/log/storm",
+- "storm_principal_name": "storm@EXAMPLE.COM",
+- "storm_pid_dir": "/var/run/storm",
+- "storm_user": "storm",
+- "content": "\n#!/bin/bash\n\n# Set Storm specific environment variables here.\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}\n\n# export STORM_CONF_DIR=\"\"",
+- "nimbus_principal_name": "nimbus/_HOST@EXAMPLE.COM",
+- "strom_ui_keytab": "/etc/security/keytabs/http.storm.service.keytab",
+- "nimbus_keytab": "/etc/security/keytabs/nimbus.service.keytab",
+- "storm_keytab": "/etc/security/keytabs/storm.service.keytab",
+ "strom_ui_principal_name": "HTTP/_HOST"
+- },
+ "core-site": {
+- "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
+- "fs.trash.interval": "360",
+- "hadoop.security.authentication": "kerberos",
+- "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec",
+- "hadoop.proxyuser.falcon.hosts": "*",
+- "mapreduce.jobtracker.webinterface.trusted": "false",
+- "hadoop.security.authorization": "true",
+- "fs.defaultFS": "hdfs://c6402.ambari.apache.org:8020",
+- "ipc.server.tcpnodelay": "true",
+- "ipc.client.connect.max.retries": "50",
+- "ipc.client.idlethreshold": "8000",
+- "io.file.buffer.size": "131072",
+- "hadoop.security.auth_to_local": "RULE:[2:$1@$0](rm@.*EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nm@.*EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](dn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](jn/_HOST@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](falcon@.*EXAMPLE.COM)s/.*/falcon/\nDEFAULT",
+- "ipc.client.connection.maxidletime": "30000",
+ "hadoop.proxyuser.falcon.groups": "users"
+- },
+ "hadoop-policy": {
+- "security.job.client.protocol.acl": "*",
+- "security.job.task.protocol.acl": "*",
+- "security.datanode.protocol.acl": "*",
+- "security.namenode.protocol.acl": "*",
+- "security.client.datanode.protocol.acl": "*",
+- "security.inter.tracker.protocol.acl": "*",
+- "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
+- "security.client.protocol.acl": "*",
+- "security.refresh.policy.protocol.acl": "hadoop",
+- "security.admin.operations.protocol.acl": "hadoop",
+ "security.inter.datanode.protocol.acl": "*"
+- },
+ "hdfs-log4j": {
+ "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.logger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logger}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: 
%m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# mapred audit logging\n#\nmapred.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging levels\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN"
+- },
+ "hadoop-env": {
+ "namenode_opt_maxnewsize": "200m",
+ "hdfs_log_dir_prefix": "/var/log/hadoop",
+--- 1,262 ----
+ {
+ "configuration_attributes": {
++ "storm-site": {},
+ "hdfs-site": {
+ "final": {
++ "dfs.support.append": "true",
+ "dfs.namenode.http-address": "true"
+ }
++ },
++ "storm-env": {},
+ "core-site": {
+ "final": {
+ "fs.defaultFS": "true"
+ }
++ },
++ "hadoop-policy": {},
++ "hdfs-log4j": {},
++ "hadoop-env": {},
++ "zookeeper-env": {},
++ "zookeeper-log4j": {},
+ "cluster-env": {}
++ },
+ "commandParams": {
++ "command_timeout": "600",
++ "script": "scripts/nimbus.py",
++ "script_type": "PYTHON",
++ "service_package_folder": "HDP/2.1/services/STORM/package",
+ "hooks_folder": "HDP/2.0.6/hooks"
++ },
++ "roleCommand": "START",
++ "clusterName": "pacan",
++ "hostname": "c6402.ambari.apache.org",
+ "hostLevelParams": {
++ "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
++ "ambari_db_rca_password": "mapred",
++ "java_home": "/usr/jdk64/jdk1.7.0_45",
++ "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
++ "jce_name": "UnlimitedJCEPolicyJDK7.zip",
++ "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
++ "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.2.0.0\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.2\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.2.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.2.0.0\"},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.20\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\"}]",
++ "group_list": "[\"hadoop\",\"users\"]",
++ "package_list": "[{\"name\":\"storm_2_2_0_0_*\"}]",
++ "stack_version": "2.2",
++ "stack_name": "HDP",
++ "db_name": "ambari",
++ "ambari_db_rca_driver": "org.postgresql.Driver",
++ "jdk_name": "jdk-7u45-linux-x64.tar.gz",
++ "ambari_db_rca_username": "mapred",
++ "db_driver_filename": "mysql-connector-java.jar",
++ "user_list": "[\"ambari-qa\",\"zookeeper\",\"hdfs\",\"storm\"]",
+ "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar"
++ },
++ "commandType": "EXECUTION_COMMAND",
++ "roleParams": {},
++ "serviceName": "STORM",
++ "role": "NIMBUS",
++ "forceRefreshConfigTags": [],
++ "taskId": 54,
++ "public_hostname": "c6402.ambari.apache.org",
+ "configurations": {
+ "storm-site": {
++ "topology.tuple.serializer": "backtype.storm.serialization.types.ListDelegateSerializer",
++ "topology.workers": "1",
++ "drpc.worker.threads": "64",
++ "storm.zookeeper.servers": "['c6402.ambari.apache.org']",
++ "supervisor.heartbeat.frequency.secs": "5",
++ "topology.executor.send.buffer.size": "1024",
++ "drpc.childopts": "-Xmx768m",
++ "nimbus.thrift.port": "6627",
++ "storm.zookeeper.retry.intervalceiling.millis": "30000",
++ "storm.local.dir": "/hadoop/storm",
++ "topology.receiver.buffer.size": "8",
++ "storm.messaging.netty.client_worker_threads": "1",
++ "transactional.zookeeper.root": "/transactional",
++ "topology.skip.missing.kryo.registrations": "false",
++ "worker.heartbeat.frequency.secs": "1",
++ "zmq.hwm": "0",
++ "storm.zookeeper.connection.timeout": "15000",
++ "_storm.thrift.secure.transport": "SECURED_TRANSPORT_CLASS",
++ "storm.messaging.netty.server_worker_threads": "1",
++ "supervisor.worker.start.timeout.secs": "120",
++ "zmq.threads": "1",
++ "topology.acker.executors": "null",
++ "storm.local.mode.zmq": "false",
++ "topology.max.task.parallelism": "null",
++ "topology.max.error.report.per.interval": "5",
++ "storm.zookeeper.port": "2181",
++ "drpc.queue.size": "128",
++ "worker.childopts": "-Xmx768m _JAAS_PLACEHOLDER -javaagent:/usr/hdp/current/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Worker_%ID%_JVM",
++ "nimbus.childopts": "-Xmx1024m _JAAS_PLACEHOLDER -javaagent:/usr/hdp/current/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8649,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Nimbus_JVM",
++ "storm.zookeeper.retry.times": "5",
++ "nimbus.monitor.freq.secs": "10",
++ "storm.cluster.mode": "distributed",
++ "dev.zookeeper.path": "/tmp/dev-storm-zookeeper",
++ "drpc.invocations.port": "3773",
++ "_storm.thrift.nonsecure.transport": "NON_SECURED_TRANSPORT_CLASS",
++ "storm.zookeeper.root": "/storm",
++ "logviewer.childopts": "-Xmx128m",
++ "transactional.zookeeper.port": "null",
++ "topology.worker.childopts": "null",
++ "topology.max.spout.pending": "null",
++ "nimbus.cleanup.inbox.freq.secs": "600",
++ "storm.messaging.netty.min_wait_ms": "100",
++ "nimbus.task.timeout.secs": "30",
++ "nimbus.thrift.max_buffer_size": "1048576",
++ "topology.sleep.spout.wait.strategy.time.ms": "1",
++ "topology.optimize": "true",
++ "nimbus.reassign": "true",
++ "storm.messaging.transport": "backtype.storm.messaging.netty.Context",
++ "logviewer.appender.name": "A1",
++ "nimbus.host": "c6402.ambari.apache.org",
++ "ui.port": "8744",
++ "supervisor.slots.ports": "[6700, 6701]",
++ "nimbus.file.copy.expiration.secs": "600",
++ "supervisor.monitor.frequency.secs": "3",
++ "ui.childopts": "-Xmx768m _JAAS_PLACEHOLDER",
++ "transactional.zookeeper.servers": "null",
++ "zmq.linger.millis": "5000",
++ "topology.error.throttle.interval.secs": "10",
++ "topology.worker.shared.thread.pool.size": "4",
++ "java.library.path": "/usr/local/lib:/opt/local/lib:/usr/lib:/usr/hdp/current/storm/lib",
++ "topology.spout.wait.strategy": "backtype.storm.spout.SleepSpoutWaitStrategy",
++ "task.heartbeat.frequency.secs": "3",
++ "topology.transfer.buffer.size": "1024",
++ "storm.zookeeper.session.timeout": "20000",
++ "topology.executor.receive.buffer.size": "1024",
++ "topology.stats.sample.rate": "0.05",
++ "topology.fall.back.on.java.serialization": "true",
++ "supervisor.childopts": "-Xmx256m _JAAS_PLACEHOLDER -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.port=56431 -javaagent:/usr/hdp/current/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Supervisor_JVM",
++ "topology.enable.message.timeouts": "true",
++ "storm.messaging.netty.max_wait_ms": "1000",
++ "nimbus.topology.validator": "backtype.storm.nimbus.DefaultTopologyValidator",
++ "nimbus.supervisor.timeout.secs": "60",
++ "topology.disruptor.wait.strategy": "com.lmax.disruptor.BlockingWaitStrategy",
++ "nimbus.inbox.jar.expiration.secs": "3600",
++ "drpc.port": "3772",
++ "topology.kryo.factory": "backtype.storm.serialization.DefaultKryoFactory",
++ "storm.zookeeper.retry.interval": "1000",
++ "storm.messaging.netty.max_retries": "30",
++ "topology.tick.tuple.freq.secs": "null",
++ "drpc.request.timeout.secs": "600",
++ "nimbus.task.launch.secs": "120",
++ "task.refresh.poll.secs": "10",
++ "topology.message.timeout.secs": "30",
++ "storm.messaging.netty.buffer_size": "5242880",
++ "topology.state.synchronization.timeout.secs": "60",
++ "supervisor.worker.timeout.secs": "30",
++ "topology.trident.batch.emit.interval.millis": "500",
++ "topology.builtin.metrics.bucket.size.secs": "60",
++ "logviewer.port": "8000",
+ "topology.debug": "false"
++ },
+ "hdfs-site": {
++ "dfs.namenode.avoid.write.stale.datanode": "true",
++ "dfs.namenode.kerberos.internal.spnego.principal": "${dfs.web.authentication.kerberos.principal}",
++ "dfs.namenode.checkpoint.txns": "1000000",
++ "dfs.block.access.token.enable": "true",
++ "dfs.support.append": "true",
++ "dfs.datanode.address": "0.0.0.0:1019",
++ "dfs.cluster.administrators": " hdfs",
++ "dfs.journalnode.kerberos.principal": "jn/_HOST@EXAMPLE.COM",
++ "dfs.datanode.balance.bandwidthPerSec": "6250000",
++ "dfs.namenode.safemode.threshold-pct": "1.0f",
++ "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
++ "dfs.permissions.enabled": "true",
++ "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
++ "dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary",
++ "dfs.https.port": "50470",
++ "dfs.namenode.https-address": "c6402.ambari.apache.org:50470",
++ "dfs.secondary.namenode.kerberos.https.principal": "HTTP/_HOST@EXAMPLE.COM",
++ "dfs.blockreport.initialDelay": "120",
++ "dfs.journalnode.edits.dir": "/grid/0/hdfs/journal",
++ "dfs.blocksize": "134217728",
++ "dfs.client.read.shortcircuit": "true",
++ "dfs.datanode.max.transfer.threads": "1024",
++ "dfs.heartbeat.interval": "3",
++ "dfs.replication": "3",
++ "dfs.namenode.handler.count": "40",
++ "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
++ "fs.permissions.umask-mode": "022",
++ "dfs.namenode.stale.datanode.interval": "30000",
++ "dfs.datanode.ipc.address": "0.0.0.0:8010",
++ "dfs.namenode.name.dir": "/hadoop/hdfs/namenode",
++ "dfs.secondary.namenode.kerberos.internal.spnego.principal": "${dfs.web.authentication.kerberos.principal}",
++ "dfs.datanode.data.dir": "/hadoop/hdfs/data",
++ "dfs.namenode.http-address": "c6402.ambari.apache.org:50070",
++ "dfs.webhdfs.enabled": "true",
++ "dfs.datanode.failed.volumes.tolerated": "0",
++ "dfs.namenode.accesstime.precision": "0",
++ "dfs.namenode.avoid.read.stale.datanode": "true",
++ "dfs.namenode.secondary.http-address": "c6402.ambari.apache.org:50090",
++ "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM",
++ "dfs.journalnode.keytab.file": "/etc/security/keytabs/jn.service.keytab",
++ "dfs.datanode.http.address": "0.0.0.0:1022",
++ "dfs.datanode.du.reserved": "1073741824",
++ "dfs.client.read.shortcircuit.streams.cache.size": "4096",
++ "dfs.namenode.kerberos.https.principal": "HTTP/_HOST@EXAMPLE.COM",
++ "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
++ "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
++ "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
++ "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab",
++ "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
++ "dfs.permissions.superusergroup": "hdfs",
++ "dfs.journalnode.http-address": "0.0.0.0:8480",
++ "dfs.journalnode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
++ "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
++ "dfs.namenode.write.stale.datanode.ratio": "1.0f",
++ "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
++ "dfs.datanode.data.dir.perm": "750",
++ "dfs.namenode.name.dir.restore": "true",
++ "dfs.replication.max": "50",
+ "dfs.namenode.checkpoint.period": "21600",
+ "dfs.http.policy": "HTTP_ONLY"
++ },
+ "storm-env": {
++ "storm_log_dir": "/var/log/storm",
++ "storm_principal_name": "storm@EXAMPLE.COM",
++ "storm_pid_dir": "/var/run/storm",
++ "storm_user": "storm",
++ "content": "\n#!/bin/bash\n\n# Set Storm specific environment variables here.\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}\n\n# export STORM_CONF_DIR=\"\"",
++ "nimbus_principal_name": "nimbus/_HOST@EXAMPLE.COM",
++ "strom_ui_keytab": "/etc/security/keytabs/http.storm.service.keytab",
++ "nimbus_keytab": "/etc/security/keytabs/nimbus.service.keytab",
++ "storm_keytab": "/etc/security/keytabs/storm.service.keytab",
+ "strom_ui_principal_name": "HTTP/_HOST"
++ },
+ "core-site": {
++ "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
++ "fs.trash.interval": "360",
++ "hadoop.security.authentication": "kerberos",
++ "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec",
++ "hadoop.proxyuser.falcon.hosts": "*",
++ "mapreduce.jobtracker.webinterface.trusted": "false",
++ "hadoop.security.authorization": "true",
++ "fs.defaultFS": "hdfs://c6402.ambari.apache.org:8020",
++ "ipc.server.tcpnodelay": "true",
++ "ipc.client.connect.max.retries": "50",
++ "ipc.client.idlethreshold": "8000",
++ "io.file.buffer.size": "131072",
++ "hadoop.security.auth_to_local": "RULE:[2:$1@$0](rm@.*EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nm@.*EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](dn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](jn/_HOST@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](falcon@.*EXAMPLE.COM)s/.*/falcon/\nDEFAULT",
++ "ipc.client.connection.maxidletime": "30000",
+ "hadoop.proxyuser.falcon.groups": "users"
++ },
+ "hadoop-policy": {
++ "security.job.client.protocol.acl": "*",
++ "security.job.task.protocol.acl": "*",
++ "security.datanode.protocol.acl": "*",
++ "security.namenode.protocol.acl": "*",
++ "security.client.datanode.protocol.acl": "*",
++ "security.inter.tracker.protocol.acl": "*",
++ "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
++ "security.client.protocol.acl": "*",
++ "security.refresh.policy.protocol.acl": "hadoop",
++ "security.admin.operations.protocol.acl": "hadoop",
+ "security.inter.datanode.protocol.acl": "*"
++ },
+ "hdfs-log4j": {
+ "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.logger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logger}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: 
%m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# mapred audit logging\n#\nmapred.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging levels\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN"
++ },
+ "hadoop-env": {
+ "namenode_opt_maxnewsize": "200m",
+ "hdfs_log_dir_prefix": "/var/log/hadoop",
diff --git a/ambari-server/src/test/python/unitTests.py b/ambari-server/src/test/python/unitTests.py
index b4e3688ef0..f2ffacb0b0 100644
--- a/ambari-server/src/test/python/unitTests.py
+++ b/ambari-server/src/test/python/unitTests.py
@@ -90,7 +90,8 @@ def stack_test_executor(base_folder, service, stack, custom_tests, executor_resu
tests = get_test_files(base_folder, mask = test_mask)
- shuffle(tests)
+ #TODO Add an option to randomize the tests' execution
+ #shuffle(tests)
modules = [os.path.basename(s)[:-3] for s in tests]
suites = [unittest.defaultTestLoader.loadTestsFromName(name) for name in
modules]
@@ -183,7 +184,8 @@ def main():
test_mask = TEST_MASK
tests = get_test_files(pwd, mask=test_mask, recursive=False)
- shuffle(tests)
+ #TODO Add an option to randomize the tests' execution
+ #shuffle(tests)
modules = [os.path.basename(s)[:-3] for s in tests]
suites = [unittest.defaultTestLoader.loadTestsFromName(name) for name in
modules]
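
Note: both hunks above disable shuffling unconditionally and leave a TODO asking for an opt-in flag instead. A minimal sketch of what that option could look like in this script's main(), assuming optparse wiring; the --shuffle and --seed flag names are hypothetical, not part of this change:

# Hypothetical flags implementing the TODO above.
import optparse
from random import seed, shuffle

parser = optparse.OptionParser()
parser.add_option("--shuffle", action="store_true", default=False,
                  help="randomize test execution order")
parser.add_option("--seed", type="int", default=None,
                  help="fix the shuffle order so failures are reproducible")
options, _ = parser.parse_args()

tests = get_test_files(pwd, mask=test_mask, recursive=False)
if options.shuffle:
  if options.seed is not None:
    seed(options.seed)  # reproducible ordering for debugging
  shuffle(tests)        # the same call the hunks above commented out
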
diff --git a/ambari-shell/ambari-python-shell/pom.xml b/ambari-shell/ambari-python-shell/pom.xml
index 08d74c0211..4381e1bba9 100644
--- a/ambari-shell/ambari-python-shell/pom.xml
+++ b/ambari-shell/ambari-python-shell/pom.xml
@@ -75,7 +75,7 @@
<executions>
<execution>
<configuration>
- <executable>${project.basedir}/../../ambari-common/src/main/unix/ambari-python-wrap</executable>
+ <executable>${executable.python}</executable>
<workingDirectory>target/ambari-python-shell-${project.version}</workingDirectory>
<arguments>
<argument>${project.basedir}/src/main/python/setup.py</argument>
@@ -216,4 +216,30 @@
</extension>
</extensions>
</build>
+ <profiles>
+ <profile>
+ <id>windows</id>
+ <activation>
+ <os>
+ <family>win</family>
+ </os>
+ </activation>
+ <properties>
+ <envClassifier>win</envClassifier>
+ <executable.python>python</executable.python>
+ </properties>
+ </profile>
+ <profile>
+ <id>linux</id>
+ <activation>
+ <os>
+ <family>unix</family>
+ </os>
+ </activation>
+ <properties>
+ <envClassifier>linux</envClassifier>
+ <executable.python>${project.basedir}/../../ambari-common/src/main/unix/ambari-python-wrap</executable.python>
+ </properties>
+ </profile>
+ </profiles>
</project>
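
Note: with the OS-activated profiles above, `executable.python` resolves to plain `python` on Windows and to the repository's `ambari-python-wrap` shim on Unix, so the exec-maven-plugin invocation earlier in this POM no longer hardcodes a Unix-only path. Either profile can also be forced explicitly, e.g. `mvn -Pwindows package`.
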
diff --git a/ambari-shell/pom.xml b/ambari-shell/pom.xml
index 947c9b922f..b23602e0b8 100644
--- a/ambari-shell/pom.xml
+++ b/ambari-shell/pom.xml
@@ -56,6 +56,23 @@
</executions>
</plugin>
<plugin>
+ <groupId>org.apache.rat</groupId>
+ <artifactId>apache-rat-plugin</artifactId>
+ <configuration>
+ <excludes>
+ <exclude>**/*.iml</exclude>
+ </excludes>
+ </configuration>
+ <executions>
+ <execution>
+ <phase>test</phase>
+ <goals>
+ <goal>check</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
<groupId>org.vafer</groupId>
<artifactId>jdeb</artifactId>
<version>1.0.1</version>
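
Note: binding the Apache RAT plugin's `check` goal to the `test` phase makes license-header verification part of every ambari-shell build; IDE project files (`*.iml`) carry no license headers, hence the exclusion.
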
diff --git a/ambari-views/pom.xml b/ambari-views/pom.xml
index fad949ac16..cecf8a1fa0 100644
--- a/ambari-views/pom.xml
+++ b/ambari-views/pom.xml
@@ -133,6 +133,7 @@
<excludes>
<exclude>**/*.json</exclude>
+ <exclude>**/*.iml</exclude>
</excludes>
</configuration>
<executions>
<execution>
diff --git a/ambari-web/app/app.js b/ambari-web/app/app.js
index 77bb21e55f..20ad4ac195 100644
--- a/ambari-web/app/app.js
+++ b/ambari-web/app/app.js
@@ -72,14 +72,14 @@ module.exports = Em.Application.create({
currentStackVersion: '',
currentStackName: function() {
return Em.get((this.get('currentStackVersion') || this.get('defaultStackVersion')).match(/(.+)-\d.+/), '1');
- }.property('currentStackVersion'),
+ }.property('currentStackVersion', 'defaultStackVersion'),
allHostNames: [],
currentStackVersionNumber: function () {
var regExp = new RegExp(this.get('currentStackName') + '-');
return (this.get('currentStackVersion') || this.get('defaultStackVersion')).replace(regExp, '');
- }.property('currentStackVersion', 'currentStackName'),
+ }.property('currentStackVersion', 'defaultStackVersion', 'currentStackName'),
isHadoop2Stack: function () {
var result = true;
@@ -96,6 +96,10 @@ module.exports = Em.Application.create({
return (stringUtils.compareVersions(this.get('currentStackVersionNumber'), "2.2") > -1);
}.property('currentStackVersionNumber'),
+ isHadoopWindowsStack: function() {
+ return this.get('currentStackName') == "HDPWIN";
+ }.property('currentStackName'),
+
/**
* If NameNode High Availability is enabled
* Based on <code>clusterStatus.isInstalled</code>, stack version, <code>SNameNode</code> availability
@@ -155,6 +159,18 @@ module.exports = Em.Application.create({
return App.StackService.find().filterProperty('isMonitoringService').mapProperty('serviceName');
}.property('App.router.clusterController.isLoaded'),
+ hostMetrics: function () {
+ return App.StackService.find().filterProperty('isHostMetricsService').mapProperty('serviceName');
+ }.property('App.router.clusterController.isLoaded'),
+
+ serviceMetrics: function () {
+ return App.StackService.find().filterProperty('isServiceMetricsService').mapProperty('serviceName');
+ }.property('App.router.clusterController.isLoaded'),
+
+ alerting: function () {
+ return App.StackService.find().filterProperty('isAlertingService').mapProperty('serviceName');
+ }.property('App.router.clusterController.isLoaded'),
+
supportsServiceCheck: function() {
return App.StackService.find().filterProperty('serviceCheckSupported').mapProperty('serviceName');
}.property('App.router.clusterController.isLoaded')
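
Note: `hostMetrics`, `serviceMetrics`, and `alerting` follow the pattern of the existing `monitoring` property: instead of hardcoding service names such as GANGLIA or NAGIOS, the UI asks the stack definition which services advertise each capability. The heatmap controller change further down is the first consumer, gating the host heatmap category on `services.hostMetrics`.
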
diff --git a/ambari-web/app/assets/data/configuration/cluster_env_site.json b/ambari-web/app/assets/data/configuration/cluster_env_site.json
new file mode 100644
index 0000000000..292b0a1144
--- /dev/null
+++ b/ambari-web/app/assets/data/configuration/cluster_env_site.json
@@ -0,0 +1,14 @@
+{
+ "href" : "http://c6401.ambari.apache.org:8080/api/v1/clusters/c/configurations?type=cluster-env",
+ "items" : [
+ {
+ "href" : "http://c6401.ambari.apache.org:8080/api/v1/clusters/c/configurations?type=cluster-env&tag=version1",
+ "tag" : "version1",
+ "type" : "cluster-env",
+ "version" : 1,
+ "Config" : {
+ "cluster_name" : "c"
+ }
+ }
+]
+}
diff --git a/ambari-web/app/config.js b/ambari-web/app/config.js
index e6f08f286f..aa692d0db3 100644
--- a/ambari-web/app/config.js
+++ b/ambari-web/app/config.js
@@ -28,6 +28,8 @@ App.testEnableSecurity = true; // By default enable security is tested; turning
App.testNameNodeHA = true;
App.apiPrefix = '/api/v1';
App.defaultStackVersion = 'HDP-2.2';
+App.defaultWindowsStackVersion = 'HDPWIN-2.1';
+
App.defaultJavaHome = '/usr/jdk/jdk1.6.0_31';
App.timeout = 180000; // default AJAX timeout
App.maxRetries = 3; // max number of retries for certain AJAX calls
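
Note: `App.defaultWindowsStackVersion` introduces a Windows counterpart to `defaultStackVersion`; its consumers are not shown in this excerpt, but it pairs with the HDPWIN detection (`isHadoopWindowsStack`) added to app.js above.
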
diff --git a/ambari-web/app/controllers/global/cluster_controller.js b/ambari-web/app/controllers/global/cluster_controller.js
index f72d0cd671..bd033948e3 100644
--- a/ambari-web/app/controllers/global/cluster_controller.js
+++ b/ambari-web/app/controllers/global/cluster_controller.js
@@ -97,6 +97,7 @@ App.ClusterController = Em.Controller.extend({
var dfd = $.Deferred();
if (App.get('clusterName') && !reload) {
+ App.set('clusterName', this.get('clusterName'));
dfd.resolve();
} else {
App.ajax.send({
diff --git a/ambari-web/app/controllers/installer.js b/ambari-web/app/controllers/installer.js
index 1fd57b3286..73cfb6227d 100644
--- a/ambari-web/app/controllers/installer.js
+++ b/ambari-web/app/controllers/installer.js
@@ -102,10 +102,6 @@ App.InstallerController = App.WizardController.extend({
return jQuery.extend({}, this.get('clusterStatusTemplate'));
},
- getInstallOptions: function () {
- return jQuery.extend({}, this.get('installOptionsTemplate'));
- },
-
getHosts: function () {
return [];
},
diff --git a/ambari-web/app/controllers/main/admin/serviceAccounts_controller.js b/ambari-web/app/controllers/main/admin/serviceAccounts_controller.js
index 0551b2c77a..94221aaacd 100644
--- a/ambari-web/app/controllers/main/admin/serviceAccounts_controller.js
+++ b/ambari-web/app/controllers/main/admin/serviceAccounts_controller.js
@@ -58,11 +58,11 @@ App.MainAdminServiceAccountsController = App.MainServiceInfoConfigsController.ex
App.router.get('configurationController').getConfigsByTags(this.get('serviceConfigTags')).done(function (configGroups) {
var configSet = App.config.mergePreDefinedWithLoaded(configGroups, [], self.get('serviceConfigTags'), serviceName);
- var misc_configs = configSet.configs.filterProperty('serviceName', self.get('selectedService')).filterProperty('category', 'Users and Groups').filterProperty('isVisible', true);
+ var misc_configs = configSet.configs.filterProperty('serviceName', self.get('selectedService')).filterProperty('category', 'Users and Groups').filterProperty('isVisible', true).rejectProperty('displayType', 'password');
misc_configs = App.config.miscConfigVisibleProperty(misc_configs, installedServices);
- var sortOrder = self.get('configs').filterProperty('serviceName', self.get('selectedService')).filterProperty('category', 'Users and Groups').filterProperty('isVisible', true).mapProperty('name');
+ var sortOrder = self.get('configs').filterProperty('serviceName', self.get('selectedService')).filterProperty('category', 'Users and Groups').filterProperty('isVisible', true).rejectProperty('displayType', 'password').mapProperty('name');
self.setProxyUserGroupLabel(misc_configs);
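
Note: `rejectProperty('displayType', 'password')` is applied to both the displayed config set and the sort-order list, so password-typed properties are kept off the Service Accounts page entirely rather than merely hidden from one of the two.
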
diff --git a/ambari-web/app/controllers/main/charts/heatmap.js b/ambari-web/app/controllers/main/charts/heatmap.js
index 8010ead276..324be13749 100644
--- a/ambari-web/app/controllers/main/charts/heatmap.js
+++ b/ambari-web/app/controllers/main/charts/heatmap.js
@@ -33,20 +33,25 @@ App.MainChartsHeatmapController = Em.Controller.extend({
}.property('modelRacks.@each.isLoaded'),
allMetrics: function () {
- var metrics = [
- Em.Object.create({
- label: Em.I18n.t('charts.heatmap.category.host'),
- category: 'host',
- items: [
- App.MainChartHeatmapDiskSpaceUsedMetric.create(),
- App.MainChartHeatmapMemoryUsedMetric.create(),
- App.MainChartHeatmapCpuWaitIOMetric.create()
- /*, App.MainChartHeatmapProcessRunMetric.create()*/
- ]
- })
- ];
-
- if (App.HDFSService.find().get('length')) {
+ var metrics = [];
+
+ // Display host heatmaps only if the stack definition includes a host metrics service.
+ if(App.get('services.hostMetrics').length) {
+ metrics.push(
+ Em.Object.create({
+ label: Em.I18n.t('charts.heatmap.category.host'),
+ category: 'host',
+ items: [
+ App.MainChartHeatmapDiskSpaceUsedMetric.create(),
+ App.MainChartHeatmapMemoryUsedMetric.create(),
+ App.MainChartHeatmapCpuWaitIOMetric.create()
+ /*, App.MainChartHeatmapProcessRunMetric.create()*/
+ ]
+ })
+ );
+ }
+
+ if(App.HDFSService.find().get('length')) {
metrics.push(
Em.Object.create({
label: Em.I18n.t('charts.heatmap.category.hdfs'),
diff --git a/ambari-web/app/controllers/main/host/add_controller.js b/ambari-web/app/controllers/main/host/add_controller.js
index 001175a356..9945aa304d 100644
--- a/ambari-web/app/controllers/main/host/add_controller.js
+++ b/ambari-web/app/controllers/main/host/add_controller.js
@@ -79,14 +79,6 @@ App.AddHostController = App.WizardController.extend({
},
/**
- * return new object extended from installOptionsTemplate
- * @return Object
- */
- getInstallOptions: function () {
- return jQuery.extend({}, this.get('installOptionsTemplate'));
- },
-
- /**
* Remove host from model. Used at <code>Confirm hosts</code> step
* @param hosts Array of hosts, which we want to delete
*/
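
Note: InstallerController (earlier in this diff) and AddHostController both drop identical `getInstallOptions` overrides; presumably the template-cloning default now lives once in the shared `App.WizardController` base, which this excerpt does not show.
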
diff --git a/ambari-web/app/controllers/main/service/info/configs.js b/ambari-web/app/controllers/main/service/info/configs.js
index 6ed0d72e5d..933308488f 100644
--- a/ambari-web/app/controllers/main/service/info/configs.js
+++ b/ambari-web/app/controllers/main/service/info/configs.js
@@ -58,6 +58,9 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ServerValidatorM
// note passed on configs save
serviceConfigVersionNote: '',
versionLoaded: false,
+ // tag of the latest cluster-env configuration version
+ clusterEnvTagVersion: '',
+
isCurrentSelected: function () {
return App.ServiceConfigVersion.find(this.get('content.serviceName') + "_" + this.get('selectedVersion')).get('isCurrent');
}.property('selectedVersion', 'content.serviceName'),
@@ -253,6 +256,26 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ServerValidatorM
loadStep: function () {
console.log("TRACE: Loading configure for service");
this.clearStep();
+ this.loadClusterEnvSite();
+ },
+
+ /**
+ * load all tag versions of cluster-env site
+ * @returns {$.ajax}
+ */
+ loadClusterEnvSite: function () {
+ var self = this;
+ return App.ajax.send({
+ name: 'config.cluster_env_site',
+ sender: self,
+ success: 'loadClusterEnvSiteSuccess'
+ });
+ },
+
+ loadClusterEnvSiteSuccess: function (data) {
+ // find the latest tag version
+ var maxVersion = Math.max.apply(this, data.items.mapProperty('version'));
+ this.set('clusterEnvTagVersion', data.items.findProperty('version', maxVersion).tag);
this.loadServiceConfigs();
},
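
Note: configuration versions of a given site type increase monotonically, so taking the item with the maximum `version` yields the tag of the latest cluster-env site; the next hunk then injects that tag into `siteToTagMap` so cluster-env is loaded alongside the service's own config sites.
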
@@ -407,6 +430,10 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ServerValidatorM
}, this);
App.router.get('configurationController').saveToDB(configurations);
+
+ // add cluster-env tag
+ siteToTagMap['cluster-env'] = this.get('clusterEnvTagVersion');
+
this.loadedClusterSiteToTagMap = siteToTagMap;
this.set('selectedVersion', selectedVersion);
//reset map if selected current version of default group
@@ -822,14 +849,87 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ServerValidatorM
},
checkDatabaseProperties: function (serviceConfig) {
- if (!['OOZIE', 'HIVE'].contains(this.get('content.serviceName'))) return;
- var configsToHide = ['oozie_hostname', 'hive_hostname'];
- configsToHide.forEach(function (configName) {
- var property = serviceConfig.configs.findProperty('name', configName);
- if (property) property.set('isVisible', false);
- });
+ this.hideSinkDatabaseProperties(serviceConfig.configs);
+ this.hideHiveDatabaseProperties(serviceConfig.configs);
+ this.hideOozieDatabaseProperties(serviceConfig.configs);
+ },
+
+ hideSinkDatabaseProperties: function (configs) {
+ if (!['HDFS'].contains(this.get('content.serviceName'))) return;
+ var property = configs.findProperty('name', 'sink.dbservername');
+ if (property) property.set('isVisible', false);
+ var hadoop_user_property = configs.findProperty('name', 'hadoop.user.name');
+ if(hadoop_user_property)
+ {
+ hadoop_user_property.setProperties({
+ isVisible: false,
+ isRequired: false
+ });
+ }
+
+ var hadoop_password_property = configs.findProperty('name', 'hadoop.user.password');
+ if(hadoop_password_property)
+ {
+ hadoop_password_property.setProperties({
+ isVisible: false,
+ isRequired: false
+ });
+ }
+
+ if (configs.someProperty('name', 'sink_database')) {
+ var sinkDb = configs.findProperty('name', 'sink_database');
+ if (sinkDb.value === 'Existing MSSQL Server database with integrated authentication') {
+ configs.findProperty('name', 'sink.dblogin').setProperties({
+ isVisible: false,
+ isRequired: false
+ });
+ configs.findProperty('name', 'sink.dbpassword').setProperties({
+ isVisible: false,
+ isRequired: false
+ });
+ }
+ }
},
+ hideHiveDatabaseProperties: function (configs) {
+ if (!['HIVE'].contains(this.get('content.serviceName'))) return;
+ var property = configs.findProperty('name', 'hive_hostname');
+ if (property) property.set('isVisible', false);
+
+ if (configs.someProperty('name', 'hive_database')) {
+ var hiveDb = configs.findProperty('name', 'hive_database');
+ if (hiveDb.value === 'Existing MSSQL Server database with integrated authentication') {
+ configs.findProperty('name', 'javax.jdo.option.ConnectionUserName').setProperties({
+ isVisible: false,
+ isRequired: false
+ });
+ configs.findProperty('name', 'javax.jdo.option.ConnectionPassword').setProperties({
+ isVisible: false,
+ isRequired: false
+ });
+ }
+ }
+ },
+
+ hideOozieDatabaseProperties: function (configs) {
+ if (!['OOZIE'].contains(this.get('content.serviceName'))) return;
+ var property = configs.findProperty('name', 'oozie_hostname');
+ if (property) property.set('isVisible', false);
+
+ if (configs.someProperty('name', 'oozie_database')) {
+ var oozieDb = configs.findProperty('name', 'oozie_database');
+ if (oozieDb.value === 'Existing MSSQL Server database with integrated authentication') {
+ configs.findProperty('name', 'oozie.service.JPAService.jdbc.username').setProperties({
+ isVisible: false,
+ isRequired: false
+ });
+ configs.findProperty('name', 'oozie.service.JPAService.jdbc.password').setProperties({
+ isVisible: false,
+ isRequired: false
+ });
+ }
+ }
+ },
onLoadOverrides: function (allConfigs) {
var serviceName = this.get('content.serviceName');
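
Note: the three hide* helpers above share one pattern: when the "Existing MSSQL Server database with integrated authentication" option is selected, the login and password properties are both hidden and marked non-required, since Windows integrated authentication supplies credentials implicitly and required-but-hidden fields would otherwise block form validation.
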
@@ -1462,11 +1562,44 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ServerValidatorM
this.set('saveConfigsFlag', true);
this.onDoPUTClusterConfigurations();
},
+
+/**
+ * set sink hostnames in configs
+ * @param configs
+ */
+ setSinkHostName: function (configs) {
+ var dbHostPropertyName = null;
+ if (configs.someProperty('name', 'sink_database')) {
+ var sinkDb = configs.findProperty('name', 'sink_database');
+ if (sinkDb.value === 'Existing MSSQL Server database with integrated authentication') {
+ var existingMSSQLServerHost = configs.findProperty('name', 'sink_existing_mssql_server_host');
+ if (existingMSSQLServerHost) {
+ dbHostPropertyName = 'sink_existing_mssql_server_host';
+ }
+ configs = configs.without(configs.findProperty('name', 'sink_existing_mssql_server_2_database'));
+ configs = configs.without(configs.findProperty('name', 'sink_existing_mssql_server_2_host'));
+ } else if (sinkDb.value === 'Existing MSSQL Server database with sql auth') {
+ var existingMSSQL2ServerHost = configs.findProperty('name', 'sink_existing_mssql_server_2_host');
+ if (existingMSSQL2ServerHost) {
+ dbHostPropertyName = 'sink_existing_mssql_server_2_host';
+ }
+ configs = configs.without(configs.findProperty('name', 'sink_existing_mssql_server_database'));
+ configs = configs.without(configs.findProperty('name', 'sink_existing_mssql_server_host'));
+ }
+ }
+ if (dbHostPropertyName) {
+ var sinkHostNameProperty = App.ServiceConfigProperty.create(App.config.get('preDefinedSiteProperties').findProperty('name', 'sink.dbservername'));
+ sinkHostNameProperty.set('value', configs.findProperty('name', dbHostPropertyName).get('value'));
+ configs.pushObject(sinkHostNameProperty);
+ }
+ },
+
/**
* set hive hostnames in configs
* @param configs
*/
setHiveHostName: function (configs) {
+ var dbHostPropertyName = null;
if (configs.someProperty('name', 'hive_database')) {
var hiveDb = configs.findProperty('name', 'hive_database');
if (hiveDb.value === 'New MySQL Database' || hiveDb.value === 'New PostgreSQL Database') {
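
Note: the hunks below also change the host-resolution strategy: instead of renaming the matched property in place (the old `existingMySqlHost.name = 'hive_hostname'`), the code records which property holds the host in `dbHostPropertyName` and, presumably in a later part of the function not shown in this excerpt, creates the `hive_hostname` property from it, mirroring what `setSinkHostName` does with `sink.dbservername` above.
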
@@ -1480,10 +1613,14 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ServerValidatorM
configs = configs.without(configs.findProperty('name', 'hive_existing_oracle_database'));
configs = configs.without(configs.findProperty('name', 'hive_existing_postgresql_host'));
configs = configs.without(configs.findProperty('name', 'hive_existing_postgresql_database'));
+ configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_database'));
+ configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_host'));
+ configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_2_database'));
+ configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_2_host'));
} else if (hiveDb.value === 'Existing MySQL Database') {
var existingMySqlHost = configs.findProperty('name', 'hive_existing_mysql_host');
if (existingMySqlHost) {
- existingMySqlHost.name = 'hive_hostname';
+ dbHostPropertyName = 'hive_existing_mysql_host';
}
configs = configs.without(configs.findProperty('name', 'hive_ambari_host'));
configs = configs.without(configs.findProperty('name', 'hive_ambari_database'));
@@ -1491,10 +1628,14 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ServerValidatorM
configs = configs.without(configs.findProperty('name', 'hive_existing_oracle_database'));
configs = configs.without(configs.findProperty('name', 'hive_existing_postgresql_host'));
configs = configs.without(configs.findProperty('name', 'hive_existing_postgresql_database'));
+ configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_database'));
+ configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_host'));
+ configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_2_database'));
+ configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_2_host'));
} else if (hiveDb.value === Em.I18n.t('services.service.config.hive.oozie.postgresql')) {
var existingPostgreSqlHost = configs.findProperty('name', 'hive_existing_postgresql_host');
if (existingPostgreSqlHost) {
- existingPostgreSqlHost.name = 'hive_hostname';
+ dbHostPropertyName = 'hive_existing_postgresql_host';
}
configs = configs.without(configs.findProperty('name', 'hive_ambari_host'));
configs = configs.without(configs.findProperty('name', 'hive_ambari_database'));
@@ -1502,11 +1643,29 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ServerValidatorM
configs = configs.without(configs.findProperty('name', 'hive_existing_mysql_database'));
configs = configs.without(configs.findProperty('name', 'hive_existing_oracle_host'));
configs = configs.without(configs.findProperty('name', 'hive_existing_oracle_database'));
- }
- else { //existing oracle database
+ configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_database'));
+ configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_host'));
+ configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_2_database'));
+ configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_2_host'));
+ } else if (hiveDb.value === 'Existing Oracle Database') {
var existingOracleHost = configs.findProperty('name', 'hive_existing_oracle_host');
if (existingOracleHost) {
- existingOracleHost.name = 'hive_hostname';
+ dbHostPropertyName = 'hive_existing_oracle_host';
+ }
+ configs = configs.without(configs.findProperty('name', 'hive_ambari_host'));
+ configs = configs.without(configs.findProperty('name', 'hive_ambari_database'));
+ configs = configs.without(configs.findProperty('name', 'hive_existing_mysql_host'));
+ configs = configs.without(configs.findProperty('name', 'hive_existing_mysql_database'));
+ configs = configs.without(configs.findProperty('name', 'hive_existing_postgresql_host'));
+ configs = configs.without(configs.findProperty('name', 'hive_existing_postgresql_database'));
+ configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_database'));
+ configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_host'));
+ configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_2_database'));
+ configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_2_host'));
+ } else if (hiveDb.value === 'Existing MSSQL Server database with integrated authentication') {
+ var existingMSSQLServerHost = configs.findProperty('name', 'hive_existing_mssql_server_host');
+ if (existingMSSQLServerHost) {
+ dbHostPropertyName = 'hive_existing_mssql_server_host';
}
configs = configs.without(configs.findProperty('name', 'hive_ambari_host'));
configs = configs.without(configs.findProperty('name', 'hive_ambari_database'));
@@ -1514,8 +1673,31 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ServerValidatorM
configs = configs.without(configs.findProperty('name', 'hive_existing_mysql_database'));
configs = configs.without(configs.findProperty('name', 'hive_existing_postgresql_host'));
configs = configs.without(configs.findProperty('name', 'hive_existing_postgresql_database'));
+ configs = configs.without(configs.findProperty('name', 'hive_existing_oracle_host'));
+ configs = configs.without(configs.findProperty('name', 'hive_existing_oracle_database'));
+ configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_2_database'));
+ configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_2_host'));
+ } else if (hiveDb.value === 'Existing MSSQL Server database with sql auth') {
+ var existingMSSQL2ServerHost = configs.findProperty('name', 'hive_existing_mssql_server_2_host');
+ if (existingMSSQL2ServerHost) {
+ dbHostPropertyName = 'hive_existing_mssql_server_2_host';
+ }
+ configs = configs.without(configs.findProperty('name', 'hive_ambari_host'));
+ configs = configs.without(configs.findProperty('name', 'hive_ambari_database'));
+ configs = configs.without(configs.findProperty('name', 'hive_existing_mysql_host'));
+ configs = configs.without(configs.findProperty('name', 'hive_existing_mysql_database'));
+ configs = configs.without(configs.findProperty('name', 'hive_existing_postgresql_host'));
+ configs = configs.without(configs.findProperty('name', 'hive_existing_postgresql_database'));
+ configs = configs.without(configs.findProperty('name', 'hive_existing_oracle_host'));
+ configs = configs.without(configs.findProperty('name', 'hive_existing_oracle_database'));
+ configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_database'));
+ configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_host'));
}
-
+ }
+ if (dbHostPropertyName) {
+ var hiveHostNameProperty = App.ServiceConfigProperty.create(App.config.get('preDefinedSiteProperties').findProperty('name', 'hive_hostname'));
+ hiveHostNameProperty.set('value', configs.findProperty('name', dbHostPropertyName).get('value'));
+ configs.pushObject(hiveHostNameProperty);
}
},
@@ -1536,6 +1718,10 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ServerValidatorM
configs = configs.without(configs.findProperty('name', 'oozie_existing_oracle_database'));
configs = configs.without(configs.findProperty('name', 'oozie_existing_postgresql_host'));
configs = configs.without(configs.findProperty('name', 'oozie_existing_postgresql_database'));
+ configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_database'));
+ configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_host'));
+ configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_2_database'));
+ configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_2_host'));
} else if (oozieDb.value === 'New MySQL Database') {
var ambariHost = configs.findProperty('name', 'oozie_ambari_host');
if (ambariHost) {
@@ -1548,6 +1734,10 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ServerValidatorM
configs = configs.without(configs.findProperty('name', 'oozie_derby_database'));
configs = configs.without(configs.findProperty('name', 'oozie_existing_postgresql_host'));
configs = configs.without(configs.findProperty('name', 'oozie_existing_postgresql_database'));
+ configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_database'));
+ configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_host'));
+ configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_2_database'));
+ configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_2_host'));
} else if (oozieDb.value === 'Existing MySQL Database') {
var existingMySqlHost = configs.findProperty('name', 'oozie_existing_mysql_host');
@@ -1561,8 +1751,14 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ServerValidatorM
configs = configs.without(configs.findProperty('name', 'oozie_derby_database'));
configs = configs.without(configs.findProperty('name', 'oozie_existing_postgresql_host'));
configs = configs.without(configs.findProperty('name', 'oozie_existing_postgresql_database'));
+ configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_database'));
+ configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_host'));
+ configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_2_database'));
+ configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_2_host'));
} else if (oozieDb.value === Em.I18n.t('services.service.config.hive.oozie.postgresql')) {
var existingPostgreSqlHost = configs.findProperty('name', 'oozie_existing_postgresql_host');
if (existingPostgreSqlHost) {
dbHostPropertyName = 'oozie_existing_postgresql_host';
}
@@ -1572,8 +1768,12 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ServerValidatorM
configs = configs.without(configs.findProperty('name', 'oozie_existing_mysql_database'));
configs = configs.without(configs.findProperty('name', 'oozie_existing_oracle_host'));
configs = configs.without(configs.findProperty('name', 'oozie_existing_oracle_database'));
+ configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_database'));
+ configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_host'));
+ configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_2_database'));
+ configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_2_host'));
}
- else { //existing oracle database
+ else if (oozieDb.value === 'Existing Oracle Database') {
var existingOracleHost = configs.findProperty('name', 'oozie_existing_oracle_host');
if (existingOracleHost) {
dbHostPropertyName = 'oozie_existing_oracle_host';
@@ -1583,8 +1783,43 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ServerValidatorM
configs = configs.without(configs.findProperty('name', 'oozie_existing_mysql_host'));
configs = configs.without(configs.findProperty('name', 'oozie_existing_mysql_database'));
configs = configs.without(configs.findProperty('name', 'oozie_derby_database'));
+ configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_database'));
+ configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_host'));
+ configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_2_database'));
+ configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_2_host'));
+ } else if (oozieDb.value === 'Existing MSSQL Server database with integrated authentication') {
+      var existingMSSQLServerHost = configs.findProperty('name', 'oozie_existing_mssql_server_host');
+      if (existingMSSQLServerHost) {
+ dbHostPropertyName = 'oozie_existing_mssql_server_host';
+ }
+ configs = configs.without(configs.findProperty('name', 'oozie_ambari_host'));
+ configs = configs.without(configs.findProperty('name', 'oozie_ambari_database'));
+ configs = configs.without(configs.findProperty('name', 'oozie_existing_oracle_host'));
+ configs = configs.without(configs.findProperty('name', 'oozie_existing_oracle_database'));
+ configs = configs.without(configs.findProperty('name', 'oozie_derby_database'));
+ configs = configs.without(configs.findProperty('name', 'oozie_existing_postgresql_host'));
+ configs = configs.without(configs.findProperty('name', 'oozie_existing_postgresql_database'));
+ configs = configs.without(configs.findProperty('name', 'oozie_existing_mysql_host'));
+ configs = configs.without(configs.findProperty('name', 'oozie_existing_mysql_database'));
+ configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_2_database'));
+ configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_2_host'));
+ } else if (oozieDb.value === 'Existing MSSQL Server database with sql auth') {
+      var existingMSSQL2ServerHost = configs.findProperty('name', 'oozie_existing_mssql_server_2_host');
+      if (existingMSSQL2ServerHost) {
+ dbHostPropertyName = 'oozie_existing_mssql_server_2_host';
+ }
+ configs = configs.without(configs.findProperty('name', 'oozie_ambari_host'));
+ configs = configs.without(configs.findProperty('name', 'oozie_ambari_database'));
+ configs = configs.without(configs.findProperty('name', 'oozie_existing_oracle_host'));
+ configs = configs.without(configs.findProperty('name', 'oozie_existing_oracle_database'));
+ configs = configs.without(configs.findProperty('name', 'oozie_derby_database'));
+ configs = configs.without(configs.findProperty('name', 'oozie_existing_postgresql_host'));
+ configs = configs.without(configs.findProperty('name', 'oozie_existing_postgresql_database'));
+ configs = configs.without(configs.findProperty('name', 'oozie_existing_mysql_host'));
+ configs = configs.without(configs.findProperty('name', 'oozie_existing_mysql_database'));
+ configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_database'));
+ configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_host'));
}
-
}
if (dbHostPropertyName) {
@@ -1602,6 +1837,7 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ServerValidatorM
//storedConfigs contains custom configs as well
this.setHiveHostName(configs);
this.setOozieHostName(configs);
+ this.setSinkHostName(configs);
this.formatConfigValues(configs);
var mappedConfigs = App.config.excludeUnsupportedConfigs(this.get('configMapping').all(), App.Service.find().mapProperty('serviceName'));
var allUiConfigs = this.loadUiSideConfigs(mappedConfigs);
@@ -2158,16 +2394,24 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ServerValidatorM
if (serviceName === 'HIVE') {
var hiveDb = configs.findProperty('name', 'hive_database').value;
- if (['Existing MySQL Database', 'Existing Oracle Database', 'Existing PostgreSQL Database'].contains(hiveDb)) {
+ if (['Existing MySQL Database', 'Existing Oracle Database', 'Existing PostgreSQL Database', 'Existing MSSQL Server database with integrated authentication', 'Existing MSSQL Server database with sql auth'].contains(hiveDb)) {
configs.findProperty('name', 'hive_hostname').isVisible = true;
}
}
if (serviceName === 'OOZIE') {
var oozieDb = configs.findProperty('name', 'oozie_database').value;
- if (['Existing MySQL Database', 'Existing Oracle Database', 'Existing PostgreSQL Database'].contains(oozieDb)) {
+ if (['Existing MySQL Database', 'Existing Oracle Database', 'Existing PostgreSQL Database', 'Existing MSSQL Server database with integrated authentication', 'Existing MSSQL Server database with sql auth'].contains(oozieDb)) {
configs.findProperty('name', 'oozie_hostname').isVisible = true;
}
}
+    if (App.get('isHadoopWindowsStack')) {
+ if (serviceName === 'HDFS') {
+ var sinkDB = configs.findProperty('name', 'sink_database').value;
+ if (['Existing MSSQL Server database with integrated authentication', 'Existing MSSQL Server database with sql auth'].contains(sinkDB)) {
+ configs.findProperty('name', 'sink.dbservername').isVisible = true;
+ }
+ }
+ }
},
/**
* set host name(s) property for component
diff --git a/ambari-web/app/controllers/wizard.js b/ambari-web/app/controllers/wizard.js
index d35d357a96..94ac83dd44 100644
--- a/ambari-web/app/controllers/wizard.js
+++ b/ambari-web/app/controllers/wizard.js
@@ -281,7 +281,7 @@ App.WizardController = Em.Controller.extend(App.LocalStorage, {
* Remove all data for installOptions step
*/
clearInstallOptions: function () {
- var installOptions = jQuery.extend({}, this.get('installOptionsTemplate'));
+ var installOptions = this.get('getInstallOptions');
this.set('content.installOptions', installOptions);
this.setDBProperty('installOptions', installOptions);
this.set('content.hosts', {});
@@ -503,6 +503,10 @@ App.WizardController = Em.Controller.extend(App.LocalStorage, {
}, this);
},
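+  /**
+   * Install options template matching the current stack type:
+   * Windows stacks default to manual host registration (no SSH bootstrap)
+   * @return {object}
+   */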
+ getInstallOptions: function() {
+    return jQuery.extend({}, App.get('isHadoopWindowsStack') ? this.get('installWindowsOptionsTemplate') : this.get('installOptionsTemplate'));
+ }.property('App.isHadoopWindowsStack'),
+
installOptionsTemplate: {
hostNames: "", //string
manualInstall: false, //true, false
@@ -514,6 +518,17 @@ App.WizardController = Em.Controller.extend(App.LocalStorage, {
sshUser: "root" //string
},
+ installWindowsOptionsTemplate: {
+ hostNames: "", //string
+ manualInstall: true, //true, false
+ useSsh: false, //bool
+ javaHome: App.defaultJavaHome, //string
+ localRepo: false, //true, false
+ sshKey: "", //string
+ bootRequestId: null, //string
+ sshUser: "" //string
+ },
+
loadedServiceComponents: null,
/**
diff --git a/ambari-web/app/controllers/wizard/step2_controller.js b/ambari-web/app/controllers/wizard/step2_controller.js
index 08dcd12d23..5c38a9a99e 100644
--- a/ambari-web/app/controllers/wizard/step2_controller.js
+++ b/ambari-web/app/controllers/wizard/step2_controller.js
@@ -113,6 +113,10 @@ App.WizardStep2Controller = Em.Controller.extend({
*/
hostsError: null,
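+  /**
+   * SSH-based host registration is not available on Windows stacks,
+   * so agents there have to be registered manually
+   * @type {bool}
+   */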
+ isSSHRegistrationEnabled: function () {
+ return !App.get('isHadoopWindowsStack');
+ }.property('App.isHadoopWindowsStack'),
+
/**
* Error-message if <code>sshKey</code> is empty, null otherwise
* @type {string|null}
@@ -469,7 +473,7 @@ App.WizardStep2Controller = Em.Controller.extend({
* @method manualInstallWarningPopup
*/
manualInstallWarningPopup: function () {
- if (!this.get('content.installOptions.useSsh')) {
+ if (this.get('isSSHRegistrationEnabled') && !this.get('content.installOptions.useSsh')) {
App.ModalPopup.show({
header: Em.I18n.t('common.warning'),
body: Em.I18n.t('installer.step2.manualInstall.info'),
diff --git a/ambari-web/app/controllers/wizard/step4_controller.js b/ambari-web/app/controllers/wizard/step4_controller.js
index 0199bd472e..3a728548a3 100644
--- a/ambari-web/app/controllers/wizard/step4_controller.js
+++ b/ambari-web/app/controllers/wizard/step4_controller.js
@@ -98,6 +98,16 @@ App.WizardStep4Controller = Em.ArrayController.extend({
},
/**
+   * Check whether the stack definition contains any monitoring service (alerting, metrics) that has not been selected
+ * @return {bool}
+ * @method isMonitoringServiceNotSelected
+ */
+ isMonitoringServiceNotSelected: function () {
+    var stackMonitoringServices = this.filterProperty('isMonitoringService', true);
+    return stackMonitoringServices.length > 0 && stackMonitoringServices.someProperty('isSelected', false);
+ },
+
+ /**
* Check whether user turned on monitoring service and go to next step
* @method validateMonitoring
*/
@@ -291,7 +301,7 @@ App.WizardStep4Controller = Em.ArrayController.extend({
},this);
}
},this);
-
+
if (missingDependencies.length > 0) {
for(var i = 0; i < missingDependencies.length; i++) {
this.addValidationError({
diff --git a/ambari-web/app/controllers/wizard/step5_controller.js b/ambari-web/app/controllers/wizard/step5_controller.js
index 768b96f661..33afa00bc7 100644
--- a/ambari-web/app/controllers/wizard/step5_controller.js
+++ b/ambari-web/app/controllers/wizard/step5_controller.js
@@ -595,12 +595,12 @@ App.WizardStep5Controller = Em.Controller.extend(App.BlueprintMixin, {
multipleComponentHasBeenAdded[component.name] = true;
savedComponents.forEach(function(saved) {
- resultComponents.push(self.createComponentInstallationObject(fullComponent, host.fqdn, saved));
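+            // compare saved components against the lower-cased FQDN so host matching is case-insensitive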
+ resultComponents.push(self.createComponentInstallationObject(fullComponent, host.fqdn.toLowerCase(), saved));
});
}
} else {
var savedComponent = masterHosts.findProperty('component', component.name);
- resultComponents.push(self.createComponentInstallationObject(fullComponent, host.fqdn, savedComponent));
+ resultComponents.push(self.createComponentInstallationObject(fullComponent, host.fqdn.toLowerCase(), savedComponent));
}
}
}
diff --git a/ambari-web/app/controllers/wizard/step7_controller.js b/ambari-web/app/controllers/wizard/step7_controller.js
index cfe1888be2..5b2a9046cb 100644
--- a/ambari-web/app/controllers/wizard/step7_controller.js
+++ b/ambari-web/app/controllers/wizard/step7_controller.js
@@ -874,21 +874,33 @@ App.WizardStep7Controller = Em.Controller.extend(App.ServerValidatorMixin, {
},
/**
- * Check if Oozie or Hive use existing database then need
- * to restore missed properties
+ * Check if Oozie, Hive or MetricsSink use an existing database and, if so,
+ * restore the missing properties
*
* @param {Object[]} configs
**/
setServiceDatabaseConfigs: function (configs) {
var serviceNames = this.get('installedServiceNames').filter(function (serviceName) {
- return ['OOZIE', 'HIVE'].contains(serviceName);
+ return ['OOZIE', 'HIVE', 'HDFS'].contains(serviceName);
});
serviceNames.forEach(function (serviceName) {
- var dbTypeConfig = configs.findProperty('name', serviceName.toLowerCase() + '_database');
+ var propertyPrefix = serviceName.toLowerCase();
+      if (serviceName === 'HDFS') propertyPrefix = 'sink';
+ var dbTypeConfig = configs.findProperty('name', propertyPrefix + '_database');
if (!/existing/gi.test(dbTypeConfig.value)) return;
- var dbHostName = serviceName.toLowerCase() + '_hostname';
- var database = dbTypeConfig.value.match(/MySQL|PostgreSQL|Oracle|Derby/gi)[0];
- var existingDBConfig = configs.findProperty('name', serviceName.toLowerCase() + '_existing_' + database.toLowerCase() + '_host');
+ var dbHostName = propertyPrefix + '_hostname';
+ var database = dbTypeConfig.value.match(/MySQL|PostgreSQL|Oracle|Derby|MSSQL/gi)[0];
+ var dbPrefix = database.toLowerCase();
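+      // naming convention for the MSSQL properties: 'mssql_server' = integrated authentication, 'mssql_server_2' = SQL auth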
+      if (database.toLowerCase() === 'mssql') {
+        if (propertyPrefix === 'sink') dbHostName = 'sink.dbservername';
+        if (/integrated/gi.test(dbTypeConfig.value)) {
+          dbPrefix = 'mssql_server';
+        } else {
+          dbPrefix = 'mssql_server_2';
+        }
+      }
+ var propertyName = propertyPrefix + '_existing_' + dbPrefix + '_host';
+ var existingDBConfig = configs.findProperty('name', propertyName);
if (!existingDBConfig.value)
existingDBConfig.value = existingDBConfig.defaultValue = configs.findProperty('name', dbHostName).value;
}, this);
diff --git a/ambari-web/app/controllers/wizard/step8_controller.js b/ambari-web/app/controllers/wizard/step8_controller.js
index 0f593e875a..affad0c212 100644
--- a/ambari-web/app/controllers/wizard/step8_controller.js
+++ b/ambari-web/app/controllers/wizard/step8_controller.js
@@ -226,6 +226,38 @@ App.WizardStep8Controller = Em.Controller.extend(App.AddSecurityConfigs, {
},
/**
+ * Remove unused Sink configs
+ * @param {Ember.Enumerable} configs
+ * @returns {Ember.Enumerable}
+ * @method removeSinkConfigs
+ */
+ removeSinkConfigs: function (configs) {
+ var sinkDb = configs.findProperty('name', 'sink_database');
+ var sinkDbType = configs.findProperty('name', 'sink_database_type');
+ if (sinkDbType) {
+ var sink_properties = Em.A([]);
+
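+      // drop the property pair of the auth mode that was not selected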
+ switch (sinkDb.value) {
+ case 'Existing MSSQL Server database with integrated authentication':
+ configs.findProperty('name', 'sink.dbservername').value = configs.findProperty('name', 'sink_existing_mssql_server_host').value;
+ sinkDbType.value = 'mssql';
+ sink_properties = Em.A(['sink_existing_mssql_server_2_database', 'sink_existing_mssql_server_2_host']);
+ break;
+ case 'Existing MSSQL Server database with sql auth':
+ configs.findProperty('name', 'sink.dbservername').value = configs.findProperty('name', 'sink_existing_mssql_server_2_host').value;
+ sinkDbType.value = 'mssql';
+ sink_properties = Em.A(['sink_existing_mssql_server_database', 'sink_existing_mssql_server_host']);
+ break;
+ }
+
+ sink_properties.forEach(function (property) {
+ configs = configs.without(configs.findProperty('name', property));
+ });
+ }
+ return configs;
+ },
+
+ /**
* Remove unused Hive configs
* @param {Ember.Enumerable} configs
* @returns {Ember.Enumerable}
@@ -244,7 +276,9 @@ App.WizardStep8Controller = Em.Controller.extend(App.AddSecurityConfigs, {
hiveDbType.value = 'mysql';
}
hive_properties = Em.A(['hive_existing_mysql_host', 'hive_existing_mysql_database', 'hive_existing_oracle_host',
- 'hive_existing_oracle_database', 'hive_existing_postgresql_host', 'hive_existing_postgresql_database']);
+ 'hive_existing_oracle_database', 'hive_existing_postgresql_host', 'hive_existing_postgresql_database',
+ 'hive_existing_mssql_server_database', 'hive_existing_mssql_server_host',
+ 'hive_existing_mssql_server_2_database', 'hive_existing_mssql_server_2_host']);
break;
case 'New PostgreSQL Database':
if (configs.someProperty('name', 'hive_ambari_host')) {
@@ -258,19 +292,41 @@ App.WizardStep8Controller = Em.Controller.extend(App.AddSecurityConfigs, {
configs.findProperty('name', 'hive_hostname').value = configs.findProperty('name', 'hive_existing_mysql_host').value;
hiveDbType.value = 'mysql';
hive_properties = Em.A(['hive_ambari_host', 'hive_ambari_database', 'hive_existing_oracle_host',
- 'hive_existing_oracle_database', 'hive_existing_postgresql_host', 'hive_existing_postgresql_database']);
+ 'hive_existing_oracle_database', 'hive_existing_postgresql_host', 'hive_existing_postgresql_database',
+ 'hive_existing_mssql_server_database', 'hive_existing_mssql_server_host',
+ 'hive_existing_mssql_server_2_database', 'hive_existing_mssql_server_2_host']);
break;
case Em.I18n.t('services.service.config.hive.oozie.postgresql'):
configs.findProperty('name', 'hive_hostname').value = configs.findProperty('name', 'hive_existing_postgresql_host').value;
hiveDbType.value = 'postgres';
hive_properties = Em.A(['hive_ambari_host', 'hive_ambari_database', 'hive_existing_oracle_host',
- 'hive_existing_oracle_database', 'hive_existing_mysql_host', 'hive_existing_mysql_database']);
+ 'hive_existing_oracle_database', 'hive_existing_mysql_host', 'hive_existing_mysql_database',
+ 'hive_existing_mssql_server_database', 'hive_existing_mssql_server_host',
+ 'hive_existing_mssql_server_2_database', 'hive_existing_mssql_server_2_host']);
+ break;
+ case 'Existing MSSQL Server database with integrated authentication':
+        configs.findProperty('name', 'hive_hostname').value = configs.findProperty('name', 'hive_existing_mssql_server_host').value;
+        hiveDbType.value = 'mssql';
+        hive_properties = Em.A(['hive_ambari_host', 'hive_ambari_database', 'hive_existing_oracle_host',
+          'hive_existing_oracle_database', 'hive_existing_postgresql_host', 'hive_existing_postgresql_database',
+          'hive_existing_mysql_host', 'hive_existing_mysql_database',
+          'hive_existing_mssql_server_2_database', 'hive_existing_mssql_server_2_host']);
+ break;
+ case 'Existing MSSQL Server database with sql auth':
+        configs.findProperty('name', 'hive_hostname').value = configs.findProperty('name', 'hive_existing_mssql_server_2_host').value;
+        hiveDbType.value = 'mssql';
+        hive_properties = Em.A(['hive_ambari_host', 'hive_ambari_database', 'hive_existing_oracle_host',
+          'hive_existing_oracle_database', 'hive_existing_postgresql_host', 'hive_existing_postgresql_database',
+          'hive_existing_mysql_host', 'hive_existing_mysql_database',
+          'hive_existing_mssql_server_database', 'hive_existing_mssql_server_host']);
break;
default:
configs.findProperty('name', 'hive_hostname').value = configs.findProperty('name', 'hive_existing_oracle_host').value;
hiveDbType.value = 'oracle';
hive_properties = Em.A(['hive_ambari_host', 'hive_ambari_database', 'hive_existing_mysql_host',
- 'hive_existing_mysql_database', 'hive_existing_postgresql_host', 'hive_existing_postgresql_database']);
+ 'hive_existing_mysql_database', 'hive_existing_postgresql_host', 'hive_existing_postgresql_database',
+ 'hive_existing_mssql_server_database', 'hive_existing_mssql_server_host',
+ 'hive_existing_mssql_server_2_database', 'hive_existing_mssql_server_2_host']);
break;
}
@@ -299,25 +355,51 @@ App.WizardStep8Controller = Em.Controller.extend(App.AddSecurityConfigs, {
oozieDbType.value = 'derby';
oozie_properties = Em.A(['oozie_ambari_host', 'oozie_ambari_database', 'oozie_existing_mysql_host',
'oozie_existing_mysql_database', 'oozie_existing_oracle_host', 'oozie_existing_oracle_database',
- 'oozie_existing_postgresql_host', 'oozie_existing_postgresql_database']);
+ 'oozie_existing_postgresql_host', 'oozie_existing_postgresql_database',
+ 'oozie_existing_mssql_server_database', 'oozie_existing_mssql_server_host',
+ 'oozie_existing_mssql_server_2_database', 'oozie_existing_mssql_server_2_host']);
break;
case 'Existing MySQL Database':
configs.findProperty('name', 'oozie_hostname').value = configs.findProperty('name', 'oozie_existing_mysql_host').value;
oozieDbType.value = 'mysql';
oozie_properties = Em.A(['oozie_ambari_host', 'oozie_ambari_database', 'oozie_existing_oracle_host',
- 'oozie_existing_oracle_database', 'oozie_derby_database', 'oozie_existing_postgresql_host', 'oozie_existing_postgresql_database']);
+ 'oozie_existing_oracle_database', 'oozie_derby_database', 'oozie_existing_postgresql_host', 'oozie_existing_postgresql_database',
+ 'oozie_existing_mssql_server_database', 'oozie_existing_mssql_server_host',
+ 'oozie_existing_mssql_server_2_database', 'oozie_existing_mssql_server_2_host']);
break;
case Em.I18n.t('services.service.config.hive.oozie.postgresql'):
configs.findProperty('name', 'oozie_hostname').value = configs.findProperty('name', 'oozie_existing_postgresql_host').value;
oozieDbType.value = 'postgresql';
oozie_properties = Em.A(['oozie_ambari_host', 'oozie_ambari_database', 'oozie_existing_oracle_host',
- 'oozie_existing_oracle_database', 'oozie_existing_mysql_host', 'oozie_existing_mysql_database']);
+ 'oozie_existing_oracle_database', 'oozie_existing_mysql_host', 'oozie_existing_mysql_database',
+ 'oozie_existing_mssql_server_database', 'oozie_existing_mssql_server_host',
+ 'oozie_existing_mssql_server_2_database', 'oozie_existing_mssql_server_2_host']);
+ break;
+ case 'Existing MSSQL Server database with integrated authentication':
+        configs.findProperty('name', 'oozie_hostname').value = configs.findProperty('name', 'oozie_existing_mssql_server_host').value;
+        oozieDbType.value = 'mssql';
+        oozie_properties = Em.A(['oozie_ambari_host', 'oozie_ambari_database', 'oozie_existing_oracle_host',
+          'oozie_existing_oracle_database', 'oozie_existing_postgresql_host', 'oozie_existing_postgresql_database',
+          'oozie_existing_mysql_host', 'oozie_existing_mysql_database', 'oozie_derby_database',
+          'oozie_existing_mssql_server_2_database', 'oozie_existing_mssql_server_2_host']);
+ break;
+ case 'Existing MSSQL Server database with sql auth':
+        configs.findProperty('name', 'oozie_hostname').value = configs.findProperty('name', 'oozie_existing_mssql_server_2_host').value;
+        oozieDbType.value = 'mssql';
+        oozie_properties = Em.A(['oozie_ambari_host', 'oozie_ambari_database', 'oozie_existing_oracle_host',
+          'oozie_existing_oracle_database', 'oozie_existing_postgresql_host', 'oozie_existing_postgresql_database',
+          'oozie_existing_mysql_host', 'oozie_existing_mysql_database', 'oozie_derby_database',
+          'oozie_existing_mssql_server_database', 'oozie_existing_mssql_server_host']);
break;
default:
configs.findProperty('name', 'oozie_hostname').value = configs.findProperty('name', 'oozie_existing_oracle_host').value;
oozieDbType.value = 'oracle';
oozie_properties = Em.A(['oozie_ambari_host', 'oozie_ambari_database', 'oozie_existing_mysql_host',
- 'oozie_existing_mysql_database', 'oozie_derby_database', 'oozie_existing_postgresql_host', 'oozie_existing_postgresql_database']);
+ 'oozie_existing_mysql_database', 'oozie_derby_database', 'oozie_existing_postgresql_host',
+ 'oozie_existing_postgresql_database', 'oozie_existing_mssql_server_database', 'oozie_existing_mssql_server_host',
+ 'oozie_existing_mssql_server_2_database', 'oozie_existing_mssql_server_2_host']);
break;
}
oozie_properties.forEach(function (property) {
@@ -337,6 +419,9 @@ App.WizardStep8Controller = Em.Controller.extend(App.AddSecurityConfigs, {
if (configs.someProperty('name', 'hive_database')) {
configs = this.removeHiveConfigs(configs);
}
+ if (configs.someProperty('name', 'sink_database')) {
+ configs = this.removeSinkConfigs(configs);
+ }
if (configs.someProperty('name', 'oozie_database')) {
configs = this.removeOozieConfigs(configs);
}
@@ -579,7 +664,7 @@ App.WizardStep8Controller = Em.Controller.extend(App.AddSecurityConfigs, {
base_url: repo.get('baseUrl'),
os_type: repo.get('osType'),
repo_id: repo.get('repoId')
- }));
+ }));
}, this);
}
}, this);
@@ -719,14 +804,33 @@ App.WizardStep8Controller = Em.Controller.extend(App.AddSecurityConfigs, {
hostsCount + ' ' + Em.I18n.t('installer.step8.hosts'));
},
+  /**
+   * Set displayed MetricsSink DB value based on DB type
+   * @returns {string|undefined}
+   * @method loadSinkDbValue
+   */
+ loadSinkDbValue: function () {
+ var db, serviceConfigProperties = this.get('wizardController').getDBProperty('serviceConfigProperties'),
+ sinkDb = serviceConfigProperties.findProperty('name', 'sink_database');
+ if (sinkDb.value === 'Existing MSSQL Server database with integrated authentication') {
+ db = serviceConfigProperties.findProperty('name', 'sink_existing_mssql_server_database');
+ return db.value + ' (' + sinkDb.value + ')';
+ }
+    else if (sinkDb.value === 'Existing MSSQL Server database with sql auth') {
+      db = serviceConfigProperties.findProperty('name', 'sink_existing_mssql_server_2_database');
+      return db.value + ' (' + sinkDb.value + ')';
+    }
+ },
+
/**
   * Set displayed Hive DB value based on DB type
* @param {Ember.Object} dbComponent
* @method loadHiveDbValue
*/
loadHiveDbValue: function () {
- var db,
- serviceConfigPreoprties = this.get('wizardController').getDBProperty('serviceConfigProperties'),
+ var db, serviceConfigPreoprties = this.get('wizardController').getDBProperty('serviceConfigProperties'),
hiveDb = serviceConfigPreoprties.findProperty('name', 'hive_database');
if (hiveDb.value === 'New MySQL Database') {
return 'MySQL (New Database)';
@@ -743,9 +847,22 @@ App.WizardStep8Controller = Em.Controller.extend(App.AddSecurityConfigs, {
db = serviceConfigPreoprties.findProperty('name', 'hive_existing_postgresql_database');
return db.value + ' (' + hiveDb.value + ')';
}
- else { // existing oracle database
- db = serviceConfigPreoprties.findProperty('name', 'hive_existing_oracle_database');
- return db.value + ' (' + hiveDb.value + ')';
+    else if (hiveDb.value === 'Existing MSSQL Server database with integrated authentication') {
+      db = serviceConfigPreoprties.findProperty('name', 'hive_existing_mssql_server_database');
+      return db.value + ' (' + hiveDb.value + ')';
+    }
+    else if (hiveDb.value === 'Existing MSSQL Server database with sql auth') {
+      db = serviceConfigPreoprties.findProperty('name', 'hive_existing_mssql_server_2_database');
+      return db.value + ' (' + hiveDb.value + ')';
+    }
+    else { // existing oracle database
+      db = serviceConfigPreoprties.findProperty('name', 'hive_existing_oracle_database');
+      return db.value + ' (' + hiveDb.value + ')';
}
}
}
@@ -802,9 +919,22 @@ App.WizardStep8Controller = Em.Controller.extend(App.AddSecurityConfigs, {
db = this.get('wizardController').getDBProperty('serviceConfigProperties').findProperty('name', 'oozie_existing_postgresql_database');
return db.value + ' (' + oozieDb.value + ')';
}
- else { // existing oracle database
- db = this.get('wizardController').getDBProperty('serviceConfigProperties').findProperty('name', 'oozie_existing_oracle_database');
- return db.value + ' (' + oozieDb.value + ')';
+    else if (oozieDb.value === 'Existing MSSQL Server database with integrated authentication') {
+      db = this.get('wizardController').getDBProperty('serviceConfigProperties').findProperty('name', 'oozie_existing_mssql_server_database');
+      return db.value + ' (' + oozieDb.value + ')';
+    }
+    else if (oozieDb.value === 'Existing MSSQL Server database with sql auth') {
+      db = this.get('wizardController').getDBProperty('serviceConfigProperties').findProperty('name', 'oozie_existing_mssql_server_2_database');
+      return db.value + ' (' + oozieDb.value + ')';
+    }
+    else { // existing oracle database
+      db = this.get('wizardController').getDBProperty('serviceConfigProperties').findProperty('name', 'oozie_existing_oracle_database');
+      return db.value + ' (' + oozieDb.value + ')';
}
}
}
diff --git a/ambari-web/app/controllers/wizard/step9_controller.js b/ambari-web/app/controllers/wizard/step9_controller.js
index adfe292c1c..93372c62aa 100644
--- a/ambari-web/app/controllers/wizard/step9_controller.js
+++ b/ambari-web/app/controllers/wizard/step9_controller.js
@@ -487,7 +487,7 @@ App.WizardStep9Controller = Em.Controller.extend({
data = {
"context": Em.I18n.t("requestInfo.startAddedServices"),
"ServiceInfo": { "state": "STARTED" },
- "urlParams": "ServiceInfo/state=INSTALLED&ServiceInfo/service_name.in(" + servicesList.join(",") + ")&params/reconfigure_client=false"
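+        // run_smoke_test=true additionally requests smoke tests for the newly started services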
+ "urlParams": "ServiceInfo/state=INSTALLED&ServiceInfo/service_name.in(" + servicesList.join(",") + ")&params/run_smoke_test=true&params/reconfigure_client=false"
};
break;
default:
diff --git a/ambari-web/app/data/HDP2/site_properties.js b/ambari-web/app/data/HDP2/site_properties.js
index f132b63178..e9b82345a5 100644
--- a/ambari-web/app/data/HDP2/site_properties.js
+++ b/ambari-web/app/data/HDP2/site_properties.js
@@ -47,6 +47,203 @@ module.exports =
"index": 3
},
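+  // MetricsSink (HDFS) database properties for Windows stacks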
{
+ "id": "puppet var",
+ "name": "hadoop.user.name",
+ "displayName": "Hadoop User Name",
+ "description": "User to run Hadoop services under",
+ "defaultValue": "hadoop",
+ "isReconfigurable": false,
+ "displayType": "user",
+ "isOverridable": false,
+ "isVisible": App.get('isHadoopWindowsStack'),
+ "serviceName": "MISC",
+ "filename": "cluster-env.xml",
+ "category": "Users and Groups",
+ "belongsToService": ["HDFS"],
+ "index": 0
+ },
+ {
+ "id": "puppet var",
+ "name": "hadoop.user.password",
+ "displayName": "Hadoop User Password",
+ "description": "Password for hadoop user",
+ "isReconfigurable": false,
+ "displayType": "password",
+ "isOverridable": false,
+ "isVisible": App.get('isHadoopWindowsStack'),
+ "serviceName": "MISC",
+ "filename": "cluster-env.xml",
+ "category": "Users and Groups",
+ "belongsToService": ["HDFS"],
+ "index": 1
+ },
+ {
+ "id": "puppet var",
+ "name": "hadoop_host",
+ "displayName": "Hadoop host",
+ "value": "",
+ "defaultValue": "",
+ "description": "A hadoop host from where metrics will be uploaded",
+ "displayType": "masterHost",
+ "isOverridable": false,
+ "isVisible": true,
+ "isRequiredByAgent": false,
+ "serviceName": "HDFS",
+ "category": "MetricsSink",
+ "index": 0
+ },
+ {
+ "id": "puppet var",
+ "name": "sink_existing_mssql_server_database",
+ "displayName": "Database Type",
+ "value": "",
+ "defaultValue": "MSSQL",
+ "description": "Using an existing MSSQL database for Metrics Sink",
+ "displayType": "masterHost",
+ "isOverridable": false,
+ "isVisible": false,
+ "isReconfigurable": false,
+ "serviceName": "HDFS",
+ "category": "MetricsSink",
+ "index": 1
+ },
+ {
+ "id": "puppet var",
+ "name": "sink_existing_mssql_server_2_database",
+ "displayName": "Database Type",
+ "value": "",
+ "defaultValue": "MSSQL",
+ "description": "Using an existing MSSQL database for Metrics Sink",
+ "displayType": "masterHost",
+ "isOverridable": false,
+ "isVisible": false,
+ "isReconfigurable": false,
+ "serviceName": "HDFS",
+ "category": "MetricsSink",
+ "index": 1
+ },
+ {
+ "id": "puppet var",
+ "name": "sink_database",
+ "displayName": "Metrics Sink Database",
+ "value": "",
+ "defaultValue": "Existing MSSQL Server database with sql auth",
+ "options": [
+ {
+ displayName: 'Existing MSSQL Server database with integrated authentication',
+ foreignKeys: ['sink_existing_mssql_server_database', 'sink_existing_mssql_server_host']
+ },
+ {
+ displayName: 'Existing MSSQL Server database with sql auth',
+ foreignKeys: ['sink_existing_mssql_server_2_database', 'sink_existing_mssql_server_2_host']
+ }
+ ],
+ "displayType": "radio button",
+ "isReconfigurable": false,
+ "radioName": "sink-database",
+ "isOverridable": false,
+ "isVisible": true,
+ "serviceName": "HDFS",
+ "category": "MetricsSink",
+ "index": 2
+ },
+ {
+ "id": "site property",
+ "name": "sink.dbservername",
+ "displayName": "Database Host",
+ "description": "Specify the host on which the database is hosted",
+ "defaultValue": "",
+ "isReconfigurable": true,
+ "displayType": "host",
+ "isOverridable": false,
+ "isObserved": true,
+ "isVisible": false,
+ "category": "MetricsSink",
+ "serviceName": "HDFS",
+ "index": 2
+ },
+ {
+ "id": "puppet var",
+ "name": "sink_existing_mssql_server_host",
+ "displayName": "Database Host",
+ "description": "Specify the host on which the existing database is hosted",
+ "defaultValue": "",
+ "displayType": "host",
+ "isOverridable": false,
+ "isRequiredByAgent": false,
+ "isVisible": false,
+ "isObserved": true,
+ "serviceName": "HDFS",
+ "category": "MetricsSink",
+ "index": 3
+ },
+ {
+ "id": "puppet var",
+ "name": "sink_existing_mssql_server_2_host",
+ "displayName": "Database Host",
+ "description": "Specify the host on which the existing database is hosted",
+ "defaultValue": "",
+ "displayType": "host",
+ "isOverridable": false,
+ "isRequiredByAgent": false,
+ "isVisible": false,
+ "isObserved": true,
+ "serviceName": "HDFS",
+ "category": "MetricsSink",
+ "index": 3
+ },
+ {
+ "id": "site property",
+ "name": "sink.db.schema.name",
+ "displayName": "Database Name",
+ "isOverridable": false,
+ "displayType": "host",
+ "isObserved": true,
+ "category": "MetricsSink",
+ "serviceName": "HDFS",
+ "index": 4
+ },
+ {
+ "id": "site property",
+ "name": "sink.dblogin",
+ "displayName": "Database Username",
+ "isOverridable": false,
+ "displayType": "user",
+ "category": "MetricsSink",
+ "serviceName": "HDFS",
+ "index": 5
+ },
+ {
+ "id": "site property",
+ "name": "sink.dbpassword",
+ "displayName": "Database Password",
+ "isOverridable": false,
+ "displayType": "password",
+ "category": "MetricsSink",
+ "serviceName": "HDFS",
+ "index": 6
+ },
+ {
+ "id": "site property",
+ "name": "sink.jdbc.driver",
+ "displayName": "JDBC Driver Class",
+ "isOverridable": false,
+ "category": "MetricsSink",
+ "serviceName": "HDFS",
+ "index": 7
+ },
+ {
+ "id": "site property",
+ "name": "sink.jdbc.url",
+ "displayName": "Database URL",
+ "isOverridable": false,
+ "displayType": "advanced",
+ "category": "MetricsSink",
+ "serviceName": "HDFS",
+ "index": 8
+ },
+
+ {
"id": "site property",
"name": "dfs.namenode.name.dir",
"displayName": "NameNode directories",
@@ -450,7 +647,7 @@ module.exports =
"name": "oozie.service.JPAService.jdbc.username",
"displayName": "Database Username",
"isOverridable": false,
- "displayType": "host",
+ "displayType": "user",
"category": "OOZIE_SERVER",
"serviceName": "OOZIE",
"filename": "oozie-site.xml",
@@ -516,7 +713,7 @@ module.exports =
"id": "site property",
"name": "javax.jdo.option.ConnectionUserName",
"displayName": "Database Username",
- "displayType": "host",
+ "displayType": "user",
"isOverridable": false,
"category": "HIVE_METASTORE",
"serviceName": "HIVE",
@@ -2890,6 +3087,38 @@ module.exports =
"category": "HIVE_METASTORE",
"index": 1
},
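+  // for existing MSSQL Server (integrated auth uses 'mssql_server', SQL auth uses 'mssql_server_2')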
+ {
+ "id": "puppet var",
+ "name": "hive_existing_mssql_server_database",
+ "displayName": "Database Type",
+ "value": "",
+ "defaultValue": "MSSQL",
+ "description": "Using an existing MSSQL database for Hive Metastore",
+ "displayType": "masterHost",
+ "isOverridable": false,
+ "isVisible": false,
+ "isReconfigurable": false,
+ "serviceName": "HIVE",
+ "filename": "hive-env.xml",
+ "category": "HIVE_METASTORE",
+ "index": 1
+ },
+ {
+ "id": "puppet var",
+ "name": "hive_existing_mssql_server_2_database",
+ "displayName": "Database Type",
+ "value": "",
+ "defaultValue": "MSSQL",
+ "description": "Using an existing MSSQL database for Hive Metastore",
+ "displayType": "masterHost",
+ "isOverridable": false,
+ "isVisible": false,
+ "isReconfigurable": false,
+ "serviceName": "HIVE",
+ "filename": "hive-env.xml",
+ "category": "HIVE_METASTORE",
+ "index": 1
+ },
// for existing PostgreSQL
{
"id": "puppet var",
@@ -2934,6 +3163,7 @@ module.exports =
"description": "MySQL will be installed by Ambari",
"displayType": "masterHost",
"isOverridable": false,
+ "isReconfigurable": false,
"isVisible": false,
"serviceName": "HIVE",
"filename": "hive-env.xml",
@@ -2949,19 +3179,33 @@ module.exports =
"options": [
{
displayName: 'New MySQL Database',
- foreignKeys: ['hive_ambari_database', 'hive_ambari_host']
+ foreignKeys: ['hive_ambari_database', 'hive_ambari_host'],
+ hidden: App.get('isHadoopWindowsStack')
},
{
displayName: 'Existing MySQL Database',
- foreignKeys: ['hive_existing_mysql_database', 'hive_existing_mysql_host']
+ foreignKeys: ['hive_existing_mysql_database', 'hive_existing_mysql_host'],
+ hidden: App.get('isHadoopWindowsStack')
},
{
displayName: 'Existing PostgreSQL Database',
- foreignKeys: ['hive_existing_postgresql_database', 'hive_existing_postgresql_host']
+ foreignKeys: ['hive_existing_postgresql_database', 'hive_existing_postgresql_host'],
+ hidden: App.get('isHadoopWindowsStack')
},
{
displayName: 'Existing Oracle Database',
- foreignKeys: ['hive_existing_oracle_database', 'hive_existing_oracle_host']
+ foreignKeys: ['hive_existing_oracle_database', 'hive_existing_oracle_host'],
+ hidden: App.get('isHadoopWindowsStack')
+ },
+ {
+ displayName: 'Existing MSSQL Server database with integrated authentication',
+ foreignKeys: ['hive_existing_mssql_server_database', 'hive_existing_mssql_server_host'],
+ hidden: !App.get('isHadoopWindowsStack')
+ },
+ {
+ displayName: 'Existing MSSQL Server database with sql auth',
+ foreignKeys: ['hive_existing_mssql_server_2_database', 'hive_existing_mssql_server_2_host'],
+ hidden: !App.get('isHadoopWindowsStack')
}
],
"description": "MySQL will be installed by Ambari",
@@ -3060,6 +3304,38 @@ module.exports =
},
{
"id": "puppet var",
+ "name": "hive_existing_mssql_server_host",
+ "displayName": "Database Host",
+ "description": "Specify the host on which the existing database is hosted",
+ "defaultValue": "",
+ "displayType": "host",
+ "isOverridable": false,
+ "isRequiredByAgent": false,
+ "isVisible": false,
+ "isObserved": true,
+ "serviceName": "HIVE",
+ "filename": "hive-env.xml",
+ "category": "HIVE_METASTORE",
+ "index": 3
+ },
+ {
+ "id": "puppet var",
+ "name": "hive_existing_mssql_server_2_host",
+ "displayName": "Database Host",
+ "description": "Specify the host on which the existing database is hosted",
+ "defaultValue": "",
+ "displayType": "host",
+ "isOverridable": false,
+ "isRequiredByAgent": false,
+ "isVisible": false,
+ "isObserved": true,
+ "serviceName": "HIVE",
+ "filename": "hive-env.xml",
+ "category": "HIVE_METASTORE",
+ "index": 3
+ },
+ {
+ "id": "puppet var",
"name": "hive_metastore_port",
"displayName": "Hive metastore port",
"description": "",
@@ -3258,6 +3534,7 @@ module.exports =
"description": "MySQL will be installed by Ambari",
"displayType": "masterHost",
"isVisible": false,
+ "isReconfigurable": false,
"isOverridable": false,
"serviceName": "OOZIE",
"filename": "oozie-env.xml",
@@ -3300,6 +3577,38 @@ module.exports =
},
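+  // for existing MSSQL Server (Oozie variants, same convention as the Hive properties above)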
{
"id": "puppet var",
+ "name": "oozie_existing_mssql_server_database",
+ "displayName": "Database Type",
+ "value": "",
+ "defaultValue": "MSSQL",
+ "description": "Using an existing MSSQL database with integrated authentication for Oozie Metastore",
+ "displayType": "masterHost",
+ "isVisible": false,
+ "isReconfigurable": false,
+ "isOverridable": false,
+ "serviceName": "OOZIE",
+ "filename": "oozie-env.xml",
+ "category": "OOZIE_SERVER",
+ "index": 1
+ },
+ {
+ "id": "puppet var",
+ "name": "oozie_existing_mssql_server_2_database",
+ "displayName": "Database Type",
+ "value": "",
+ "defaultValue": "MSSQL",
+ "description": "Using an existing MSSQL database with sql auth for Oozie Metastore",
+ "displayType": "masterHost",
+ "isVisible": false,
+ "isReconfigurable": false,
+ "isOverridable": false,
+ "serviceName": "OOZIE",
+ "filename": "oozie-env.xml",
+ "category": "OOZIE_SERVER",
+ "index": 1
+ },
+ {
+ "id": "puppet var",
"name": "oozie_database",
"displayName": "Oozie Database",
"value": "",
@@ -3307,19 +3616,33 @@ module.exports =
"options": [
{
displayName: 'New Derby Database',
- foreignKeys: ['oozie_derby_database']
+ foreignKeys: ['oozie_derby_database'],
+ hidden: App.get('isHadoopWindowsStack')
},
{
displayName: 'Existing MySQL Database',
- foreignKeys: ['oozie_existing_mysql_database', 'oozie_existing_mysql_host']
+ foreignKeys: ['oozie_existing_mysql_database', 'oozie_existing_mysql_host'],
+ hidden: App.get('isHadoopWindowsStack')
},
{
displayName: 'Existing PostgreSQL Database',
- foreignKeys: ['oozie_existing_postgresql_database', 'oozie_existing_postgresql_host']
+ foreignKeys: ['oozie_existing_postgresql_database', 'oozie_existing_postgresql_host'],
+ hidden: App.get('isHadoopWindowsStack')
},
{
displayName: 'Existing Oracle Database',
- foreignKeys: ['oozie_existing_oracle_database', 'oozie_existing_oracle_host']
+ foreignKeys: ['oozie_existing_oracle_database', 'oozie_existing_oracle_host'],
+ hidden: App.get('isHadoopWindowsStack')
+ },
+ {
+ displayName: 'Existing MSSQL Server database with integrated authentication',
+ foreignKeys: ['oozie_existing_mssql_server_database', 'oozie_existing_mssql_server_host'],
+ hidden: !App.get('isHadoopWindowsStack')
+ },
+ {
+ displayName: 'Existing MSSQL Server database with sql auth',
+ foreignKeys: ['oozie_existing_mssql_server_2_database', 'oozie_existing_mssql_server_2_host'],
+ hidden: !App.get('isHadoopWindowsStack')
}
],
"description": "Current Derby Database will be installed by Ambari",
@@ -3428,6 +3751,38 @@ module.exports =
},
{
"id": "puppet var",
+ "name": "oozie_existing_mssql_server_host",
+ "displayName": "Database Host",
+ "description": "Specify the host on which the existing database is hosted",
+ "defaultValue": "",
+ "displayType": "host",
+ "isOverridable": false,
+ "isRequiredByAgent": false,
+ "isVisible": false,
+ "isObserved": true,
+ "serviceName": "OOZIE",
+ "filename": "oozie-env.xml",
+ "category": "OOZIE_SERVER",
+ "index": 3
+ },
+ {
+ "id": "puppet var",
+ "name": "oozie_existing_mssql_server_2_host",
+ "displayName": "Database Host",
+ "description": "Specify the host on which the existing database is hosted",
+ "defaultValue": "",
+ "displayType": "host",
+ "isOverridable": false,
+ "isRequiredByAgent": false,
+ "isVisible": false,
+ "isObserved": true,
+ "serviceName": "OOZIE",
+ "filename": "oozie-env.xml",
+ "category": "OOZIE_SERVER",
+ "index": 3
+ },
+ {
+ "id": "puppet var",
"name": "oozie_log_dir",
"displayName": "Oozie Log Dir",
"description": "Directory for oozie logs",
@@ -3883,13 +4238,13 @@ module.exports =
{
"id": "puppet var",
"name": "proxyuser_group",
- "displayName": "Proxy group for Hive, WebHCat, Oozie and Falcon",
+ "displayName": "Proxy group for Hive, Oozie and Falcon",
"description": "",
"defaultValue": "users",
"isReconfigurable": false,
"displayType": "user",
"isOverridable": false,
- "isVisible": true,
+ "isVisible": !App.get('isHadoopWindowsStack'),
"filename": "hadoop-env.xml",
"serviceName": "MISC",
"category": "Users and Groups",
@@ -3919,12 +4274,12 @@ module.exports =
"isReconfigurable": false,
"displayType": "user",
"isOverridable": false,
- "isVisible": true,
+ "isVisible": !App.get('isHadoopWindowsStack'),
"serviceName": "MISC",
"filename": "hadoop-env.xml",
"category": "Users and Groups",
"belongsToService": ["HDFS"],
- "index": 1
+ "index": 2
},
{
"id": "puppet var",
@@ -3935,12 +4290,12 @@ module.exports =
"isReconfigurable": false,
"displayType": "user",
"isOverridable": false,
- "isVisible": true,
+ "isVisible": !App.get('isHadoopWindowsStack'),
"serviceName": "MISC",
"filename": "mapred-env.xml",
"category": "Users and Groups",
"belongsToService": ["MAPREDUCE2"],
- "index": 2
+ "index": 3
},
{
"id": "puppet var",
@@ -3951,12 +4306,12 @@ module.exports =
"isReconfigurable": false,
"displayType": "user",
"isOverridable": false,
- "isVisible": true,
+ "isVisible": !App.get('isHadoopWindowsStack'),
"serviceName": "MISC",
"filename": "yarn-env.xml",
"category": "Users and Groups",
"belongsToService": ["YARN"],
- "index": 3
+ "index": 4
},
{
"id": "puppet var",
@@ -3967,12 +4322,12 @@ module.exports =
"isReconfigurable": false,
"displayType": "user",
"isOverridable": false,
- "isVisible": true,
+ "isVisible": !App.get('isHadoopWindowsStack'),
"serviceName": "MISC",
"filename": "hbase-env.xml",
"category": "Users and Groups",
"belongsToService": ["HBASE"],
- "index": 4
+ "index": 5
},
{
"id": "puppet var",
@@ -3983,12 +4338,12 @@ module.exports =
"isReconfigurable": false,
"displayType": "user",
"isOverridable": false,
- "isVisible": true,
+ "isVisible": !App.get('isHadoopWindowsStack'),
"serviceName": "MISC",
"filename": "hive-env.xml",
"category": "Users and Groups",
"belongsToService": ["HIVE"],
- "index": 5
+ "index": 6
},
{
"id": "puppet var",
@@ -3999,7 +4354,7 @@ module.exports =
"isReconfigurable": false,
"displayType": "user",
"isOverridable": false,
- "isVisible": true,
+ "isVisible": !App.get('isHadoopWindowsStack'),
"serviceName": "MISC",
"filename": "hive-env.xml",
"category": "Users and Groups",
@@ -4015,7 +4370,7 @@ module.exports =
"isReconfigurable": false,
"displayType": "user",
"isOverridable": false,
- "isVisible": true,
+ "isVisible": !App.get('isHadoopWindowsStack'),
"serviceName": "MISC",
"filename": "hive-env.xml",
"category": "Users and Groups",
@@ -4031,12 +4386,12 @@ module.exports =
"isReconfigurable": false,
"displayType": "user",
"isOverridable": false,
- "isVisible": true,
+ "isVisible": !App.get('isHadoopWindowsStack'),
"serviceName": "MISC",
"filename": "oozie-env.xml",
"category": "Users and Groups",
"belongsToService": ["OOZIE"],
- "index": 8
+ "index": 9
},
{
"id": "puppet var",
@@ -4047,12 +4402,12 @@ module.exports =
"isReconfigurable": false,
"displayType": "user",
"isOverridable": false,
- "isVisible": true,
+ "isVisible": !App.get('isHadoopWindowsStack'),
"serviceName": "MISC",
"filename": "falcon-env.xml",
"category": "Users and Groups",
"belongsToService": ["FALCON"],
- "index": 10
+ "index": 11
},
{
"id": "puppet var",
@@ -4063,12 +4418,12 @@ module.exports =
"isReconfigurable": false,
"displayType": "user",
"isOverridable": false,
- "isVisible": true,
+ "isVisible": !App.get('isHadoopWindowsStack'),
"serviceName": "MISC",
"filename": "storm-env.xml",
"category": "Users and Groups",
"belongsToService": ["STORM"],
- "index": 9
+ "index": 10
},
{
"id": "puppet var",
@@ -4079,12 +4434,12 @@ module.exports =
"isReconfigurable": false,
"displayType": "user",
"isOverridable": false,
- "isVisible": true,
+ "isVisible": !App.get('isHadoopWindowsStack'),
"serviceName": "MISC",
"filename": "zookeeper-env.xml",
"category": "Users and Groups",
"belongsToService": ["ZOOKEEPER"],
- "index": 11
+ "index": 12
},
{
"id": "puppet var",
@@ -4111,7 +4466,7 @@ module.exports =
"isReconfigurable": false,
"displayType": "user",
"isOverridable": false,
- "isVisible": true,
+ "isVisible": !App.get('isHadoopWindowsStack'),
"serviceName": "MISC",
"filename": "ganglia-env.xml",
"category": "Users and Groups",
@@ -4127,7 +4482,7 @@ module.exports =
"isReconfigurable": false,
"displayType": "advanced",
"isOverridable": false,
- "isVisible": false,
+ "isVisible": !App.get('isHadoopWindowsStack'),
"serviceName": "MISC",
"filename": "ganglia-env.xml",
"category": "Users and Groups",
@@ -4142,12 +4497,12 @@ module.exports =
"isReconfigurable": false,
"displayType": "user",
"isOverridable": false,
- "isVisible": true,
+ "isVisible": !App.get('isHadoopWindowsStack'),
"serviceName": "MISC",
"filename": "nagios-env.xml",
"category": "Users and Groups",
"belongsToService": ["NAGIOS"],
- "index": 14
+ "index": 15
},
{
"id": "puppet var",
@@ -4158,12 +4513,12 @@ module.exports =
"isReconfigurable": false,
"displayType": "user",
"isOverridable": false,
- "isVisible": true,
+ "isVisible": !App.get('isHadoopWindowsStack'),
"serviceName": "MISC",
"filename": "nagios-env.xml",
"category": "Users and Groups",
"belongsToService": ["NAGIOS"],
- "index": 13
+ "index": 14
},
{
"id": "puppet var",
@@ -4174,6 +4529,7 @@ module.exports =
"isReconfigurable": false,
"displayType": "user",
"isOverridable": false,
+ "isVisible": !App.get('isHadoopWindowsStack'),
"serviceName": "MISC",
"filename": "cluster-env.xml",
"category": "Users and Groups",
@@ -4188,12 +4544,12 @@ module.exports =
"isReconfigurable": false,
"displayType": "user",
"isOverridable": false,
- "isVisible": true,
+ "isVisible": !App.get('isHadoopWindowsStack'),
"serviceName": "MISC",
"filename": "tez-env.xml",
"category": "Users and Groups",
"belongsToService": ["TEZ"],
- "index": 15
+ "index": 16
},
{
"id": "puppet var",
@@ -4204,7 +4560,7 @@ module.exports =
"isReconfigurable": false,
"displayType": "user",
"isOverridable": false,
- "isVisible": true,
+ "isVisible": !App.get('isHadoopWindowsStack'),
"serviceName": "MISC",
"filename": "cluster-env.xml",
"category": "Users and Groups",
@@ -4222,7 +4578,7 @@ module.exports =
"filename": "sqoop-env.xml",
"category": "Users and Groups",
"belongsToService": ["SQOOP"],
- "index": 17
+ "index": 18
},
{
"id": "puppet var",
@@ -4290,7 +4646,7 @@ module.exports =
"displayType": "checkbox",
"isReconfigurable": true,
"isOverridable": false,
- "isVisible": true,
+ "isVisible": !App.get('isHadoopWindowsStack'),
"serviceName": "MISC",
"filename": "cluster-env.xml",
"category": "Users and Groups"
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index 76dc25f145..94382b0438 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -300,7 +300,7 @@ Em.I18n.translations = {
'login.error.bad.connection':'Unable to connect to Ambari Server. Confirm Ambari Server is running and you can reach Ambari Server from this machine.',
'graphs.noData.title': 'No Data',
- 'graphs.noData.message': 'There was no data available. Possible reasons include inaccessible Ganglia service.',
+ 'graphs.noData.message': 'There was no data available. Possible reasons include inaccessible/unsupported metrics service.',
'graphs.noDataAtTime.message': 'No available data for the time period.',
'graphs.error.title': 'Error',
'graphs.error.message': 'There was a problem getting data for the chart ({0}: {1})',
diff --git a/ambari-web/app/models/quick_links.js b/ambari-web/app/models/quick_links.js
index a4d6f5e3d8..260f4a74f7 100644
--- a/ambari-web/app/models/quick_links.js
+++ b/ambari-web/app/models/quick_links.js
@@ -388,10 +388,9 @@ App.QuickLinks.FIXTURES = [
url:'%@://%@:%@/',
service_id: 'STORM',
template:'%@://%@:%@/',
- http_config: 'stormuiserver_host',
- https_config: 'stormuiserver_host',
+ http_config: 'ui.port',
site: 'storm-site',
- regex: portRegex,
+ regex: '^(\\d+)$',
default_http_port: 8744
},
{
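With the Storm quick link now reading ui.port from storm-site and validating it with '^(\\d+)$', the URL assembly reduces to formatting the template with a protocol, a host, and the digit-only port. A rough, runnable sketch of that flow (the real substitution lives in quick_view_link_view.js and is only partly shown in this patch):

    var template = '%@://%@:%@/';

    function resolveStormUiUrl(protocol, host, uiPortValue, defaultPort) {
      // ui.port is expected to be a bare port number, so the new regex
      // simply confirms it is all digits before it is used.
      var m = String(uiPortValue).match(/^(\d+)$/);
      var port = m ? m[1] : defaultPort;
      return template.replace('%@', protocol)
                     .replace('%@', host)
                     .replace('%@', port);
    }

    console.log(resolveStormUiUrl('http', 'storm-host', '8744', 8744));
    // -> http://storm-host:8744/
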
diff --git a/ambari-web/app/models/service_config.js b/ambari-web/app/models/service_config.js
index 300c2c68b1..36ac18081e 100644
--- a/ambari-web/app/models/service_config.js
+++ b/ambari-web/app/models/service_config.js
@@ -466,6 +466,9 @@ App.ServiceConfigProperty = Ember.Object.extend({
case 'oozie_ambari_host':
this.set('value', masterComponentHostsInDB.findProperty('component', 'OOZIE_SERVER').hostName);
break;
+ case 'hadoop_host':
+ this.set('value', masterComponentHostsInDB.filterProperty('component', 'NAMENODE').mapProperty('hostName'));
+ break;
case 'storm.zookeeper.servers':
case 'zookeeperserver_hosts':
this.set('value', masterComponentHostsInDB.filterProperty('component', 'ZOOKEEPER_SERVER').mapProperty('hostName'));
@@ -477,13 +480,19 @@ App.ServiceConfigProperty = Ember.Object.extend({
this.set('value', masterComponentHostsInDB.findProperty('component', 'FALCON_SERVER').hostName);
break;
case 'drpcserver_host':
- this.set('value', masterComponentHostsInDB.findProperty('component', 'DRPC_SERVER').hostName);
+ var drpcHost = masterComponentHostsInDB.findProperty('component', 'DRPC_SERVER');
+ if (drpcHost) {
+ this.set('value', drpcHost.hostName);
+ }
break;
case 'stormuiserver_host':
this.set('value', masterComponentHostsInDB.findProperty('component', 'STORM_UI_SERVER').hostName);
break;
case 'storm_rest_api_host':
- this.set('value', masterComponentHostsInDB.findProperty('component', 'STORM_REST_API').hostName);
+ var stormRestApiHost = masterComponentHostsInDB.findProperty('component', 'STORM_REST_API');
+ if (stormRestApiHost) {
+ this.set('value', stormRestApiHost.hostName);
+ }
break;
case 'supervisor_hosts':
this.set('value', slaveComponentHostsInDB.findProperty('componentName', 'SUPERVISOR').hosts.mapProperty('hostName'));
@@ -686,7 +695,10 @@ App.ServiceConfigProperty = Ember.Object.extend({
}
mountPointsPerHost = mountPointsPerHost.filter(function (mPoint) {
- return !(['/', '/home', '/boot'].contains(mPoint.mountpoint) || ['devtmpfs', 'tmpfs', 'vboxsf'].contains(mPoint.type));
+ return !(['/', '/home', '/boot'].contains(mPoint.mountpoint)
+ || ['devtmpfs', 'tmpfs', 'vboxsf'].contains(mPoint.type)
+ || mPoint.available == 0
+ || mPoint.type == 'CDFS');
});
mountPointsPerHost.forEach(function (mPoint) {
@@ -699,6 +711,7 @@ App.ServiceConfigProperty = Ember.Object.extend({
allMountPoints.push(mountPointAsRoot);
}
this.set('value', '');
+ var winRegex = /^([a-z]):\\?$/;
if (!isOnlyFirstOneNeeded) {
allMountPoints.forEach(function (eachDrive) {
var mPoint = this.get('value');
@@ -707,6 +720,20 @@ App.ServiceConfigProperty = Ember.Object.extend({
}
if (eachDrive.mountpoint === "/") {
mPoint += this.get('defaultDirectory') + "\n";
+ } else if(winRegex.test(eachDrive.mountpoint.toLowerCase())) {
+ switch (this.get('name')) {
+ case 'dfs.datanode.data.dir':
+ case 'dfs.name.dir':
+ case 'dfs.namenode.name.dir':
+ case 'dfs.data.dir':
+ var winDriveUrl = eachDrive.mountpoint.toLowerCase().replace(winRegex, "file:///$1:");
+ mPoint += winDriveUrl + this.get('defaultDirectory') + "\n";
+ break;
+ default:
+ var winDrive = eachDrive.mountpoint.toLowerCase().replace(winRegex, "$1:");
+ var winDir = this.get('defaultDirectory').replace(/\//g, "\\");
+ mPoint += winDrive + winDir + "\n";
+ }
} else {
mPoint += eachDrive.mountpoint + this.get('defaultDirectory') + "\n";
}
@@ -717,6 +744,23 @@ App.ServiceConfigProperty = Ember.Object.extend({
var mPoint = allMountPoints[0].mountpoint;
if (mPoint === "/") {
mPoint = this.get('defaultDirectory');
+ } else if(winRegex.test(mPoint.toLowerCase())) {
+ switch (this.get('name')) {
+ case 'fs.checkpoint.dir':
+ case 'dfs.namenode.checkpoint.dir':
+ var winDriveUrl = mPoint.toLowerCase().replace(winRegex, "file:///$1:");
+ mPoint = winDriveUrl + this.get('defaultDirectory') + "\n";
+ break;
+ case 'zk_data_dir':
+ var winDrive = mPoint.toLowerCase().replace(winRegex, "$1:");
+ var winDir = this.get('defaultDirectory').replace(/\//g, "\\\\");
+ mPoint = winDrive + winDir + "\n";
+ break;
+ default:
+ var winDrive = mPoint.toLowerCase().replace(winRegex, "$1:");
+ var winDir = this.get('defaultDirectory').replace(/\//g, "\\");
+ mPoint = winDrive + winDir + "\n";
+ }
} else {
mPoint = mPoint + this.get('defaultDirectory');
}
@@ -841,7 +885,7 @@ App.ServiceConfigProperty = Ember.Object.extend({
case 'directories':
case 'directory':
if (!validator.isValidDir(value)) {
- this.set('errorMessage', 'Must be a slash at the start');
+ this.set('errorMessage', 'Must start with a slash or a drive letter');
isError = true;
}
else {
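The new winRegex branch is the heart of the Windows support here: bare drive mounts such as C:\ are rewritten either into file:///c:-style URLs (for the HDFS directory properties, which Hadoop parses as URIs) or into plain c:\... paths with backslash separators. A condensed, runnable sketch of the mapping, using two of the HDFS property names from the hunk above ('yarn.local.dir' is an illustrative name):

    var winRegex = /^([a-z]):\\?$/;

    function mapMountPoint(mountpoint, propertyName, defaultDirectory) {
      var mp = mountpoint.toLowerCase();
      if (!winRegex.test(mp)) {
        return mountpoint + defaultDirectory;  // POSIX mount, unchanged
      }
      switch (propertyName) {
        case 'dfs.datanode.data.dir':
        case 'dfs.namenode.name.dir':
          // HDFS dirs become file:/// URLs so the drive letter survives parsing.
          return mp.replace(winRegex, 'file:///$1:') + defaultDirectory;
        default:
          // Everything else gets a plain drive path with backslashes.
          return mp.replace(winRegex, '$1:') + defaultDirectory.replace(/\//g, '\\');
      }
    }

    console.log(mapMountPoint('C:\\', 'dfs.datanode.data.dir', '/hadoop/hdfs/data'));
    // -> file:///c:/hadoop/hdfs/data
    console.log(mapMountPoint('C:\\', 'yarn.local.dir', '/hadoop/yarn/local'));
    // -> c:\hadoop\yarn\local
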
diff --git a/ambari-web/app/models/stack_service.js b/ambari-web/app/models/stack_service.js
index 64516a0f64..5c91e41c3d 100644
--- a/ambari-web/app/models/stack_service.js
+++ b/ambari-web/app/models/stack_service.js
@@ -87,6 +87,24 @@ App.StackService = DS.Model.extend({
return services.contains(this.get('serviceName'));
}.property('serviceName'),
+ // Is the service required for reporting host metrics
+ isHostMetricsService: function () {
+ var services = ['GANGLIA'];
+ return services.contains(this.get('serviceName'));
+ }.property('serviceName'),
+
+ // Is the service required for reporting hadoop service metrics
+ isServiceMetricsService: function () {
+ var services = ['GANGLIA'];
+ return services.contains(this.get('serviceName'));
+ }.property('serviceName'),
+
+ // Is the service required for reporting alerts
+ isAlertingService: function () {
+ var services = ['NAGIOS'];
+ return services.contains(this.get('serviceName'));
+ }.property('serviceName'),
+
coSelectedServices: function () {
var coSelectedServices = App.StackService.coSelected[this.get('serviceName')];
if (!!coSelectedServices) {
@@ -122,7 +140,7 @@ App.StackService = DS.Model.extend({
}.property('configTypes'),
customReviewHandler: function () {
- return App.StackService.reviewPageHandlers[this.get('serviceName')];
+ return App.get('isHadoopWindowsStack') ? App.StackService.reviewWindowsPageHandlers[this.get('serviceName')] : App.StackService.reviewPageHandlers[this.get('serviceName')];
}.property('serviceName'),
/**
@@ -134,7 +152,7 @@ App.StackService = DS.Model.extend({
var configTypes = this.get('configTypes');
var serviceComponents = this.get('serviceComponents');
if (configTypes && Object.keys(configTypes).length) {
- var pattern = ["General", "CapacityScheduler", "FaultTolerance", "Isolation", "Performance", "^Advanced", "Env$", "^Custom", "Falcon - Oozie integration", "FalconStartupSite", "FalconRuntimeSite"];
+ var pattern = ["MetricsSink", "General", "CapacityScheduler", "FaultTolerance", "Isolation", "Performance", "^Advanced", "Env$", "^Custom", "Falcon - Oozie integration", "FalconStartupSite", "FalconRuntimeSite"];
configCategories = App.StackService.configCategories.call(this).filter(function (_configCategory) {
var serviceComponentName = _configCategory.get('name');
var isServiceComponent = serviceComponents.someProperty('componentName', serviceComponentName);
@@ -191,10 +209,30 @@ App.StackService.reviewPageHandlers = {
}
};
+App.StackService.reviewWindowsPageHandlers = {
+ 'HIVE': {
+ 'Database': 'loadHiveDbValue'
+ },
+ 'HDFS': {
+ 'Database': 'loadSinkDbValue'
+ },
+ 'NAGIOS': {
+ 'Administrator': 'loadNagiosAdminValue'
+ },
+ 'OOZIE': {
+ 'Database': 'loadOozieDbValue'
+ }
+};
+
App.StackService.configCategories = function () {
var serviceConfigCategories = [];
switch (this.get('serviceName')) {
case 'HDFS':
+ if (App.get('isHadoopWindowsStack')) {
+ serviceConfigCategories.pushObjects([
+ App.ServiceConfigCategory.create({ name: 'MetricsSink', displayName: 'Metrics Sink'})
+ ]);
+ }
serviceConfigCategories.pushObjects([
App.ServiceConfigCategory.create({ name: 'NAMENODE', displayName: 'NameNode'}),
App.ServiceConfigCategory.create({ name: 'SECONDARY_NAMENODE', displayName: 'Secondary NameNode'}),
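Each of the three new flags is just membership in a hard-coded service list, which is what lets the views further down treat "no Ganglia in the stack" and "no Nagios in the stack" as first-class conditions. A standalone sketch of the pattern:

    // Each flag is membership in a fixed service list.
    function makeFlag(services) {
      return function (serviceName) { return services.indexOf(serviceName) !== -1; };
    }

    var isHostMetricsService    = makeFlag(['GANGLIA']);
    var isServiceMetricsService = makeFlag(['GANGLIA']);
    var isAlertingService       = makeFlag(['NAGIOS']);

    console.log(isAlertingService('NAGIOS'));   // true
    console.log(isHostMetricsService('HDFS'));  // false
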
diff --git a/ambari-web/app/templates/main/dashboard/widgets.hbs b/ambari-web/app/templates/main/dashboard/widgets.hbs
index 84fe9d61b2..c462b68796 100644
--- a/ambari-web/app/templates/main/dashboard/widgets.hbs
+++ b/ambari-web/app/templates/main/dashboard/widgets.hbs
@@ -32,11 +32,13 @@
<i class="icon-refresh"></i> &nbsp; {{t dashboard.button.reset}}
</a>
</li>
- <li>
- <a target="_blank" {{bindAttr href="view.gangliaUrl"}}>
- <i class="icon-share"></i> &nbsp; {{t dashboard.button.gangliaLink}}
- </a>
- </li>
+ {{#if App.router.clusterController.isGangliaInstalled}}
+ <li>
+ <a target="_blank" {{bindAttr href="view.gangliaUrl"}}>
+ <i class="icon-share"></i> &nbsp; {{t dashboard.button.gangliaLink}}
+ </a>
+ </li>
+ {{/if}}
</ul>
</li>
</ul>
diff --git a/ambari-web/app/templates/main/host/summary.hbs b/ambari-web/app/templates/main/host/summary.hbs
index bace51109e..7b2a2f2226 100644
--- a/ambari-web/app/templates/main/host/summary.hbs
+++ b/ambari-web/app/templates/main/host/summary.hbs
@@ -144,24 +144,26 @@
</div>
</div>
{{!metrics}}
- <div class="span6">
- <div class="box">
- <div class="box-header">
- <h4>{{t hosts.host.summary.hostMetrics}}</h4>
- {{#if App.router.clusterController.isGangliaInstalled}}
- <div class="btn-group">
- {{#if App.router.clusterController.isGangliaUrlLoaded}}
- <a class="btn" rel="tooltip" title="Go to Ganglia" {{action "showGangliaCharts" target="view"}}><i class="icon-link"></i></a>
- {{else}}
- <div class="spinner"></div>
- {{/if}}
- </div>
- {{/if}}
- </div>
- <div>
- {{view App.MainHostMetricsView contentBinding="view.content"}}
+ {{#unless view.isNoHostMetricsService}}
+ <div class="span6">
+ <div class="box">
+ <div class="box-header">
+ <h4>{{t hosts.host.summary.hostMetrics}}</h4>
+ {{#if App.router.clusterController.isGangliaInstalled}}
+ <div class="btn-group">
+ {{#if App.router.clusterController.isGangliaUrlLoaded}}
+ <a class="btn" rel="tooltip" title="Go to Ganglia" {{action "showGangliaCharts" target="view"}}><i class="icon-link"></i></a>
+ {{else}}
+ <div class="spinner"></div>
+ {{/if}}
+ </div>
+ {{/if}}
+ </div>
+ <div>
+ {{view App.MainHostMetricsView contentBinding="view.content"}}
+ </div>
</div>
</div>
</div>
- </div>
+ {{/unless}}
</div>
diff --git a/ambari-web/app/templates/main/service/info/summary.hbs b/ambari-web/app/templates/main/service/info/summary.hbs
index 60844840ff..8ccc64e19f 100644
--- a/ambari-web/app/templates/main/service/info/summary.hbs
+++ b/ambari-web/app/templates/main/service/info/summary.hbs
@@ -61,8 +61,8 @@
</div>
</div>
</div>
- <div class="span6">
- {{#unless view.isNoAlertsService}}
+ {{#unless view.isNoAlertsService}}
+ <div class="span6">
<div class="box">
<div class="box-header">
<h4>{{t services.alerts.headingOfList}}</h4>
@@ -99,8 +99,8 @@
{{/if}}
</ul>
</div>
- {{/unless}}
- </div>
+ </div>
+ {{/unless}}
</div>
{{#if view.collapsedSections}}
@@ -135,7 +135,9 @@
</div>
{{/if}}
-{{#if view.serviceMetricGraphs.length}}
+{{!todo: Enable service metrics check once MetricsSink is deployed as a service}}
+{{!#unless view.isNoServiceMetricsService}}
+ {{#if view.serviceMetricGraphs.length}}
<div class="row-fluid">
<div class="span12">
<div class="box">
@@ -152,7 +154,6 @@
</div>
{{/if}}
</div>
-
<div class="">
<table class="graphs">
{{#each graphs in view.serviceMetricGraphs}}
@@ -168,10 +169,8 @@
{{/each}}
</table>
</div>
-
</div>
-
+ </div>
</div>
- </div>
-</div>
-{{/if}}
+ {{/if}}
+{{!/unless}}
diff --git a/ambari-web/app/templates/wizard/step2.hbs b/ambari-web/app/templates/wizard/step2.hbs
index 7d1b22a67d..911eae4764 100644
--- a/ambari-web/app/templates/wizard/step2.hbs
+++ b/ambari-web/app/templates/wizard/step2.hbs
@@ -46,45 +46,47 @@
<div class="ambari-agents">
<h5>{{t installer.step2.sshKey}}</h5>
- <label class="radio">
- {{view view.providingSSHKeyRadioButton}}
- {{t installer.step2.useSsh.provide}}
- <a href="javascript:void(null)"
- rel="popover"
- {{translateAttr title="installer.step2.useSsh.tooltip.title" data-content="installer.step2.useSsh.tooltip.content"}}>
- {{t installer.step2.useSsh.tooltip.title}}</a>
- {{t installer.step2.useSsh.provide_id_rsa}}
- </label>
+ {{#if isSSHRegistrationEnabled}}
+ <label class="radio">
+ {{view view.providingSSHKeyRadioButton}}
+ {{t installer.step2.useSsh.provide}}
+ <a href="javascript:void(null)"
+ rel="popover"
+ {{translateAttr title="installer.step2.useSsh.tooltip.title" data-content="installer.step2.useSsh.tooltip.content"}}>
+ {{t installer.step2.useSsh.tooltip.title}}</a>
+ {{t installer.step2.useSsh.provide_id_rsa}}
+ </label>
- <div class="ssh-key-input">
- {{#if view.isFileApi}}
- {{view App.SshKeyFileUploader disabledBinding="view.sshKeyState"}}
- {{/if}}
- <div {{bindAttr class="sshKeyError:error :controls :control-group"}}>
- {{view Ember.TextArea class="span6" rows="3" id="sshKey"
- placeholder="ssh private key" disabledBinding="view.sshKeyState" valueBinding="content.installOptions.sshKey"}}
- {{#if sshKeyError}}
- <span class="help-inline">{{sshKeyError}}</span>
+ <div class="ssh-key-input">
+ {{#if view.isFileApi}}
+ {{view App.SshKeyFileUploader disabledBinding="view.sshKeyState"}}
{{/if}}
- </div>
- <div>
- <label class="ssh-user pull-left">
- {{t installer.step2.sshUser}}
- <a href="javascript:void(null)"
- rel="popover"
- {{translateAttr title="installer.step2.sshUser.link" data-content="installer.step2.sshUser.toolTip"}}>
- {{t installer.step2.sshUser.link}}</a>
- {{t installer.step2.sshUser.account}}
- </label>
-
- <div {{bindAttr class="sshUserError:error :control-group"}}>
- {{view view.textFieldView valueBinding="content.installOptions.sshUser" isEnabledBinding="content.installOptions.useSsh"}}
- {{#if sshUserError}}
- <span class="help-inline">{{sshUserError}}</span>
+ <div {{bindAttr class="sshKeyError:error :controls :control-group"}}>
+ {{view Ember.TextArea class="span6" rows="3" id="sshKey"
+ placeholder="ssh private key" disabledBinding="view.sshKeyState" valueBinding="content.installOptions.sshKey"}}
+ {{#if sshKeyError}}
+ <span class="help-inline">{{sshKeyError}}</span>
{{/if}}
</div>
+ <div>
+ <label class="ssh-user pull-left">
+ {{t installer.step2.sshUser}}
+ <a href="javascript:void(null)"
+ rel="popover"
+ {{translateAttr title="installer.step2.sshUser.link" data-content="installer.step2.sshUser.toolTip"}}>
+ {{t installer.step2.sshUser.link}}</a>
+ {{t installer.step2.sshUser.account}}
+ </label>
+
+ <div {{bindAttr class="sshUserError:error :control-group"}}>
+ {{view view.textFieldView valueBinding="content.installOptions.sshUser" isEnabledBinding="content.installOptions.useSsh"}}
+ {{#if sshUserError}}
+ <span class="help-inline">{{sshUserError}}</span>
+ {{/if}}
+ </div>
+ </div>
</div>
- </div>
+ {{/if}}
<label class="radio">
{{view view.manualRegistrationRadioButton}}
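The whole provide-an-SSH-key block is now wrapped in {{#if isSSHRegistrationEnabled}}, leaving manual registration as the only option when the flag is false. The flag's definition is outside this hunk; assuming it mirrors the other Windows gates in this patch, a plausible (hypothetical) shape on the controller would be:

    // Hypothetical -- not part of this diff. SSH bootstrap is a
    // Linux-only path, so it would be hidden on Windows stacks.
    isSSHRegistrationEnabled: function () {
      return !App.get('isHadoopWindowsStack');
    }.property('App.isHadoopWindowsStack'),
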
diff --git a/ambari-web/app/utils/ajax/ajax.js b/ambari-web/app/utils/ajax/ajax.js
index 0f67b9b509..4c577b1a99 100644
--- a/ambari-web/app/utils/ajax/ajax.js
+++ b/ambari-web/app/utils/ajax/ajax.js
@@ -608,6 +608,11 @@ var urls = {
'mock': '/data/configurations/host_level_overrides_configs.json?{params}'
},
+ 'config.cluster_env_site': {
+ 'real': '/clusters/{clusterName}/configurations?type=cluster-env',
+ 'mock': '/data/configuration/cluster_env_site.json'
+ },
+
'host.host_component.add_new_component': {
'real': '/clusters/{clusterName}/hosts?Hosts/host_name={hostName}',
'mock': '/data/wizard/deploy/poll_1.json',
@@ -1047,7 +1052,12 @@ var urls = {
},
'cluster.update_upgrade_version': {
'real': '/stacks/{stackName}/versions?fields=stackServices/StackServices,Versions',
- 'mock': '/data/wizard/stack/stacks.json'
+ 'mock': '/data/wizard/stack/stacks.json',
+ 'format': function (data) {
+ return {
+ data: data.data
+ };
+ }
},
'cluster.load_repositories': {
'real': '/stacks/{stackName}/versions/{stackVersion}/operating_systems?fields=repositories/*',
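The new config.cluster_env_site entry follows the usual urls-map convention, so callers go through App.ajax.send with a clusterName substitution. A hedged usage sketch (the handler names are illustrative; only loadClusterConfigError appears in this patch, in utils/config.js below):

    App.ajax.send({
      name: 'config.cluster_env_site',  // /clusters/{clusterName}/configurations?type=cluster-env
      sender: this,
      data: { clusterName: App.get('clusterName') },
      success: 'loadClusterConfigSuccess',  // illustrative
      error: 'loadClusterConfigError'
    });
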
diff --git a/ambari-web/app/utils/config.js b/ambari-web/app/utils/config.js
index 8c183b2f52..cb44f36803 100644
--- a/ambari-web/app/utils/config.js
+++ b/ambari-web/app/utils/config.js
@@ -642,7 +642,7 @@ App.config = Em.Object.create({
miscConfigVisibleProperty: function (configs, serviceToShow) {
configs.forEach(function (item) {
- if (item.belongsToService && item.belongsToService.length) {
+ if (item.get('isVisible') && item.belongsToService && item.belongsToService.length) {
item.set("isVisible", item.belongsToService.some(function (cur) {
return serviceToShow.contains(cur)
}));
@@ -844,8 +844,9 @@ App.config = Em.Object.create({
params.callback(properties);
},
- loadClusterConfigError: function (request, ajaxOptions, error, opt) {
+ loadClusterConfigError: function (request, ajaxOptions, error, opt, params) {
console.log('ERROR: Failed to load cluster-env configs');
+ params.callback([]);
},
diff --git a/ambari-web/app/utils/validator.js b/ambari-web/app/utils/validator.js
index 3fcdfb5e69..d1af967f74 100644
--- a/ambari-web/app/utils/validator.js
+++ b/ambari-web/app/utils/validator.js
@@ -41,15 +41,17 @@ module.exports = {
return floatRegex.test(value);
},
/**
- * validate directory with slash at the start
+ * validate a directory that starts with a slash or a drive letter
* @param value
* @return {Boolean}
*/
isValidDir: function(value){
var floatRegex = /^\/[0-9a-z]*/;
+ var winRegex = /^[a-z]:\\[0-9a-z]*/;
+ var winUrlRegex = /^file:\/\/\/[a-z]:\/[0-9a-z]*/;
var dirs = value.replace(/,/g,' ').trim().split(new RegExp("\\s+", "g"));
for(var i = 0; i < dirs.length; i++){
- if(!floatRegex.test(dirs[i])){
+ if(!floatRegex.test(dirs[i]) && !winRegex.test(dirs[i]) && !winUrlRegex.test(dirs[i])){
return false;
}
}
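The relaxed validator now accepts three directory shapes: POSIX paths, Windows drive paths, and file:/// drive URLs. A runnable restatement with examples:

    var floatRegex  = /^\/[0-9a-z]*/;
    var winRegex    = /^[a-z]:\\[0-9a-z]*/;
    var winUrlRegex = /^file:\/\/\/[a-z]:\/[0-9a-z]*/;

    function isValidDir(value) {
      var dirs = value.replace(/,/g, ' ').trim().split(/\s+/g);
      return dirs.every(function (d) {
        return floatRegex.test(d) || winRegex.test(d) || winUrlRegex.test(d);
      });
    }

    console.log(isValidDir('/hadoop/hdfs/data'));      // true  (POSIX)
    console.log(isValidDir('c:\\hadoop\\hdfs\\data')); // true  (drive path)
    console.log(isValidDir('file:///c:/hadoop/data')); // true  (drive URL)
    console.log(isValidDir('hadoop/data'));            // false (no anchor)
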
diff --git a/ambari-web/app/views/common/quick_view_link_view.js b/ambari-web/app/views/common/quick_view_link_view.js
index 4123c8fbb8..75bf69173c 100644
--- a/ambari-web/app/views/common/quick_view_link_view.js
+++ b/ambari-web/app/views/common/quick_view_link_view.js
@@ -75,7 +75,7 @@ App.QuickViewLinks = Em.View.extend({
/**
* list of files that contains properties for enabling/disabling ssl
*/
- requiredSiteNames: ['hadoop-env','yarn-env','hbase-env','oozie-env','mapred-env','core-site', 'hdfs-site', 'hbase-site', 'oozie-site', 'yarn-site', 'mapred-site'],
+ requiredSiteNames: ['hadoop-env','yarn-env','hbase-env','oozie-env','mapred-env','storm-env', 'falcon-env', 'core-site', 'hdfs-site', 'hbase-site', 'oozie-site', 'yarn-site', 'mapred-site', 'storm-site'],
/**
* Get public host name by its host name.
*
diff --git a/ambari-web/app/views/main/admin.js b/ambari-web/app/views/main/admin.js
index b262f35d2e..41d6cbf1dd 100644
--- a/ambari-web/app/views/main/admin.js
+++ b/ambari-web/app/views/main/admin.js
@@ -40,11 +40,13 @@ App.MainAdminView = Em.View.extend({
url: 'adminServiceAccounts',
label: Em.I18n.t('common.serviceAccounts')
});
- items.push({
- name: 'security',
- url: 'adminSecurity.index',
- label: Em.I18n.t('common.security')
- });
+ if (!App.get('isHadoopWindowsStack')) {
+ items.push({
+ name: 'security',
+ url: 'adminSecurity.index',
+ label: Em.I18n.t('common.security')
+ });
+ }
return items;
}.property(''),
diff --git a/ambari-web/app/views/main/dashboard/widgets.js b/ambari-web/app/views/main/dashboard/widgets.js
index 60327b5b6d..bce957b47c 100644
--- a/ambari-web/app/views/main/dashboard/widgets.js
+++ b/ambari-web/app/views/main/dashboard/widgets.js
@@ -100,6 +100,9 @@ App.MainDashboardWidgetsView = Em.View.extend(App.UserPref, App.LocalStorage, {
setWidgetsDataModel: function () {
var services = App.Service.find();
var self = this;
+ if(App.get('services.hostMetrics').length > 0) {
+ self.set('host_metrics_model', App.get('services.hostMetrics'));
+ }
services.forEach(function (item) {
switch (item.get('serviceName')) {
case "HDFS":
@@ -140,6 +143,15 @@ App.MainDashboardWidgetsView = Em.View.extend(App.UserPref, App.LocalStorage, {
'29' // flume
]; // all in order
var hiddenFull = [['22','Region In Transition']];
+
+ // Display widgets for host metrics if the stack definition has a host metrics service to display it.
+ if (this.get('host_metrics_model') == null) {
+ var hostMetrics = ['11', '12', '13', '14'];
+ hostMetrics.forEach ( function (item) {
+ visibleFull = visibleFull.without(item);
+ }, this);
+ }
+
if (this.get('hdfs_model') == null) {
var hdfs= ['1', '2', '3', '4', '5', '15', '17'];
hdfs.forEach ( function (item) {
@@ -182,6 +194,8 @@ App.MainDashboardWidgetsView = Em.View.extend(App.UserPref, App.LocalStorage, {
obj.set('hidden', hiddenFull);
},
+ host_metrics_model: null,
+
hdfs_model: null,
mapreduce_model: null,
@@ -378,17 +392,25 @@ App.MainDashboardWidgetsView = Em.View.extend(App.UserPref, App.LocalStorage, {
// check each service, find out the newly added service and already deleted service
if (this.get('hdfs_model') != null) {
- var hdfsAndMetrics= ['1', '2', '3', '4', '5', '15', '17', '11', '12', '13', '14'];
- hdfsAndMetrics.forEach ( function (item) {
+ var hdfs = ['1', '2', '3', '4', '5', '15', '17'];
+ hdfs.forEach ( function (item) {
toDelete = self.removeWidget(toDelete, item);
}, this);
}
- else {
- var graphs = ['11', '12', '13', '14'];
- graphs.forEach ( function (item) {
- toDelete = self.removeWidget(toDelete, item);
- }, this);
+
+ // Display widgets for host metrics if the stack definition has a host metrics service to display it.
+ if (this.get('host_metrics_model') != null) {
+ var hostMetrics = ['11', '12', '13', '14'];
+ var flag = self.containsWidget(toDelete, hostMetrics[0]);
+ if (flag) {
+ hostMetrics.forEach ( function (item) {
+ toDelete = self.removeWidget(toDelete, item);
+ }, this);
+ } else {
+ toAdd = toAdd.concat(hostMetrics);
+ }
}
+
if (this.get('mapreduce_model') != null) {
var map = ['6', '7', '8', '9', '10', '16', '18'];
var flag = self.containsWidget(toDelete, map[0]);
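The reworked bookkeeping stops tying the four host-metric widgets ('11'-'14') to the HDFS model and instead restores or adds them as a group whenever a host-metrics service exists. A reduced, runnable sketch of that reconciliation, with removeWidget approximated by an array filter:

    function reconcileHostMetrics(hostMetricsModel, toDelete, toAdd) {
      var hostMetrics = ['11', '12', '13', '14'];
      if (hostMetricsModel != null) {
        if (toDelete.indexOf(hostMetrics[0]) !== -1) {
          // The widgets were slated for deletion; keep them instead.
          toDelete = toDelete.filter(function (id) {
            return hostMetrics.indexOf(id) === -1;
          });
        } else {
          toAdd = toAdd.concat(hostMetrics);
        }
      }
      return { toDelete: toDelete, toAdd: toAdd };
    }

    console.log(reconcileHostMetrics({}, ['11', '12'], []));
    // -> { toDelete: [], toAdd: [] }
    console.log(reconcileHostMetrics({}, [], []));
    // -> { toDelete: [], toAdd: [ '11', '12', '13', '14' ] }
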
diff --git a/ambari-web/app/views/main/host/summary.js b/ambari-web/app/views/main/host/summary.js
index 1e92fe7c95..c8317a9c84 100644
--- a/ambari-web/app/views/main/host/summary.js
+++ b/ambari-web/app/views/main/host/summary.js
@@ -60,6 +60,13 @@ App.MainHostSummaryView = Em.View.extend({
},
/**
+ * Host metrics panel is not displayed when a host metrics service (e.g. Ganglia) is not in the stack definition.
+ */
+ isNoHostMetricsService: function() {
+ return !App.get('services.hostMetrics').length;
+ }.property('App.services.hostMetrics'),
+
+ /**
* Message for "restart" block
* @type {String}
*/
diff --git a/ambari-web/app/views/main/menu.js b/ambari-web/app/views/main/menu.js
index 966f5ae02f..851ff999a1 100644
--- a/ambari-web/app/views/main/menu.js
+++ b/ambari-web/app/views/main/menu.js
@@ -141,11 +141,13 @@ App.MainMenuView = Em.CollectionView.extend({
url: 'serviceAccounts',
label: Em.I18n.t('common.serviceAccounts')
});
- categories.push({
- name: 'security',
- url: 'security/',
- label: Em.I18n.t('common.security')
- });
+ if (!App.get('isHadoopWindowsStack')) {
+ categories.push({
+ name: 'security',
+ url: 'security/',
+ label: Em.I18n.t('common.security')
+ });
+ }
}
return categories;
}.property('')
diff --git a/ambari-web/app/views/main/service/info/summary.js b/ambari-web/app/views/main/service/info/summary.js
index 08ef48c2c3..99c049b525 100644
--- a/ambari-web/app/views/main/service/info/summary.js
+++ b/ambari-web/app/views/main/service/info/summary.js
@@ -443,12 +443,21 @@ App.MainServiceInfoSummaryView = Em.View.extend({
this.set('oldServiceName', serviceName);
}.observes('serviceName'),
- /*
- * Alerts panel not display for PIG, SQOOP and TEZ Service
+ /**
+ * Alerts panel is not displayed when an alerting service (e.g. Nagios) is not in the stack definition.
+ * Alerts panel is never displayed for the PIG, SQOOP and TEZ services.
*/
isNoAlertsService: function () {
- return !!this.get('service.serviceName') && App.get('services.clientOnly').contains(this.get('service.serviceName'));
- }.property(''),
+ return !App.get('services.alerting').length ||
+ (!!this.get('service.serviceName') && App.get('services.clientOnly').contains(this.get('service.serviceName')));
+ }.property('service.serviceName', 'App.services.alerting'),
+
+ /**
+ * Service metrics panel is not displayed when a metrics service (e.g. Ganglia) is not in the stack definition.
+ */
+ isNoServiceMetricsService: function() {
+ return !App.get('services.serviceMetrics').length;
+ }.property('App.services.serviceMetrics'),
gangliaUrl:function () {
var gangliaUrl = App.router.get('clusterController.gangliaUrl');
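The alerts check now folds in the stack-level condition, so the panel disappears both for client-only services and for stacks with no alerting service at all. A standalone restatement:

    function isNoAlertsService(alertingServices, clientOnlyServices, serviceName) {
      return !alertingServices.length ||
        (!!serviceName && clientOnlyServices.indexOf(serviceName) !== -1);
    }

    console.log(isNoAlertsService([], ['PIG'], 'HDFS'));          // true: no Nagios in stack
    console.log(isNoAlertsService(['NAGIOS'], ['PIG'], 'PIG'));   // true: client-only service
    console.log(isNoAlertsService(['NAGIOS'], ['PIG'], 'HDFS'));  // false: panel shown
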
diff --git a/ambari-web/app/views/wizard/controls_view.js b/ambari-web/app/views/wizard/controls_view.js
index b4e70cb6d3..a4cbf3fdfd 100644
--- a/ambari-web/app/views/wizard/controls_view.js
+++ b/ambari-web/app/views/wizard/controls_view.js
@@ -255,7 +255,24 @@ App.ServiceConfigRadioButtons = Ember.View.extend({
connectionUrl.set('value', "jdbc:oracle:thin:@//" + this.get('hostName') + ":1521/" + this.get('databaseName'));
dbClass.set('value', "oracle.jdbc.driver.OracleDriver");
break;
+ case 'Existing MSSQL Server database with integrated authentication':
+ connectionUrl.set('value', "jdbc:sqlserver://" + this.get('hostName') + ";databaseName=" + this.get('databaseName') + ";integratedSecurity=true");
+ dbClass.set('value', "com.microsoft.sqlserver.jdbc.SQLServerDriver");
+ break;
+ case 'Existing MSSQL Server database with sql auth':
+ connectionUrl.set('value', "jdbc:sqlserver://" + this.get('hostName') + ";databaseName=" + this.get('databaseName'));
+ dbClass.set('value', "com.microsoft.sqlserver.jdbc.SQLServerDriver");
+ break;
}
+ var isNotExistingMSSQLServer = this.get('serviceConfig.value') !== 'Existing MSSQL Server database with integrated authentication';
+ this.get('categoryConfigsAll').findProperty('name', 'javax.jdo.option.ConnectionUserName').setProperties({
+ isVisible: isNotExistingMSSQLServer,
+ isRequired: isNotExistingMSSQLServer
+ });
+ this.get('categoryConfigsAll').findProperty('name', 'javax.jdo.option.ConnectionPassword').setProperties({
+ isVisible: isNotExistingMSSQLServer,
+ isRequired: isNotExistingMSSQLServer
+ });
} else if (this.get('serviceConfig.serviceName') === 'OOZIE') {
switch (this.get('serviceConfig.value')) {
case 'New Derby Database':
@@ -274,7 +291,44 @@ App.ServiceConfigRadioButtons = Ember.View.extend({
connectionUrl.set('value', "jdbc:oracle:thin:@//" + this.get('hostName') + ":1521/" + this.get('databaseName'));
dbClass.set('value', "oracle.jdbc.driver.OracleDriver");
break;
+ case 'Existing MSSQL Server database with integrated authentication':
+ connectionUrl.set('value', "jdbc:sqlserver://" + this.get('hostName') + ";databaseName=" + this.get('databaseName') + ";integratedSecurity=true");
+ dbClass.set('value', "com.microsoft.sqlserver.jdbc.SQLServerDriver");
+ break;
+ case 'Existing MSSQL Server database with sql auth':
+ connectionUrl.set('value', "jdbc:sqlserver://" + this.get('hostName') + ";databaseName=" + this.get('databaseName'));
+ dbClass.set('value', "com.microsoft.sqlserver.jdbc.SQLServerDriver");
+ break;
+ }
+ isNotExistingMSSQLServer = this.get('serviceConfig.value') !== 'Existing MSSQL Server database with integrated authentication';
+ this.get('categoryConfigsAll').findProperty('name', 'oozie.service.JPAService.jdbc.username').setProperties({
+ isVisible: isNotExistingMSSQLServer,
+ isRequired: isNotExistingMSSQLServer
+ });
+ this.get('categoryConfigsAll').findProperty('name', 'oozie.service.JPAService.jdbc.password').setProperties({
+ isVisible: isNotExistingMSSQLServer,
+ isRequired: isNotExistingMSSQLServer
+ });
+ } else if (this.get('serviceConfig.serviceName') === 'HDFS') {
+ switch (this.get('serviceConfig.value')) {
+ case 'Existing MSSQL Server database with integrated authentication':
+ connectionUrl.set('value', "jdbc:sqlserver://" + this.get('hostName') + ";databaseName=" + this.get('databaseName') + ";integratedSecurity=true");
+ dbClass.set('value', "com.microsoft.sqlserver.jdbc.SQLServerDriver");
+ break;
+ case 'Existing MSSQL Server database with sql auth':
+ connectionUrl.set('value', "jdbc:sqlserver://" + this.get('hostName') + ";databaseName=" + this.get('databaseName'));
+ dbClass.set('value', "com.microsoft.sqlserver.jdbc.SQLServerDriver");
+ break;
}
+ var isNotExistingMSSQLServer = this.get('serviceConfig.value') !== 'Existing MSSQL Server database with integrated authentication';
+ this.get('categoryConfigsAll').findProperty('name', 'sink.dblogin').setProperties({
+ isVisible: isNotExistingMSSQLServer,
+ isRequired: isNotExistingMSSQLServer
+ });
+ this.get('categoryConfigsAll').findProperty('name', 'sink.dbpassword').setProperties({
+ isVisible: isNotExistingMSSQLServer,
+ isRequired: isNotExistingMSSQLServer
+ });
}
connectionUrl.set('defaultValue', connectionUrl.get('value'));
}
@@ -287,6 +341,8 @@ App.ServiceConfigRadioButtons = Ember.View.extend({
switch (this.get('serviceConfig.serviceName')) {
case 'HIVE':
return this.get('categoryConfigsAll').findProperty('name', 'ambari.hive.db.schema.name').get('value');
+ case 'HDFS':
+ return this.get('categoryConfigsAll').findProperty('name', 'sink.db.schema.name').get('value');
case 'OOZIE':
return this.get('categoryConfigsAll').findProperty('name', 'oozie.db.schema.name').get('value');
default:
@@ -314,13 +370,32 @@ App.ServiceConfigRadioButtons = Ember.View.extend({
case 'Existing Oracle Database':
hostname = this.get('categoryConfigsAll').findProperty('name', 'hive_existing_oracle_host');
break;
+ case 'Existing MSSQL Server database with integrated authentication':
+ hostname = this.get('categoryConfigsAll').findProperty('name', 'hive_existing_mssql_server_host');
+ break;
+ case 'Existing MSSQL Server database with sql auth':
+ hostname = this.get('categoryConfigsAll').findProperty('name', 'hive_existing_mssql_server_2_host');
+ break;
}
if (hostname) {
returnValue = hostname.get('value');
} else {
returnValue = this.get('categoryConfigsAll').findProperty('name', 'hive_hostname').get('value');
}
-
+ } else if (this.get('serviceConfig.serviceName') === 'HDFS') {
+ switch (value) {
+ case 'Existing MSSQL Server database with integrated authentication':
+ hostname = this.get('categoryConfigsAll').findProperty('name', 'sink_existing_mssql_server_host');
+ break;
+ case 'Existing MSSQL Server database with sql auth':
+ hostname = this.get('categoryConfigsAll').findProperty('name', 'sink_existing_mssql_server_2_host');
+ break;
+ }
+ if (hostname) {
+ returnValue = hostname.get('value');
+ } else {
+ returnValue = this.get('categoryConfigsAll').findProperty('name', 'sink.dbservername').get('value');
+ }
} else if (this.get('serviceConfig.serviceName') === 'OOZIE') {
switch (value) {
case 'New Derby Database':
@@ -335,6 +410,12 @@ App.ServiceConfigRadioButtons = Ember.View.extend({
case 'Existing Oracle Database':
hostname = this.get('categoryConfigsAll').findProperty('name', 'oozie_existing_oracle_host');
break;
+ case 'Existing MSSQL Server database with integrated authentication':
+ hostname = this.get('categoryConfigsAll').findProperty('name', 'oozie_existing_mssql_server_host');
+ break;
+ case 'Existing MSSQL Server database with sql auth':
+ hostname = this.get('categoryConfigsAll').findProperty('name', 'oozie_existing_mssql_server_2_host');
+ break;
}
if (hostname) {
returnValue = hostname.get('value');
@@ -348,6 +429,8 @@ App.ServiceConfigRadioButtons = Ember.View.extend({
connectionUrl: function () {
if (this.get('serviceConfig.serviceName') === 'HIVE') {
return this.get('categoryConfigsAll').findProperty('name', 'javax.jdo.option.ConnectionURL');
+ } else if (this.get('serviceConfig.serviceName') === 'HDFS') {
+ return this.get('categoryConfigsAll').findProperty('name', 'sink.jdbc.url');
} else {
return this.get('categoryConfigsAll').findProperty('name', 'oozie.service.JPAService.jdbc.url');
}
@@ -356,6 +439,8 @@ App.ServiceConfigRadioButtons = Ember.View.extend({
dbClass: function () {
if (this.get('serviceConfig.serviceName') === 'HIVE') {
return this.get('categoryConfigsAll').findProperty('name', 'javax.jdo.option.ConnectionDriverName');
+ } else if (this.get('serviceConfig.serviceName') === 'HDFS') {
+ return this.get('categoryConfigsAll').findProperty('name', 'sink.jdbc.driver');
} else {
return this.get('categoryConfigsAll').findProperty('name', 'oozie.service.JPAService.jdbc.driver');
}
@@ -371,11 +456,11 @@ App.ServiceConfigRadioButtons = Ember.View.extend({
**/
handleDBConnectionProperty: function () {
if (!['addServiceController', 'installerController'].contains(App.clusterStatus.wizardControllerName)) return;
- var handledProperties = ['oozie_database', 'hive_database'];
+ var handledProperties = ['oozie_database', 'hive_database', 'sink_database'];
var currentValue = this.get('serviceConfig.value');
- var databases = /MySQL|PostgreSQL|Oracle|Derby/gi;
+ var databases = /MySQL|PostgreSQL|Oracle|Derby|MSSQL/gi;
var currentDB = currentValue.match(databases)[0];
- var databasesTypes = /MySQL|PostgreS|Oracle|Derby/gi;
+ var databasesTypes = /MySQL|PostgreS|Oracle|Derby|MSSQL/gi;
var currentDBType = currentValue.match(databasesTypes)[0];
var existingDatabase = /existing/gi.test(currentValue);
// db connection check button show up if existed db selected
@@ -392,6 +477,7 @@ App.ServiceConfigRadioButtons = Ember.View.extend({
// warning msg under database type radio buttons, to warn the user to setup jdbc driver if existed db selected
var propertyHive = this.get('categoryConfigsAll').findProperty('displayName', 'Hive Database');
var propertyOozie = this.get('categoryConfigsAll').findProperty('displayName', 'Oozie Database');
+ var propertyMetricsSink = this.get('categoryConfigsAll').findProperty('displayName', 'Metrics Sink Database');
var propertyAppendTo2 = propertyHive ? propertyHive : propertyOozie;
if (currentDB && existingDatabase) {
if (handledProperties.contains(this.get('serviceConfig.name'))) {
@@ -775,6 +861,8 @@ App.CheckDBConnectionView = Ember.View.extend({
hostNameProperty: function() {
if (!/wizard/i.test(this.get('controller.name')) && this.get('parentView.service.serviceName') === 'HIVE') {
return this.get('parentView.service.serviceName').toLowerCase() + '_hostname';
+ } else if (!/wizard/i.test(this.get('controller.name')) && this.get('parentView.service.serviceName') === 'HDFS') {
+ return 'sink.dbservername';
}
return '{0}_existing_{1}_host'.format(this.get('parentView.service.serviceName').toLowerCase(), this.get('databaseName').toLowerCase());
}.property('databaseName'),
@@ -785,6 +873,7 @@ App.CheckDBConnectionView = Ember.View.extend({
/** @property {object} requiredProperties - properties that necessary for database connection **/
requiredProperties: function() {
var propertiesMap = {
+ HDFS: ['sink.db.schema.name','sink.dblogin','sink.dbpassword','sink.jdbc.driver','sink.jdbc.url'],
OOZIE: ['oozie.db.schema.name','oozie.service.JPAService.jdbc.username','oozie.service.JPAService.jdbc.password','oozie.service.JPAService.jdbc.driver','oozie.service.JPAService.jdbc.url'],
HIVE: ['ambari.hive.db.schema.name','javax.jdo.option.ConnectionUserName','javax.jdo.option.ConnectionPassword','javax.jdo.option.ConnectionDriverName','javax.jdo.option.ConnectionURL']
};
@@ -793,8 +882,8 @@ App.CheckDBConnectionView = Ember.View.extend({
/** @property {Object} propertiesPattern - check pattern according to type of connection properties **/
propertiesPattern: function() {
return {
- user_name: /username$/ig,
- user_passwd: /password$/ig,
+ user_name: /(username|dblogin)$/ig,
+ user_passwd: /(dbpassword|password)$/ig,
db_connection_url: /jdbc\.url|connectionurl/ig
}
}.property(),
@@ -802,6 +891,7 @@ App.CheckDBConnectionView = Ember.View.extend({
masterHostName: function() {
var serviceMasterMap = {
'OOZIE': 'oozieserver_host',
+ 'HDFS': 'hadoop_host',
'HIVE': 'hivemetastore_host'
};
return this.get('parentView.categoryConfigsAll').findProperty('name', serviceMasterMap[this.get('parentView.service.serviceName')]).get('value');
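All three services share the same two MSSQL URL shapes: integrated authentication appends integratedSecurity=true and hides the username/password fields, while SQL auth keeps them. Factored out as a plain, runnable function:

    function mssqlConnectionUrl(host, dbName, integratedAuth) {
      var url = 'jdbc:sqlserver://' + host + ';databaseName=' + dbName;
      // Integrated auth rides on the Windows login, so no credentials
      // appear in the config and the URL carries the flag instead.
      return integratedAuth ? url + ';integratedSecurity=true' : url;
    }

    console.log(mssqlConnectionUrl('db-host', 'hive', true));
    // -> jdbc:sqlserver://db-host;databaseName=hive;integratedSecurity=true
    console.log(mssqlConnectionUrl('db-host', 'oozie', false));
    // -> jdbc:sqlserver://db-host;databaseName=oozie
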
diff --git a/ambari-web/app/views/wizard/step1_view.js b/ambari-web/app/views/wizard/step1_view.js
index 886543e647..d3d0c4b27d 100644
--- a/ambari-web/app/views/wizard/step1_view.js
+++ b/ambari-web/app/views/wizard/step1_view.js
@@ -21,6 +21,12 @@ App.WizardStep1View = Em.View.extend({
templateName: require('templates/wizard/step1'),
+ /**
+ * Is Repositories Accordion collapsed
+ * @type {bool}
+ */
+ isRLCollapsed: true,
+
didInsertElement: function () {
if (this.get('isRLCollapsed')) {
this.$('.accordion-body').hide();
@@ -114,12 +120,6 @@ App.WizardStep1View = Em.View.extend({
}.property('allRepositories.@each.emptyError', 'isNoOsChecked', 'allRepositories.@each.validation'),
/**
- * Is Repositories Accordion collapsed
- * @type {bool}
- */
- isRLCollapsed: true,
-
- /**
* Checkbox for each stack
* @type {Ember.Checkbox}
*/
diff --git a/ambari-web/pom.xml b/ambari-web/pom.xml
index 65e568d22f..294e60f18f 100644
--- a/ambari-web/pom.xml
+++ b/ambari-web/pom.xml
@@ -97,13 +97,13 @@
</goals>
<configuration>
<target name="ambari-web-clean">
- <exec dir="${basedir}" executable="rm">
- <arg value="-rf"/>
+ <exec dir="${basedir}" executable="${executable.rmdir}">
+ <arg value="${args.rm.clean}"/>
<arg value="public"/>
<arg value="node_modules"/>
</exec>
- <exec dir="${basedir}" executable="mkdir">
- <arg value="public"/>
+ <exec dir="${basedir}" executable="${executable.mkdir}">
+ <arg value="${args.mkdir.clean}"/>
</exec>
</target>
</configuration>
@@ -116,26 +116,25 @@
</goals>
<configuration>
<target name="ambari-web-compile">
- <exec dir="${basedir}" executable="npm">
- <env key="PYTHON" value="${project.basedir}/../ambari-common/src/main/unix/ambari-python-wrap" />
- <arg value="install"/>
+ <exec dir="${basedir}" executable="${executable.npm}" failonerror="true">
+ <arg value="${args.npm.install}"/>
</exec>
- <exec dir="${basedir}" executable="brunch" failonerror="true">
- <arg value="build"/>
+ <exec dir="${basedir}" executable="${executable.brunch}" failonerror="true">
+ <arg value="${args.brunch.build}"/>
</exec>
<!-- sets Ambari version to make it accessible from code -->
- <exec dir="${basedir}" executable="sh" failonerror="true">
- <arg value="${basedir}/set-ambari-version.sh"/>
+ <exec dir="${basedir}" executable="${executable.shell}" failonerror="true">
+ <arg value="${basedir}${dirsep}set-ambari-version.${fileextension.shell}"/>
<arg value="${ambariVersion}"/>
</exec>
<!-- sets default stack and version to use for install -->
- <exec dir="${basedir}" executable="sh" failonerror="true">
- <arg value="${basedir}/set-default-stack-version.sh"/>
+ <exec dir="${basedir}" executable="${executable.shell}" failonerror="true">
+ <arg value="${basedir}${dirsep}set-default-stack-version.${fileextension.shell}"/>
<arg value="${defaultStackVersion}"/>
</exec>
<!-- enables experimental features if enableExperimental is set to true -->
- <exec dir="${basedir}" executable="sh" failonerror="true">
- <arg value="${basedir}/toggle-experimental.sh"/>
+ <exec dir="${basedir}" executable="${executable.shell}" failonerror="true">
+ <arg value="${basedir}${dirsep}toggle-experimental.${fileextension.shell}"/>
<arg value="${enableExperimental}"/>
</exec>
</target>
@@ -149,22 +148,21 @@
</goals>
<configuration>
<target name="ambari-web unit tests" unless="${skipTests}">
- <exec dir="${basedir}" executable="npm" failonerror="true">
- <env key="PYTHON" value="${basedir}/../ambari-common/src/main/unix/ambari-python-wrap" />
- <arg value="test"/>
+ <exec dir="${basedir}" executable="${executable.npm}" failonerror="true">
+ <arg value="${args.npm.test}"/>
</exec>
</target>
</configuration>
</execution>
<execution>
- <id>gzip amabri-web content</id>
+ <id>gzip ambari-web content</id>
<phase>prepare-package</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<target name="ambari-web-package">
- <exec dir="${basedir}" executable="gzip" failonerror="true">
+ <exec dir="${basedir}" executable="${executable.gzip}" failonerror="true">
<arg value="-f"/>
<arg value="public/javascripts/app.js"/>
<arg value="public/javascripts/vendor.js"/>
@@ -218,4 +216,54 @@
</plugin>
</plugins>
</build>
+ <profiles>
+ <profile>
+ <id>windows</id>
+ <activation>
+ <os>
+ <family>win</family>
+ </os>
+ </activation>
+ <properties>
+ <envClassifier>win</envClassifier>
+ <dirsep>\</dirsep>
+ <executable.brunch>cmd</executable.brunch>
+ <args.brunch.build>/C brunch build</args.brunch.build>
+ <executable.gzip>gzip</executable.gzip>
+ <executable.mkdir>cmd</executable.mkdir>
+ <args.mkdir.clean>/C mkdir public</args.mkdir.clean>
+ <executable.npm>cmd</executable.npm>
+ <args.npm.install>/C npm install</args.npm.install>
+ <args.npm.test>/C npm test</args.npm.test>
+ <executable.rmdir>cmd</executable.rmdir>
+ <args.rm.clean>/C rmdir /S /Q</args.rm.clean>
+ <executable.shell>cmd</executable.shell>
+ <fileextension.shell>cmd</fileextension.shell>
+ </properties>
+ </profile>
+ <profile>
+ <id>linux</id>
+ <activation>
+ <os>
+ <family>unix</family>
+ </os>
+ </activation>
+ <properties>
+ <envClassifier>linux</envClassifier>
+ <dirsep>/</dirsep>
+ <executable.brunch>brunch</executable.brunch>
+ <args.brunch.build>build</args.brunch.build>
+ <executable.gzip>gzip</executable.gzip>
+ <executable.mkdir>mkdir</executable.mkdir>
+ <args.mkdir.clean>public</args.mkdir.clean>
+ <executable.npm>npm</executable.npm>
+ <args.npm.install>install</args.npm.install>
+ <args.npm.test>test</args.npm.test>
+ <executable.rmdir>rm</executable.rmdir>
+ <args.rm.clean>-rf</args.rm.clean>
+ <executable.shell>sh</executable.shell>
+ <fileextension.shell>sh</fileextension.shell>
+ </properties>
+ </profile>
+ </profiles>
</project>
diff --git a/ambari-web/test/controllers/main/host/add_controller_test.js b/ambari-web/test/controllers/main/host/add_controller_test.js
index 6bd098f418..a6c94bcd51 100644
--- a/ambari-web/test/controllers/main/host/add_controller_test.js
+++ b/ambari-web/test/controllers/main/host/add_controller_test.js
@@ -415,7 +415,7 @@ describe('App.AddHostController', function () {
describe("#getInstallOptions()", function () {
it("", function () {
controller.set('installOptionsTemplate', {'prop': 'installOptionsTemplate'});
- expect(controller.getInstallOptions()).to.be.eql({
+ expect(controller.get('getInstallOptions')).to.be.eql({
prop: 'installOptionsTemplate'
});
});
diff --git a/ambari-web/test/views/main/dashboard/widgets_test.js b/ambari-web/test/views/main/dashboard/widgets_test.js
index fdd2b34dbd..4fdd6cd2f7 100644
--- a/ambari-web/test/views/main/dashboard/widgets_test.js
+++ b/ambari-web/test/views/main/dashboard/widgets_test.js
@@ -32,6 +32,7 @@ describe('App.MainDashboardWidgetsView', function() {
});
describe('#setInitPrefObject', function() {
+ var host_metrics_widgets_count = 4;
var hdfs_widgets_count = 7;
var mapreduce_widgets_count = 7;
var hbase_widgets_count = 4;
@@ -40,19 +41,35 @@ describe('App.MainDashboardWidgetsView', function() {
var tests = Em.A([
{
models: {
+ host_metrics_model: null,
hdfs_model: null,
mapreduce_model: null,
hbase_model: null,
yarn_model: null
},
e: {
- visibleL: total_widgets_count - hdfs_widgets_count - mapreduce_widgets_count - hbase_widgets_count - yarn_widgets_count - 1,
+ visibleL: total_widgets_count - host_metrics_widgets_count - hdfs_widgets_count - mapreduce_widgets_count - hbase_widgets_count - yarn_widgets_count - 1,
hiddenL: 0
},
m: 'All models are null'
},
{
models: {
+ host_metrics_model: {},
+ hdfs_model: null,
+ mapreduce_model: null,
+ hbase_model: null,
+ yarn_model: null
+ },
+ e: {
+ visibleL: total_widgets_count - hdfs_widgets_count - mapreduce_widgets_count - hbase_widgets_count - yarn_widgets_count - 1,
+ hiddenL: 0
+ },
+ m: 'hdfs_model, mapreduce_model, hbase_model, yarn_model are null'
+ },
+ {
+ models: {
+ host_metrics_model: {},
hdfs_model: {},
mapreduce_model: null,
hbase_model: null,
@@ -66,6 +83,7 @@ describe('App.MainDashboardWidgetsView', function() {
},
{
models: {
+ host_metrics_model: {},
hdfs_model: {},
mapreduce_model: {},
hbase_model: null,
@@ -79,6 +97,7 @@ describe('App.MainDashboardWidgetsView', function() {
},
{
models: {
+ host_metrics_model: {},
hdfs_model: {},
mapreduce_model: {},
hbase_model: {},
@@ -92,6 +111,7 @@ describe('App.MainDashboardWidgetsView', function() {
},
{
models: {
+ host_metrics_model: {},
hdfs_model: {},
mapreduce_model: {},
hbase_model: {},
@@ -106,6 +126,7 @@ describe('App.MainDashboardWidgetsView', function() {
]);
tests.forEach(function(test) {
it(test.m, function() {
+ mainDashboardWidgetsView.set('host_metrics_model', test.models.host_metrics_model);
mainDashboardWidgetsView.set('hdfs_model', test.models.hdfs_model);
mainDashboardWidgetsView.set('mapreduce_model', test.models.mapreduce_model);
mainDashboardWidgetsView.set('hbase_model', test.models.hbase_model);
diff --git a/contrib/ambari-scom/metrics-sink/db/Hadoop-Metrics-SQLServer-CREATE.ddl b/contrib/ambari-scom/metrics-sink/db/Hadoop-Metrics-SQLServer-CREATE.sql
index 1abf3fa152..6a88310da3 100644
--- a/contrib/ambari-scom/metrics-sink/db/Hadoop-Metrics-SQLServer-CREATE.ddl
+++ b/contrib/ambari-scom/metrics-sink/db/Hadoop-Metrics-SQLServer-CREATE.sql
@@ -17,24 +17,15 @@ limitations under the License.
*/
/*
-Deployment script for HadoopMetrics
-*/
+Schema population script for $(METRICSDBNAME)
-USE [master]
-GO
+Use this script in sqlcmd mode, setting the environment variables like this:
+set METRICSDBNAME=HadoopMetrics
-IF db_id('HadoopMetrics') IS NOT NULL
-BEGIN
- Print N'Dropping [dbo].[HadoopMetrics] database...'
- DROP DATABASE HadoopMetrics
-END
-GO
-
-Print N'Creating [dbo].[HadoopMetrics] database...'
-CREATE DATABASE [HadoopMetrics]
-GO
+sqlcmd -S localhost\SQLEXPRESS -i C:\app\ambari-server-1.3.0-SNAPSHOT\resources\Hadoop-Metrics-SQLServer-CREATE.sql
+*/
-USE [HadoopMetrics]
+USE [$(METRICSDBNAME)]
GO
SET QUOTED_IDENTIFIER ON;
@@ -281,8 +272,8 @@ BEGIN
PRINT N'Creating [dbo].[RecordType]...';
CREATE TABLE [dbo].[RecordType] (
[RecordTypeID] INT IDENTITY (1, 1) NOT NULL,
- [Name] NVARCHAR (256) NOT NULL,
- [Context] NVARCHAR (256) NOT NULL,
+ [Name] NVARCHAR (225) NOT NULL,
+ [Context] NVARCHAR (225) NOT NULL,
PRIMARY KEY CLUSTERED ([RecordTypeID] ASC)
);
END
@@ -301,7 +292,7 @@ BEGIN
PRINT N'Creating [dbo].[TagSet]...';
CREATE TABLE [dbo].[TagSet] (
[TagSetID] INT IDENTITY (1, 1) NOT NULL,
- [TagPairs] NVARCHAR (512) NOT NULL,
+ [TagPairs] NVARCHAR (450) NOT NULL,
PRIMARY KEY CLUSTERED ([TagSetID] ASC)
);
END
@@ -315,7 +306,7 @@ BEGIN
END
GO
-IF NOT EXISTS (SELECT name FROM sys.foreign_keys WHERE name = N'FK_CompletedJob_TagSet_TagSetID')
+IF NOT EXISTS (SELECT name FROM sys.foreign_keys WHERE name = N'FK_CompletedJob_TagSet_TagSetID')
BEGIN
PRINT N'Creating FK_CompletedJob_TagSet_TagSetID...';
ALTER TABLE [dbo].[CompletedJob] WITH NOCHECK
@@ -367,7 +358,7 @@ IF NOT EXISTS(SELECT name FROM sys.objects WHERE name = N'uspInsertMetricValue'
BEGIN
PRINT N'Creating [dbo].[uspInsertMetricValue]...';
exec('CREATE PROCEDURE [dbo].[uspInsertMetricValue]
- @recordID bigint,
+ @recordID bigint,
@metricName nvarchar(256),
@metricValue nvarchar(512)
AS
@@ -404,7 +395,7 @@ BEGIN
PRINT N'Creating [dbo].[uspUpdateHeartBeats]...';
exec('CREATE PROCEDURE [dbo].[uspUpdateHeartBeats]
@NodeID int,
- @SourceIP nvarchar(256),
+ @SourceIP nvarchar(256),
@NameNodeLast datetime,
@JobTrackerLast datetime,
@DataNodeLast datetime,
@@ -444,7 +435,7 @@ BEGIN
PRINT N'Creating [dbo].[uspGetMetricRecord]...';
exec('CREATE PROCEDURE [dbo].[uspGetMetricRecord]
@recordTypeContext nvarchar(256),
- @recordTypeName nvarchar(256),
+ @recordTypeName nvarchar(256),
@nodeName nvarchar(256),
@sourceIP nvarchar(256),
@clusterNodeName nvarchar(256),
@@ -463,7 +454,7 @@ BEGIN
DECLARE @serviceID int
DECLARE @err int
DECLARE @recordIDCutoff bigint
-
+
BEGIN TRANSACTION;
SELECT @recordTypeID = RecordTypeID FROM RecordType WHERE Context = @recordTypeContext AND Name = @recordTypeName;
IF @recordTypeID IS NULL
@@ -473,12 +464,12 @@ BEGIN
IF @err <> 0 GOTO Abort;
END
COMMIT TRANSACTION;
-
+
BEGIN TRANSACTION;
SELECT @serviceID = serviceID FROM Service WHERE Name = @serviceName;
IF @serviceID IS NULL
BEGIN
- INSERT INTO Service (Name) VALUES (@serviceName);
+ INSERT INTO Service (Name) VALUES (@serviceName);
SELECT @err = @@ERROR, @serviceID = SCOPE_IDENTITY();
IF @err <> 0 GOTO Abort;
END
@@ -486,16 +477,16 @@ BEGIN
BEGIN TRANSACTION;
SELECT @nodeID = NodeID FROM Node WHERE Name = @nodeName;
-
+
IF @nodeID IS NULL
BEGIN
-
+
/* Start with a node type of uninitialized. HealthNode will determine node type based on metrics delivered over time. */
- INSERT INTO Node (Name, LastKnownIP) VALUES (@nodeName, @sourceIP);
+ INSERT INTO Node (Name, LastKnownIP) VALUES (@nodeName, @sourceIP);
SELECT @err = @@ERROR, @nodeID = SCOPE_IDENTITY();
IF @err <> 0 GOTO Abort;
END
-
+
COMMIT TRANSACTION;
-- Do our best to determine the cluster node ID based on completely flakey input from user which might be an IP address, a non-FQDN,
@@ -508,7 +499,7 @@ BEGIN
SELECT TOP 1 @clusterNodeID = NodeID from Node WHERE LastKnownIP = @clusterNodeName ORDER BY LastNameNodeHeartBeat DESC;
IF @clusterNodeID IS NULL
BEGIN
- INSERT INTO Node (Name, LastKnownIP) VALUES (@clusterNodeName, @sourceIP);
+ INSERT INTO Node (Name, LastKnownIP) VALUES (@clusterNodeName, @sourceIP);
SELECT @err = @@ERROR, @clusterNodeID = SCOPE_IDENTITY();
IF @err <> 0 GOTO Abort;
END
@@ -520,14 +511,14 @@ BEGIN
SELECT @clusterNodeID = NodeID FROM Node WHERE Name = @clusterNodeName;
IF @clusterNodeID IS NULL
BEGIN
- INSERT INTO Node (Name, LastKnownIP) VALUES (@clusterNodeName, @sourceIP);
+ INSERT INTO Node (Name, LastKnownIP) VALUES (@clusterNodeName, @sourceIP);
SELECT @err = @@ERROR, @clusterNodeID = SCOPE_IDENTITY();
IF @err <> 0 GOTO Abort;
END
END
ELSE
BEGIN
- -- We have got a non-FQDN, but the NameNode might know its FQDN, so be careful! We must prefer the FQDN if we can find one.
+ -- We have got a non-FQDN, but the NameNode might know its FQDN, so be careful! We must prefer the FQDN if we can find one.
-- Sadly, yes, this could break things if we are monitoring clusters from different domains. This is now by design!
SELECT TOP 1 @clusterNodeID = NodeID FROM Node WHERE Name LIKE @clusterNodeName + ''.%'' ORDER BY LastNameNodeHeartBeat DESC;
IF @clusterNodeID IS NULL
@@ -535,7 +526,7 @@ BEGIN
SELECT @clusterNodeID = NodeID FROM Node WHERE Name = @clusterNodeName;
if @clusterNodeID IS NULL
BEGIN
- INSERT INTO Node (Name, LastKnownIP) VALUES (@clusterNodeName, @sourceIP);
+ INSERT INTO Node (Name, LastKnownIP) VALUES (@clusterNodeName, @sourceIP);
SELECT @err = @@ERROR, @clusterNodeID = SCOPE_IDENTITY();
IF @err <> 0 GOTO Abort;
END
@@ -600,7 +591,7 @@ BEGIN
PRINT N'Creating [dbo].[ufnIsIPAddress]...';
exec('CREATE FUNCTION [dbo].[ufnIsIPAddress]
(
- @inputString nvarchar(max)
+ @inputString nvarchar(max)
)
RETURNS BIT
AS
@@ -610,7 +601,7 @@ BEGIN
DECLARE @count int = 0;
if (LEN(@inputString) = 0) RETURN 0;
-
+
SELECT @nextPos = CHARINDEX(''.'', @inputString, @currentPos);
WHILE (@nextPos < LEN(@inputString) AND @count < 4)
@@ -657,7 +648,7 @@ BEGIN
DECLARE @ErrorSeverity INT;
DECLARE @ErrorState INT;
- SELECT
+ SELECT
@ErrorMessage = ERROR_MESSAGE(),
@ErrorSeverity = ERROR_SEVERITY(),
@ErrorState = ERROR_STATE();
@@ -678,30 +669,30 @@ BEGIN
@noOfDays bigint
AS
BEGIN
-
- IF @noOfDays IS NULL OR @noOfDays < 1
+
+ IF @noOfDays IS NULL OR @noOfDays < 1
BEGIN
RAISERROR(''INVALID_ARGUMENT'', 15, 1)
RETURN
END;
-
+
DECLARE @recordIDCutOff BIGINT
SELECT @recordIDCutoff = MAX(RecordID) FROM MetricRecord WHERE DateDiff(day, RecordDate, CURRENT_TIMESTAMP) >= @noOfDays
-
+
IF @recordIDCutoff IS NOT NULL
BEGIN
BEGIN TRY
BEGIN TRANSACTION
-
+
DELETE FROM MetricPair WHERE RecordID <= @recordIDCutoff
- DELETE FROM MetricRecord WHERE RecordID <= @recordIDCutoff
-
+ DELETE FROM MetricRecord WHERE RecordID <= @recordIDCutoff
+
IF @@TRANCOUNT > 0
BEGIN
COMMIT TRANSACTION
END
-
+
END TRY
BEGIN CATCH
IF @@TRANCOUNT > 0
@@ -718,39 +709,39 @@ BEGIN
END');
END
-IF NOT EXISTS(SELECT name FROM sys.objects WHERE name = N'ufGetMetrics' and type_desc = N'SQL_STORED_PROCEDURE')
+IF NOT EXISTS(SELECT name FROM sys.objects WHERE name = N'ufGetMetrics' and type_desc = N'SQL_INLINE_TABLE_VALUED_FUNCTION')
BEGIN
PRINT N'Creating [dbo].[ufGetMetrics]...';
-exec( 'CREATE FUNCTION dbo.ufGetMetrics
- (@startTimeStamp bigint,
- @endTimeStamp bigint,
+ exec('CREATE FUNCTION dbo.ufGetMetrics
+ (@startTimeStamp bigint,
+ @endTimeStamp bigint,
@recordTypeContext NVARCHAR(256),
- @recordTypeName NVARCHAR(256),
- @metricName NVARCHAR(256),
+ @recordTypeName NVARCHAR(256),
+ @metricName NVARCHAR(256),
@serviceComponentName NVARCHAR(256),
@nodeName NVARCHAR(256)
- )
+ )
RETURNS TABLE --(MetricTimeStamp bigint, MetricValue NVARCHAR(512))
AS
RETURN
(
SELECT s.RecordTimeStamp AS RecordTimeStamp,
- mp.MetricValue AS MetricValue
+ mp.MetricValue AS MetricValue
FROM MetricPair mp
- INNER JOIN (SELECT mr.RecordID AS RecordID,
+ INNER JOIN (SELECT mr.RecordID AS RecordID,
mr.RecordTimeStamp AS RecordTimeStamp
- FROM MetricRecord mr
- INNER JOIN RecordType rt ON (mr.RecordTypeId = rt.RecordTypeId)
+ FROM MetricRecord mr
+ INNER JOIN RecordType rt ON (mr.RecordTypeId = rt.RecordTypeId)
INNER JOIN Node nd ON (mr.NodeID = nd.NodeID)
INNER JOIN Service sr ON (mr.ServiceID = sr.ServiceID)
- WHERE rt.Context = @recordTypeContext
+ WHERE rt.Context = @recordTypeContext
AND rt.Name = @recordTypeName
AND (nd.Name = @nodeName)
- AND (sr.Name = @serviceComponentName)
- AND mr.RecordTimestamp >= @startTimeStamp
+ AND (sr.Name = @serviceComponentName)
+ AND mr.RecordTimestamp >= @startTimeStamp
AND mr.RecordTimestamp <= @endTimeStamp
) s ON (mp.RecordID = s.RecordID)
- INNER JOIN MetricName mn ON (mp.MetricID = mn.MetricID)
+ INNER JOIN MetricName mn ON (mp.MetricID = mn.MetricID)
WHERE (mn.Name = @metricName)
)'
)
@@ -758,15 +749,15 @@ END
GO
-IF NOT EXISTS(SELECT name FROM sys.objects WHERE name = N'ufGetAggregatedServiceMetrics' and type_desc = N'SQL_STORED_PROCEDURE')
+IF NOT EXISTS(SELECT name FROM sys.objects WHERE name = N'ufGetAggregatedServiceMetrics' and type_desc = N'SQL_INLINE_TABLE_VALUED_FUNCTION')
BEGIN
PRINT N'Creating [dbo].[ufGetAggregatedServiceMetrics]...';
exec( 'CREATE FUNCTION [dbo].[ufGetAggregatedServiceMetrics]
- (@startTimeStamp bigint,
- @endTimeStamp bigint,
+ (@startTimeStamp bigint,
+ @endTimeStamp bigint,
@recordTypeContext NVARCHAR(256),
- @recordTypeName NVARCHAR(256),
- @metricName NVARCHAR(256),
+ @recordTypeName NVARCHAR(256),
+ @metricName NVARCHAR(256),
@serviceComponentName NVARCHAR(256),
@period integer
)
@@ -780,8 +771,8 @@ BEGIN
INNER JOIN RecordType rt ON (rt.RecordTypeID = mr.RecordTypeID)
INNER JOIN MetricName mn ON (mn.MetricID = mp.MetricID)
INNER JOIN Service sr ON (sr.ServiceID = mr.ServiceID)
- WHERE mr.RecordTimestamp >= @startTimeStamp
- AND mr.RecordTimestamp <= @endTimeStamp
+ WHERE mr.RecordTimestamp >= @startTimeStamp
+ AND mr.RecordTimestamp <= @endTimeStamp
AND mn.Name = @metricName
AND rt.Context = @recordTypeContext
AND rt.Name = @recordTypeName
diff --git a/contrib/ambari-scom/metrics-sink/db/Hadoop-Metrics-SQLServer-CREATELOCAL.sql b/contrib/ambari-scom/metrics-sink/db/Hadoop-Metrics-SQLServer-CREATELOCAL.sql
new file mode 100644
index 0000000000..65218bb5fb
--- /dev/null
+++ b/contrib/ambari-scom/metrics-sink/db/Hadoop-Metrics-SQLServer-CREATELOCAL.sql
@@ -0,0 +1,140 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Deployment script for $(METRICSDBNAME)
+
+Use this script in sqlcmd mode, with a series of environment variables like this:
+set METRICSDBNAME=HadoopMetrics
+set METRICSDBLOGNAME=HadoopMetrics_log
+
+set METRICSDBPATH=C:\Program Files\Microsoft SQL Server\MSSQL12.SQLEXPRESS\MSSQL\DATA\HadoopMetrics.mdf
+set METRICSDBLOGPATH=C:\Program Files\Microsoft SQL Server\MSSQL12.SQLEXPRESS\MSSQL\DATA\HadoopMetrics_log.ldf
+
+set METRICSDBOWNER=hadoop
+
+sqlcmd -S localhost\SQLEXPRESS -i C:\app\ambari-server-1.3.0-SNAPSHOT\resources\Hadoop-Metrics-SQLServer-CREATELOCAL.sql
+*/
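+
+/*
+Equivalently, the scripting variables can be passed on the sqlcmd command line
+with -v instead of environment variables. An illustrative invocation (adjust
+the instance name, and add the remaining path/owner variables as above):
+
+sqlcmd -S localhost\SQLEXPRESS -v METRICSDBNAME=HadoopMetrics METRICSDBLOGNAME=HadoopMetrics_log -i Hadoop-Metrics-SQLServer-CREATELOCAL.sql
+*/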
+
+USE [master]
+GO
+
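+-- Recreate from scratch: SINGLE_USER WITH ROLLBACK IMMEDIATE disconnects any
+-- open sessions first, so the DROP cannot be blocked by existing connections.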
+IF db_id('$(METRICSDBNAME)') IS NOT NULL
+BEGIN
+ PRINT N'Dropping [dbo].[$(METRICSDBNAME)] database...'
+ ALTER DATABASE [$(METRICSDBNAME)] SET SINGLE_USER WITH ROLLBACK IMMEDIATE
+ DROP DATABASE [$(METRICSDBNAME)]
+END
+GO
+
+PRINT N'Creating [dbo].[$(METRICSDBNAME)] database at $(METRICSDBPATH)...'
+
+/****** Object: Database [$(METRICSDBNAME)] Script Date: 9/11/2014 3:58:17 PM ******/
+CREATE DATABASE [$(METRICSDBNAME)]
+ CONTAINMENT = NONE
+ ON PRIMARY
+ (NAME = N'$(METRICSDBNAME)', FILENAME = N'$(METRICSDBPATH)' , SIZE = 3264KB , MAXSIZE = UNLIMITED, FILEGROWTH = 1024KB)
+ LOG ON
+ (NAME = N'$(METRICSDBLOGNAME)', FILENAME = N'$(METRICSDBLOGPATH)' , SIZE = 832KB , MAXSIZE = 2048GB , FILEGROWTH = 10%)
+GO
+
+ALTER DATABASE [$(METRICSDBNAME)] SET COMPATIBILITY_LEVEL = 120
+GO
+IF (1 = FULLTEXTSERVICEPROPERTY('IsFullTextInstalled'))
+BEGIN
+ EXEC [$(METRICSDBNAME)].[dbo].[sp_fulltext_database] @action = 'enable'
+END
+GO
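+-- The option settings below are what SSMS typically scripts for a new database;
+-- the notable choices are RECOVERY SIMPLE and AUTO_CLOSE ON, which keep a small
+-- local metrics store low-maintenance.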
+ALTER DATABASE [$(METRICSDBNAME)] SET ANSI_NULL_DEFAULT OFF
+GO
+ALTER DATABASE [$(METRICSDBNAME)] SET ANSI_NULLS OFF
+GO
+ALTER DATABASE [$(METRICSDBNAME)] SET ANSI_PADDING OFF
+GO
+ALTER DATABASE [$(METRICSDBNAME)] SET ANSI_WARNINGS OFF
+GO
+ALTER DATABASE [$(METRICSDBNAME)] SET ARITHABORT OFF
+GO
+ALTER DATABASE [$(METRICSDBNAME)] SET AUTO_CLOSE ON
+GO
+ALTER DATABASE [$(METRICSDBNAME)] SET AUTO_SHRINK OFF
+GO
+ALTER DATABASE [$(METRICSDBNAME)] SET AUTO_UPDATE_STATISTICS ON
+GO
+ALTER DATABASE [$(METRICSDBNAME)] SET CURSOR_CLOSE_ON_COMMIT OFF
+GO
+ALTER DATABASE [$(METRICSDBNAME)] SET CURSOR_DEFAULT GLOBAL
+GO
+ALTER DATABASE [$(METRICSDBNAME)] SET CONCAT_NULL_YIELDS_NULL OFF
+GO
+ALTER DATABASE [$(METRICSDBNAME)] SET NUMERIC_ROUNDABORT OFF
+GO
+ALTER DATABASE [$(METRICSDBNAME)] SET QUOTED_IDENTIFIER OFF
+GO
+ALTER DATABASE [$(METRICSDBNAME)] SET RECURSIVE_TRIGGERS OFF
+GO
+ALTER DATABASE [$(METRICSDBNAME)] SET ENABLE_BROKER
+GO
+ALTER DATABASE [$(METRICSDBNAME)] SET AUTO_UPDATE_STATISTICS_ASYNC OFF
+GO
+ALTER DATABASE [$(METRICSDBNAME)] SET DATE_CORRELATION_OPTIMIZATION OFF
+GO
+ALTER DATABASE [$(METRICSDBNAME)] SET TRUSTWORTHY OFF
+GO
+ALTER DATABASE [$(METRICSDBNAME)] SET ALLOW_SNAPSHOT_ISOLATION OFF
+GO
+ALTER DATABASE [$(METRICSDBNAME)] SET PARAMETERIZATION SIMPLE
+GO
+ALTER DATABASE [$(METRICSDBNAME)] SET READ_COMMITTED_SNAPSHOT OFF
+GO
+ALTER DATABASE [$(METRICSDBNAME)] SET HONOR_BROKER_PRIORITY OFF
+GO
+ALTER DATABASE [$(METRICSDBNAME)] SET RECOVERY SIMPLE
+GO
+ALTER DATABASE [$(METRICSDBNAME)] SET MULTI_USER
+GO
+ALTER DATABASE [$(METRICSDBNAME)] SET PAGE_VERIFY CHECKSUM
+GO
+ALTER DATABASE [$(METRICSDBNAME)] SET DB_CHAINING OFF
+GO
+ALTER DATABASE [$(METRICSDBNAME)] SET FILESTREAM( NON_TRANSACTED_ACCESS = OFF )
+GO
+ALTER DATABASE [$(METRICSDBNAME)] SET TARGET_RECOVERY_TIME = 0 SECONDS
+GO
+ALTER DATABASE [$(METRICSDBNAME)] SET DELAYED_DURABILITY = DISABLED
+GO
+ALTER DATABASE [$(METRICSDBNAME)] SET READ_WRITE
+GO
+
+USE [$(METRICSDBNAME)]
+GO
+
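+-- Make PRIMARY the default filegroup when it is not already (a no-op on a
+-- freshly created database, where PRIMARY is already the default).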
+IF NOT EXISTS (SELECT name FROM sys.filegroups WHERE is_default=1 AND name = N'PRIMARY') ALTER DATABASE [$(METRICSDBNAME)] MODIFY FILEGROUP [PRIMARY] DEFAULT
+GO
+
+ALTER authorization on DATABASE::$(METRICSDBNAME) to [NT AUTHORITY\SYSTEM]
+GO
+
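+-- Map the $(METRICSDBOWNER) login to a database user and grant it db_owner,
+-- but only when that login already exists on the instance.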
+IF EXISTS (SELECT 1 FROM master.sys.syslogins WHERE name = '$(METRICSDBOWNER)')
+BEGIN
+ CREATE USER [$(METRICSDBOWNER)] FOR LOGIN [$(METRICSDBOWNER)]
+ ALTER ROLE [db_owner] ADD MEMBER [$(METRICSDBOWNER)]
+END
+GO
+
+PRINT N'[dbo].[$(METRICSDBNAME)] database created.'
diff --git a/contrib/ambari-scom/metrics-sink/db/Hadoop-Metrics-SQLServer-DROP.sql b/contrib/ambari-scom/metrics-sink/db/Hadoop-Metrics-SQLServer-DROP.sql
new file mode 100644
index 0000000000..7266bc0083
--- /dev/null
+++ b/contrib/ambari-scom/metrics-sink/db/Hadoop-Metrics-SQLServer-DROP.sql
@@ -0,0 +1,161 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Schema purge script for $(METRICSDBNAME)
+
+Use this script in sqlcmd mode, after first setting the environment variable:
+set METRICSDBNAME=HadoopMetrics
+
+sqlcmd -S localhost\SQLEXPRESS -i C:\app\ambari-server-1.3.0-SNAPSHOT\resources\Hadoop-Metrics-SQLServer-DROP.sql
+*/
+
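+-- If $(METRICSDBNAME) does not exist, the USE below fails immediately and no
+-- objects are dropped.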
+USE [$(METRICSDBNAME)]
+GO
+
+SET QUOTED_IDENTIFIER ON;
+GO
+
+IF EXISTS(SELECT name FROM sys.objects WHERE name = N'ufGetAggregatedServiceMetrics')
+BEGIN
+ PRINT N'Dropping [dbo].[ufGetAggregatedServiceMetrics]...';
+ exec('DROP FUNCTION [dbo].[ufGetAggregatedServiceMetrics]')
+END
+GO
+
+IF EXISTS(SELECT name FROM sys.objects WHERE name = N'ufGetMetrics')
+BEGIN
+ PRINT N'Dropping [dbo].[ufGetMetrics]...';
+  exec('DROP FUNCTION [dbo].[ufGetMetrics]')
+END
+GO
+
+IF EXISTS(SELECT name FROM sys.objects WHERE name = N'uspPurgeMetrics')
+BEGIN
+  -- (uspPurgeMetrics purges metrics older than @noOfDays)
+ PRINT N'Dropping [dbo].[uspPurgeMetrics]...';
+ exec('DROP PROCEDURE [dbo].[uspPurgeMetrics]')
+END
+GO
+
+IF EXISTS(SELECT name FROM sys.objects WHERE name = N'RethrowError')
+BEGIN
+  PRINT N'Dropping [dbo].[RethrowError]...';
+ exec('DROP PROCEDURE [dbo].[RethrowError]')
+END
+GO
+
+IF EXISTS(SELECT name FROM sys.objects WHERE name = N'ufnIsIPAddress')
+BEGIN
+ PRINT N'Dropping [dbo].[ufnIsIPAddress]...';
+ exec('DROP FUNCTION [dbo].[ufnIsIPAddress]')
+END
+GO
+
+IF EXISTS(SELECT name FROM sys.objects WHERE name = N'uspGetMetricRecord')
+BEGIN
+ PRINT N'Dropping [dbo].[uspGetMetricRecord]...';
+ exec('DROP PROCEDURE [dbo].[uspGetMetricRecord]')
+END
+GO
+
+IF EXISTS(SELECT name FROM sys.objects WHERE name = N'uspUpdateHeartBeats')
+BEGIN
+ PRINT N'Dropping [dbo].[uspUpdateHeartBeats]...';
+ exec('DROP PROCEDURE [dbo].[uspUpdateHeartBeats]')
+END
+GO
+
+IF EXISTS(SELECT name FROM sys.objects WHERE name = N'uspInsertMetricValue')
+BEGIN
+ PRINT N'Dropping [dbo].[uspInsertMetricValue]...';
+ exec('DROP PROCEDURE [dbo].[uspInsertMetricValue]')
+END
+GO
+
+-- Dropping the tables
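+-- Each table is dropped only if it exists, so the script can be re-run safely.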
+
+IF EXISTS(SELECT name FROM sys.objects WHERE name = N'CompletedJob')
+BEGIN
+ PRINT N'Dropping [dbo].[CompletedJob]...';
+ DROP TABLE [dbo].[CompletedJob]
+END
+GO
+
+IF EXISTS(SELECT name FROM sys.objects WHERE name = N'Service')
+BEGIN
+ PRINT N'Dropping [dbo].[Service]...';
+ DROP TABLE [dbo].[Service]
+END
+GO
+
+IF EXISTS(SELECT name FROM sys.objects WHERE name = N'MetricPair')
+BEGIN
+ PRINT N'Dropping [dbo].[MetricPair]...';
+ DROP TABLE [dbo].[MetricPair]
+END
+GO
+
+IF EXISTS(SELECT name FROM sys.objects WHERE name = N'MetricRecord')
+BEGIN
+ PRINT N'Dropping [dbo].[MetricRecord]...';
+ DROP TABLE [dbo].[MetricRecord]
+END
+GO
+
+IF EXISTS(SELECT name FROM sys.objects WHERE name = N'MetricName')
+BEGIN
+ PRINT N'Dropping [dbo].[MetricName]...';
+ DROP TABLE [dbo].[MetricName]
+END
+GO
+
+IF EXISTS(SELECT name FROM sys.objects WHERE name = N'RecordType')
+BEGIN
+ PRINT N'Dropping [dbo].[RecordType]...';
+ DROP TABLE [dbo].[RecordType]
+END
+GO
+
+IF EXISTS(SELECT name FROM sys.objects WHERE name = N'TagSet')
+BEGIN
+ PRINT N'Dropping [dbo].[TagSet]...';
+ DROP TABLE [dbo].[TagSet]
+END
+GO
+
+IF EXISTS(SELECT name FROM sys.objects WHERE name = N'Node')
+BEGIN
+ PRINT N'Dropping [dbo].[Node]...';
+ DROP TABLE [dbo].[Node]
+END
+GO
+
+IF EXISTS(SELECT name FROM sys.objects WHERE name = N'DatabaseVersion')
+BEGIN
+ PRINT N'Dropping [dbo].[DatabaseVersion]...';
+ DROP TABLE [dbo].[DatabaseVersion]
+END
+GO
+
+IF EXISTS(SELECT name FROM sys.objects WHERE name = N'Configuration')
+BEGIN
+ PRINT N'Dropping [dbo].[Configuration]...';
+ DROP TABLE [dbo].[Configuration]
+END
+GO
diff --git a/contrib/views/jobs/pom.xml b/contrib/views/jobs/pom.xml
index 5677c097b7..59ebf5298b 100644
--- a/contrib/views/jobs/pom.xml
+++ b/contrib/views/jobs/pom.xml
@@ -98,7 +98,7 @@
</goals>
<configuration>
<workingDirectory>${basedir}/src/main/resources/ui</workingDirectory>
- <executable>node/node</executable>
+ <executable>${basedir}/src/main/resources/ui/node/${executable.node}</executable>
<arguments>
<argument>node_modules/bower/bin/bower</argument>
<argument>install</argument>
@@ -114,9 +114,9 @@
</goals>
<configuration>
<workingDirectory>${basedir}/src/main/resources/ui</workingDirectory>
- <executable>node/node</executable>
+ <executable>${basedir}/src/main/resources/ui/node/${executable.node}</executable>
<arguments>
- <argument>node_modules/.bin/grunt</argument>
+ <argument>node_modules/grunt-cli/bin/grunt</argument>
<argument>build</argument>
</arguments>
</configuration>
@@ -175,7 +175,7 @@
<artifactId>ambari-views</artifactId>
<version>${ambari.version}</version>
</dependency>
-
+
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>
@@ -189,4 +189,30 @@
</dependency>
</dependencies>
+ <profiles>
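+    <!-- OS-activated profiles: ${executable.node} resolves to the platform-specific
+         node binary name (node on Linux, node.exe on Windows) used by the exec steps above. -->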
+ <profile>
+ <id>windows</id>
+ <activation>
+ <os>
+ <family>win</family>
+ </os>
+ </activation>
+ <properties>
+ <envClassifier>win</envClassifier>
+ <executable.node>node.exe</executable.node>
+ </properties>
+ </profile>
+ <profile>
+ <id>linux</id>
+ <activation>
+ <os>
+ <family>unix</family>
+ </os>
+ </activation>
+ <properties>
+ <envClassifier>linux</envClassifier>
+ <executable.node>node</executable.node>
+ </properties>
+ </profile>
+ </profiles>
 </project>
\ No newline at end of file
diff --git a/contrib/views/slider/pom.xml b/contrib/views/slider/pom.xml
index 16e975045a..355e4b423e 100644
--- a/contrib/views/slider/pom.xml
+++ b/contrib/views/slider/pom.xml
@@ -109,6 +109,7 @@
<dependency>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-views</artifactId>
+ <version>${ambari.version}</version>
<scope>provided</scope>
</dependency>
<dependency>
@@ -448,13 +449,14 @@
</goals>
<configuration>
<target name="slider-ui-clean">
- <exec dir="${ui.directory}" executable="rm" failonerror="false">
- <arg value="-rf" />
- <arg value="public" />
- </exec>
- <exec dir="${ui.directory}" executable="mkdir" failonerror="false">
- <arg value="public" />
- </exec>
+ <exec dir="${ui.directory}" executable="${executable.rmdir}">
+ <arg value="${args.rm.clean}"/>
+ <arg value="public"/>
+ <arg value="node_modules"/>
+ </exec>
+ <exec dir="${ui.directory}" executable="${executable.mkdir}">
+ <arg value="${args.mkdir.clean}"/>
+ </exec>
</target>
</configuration>
</execution>
@@ -466,19 +468,18 @@
</goals>
<configuration>
<target name="slider-ui-compile">
- <exec dir="${ui.directory}" executable="npm" failonerror="false">
- <env key="PYTHON" value="${project.basedir}/../src/main/unix/ambari-python-wrap" />
- <arg value="install" />
- </exec>
- <exec dir="${ui.directory}" executable="brunch"
- failonerror="false">
- <arg value="build" />
- </exec>
- <exec dir="${ui.directory}" executable="gzip" failonerror="false">
- <arg value="public/javascripts/app.js" />
- <arg value="public/javascripts/vendor.js" />
- <arg value="public/stylesheets/app.css" />
- </exec>
+ <exec dir="${ui.directory}" executable="${executable.npm}" failonerror="true">
+ <arg value="${args.npm.install}"/>
+ </exec>
+ <exec dir="${ui.directory}" executable="${executable.brunch}" failonerror="true">
+ <arg value="${args.brunch.build}"/>
+ </exec>
+ <exec dir="${ui.directory}" executable="${executable.gzip}" failonerror="false">
+ <arg value="-f"/>
+ <arg value="public/javascripts/app.js"/>
+ <arg value="public/javascripts/vendor.js"/>
+ <arg value="public/stylesheets/app.css"/>
+ </exec>
</target>
</configuration>
</execution>
@@ -631,4 +632,54 @@
</plugins>
</pluginManagement>
</build>
+ <profiles>
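+    <!-- Platform profiles: on Windows each tool is routed through "cmd /C ..." because
+         npm and brunch are typically .cmd shims there; on Linux the tools run directly. -->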
+ <profile>
+ <id>windows</id>
+ <activation>
+ <os>
+ <family>win</family>
+ </os>
+ </activation>
+ <properties>
+ <envClassifier>win</envClassifier>
+ <dirsep>\</dirsep>
+ <executable.brunch>cmd</executable.brunch>
+ <args.brunch.build>/C brunch build</args.brunch.build>
+ <executable.gzip>gzip</executable.gzip>
+ <executable.mkdir>cmd</executable.mkdir>
+ <args.mkdir.clean>/C mkdir public</args.mkdir.clean>
+ <executable.npm>cmd</executable.npm>
+ <args.npm.install>/C npm install</args.npm.install>
+        <args.npm.test>/C npm test</args.npm.test>
+ <executable.rmdir>cmd</executable.rmdir>
+ <args.rm.clean>/C rmdir /S /Q</args.rm.clean>
+ <executable.shell>cmd</executable.shell>
+ <fileextension.shell>cmd</fileextension.shell>
+ </properties>
+ </profile>
+ <profile>
+ <id>linux</id>
+ <activation>
+ <os>
+ <family>unix</family>
+ </os>
+ </activation>
+ <properties>
+ <envClassifier>linux</envClassifier>
+ <dirsep>/</dirsep>
+ <executable.brunch>brunch</executable.brunch>
+ <args.brunch.build>build</args.brunch.build>
+ <executable.gzip>gzip</executable.gzip>
+ <executable.mkdir>mkdir</executable.mkdir>
+ <args.mkdir.clean>public</args.mkdir.clean>
+ <executable.npm>npm</executable.npm>
+ <args.npm.install>install</args.npm.install>
+ <args.npm.test>test</args.npm.test>
+ <executable.rmdir>rm</executable.rmdir>
+ <args.rm.clean>-rf</args.rm.clean>
+ <executable.shell>sh</executable.shell>
+ <fileextension.shell>sh</fileextension.shell>
+ </properties>
+ </profile>
+ </profiles>
</project>
diff --git a/pom.xml b/pom.xml
index d31876f401..9a41d03929 100644
--- a/pom.xml
+++ b/pom.xml
@@ -108,7 +108,7 @@
<configuration>
<name>package-release</name>
<value>${project.version}</value>
- <regex>^([0-9]+)\.([0-9]+)\.([0-9]+)(\.|-)(([0-9]+)|(SNAPSHOT)).*</regex>
+ <regex>^([0-9]+)\.([0-9]+)\.([0-9]+)(\.|-)(([0-9]+)|(SNAPSHOT)|(techwin)).*</regex>
<replacement>$5</replacement>
<failIfNoMatch>true</failIfNoMatch>
</configuration>
@@ -183,7 +183,7 @@
<exclude>contrib/ambari-scom/msi/src/GUI_Ambari.sln</exclude>
<exclude>version</exclude>
<!--IDE and GIT files-->
- <exclude>.idea/</exclude>
+ <exclude>**/.idea/</exclude>
<exclude>.git/</exclude>
<exclude>.pydevproject</exclude>
<exclude>**/.gitignore</exclude>
@@ -193,7 +193,7 @@
<exclude>**/.editorconfig</exclude>
<!--gitignore content-->
<exclude>.DS_Store</exclude>
- <exclude>.iml/</exclude>
+ <exclude>**/*.iml</exclude>
<exclude>.classpath</exclude>
<exclude>.project</exclude>
<exclude>.settings</exclude>
@@ -247,6 +247,11 @@
<exclude>contrib/views/*/.project</exclude>
<exclude>contrib/views/*/.settings/**</exclude>
+ <exclude>contrib/ambari-scom/ambari-scom-server/pass.txt</exclude>
+ <exclude>contrib/ambari-scom/ambari-scom-server/target/**</exclude>
+ <exclude>contrib/ambari-scom/*/rat.txt</exclude>
+ <exclude>contrib/ambari-scom/metrics-sink/target/**</exclude>
+
<!--Velocity log -->
<exclude>**/velocity.log*</exclude>
</excludes>
@@ -298,15 +303,18 @@
</build>
<profiles>
<profile>
- <id>default</id>
+ <id>default-views</id>
<activation>
- <activeByDefault>true</activeByDefault>
+ <property>
+ <name>views</name>
+ </property>
</activation>
<modules>
<module>ambari-web</module>
<module>ambari-project</module>
<module>ambari-views</module>
<module>ambari-admin</module>
+ <module>contrib/views</module>
<module>ambari-server</module>
<module>ambari-agent</module>
<module>ambari-client</module>
@@ -314,18 +322,29 @@
</modules>
</profile>
<profile>
- <id>default-views</id>
+ <id>static-web</id>
+ <modules>
+ <module>ambari-project</module>
+ <module>ambari-views</module>
+ <module>ambari-admin</module>
+ <module>ambari-server</module>
+ <module>ambari-agent</module>
+ <module>ambari-client</module>
+ <module>ambari-shell</module>
+ </modules>
+ </profile>
+ <profile>
+ <id>linux</id>
<activation>
- <property>
- <name>views</name>
- </property>
+ <os>
+ <family>unix</family>
+ </os>
</activation>
<modules>
<module>ambari-web</module>
<module>ambari-project</module>
<module>ambari-views</module>
<module>ambari-admin</module>
- <module>contrib/views</module>
<module>ambari-server</module>
<module>ambari-agent</module>
<module>ambari-client</module>
@@ -333,11 +352,18 @@
</modules>
</profile>
<profile>
- <id>static-web</id>
+ <id>windows</id>
+ <activation>
+ <os>
+ <family>win</family>
+ </os>
+ </activation>
<modules>
+ <module>ambari-web</module>
<module>ambari-project</module>
<module>ambari-views</module>
<module>ambari-admin</module>
+ <module>contrib/ambari-scom/metrics-sink</module>
<module>ambari-server</module>
<module>ambari-agent</module>
<module>ambari-client</module>