summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSumit Mohanty <smohanty@hortonworks.com>2016-10-19 21:11:58 -0700
committerSumit Mohanty <smohanty@hortonworks.com>2016-10-19 21:11:58 -0700
commit68c22944c5ea14ab3c6eae51305a49a20d8a472b (patch)
treed5497c51c4e396610b86da36e57263ca2825a70a
parent67d415dfacc52ae2e6b129dd9a881ad12387904c (diff)
AMBARI-17489. Remove spark.yarn.max.executor.failures configuration in Spark Ambari definition (Saisai Shao via smohanty)
-rw-r--r--ambari-server/src/main/resources/common-services/SPARK/1.6.0/configuration/spark-defaults.xml5
-rw-r--r--ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml2
-rw-r--r--ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml.orig478
-rw-r--r--ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml22
-rw-r--r--ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml.orig349
-rw-r--r--ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml20
-rw-r--r--ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml.orig1262
-rw-r--r--ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml.rej17
-rw-r--r--ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml10
-rw-r--r--ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml.orig1087
-rw-r--r--ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml.rej34
11 files changed, 3278 insertions, 8 deletions
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.6.0/configuration/spark-defaults.xml b/ambari-server/src/main/resources/common-services/SPARK/1.6.0/configuration/spark-defaults.xml
index 6465b72bbc..0bcf10f39a 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/1.6.0/configuration/spark-defaults.xml
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.6.0/configuration/spark-defaults.xml
@@ -25,6 +25,11 @@
<on-ambari-upgrade add="false"/>
</property>
<property>
+ <name>spark.yarn.max.executor.failures</name>
+ <deleted>true</deleted>
+ <on-ambari-upgrade add="false"/>
+ </property>
+ <property>
<name>spark.history.provider</name>
<value>org.apache.spark.deploy.history.FsHistoryProvider</value>
<description>Name of history provider class</description>
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
index cdbe319dcc..b63115bb69 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
@@ -370,6 +370,7 @@
<transfer operation="delete" delete-key="spark.yarn.services" />
<transfer operation="delete" delete-key="spark.driver.extraJavaOptions" />
<transfer operation="delete" delete-key="spark.yarn.am.extraJavaOptions" />
+ <transfer operation="delete" delete-key="spark.yarn.max.executor.failures" />
<set key="spark.history.provider" value="org.apache.spark.deploy.history.FsHistoryProvider"/>
</definition>
<definition xsi:type="configure" id="hdp_2_4_0_0_spark_java_opts">
@@ -384,6 +385,7 @@
<type>spark-defaults</type>
<transfer operation="delete" delete-key="spark.driver.extraJavaOptions" />
<transfer operation="delete" delete-key="spark.yarn.am.extraJavaOptions" />
+ <transfer operation="delete" delete-key="spark.yarn.max.executor.failures" />
</definition>
<definition xsi:type="configure" id="hdp_2_4_0_0_spark_java_opts">
<type>spark-javaopts-properties</type>
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml.orig b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml.orig
new file mode 100644
index 0000000000..e0a95da216
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml.orig
@@ -0,0 +1,478 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<upgrade-config-changes xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+
+ <services>
+ <service name="HBASE">
+ <component name="HBASE_MASTER">
+ <changes>
+ <definition xsi:type="configure" id="hdp_2_3_4_0_hbase_remove_local_indexing">
+ <type>hbase-site</type>
+ <set key="phoenix.functions.allowUserDefinedFunctions" value="true"/>
+ <transfer operation="delete" delete-key="hbase.master.loadbalancer.class"
+ if-key="hbase.master.loadbalancer.class"
+ if-type="hbase-site"
+ if-value="org.apache.phoenix.hbase.index.balancer.IndexLoadBalancer"/>
+ <replace key="hbase.coprocessor.master.classes"
+ find="org.apache.phoenix.hbase.index.master.IndexMasterObserver"
+ replace-with=""/>
+ <replace key="hbase.coprocessor.regionserver.classes"
+ find="org.apache.hadoop.hbase.regionserver.LocalIndexMerger"
+ replace-with=""/>
+
+ </definition>
+ <!-- These HBASE configs changed in HDP 2.3.4.0, so upgrades like HDP 2.2 to 2.4 still need them. -->
+ <definition xsi:type="configure" id="hdp_2_4_0_0_hbase_remove_local_indexing">
+ <type>hbase-site</type>
+ <set key="phoenix.functions.allowUserDefinedFunctions" value="true"/>
+ <transfer operation="delete" delete-key="hbase.master.loadbalancer.class"
+ if-key="hbase.master.loadbalancer.class"
+ if-type="hbase-site"
+ if-value="org.apache.phoenix.hbase.index.balancer.IndexLoadBalancer"/>
+ <replace key="hbase.coprocessor.master.classes"
+ find="org.apache.phoenix.hbase.index.master.IndexMasterObserver"
+ replace-with="" />
+ <replace key="hbase.coprocessor.regionserver.classes"
+ find="org.apache.hadoop.hbase.regionserver.LocalIndexMerger"
+ replace-with="" />
+
+ </definition>
+
+ <definition xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hbase_audit_db">
+ <type>ranger-hbase-audit</type>
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.jdbc.url" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.user" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.password" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.jdbc.driver" />
+ <transfer operation="delete" delete-key="xasecure.audit.credential.provider.file" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.batch.filespool.dir" />
+ </definition>
+
+ </changes>
+ </component>
+ </service>
+
+ <service name="TEZ">
+ <component name="TEZ_CLIENT">
+ <changes>
+ <definition xsi:type="configure" id="hdp_2_3_0_0_tez_client_adjust_tez_lib_uris_property">
+ <type>tez-site</type>
+ <set key="tez.lib.uris" value="/hdp/apps/${hdp.version}/tez/tez.tar.gz"/>
+ </definition>
+ <definition xsi:type="configure" id="hdp_2_4_0_0_tez_client_adjust_tez_lib_uris_property">
+ <type>tez-site</type>
+ <set key="tez.lib.uris" value="/hdp/apps/${hdp.version}/tez/tez.tar.gz"/>
+ </definition>
+ <definition xsi:type="configure" id="hdp_2_5_0_0_tez_client_adjust_tez_lib_uris_property">
+ <type>tez-site</type>
+ <set key="tez.lib.uris" value="/hdp/apps/${hdp.version}/tez/tez.tar.gz"/>
+ </definition>
+ </changes>
+ </component>
+ </service>
+
+ <service name="SQOOP">
+ <component name="SQOOP">
+ <changes>
+ <!-- All of these configs are present in Atlas' application.properties file instead and then copied to the hook's atlas-application.properties file. -->
+ <definition xsi:type="configure" id="hdp_2_5_0_0_remove_sqoop_atlas_configs">
+ <type>sqoop-site</type>
+ <transfer operation="delete" delete-key="atlas.cluster.name" />
+ </definition>
+
+ <!-- Add these configs if the cluster is Kerberized.
+ Will only be written to the local file system if Atlas is present. -->
+ <definition xsi:type="configure" id="hdp_2_5_0_0_add_sqoop_atlas_security_configs">
+ <condition type="cluster-env" key="security_enabled" value="true">
+ <type>sqoop-atlas-application.properties</type>
+ <key>atlas.jaas.KafkaClient.option.useTicketCache</key>
+ <value>true</value>
+ </condition>
+ <condition type="cluster-env" key="security_enabled" value="true">
+ <type>sqoop-atlas-application.properties</type>
+ <key>atlas.jaas.KafkaClient.option.renewTicket</key>
+ <value>true</value>
+ </condition>
+ </definition>
+ </changes>
+ </component>
+ </service>
+
+ <service name="HIVE">
+ <component name="HIVE_SERVER">
+ <changes>
+ <definition xsi:type="configure" id="hdp_2_4_0_0_hive_server_configure_authentication" summary="Removing unused properties for current hive authentication type">
+ <type>hive-site</type>
+ <transfer operation="delete" delete-key="hive.server2.authentication.ldap.url" if-key="hive.server2.authentication" if-type="hive-site" if-value="NONE"/>
+ <transfer operation="delete" delete-key="hive.server2.authentication.ldap.baseDN" if-key="hive.server2.authentication" if-type="hive-site" if-value="NONE"/>
+ <transfer operation="delete" delete-key="hive.server2.authentication.pam.services" if-key="hive.server2.authentication" if-type="hive-site" if-value="NONE"/>
+ <transfer operation="delete" delete-key="hive.server2.custom.authentication.class" if-key="hive.server2.authentication" if-type="hive-site" if-value="NONE"/>
+ <transfer operation="delete" delete-key="hive.server2.authentication.kerberos.keytab" if-key="hive.server2.authentication" if-type="hive-site" if-value="NONE"/>
+ <transfer operation="delete" delete-key="hive.server2.authentication.kerberos.principal" if-key="hive.server2.authentication" if-type="hive-site" if-value="NONE"/>
+
+ <transfer operation="delete" delete-key="hive.server2.authentication.kerberos.keytab" if-key="hive.server2.authentication" if-type="hive-site" if-value="ldap"/>
+ <transfer operation="delete" delete-key="hive.server2.authentication.kerberos.principal" if-key="hive.server2.authentication" if-type="hive-site" if-value="ldap"/>
+ <transfer operation="delete" delete-key="hive.server2.authentication.pam.services" if-key="hive.server2.authentication" if-type="hive-site" if-value="ldap"/>
+ <transfer operation="delete" delete-key="hive.server2.custom.authentication.class" if-key="hive.server2.authentication" if-type="hive-site" if-value="ldap"/>
+
+ <transfer operation="delete" delete-key="hive.server2.authentication.ldap.url" if-key="hive.server2.authentication" if-type="hive-site" if-value="kerberos"/>
+ <transfer operation="delete" delete-key="hive.server2.authentication.ldap.baseDN" if-key="hive.server2.authentication" if-type="hive-site" if-value="kerberos"/>
+ <transfer operation="delete" delete-key="hive.server2.authentication.pam.services" if-key="hive.server2.authentication" if-type="hive-site" if-value="kerberos"/>
+ <transfer operation="delete" delete-key="hive.server2.custom.authentication.class" if-key="hive.server2.authentication" if-type="hive-site" if-value="kerberos"/>
+
+ <transfer operation="delete" delete-key="hive.server2.authentication.ldap.url" if-key="hive.server2.authentication" if-type="hive-site" if-value="pam"/>
+ <transfer operation="delete" delete-key="hive.server2.authentication.ldap.baseDN" if-key="hive.server2.authentication" if-type="hive-site" if-value="pam"/>
+ <transfer operation="delete" delete-key="hive.server2.custom.authentication.class" if-key="hive.server2.authentication" if-type="hive-site" if-value="pam"/>
+ <transfer operation="delete" delete-key="hive.server2.authentication.kerberos.keytab" if-key="hive.server2.authentication" if-type="hive-site" if-value="pam"/>
+ <transfer operation="delete" delete-key="hive.server2.authentication.kerberos.principal" if-key="hive.server2.authentication" if-type="hive-site" if-value="pam"/>
+
+ <transfer operation="delete" delete-key="hive.server2.authentication.ldap.url" if-key="hive.server2.authentication" if-type="hive-site" if-value="custom"/>
+ <transfer operation="delete" delete-key="hive.server2.authentication.ldap.baseDN" if-key="hive.server2.authentication" if-type="hive-site" if-value="custom"/>
+ <transfer operation="delete" delete-key="hive.server2.authentication.pam.services" if-key="hive.server2.authentication" if-type="hive-site" if-value="custom"/>
+ <transfer operation="delete" delete-key="hive.server2.authentication.kerberos.keytab" if-key="hive.server2.authentication" if-type="hive-site" if-value="custom"/>
+ <transfer operation="delete" delete-key="hive.server2.authentication.kerberos.principal" if-key="hive.server2.authentication" if-type="hive-site" if-value="custom"/>
+ </definition>
+
+ <definition xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hive_audit_db">
+ <type>ranger-hive-audit</type>
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.jdbc.url" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.user" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.password" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.jdbc.driver" />
+ <transfer operation="delete" delete-key="xasecure.audit.credential.provider.file" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.batch.filespool.dir" />
+ </definition>
+
+ <!-- All of these configs are present in Atlas' application.properties file instead and then copied to the hook's atlas-application.properties file. -->
+ <definition xsi:type="configure" id="hdp_2_5_0_0_remove_hive_atlas_configs">
+ <type>hive-site</type>
+ <transfer operation="delete" delete-key="atlas.rest.address" />
+ <transfer operation="delete" delete-key="atlas.hook.hive.minThreads" />
+ <transfer operation="delete" delete-key="atlas.hook.hive.maxThreads" />
+ </definition>
+
+ </changes>
+ </component>
+ <component name="WEBHCAT_SERVER">
+ <changes>
+ <definition xsi:type="configure" id="hdp_2_4_0_0_webhcat_server_update_configuration_paths" summary="Updating Configuration Paths">
+ <type>webhcat-site</type>
+ <replace key="templeton.jar" find="/usr/hdp/current/hive-webhcat" replace-with="/usr/hdp/${hdp.version}/hive"/>
+ <replace key="templeton.libjars" find="/usr/hdp/current/zookeeper-client" replace-with="/usr/hdp/${hdp.version}/zookeeper,/usr/hdp/${hdp.version}/hive/lib/hive-common.jar"/>
+ <replace key="templeton.hadoop" find="/usr/hdp/current/hadoop-client" replace-with="/usr/hdp/${hdp.version}/hadoop"/>
+ <replace key="templeton.hcat" find="/usr/hdp/current/hive-client" replace-with="/usr/hdp/${hdp.version}/hive"/>
+ <set key="templeton.hive.extra.files" value="/usr/hdp/${hdp.version}/tez/conf/tez-site.xml,/usr/hdp/${hdp.version}/tez,/usr/hdp/${hdp.version}/tez/lib"/>
+ </definition>
+
+ <definition xsi:type="configure" id="hdp_2_5_0_0_webhcat_server_update_configuration_paths" summary="Updating Configuration Paths">
+ <type>webhcat-site</type>
+ <replace key="templeton.jar" find="/usr/hdp/current/hive-webhcat" replace-with="/usr/hdp/${hdp.version}/hive"/>
+ <replace key="templeton.libjars" find="/usr/hdp/current/zookeeper-client" replace-with="/usr/hdp/${hdp.version}/zookeeper,/usr/hdp/${hdp.version}/hive/lib/hive-common.jar"/>
+ <replace key="templeton.hadoop" find="/usr/hdp/current/hadoop-client" replace-with="/usr/hdp/${hdp.version}/hadoop"/>
+ <replace key="templeton.hcat" find="/usr/hdp/current/hive-client" replace-with="/usr/hdp/${hdp.version}/hive"/>
+ <set key="templeton.hive.extra.files" value="/usr/hdp/${hdp.version}/tez/conf/tez-site.xml,/usr/hdp/${hdp.version}/tez,/usr/hdp/${hdp.version}/tez/lib"/>
+ </definition>
+ </changes>
+ </component>
+ </service>
+
+ <service name="RANGER">
+ <component name="RANGER_ADMIN">
+ <changes>
+
+ <definition xsi:type="configure" id="hdp_2_5_0_0_remove_audit_db_flag">
+ <type>ranger-env</type>
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db"/>
+ </definition>
+
+ <definition xsi:type="configure" id="hdp_2_5_0_0_remove_audit_db_admin_properties">
+ <type>admin-properties</type>
+ <transfer operation="delete" delete-key="audit_db_name" />
+ <transfer operation="delete" delete-key="audit_db_user" />
+ <transfer operation="delete" delete-key="audit_db_password" />
+ </definition>
+
+ <definition xsi:type="configure" id="hdp_2_5_0_0_remove_audit_db_ranger_admin_site">
+ <type>ranger-admin-site</type>
+ <set key="ranger.audit.source.type" value="solr"/>
+ <transfer operation="delete" delete-key="ranger.jpa.audit.jdbc.driver" />
+ <transfer operation="delete" delete-key="ranger.jpa.audit.jdbc.url" />
+ <transfer operation="delete" delete-key="ranger.jpa.audit.jdbc.user" />
+ <transfer operation="delete" delete-key="ranger.jpa.audit.jdbc.password" />
+ <transfer operation="delete" delete-key="ranger.jpa.audit.jdbc.credential.alias" />
+ <transfer operation="delete" delete-key="ranger.jpa.audit.jdbc.dialect" />
+ </definition>
+
+ <definition xsi:type="configure" id="hdp_2_5_0_0_remove_sso_property">
+ <type>ranger-admin-site</type>
+ <transfer operation="delete" delete-key="ranger.sso.cookiename" />
+ <transfer operation="delete" delete-key="ranger.sso.query.param.originalurl" />
+ </definition>
+
+ <definition xsi:type="configure" id="hdp_2_5_0_0_set_external_solrCloud_flag">
+ <condition type="ranger-env" key="is_solrCloud_enabled" value="true">
+ <type>ranger-env</type>
+ <key>is_external_solrCloud_enabled</key>
+ <value>true</value>
+ </condition>
+ </definition>
+
+ </changes>
+ </component>
+ </service>
+
+ <service name="RANGER_KMS">
+ <component name="RANGER_KMS_SERVER">
+ <changes>
+ <definition xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_kms_audit_db">
+ <type>ranger-kms-audit</type>
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.jdbc.url" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.user" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.password" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.jdbc.driver" />
+ <transfer operation="delete" delete-key="xasecure.audit.credential.provider.file" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.batch.filespool.dir" />
+ </definition>
+ </changes>
+ </component>
+ </service>
+
+ <service name="HDFS">
+ <component name="NAMENODE">
+ <changes>
+ <definition xsi:type="configure" id="hdp_2_4_0_0_namenode_ha_adjustments">
+ <type>hdfs-site</type>
+ <transfer operation="delete" delete-key="dfs.namenode.rpc-address" if-type="hdfs-site" if-key="dfs.nameservices" if-key-state="present"/>
+ <transfer operation="delete" delete-key="dfs.namenode.http-address" if-type="hdfs-site" if-key="dfs.nameservices" if-key-state="present"/>
+ <transfer operation="delete" delete-key="dfs.namenode.https-address" if-type="hdfs-site" if-key="dfs.nameservices" if-key-state="present"/>
+ </definition>
+
+ <definition xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hdfs_audit_db">
+ <type>ranger-hdfs-audit</type>
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.jdbc.url" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.user" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.password" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.jdbc.driver" />
+ <transfer operation="delete" delete-key="xasecure.audit.credential.provider.file" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.batch.filespool.dir" />
+ </definition>
+ </changes>
+ </component>
+ </service>
+
+ <service name="SPARK">
+ <component name="SPARK_JOBHISTORYSERVER">
+ <changes>
+ <definition xsi:type="configure" id="hdp_2_4_0_0_spark_jobhistoryserver">
+ <type>spark-defaults</type>
+ <transfer operation="delete" delete-key="spark.yarn.services" />
+ <transfer operation="delete" delete-key="spark.driver.extraJavaOptions" />
+ <transfer operation="delete" delete-key="spark.yarn.am.extraJavaOptions" />
+ <set key="spark.history.provider" value="org.apache.spark.deploy.history.FsHistoryProvider"/>
+ </definition>
+ <definition xsi:type="configure" id="hdp_2_4_0_0_spark_java_opts">
+ <type>spark-javaopts-properties</type>
+ <transfer operation="delete" delete-key="content" />
+ </definition>
+ </changes>
+ </component>
+ <component name="SPARK_CLIENT">
+ <changes>
+ <definition xsi:type="configure" id="hdp_2_4_0_0_remove_spark_properties_extraJavaOptions">
+ <type>spark-defaults</type>
+ <transfer operation="delete" delete-key="spark.driver.extraJavaOptions" />
+ <transfer operation="delete" delete-key="spark.yarn.am.extraJavaOptions" />
+ </definition>
+ <definition xsi:type="configure" id="hdp_2_4_0_0_spark_java_opts">
+ <type>spark-javaopts-properties</type>
+ <transfer operation="delete" delete-key="content" />
+ </definition>
+ </changes>
+ </component>
+ <component name="SPARK_THRIFTSERVER">
+ <changes>
+ <definition xsi:type="configure" id="hdp_2_4_0_0_spark_thriftserver">
+ <type>spark-thrift-sparkconf</type>
+ <transfer operation="delete" delete-key="spark.yarn.executor.memoryOverhead" />
+ <transfer operation="delete" delete-key="spark.yarn.driver.memoryOverhead" />
+ <transfer operation="delete" delete-key="spark.yarn.scheduler.heartbeat.interval-ms" />
+ <transfer operation="delete" delete-key="spark.yarn.max.executor.failures" />
+ <transfer operation="delete" delete-key="spark.yarn.containerLauncherMaxThreads" />
+ <transfer operation="delete" delete-key="spark.yarn.submit.file.replication" />
+ <transfer operation="delete" delete-key="spark.yarn.preserve.staging.files" />
+ <transfer operation="delete" delete-key="spark.yarn.max.executor.failures" />
+ <transfer operation="delete" delete-key="spark.driver.extraJavaOptions" />
+ <transfer operation="delete" delete-key="spark.yarn.am.extraJavaOptions" />
+ </definition>
+ <definition xsi:type="configure" id="hdp_2_4_0_0_spark_java_opts">
+ <type>spark-javaopts-properties</type>
+ <transfer operation="delete" delete-key="content" />
+ </definition>
+ </changes>
+ </component>
+ </service>
+
+ <service name="OOZIE">
+ <component name="OOZIE_SERVER">
+ <changes>
+ <definition xsi:type="configure" id="hdp_2_4_0_0_oozie_remove_service_classes" summary="Updating Oozie Service classes">
+ <type>oozie-site</type>
+ <replace key="oozie.services" find="org.apache.oozie.service.CoordinatorStoreService," replace-with="" />
+ </definition>
+ </changes>
+ </component>
+ </service>
+
+ <service name="KAFKA">
+ <component name="KAFKA_BROKER">
+ <changes>
+ <definition xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_kafka_audit_db">
+ <type>ranger-kafka-audit</type>
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.jdbc.url" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.user" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.password" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.jdbc.driver" />
+ <transfer operation="delete" delete-key="xasecure.audit.credential.provider.file" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.batch.filespool.dir" />
+ </definition>
+ <definition xsi:type="configure" id="hdp_2_5_0_0_add_protocol_compatibility">
+ <type>kafka-broker</type>
+ <set key="inter.broker.protocol.version" value="0.9.0.0" />
+ <set key="log.message.format.version" value="0.9.0.0" />
+ </definition>
+ </changes>
+ </component>
+ </service>
+
+ <service name="YARN">
+ <component name="RESOURCEMANAGER">
+ <changes>
+ <definition xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_yarn_audit_db">
+ <type>ranger-yarn-audit</type>
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.jdbc.url" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.user" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.password" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.jdbc.driver" />
+ <transfer operation="delete" delete-key="xasecure.audit.credential.provider.file" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.batch.filespool.dir" />
+ </definition>
+ </changes>
+ </component>
+ </service>
+
+ <service name="STORM">
+ <component name="NIMBUS">
+ <changes>
+ <definition xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_storm_audit_db">
+ <type>ranger-storm-audit</type>
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.jdbc.url" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.user" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.password" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.jdbc.driver" />
+ <transfer operation="delete" delete-key="xasecure.audit.credential.provider.file" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.batch.filespool.dir" />
+ </definition>
+
+ <definition xsi:type="configure" id="hdp_2_5_0_0_upgrade_storm_1.0">
+ <type>storm-site</type>
+ <replace key="_storm.thrift.nonsecure.transport" find="backtype.storm.security.auth.SimpleTransportPlugin"
+ replace-with="org.apache.storm.security.auth.SimpleTransportPlugin" />
+ <replace key="_storm.thrift.secure.transport" find="backtype.storm.security.auth.KerberosSaslTransportPlugin"
+ replace-with="org.apache.storm.security.auth.KerberosSaslTransportPlugin" />
+ <replace key="storm.messaging.transport" find="backtype.storm.messaging.netty.Context"
+ replace-with="org.apache.storm.messaging.netty.Context" />
+ <replace key="nimbus.topology.validator" find="backtype.storm.nimbus.DefaultTopologyValidator"
+ replace-with="org.apache.storm.nimbus.DefaultTopologyValidator" />
+ <replace key="topology.spout.wait.strategy" find="backtype.storm.spout.SleepSpoutWaitStrategy"
+ replace-with="org.apache.storm.spout.SleepSpoutWaitStrategy" />
+ <replace key="topology.kryo.factory" find="backtype.storm.serialization.DefaultKryoFactory"
+ replace-with="org.apache.storm.serialization.DefaultKryoFactory" />
+ <replace key="topology.tuple.serializer" find="backtype.storm.serialization.types.ListDelegateSerializer"
+ replace-with="org.apache.storm.serialization.types.ListDelegateSerializer" />
+ <replace key="nimbus.authorizer" find="backtype.storm.security.auth.authorizer.SimpleACLAuthorizer"
+ replace-with="org.apache.storm.security.auth.authorizer.SimpleACLAuthorizer" />
+ <replace key="drpc.authorizer" find="backtype.storm.security.auth.authorizer.DRPCSimpleACLAuthorizer"
+ replace-with="org.apache.storm.security.auth.authorizer.DRPCSimpleACLAuthorizer" />
+ <replace key="ui.filter" find="backtype.storm.security.auth.KerberosPrincipalToLocal"
+ replace-with="org.apache.storm.security.auth.KerberosPrincipalToLocal" />
+ <replace key="storm.principal.tolocal" find="backtype.storm.security.auth.KerberosPrincipalToLocal"
+ replace-with="org.apache.storm.security.auth.KerberosPrincipalToLocal" />
+ <set key="client.jartransformer.class" value="org.apache.storm.hack.StormShadeTransformer" />
+ </definition>
+
+ <definition xsi:type="configure" id="hdp_2_5_0_0_add_storm_security_configs">
+ <type>storm-site</type>
+ <set key="nimbus.impersonation.authorizer" value="org.apache.storm.security.auth.authorizer.ImpersonationAuthorizer" if-type="cluster-env" if-key="security_enabled" if-value="true" />
+ <set key="nimbus.impersonation.acl" value="{ {{storm_bare_jaas_principal}} : {hosts: ['*'], groups: ['*']}}" if-type="cluster-env" if-key="security_enabled" if-value="true" />
+ <set key="nimbus.admins" value="['{{storm_bare_jaas_principal}}', '{{ambari_bare_jaas_principal}}']" if-type="cluster-env" if-key="security_enabled" if-value="true" />
+ </definition>
+
+ <definition xsi:type="configure" id="hdp_2_3_0_0_remove_empty_storm_topology_submission_notifier_plugin_class"
+ summary="Removing empty storm.topology.submission.notifier.plugin.class property">
+ <type>storm-site</type>
+ <transfer operation="delete" delete-key="storm.topology.submission.notifier.plugin.class" if-key="storm.topology.submission.notifier.plugin.class"
+ if-type="storm-site" if-value=" "/>
+ </definition>
+
+ <!-- All of these configs are present in Atlas' application.properties file instead and then copied to the hook's atlas-application.properties file. -->
+ <definition xsi:type="configure" id="hdp_2_5_0_0_remove_storm_atlas_configs">
+ <type>storm-site</type>
+ <transfer operation="delete" delete-key="atlas.cluster.name" />
+ </definition>
+ </changes>
+ </component>
+ </service>
+
+ <service name="KNOX">
+ <component name="KNOX_GATEWAY">
+ <changes>
+ <definition xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_knox_audit_db">
+ <type>ranger-knox-audit</type>
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.jdbc.url" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.user" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.password" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.jdbc.driver" />
+ <transfer operation="delete" delete-key="xasecure.audit.credential.provider.file" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.batch.filespool.dir" />
+ </definition>
+ </changes>
+ </component>
+ </service>
+
+ <service name="FALCON">
+ <component name="FALCON_SERVER">
+ <changes>
+ <definition xsi:type="configure" id="hdp_2_5_0_0_falcon_server_adjust_services_property">
+ <type>falcon-startup.properties</type>
+ <set key="*.application.services" value="org.apache.falcon.security.AuthenticationInitializationService, org.apache.falcon.workflow.WorkflowJobEndNotificationService, org.apache.falcon.service.ProcessSubscriberService, org.apache.falcon.extensions.ExtensionService, org.apache.falcon.service.LifecyclePolicyMap, org.apache.falcon.entity.store.ConfigurationStore, org.apache.falcon.rerun.service.RetryService, org.apache.falcon.rerun.service.LateRunService, org.apache.falcon.service.LogCleanupService, org.apache.falcon.metadata.MetadataMappingService{{atlas_application_class_addition}}"/>
+ </definition>
+ </changes>
+ </component>
+ </service>
+ </services>
+
+</upgrade-config-changes>
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
index 9045443742..8a9996c9ae 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
@@ -282,7 +282,7 @@
<transfer operation="delete" delete-key="xasecure.audit.destination.db.batch.filespool.dir" />
</definition>
-
+
<definition xsi:type="configure" id="hdp_2_5_0_0_upgrade_storm_1.0">
<type>storm-site</type>
<replace key="_storm.thrift.nonsecure.transport" find="backtype.storm.security.auth.SimpleTransportPlugin"
@@ -381,6 +381,26 @@
</changes>
</component>
</service>
+
+ <service name="SPARK">
+ <component name="SPARK_JOBHISTORYSERVER">
+ <changes>
+ <definition xsi:type="configure" id="hdp_2_5_0_0_spark_jobhistoryserver">
+ <type>spark-defaults</type>
+ <transfer operation="delete" delete-key="spark.yarn.max.executor.failures" />
+ </definition>
+ </changes>
+ </component>
+ <component name="SPARK_CLIENT">
+ <changes>
+ <definition xsi:type="configure" id="hdp_2_5_0_0_spark_client">
+ <type>spark-defaults</type>
+ <transfer operation="delete" delete-key="spark.yarn.max.executor.failures" />
+ </definition>
+ </changes>
+ </component>
+ </service>
+
</services>
</upgrade-config-changes>
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml.orig b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml.orig
new file mode 100644
index 0000000000..898150d30c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml.orig
@@ -0,0 +1,349 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<upgrade-config-changes xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+
+ <services>
+
+ <service name="TEZ">
+ <component name="TEZ_CLIENT">
+ <changes>
+ <definition xsi:type="configure" id="hdp_2_4_0_0_tez_client_adjust_tez_lib_uris_property">
+ <type>tez-site</type>
+ <set key="tez.lib.uris" value="/hdp/apps/${hdp.version}/tez/tez.tar.gz"/>
+ </definition>
+
+ <definition xsi:type="configure" id="hdp_2_5_0_0_tez_client_adjust_tez_lib_uris_property">
+ <type>tez-site</type>
+ <set key="tez.lib.uris" value="/hdp/apps/${hdp.version}/tez/tez.tar.gz"/>
+ </definition>
+
+ </changes>
+ </component>
+ </service>
+
+ <service name="SQOOP">
+ <component name="SQOOP">
+ <changes>
+ <definition xsi:type="configure" id="hdp_2_5_0_0_remove_sqoop_atlas_configs">
+ <type>sqoop-site</type>
+ <transfer operation="delete" delete-key="atlas.cluster.name" />
+ </definition>
+
+ <!-- Add these configs if the cluster is Kerberized.
+ Will only be written to the local file system if Atlas is present. -->
+ <definition xsi:type="configure" id="hdp_2_5_0_0_add_sqoop_atlas_security_configs">
+ <condition type="cluster-env" key="security_enabled" value="true">
+ <type>sqoop-atlas-application.properties</type>
+ <key>atlas.jaas.KafkaClient.option.useTicketCache</key>
+ <value>true</value>
+ </condition>
+ <condition type="cluster-env" key="security_enabled" value="true">
+ <type>sqoop-atlas-application.properties</type>
+ <key>atlas.jaas.KafkaClient.option.renewTicket</key>
+ <value>true</value>
+ </condition>
+ </definition>
+ </changes>
+ </component>
+ </service>
+
+ <service name="HIVE">
+ <component name="HIVE_SERVER">
+ <changes>
+ <definition xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hive_audit_db">
+ <type>ranger-hive-audit</type>
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.jdbc.url" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.user" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.password" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.jdbc.driver" />
+ <transfer operation="delete" delete-key="xasecure.audit.credential.provider.file" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.batch.filespool.dir" />
+ </definition>
+
+ <definition xsi:type="configure" id="hdp_2_5_0_0_remove_hive_atlas_configs">
+ <type>hive-site</type>
+ <transfer operation="delete" delete-key="atlas.rest.address" />
+ <transfer operation="delete" delete-key="atlas.hook.hive.minThreads" />
+ <transfer operation="delete" delete-key="atlas.hook.hive.maxThreads" />
+ </definition>
+
+ </changes>
+ </component>
+
+ <component name="WEBHCAT_SERVER">
+ <changes>
+ <definition xsi:type="configure" id="hdp_2_4_0_0_webhcat_server_update_configuration_paths" summary="Updating Configuration Paths">
+ <type>webhcat-site</type>
+ <replace key="templeton.jar" find="/usr/hdp/current/hive-webhcat" replace-with="/usr/hdp/${hdp.version}/hive"/>
+ <replace key="templeton.libjars" find="/usr/hdp/current/zookeeper-client" replace-with="/usr/hdp/${hdp.version}/zookeeper,/usr/hdp/${hdp.version}/hive/lib/hive-common.jar"/>
+ <replace key="templeton.hadoop" find="/usr/hdp/current/hadoop-client" replace-with="/usr/hdp/${hdp.version}/hadoop"/>
+ <replace key="templeton.hcat" find="/usr/hdp/current/hive-client" replace-with="/usr/hdp/${hdp.version}/hive"/>
+ <set key="templeton.hive.extra.files" value="/usr/hdp/${hdp.version}/tez/conf/tez-site.xml,/usr/hdp/${hdp.version}/tez,/usr/hdp/${hdp.version}/tez/lib"/>
+ </definition>
+ </changes>
+ </component>
+ </service>
+
+ <service name="RANGER">
+ <component name="RANGER_ADMIN">
+ <changes>
+
+ <definition xsi:type="configure" id="hdp_2_5_0_0_remove_audit_db_flag">
+ <type>ranger-env</type>
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db"/>
+ </definition>
+
+ <definition xsi:type="configure" id="hdp_2_5_0_0_remove_audit_db_admin_properties">
+ <type>admin-properties</type>
+ <transfer operation="delete" delete-key="audit_db_name" />
+ <transfer operation="delete" delete-key="audit_db_user" />
+ <transfer operation="delete" delete-key="audit_db_password" />
+ </definition>
+
+ <definition xsi:type="configure" id="hdp_2_5_0_0_remove_audit_db_ranger_admin_site">
+ <type>ranger-admin-site</type>
+ <set key="ranger.audit.source.type" value="solr"/>
+ <transfer operation="delete" delete-key="ranger.jpa.audit.jdbc.driver" />
+ <transfer operation="delete" delete-key="ranger.jpa.audit.jdbc.url" />
+ <transfer operation="delete" delete-key="ranger.jpa.audit.jdbc.user" />
+ <transfer operation="delete" delete-key="ranger.jpa.audit.jdbc.password" />
+ <transfer operation="delete" delete-key="ranger.jpa.audit.jdbc.credential.alias" />
+ <transfer operation="delete" delete-key="ranger.jpa.audit.jdbc.dialect" />
+ </definition>
+
+ <definition xsi:type="configure" id="hdp_2_5_0_0_remove_sso_property">
+ <type>ranger-admin-site</type>
+ <transfer operation="delete" delete-key="ranger.sso.cookiename" />
+ <transfer operation="delete" delete-key="ranger.sso.query.param.originalurl" />
+ </definition>
+
+ <definition xsi:type="configure" id="hdp_2_5_0_0_set_external_solrCloud_flag">
+ <condition type="ranger-env" key="is_solrCloud_enabled" value="true">
+ <type>ranger-env</type>
+ <key>is_external_solrCloud_enabled</key>
+ <value>true</value>
+ </condition>
+ </definition>
+
+ </changes>
+ </component>
+ </service>
+
+ <service name="RANGER_KMS">
+ <component name="RANGER_KMS_SERVER">
+ <changes>
+ <definition xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_kms_audit_db">
+ <type>ranger-kms-audit</type>
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.jdbc.url" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.user" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.password" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.jdbc.driver" />
+ <transfer operation="delete" delete-key="xasecure.audit.credential.provider.file" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.batch.filespool.dir" />
+ </definition>
+ </changes>
+ </component>
+ </service>
+
+ <service name="HDFS">
+ <component name="NAMENODE">
+ <changes>
+ <definition xsi:type="configure" id="hdp_2_5_0_0_namenode_ha_adjustments">
+ <type>hdfs-site</type>
+ <transfer operation="delete" delete-key="dfs.namenode.rpc-address" if-type="hdfs-site" if-key="dfs.nameservices" if-key-state="present"/>
+ <transfer operation="delete" delete-key="dfs.namenode.http-address" if-type="hdfs-site" if-key="dfs.nameservices" if-key-state="present"/>
+ <transfer operation="delete" delete-key="dfs.namenode.https-address" if-type="hdfs-site" if-key="dfs.nameservices" if-key-state="present"/>
+ </definition>
+ <definition xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hdfs_audit_db">
+ <type>ranger-hdfs-audit</type>
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.jdbc.url" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.user" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.password" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.jdbc.driver" />
+ <transfer operation="delete" delete-key="xasecure.audit.credential.provider.file" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.batch.filespool.dir" />
+ </definition>
+ </changes>
+ </component>
+ </service>
+
+ <service name="YARN">
+ <component name="RESOURCEMANAGER">
+ <changes>
+ <definition xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_yarn_audit_db">
+ <type>ranger-yarn-audit</type>
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.jdbc.url" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.user" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.password" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.jdbc.driver" />
+ <transfer operation="delete" delete-key="xasecure.audit.credential.provider.file" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.batch.filespool.dir" />
+ </definition>
+ </changes>
+ </component>
+
+ <component name="NODEMANAGER">
+ <changes>
+ <definition xsi:type="configure" id="hdp_2_5_0_0_add_spark2_yarn_shuffle">
+ <type>yarn-site</type>
+ <set key="yarn.nodemanager.aux-services" value="mapreduce_shuffle,spark_shuffle,spark2_shuffle"/>
+ <!-- Ideally we need to append spark2_shuffle to the existing value -->
+ </definition>
+ </changes>
+ </component>
+ </service>
+
+ <service name="KAFKA">
+ <component name="KAFKA_BROKER">
+ <changes>
+ <definition xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_kafka_audit_db">
+ <type>ranger-kafka-audit</type>
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.jdbc.url" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.user" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.password" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.jdbc.driver" />
+ <transfer operation="delete" delete-key="xasecure.audit.credential.provider.file" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.batch.filespool.dir" />
+ </definition>
+ <definition xsi:type="configure" id="hdp_2_5_0_0_add_protocol_compatibility">
+ <type>kafka-broker</type>
+ <set key="inter.broker.protocol.version" value="0.9.0.0" />
+ <set key="log.message.format.version" value="0.9.0.0" />
+ </definition>
+ </changes>
+ </component>
+ </service>
+
+ <service name="STORM">
+ <component name="NIMBUS">
+ <changes>
+ <definition xsi:type="configure" id="hdp_2_4_0_0_remove_empty_storm_topology_submission_notifier_plugin_class"
+ summary="Removing empty storm.topology.submission.notifier.plugin.class property">
+ <type>storm-site</type>
+ <transfer operation="delete" delete-key="storm.topology.submission.notifier.plugin.class" if-key="storm.topology.submission.notifier.plugin.class"
+ if-type="storm-site" if-value=" "/>
+ </definition>
+
+ <definition xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_storm_audit_db">
+ <type>ranger-storm-audit</type>
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.jdbc.url" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.user" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.password" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.jdbc.driver" />
+ <transfer operation="delete" delete-key="xasecure.audit.credential.provider.file" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.batch.filespool.dir" />
+ </definition>
+
+
+ <definition xsi:type="configure" id="hdp_2_5_0_0_upgrade_storm_1.0">
+ <type>storm-site</type>
+ <replace key="_storm.thrift.nonsecure.transport" find="backtype.storm.security.auth.SimpleTransportPlugin"
+ replace-with="org.apache.storm.security.auth.SimpleTransportPlugin" />
+ <replace key="_storm.thrift.secure.transport" find="backtype.storm.security.auth.KerberosSaslTransportPlugin"
+ replace-with="org.apache.storm.security.auth.KerberosSaslTransportPlugin" />
+ <replace key="storm.messaging.transport" find="backtype.storm.messaging.netty.Context"
+ replace-with="org.apache.storm.messaging.netty.Context" />
+ <replace key="nimbus.topology.validator" find="backtype.storm.nimbus.DefaultTopologyValidator"
+ replace-with="org.apache.storm.nimbus.DefaultTopologyValidator" />
+ <replace key="topology.spout.wait.strategy" find="backtype.storm.spout.SleepSpoutWaitStrategy"
+ replace-with="org.apache.storm.spout.SleepSpoutWaitStrategy" />
+ <replace key="topology.kryo.factory" find="backtype.storm.serialization.DefaultKryoFactory"
+ replace-with="org.apache.storm.serialization.DefaultKryoFactory" />
+ <replace key="topology.tuple.serializer" find="backtype.storm.serialization.types.ListDelegateSerializer"
+ replace-with="org.apache.storm.serialization.types.ListDelegateSerializer" />
+ <replace key="nimbus.authorizer" find="backtype.storm.security.auth.authorizer.SimpleACLAuthorizer"
+ replace-with="org.apache.storm.security.auth.authorizer.SimpleACLAuthorizer" />
+ <replace key="drpc.authorizer" find="backtype.storm.security.auth.authorizer.DRPCSimpleACLAuthorizer"
+ replace-with="org.apache.storm.security.auth.authorizer.DRPCSimpleACLAuthorizer" />
+ <replace key="ui.filter" find="backtype.storm.security.auth.KerberosPrincipalToLocal"
+ replace-with="org.apache.storm.security.auth.KerberosPrincipalToLocal" />
+ <replace key="storm.principal.tolocal" find="backtype.storm.security.auth.KerberosPrincipalToLocal"
+ replace-with="org.apache.storm.security.auth.KerberosPrincipalToLocal" />
+ <set key="client.jartransformer.class" value="org.apache.storm.hack.StormShadeTransformer" />
+ </definition>
+
+ <definition xsi:type="configure" id="hdp_2_5_0_0_add_storm_security_configs">
+ <type>storm-site</type>
+ <set key="nimbus.impersonation.authorizer" value="org.apache.storm.security.auth.authorizer.ImpersonationAuthorizer" if-type="cluster-env" if-key="security_enabled" if-value="true" />
+ <set key="nimbus.impersonation.acl" value="{ {{storm_bare_jaas_principal}} : {hosts: ['*'], groups: ['*']}}" if-type="cluster-env" if-key="security_enabled" if-value="true" />
+ <set key="nimbus.admins" value="['{{storm_bare_jaas_principal}}', '{{ambari_bare_jaas_principal}}']" if-type="cluster-env" if-key="security_enabled" if-value="true" />
+ </definition>
+
+ <!-- All of these configs are present in Atlas' application.properties file instead and then copied to the hook's atlas-application.properties file. -->
+ <definition xsi:type="configure" id="hdp_2_5_0_0_remove_storm_atlas_configs">
+ <type>storm-site</type>
+ <transfer operation="delete" delete-key="atlas.cluster.name" />
+ </definition>
+ </changes>
+ </component>
+ </service>
+
+ <service name="HBASE">
+ <component name="HBASE_MASTER">
+ <changes>
+ <definition xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hbase_audit_db">
+ <type>ranger-hbase-audit</type>
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.jdbc.url" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.user" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.password" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.jdbc.driver" />
+ <transfer operation="delete" delete-key="xasecure.audit.credential.provider.file" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.batch.filespool.dir" />
+ </definition>
+ </changes>
+ </component>
+ </service>
+
+ <service name="KNOX">
+ <component name="KNOX_GATEWAY">
+ <changes>
+ <definition xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_knox_audit_db">
+ <type>ranger-knox-audit</type>
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.jdbc.url" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.user" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.password" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.jdbc.driver" />
+ <transfer operation="delete" delete-key="xasecure.audit.credential.provider.file" />
+ <transfer operation="delete" delete-key="xasecure.audit.destination.db.batch.filespool.dir" />
+ </definition>
+ </changes>
+ </component>
+ </service>
+
+ <service name="FALCON">
+ <component name="FALCON_SERVER">
+ <changes>
+ <definition xsi:type="configure" id="hdp_2_5_0_0_falcon_server_adjust_services_property">
+ <type>falcon-startup.properties</type>
+ <set key="*.application.services" value="org.apache.falcon.security.AuthenticationInitializationService, org.apache.falcon.workflow.WorkflowJobEndNotificationService, org.apache.falcon.service.ProcessSubscriberService, org.apache.falcon.extensions.ExtensionService, org.apache.falcon.service.LifecyclePolicyMap, org.apache.falcon.entity.store.ConfigurationStore, org.apache.falcon.rerun.service.RetryService, org.apache.falcon.rerun.service.LateRunService, org.apache.falcon.service.LogCleanupService, org.apache.falcon.metadata.MetadataMappingService{{atlas_application_class_addition}}"/>
+ </definition>
+ </changes>
+ </component>
+ </service>
+ </services>
+
+</upgrade-config-changes>
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml
index 3478603f17..29244559a1 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml
@@ -32,6 +32,7 @@
<check>org.apache.ambari.server.checks.AtlasPresenceCheck</check>
</prerequisite-checks>
+
<order>
<group xsi:type="cluster" name="PRE_CLUSTER" title="Prepare Upgrade">
<direction>UPGRADE</direction>
@@ -86,7 +87,7 @@
<component>ACCUMULO_MONITOR</component>
<component>ACCUMULO_MASTER</component>
</service>
-
+
<service name="STORM">
<component>DRPC_SERVER</component>
<component>STORM_UI_SERVER</component>
@@ -402,6 +403,15 @@
<execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Apply config changes for Ranger KMS Server">
<task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_kms_audit_db"/>
</execute-stage>
+
+ <!-- SPARK -->
+ <execute-stage service="SPARK" component="SPARK_JOBHISTORYSERVER" title="Apply config changes for Spark JobHistoryServer">
+ <task xsi:type="configure" id="hdp_2_5_0_0_spark_jobhistoryserver"/>
+ </execute-stage>
+ <execute-stage service="SPARK" component="SPARK_CLIENT" title="Apply config changes for Spark">
+ <task xsi:type="configure" id="hdp_2_5_0_0_spark_client"/>
+ </execute-stage>
+
</group>
<!--
@@ -588,7 +598,7 @@
<service>HBASE</service>
</priority>
</group>
-
+
<group xsi:type="restart" name="HIVE_MASTERS" title="Hive Masters">
<service-check>false</service-check>
<skippable>true</skippable>
@@ -663,7 +673,7 @@
<service>SPARK</service>
</priority>
</group>
-
+
<group xsi:type="restart" name="FALCON" title="Falcon">
<service-check>false</service-check>
<skippable>true</skippable>
@@ -727,7 +737,7 @@
<component>FLUME_HANDLER</component>
</service>
</group>
-
+
<group xsi:type="restart" name="ACCUMULO" title="Accumulo">
<service-check>false</service-check>
<skippable>true</skippable>
@@ -757,7 +767,7 @@
<group xsi:type="cluster" name="FINALIZE_PRE_CHECK" title="Finalize {{direction.text.proper}} Pre-Check">
<direction>UPGRADE</direction>
-
+
<execute-stage title="Check Component Versions">
<task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.ComponentVersionCheckAction" />
</execute-stage>
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml.orig b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml.orig
new file mode 100644
index 0000000000..a6457eae0b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml.orig
@@ -0,0 +1,1262 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+
+<upgrade xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+ <target>2.5.*.*</target>
+ <target-stack>HDP-2.5</target-stack>
+ <type>NON_ROLLING</type>
+ <prerequisite-checks>
+ <check>org.apache.ambari.server.checks.RangerAuditDbCheck</check>
+ <configuration>
+ <!-- Configuration properties for all pre-reqs including required pre-reqs -->
+ <check-properties name="org.apache.ambari.server.checks.HiveDynamicServiceDiscoveryCheck">
+ <property name="min-failure-stack-version">HDP-2.3.0.0</property>
+ </check-properties>
+ </configuration>
+
+ <check>org.apache.ambari.server.checks.AtlasPresenceCheck</check>
+ </prerequisite-checks>
+ <order>
+ <group xsi:type="cluster" name="PRE_CLUSTER" title="Prepare Upgrade">
+ <direction>UPGRADE</direction>
+
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+
+ <execute-stage service="YARN" component="RESOURCEMANAGER" title="Stop YARN Queues">
+ <task xsi:type="manual">
+ <message>Before continuing, please stop all YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
+ </task>
+ </execute-stage>
+
+ <execute-stage service="SLIDER" component="SLIDER" title="Stop Long Running Applications on Slider">
+ <task xsi:type="manual">
+ <message>Before continuing, please stop all long-running applications deployed using Slider. E.g., su - yarn "/usr/hdp/current/slider-client/bin/slider stop &lt;app_name&gt;"</message>
+ </task>
+ </execute-stage>
+
+ <execute-stage service="STORM" component="NIMBUS" title="Stop Storm Topologies">
+ <task xsi:type="manual">
+ <message>Before continuing, please deactivate and kill any currently running topologies.</message>
+ </task>
+ </execute-stage>
+
+ <execute-stage service="TEZ" component="TEZ_CLIENT" title="Check Tez Tarball">
+ <task xsi:type="execute" hosts="any">
+ <script>scripts/pre_upgrade.py</script>
+ <function>prepare</function>
+ </task>
+ </execute-stage>
+ </group>
+
+ <group xsi:type="stop" name="STOP_HIGH_LEVEL_SERVICE_COMPONENTS" title="Stop Components for High-Level Services">
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+ <parallel-scheduler/>
+ <service-check>false</service-check>
+
+ <service name="ATLAS">
+ <component>ATLAS_SERVER</component>
+ </service>
+
+ <service name="FLUME">
+ <component>FLUME_HANDLER</component>
+ </service>
+
+ <service name="ACCUMULO">
+ <component>ACCUMULO_TRACER</component>
+ <component>ACCUMULO_GC</component>
+ <component>ACCUMULO_TSERVER</component>
+ <component>ACCUMULO_MONITOR</component>
+ <component>ACCUMULO_MASTER</component>
+ </service>
+
+ <service name="STORM">
+ <component>DRPC_SERVER</component>
+ <component>STORM_UI_SERVER</component>
+ <component>SUPERVISOR</component>
+ <component>NIMBUS</component>
+ </service>
+
+ <service name="KNOX">
+ <component>KNOX_GATEWAY</component>
+ </service>
+
+ <service name="FALCON">
+ <component>FALCON_SERVER</component>
+ </service>
+
+ <service name="OOZIE">
+ <component>OOZIE_SERVER</component>
+ </service>
+
+ <service name="SPARK">
+ <component>SPARK_JOBHISTORYSERVER</component>
+ <component>SPARK_THRIFTSERVER</component>
+ </service>
+
+ <service name="HIVE">
+ <component>WEBHCAT_SERVER</component>
+ <component>HIVE_SERVER</component>
+ <component>HIVE_METASTORE</component>
+ </service>
+
+ <service name="YARN">
+ <component>NODEMANAGER</component>
+ <component>RESOURCEMANAGER</component>
+ <component>APP_TIMELINE_SERVER</component>
+ </service>
+
+ <service name="MAPREDUCE2">
+ <component>HISTORYSERVER</component>
+ </service>
+ </group>
+
+ <group xsi:type="cluster" name="Backups" title="Perform Backups">
+ <direction>UPGRADE</direction>
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+
+ <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Backup Oozie Database">
+ <task xsi:type="manual">
+ <message>Before continuing, please backup the Oozie Server database referenced by the Oozie server located on {{hosts.all}}.</message>
+ </task>
+ </execute-stage>
+
+ <execute-stage service="HIVE" component="HIVE_METASTORE" title="Backup Hive Metastore">
+ <task xsi:type="manual">
+ <message>Before continuing, please backup the Hive Metastore database referenced by the Hive Metastore service(s) located on the following host(s): {{hosts.all}}.</message>
+ </task>
+ </execute-stage>
+
+ <execute-stage service="HBASE" component="HBASE_MASTER" title="Snapshot HBASE">
+ <task xsi:type="execute" hosts="master">
+ <script>scripts/hbase_upgrade.py</script>
+ <function>take_snapshot</function>
+ </task>
+ </execute-stage>
+
+ <execute-stage service="HDFS" component="NAMENODE" title="Prepare HDFS">
+ <task xsi:type="execute" hosts="master">
+ <script>scripts/namenode.py</script>
+ <function>prepare_express_upgrade</function>
+ </task>
+ </execute-stage>
+
+ <execute-stage service="RANGER" component="RANGER_ADMIN" title="Backup Ranger Database">
+ <task xsi:type="manual">
+ <message>Before continuing, please backup the Ranger Admin database and Ranger Audit database on the following host(s): {{hosts.all}}. If audit database size is too large (greater than 3GB) then follow the below instructions:
+ 1. Backup the audit table from audit database.
+ 2. Truncate audit table.
+ 3. Follow upgrade process and once completed then restore audit data to audit table.</message>
+ </task>
+ </execute-stage>
+
+ <execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Backup Ranger KMS Database">
+ <task xsi:type="manual">
+ <message>Before continuing, please backup Ranger KMS database on the following host(s): {{hosts.all}}.</message>
+ </task>
+ </execute-stage>
+ </group>
+
+ <group xsi:type="stop" name="STOP_LOW_LEVEL_SERVICE_COMPONENTS" title="Stop Components for Core Services">
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+ <service-check>false</service-check>
+ <parallel-scheduler/>
+
+ <service name="HBASE">
+ <component>HBASE_REGIONSERVER</component>
+ <component>HBASE_MASTER</component>
+ <component>PHOENIX_QUERY_SERVER</component>
+ </service>
+
+ <service name="KAFKA">
+ <component>KAFKA_BROKER</component>
+ </service>
+
+ <service name="HDFS">
+ <component>DATANODE</component>
+ <component>NAMENODE</component>
+ <component>SECONDARY_NAMENODE</component>
+ <component>ZKFC</component>
+ <component>JOURNALNODE</component>
+ <component>NFS_GATEWAY</component>
+ </service>
+
+ <service name="RANGER">
+ <component>RANGER_USERSYNC</component>
+ <component>RANGER_ADMIN</component>
+ </service>
+
+ <service name="RANGER_KMS">
+ <component>RANGER_KMS_SERVER</component>
+ </service>
+
+ <service name="ZOOKEEPER">
+ <component>ZOOKEEPER_SERVER</component>
+ </service>
+ </group>
+
+ <group xsi:type="cluster" name="Restore Backups" title="Restore Backups">
+ <direction>DOWNGRADE</direction>
+ <skippable>true</skippable>
+
+ <!-- If the user attempts a downgrade after this point, they will need to restore backups
+ before starting any of the services. -->
+
+ <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Restore Oozie Database">
+ <task xsi:type="manual">
+ <message>Before continuing, please restore the Oozie Server database referenced by the Oozie server located on {{hosts.all}}.</message>
+ </task>
+ </execute-stage>
+
+ <execute-stage service="HIVE" component="HIVE_METASTORE" title="Restore Hive Metastore">
+ <task xsi:type="manual">
+          <message>Before continuing, please restore the Hive Metastore database referenced by the Hive Metastore service(s) located on the following host(s): {{hosts.all}}.</message>
+ </task>
+ </execute-stage>
+
+ <execute-stage service="RANGER" component="RANGER_ADMIN" title="Restore Ranger Database">
+ <task xsi:type="manual">
+ <message>Before continuing, please restore the Ranger Admin database and Ranger Audit database on the following host(s): {{hosts.all}}.</message>
+ </task>
+ </execute-stage>
+
+ <execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Restore Ranger KMS Database">
+ <task xsi:type="manual">
+          <message>Before continuing, please restore the Ranger KMS database.</message>
+ </task>
+ </execute-stage>
+
+ </group>
+
+ <!-- After processing this group, will change the effective Stack of the UpgradeContext object. -->
+ <group xsi:type="update-stack" name="UPDATE_DESIRED_STACK_ID" title="Update Target Stack">
+ <execute-stage title="Update Target Stack" service="" component="">
+ <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.UpdateDesiredStackAction">
+ </task>
+ </execute-stage>
+ </group>
+
+ <group xsi:type="cluster" name="Upgrade service configs" title="Upgrade service configs">
+ <direction>UPGRADE</direction> <!-- prevent config changes on downgrade -->
+ <skippable>true</skippable> <!-- May fix configuration problems manually -->
+
+ <!--YARN-->
+ <execute-stage service="MAPREDUCE2" component="MAPREDUCE2_CLIENT" title="Apply config changes for Mapreduce2 client">
+ <task xsi:type="server_action" summary="Verifying LZO codec path for mapreduce" class="org.apache.ambari.server.serveraction.upgrades.FixLzoCodecPath"/>
+ </execute-stage>
+
+ <!-- YARN -->
+ <execute-stage service="YARN" component="RESOURCEMANAGER" title="Apply config changes for Yarn Resourcemanager">
+ <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_yarn_audit_db"/>
+ </execute-stage>
+
+ <!-- YARN -->
+ <execute-stage service="YARN" component="NODEMANAGER" title="Add Spark2 shuffle">
+ <task xsi:type="configure" id="hdp_2_5_0_0_add_spark2_yarn_shuffle"/>
+ </execute-stage>
+
+ <!--TEZ-->
+ <execute-stage service="TEZ" component="TEZ_CLIENT" title="Verify LZO codec path for Tez">
+ <task xsi:type="server_action" summary="Verifying LZO codec path for Tez" class="org.apache.ambari.server.serveraction.upgrades.FixLzoCodecPath"/>
+ </execute-stage>
+
+ <execute-stage service="TEZ" component="TEZ_CLIENT" title="Apply config changes for Tez">
+ <task xsi:type="configure" id="hdp_2_5_0_0_tez_client_adjust_tez_lib_uris_property"/>
+ </execute-stage>
+
+ <!--OOZIE-->
+ <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Apply config changes for Oozie Server">
+ <task xsi:type="server_action" summary="Adjusting Oozie properties" class="org.apache.ambari.server.serveraction.upgrades.OozieConfigCalculation"/>
+ <task xsi:type="server_action" summary="Fix oozie admin users" class="org.apache.ambari.server.serveraction.upgrades.FixOozieAdminUsers"/>
+ </execute-stage>
+
+
+ <!--FALCON-->
+ <execute-stage service="FALCON" component="FALCON_SERVER" title="Apply config changes for Falcon">
+ <task xsi:type="configure" id="hdp_2_5_0_0_falcon_server_adjust_services_property"/>
+ </execute-stage>
+
+ <!-- RANGER -->
+ <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger Admin">
+ <task xsi:type="configure" id="hdp_2_5_0_0_remove_audit_db_flag"/>
+ </execute-stage>
+
+ <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger Admin">
+ <task xsi:type="configure" id="hdp_2_5_0_0_remove_audit_db_admin_properties"/>
+ </execute-stage>
+
+ <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger Admin">
+ <task xsi:type="configure" id="hdp_2_5_0_0_remove_audit_db_ranger_admin_site"/>
+ </execute-stage>
+
+ <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger Admin">
+ <task xsi:type="configure" id="hdp_2_5_0_0_remove_sso_property"/>
+ </execute-stage>
+
+ <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger Admin">
+ <task xsi:type="configure" id="hdp_2_5_0_0_set_external_solrCloud_flag"/>
+ </execute-stage>
+
+ <execute-stage service="RANGER" component="RANGER_ADMIN" title="Calculating Ranger Properties">
+ <task xsi:type="server_action" summary="Calculating Ranger Properties" class="org.apache.ambari.server.serveraction.upgrades.RangerKerberosConfigCalculation"/>
+ </execute-stage>
+
+ <execute-stage service="RANGER" component="RANGER_ADMIN" title="Configuring Ranger Alerts">
+ <task xsi:type="server_action" summary="Configuring Ranger Alerts" class="org.apache.ambari.server.serveraction.upgrades.RangerWebAlertConfigAction"/>
+ </execute-stage>
+
+ <!-- HDFS -->
+ <execute-stage service="HDFS" component="NAMENODE" title="Apply config changes for Hdfs Namenode HA">
+ <task xsi:type="configure" id="hdp_2_5_0_0_namenode_ha_adjustments"/>
+ </execute-stage>
+
+ <execute-stage service="HDFS" component="NAMENODE" title="Apply config changes for Hdfs Namenode">
+ <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hdfs_audit_db"/>
+ </execute-stage>
+
+ <!-- SQOOP -->
+ <execute-stage service="SQOOP" component="SQOOP" title="Apply config changes for Sqoop to remove Atlas Configs">
+ <!-- Remove Atlas configs that were incorrectly added to sqoop-site instead of Atlas' application.properties. -->
+ <task xsi:type="configure" id="hdp_2_5_0_0_remove_sqoop_atlas_configs"/>
+ </execute-stage>
+
+ <execute-stage service="SQOOP" component="SQOOP" title="Apply config changes for Sqoop if the cluster is Kerberized">
+ <!-- If cluster is Kerberized, add configs to sqoop-atlas-application.properties,
+ which will be written to the local file system if Atlas is present. -->
+ <task xsi:type="configure" id="hdp_2_5_0_0_add_sqoop_atlas_security_configs" />
+ </execute-stage>
+
+ <!-- HIVE -->
+ <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Hive Server">
+ <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hive_audit_db"/>
+ </execute-stage>
+
+ <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Hive Server">
+ <!-- Remove Atlas configs that were incorrectly added to hive-site instead of Atlas' application.properties. -->
+ <task xsi:type="configure" id="hdp_2_5_0_0_remove_hive_atlas_configs"/>
+ </execute-stage>
+
+ <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Hive Server">
+ <task xsi:type="server_action" summary="Update hive-env content" class="org.apache.ambari.server.serveraction.upgrades.HiveEnvClasspathAction"/>
+ </execute-stage>
+
+ <!-- HBASE -->
+ <execute-stage service="HBASE" component="HBASE_MASTER" title="Apply config changes for Hbase Master">
+ <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hbase_audit_db"/>
+ </execute-stage>
+
+ <!-- KNOX -->
+ <execute-stage service="KNOX" component="KNOX_GATEWAY" title="Apply config changes for Knox Gateway">
+ <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_knox_audit_db"/>
+ </execute-stage>
+
+ <!-- STORM -->
+ <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Storm Nimbus">
+ <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_storm_audit_db"/>
+ </execute-stage>
+
+ <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Nimbus">
+ <task xsi:type="configure" id="hdp_2_5_0_0_upgrade_storm_1.0"/>
+ </execute-stage>
+ <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Nimbus in KerberosDescriptor">
+ <task xsi:type="server_action" summary="Upgrade Storm Security Configs to 1.0"
+ class="org.apache.ambari.server.serveraction.upgrades.StormUpgradeKerberosDescriptorConfig"/>
+ </execute-stage>
+
+ <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Storm">
+ <!-- Remove Atlas configs that were incorrectly added to storm-site instead of Atlas' application.properties. -->
+ <task xsi:type="configure" id="hdp_2_5_0_0_remove_storm_atlas_configs"/>
+ <!-- Add nimbus.impersonation acls . -->
+ <task xsi:type="configure" id="hdp_2_5_0_0_add_storm_security_configs" />
+ </execute-stage>
+
+ <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Storm">
+ <task xsi:type="configure" id="hdp_2_4_0_0_remove_empty_storm_topology_submission_notifier_plugin_class"/>
+ </execute-stage>
+
+ <!-- KAFKA -->
+ <execute-stage service="KAFKA" component="KAFKA_BROKER" title="Apply config changes for Kafka Broker">
+ <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_kafka_audit_db"/>
+ </execute-stage>
+
+ <!-- RANGER KMS -->
+ <execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Apply config changes for Ranger KMS Server">
+ <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_kms_audit_db"/>
+ </execute-stage>
+ </group>
+
+ <!--
+ After processing this group, the user-specified Kerberos descriptor will be updated to work with
+ the new stack-level Kerberos descriptor.
+ -->
+ <group xsi:type="cluster" name="UPDATE_KERBEROS_DESCRIPTORS" title="Update Kerberos Descriptors">
+ <execute-stage title="Update the user-specified Kerberos descriptor" service="" component="">
+ <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.UpgradeUserKerberosDescriptor"/>
+ </execute-stage>
+ </group>
+
+ <!--
+ Invoke "hdp-select set all" to change any components we may have missed
+ that are installed on the hosts but not known by Ambari.
+ -->
+ <group xsi:type="cluster" name="ALL_HOST_OPS" title="Set Version On All Hosts">
+ <scope>COMPLETE</scope>
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+
+ <execute-stage title="Update stack to {{version}}">
+ <task xsi:type="execute">
+ <script>scripts/ru_set_all.py</script>
+ <function>actionexecute</function>
+ </task>
+ </execute-stage>
+ </group>
+
+
+ <!-- Now, restart all of the services. -->
+ <group xsi:type="restart" name="ZOOKEEPER" title="ZooKeeper">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+ <parallel-scheduler/>
+ <service name="ZOOKEEPER">
+ <service-check>true</service-check>
+ <component>ZOOKEEPER_SERVER</component>
+ <component>ZOOKEEPER_CLIENT</component>
+ </service>
+ </group>
+
+ <group xsi:type="restart" name="RANGER" title="Ranger">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+ <parallel-scheduler/>
+ <service name="RANGER">
+ <component>RANGER_ADMIN</component>
+ <component>RANGER_USERSYNC</component>
+ </service>
+ </group>
+
+ <group xsi:type="restart" name="RANGER_KMS" title="Ranger KMS">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+ <parallel-scheduler/>
+ <service name="RANGER_KMS">
+ <component>RANGER_KMS_SERVER</component>
+ </service>
+ </group>
+
+ <group xsi:type="restart" name="HDFS" title="HDFS">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+ <parallel-scheduler/>
+ <service name="HDFS">
+ <component>JOURNALNODE</component>
+ <component>ZKFC</component>
+ <component>NAMENODE</component>
+ <component>SECONDARY_NAMENODE</component>
+ <component>NFS_GATEWAY</component>
+ <component>HDFS_CLIENT</component>
+ </service>
+ </group>
+
+ <group xsi:type="restart" name="HDFS_DATANODES" title="HDFS DataNodes">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <parallel-scheduler/>
+ <service name="HDFS">
+ <component>DATANODE</component>
+ </service>
+ </group>
+
+ <group xsi:type="cluster" name="HDFS_LEAVE_SAFEMODE" title="HDFS - Wait to leave Safemode">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+
+ <execute-stage service="HDFS" component="NAMENODE" title="Wait to leave Safemode">
+ <task xsi:type="execute" hosts="all" summary="Wait for NameNode to leave Safemode">
+ <script>scripts/namenode.py</script>
+ <function>wait_for_safemode_off</function>
+ </task>
+ </execute-stage>
+ </group>
+
+ <group xsi:type="restart" name="KAFKA" title="Kafka">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <parallel-scheduler/>
+ <service name="KAFKA">
+ <component>KAFKA_BROKER</component>
+ </service>
+ </group>
+
+ <group xsi:type="restart" name="YARN_AND_MAPR" title="YARN and MapReduce2">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+ <parallel-scheduler/>
+
+ <service name="MAPREDUCE2">
+ <component>HISTORYSERVER</component>
+ <component>MAPREDUCE2_CLIENT</component>
+ </service>
+
+ <service name="YARN">
+ <component>APP_TIMELINE_SERVER</component>
+ <component>RESOURCEMANAGER</component>
+ <component>YARN_CLIENT</component>
+ </service>
+ </group>
+
+ <group xsi:type="restart" name="YARN_NODEMANAGERS" title="YARN NodeManagers">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <parallel-scheduler/>
+
+ <service name="YARN">
+ <component>NODEMANAGER</component>
+ </service>
+ </group>
+
+ <group xsi:type="restart" name="HBASE" title="HBASE">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+ <parallel-scheduler/>
+ <service name="HBASE">
+ <component>HBASE_MASTER</component>
+ <component>HBASE_REGIONSERVER</component>
+ <component>HBASE_CLIENT</component>
+ <component>PHOENIX_QUERY_SERVER</component>
+ </service>
+ </group>
+
+ <group xsi:type="restart" name="CLIENTS" title="Tez, Pig, Sqoop Clients">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <parallel-scheduler/>
+ <service name="TEZ">
+ <component>TEZ_CLIENT</component>
+ </service>
+
+ <service name="MAHOUT">
+ <component>MAHOUT</component>
+ </service>
+
+ <service name="PIG">
+ <component>PIG</component>
+ </service>
+
+ <service name="SQOOP">
+ <component>SQOOP</component>
+ </service>
+ </group>
+
+ <group name="SERVICE_CHECK_1" title="All Service Checks" xsi:type="service-check">
+ <skippable>true</skippable>
+ <direction>UPGRADE</direction>
+ <priority>
+ <service>ZOOKEEPER</service>
+ <service>RANGER</service>
+ <service>RANGER_KMS</service>
+ <service>HDFS</service>
+ <service>KAFKA</service>
+ <service>YARN</service>
+ <service>MAPREDUCE2</service>
+ <service>HBASE</service>
+ </priority>
+ </group>
+
+ <group xsi:type="restart" name="HIVE_MASTERS" title="Hive Masters">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+ <!-- Must be ran sequentially because Hive Metastore upgrades the schema and Hive Server copies tarballs. -->
+ <parallel-scheduler>
+ <max-degree-of-parallelism>1</max-degree-of-parallelism>
+ </parallel-scheduler>
+ <service name="HIVE">
+ <component>HIVE_METASTORE</component>
+ <component>HIVE_SERVER</component>
+ <component>WEBHCAT_SERVER</component>
+ </service>
+ </group>
+
+ <group xsi:type="restart" name="HIVE_CLIENTS" title="Hive Clients">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+ <parallel-scheduler/>
+ <service name="HIVE">
+ <component>HIVE_CLIENT</component>
+ <component>HCAT</component>
+ </service>
+ </group>
+
+ <group xsi:type="restart" name="SPARK" title="Spark">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+ <parallel-scheduler/>
+ <service name="SPARK">
+ <component>SPARK_JOBHISTORYSERVER</component>
+ <component>SPARK_THRIFTSERVER</component>
+ </service>
+ </group>
+
+ <group xsi:type="restart" name="SPARK_CLIENTS" title="Spark Clients">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <parallel-scheduler/>
+ <service name="SPARK">
+ <component>SPARK_CLIENT</component>
+ </service>
+ </group>
+
+ <group xsi:type="restart" name="OOZIE" title="Oozie">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+ <parallel-scheduler/>
+ <service name="OOZIE">
+ <component>OOZIE_SERVER</component>
+ </service>
+ </group>
+
+ <group xsi:type="restart" name="OOZIE_CLIENTS" title="Oozie Clients">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <parallel-scheduler/>
+ <service name="OOZIE">
+ <component>OOZIE_CLIENT</component>
+ </service>
+ </group>
+
+ <group name="SERVICE_CHECK_2" title="All Service Checks" xsi:type="service-check">
+ <skippable>true</skippable>
+ <direction>UPGRADE</direction>
+ <priority>
+ <service>HIVE</service>
+ <service>OOZIE</service>
+ <service>SPARK</service>
+ </priority>
+ </group>
+
+ <group xsi:type="restart" name="FALCON" title="Falcon">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+ <parallel-scheduler/>
+ <service name="FALCON">
+ <component>FALCON_SERVER</component>
+ </service>
+ </group>
+
+ <group xsi:type="restart" name="FALCON_CLIENTS" title="Falcon Clients">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <parallel-scheduler/>
+ <service name="FALCON">
+ <component>FALCON_CLIENT</component>
+ </service>
+ </group>
+
+ <group xsi:type="restart" name="KNOX" title="Knox">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <parallel-scheduler/>
+ <service name="KNOX">
+ <component>KNOX_GATEWAY</component>
+ </service>
+ </group>
+
+ <group xsi:type="restart" name="STORM" title="Storm">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <parallel-scheduler/>
+ <service name="STORM">
+ <component>NIMBUS</component>
+ <component>SUPERVISOR</component>
+ <component>STORM_UI_SERVER</component>
+ <component>DRPC_SERVER</component>
+ </service>
+
+ <execute-stage service="STORM" component="DRPC_SERVER" title="Rebuild Storm Topology">
+ <task xsi:type="manual">
+ <message>Please rebuild your topology using the new Storm version dependencies and resubmit it using the newly created jar.</message>
+ </task>
+ </execute-stage>
+ </group>
+
+ <group xsi:type="restart" name="SLIDER" title="Slider">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <parallel-scheduler/>
+ <service name="SLIDER">
+ <component>SLIDER</component>
+ </service>
+ </group>
+
+ <group xsi:type="restart" name="FLUME" title="Flume">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <parallel-scheduler/>
+ <service name="FLUME">
+ <component>FLUME_HANDLER</component>
+ </service>
+ </group>
+
+ <group xsi:type="restart" name="ACCUMULO" title="Accumulo">
+ <service-check>false</service-check>
+ <skippable>true</skippable>
+ <parallel-scheduler/>
+ <service name="ACCUMULO">
+ <component>ACCUMULO_MASTER</component>
+ <component>ACCUMULO_TSERVER</component>
+ <component>ACCUMULO_MONITOR</component>
+ <component>ACCUMULO_GC</component>
+ <component>ACCUMULO_TRACER</component>
+ <component>ACCUMULO_CLIENT</component>
+ </service>
+ </group>
+
+ <group name="SERVICE_CHECK_3" title="All Service Checks" xsi:type="service-check">
+ <skippable>true</skippable>
+ <direction>UPGRADE</direction>
+ <priority>
+ <service>FALCON</service>
+ <service>KNOX</service>
+ <service>STORM</service>
+ <service>SLIDER</service>
+ <service>FLUME</service>
+ <service>ACCUMULO</service>
+ </priority>
+ </group>
+
+ <group xsi:type="cluster" name="FINALIZE_PRE_CHECK" title="Finalize {{direction.text.proper}} Pre-Check">
+ <direction>UPGRADE</direction>
+
+ <execute-stage title="Check Component Versions">
+ <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.ComponentVersionCheckAction" />
+ </execute-stage>
+ </group>
+
+ <group xsi:type="cluster" name="MANUAL_STEPS" title="Finishing Upgrade">
+ <direction>UPGRADE</direction>
+
+ <execute-stage service="YARN" component="RESOURCEMANAGER" title="Start YARN Queues">
+ <task xsi:type="manual">
+ <message>Please start previously stopped YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
+ </task>
+ </execute-stage>
+ </group>
+
+ <group xsi:type="cluster" name="POST_CLUSTER" title="Finalize {{direction.text.proper}}">
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+
+ <execute-stage title="Confirm Finalize">
+ <direction>UPGRADE</direction>
+ <task xsi:type="manual">
+ <message>Please confirm you are ready to finalize.</message>
+ </task>
+ </execute-stage>
+
+ <execute-stage service="HDFS" component="NAMENODE" title="Execute HDFS Finalize">
+ <task xsi:type="execute" hosts="master">
+ <script>scripts/namenode.py</script>
+ <function>finalize_non_rolling_upgrade</function>
+ </task>
+ </execute-stage>
+
+ <execute-stage title="Save Cluster State" service="" component="">
+ <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.FinalizeUpgradeAction">
+ </task>
+ </execute-stage>
+
+ </group>
+ </order>
+
+ <processing>
+ <service name="ZOOKEEPER">
+ <component name="ZOOKEEPER_SERVER">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="ZOOKEEPER_CLIENT">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="RANGER">
+ <component name="RANGER_ADMIN">
+ <pre-upgrade>
+
+ <task xsi:type="execute" hosts="all">
+ <script>scripts/ranger_admin.py</script>
+ <function>set_pre_start</function>
+ </task>
+
+ <task xsi:type="execute" hosts="any" summary="Upgrading Ranger database schema">
+ <script>scripts/ranger_admin.py</script>
+ <function>setup_ranger_database</function>
+ </task>
+
+ <task xsi:type="configure_function" hosts="all" />
+
+ <task xsi:type="execute" hosts="any" summary="Applying Ranger java patches">
+ <script>scripts/ranger_admin.py</script>
+ <function>setup_ranger_java_patches</function>
+ </task>
+ </pre-upgrade>
+
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+
+ </component>
+
+ <component name="RANGER_USERSYNC">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="RANGER_KMS">
+ <component name="RANGER_KMS_SERVER">
+ <pre-upgrade>
+ <task xsi:type="execute" hosts="any" sequential="true" summary="Upgrading Ranger KMS database schema">
+ <script>scripts/kms_server.py</script>
+ <function>setup_ranger_kms_database</function>
+ </task>
+ </pre-upgrade>
+
+ <pre-downgrade>
+ <task xsi:type="execute" hosts="any" sequential="true" summary="Downgrading Ranger KMS database schema">
+ <script>scripts/kms_server.py</script>
+ <function>setup_ranger_kms_database</function>
+ </task>
+ </pre-downgrade>
+
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="HDFS">
+ <component name="NAMENODE">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="SECONDARY_NAMENODE">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="DATANODE">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="HDFS_CLIENT">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="JOURNALNODE">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="ZKFC">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="NFS_GATEWAY">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="KAFKA">
+ <component name="KAFKA_BROKER">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="MAPREDUCE2">
+ <component name="HISTORYSERVER">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="MAPREDUCE2_CLIENT">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="YARN">
+ <component name="APP_TIMELINE_SERVER">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="RESOURCEMANAGER">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="NODEMANAGER">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="YARN_CLIENT">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="HBASE">
+ <component name="HBASE_MASTER">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="HBASE_REGIONSERVER">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="HBASE_CLIENT">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="TEZ">
+ <component name="TEZ_CLIENT">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="MAHOUT">
+ <component name="MAHOUT">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="PIG">
+ <component name="PIG">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="SQOOP">
+ <component name="SQOOP">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="HIVE">
+ <component name="HIVE_METASTORE">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="HIVE_SERVER">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="WEBHCAT_SERVER">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="HIVE_CLIENT">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="HCAT">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="SPARK">
+ <component name="SPARK_JOBHISTORYSERVER">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ <component name="SPARK_THRIFTSERVER">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ <component name="SPARK_CLIENT">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="OOZIE">
+ <component name="OOZIE_SERVER">
+ <pre-upgrade>
+ <!-- It is extremely important that both of these tasks run on the exact same host. Hence, pick the first alphabetically. -->
+ <task xsi:type="configure_function" hosts="first" />
+
+ <task xsi:type="execute" hosts="first" sequential="true" summary="Upgrading the Oozie database and creating a new sharelib">
+ <script>scripts/oozie_server_upgrade.py</script>
+ <function>upgrade_oozie_database_and_sharelib</function>
+ </task>
+ </pre-upgrade>
+
+ <pre-downgrade>
+ <task xsi:type="execute" hosts="any" sequential="true" summary="Create a new sharelib">
+ <script>scripts/oozie_server_upgrade.py</script>
+ <function>create_sharelib</function>
+ </task>
+ </pre-downgrade>
+
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="OOZIE_CLIENT">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="FALCON">
+ <component name="FALCON_SERVER">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ <component name="FALCON_CLIENT">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="KNOX">
+ <component name="KNOX_GATEWAY">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="STORM">
+ <component name="NIMBUS">
+ <pre-upgrade>
+ <task xsi:type="execute" hosts="any" summary="Removing Storm data from ZooKeeper">
+ <script>scripts/storm_upgrade.py</script>
+ <function>delete_storm_zookeeper_data</function>
+ </task>
+
+ <task xsi:type="execute" summary="Removing local Storm data">
+ <script>scripts/storm_upgrade.py</script>
+ <function>delete_storm_local_data</function>
+ </task>
+ </pre-upgrade>
+
+ <pre-downgrade>
+ <task xsi:type="execute" hosts="any" summary="Removing Storm data from ZooKeeper">
+ <script>scripts/storm_upgrade.py</script>
+ <function>delete_storm_zookeeper_data</function>
+ </task>
+
+ <task xsi:type="execute" summary="Removing local Storm data">
+ <script>scripts/storm_upgrade.py</script>
+ <function>delete_storm_local_data</function>
+ </task>
+ </pre-downgrade>
+
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="SUPERVISOR">
+ <pre-upgrade>
+ <task xsi:type="execute" summary="Removing local Storm data">
+ <script>scripts/storm_upgrade.py</script>
+ <function>delete_storm_local_data</function>
+ </task>
+ </pre-upgrade>
+
+ <pre-downgrade>
+ <task xsi:type="manual">
+ <message>Before continuing, please deactivate and kill any currently running topologies.</message>
+ </task>
+
+ <task xsi:type="execute" summary="Removing local Storm data">
+ <script>scripts/storm_upgrade.py</script>
+ <function>delete_storm_local_data</function>
+ </task>
+ </pre-downgrade>
+
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="STORM_UI_SERVER">
+ <pre-upgrade>
+ <task xsi:type="execute" summary="Removing local Storm data">
+ <script>scripts/storm_upgrade.py</script>
+ <function>delete_storm_local_data</function>
+ </task>
+ </pre-upgrade>
+
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="DRPC_SERVER">
+ <pre-upgrade>
+ <task xsi:type="execute" summary="Removing local Storm data">
+ <script>scripts/storm_upgrade.py</script>
+ <function>delete_storm_local_data</function>
+ </task>
+ </pre-upgrade>
+
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+
+ <post-upgrade>
+ <task xsi:type="manual">
+ <message>Please rebuild your topology using the new Storm version dependencies and resubmit it using the newly created jar.</message>
+ </task>
+ </post-upgrade>
+ </component>
+ </service>
+
+ <service name="ACCUMULO">
+ <component name="ACCUMULO_MASTER">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="ACCUMULO_TSERVER">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="ACCUMULO_MONITOR">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="ACCUMULO_GC">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="ACCUMULO_TRACER">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+
+ <component name="ACCUMULO_CLIENT">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="SLIDER">
+ <component name="SLIDER">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="FLUME">
+ <component name="FLUME_HANDLER">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ </service>
+ </processing>
+</upgrade>
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml.rej b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml.rej
new file mode 100644
index 0000000000..39b0bed2c6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml.rej
@@ -0,0 +1,17 @@
+***************
+*** 31,37 ****
+ </check-properties>
+ </configuration>
+ </prerequisite-checks>
+-
+ <order>
+ <group xsi:type="cluster" name="PRE_CLUSTER" title="Prepare Upgrade">
+ <direction>UPGRADE</direction>
+--- 31,37 ----
+ </check-properties>
+ </configuration>
+ </prerequisite-checks>
++
+ <order>
+ <group xsi:type="cluster" name="PRE_CLUSTER" title="Prepare Upgrade">
+ <direction>UPGRADE</direction>
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml
index 826cf16c67..e4f56e96bd 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml
@@ -503,7 +503,7 @@
<message>-- If keytabs are managed manually, create the required ranger keytabs in the locations specified in ranger config and restart Ranger components.</message>
</task>
</execute-stage>
-
+
<execute-stage service="KAFKA" component="KAFKA_BROKER" title="Kafka Property Removal">
<task xsi:type="manual">
<message>Kafka Brokers have been upgraded with version compatability.</message>
@@ -653,7 +653,7 @@
<task xsi:type="restart-task" />
</upgrade>
</component>
-
+
<component name="NFS_GATEWAY">
<upgrade>
<task xsi:type="restart-task" />
@@ -876,6 +876,9 @@
<service name="SPARK">
<component name="SPARK_JOBHISTORYSERVER">
+ <pre-upgrade>
+ <task xsi:type="configure" id="hdp_2_5_0_0_spark_jobhistoryserver"/>
+ </pre-upgrade>
<pre-downgrade /> <!-- no-op to prevent config changes on downgrade -->
<upgrade>
<task xsi:type="restart-task" />
@@ -888,6 +891,9 @@
</upgrade>
</component>
<component name="SPARK_CLIENT">
+ <pre-upgrade>
+ <task xsi:type="configure" id="hdp_2_5_0_0_spark_client"/>
+ </pre-upgrade>
<upgrade>
<task xsi:type="restart-task" />
</upgrade>
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml.orig b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml.orig
new file mode 100644
index 0000000000..fc8a60d329
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml.orig
@@ -0,0 +1,1087 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+
+<upgrade xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+ <target>2.5.*.*</target>
+ <target-stack>HDP-2.5</target-stack>
+ <type>ROLLING</type>
+ <skip-failures>false</skip-failures>
+ <skip-service-check-failures>false</skip-service-check-failures>
+
+ <prerequisite-checks>
+ <!-- List of additional pre-req checks to run in addition to the required pre-reqs -->
+ <check>org.apache.ambari.server.checks.HiveMultipleMetastoreCheck</check>
+ <check>org.apache.ambari.server.checks.SecondaryNamenodeDeletedCheck</check>
+ <check>org.apache.ambari.server.checks.ServicesMapReduceDistributedCacheCheck</check>
+ <check>org.apache.ambari.server.checks.ServicesNamenodeHighAvailabilityCheck</check>
+ <check>org.apache.ambari.server.checks.ServicesNamenodeTruncateCheck</check>
+ <check>org.apache.ambari.server.checks.ServicesTezDistributedCacheCheck</check>
+ <check>org.apache.ambari.server.checks.ServicesYarnWorkPreservingCheck</check>
+ <check>org.apache.ambari.server.checks.YarnRMHighAvailabilityCheck</check>
+ <check>org.apache.ambari.server.checks.YarnTimelineServerStatePreservingCheck</check>
+ <check>org.apache.ambari.server.checks.AtlasPresenceCheck</check>
+ <check>org.apache.ambari.server.checks.RangerAuditDbCheck</check>
+
+ <!-- Specific to HDP 2.5, Storm is not rolling -->
+ <check>org.apache.ambari.server.checks.StormShutdownWarning</check>
+
+ <configuration>
+ <!-- Configuration properties for all pre-reqs including required pre-reqs -->
+ <!--TODO: is it required? -->
+ <check-properties name="org.apache.ambari.server.checks.HiveDynamicServiceDiscoveryCheck">
+ <property name="min-failure-stack-version">HDP-2.3.0.0</property>
+ </check-properties>
+ </configuration>
+ </prerequisite-checks>
+
+ <skip-failures>false</skip-failures>
+ <skip-service-check-failures>false</skip-service-check-failures>
+
+ <order>
+ <group xsi:type="cluster" name="PRE_CLUSTER" title="Prepare Upgrade">
+ <direction>UPGRADE</direction>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+
+ <execute-stage service="HDFS" component="NAMENODE" title="Pre Upgrade HDFS">
+ <task xsi:type="execute" hosts="master">
+ <script>scripts/namenode.py</script>
+ <function>prepare_rolling_upgrade</function>
+ </task>
+ </execute-stage>
+
+ <execute-stage service="TEZ" component="TEZ_CLIENT" title="Check Tez Tarball">
+ <task xsi:type="execute" hosts="any">
+ <script>scripts/pre_upgrade.py</script>
+ <function>prepare</function>
+ </task>
+ </execute-stage>
+ </group>
+
+ <group xsi:type="cluster" name="PREPARE_BACKUPS" title="Prepare Backups">
+ <direction>UPGRADE</direction>
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+
+ <execute-stage service="HBASE" component="HBASE_MASTER" title="Pre Upgrade HBase Backup">
+ <task xsi:type="execute" hosts="master">
+ <script>scripts/hbase_upgrade.py</script>
+ <function>take_snapshot</function>
+ </task>
+ </execute-stage>
+
+ <execute-stage service="HIVE" component="HIVE_METASTORE" title="Pre Upgrade Hive Backup">
+ <task xsi:type="manual">
+ <message>Before continuing, please backup the Hive Metastore database referenced by the Hive Metastore service(s) located on the following host(s): {{hosts.all}}.</message>
+ </task>
+ </execute-stage>
+
+ <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Pre Upgrade Oozie Backup">
+ <task xsi:type="manual">
+ <message>Before continuing, please backup the Oozie Server database referenced by the Oozie server located on {{hosts.all}}.</message>
+ </task>
+ </execute-stage>
+
+ <execute-stage service="RANGER" component="RANGER_ADMIN" title="Pre Upgrade Ranger Backup">
+ <task xsi:type="manual">
+ <message>Before continuing, please backup the Ranger Admin database and Ranger Audit database.
+ If audit database size is too large( > 3GB ) then follow the below instructions:
+ 1. Backup the audit table from audit database.
+ 2. Truncate audit table.
+ 3. Follow upgrade process and once completed then restore audit data to audit table.</message>
+ </task>
+ </execute-stage>
+
+ <execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Pre Upgrade Ranger KMS Backup">
+ <task xsi:type="manual">
+ <message>Before continuing, please backup Ranger KMS database on the following host(s): {{hosts.all}}.</message>
+ </task>
+ </execute-stage>
+ </group>
+
+ <group name="ZOOKEEPER" title="ZooKeeper">
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+ <service name="ZOOKEEPER">
+ <component>ZOOKEEPER_SERVER</component>
+ </service>
+ </group>
+
+ <group name="RANGER" title="Ranger">
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+ <service name="RANGER">
+ <component>RANGER_ADMIN</component>
+ <component>RANGER_USERSYNC</component>
+ </service>
+ </group>
+
+ <group name="RANGER_KMS" title="Ranger_KMS">
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+ <service name="RANGER_KMS">
+ <component>RANGER_KMS_SERVER</component>
+ </service>
+ </group>
+
+ <group name="KAFKA" title="Kafka">
+ <skippable>true</skippable>
+ <service name="KAFKA">
+ <component>KAFKA_BROKER</component>
+ </service>
+ </group>
+
+ <!-- This group should exist for all RUs that cross a major stack version. -->
+ <group xsi:type="cluster" name="UPDATE_CLIENT_CONFIGS" title="Update Client Configs">
+ <direction>UPGRADE</direction>
+ <execute-stage service="TEZ" component="TEZ_CLIENT" title="Update tez.lib.uris">
+ <task xsi:type="configure" id="hdp_2_5_0_0_tez_client_adjust_tez_lib_uris_property"/>
+ </execute-stage>
+
+ <execute-stage service="TEZ" component="TEZ_CLIENT" title="Verify LZO codec path for Tez">
+ <task xsi:type="server_action" summary="Verifying LZO codec path for Tez" class="org.apache.ambari.server.serveraction.upgrades.FixLzoCodecPath"/>
+ </execute-stage>
+ </group>
+
+ <!-- This needs to be done for every Rolling Upgrade pack that changes configurations. -->
+ <group xsi:type="cluster" name="CONFIGURE_CLIENT_DEPENDENCIES" title="Write client configs">
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+ <direction>UPGRADE</direction>
+
+ <execute-stage service="MAPREDUCE2" component="MAPREDUCE2_CLIENT" title="Write Mapreduce2 Client configs">
+ <task xsi:type="execute">
+ <script>scripts/mapreduce2_client.py</script>
+ <function>stack_upgrade_save_new_config</function>
+ </task>
+ </execute-stage>
+ <execute-stage service="TEZ" component="TEZ_CLIENT" title="Write Tez Client configs">
+ <task xsi:type="execute">
+ <script>scripts/tez_client.py</script>
+ <function>stack_upgrade_save_new_config</function>
+ </task>
+ </execute-stage>
+ <execute-stage service="SPARK" component="SPARK_CLIENT" title="Write Spark Client configs">
+ <task xsi:type="execute">
+ <script>scripts/spark_client.py</script>
+ <function>stack_upgrade_save_new_config</function>
+ </task>
+ </execute-stage>
+ </group>
+
+ <!--
+ After processing this group, the user-specified Kerberos descriptor will be updated to work with
+ the new stack-level Kerberos descriptor.
+ -->
+ <group xsi:type="cluster" name="UPDATE_KERBEROS_DESCRIPTORS" title="Update Kerberos Descriptors">
+ <execute-stage title="Update the user-specified Kerberos descriptor" service="" component="">
+ <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.UpgradeUserKerberosDescriptor"/>
+ </execute-stage>
+ </group>
+
+ <group name="CORE_MASTER" title="Core Masters">
+ <service-check>false</service-check>
+ <service name="HDFS">
+ <component>NAMENODE</component>
+ <component>JOURNALNODE</component>
+ <component>ZKFC</component>
+ </service>
+
+ <service name="MAPREDUCE2">
+ <component>HISTORYSERVER</component>
+ </service>
+
+ <service name="YARN">
+ <component>APP_TIMELINE_SERVER</component>
+ <component>RESOURCEMANAGER</component>
+ </service>
+
+ <service name="HBASE">
+ <component>HBASE_MASTER</component>
+ </service>
+ </group>
+
+ <group name="SERVICE_CHECK_1" title="All Service Checks" xsi:type="service-check">
+ <skippable>true</skippable>
+ <direction>UPGRADE</direction>
+ <priority>
+ <service>ZOOKEEPER</service>
+ <service>HDFS</service>
+ <service>YARN</service>
+ <service>MAPREDUCE2</service>
+ <service>HBASE</service>
+ </priority>
+ <exclude>
+ <service>AMBARI_METRICS</service>
+ <service>LOGSEARCH</service>
+ </exclude>
+ </group>
+
+ <group name="CORE_SLAVES" title="Core Slaves" xsi:type="colocated">
+ <skippable>true</skippable>
+ <service-check>false</service-check>
+ <service name="HDFS">
+ <component>DATANODE</component>
+ <component>NFS_GATEWAY</component>
+ </service>
+
+ <service name="HBASE">
+ <component>HBASE_REGIONSERVER</component>
+ <component>PHOENIX_QUERY_SERVER</component>
+ </service>
+
+ <service name="YARN">
+ <component>NODEMANAGER</component>
+ </service>
+
+ <batch>
+ <percent>20</percent>
+ <summary>Verification Required</summary>
+ <message>The initial batch of {{components}} hosts have been {{direction.past}}. You are advised to check the hosts and perform cluster/workload-specific tests against your cluster to ensure proper operation before proceeding with {{direction.text}} of the remaining services.</message>
+ </batch>
+ </group>
+
+ <group name="SERVICE_CHECK_2" title="All Service Checks" xsi:type="service-check">
+ <skippable>true</skippable>
+ <direction>UPGRADE</direction>
+ <priority>
+ <service>ZOOKEEPER</service>
+ <service>HDFS</service>
+ <service>YARN</service>
+ <service>HBASE</service>
+ </priority>
+ <exclude>
+ <service>AMBARI_METRICS</service>
+ <service>LOGSEARCH</service>
+ </exclude>
+ </group>
+
+ <group name="HIVE" title="Hive">
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+ <service name="HIVE">
+ <component>HIVE_METASTORE</component>
+ <component>HIVE_SERVER</component>
+ <component>WEBHCAT_SERVER</component>
+ </service>
+ </group>
+
+ <group name="SPARK" title="Spark">
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+ <service-check>false</service-check>
+ <service name="SPARK">
+ <component>SPARK_JOBHISTORYSERVER</component>
+ <component>SPARK_THRIFTSERVER</component>
+ </service>
+ </group>
+
+ <group name="SPARK_CLIENTS" title="Spark Clients">
+ <skippable>true</skippable>
+ <service name="SPARK">
+ <component>SPARK_CLIENT</component>
+ </service>
+ </group>
+
+ <group name="OOZIE" title="Oozie">
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+ <service-check>false</service-check>
+ <service name="OOZIE">
+ <component>OOZIE_SERVER</component>
+ </service>
+ </group>
+
+ <group name="OOZIE_CLIENTS" title="Oozie Clients">
+ <skippable>true</skippable>
+ <service name="OOZIE">
+ <component>OOZIE_CLIENT</component>
+ </service>
+ </group>
+
+ <group name="FALCON" title="Falcon">
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+ <service-check>false</service-check>
+ <service name="FALCON">
+ <component>FALCON_SERVER</component>
+ </service>
+ </group>
+
+ <group name="FALCON_CLIENTS" title="Falcon Clients">
+ <skippable>true</skippable>
+ <service name="FALCON">
+ <component>FALCON_CLIENT</component>
+ </service>
+ </group>
+
+ <group name="CLIENTS" title="Client Components">
+ <service-check>false</service-check>
+ <service name="ZOOKEEPER">
+ <component>ZOOKEEPER_CLIENT</component>
+ </service>
+
+ <service name="HDFS">
+ <component>HDFS_CLIENT</component>
+ </service>
+
+ <service name="YARN">
+ <component>YARN_CLIENT</component>
+ </service>
+
+ <service name="MAPREDUCE2">
+ <component>MAPREDUCE2_CLIENT</component>
+ </service>
+
+ <service name="TEZ">
+ <component>TEZ_CLIENT</component>
+ </service>
+
+ <service name="HBASE">
+ <component>HBASE_CLIENT</component>
+ </service>
+
+ <service name="PIG">
+ <component>PIG</component>
+ </service>
+
+ <service name="SQOOP">
+ <component>SQOOP</component>
+ </service>
+
+ <service name="MAHOUT">
+ <component>MAHOUT</component>
+ </service>
+
+ <service name="HIVE">
+ <component>HIVE_CLIENT</component>
+ <component>HCAT</component>
+ </service>
+ </group>
+
+ <group name="SERVICE_CHECK_3" title="All Service Checks" xsi:type="service-check">
+ <skippable>true</skippable>
+ <direction>UPGRADE</direction>
+ <priority>
+ <service>ZOOKEEPER</service>
+ <service>HDFS</service>
+ <service>YARN</service>
+ <service>HBASE</service>
+ </priority>
+ <exclude>
+ <service>AMBARI_METRICS</service>
+ <service>LOGSEARCH</service>
+ </exclude>
+ </group>
+
+ <group name="KNOX" title="Knox">
+ <skippable>true</skippable>
+ <service name="KNOX">
+ <component>KNOX_GATEWAY</component>
+ </service>
+ </group>
+
+ <!-- Storm package names changed causing an incompatibility between versions; we must
+ shut all daemons down before deleting ZooKeeper/local data. -->
+ <group xsi:type="cluster" name="STOP_STORM_WARNING" title="Deactivate Storm Topologies">
+ <direction>UPGRADE</direction>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+ <execute-stage service="STORM" component="NIMBUS" title="Deactivate Storm Topologies">
+ <task xsi:type="manual">
+ <message>Before continuing, please deactivate and kill any currently running topologies.</message>
+ </task>
+ </execute-stage>
+ </group>
+
+ <group xsi:type="stop" name="STOP_STORM" title="Stop Storm Services">
+ <direction>UPGRADE</direction>
+ <skippable>true</skippable>
+ <service-check>false</service-check>
+ <service name="STORM">
+ <component>NIMBUS</component>
+ <component>SUPERVISOR</component>
+ <component>STORM_UI_SERVER</component>
+ <component>DRPC_SERVER</component>
+ </service>
+ </group>
+
+ <group xsi:type="restart" name="RESTART_STORM" title="Restart Storm Services">
+ <skippable>true</skippable>
+ <service name="STORM">
+ <component>NIMBUS</component>
+ <component>SUPERVISOR</component>
+ <component>STORM_UI_SERVER</component>
+ <component>DRPC_SERVER</component>
+ </service>
+ </group>
+
+ <group xsi:type="stop" name="STOP_STORM" title="Stop Storm Services">
+ <direction>DOWNGRADE</direction>
+ <skippable>true</skippable>
+ <service-check>false</service-check>
+ <service name="STORM">
+ <component>NIMBUS</component>
+ <component>SUPERVISOR</component>
+ <component>STORM_UI_SERVER</component>
+ <component>DRPC_SERVER</component>
+ </service>
+ </group>
+
+ <group name="SLIDER" title="Slider">
+ <skippable>true</skippable>
+ <service name="SLIDER">
+ <component>SLIDER</component>
+ </service>
+ </group>
+
+ <group name="FLUME" title="Flume">
+ <skippable>true</skippable>
+ <service name="FLUME">
+ <component>FLUME_HANDLER</component>
+ </service>
+ </group>
+
+ <group name="ACCUMULO" title="Accumulo">
+ <skippable>true</skippable>
+ <service name="ACCUMULO">
+ <component>ACCUMULO_MASTER</component>
+ <component>ACCUMULO_TSERVER</component>
+ <component>ACCUMULO_MONITOR</component>
+ <component>ACCUMULO_GC</component>
+ <component>ACCUMULO_TRACER</component>
+ <component>ACCUMULO_CLIENT</component>
+ </service>
+ </group>
+
+ <group xsi:type="cluster" name="ALL_HOST_OPS" title="Finalize Hosts">
+ <execute-stage title="Update remaining HDP stack to {{version}}">
+ <task xsi:type="execute">
+ <script>scripts/ru_set_all.py</script>
+ <function>actionexecute</function>
+ </task>
+ </execute-stage>
+ </group>
+
+ <group xsi:type="cluster" name="FINALIZE_PRE_CHECK" title="Finalize {{direction.text.proper}} Pre-Check">
+ <direction>UPGRADE</direction>
+
+ <execute-stage title="Check Component Versions">
+ <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.ComponentVersionCheckAction" />
+ </execute-stage>
+ </group>
+
+ <group xsi:type="cluster" name="POST_CLUSTER" title="Finalize {{direction.text.proper}}">
+ <skippable>true</skippable>
+ <supports-auto-skip-failure>false</supports-auto-skip-failure>
+
+ <execute-stage title="Confirm Finalize">
+ <direction>UPGRADE</direction>
+ <task xsi:type="manual">
+ <message>Please confirm you are ready to finalize.</message>
+ </task>
+ </execute-stage>
+
+ <execute-stage service="RANGER" component="RANGER_ADMIN" title="Ranger Kerberos Keytab Check">
+ <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.KerberosKeytabsAction">
+ <summary>Ranger Kerberos Keytab Check</summary>
+ <message>Please follow below steps to complete upgrade in kerberos environment</message>
+ <message>-- If keytabs are managed by Ambari, Perform Regenerate Keytabs for missing components to get Ranger keytabs and restart Ranger components.</message>
+ <message>-- If keytabs are managed manually, create the required ranger keytabs in the locations specified in ranger config and restart Ranger components.</message>
+ </task>
+ </execute-stage>
+
+ <execute-stage service="KAFKA" component="KAFKA_BROKER" title="Kafka Property Removal">
+ <task xsi:type="manual">
+ <message>Kafka Brokers have been upgraded with version compatability.</message>
+ <message>Config property "inter.broker.protocol.version" should be removed at your convenience.</message>
+ <message>Config property "log.message.format.version" should be removed when all clients have been upgraded.</message>
+ </task>
+ </execute-stage>
+
+ <execute-stage service="HDFS" component="NAMENODE" title="Execute HDFS Finalize">
+ <task xsi:type="execute" hosts="master">
+ <script>scripts/namenode.py</script>
+ <function>finalize_rolling_upgrade</function>
+ </task>
+ </execute-stage>
+
+ <execute-stage title="Save Cluster State" service="" component="">
+ <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.FinalizeUpgradeAction" />
+ </execute-stage>
+
+ </group>
+ </order>
+
+ <processing>
+ <service name="ZOOKEEPER">
+ <component name="ZOOKEEPER_SERVER">
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+
+ <component name="ZOOKEEPER_CLIENT">
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="RANGER">
+ <component name="RANGER_ADMIN">
+ <pre-downgrade/> <!-- no-op to prevent config changes on downgrade -->
+ <pre-upgrade>
+ <task xsi:type="execute" hosts="all" summary="Stop Ranger Admin">
+ <script>scripts/ranger_admin.py</script>
+ <function>stop</function>
+ </task>
+
+ <task xsi:type="configure" id="hdp_2_5_0_0_remove_audit_db_flag" />
+ <task xsi:type="configure" id="hdp_2_5_0_0_remove_audit_db_admin_properties" />
+ <task xsi:type="configure" id="hdp_2_5_0_0_remove_audit_db_ranger_admin_site" />
+ <task xsi:type="configure" id="hdp_2_5_0_0_remove_sso_property" />
+ <task xsi:type="configure" id="hdp_2_5_0_0_set_external_solrCloud_flag"/>
+
+ <task xsi:type="server_action" summary="Calculating Ranger Properties" class="org.apache.ambari.server.serveraction.upgrades.RangerKerberosConfigCalculation"/>
+ <task xsi:type="server_action" summary="Configuring Ranger Alerts" class="org.apache.ambari.server.serveraction.upgrades.RangerWebAlertConfigAction"/>
+
+ <task xsi:type="execute" hosts="all">
+ <script>scripts/ranger_admin.py</script>
+ <function>set_pre_start</function>
+ </task>
+
+ <task xsi:type="execute" hosts="any" summary="Upgrading Ranger database schema">
+ <script>scripts/ranger_admin.py</script>
+ <function>setup_ranger_database</function>
+ </task>
+
+ <task xsi:type="configure_function" hosts="all" />
+
+ <task xsi:type="execute" hosts="any" summary="Applying Ranger java patches">
+ <script>scripts/ranger_admin.py</script>
+ <function>setup_ranger_java_patches</function>
+ </task>
+ </pre-upgrade>
+
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+
+ </component>
+
+ <component name="RANGER_USERSYNC">
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="RANGER_KMS">
+ <component name="RANGER_KMS_SERVER">
+ <pre-upgrade>
+ <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_kms_audit_db" />
+
+ <task xsi:type="execute" hosts="any" sequential="true" summary="Upgrading Ranger KMS database schema">
+ <script>scripts/kms_server.py</script>
+ <function>setup_ranger_kms_database</function>
+ </task>
+ </pre-upgrade>
+
+ <pre-downgrade>
+ <task xsi:type="execute" hosts="any" sequential="true" summary="Downgrading Ranger KMS database schema">
+ <script>scripts/kms_server.py</script>
+ <function>setup_ranger_kms_database</function>
+ </task>
+ </pre-downgrade>
+
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="KAFKA">
+ <component name="KAFKA_BROKER">
+ <pre-downgrade /> <!-- no-op to prevent config changes on downgrade -->
+ <pre-upgrade>
+ <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_kafka_audit_db" />
+ <task xsi:type="configure" id="hdp_2_5_0_0_add_protocol_compatibility" />
+ </pre-upgrade>
+
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="HDFS">
+ <component name="NAMENODE">
+ <pre-downgrade /> <!-- no-op to prevent config changes on downgrade -->
+
+ <pre-upgrade>
+ <task xsi:type="configure" id="hdp_2_5_0_0_namenode_ha_adjustments"/>
+ <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hdfs_audit_db" />
+ </pre-upgrade>
+
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+
+ <component name="DATANODE">
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+
+ <component name="NFS_GATEWAY">
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+
+ <component name="HDFS_CLIENT">
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+
+ <component name="JOURNALNODE">
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+
+ <component name="ZKFC">
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="MAPREDUCE2">
+ <component name="HISTORYSERVER">
+ <pre-downgrade /> <!-- no-op to prevent config changes on downgrade -->
+
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+
+ <component name="MAPREDUCE2_CLIENT">
+ <pre-upgrade>
+ <task xsi:type="server_action" summary="Verifying LZO codec path for mapreduce" class="org.apache.ambari.server.serveraction.upgrades.FixLzoCodecPath"/>
+ </pre-upgrade>
+
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="YARN">
+ <component name="APP_TIMELINE_SERVER">
+ <pre-downgrade /> <!-- no-op to prevent config changes on downgrade -->
+
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+
+ <component name="RESOURCEMANAGER">
+ <pre-upgrade>
+ <task xsi:type="server_action" summary="Calculating Yarn Properties for Spark Shuffle" class="org.apache.ambari.server.serveraction.upgrades.SparkShufflePropertyConfig" />
+ <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_yarn_audit_db" />
+ </pre-upgrade>
+ <pre-downgrade /> <!-- no-op to prevent config changes on downgrade -->
+
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+
+ <component name="NODEMANAGER">
+ <pre-upgrade>
+ <task xsi:type="configure" id="hdp_2_5_0_0_add_spark2_yarn_shuffle"/>
+ </pre-upgrade>
+
+ <pre-downgrade/>
+
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+
+ <component name="YARN_CLIENT">
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="HBASE">
+ <component name="HBASE_MASTER">
+ <pre-downgrade /> <!-- no-op to prevent config changes on downgrade -->
+ <pre-upgrade>
+ <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hbase_audit_db" />
+ </pre-upgrade>
+
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+
+ <component name="HBASE_REGIONSERVER">
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+
+ <component name="HBASE_CLIENT">
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+
+ <component name="PHOENIX_QUERY_SERVER">
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="TEZ">
+ <component name="TEZ_CLIENT">
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="PIG">
+ <component name="PIG">
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="MAHOUT">
+ <component name="MAHOUT">
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="SQOOP">
+ <component name="SQOOP">
+ <pre-upgrade>
+ <!-- Remove Atlas configs that were incorrectly added to sqoop-site instead of Atlas' application.properties. -->
+ <task xsi:type="configure" id="hdp_2_5_0_0_remove_sqoop_atlas_configs" />
+
+ <!-- If cluster is Kerberized, add configs to sqoop-atlas-application.properties,
+ which will be written to the local file system if Atlas is present. -->
+ <task xsi:type="configure" id="hdp_2_5_0_0_add_sqoop_atlas_security_configs" />
+ </pre-upgrade>
+
+ <pre-downgrade/> <!-- no-op to prevent config changes on downgrade -->
+
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="HIVE">
+ <component name="HIVE_METASTORE">
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+
+ <component name="HIVE_SERVER">
+ <pre-upgrade>
+ <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hive_audit_db" />
+ <task xsi:type="server_action" summary="Update hive-env content" class="org.apache.ambari.server.serveraction.upgrades.HiveEnvClasspathAction"/>
+
+ </pre-upgrade>
+
+ <pre-downgrade/>
+
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+
+ <component name="WEBHCAT_SERVER">
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+
+ <component name="HIVE_CLIENT">
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+
+ <component name="HCAT">
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="SLIDER">
+ <component name="SLIDER">
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="SPARK">
+ <component name="SPARK_JOBHISTORYSERVER">
+ <pre-downgrade /> <!-- no-op to prevent config changes on downgrade -->
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+ <component name="SPARK_THRIFTSERVER">
+ <pre-downgrade /> <!-- no-op to prevent config changes on downgrade -->
+ <upgrade>
+ <task xsi:type="restart-task"/>
+ </upgrade>
+ </component>
+ <component name="SPARK_CLIENT">
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="OOZIE">
+ <component name="OOZIE_SERVER">
+ <pre-upgrade>
+ <task xsi:type="server_action" summary="Adjusting Oozie properties" class="org.apache.ambari.server.serveraction.upgrades.OozieConfigCalculation"/>
+
+ <task xsi:type="execute" hosts="all" sequential="true" summary="Shut down all Oozie servers">
+ <script>scripts/oozie_server.py</script>
+ <function>stop</function>
+ </task>
+
+ <!-- It is extremely important that both of these tasks run on the exact same host. Hence, pick the first alphabetically. -->
+ <task xsi:type="configure_function" hosts="first" />
+
+ <task xsi:type="execute" hosts="first" sequential="true" summary="Upgrading the Oozie database and creating a new sharelib">
+ <script>scripts/oozie_server_upgrade.py</script>
+ <function>upgrade_oozie_database_and_sharelib</function>
+ </task>
+ <task xsi:type="server_action" summary="Fixing Oozie admin users"
+ class="org.apache.ambari.server.serveraction.upgrades.FixOozieAdminUsers"/>
+ </pre-upgrade>
+
+ <pre-downgrade>
+ <task xsi:type="execute" hosts="all" sequential="true" summary="Shut down all Oozie servers">
+ <script>scripts/oozie_server.py</script>
+ <function>stop</function>
+ </task>
+
+ <task xsi:type="execute" hosts="any" sequential="true" summary="Create a new sharelib">
+ <script>scripts/oozie_server_upgrade.py</script>
+ <function>create_sharelib</function>
+ </task>
+ </pre-downgrade>
+
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+
+ <component name="OOZIE_CLIENT">
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="FALCON">
+ <component name="FALCON_SERVER">
+ <pre-upgrade>
+ <task xsi:type="configure" id="hdp_2_5_0_0_falcon_server_adjust_services_property"/>
+ </pre-upgrade>
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+ <component name="FALCON_CLIENT">
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="KNOX">
+ <component name="KNOX_GATEWAY">
+ <pre-downgrade /> <!-- no-op to prevent config changes on downgrade -->
+ <pre-upgrade>
+ <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_knox_audit_db" />
+ </pre-upgrade>
+
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="STORM">
+ <component name="NIMBUS">
+ <pre-upgrade>
+ <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_storm_audit_db" />
+ <task xsi:type="configure" id="hdp_2_5_0_0_upgrade_storm_1.0"/>
+ <task xsi:type="server_action" summary="Upgrade Storm Security Configs to 1.0"
+ class="org.apache.ambari.server.serveraction.upgrades.StormUpgradeKerberosDescriptorConfig"/>
+
+ <!-- Remove Atlas configs that were incorrectly added to storm-site instead of Atlas' application.properties. -->
+ <task xsi:type="configure" id="hdp_2_5_0_0_remove_storm_atlas_configs" />
+
+ <task xsi:type="execute" hosts="any" summary="Removing Storm data from ZooKeeper">
+ <script>scripts/storm_upgrade.py</script>
+ <function>delete_storm_zookeeper_data</function>
+ </task>
+
+ <task xsi:type="execute" summary="Removing local Storm data">
+ <script>scripts/storm_upgrade.py</script>
+ <function>delete_storm_local_data</function>
+ </task>
+ <task xsi:type="configure" id="hdp_2_5_0_0_add_storm_security_configs" />
+ <task xsi:type="configure" id="hdp_2_4_0_0_remove_empty_storm_topology_submission_notifier_plugin_class"/>
+ </pre-upgrade>
+
+ <pre-downgrade>
+ <task xsi:type="execute" hosts="any" summary="Removing Storm data from ZooKeeper">
+ <script>scripts/storm_upgrade.py</script>
+ <function>delete_storm_zookeeper_data</function>
+ </task>
+
+ <task xsi:type="execute" summary="Removing local Storm data">
+ <script>scripts/storm_upgrade.py</script>
+ <function>delete_storm_local_data</function>
+ </task>
+ </pre-downgrade>
+
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+
+ <component name="SUPERVISOR">
+ <pre-upgrade>
+ <task xsi:type="execute" summary="Removing local Storm data">
+ <script>scripts/storm_upgrade.py</script>
+ <function>delete_storm_local_data</function>
+ </task>
+ </pre-upgrade>
+
+ <pre-downgrade>
+ <task xsi:type="execute" summary="Removing local Storm data">
+ <script>scripts/storm_upgrade.py</script>
+ <function>delete_storm_local_data</function>
+ </task>
+ </pre-downgrade>
+
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+
+ <component name="STORM_UI_SERVER">
+ <pre-upgrade>
+ <task xsi:type="execute" summary="Removing local Storm data">
+ <script>scripts/storm_upgrade.py</script>
+ <function>delete_storm_local_data</function>
+ </task>
+ </pre-upgrade>
+
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+
+ <component name="DRPC_SERVER">
+ <pre-upgrade>
+ <task xsi:type="execute" summary="Removing local Storm data">
+ <script>scripts/storm_upgrade.py</script>
+ <function>delete_storm_local_data</function>
+ </task>
+ </pre-upgrade>
+
+ <post-upgrade>
+ <task xsi:type="manual">
+ <message>Please rebuild your topology using the new Storm version dependencies and resubmit it using the newly created jar.</message>
+ </task>
+ </post-upgrade>
+
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="FLUME">
+ <component name="FLUME_HANDLER">
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+ </service>
+
+ <service name="ACCUMULO">
+ <component name="ACCUMULO_MASTER">
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+ <component name="ACCUMULO_MONITOR">
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+ <component name="ACCUMULO_GC">
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+ <component name="ACCUMULO_TRACER">
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+ <component name="ACCUMULO_TSERVER">
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+ <component name="ACCUMULO_CLIENT">
+ <upgrade>
+ <task xsi:type="restart-task" />
+ </upgrade>
+ </component>
+ </service>
+ </processing>
+</upgrade>
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml.rej b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml.rej
new file mode 100644
index 0000000000..d733158ff2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml.rej
@@ -0,0 +1,34 @@
+***************
+*** 22,28 ****
+ <skip-failures>false</skip-failures>
+ <skip-service-check-failures>false</skip-service-check-failures>
+ <type>ROLLING</type>
+-
+ <prerequisite-checks>
+ <!-- List of additional pre-req checks to run in addition to the required pre-reqs -->
+ <check>org.apache.ambari.server.checks.HiveMultipleMetastoreCheck</check>
+--- 22,28 ----
+ <skip-failures>false</skip-failures>
+ <skip-service-check-failures>false</skip-service-check-failures>
+ <type>ROLLING</type>
++
+ <prerequisite-checks>
+ <!-- List of additional pre-req checks to run in addition to the required pre-reqs -->
+ <check>org.apache.ambari.server.checks.HiveMultipleMetastoreCheck</check>
+***************
+*** 742,748 ****
+ <pre-upgrade>
+ <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hbase_audit_db" />
+ </pre-upgrade>
+-
+ <pre-downgrade /> <!-- no-op to prevent config changes on downgrade -->
+
+ <upgrade>
+--- 742,748 ----
+ <pre-upgrade>
+ <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hbase_audit_db" />
+ </pre-upgrade>
++
+ <pre-downgrade /> <!-- no-op to prevent config changes on downgrade -->
+
+ <upgrade>