author    Roman Shaposhnik <rvs@apache.org>  2017-03-25 02:21:38 +0000
committer Roman Shaposhnik <rvs@apache.org>  2017-03-24 19:24:26 -0700
commit    31fa70d73af117407f00cf2ba473a76074f472ba (patch)
tree      bb5905f7543c6ca728647447d0e6a5157c07f478 /bigtop-deploy/puppet
parent    047e4ddd754c0c909ceb7bde8bfac6cf8abf3ed8 (diff)
BIGTOP-2708. puppet code always installs hdfs by default
Diffstat (limited to 'bigtop-deploy/puppet')
-rw-r--r--  bigtop-deploy/puppet/manifests/cluster.pp             | 31 +++++++++++++++++++++++++++----
-rw-r--r--  bigtop-deploy/puppet/manifests/site.pp                |  5 ++++-
-rw-r--r--  bigtop-deploy/puppet/modules/hadoop/manifests/init.pp |  4 ++--
3 files changed, 33 insertions(+), 7 deletions(-)
diff --git a/bigtop-deploy/puppet/manifests/cluster.pp b/bigtop-deploy/puppet/manifests/cluster.pp
index d9f6e019..1313082b 100644
--- a/bigtop-deploy/puppet/manifests/cluster.pp
+++ b/bigtop-deploy/puppet/manifests/cluster.pp
@@ -13,6 +13,26 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
+# The following is a bit of a tricky map. The idea here is that the keys
+# correspond to anything that could be specified in
+# hadoop_cluster_node::cluster_components.
+# The values are maps from each role that a node can have in a cluster
+# (client, gateway_server, library, master, worker, standby) to a role
+# recognized by each puppet module's deploy class.
+#
+# Note that the code here will pass all these roles to all the deploy
+# classes defined in every one of Bigtop's puppet modules. This is similar
+# to how a visitor pattern works in OOP. One subtle ramification of this
+# approach is that you should make sure that deploy classes from different
+# modules do NOT accept the same strings for role types.
+#
+# And if that wasn't enough of a head scratcher -- you also need to keep
+# in mind that there's no hdfs key in the following map, even though it
+# is a perfectly legal value for hadoop_cluster_node::cluster_components.
+# The reason for this is that hdfs is treated as an alias for either
+# hdfs-non-ha or hdfs-ha, depending on whether HA for HDFS is enabled.
+
 $roles_map = {
   apex => {
     client => ["apex-client"],
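
To make the visitor-style dispatch described above concrete: every module's deploy class receives the full role list and reacts only to the strings it recognizes. A minimal sketch of such a consumer follows; the deploy class body is hypothetical and not part of this patch, though the "apex-client" role string comes from the map entry above:

    # Hypothetical deploy class: it is handed every role computed for the
    # node and acts only on the role strings it knows about.
    class apex::deploy ($roles) {
      if ("apex-client" in $roles) {
        include apex::client
      }
    }

Because every module sees the same role list, two modules claiming the same role string would both activate, which is why the comment insists that role strings stay unique across modules.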
@@ -217,14 +237,17 @@ class node_with_components inherits hadoop_cluster_node {
   }
   $given_components = $components_array[0] ? {
-    "all"   => delete(keys($roles_map), ["hdfs-non-ha", "hdfs-ha"]),
+    "all"   => delete(keys($roles_map), ["hdfs-non-ha", "hdfs-ha"]) << "hdfs",
     default => $components_array,
   }
   $ha_dependent_components = $ha_enabled ? {
-    true    => ["hdfs-ha"],
-    default => ["hdfs-non-ha"],
+    true    => "hdfs-ha",
+    default => "hdfs-non-ha",
+  }
+  $components = member($given_components, "hdfs") ? {
+    true    => delete($given_components, "hdfs") << $ha_dependent_components,
+    default => $given_components
   }
-  $components = concat($given_components, $ha_dependent_components)
   $master_role_types = ["master", "worker", "library"]
   $standby_role_types = ["standby", "library"]
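
Tracing the new logic end to end shows why HDFS is no longer unconditional: the old concat() appended an HDFS flavor to every component list, while the new selector only substitutes one when hdfs was actually requested. A worked example, assuming a node that asks for hdfs plus yarn with HA enabled (sample values, not taken from the patch):

    # Sample input: $components_array = ["hdfs", "yarn"], $ha_enabled = true
    #   $given_components        = ["hdfs", "yarn"]
    #   $ha_dependent_components = "hdfs-ha"
    #   member($given_components, "hdfs") is true, so:
    #   $components = delete(["hdfs", "yarn"], "hdfs") << "hdfs-ha"
    #               = ["yarn", "hdfs-ha"]
    # Had hdfs not been requested, $components would simply equal
    # $given_components and no HDFS roles would be mapped at all.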
diff --git a/bigtop-deploy/puppet/manifests/site.pp b/bigtop-deploy/puppet/manifests/site.pp
index ce5857f4..f35a4e0f 100644
--- a/bigtop-deploy/puppet/manifests/site.pp
+++ b/bigtop-deploy/puppet/manifests/site.pp
@@ -69,7 +69,10 @@ if ($provision_repo) {
 case $::operatingsystem {
   /Debian/: {
     require apt
-    require apt::backports
+    class { 'apt::backports':
+      pin => 500,
+    }
+    Class['apt::backports'] -> Package <||>
     package { "jdk":
       name => $jdk_package_name,
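
The Debian branch now does two things: it pins apt::backports at priority 500 so backports candidates compete with the regular archive (stock backports carry a low default priority and are never chosen automatically), and it uses a chaining arrow with a resource collector so the repository is configured before every Package resource in the catalog, not just jdk. A minimal sketch of the collector-ordering pattern:

    # <||> is an empty resource collector: it matches ALL Package
    # resources, so the class is applied before each one of them.
    class { 'apt::backports':
      pin => 500,
    }
    Class['apt::backports'] -> Package <||>

    # Per-resource equivalent, for contrast (would order only the jdk
    # package declared just below):
    # Class['apt::backports'] -> Package['jdk']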
diff --git a/bigtop-deploy/puppet/modules/hadoop/manifests/init.pp b/bigtop-deploy/puppet/modules/hadoop/manifests/init.pp
index 30a68e2b..7d8e48ff 100644
--- a/bigtop-deploy/puppet/modules/hadoop/manifests/init.pp
+++ b/bigtop-deploy/puppet/modules/hadoop/manifests/init.pp
@@ -45,7 +45,7 @@ class hadoop ($hadoop_security_authentication = "simple",
     }
   }
-  if ($hadoop::common_hdfs::ha != "disabled" and "standby-namenode" in $roles) {
+  if ("standby-namenode" in $roles and $hadoop::common_hdfs::ha != "disabled") {
     include hadoop::namenode
   }
@@ -67,7 +67,7 @@ class hadoop ($hadoop_security_authentication = "simple",
     }
   }
-  if ($hadoop::common_hdfs::ha == "disabled" and "secondarynamenode" in $roles) {
+  if ("secondarynamenode" in $roles and $hadoop::common_hdfs::ha == "disabled") {
     include hadoop::secondarynamenode
   }
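
Both init.pp hunks flip the operand order so the role-membership test runs first. Puppet's and operator short-circuits left to right, so $hadoop::common_hdfs::ha is only looked up on nodes whose role list actually contains the relevant namenode role, which fits the patch's goal of keeping HDFS machinery off nodes that never asked for it. A generic sketch of the guard pattern, with invented module and role names:

    # Cheap membership test first: when "some-role" is absent, the right
    # operand never evaluates, so a parameter of a class that was never
    # declared is not dereferenced.
    if ("some-role" in $roles and $mymodule::mode != "disabled") {
      include mymodule::daemon
    }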