author    Mahadev Konar <mahadev@apache.org>  2012-10-08 01:50:57 +0000
committer Mahadev Konar <mahadev@apache.org>  2012-10-08 01:50:57 +0000
commit    5f1f7f6dc78f1e65a351c80e4a09e57b24d3ceb0 (patch)
tree      fc133904a105a976e28cdcb274aa0e8f0a3798d3 /ambari-agent
parent    0179792b0c265d6b0d074792e6a04846a090ae74 (diff)
Fixing removed files in the prev commit
git-svn-id: https://svn.apache.org/repos/asf/incubator/ambari/branches/AMBARI-666@1395431 13f79535-47bb-0310-9956-ffa450edef68
Diffstat (limited to 'ambari-agent')
-rw-r--r--  ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/capacity-scheduler.xml.erb  |  68
-rw-r--r--  ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/core-site.xml.erb           | 254
-rw-r--r--  ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-policy.xml.erb       | 134
-rw-r--r--  ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/mapred-queue-acls.xml.erb   |  39
-rw-r--r--  ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/mapred-site.xml.erb         | 531
5 files changed, 0 insertions(+), 1026 deletions(-)
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/capacity-scheduler.xml.erb b/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/capacity-scheduler.xml.erb
deleted file mode 100644
index 7cec438787..0000000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/capacity-scheduler.xml.erb
+++ /dev/null
@@ -1,68 +0,0 @@
-<?xml version="1.0"?>
-
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<!-- This is the configuration file for the resource manager in Hadoop. -->
-<!-- You can configure various scheduling parameters related to queues. -->
-<!-- The properties for a queue follow a naming convention, such as -->
-<!-- mapred.capacity-scheduler.queue.<queue-name>.property-name. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-
- <property>
- <name>mapred.capacity-scheduler.queue.default.capacity</name>
- <value>100</value>
- <description>Percentage of the number of slots in the cluster that are
- guaranteed to be available for jobs in this queue.
- </description>
- </property>
-
- <property>
- <name>mapred.capacity-scheduler.queue.default.supports-priority</name>
- <value>false</value>
- <description>If true, priorities of jobs will be taken into
- account in scheduling decisions.
- </description>
- </property>
-
- <property>
- <name>mapred.capacity-scheduler.queue.default.minimum-user-limit-percent</name>
- <value>100</value>
- <description> Each queue enforces a limit on the percentage of resources
- allocated to a user at any given time, if there is competition for them.
- This user limit can vary between a minimum and maximum value. The former
- depends on the number of users who have submitted jobs, and the latter is
- set to this property value. For example, suppose the value of this
- property is 25. If two users have submitted jobs to a queue, no single
- user can use more than 50% of the queue resources. If a third user submits
- a job, no single user can use more than 33% of the queue resources. With 4
- or more users, no user can use more than 25% of the queue's resources. A
- value of 100 implies no user limits are imposed.
- </description>
- </property>
-
- <property>
- <name>mapred.capacity-scheduler.queue.default.maximum-initialized-jobs-per-user</name>
- <value>25</value>
- <description>The maximum number of jobs to be pre-initialized for a user
- of the job queue.
- </description>
- </property>
-
-</configuration>
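
A quick check on the minimum-user-limit-percent arithmetic described above: the effective per-user share is simply the larger of an equal split and the configured floor. A minimal sketch in Python (the function name is ours; this is illustrative, not Ambari or Hadoop code):

def user_share_percent(min_user_limit: float, active_users: int) -> float:
    """Effective share of queue resources per user, per the description
    above: each user gets an equal split of the queue, but never less
    than the configured minimum. A value of 100 imposes no user limit."""
    if active_users < 1:
        raise ValueError("need at least one active user")
    return max(100.0 / active_users, min_user_limit)

# The worked example from the description, with the template's value of 25:
assert user_share_percent(25, 2) == 50.0                # 2 users -> 50% each
assert abs(user_share_percent(25, 3) - 100 / 3) < 1e-9  # 3 users -> ~33%
assert user_share_percent(25, 4) == 25.0                # 4 or more -> 25%
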
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/core-site.xml.erb b/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/core-site.xml.erb
deleted file mode 100644
index bf4625ad9b..0000000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/core-site.xml.erb
+++ /dev/null
@@ -1,254 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
- <!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- -->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-<!-- i/o properties -->
-
- <property>
- <name>io.file.buffer.size</name>
- <value>131072</value>
- <description>The size of buffer for use in sequence files.
- The size of this buffer should probably be a multiple of hardware
- page size (4096 on Intel x86), and it determines how much data is
- buffered during read and write operations.</description>
- </property>
-
- <property>
- <name>io.serializations</name>
- <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
- </property>
-
- <property>
- <name>io.compression.codecs</name>
- <value><%=scope.function_hdp_template_var("compression_codecs")%></value>
- <description>A list of the compression codec classes that can be used
- for compression/decompression.</description>
- </property>
-
- <property>
- <name>io.compression.codec.lzo.class</name>
- <value>com.hadoop.compression.lzo.LzoCodec</value>
- <description>The implementation for lzo codec.</description>
- </property>
-
-<!-- file system properties -->
-
- <property>
- <name>fs.default.name</name>
- <!-- cluster variant -->
- <value>hdfs://<%=scope.function_hdp_host("namenode_host")%>:8020</value>
- <description>The name of the default file system. Either the
- literal string "local" or a host:port for NDFS.</description>
- <final>true</final>
- </property>
-
- <property>
- <name>fs.trash.interval</name>
- <value>360</value>
- <description>Number of minutes between trash checkpoints.
- If zero, the trash feature is disabled.
- </description>
- </property>
-
- <property>
- <name>fs.checkpoint.dir</name>
- <value><%=scope.function_hdp_template_var("fs_checkpoint_dir")%></value>
- <description>Determines where on the local filesystem the DFS secondary
- name node should store the temporary images to merge.
- If this is a comma-delimited list of directories then the image is
- replicated in all of the directories for redundancy.
- </description>
- </property>
-
- <property>
- <name>fs.checkpoint.edits.dir</name>
- <value>${fs.checkpoint.dir}</value>
- <description>Determines where on the local filesystem the DFS secondary
- name node should store the temporary edits to merge.
- If this is a comma-delimited list of directories then the edits are
- replicated in all of the directories for redundancy.
- Default value is same as fs.checkpoint.dir
- </description>
- </property>
-
- <property>
- <name>fs.checkpoint.period</name>
- <value>21600</value>
- <description>The number of seconds between two periodic checkpoints.
- </description>
- </property>
-
- <property>
- <name>fs.checkpoint.size</name>
- <value>536870912</value>
- <description>The size of the current edit log (in bytes) that triggers
- a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
- </description>
- </property>
-
- <!-- ipc properties: copied from kryptonite configuration -->
- <property>
- <name>ipc.client.idlethreshold</name>
- <value>8000</value>
- <description>Defines the threshold number of connections after which
- connections will be inspected for idleness.
- </description>
- </property>
-
- <property>
- <name>ipc.client.connection.maxidletime</name>
- <value>30000</value>
- <description>The maximum time after which a client will bring down the
- connection to the server.
- </description>
- </property>
-
- <property>
- <name>ipc.client.connect.max.retries</name>
- <value>50</value>
- <description>Defines the maximum number of retries for IPC connections.</description>
- </property>
-
- <!-- Web Interface Configuration -->
- <property>
- <name>webinterface.private.actions</name>
- <value>false</value>
- <description> If set to true, the web interfaces of JT and NN may contain
- actions, such as kill job, delete file, etc., that should
- not be exposed to the public. Enable this option if the interfaces
- are only reachable by those who have the right authorization.
- </description>
- </property>
-
- <property>
- <name>hadoop.security.authentication</name>
- <value><%=scope.function_hdp_template_var("security_type")%></value>
- <description>
- Set the authentication for the cluster. Valid values are: simple or
- kerberos.
- </description>
- </property>
-<property>
- <name>hadoop.security.authorization</name>
- <value><%=scope.function_hdp_template_var("enable_security_authorization")%></value>
- <description>
- Enable authorization for different protocols.
- </description>
-</property>
-
- <property>
- <name>hadoop.security.auth_to_local</name>
- <value>
- RULE:[2:$1@$0]([jt]t@.*<%=scope.function_hdp_template_var("kerberos_domain")%>)s/.*/<%=scope.function_hdp_template_var("mapred_user")%>/
- RULE:[2:$1@$0]([nd]n@.*<%=scope.function_hdp_template_var("kerberos_domain")%>)s/.*/<%=scope.function_hdp_template_var("hdfs_user")%>/
- RULE:[2:$1@$0](hm@.*<%=scope.function_hdp_template_var("kerberos_domain")%>)s/.*/<%=scope.function_hdp_template_var("hbase_user")%>/
- RULE:[2:$1@$0](rs@.*<%=scope.function_hdp_template_var("kerberos_domain")%>)s/.*/<%=scope.function_hdp_template_var("hbase_user")%>/
- DEFAULT</value>
-<description>The mapping from kerberos principal names to local OS user names.
- The default rule is just "DEFAULT", which maps all principals in your default domain to their first component:
- "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" both become "omalley" if your default domain is APACHE.ORG.
-The translation rules have 3 sections:
-      base     filter     substitution
-The base consists of a number, which is the number of components in the principal name (excluding the realm), and a pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
-
-[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
-[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
-[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
-
-The filter is a regex in parens that must match the generated string for the rule to apply.
-
-"(.*%admin)" will take any string that ends in "%admin"
-"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
-
-Finally, the substitution is a sed rule to translate a regex into a fixed string.
-
-"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
-"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
-"s/X/Y/g" replaces all of the "X" in the name with "Y"
-
-So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
-
-RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
-DEFAULT
-
-To also translate the names with a second component, you'd make the rules:
-
-RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
-RULE:[2:$1@$0](.*@ACME.COM)s/@.*//
-DEFAULT
-
-If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
-
-RULE:[2:$1%$2@$0](.*%admin@APACHE.ORG)s/.*/admin/
-DEFAULT
- </description>
- </property>
-
-<property>
- <name>hadoop.proxyuser.<%=scope.function_hdp_user("hive_user")%>.groups</name>
- <value><%=scope.function_hdp_template_var("proxyuser_group")%></value>
- <description>
- Proxy group for Hadoop.
- </description>
-</property>
-
-<property>
- <name>hadoop.proxyuser.<%=scope.function_hdp_user("hive_user")%>.hosts</name>
- <value><%=scope.function_hdp_host("hive_server_host")%></value>
- <description>
- Proxy host for Hadoop.
- </description>
-</property>
-
-<property>
- <name>hadoop.proxyuser.<%=scope.function_hdp_user("oozie_user")%>.groups</name>
- <value><%=scope.function_hdp_template_var("proxyuser_group")%></value>
- <description>
- Proxy group for Hadoop.
- </description>
-</property>
-
-<property>
- <name>hadoop.proxyuser.<%=scope.function_hdp_user("oozie_user")%>.hosts</name>
- <value><%=scope.function_hdp_host("oozie_server")%></value>
- <description>
- Proxy host for Hadoop.
- </description>
-</property>
-
-<property>
- <name>hadoop.proxyuser.<%=scope.function_hdp_user("templeton_user")%>.groups</name>
- <value><%=scope.function_hdp_template_var("proxyuser_group")%></value>
- <description>
- Proxy group for templeton.
- </description>
-</property>
-
-<property>
- <name>hadoop.proxyuser.<%=scope.function_hdp_user("templeton_user")%>.hosts</name>
- <value><%=scope.function_hdp_host("templeton_server_host")%></value>
- <description>
- Proxy host for templeton.
- </description>
-</property>
-</configuration>
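
The auth_to_local description above is dense, so here is the rule machinery (base, filter, substitution) re-implemented as a short Python sketch. This is a simplified illustration, not Hadoop's actual KerberosName resolver; the function name and the simplifications (single substitution pass, no rule flags) are ours:

import re

def apply_rule(principal, num_components, pattern,
               filter_regex, sed_match, sed_repl):
    """Apply one RULE:[n:pattern](filter)s/match/repl/ to a principal
    like user@REALM or user/host@REALM. Returns the mapped short name,
    or None if this rule does not apply."""
    name, _, realm = principal.partition("@")
    components = name.split("/")
    if len(components) != num_components:
        return None
    # Build the candidate string: $0 is the realm, $1 the first
    # component, $2 the second component.
    candidate = pattern.replace("$0", realm)
    for i, comp in enumerate(components, start=1):
        candidate = candidate.replace("$" + str(i), comp)
    if re.fullmatch(filter_regex, candidate) is None:
        return None
    return re.sub(sed_match, sed_repl, candidate, count=1)

# The template's first rule, RULE:[2:$1@$0]([jt]t@.*<kerberos_domain>)s/.*/<mapred_user>/,
# with EXAMPLE.COM and "mapred" standing in for the two template variables:
print(apply_rule("jt/host1@EXAMPLE.COM", 2, "$1@$0",
                 r"[jt]t@.*EXAMPLE\.COM", r".*", "mapred"))  # -> mapred
print(apply_rule("alice@EXAMPLE.COM", 2, "$1@$0",
                 r"[jt]t@.*EXAMPLE\.COM", r".*", "mapred"))  # -> None (wrong base)
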
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-policy.xml.erb b/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-policy.xml.erb
deleted file mode 100644
index 7c1dc636c5..0000000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-policy.xml.erb
+++ /dev/null
@@ -1,134 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
- <property>
- <name>security.client.protocol.acl</name>
- <value>*</value>
- <description>ACL for ClientProtocol, which is used by user code
- via the DistributedFileSystem.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank, e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.</description>
- </property>
-
- <property>
- <name>security.client.datanode.protocol.acl</name>
- <value>*</value>
- <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
- for block recovery.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank, e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.</description>
- </property>
-
- <property>
- <name>security.datanode.protocol.acl</name>
- <value>*</value>
- <description>ACL for DatanodeProtocol, which is used by datanodes to
- communicate with the namenode.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank, e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.</description>
- </property>
-
- <property>
- <name>security.inter.datanode.protocol.acl</name>
- <value>*</value>
- <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
- for updating generation timestamp.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank, e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.</description>
- </property>
-
- <property>
- <name>security.namenode.protocol.acl</name>
- <value>*</value>
- <description>ACL for NamenodeProtocol, the protocol used by the secondary
- namenode to communicate with the namenode.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank, e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.</description>
- </property>
-
- <property>
- <name>security.inter.tracker.protocol.acl</name>
- <value>*</value>
- <description>ACL for InterTrackerProtocol, used by the tasktrackers to
- communicate with the jobtracker.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank, e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.</description>
- </property>
-
- <property>
- <name>security.job.submission.protocol.acl</name>
- <value>*</value>
- <description>ACL for JobSubmissionProtocol, used by job clients to
- communicate with the jobtracker for job submission, querying job status etc.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank, e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.</description>
- </property>
-
- <property>
- <name>security.task.umbilical.protocol.acl</name>
- <value>*</value>
- <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
- tasks to communicate with the parent tasktracker.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank, e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.</description>
- </property>
-
- <property>
- <name>security.admin.operations.protocol.acl</name>
- <value> <%=scope.function_hdp_template_var("hdfs_user")%></value>
- <description>ACL for AdminOperationsProtocol. Used for admin commands.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank, e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.</description>
- </property>
-
- <property>
- <name>security.refresh.usertogroups.mappings.protocol.acl</name>
- <value> <%=scope.function_hdp_template_var("hdfs_user")%></value>
- <description>ACL for RefreshUserMappingsProtocol. Used to refresh
- user-to-groups mappings. The ACL is a comma-separated list of user and
- group names. The user and group list is separated by a blank,
- e.g. "alice,bob users,wheel". A special value of "*" means all
- users are allowed.</description>
- </property>
-
-<property>
- <name>security.refresh.policy.protocol.acl</name>
- <value> <%=scope.function_hdp_template_var("hdfs_user")%></value>
- <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
- dfsadmin and mradmin commands to refresh the security policy in-effect.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank, e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.</description>
- </property>
-
-
-</configuration>
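
Every ACL above shares one value format: a comma-separated user list and a comma-separated group list, separated by a single blank, with "*" meaning all users and a leading blank (as in the hdfs_user values above) producing an empty user list, i.e. groups only. A hedged sketch of the check, for illustration only (this is not Hadoop's AccessControlList class):

def acl_allows(acl: str, user: str, user_groups: set) -> bool:
    """Check a "users groups" ACL string such as "alice,bob users,wheel"."""
    if acl.strip() == "*":
        return True                        # wildcard: all users allowed
    users_part, _, groups_part = acl.partition(" ")
    users = {u for u in users_part.split(",") if u}
    groups = {g for g in groups_part.split(",") if g}
    return user in users or bool(user_groups & groups)

assert acl_allows("*", "anybody", set())
assert acl_allows("alice,bob users,wheel", "bob", set())        # listed user
assert acl_allows("alice,bob users,wheel", "carol", {"wheel"})  # listed group
assert not acl_allows("alice,bob users,wheel", "carol", {"staff"})
assert acl_allows(" hadoop", "mapred", {"hadoop"})              # groups-only form
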
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/mapred-queue-acls.xml.erb b/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/mapred-queue-acls.xml.erb
deleted file mode 100644
index ce12380767..0000000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/mapred-queue-acls.xml.erb
+++ /dev/null
@@ -1,39 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<!-- mapred-queue-acls.xml -->
-<configuration>
-
-
-<!-- queue default -->
-
- <property>
- <name>mapred.queue.default.acl-submit-job</name>
- <value>*</value>
- </property>
-
- <property>
- <name>mapred.queue.default.acl-administer-jobs</name>
- <value>*</value>
- </property>
-
- <!-- END ACLs -->
-
-</configuration>
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/mapred-site.xml.erb b/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/mapred-site.xml.erb
deleted file mode 100644
index bbbb05fb6c..0000000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/mapred-site.xml.erb
+++ /dev/null
@@ -1,531 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-<!-- i/o properties -->
-
- <property>
- <name>io.sort.mb</name>
- <value><%=scope.function_hdp_template_var("io_sort_mb")%></value>
- <description>No description</description>
- </property>
-
- <property>
- <name>io.sort.record.percent</name>
- <value>.2</value>
- <description>No description</description>
- </property>
-
- <property>
- <name>io.sort.spill.percent</name>
- <value><%=scope.function_hdp_template_var("io_sort_spill_percent")%></value>
- <description>No description</description>
- </property>
-
- <property>
- <name>io.sort.factor</name>
- <value>100</value>
- <description>No description</description>
- </property>
-
-<!-- map/reduce properties -->
-
-<property>
- <name>mapred.tasktracker.tasks.sleeptime-before-sigkill</name>
- <value>250</value>
- <description>Normally, this is the amount of time before killing
-  processes, and the recommended default is 5 seconds (a value of
-  5000, since this is in milliseconds). Here it is used solely to blast
-  tasks before killing them, and to kill them very quickly (1/4 second)
-  to guarantee that we do not leave VMs around for later jobs.
- </description>
-</property>
-
- <property>
- <name>mapred.job.tracker.handler.count</name>
- <value>50</value>
- <description>
- The number of server threads for the JobTracker. This should be roughly
- 4% of the number of tasktracker nodes.
- </description>
- </property>
-
- <property>
- <name>mapred.system.dir</name>
- <value><%=scope.function_hdp_template_var("mapred_system_dir")%></value>
- <description>No description</description>
- <final>true</final>
- </property>
-
- <property>
- <name>mapred.job.tracker</name>
- <!-- cluster variant -->
- <value><%=scope.function_hdp_host("jtnode_host")%>:50300</value>
- <description>No description</description>
- <final>true</final>
- </property>
-
- <property>
- <name>mapred.job.tracker.http.address</name>
- <!-- cluster variant -->
- <value><%=scope.function_hdp_host("jtnode_host")%>:50030</value>
- <description>No description</description>
- <final>true</final>
- </property>
-
- <property>
- <!-- cluster specific -->
- <name>mapred.local.dir</name>
- <value><%=scope.function_hdp_template_var("mapred_local_dir")%></value>
- <description>No description</description>
- <final>true</final>
- </property>
-
- <property>
- <name>mapreduce.cluster.administrators</name>
- <value> hadoop</value>
- </property>
-
- <property>
- <name>mapred.reduce.parallel.copies</name>
- <value>30</value>
- <description>No description</description>
- </property>
-
- <property>
- <name>mapred.tasktracker.map.tasks.maximum</name>
- <value><%=scope.function_hdp_template_var("mapred_map_tasks_max")%></value>
- <description>No description</description>
- </property>
-
- <property>
- <name>mapred.tasktracker.reduce.tasks.maximum</name>
- <value><%=scope.function_hdp_template_var("mapred_red_tasks_max")%></value>
- <description>No description</description>
- </property>
-
- <property>
- <name>tasktracker.http.threads</name>
- <value>50</value>
- </property>
-
- <property>
- <name>mapred.map.tasks.speculative.execution</name>
- <value>false</value>
- <description>If true, then multiple instances of some map tasks
- may be executed in parallel.</description>
- </property>
-
- <property>
- <name>mapred.reduce.tasks.speculative.execution</name>
- <value>false</value>
- <description>If true, then multiple instances of some reduce tasks
- may be executed in parallel.</description>
- </property>
-
- <property>
- <name>mapred.reduce.slowstart.completed.maps</name>
- <value>0.05</value>
- </property>
-
- <property>
- <name>mapred.inmem.merge.threshold</name>
- <value>1000</value>
- <description>The threshold, in terms of the number of files,
-  for the in-memory merge process. When we accumulate the threshold number
-  of files, we initiate the in-memory merge and spill to disk. A value of 0
-  or less indicates that there is no threshold, and the merge depends only
-  on the ramfs's memory consumption to trigger the merge.
- </description>
- </property>
-
- <property>
- <name>mapred.job.shuffle.merge.percent</name>
- <value>0.66</value>
- <description>The usage threshold at which an in-memory merge will be
- initiated, expressed as a percentage of the total memory allocated to
- storing in-memory map outputs, as defined by
- mapred.job.shuffle.input.buffer.percent.
- </description>
- </property>
-
- <property>
- <name>mapred.job.shuffle.input.buffer.percent</name>
- <value>0.7</value>
- <description>The percentage of memory to be allocated from the maximum heap
- size to storing map outputs during the shuffle.
- </description>
- </property>
-
- <property>
- <name>mapred.map.output.compression.codec</name>
- <value><%=scope.function_hdp_template_var("mapred_map_output_compression_codec")%></value>
- <description>If the map outputs are compressed, how should they be
- compressed
- </description>
- </property>
-
-<property>
- <name>mapred.output.compression.type</name>
- <value>BLOCK</value>
- <description>If the job outputs are to be compressed as SequenceFiles, how should
- they be compressed? Should be one of NONE, RECORD or BLOCK.
- </description>
-</property>
-
-
- <property>
- <name>mapred.jobtracker.completeuserjobs.maximum</name>
- <value>0</value>
- </property>
-
- <property>
- <name>mapred.jobtracker.taskScheduler</name>
- <value><%=scope.function_hdp_template_var("scheduler_name")%></value>
- </property>
-
- <property>
- <name>mapred.jobtracker.restart.recover</name>
- <value>false</value>
- <description>"true" to enable (job) recovery upon restart,
- "false" to start afresh
- </description>
- </property>
-
- <property>
- <name>mapred.job.reduce.input.buffer.percent</name>
- <value>0.0</value>
- <description>The percentage of memory, relative to the maximum heap size, to
- retain map outputs during the reduce. When the shuffle is concluded, any
- remaining map outputs in memory must consume less than this threshold before
- the reduce can begin.
- </description>
- </property>
-
- <property>
- <name>mapreduce.reduce.input.limit</name>
- <value>10737418240</value>
- <description>The limit on the input size of the reduce. (This value
- is 10 GB.) If the estimated input size of the reduce is greater than
- this value, the job fails. A value of -1 means that there is no limit
- set.</description>
-</property>
-
-
- <!-- copied from kryptonite configuration -->
- <property>
- <name>mapred.compress.map.output</name>
- <value><%=scope.function_hdp_template_var("mapred_compress_map_output")%></value>
- </property>
-
-
- <property>
- <name>mapred.task.timeout</name>
- <value>600000</value>
- <description>The number of milliseconds before a task will be
- terminated if it neither reads an input, writes an output, nor
- updates its status string.
- </description>
- </property>
-
- <property>
- <name>jetty.connector</name>
- <value>org.mortbay.jetty.nio.SelectChannelConnector</value>
- <description>No description</description>
- </property>
-
- <property>
- <name>mapred.task.tracker.task-controller</name>
- <value><%=scope.function_hdp_template_var("task_controller")%></value>
- <description>
- TaskController which is used to launch and manage task execution.
- </description>
- </property>
-
- <property>
- <name>mapred.child.root.logger</name>
- <value>INFO,TLA</value>
- </property>
-
- <property>
- <name>mapred.child.java.opts</name>
- <value>-server <%=scope.function_hdp_template_var("mapred_child_java_opts_sz")%> -Djava.net.preferIPv4Stack=true</value>
-
- <description>No description</description>
- </property>
-
- <property>
- <name>mapred.cluster.map.memory.mb</name>
- <value><%=scope.function_hdp_template_var("mapred_cluster_map_mem_mb")%></value>
- </property>
-
- <property>
- <name>mapred.cluster.reduce.memory.mb</name>
- <value><%=scope.function_hdp_template_var("mapred_cluster_red_mem_mb")%></value>
- </property>
-
- <property>
- <name>mapred.job.map.memory.mb</name>
- <value><%=scope.function_hdp_template_var("mapred_job_map_mem_mb")%></value>
- </property>
-
- <property>
- <name>mapred.job.reduce.memory.mb</name>
- <value><%=scope.function_hdp_template_var("mapred_job_red_mem_mb")%></value>
- </property>
-
- <property>
- <name>mapred.cluster.max.map.memory.mb</name>
- <value><%=scope.function_hdp_template_var("mapred_cluster_max_map_mem_mb")%></value>
- </property>
-
- <property>
- <name>mapred.cluster.max.reduce.memory.mb</name>
- <value><%=scope.function_hdp_template_var("mapred_cluster_max_red_mem_mb")%></value>
- </property>
-
-<property>
- <name>mapred.hosts</name>
- <value><%=scope.function_hdp_template_var("conf_dir")%>/<%=scope.function_hdp_template_var("mapred_hosts_include")%></value>
-</property>
-
-<property>
- <name>mapred.hosts.exclude</name>
- <value><%=scope.function_hdp_template_var("conf_dir")%>/<%=scope.function_hdp_template_var("mapred_hosts_exclude")%></value>
-</property>
-
-<property>
- <name>mapred.max.tracker.blacklists</name>
- <value>16</value>
- <description>
- If a node is reported blacklisted by 16 successful jobs within the timeout window, it will be graylisted.
- </description>
-</property>
-
-<property>
- <name>mapred.healthChecker.script.path</name>
- <value><%=scope.function_hdp_template_var("conf_dir")%>/health_check</value>
-</property>
-
-<property>
- <name>mapred.healthChecker.interval</name>
- <value>135000</value>
-</property>
-
-<property>
- <name>mapred.healthChecker.script.timeout</name>
- <value>60000</value>
-</property>
-
-<property>
- <name>mapred.job.tracker.persist.jobstatus.active</name>
- <value>false</value>
- <description>Indicates whether persistence of job status information is
- active.
- </description>
-</property>
-
-<property>
- <name>mapred.job.tracker.persist.jobstatus.hours</name>
- <value>1</value>
- <description>The number of hours job status information is persisted in DFS.
- The job status information will be available after it drops off the memory
- queue and between jobtracker restarts. With a zero value the job status
- information is not persisted at all in DFS.
- </description>
-</property>
-
-<property>
- <name>mapred.job.tracker.persist.jobstatus.dir</name>
- <value><%=scope.function_hdp_template_var("mapred_jobstatus_dir")%></value>
- <description>The directory where the job status information is persisted
- in a file system to be available after it drops off the memory queue and
- between jobtracker restarts.
- </description>
-</property>
-
-<property>
- <name>mapred.jobtracker.retirejob.check</name>
- <value>10000</value>
-</property>
-
-<property>
- <name>mapred.jobtracker.retirejob.interval</name>
- <value>0</value>
-</property>
-
-<property>
- <name>mapred.job.tracker.history.completed.location</name>
- <value>/mapred/history/done</value>
- <description>No description</description>
-</property>
-
-<property>
- <name>mapred.task.maxvmem</name>
- <value></value>
- <final>true</final>
- <description>No description</description>
-</property>
-
-<property>
- <name>mapred.jobtracker.maxtasks.per.job</name>
- <value><%=scope.function_hdp_template_var("maxtasks_per_job")%></value>
- <final>true</final>
- <description>The maximum number of tasks for a single job.
- A value of -1 indicates that there is no maximum. </description>
-</property>
-
-<property>
- <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
- <value>false</value>
-</property>
-
-<property>
- <name>mapred.userlog.retain.hours</name>
- <value><%=scope.function_hdp_template_var("mapreduce_userlog_retainhours")%></value>
-</property>
-
-<property>
- <name>mapred.job.reuse.jvm.num.tasks</name>
- <value>1</value>
- <description>
- How many tasks to run per JVM. If set to -1, there is no limit.
- </description>
- <final>true</final>
-</property>
-
-<property>
- <name>mapreduce.jobtracker.kerberos.principal</name>
- <value>jt/_HOST@<%=scope.function_hdp_template_var("kerberos_domain")%></value>
- <description>
- JT user name key.
- </description>
-</property>
-
-<property>
- <name>mapreduce.tasktracker.kerberos.principal</name>
- <value>tt/_HOST@<%=scope.function_hdp_template_var("kerberos_domain")%></value>
- <description>
- tt user name key. "_HOST" is replaced by the host name of the task tracker.
- </description>
-</property>
-
-
- <property>
- <name>hadoop.job.history.user.location</name>
- <value>none</value>
- <final>true</final>
- </property>
-
-
- <property>
- <name>mapreduce.jobtracker.keytab.file</name>
- <value><%=scope.function_hdp_template_var("keytab_path")%>/jt.service.keytab</value>
- <description>
- The keytab for the jobtracker principal.
- </description>
-
-</property>
-
- <property>
- <name>mapreduce.tasktracker.keytab.file</name>
- <value><%=scope.function_hdp_template_var("keytab_path")%>/tt.service.keytab</value>
- <description>The filename of the keytab for the task tracker</description>
- </property>
-
- <property>
- <name>mapreduce.jobtracker.staging.root.dir</name>
- <value>/user</value>
- <description>The path prefix for where the staging directories should be placed. The next level is always the user's
- name. It is a path in the default file system.</description>
- </property>
-
- <property>
- <name>mapreduce.tasktracker.group</name>
- <value>hadoop</value>
- <description>The group that the TaskTracker uses for accessing the task-controller binary. The mapred user must be a member, and ordinary users should *not* be members.</description>
-
- </property>
-
- <property>
- <name>mapreduce.jobtracker.split.metainfo.maxsize</name>
- <value>50000000</value>
- <final>true</final>
- <description>If the size of the split metainfo file is larger than this, the JobTracker will fail the job during
- initialization.
- </description>
- </property>
- <property>
- <name>mapreduce.history.server.embedded</name>
- <value>false</value>
- <description>Whether the job history server should be embedded within the
- JobTracker process.</description>
- <final>true</final>
- </property>
-
- <property>
- <name>mapreduce.history.server.http.address</name>
- <!-- cluster variant -->
- <value><%=scope.function_hdp_host("jtnode_host")%>:51111</value>
- <description>Http address of the history server</description>
- <final>true</final>
- </property>
-
- <property>
- <name>mapreduce.jobhistory.kerberos.principal</name>
- <!-- cluster variant -->
- <value>jt/_HOST@<%=scope.function_hdp_template_var("kerberos_domain")%></value>
- <description>Job history user name key (must map to the same user as the
- JT user).</description>
- </property>
-
- <property>
- <name>mapreduce.jobhistory.keytab.file</name>
- <!-- cluster variant -->
- <value><%=scope.function_hdp_template_var("keytab_path")%>/jt.service.keytab</value>
- <description>The keytab for the job history server principal.</description>
- </property>
-
-<property>
- <name>mapred.jobtracker.blacklist.fault-timeout-window</name>
- <value>180</value>
- <description>
- 3-hour sliding window (value is in minutes)
- </description>
-</property>
-
-<property>
- <name>mapred.jobtracker.blacklist.fault-bucket-width</name>
- <value>15</value>
- <description>
- 15-minute bucket size (value is in minutes)
- </description>
-</property>
-
-<property>
- <name>mapred.queue.names</name>
- <value>default</value>
- <description>Comma-separated list of queues configured for this jobtracker.</description>
-</property>
-
-</configuration>
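
Since three of the shuffle settings above interact, one worked example: mapred.job.shuffle.input.buffer.percent sizes the in-memory shuffle buffer from the reduce heap, while mapred.job.shuffle.merge.percent and mapred.inmem.merge.threshold each supply a trigger for merging to disk. A rough Python model of those documented semantics (names are ours; this is not the Hadoop shuffle code):

def should_merge(heap_bytes, buffered_bytes, buffered_files,
                 input_buffer_percent=0.70,  # mapred.job.shuffle.input.buffer.percent
                 merge_percent=0.66,         # mapred.job.shuffle.merge.percent
                 file_threshold=1000):       # mapred.inmem.merge.threshold
    """Fire the in-memory merge when the shuffle buffer is merge_percent
    full, or when file_threshold map outputs have accumulated. A
    threshold <= 0 disables the file-count trigger, leaving only the
    memory-consumption trigger."""
    buffer_bytes = heap_bytes * input_buffer_percent
    if buffered_bytes >= merge_percent * buffer_bytes:
        return True
    return file_threshold > 0 and buffered_files >= file_threshold

# With a 1 GiB reduce heap, the buffer is ~717 MiB, so merging starts around
# 473 MiB of buffered map output, or once 1000 map outputs have accumulated.
GIB, MIB = 1 << 30, 1 << 20
print(should_merge(GIB, 500 * MIB, 10))    # True  (memory trigger)
print(should_merge(GIB, 100 * MIB, 1000))  # True  (file-count trigger)
print(should_merge(GIB, 100 * MIB, 10))    # False
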