<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!--
   Licensed to the Apache Software Foundation (ASF) under one or more
   contributor license agreements.  See the NOTICE file distributed with
   this work for additional information regarding copyright ownership.
   The ASF licenses this file to You under the Apache License, Version 2.0
   (the "License"); you may not use this file except in compliance with
   the License.  You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">

  <property>
    <name>ha.failover-controller.active-standby-elector.zk.op.retries</name>
    <value>120</value>
    <description>Number of times the ZooKeeper Failover Controller's active-standby elector retries a ZooKeeper operation before giving up; tune this for your environment.</description>
  </property>

<!-- i/o properties -->

  <property>
    <name>io.file.buffer.size</name>
    <value>131072</value>
    <description>The size of the buffer used in sequence files.
  The size of this buffer should probably be a multiple of the hardware
  page size (4096 on Intel x86); it determines how much data is
  buffered during read and write operations.</description>
  </property>
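
  <!-- As a worked illustration of the page-size guidance above: the default
       value 131072 is exactly 32 x 4096-byte pages, so buffered reads and
       writes stay page-aligned. Any hypothetical override, e.g. 262144
       (64 x 4096), should keep that multiple-of-page-size property. -->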

  <property>
    <name>io.serializations</name>
    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
    <description>A comma-delimited list of serialization classes that can be used to obtain serializers and deserializers.
    </description>
  </property>

  <property>
    <name>io.compression.codecs</name>
    <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec</value>
    <description>A list of the compression codec classes that can be used
                 for compression/decompression.</description>
  </property>

<!-- file system properties -->

  <property>
    <name>fs.defaultFS</name>
    <!-- cluster variant -->
    <value>hdfs://localhost:8020</value>
    <property-type>DONT_ADD_ON_UPGRADE</property-type>
    <description>The name of the default file system. A URI whose scheme and
  authority determine the FileSystem implementation; for HDFS this is
  hdfs://host:port.</description>
    <final>true</final>
  </property>
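
  <!-- Illustration only, not a default of this stack: on an HA cluster the
       value is typically a nameservice URI rather than a host:port, e.g.
       hdfs://mycluster, where "mycluster" is a hypothetical nameservice
       defined via dfs.nameservices in hdfs-site.xml. -->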

  <property>
    <name>fs.trash.interval</name>
    <value>360</value>
    <description>Number of minutes after which the checkpoint gets deleted.
        If zero, the trash feature is disabled.
        This option may be configured both on the server and the client.
        If trash is disabled server side then the client side configuration is checked.
        If trash is enabled on the server side then the value configured on the server is used and the client configuration value is ignored.
    </description>
  </property>
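
  <!-- Worked example for the value above: 360 minutes means a deleted file
       stays recoverable under the owner's .Trash directory for up to 6 hours
       before its checkpoint is removed, while a value of 0 would disable the
       trash feature entirely. -->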

  <!-- ipc properties: copied from kryptonite configuration -->
  <property>
    <name>ipc.client.idlethreshold</name>
    <value>8000</value>
    <description>Defines the threshold number of connections after which
               connections will be inspected for idleness.
  </description>
  </property>

  <property>
    <name>ipc.client.connection.maxidletime</name>
    <value>30000</value>
    <description>The maximum time in milliseconds after which a client will bring down the
               connection to the server.
  </description>
  </property>

  <property>
    <name>ipc.client.connect.max.retries</name>
    <value>50</value>
    <description>Defines the maximum number of retries for IPC connections.</description>
  </property>

  <property>
    <name>ipc.server.tcpnodelay</name>
    <value>true</value>
    <description>Turn on/off Nagle's algorithm for the TCP socket connection on
      the server. Setting it to true disables the algorithm and may decrease
      latency at the cost of more, smaller packets.
    </description>
  </property>

  <!-- Web Interface Configuration -->
  <property>
    <name>mapreduce.jobtracker.webinterface.trusted</name>
    <value>false</value>
    <description>If set to true, the web interfaces of the JobTracker and NameNode may contain
                actions, such as kill job, delete file, etc., that should
                not be exposed to the public. Enable this option only if the interfaces
                are reachable solely by those who have the right authorization.
  </description>
  </property>

  <property>
    <name>hadoop.security.authentication</name>
    <value>simple</value>
    <description>
      Set the authentication for the cluster. Valid values are: simple or
      kerberos.
    </description>
  </property>
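
  <!-- Sketch of a non-default, Kerberized setup (an assumption, not shipped by
       this stack): a secure cluster would typically change this property and
       the one below together, i.e. hadoop.security.authentication=kerberos
       and hadoop.security.authorization=true. -->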
  <property>
    <name>hadoop.security.authorization</name>
    <value>false</value>
    <description>
      Enable service-level authorization for the different Hadoop protocols.
    </description>
  </property>

  <property>
    <name>hadoop.security.auth_to_local</name>
    <value>DEFAULT</value>
    <description>The mapping from Kerberos principal names to local operating system user names.
  The default rule is just "DEFAULT", which takes all principals in your default domain to their first component:
  "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" both become "omalley", if your default domain is APACHE.ORG.
The translation rules have 3 sections:
      base     filter    substitution
The base consists of a number that represents the number of components in the principal name excluding the realm, plus the pattern for building the name from the sections of the principal name. The pattern uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.

[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"

The filter is a regex in parens that must match the generated string for the rule to apply.

"(.*%admin)" will take any string that ends in "%admin"
"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"

Finally, the substitution is a sed rule to translate a regex into a fixed string.

"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
"s/X/Y/g" replaces all of the "X" in the name with "Y"

So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:

RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
DEFAULT

To also translate the names with a second component, you'd make the rules:

RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
RULE:[2:$1@$0](.*@ACME.COM)s/@.*//
DEFAULT

If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:

RULE:[2:$1%$2@$0](.*%admin@APACHE.ORG)s/.*/admin/
DEFAULT
    </description>
    <value-attributes>
      <type>multiLine</type>
    </value-attributes>
  </property>
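
  <!-- A minimal sketch of a non-default value, assuming a hypothetical second
       realm ACME.COM alongside the default realm. It maps one- and
       two-component ACME.COM principals to their first component and falls
       through to the default rule for everything else:

         RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
         RULE:[2:$1@$0](.*@ACME.COM)s/@.*//
         DEFAULT
  -->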
  <property>
    <name>net.topology.script.file.name</name>
    <value>/etc/hadoop/conf/topology_script.py</value>
    <description>
      Location of topology script used by Hadoop to determine the rack location of nodes.
    </description>
  </property>
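
  <!-- Contract of the script referenced above (the script itself is not part
       of this file): Hadoop invokes it with one or more host names or IP
       addresses as arguments, and it must print one rack path per argument
       to standard output, e.g. /default-rack for hosts it cannot map. -->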
</configuration>