author    Peter Slawski <petersla@amazon.com>     2015-03-25 17:26:27 -0700
committer YoungWoo Kim <ywkim@apache.org>         2015-04-03 10:29:23 +0900
commit    b58c80ea6ab9bb47d0c13246e99f2c7aec6bb948 (patch)
tree      1625b6e9ab89aad938b4c14bc8bd82e4a62879a8 /bigtop-deploy
parent    d9c6c83b111a743d280fc72da826a0e8ef345bfc (diff)

BIGTOP-1787. puppet: Update hue.ini to match Hue 3.7.1 template
This updates the hue.ini template in the hue puppet module to resemble the
default hue.ini shipped with Hue 3.7.1. It also comments out ldap_url, since
it should not be set to a dummy URL.

Signed-off-by: YoungWoo Kim <ywkim@apache.org>
Diffstat (limited to 'bigtop-deploy')
 -rw-r--r--  bigtop-deploy/puppet/modules/hue/templates/hue.ini | 793
 1 file changed, 578 insertions(+), 215 deletions(-)
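Note on the ldap_url change: the template no longer ships a placeholder LDAP URL; deployments that do not use LDAP simply leave it commented out. A deployment that does authenticate against LDAP would uncomment and set it, together with the related keys, under the [[ldap]] subsection of [desktop] in hue.ini. A minimal sketch, with an illustrative server URL and base DN rather than values from this patch:

  [desktop]
    [[ldap]]
      # Point Hue at the real directory server (illustrative values, not from this patch).
      ldap_url=ldaps://ldap.example.com
      base_dn="DC=example,DC=com"
      # Bind credentials are only needed if the server disallows anonymous searches.
      ## bind_dn="CN=ServiceAccount,DC=example,DC=com"
      ## bind_password=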
diff --git a/bigtop-deploy/puppet/modules/hue/templates/hue.ini b/bigtop-deploy/puppet/modules/hue/templates/hue.ini
index 232d138f..283c3321 100644
--- a/bigtop-deploy/puppet/modules/hue/templates/hue.ini
+++ b/bigtop-deploy/puppet/modules/hue/templates/hue.ini
@@ -17,12 +17,12 @@
# ===================================
#
# For complete documentation about the contents of this file, run
-# $ <hue_root>/build/env/bin/hue config_help
+# $ <hue_root>/build/env/bin/hue config_help
#
# All .ini files under the current directory are treated equally. Their
# contents are merged to form the Hue configuration, which can
# can be viewed on the Hue at
-# http://<hue_host>:<port>/dump_config
+# http://<hue_host>:<port>/dump_config
###########################################################################
@@ -47,11 +47,14 @@
# Time zone name
time_zone=America/Los_Angeles
- # Turn off debug
- django_debug_mode=0
+ # Enable or disable Django debug mode.
+ django_debug_mode=false
- # Turn off backtrace for server error
- http_500_debug_mode=0
+ # Enable or disable backtrace for server error
+ http_500_debug_mode=false
+
+ # Enable or disable memory profiling.
+ ## memory_profiler=false
# Server email for internal error messages
## django_server_email='hue@localhost.localdomain'
@@ -59,15 +62,16 @@
# Email backend
## django_email_backend=django.core.mail.backends.smtp.EmailBackend
- # Set to true to use CherryPy as the webserver, set to false
- # to use Spawning as the webserver. Defaults to Spawning if
- # key is not specified.
- ## use_cherrypy_server = false
-
# Webserver runs as this user
server_user=hue
server_group=hue
+ # This should be the Hue admin and proxy user
+ ## default_user=hue
+
+ # This should be the hadoop cluster admin
+ ## default_hdfs_superuser=hdfs
+
# If set to false, runcpserver will not actually start the web server.
# Used if Apache is being used as a WSGI container.
## enable_server=yes
@@ -81,9 +85,45 @@
# Filename of SSL RSA Private Key
## ssl_private_key=
+ # List of allowed and disallowed ciphers in cipher list format.
+ # See http://www.openssl.org/docs/apps/ciphers.html for more information on cipher list format.
+ ## ssl_cipher_list=DEFAULT:!aNULL:!eNULL:!LOW:!EXPORT:!SSLv2
+
+ # LDAP username and password of the hue user used for LDAP authentications.
+ # Set it to use LDAP Authentication with HiveServer2 and Impala.
+ ## ldap_username=hue
+ ## ldap_password=
+
# Default encoding for site data
## default_site_encoding=utf-8
+ # Help improve Hue with anonymous usage analytics.
+ # Use Google Analytics to see how many times an application or specific section of an application is used, nothing more.
+ ## collect_usage=true
+
+ # Support for HTTPS termination at the load-balancer level with SECURE_PROXY_SSL_HEADER.
+ ## secure_proxy_ssl_header=false
+
+ # Comma-separated list of Django middleware classes to use.
+ # See https://docs.djangoproject.com/en/1.4/ref/middleware/ for more details on middlewares in Django.
+ ## middleware=desktop.auth.backend.LdapSynchronizationBackend
+
+ # Comma-separated list of regular expressions, which match the redirect URL.
+ # For example, to restrict to your local domain and FQDN, the following value can be used:
+ # ^\/.*$,^http:\/\/www.mydomain.com\/.*$
+ ## redirect_whitelist=
+
+ # Comma separated list of apps to not load at server startup.
+ # e.g.: pig,zookeeper
+ ## app_blacklist=
+
+ # The directory in which to store the auditing logs. Auditing is disabled if the value is empty.
+ # e.g. /var/log/hue/audit.log
+ ## audit_event_log_dir=
+
+ # Size in KB/MB/GB for audit log to rollover.
+ ## audit_log_max_file_size=100MB
+
# Administrators
# ----------------
[[django_admins]]
@@ -95,8 +135,9 @@
# -------------------
[[custom]]
- # Top banner HTML code
- ## banner_top_html=
+ # Top banner HTML code
+ # e.g. <H2>Test Lab A2 Hue Services</H2>
+ ## banner_top_html=
# Configuration options for user authentication into the web application
# ------------------------------------------------------------------------
@@ -111,9 +152,13 @@
# - desktop.auth.backend.PamBackend
# - desktop.auth.backend.SpnegoDjangoBackend
# - desktop.auth.backend.RemoteUserDjangoBackend
- # - desktop.auth.backend.OAuthBackend
+ # - libsaml.backend.SAML2Backend
+ # - libopenid.backend.OpenIDBackend
+ # - liboauth.backend.OAuthBackend
+ # (Supports Twitter, Facebook, Google+ and Linkedin)
## backend=desktop.auth.backend.AllowFirstUserDjangoBackend
+ # The service to use when querying PAM.
## pam_service=login
# When using the desktop.auth.backend.RemoteUserDjangoBackend, this sets
@@ -126,44 +171,74 @@
# Defaults to HTTP_REMOTE_USER
## remote_user_header=HTTP_REMOTE_USER
+ # Synchronize a user's groups when they log in
+ ## sync_groups_on_login=false
+
+ # Ignore the case of usernames when searching for existing users.
+ # Only supported in remoteUserDjangoBackend.
+ ## ignore_username_case=false
+
+ # Ignore the case of usernames when searching for existing users to authenticate with.
+ # Only supported in remoteUserDjangoBackend.
+ ## force_username_lowercase=false
+
+ # Users will expire after they have not logged in for 'n' amount of seconds.
+ # A negative number means that users will never expire.
+ ## expires_after=-1
+
+ # Apply 'expires_after' to superusers.
+ ## expire_superusers=true
+
# Configuration options for connecting to LDAP and Active Directory
# -------------------------------------------------------------------
[[ldap]]
- # The search base for finding users and groups
- ## base_dn="DC=mycompany,DC=com"
+ # The search base for finding users and groups
+ ## base_dn="DC=mycompany,DC=com"
- # The NT domain to connect to (only for use with Active Directory)
- ## nt_domain=mycompany.com
+ # URL of the LDAP server
+ ## ldap_url=ldap://auth.mycompany.com
- # URL of the LDAP server
- ldap_url=ldap://auth.mycompany.com
+ # A PEM-format file containing certificates for the CA's that
+ # Hue will trust for authentication over TLS.
+ # The certificate for the CA that signed the
+ # LDAP server certificate must be included among these certificates.
+ # See more here http://www.openldap.org/doc/admin24/tls.html.
+ ## ldap_cert=
+ ## use_start_tls=true
- # A PEM-format file containing certificates for the CA's that
- # Hue will trust for authentication over TLS.
- # The certificate for the CA that signed the
- # LDAP server certificate must be included among these certificates.
- # See more here http://www.openldap.org/doc/admin24/tls.html.
- ## ldap_cert=
- ## use_start_tls=true
+ # Distinguished name of the user to bind as -- not necessary if the LDAP server
+ # supports anonymous searches
+ ## bind_dn="CN=ServiceAccount,DC=mycompany,DC=com"
- # Distinguished name of the user to bind as -- not necessary if the LDAP server
- # supports anonymous searches
- ## bind_dn="CN=ServiceAccount,DC=mycompany,DC=com"
+ # Password of the bind user -- not necessary if the LDAP server supports
+ # anonymous searches
+ ## bind_password=
- # Password of the bind user -- not necessary if the LDAP server supports
- # anonymous searches
- ## bind_password=
+ # Pattern for searching for usernames -- Use <username> for the parameter
+ # For use when using LdapBackend for Hue authentication
+ ## ldap_username_pattern="uid=<username>,ou=People,dc=mycompany,dc=com"
- # Pattern for searching for usernames -- Use <username> for the parameter
- # For use when using LdapBackend for Hue authentication
- ## ldap_username_pattern="uid=<username>,ou=People,dc=mycompany,dc=com"
+ # Create users in Hue when they try to login with their LDAP credentials
+ # For use when using LdapBackend for Hue authentication
+ ## create_users_on_login = true
- # Create users in Hue when they try to login with their LDAP credentials
- # For use when using LdapBackend for Hue authentication
- ## create_users_on_login = true
+ # Ignore the case of usernames when searching for existing users in Hue.
+ ## ignore_username_case=false
- [[[users]]]
+ # Force usernames to lowercase when creating new users from LDAP.
+ ## force_username_lowercase=false
+
+ # Use search bind authentication.
+ ## search_bind_authentication=true
+
+ # Choose which kind of subgrouping to use: nested or suboordinate (deprecated).
+ ## subgroups=suboordinate
+
+ # Define the number of levels to search for nested members.
+ ## nested_members_search_depth=10
+
+ [[[users]]]
# Base filter for searching for users
## user_filter="objectclass=*"
@@ -171,31 +246,103 @@
# The username attribute in the LDAP schema
## user_name_attr=sAMAccountName
- [[[groups]]]
+ [[[groups]]]
# Base filter for searching for groups
## group_filter="objectclass=*"
- # The username attribute in the LDAP schema
+ # The group name attribute in the LDAP schema
## group_name_attr=cn
- # Configuration options for specifying the Desktop Database. For more info,
- # see http://docs.djangoproject.com/en/1.1/ref/settings/#database-engine
+ # The attribute of the group object which identifies the members of the group
+ ## group_member_attr=members
+
+ [[[ldap_servers]]]
+
+ ## [[[[mycompany]]]]
+
+ # The search base for finding users and groups
+ ## base_dn="DC=mycompany,DC=com"
+
+ # URL of the LDAP server
+ ## ldap_url=ldap://auth.mycompany.com
+
+ # A PEM-format file containing certificates for the CA's that
+ # Hue will trust for authentication over TLS.
+ # The certificate for the CA that signed the
+ # LDAP server certificate must be included among these certificates.
+ # See more here http://www.openldap.org/doc/admin24/tls.html.
+ ## ldap_cert=
+ ## use_start_tls=true
+
+ # Distinguished name of the user to bind as -- not necessary if the LDAP server
+ # supports anonymous searches
+ ## bind_dn="CN=ServiceAccount,DC=mycompany,DC=com"
+
+ # Password of the bind user -- not necessary if the LDAP server supports
+ # anonymous searches
+ ## bind_password=
+
+ # Pattern for searching for usernames -- Use <username> for the parameter
+ # For use when using LdapBackend for Hue authentication
+ ## ldap_username_pattern="uid=<username>,ou=People,dc=mycompany,dc=com"
+
+ ## Use search bind authentication.
+ ## search_bind_authentication=true
+
+ ## [[[[[users]]]]]
+
+ # Base filter for searching for users
+ ## user_filter="objectclass=Person"
+
+ # The username attribute in the LDAP schema
+ ## user_name_attr=sAMAccountName
+
+ ## [[[[[groups]]]]]
+
+ # Base filter for searching for groups
+ ## group_filter="objectclass=groupOfNames"
+
+ # The group name attribute in the LDAP schema
+ ## group_name_attr=cn
+
+ # Configuration options for specifying the Desktop Database. For more info,
+ # see http://docs.djangoproject.com/en/1.4/ref/settings/#database-engine
# ------------------------------------------------------------------------
[[database]]
engine=sqlite3
name=/var/lib/hue/desktop.db
# Database engine is typically one of:
- # postgresql_psycopg2, mysql, or sqlite3
+ # postgresql_psycopg2, mysql, sqlite3 or oracle.
#
- # Note that for sqlite3, 'name', below is a filename;
- # for other backends, it is the database name.
+ # Note that for sqlite3, 'name', below is a path to the filename. For other backends, it is the database name.
+ # Note for Oracle, options={'threaded':true} must be set in order to avoid crashes.
+ # Note for Oracle, you can use the Oracle Service Name by setting "port=0" and then "name=<host>:<port>/<service_name>".
## engine=sqlite3
## host=
## port=
## user=
## password=
- ## name=
+ ## name=desktop/desktop.db
+ ## options={}
+
+ # Configuration options for specifying the Desktop session.
+ # For more info, see https://docs.djangoproject.com/en/1.4/topics/http/sessions/
+ # ------------------------------------------------------------------------
+ [[session]]
+ # The cookie containing the users' session ID will expire after this amount of time in seconds.
+ # Default is 2 weeks.
+ ## ttl=1209600
+
+ # The cookie containing the users' session ID will be secure.
+ # Should only be enabled with HTTPS.
+ ## secure=false
+
+ # The cookie containing the users' session ID will use the HTTP only flag.
+ ## http_only=false
+
+ # Use session-length cookies. Logs out the user when she closes the browser window.
+ ## expire_at_browser_close=false
# Configuration options for connecting to an external SMTP server
@@ -229,7 +376,7 @@
<% end %>
- # Configuration options for using OAuthBackend login
+ # Configuration options for using OAuthBackend (core) login
# ------------------------------------------------------------------------
[[oauth]]
# The Consumer key of the application
@@ -249,6 +396,195 @@
###########################################################################
+# Settings to configure SAML
+###########################################################################
+
+[libsaml]
+ # Xmlsec1 binary path. This program should be executable by the user running Hue.
+ ## xmlsec_binary=/usr/local/bin/xmlsec1
+
+ # Entity ID for Hue acting as service provider.
+ # Can also accept a pattern where '<base_url>' will be replaced with server URL base.
+ ## entity_id="<base_url>/saml2/metadata/"
+
+ # Create users from SSO on login.
+ ## create_users_on_login=true
+
+ # Required attributes to ask for from IdP.
+ # This requires a comma separated list.
+ ## required_attributes=uid
+
+ # Optional attributes to ask for from IdP.
+ # This requires a comma separated list.
+ ## optional_attributes=
+
+ # IdP metadata in the form of a file. This is generally an XML file containing metadata that the Identity Provider generates.
+ ## metadata_file=
+
+ # Private key to encrypt metadata with.
+ ## key_file=
+
+ # Signed certificate to send along with encrypted metadata.
+ ## cert_file=
+
+ # A mapping from attributes in the response from the IdP to django user attributes.
+ ## user_attribute_mapping={'uid':'username'}
+
+ # Have Hue initiated authn requests be signed and provide a certificate.
+ ## authn_requests_signed=false
+
+ # Have Hue initiated logout requests be signed and provide a certificate.
+ ## logout_requests_signed=false
+
+ ## Username can be sourced from 'attributes' or 'nameid'.
+ ## username_source=attributes
+
+ # Performs the logout or not.
+ ## logout_enabled=true
+
+
+###########################################################################
+# Settings to configure OPENID
+###########################################################################
+
+[libopenid]
+ # (Required) OpenId SSO endpoint url.
+ ## server_endpoint_url=https://www.google.com/accounts/o8/id
+
+ # OpenId 1.1 identity url prefix to be used instead of SSO endpoint url
+ # This is only supported if you are using an OpenId 1.1 endpoint
+ ## identity_url_prefix=https://app.onelogin.com/openid/your_company.com/
+
+ # Create users from OPENID on login.
+ ## create_users_on_login=true
+
+ # Use email for username
+ ## use_email_for_username=true
+
+
+###########################################################################
+# Settings to configure OAuth
+###########################################################################
+
+[liboauth]
+ # NOTE:
+ # To work, each of the active (i.e. uncommented) services must have
+ # applications created on the social network.
+ # Then the "consumer key" and "consumer secret" must be provided here.
+ #
+ # The addresses where to do so are:
+ # Twitter: https://dev.twitter.com/apps
+ # Google+ : https://cloud.google.com/
+ # Facebook: https://developers.facebook.com/apps
+ # Linkedin: https://www.linkedin.com/secure/developer
+ #
+ # Additionally, the following must be set in the application settings:
+ # Twitter: Callback URL (aka Redirect URL) must be set to http://YOUR_HUE_IP_OR_DOMAIN_NAME/oauth/social_login/oauth_authenticated
+ # Google+ : CONSENT SCREEN must have email address
+ # Facebook: Sandbox Mode must be DISABLED
+ # Linkedin: "In OAuth User Agreement", r_emailaddress is REQUIRED
+
+ # The Consumer key of the application
+ ## consumer_key_twitter=
+ ## consumer_key_google=
+ ## consumer_key_facebook=
+ ## consumer_key_linkedin=
+
+ # The Consumer secret of the application
+ ## consumer_secret_twitter=
+ ## consumer_secret_google=
+ ## consumer_secret_facebook=
+ ## consumer_secret_linkedin=
+
+ # The Request token URL
+ ## request_token_url_twitter=https://api.twitter.com/oauth/request_token
+ ## request_token_url_google=https://accounts.google.com/o/oauth2/auth
+ ## request_token_url_linkedin=https://www.linkedin.com/uas/oauth2/authorization
+ ## request_token_url_facebook=https://graph.facebook.com/oauth/authorize
+
+ # The Access token URL
+ ## access_token_url_twitter=https://api.twitter.com/oauth/access_token
+ ## access_token_url_google=https://accounts.google.com/o/oauth2/token
+ ## access_token_url_facebook=https://graph.facebook.com/oauth/access_token
+ ## access_token_url_linkedin=https://api.linkedin.com/uas/oauth2/accessToken
+
+ # The Authenticate URL
+ ## authenticate_url_twitter=https://api.twitter.com/oauth/authorize
+ ## authenticate_url_google=https://www.googleapis.com/oauth2/v1/userinfo?access_token=
+ ## authenticate_url_facebook=https://graph.facebook.com/me?access_token=
+ ## authenticate_url_linkedin=https://api.linkedin.com/v1/people/~:(email-address)?format=json&oauth2_access_token=
+
+ # Username Map. Json Hash format.
+ # Replaces username parts in order to simplify usernames obtained
+ # Example: {"@sub1.domain.com":"_S1", "@sub2.domain.com":"_S2"}
+ # converts 'email@sub1.domain.com' to 'email_S1'
+ ## username_map={}
+
+ # Whitelisted domains (only applies to Google OAuth). CSV format.
+ ## whitelisted_domains_google=
+
+###########################################################################
+# Settings for the RDBMS application
+###########################################################################
+
+[librdbms]
+ # The RDBMS app can have any number of databases configured in the databases
+ # section. A database is known by its section name
+ # (i.e. sqlite, mysql, psql, and oracle in the list below).
+
+ [[databases]]
+ # sqlite configuration.
+ ## [[[sqlite]]]
+ # Name to show in the UI.
+ ## nice_name=SQLite
+
+ # For SQLite, name defines the path to the database.
+ ## name=/tmp/sqlite.db
+
+ # Database backend to use.
+ ## engine=sqlite
+
+ # Database options to send to the server when connecting.
+ # https://docs.djangoproject.com/en/1.4/ref/databases/
+ ## options={}
+
+ # mysql, oracle, or postgresql configuration.
+ ## [[[mysql]]]
+ # Name to show in the UI.
+ ## nice_name="My SQL DB"
+
+ # For MySQL and PostgreSQL, name is the name of the database.
+ # For Oracle, name is the instance of the Oracle server. For Express Edition
+ # this is 'xe' by default.
+ ## name=mysqldb
+
+ # Database backend to use. This can be:
+ # 1. mysql
+ # 2. postgresql
+ # 3. oracle
+ ## engine=mysql
+
+ # IP or hostname of the database to connect to.
+ ## host=localhost
+
+ # Port the database server is listening to. Defaults are:
+ # 1. MySQL: 3306
+ # 2. PostgreSQL: 5432
+ # 3. Oracle Express Edition: 1521
+ ## port=3306
+
+ # Username to authenticate with when connecting to the database.
+ ## user=example
+
+ # Password matching the username to authenticate with when
+ # connecting to the database.
+ ## password=example
+
+ # Database options to send to the server when connecting.
+ # https://docs.djangoproject.com/en/1.4/ref/databases/
+ ## options={}
+
+###########################################################################
# Settings to configure your Hadoop cluster.
###########################################################################
@@ -263,54 +599,19 @@
# Enter the filesystem uri
fs_defaultfs=<%= @default_fs %>
- # Change this if your HDFS cluster is Kerberos-secured
- security_enabled=<%= if (@kerberos_realm != "") ; "true" else "false" end %>
+ # NameNode logical name.
+ ## logical_name=
# Use WebHdfs/HttpFs as the communication mechanism.
- # This should be the web service root URL, such as
- # http://namenode:50070/webhdfs/v1
+ # Domain should be the NameNode or HttpFs host.
+ # Default port is 14000 for HttpFs.
webhdfs_url=<%= @webhdfs_url %>
- # Settings about this HDFS cluster. If you install HDFS in a
- # different location, you need to set the following.
-
- # Defaults to $HADOOP_HDFS_HOME or /usr/lib/hadoop-hdfs
- ## hadoop_hdfs_home=/usr/lib/hadoop-hdfs
-
- # Defaults to $HADOOP_BIN or /usr/bin/hadoop
- ## hadoop_bin=/usr/bin/hadoop
+ # Change this if your HDFS cluster is Kerberos-secured
+ security_enabled=<%= if (@kerberos_realm != "") ; "true" else "false" end %>
- # Defaults to $HADOOP_CONF_DIR or /etc/hadoop/conf
- ## hadoop_conf_dir=/etc/hadoop/conf
-
- # FIXME: HUE-10 Configuration for MapReduce 0.20 JobTracker (MR1)
- # ------------------------------------------------------------------------
- #[[mapred_clusters]]
- #
- # [[[default]]]
- # # Enter the host on which you are running the Hadoop JobTracker
- # jobtracker_host=localhost
- # # The port where the JobTracker IPC listens on
- # jobtracker_port=8021
- # # Thrift plug-in port for the JobTracker
- # ## thrift_port=9290
- # # Whether to submit jobs to this cluster
- # ## submit_to=True
- #
- # # Change this if your MapReduce cluster is Kerberos-secured
- # ## security_enabled=false
- #
- # # Settings about this MR1 cluster. If you install MR1 in a
- # # different location, you need to set the following.
- #
- # # Defaults to $HADOOP_MR1_HOME or /usr/lib/hadoop-0.20-mapreduce
- # hadoop_mapred_home=/usr/lib/hadoop-mapreduce
- #
- # # Defaults to $HADOOP_BIN or /usr/bin/hadoop
- # ## hadoop_bin=/usr/bin/hadoop
- #
- # # Defaults to $HADOOP_CONF_DIR or /etc/hadoop/conf
- # ## hadoop_conf_dir=/etc/hadoop/conf
+ # Default umask for file and directory creation, specified in an octal value.
+ ## umask=022
# Configuration for YARN (MR2)
# ------------------------------------------------------------------------
@@ -319,26 +620,19 @@
[[[default]]]
# Enter the host on which you are running the ResourceManager
resourcemanager_host=<%= @rm_host %>
+
# The port where the ResourceManager IPC listens on
resourcemanager_port=<%= @rm_port %>
+
# Whether to submit jobs to this cluster
submit_to=True
+ # Resource Manager logical name (required for HA)
+ ## logical_name=
+
# Change this if your YARN cluster is Kerberos-secured
security_enabled=<%= if (@kerberos_realm != "") ; "true" else "false" end %>
- # Settings about this MR2 cluster. If you install MR2 in a
- # different location, you need to set the following.
-
- # Defaults to $HADOOP_MR2_HOME or /usr/lib/hadoop-mapreduce
- hadoop_mapred_home=/usr/lib/hadoop-mapreduce
-
- # Defaults to $HADOOP_BIN or /usr/bin/hadoop
- hadoop_bin=/usr/bin/hadoop
-
- # Defaults to $HADOOP_CONF_DIR or /etc/hadoop/conf
- hadoop_conf_dir=/etc/hadoop/conf
-
# URL of the ResourceManager API
resourcemanager_api_url=<%= @rm_url %>
@@ -348,9 +642,51 @@
# URL of the HistoryServer API
history_server_api_url=<%= @history_server_url %>
- # URL of the NodeManager API
- node_manager_api_url=http://localhost:8042
+ # HA support by specifying multiple clusters
+ # e.g.
+ # [[[ha]]]
+ # Resource Manager logical name (required for HA)
+ # logical_name=my-rm-name
+
+ # Configuration for MapReduce (MR1)
+ # ------------------------------------------------------------------------
+ [[mapred_clusters]]
+
+ [[[default]]]
+ # Enter the host on which you are running the Hadoop JobTracker
+ ## jobtracker_host=localhost
+
+ # The port where the JobTracker IPC listens on
+ ## jobtracker_port=8021
+
+ # JobTracker logical name for HA
+ ## logical_name=
+
+ # Thrift plug-in port for the JobTracker
+ ## thrift_port=9290
+
+ # Whether to submit jobs to this cluster
+ submit_to=False
+
+ # Change this if your MapReduce cluster is Kerberos-secured
+ ## security_enabled=false
+
+ # HA support by specifying multiple clusters
+ # e.g.
+
+ # [[[ha]]]
+ # Enter the logical name of the JobTrackers
+ # logical_name=my-jt-name
+
+
+###########################################################################
+# Settings to configure the Filebrowser app
+###########################################################################
+
+[filebrowser]
+ # Location on the local filesystem where the uploaded archives are temporarily stored.
+ ## archive_upload_tempdir=/tmp
###########################################################################
# Settings to configure liboozie
@@ -358,9 +694,10 @@
[liboozie]
# The URL where the Oozie service runs on. This is required in order for
- # users to submit jobs.
+ # users to submit jobs. Empty value disables the config check.
oozie_url=<%= @oozie_url %>
+ # Requires FQDN in oozie_url if enabled
security_enabled=<%= if (@kerberos_realm != "") ; "true" else "false" end %>
# Location on HDFS where the workflows/coordinator are deployed when submitted.
@@ -381,69 +718,63 @@
# Location on HDFS where the oozie examples and workflows are stored.
remote_data_dir=/user/hue/oozie/workspaces
- # Share workflows and coordinators information with all users. If set to false,
- # they will be visible only to the owner and administrators.
- share_jobs=True
-
# Maximum of Oozie workflows or coodinators to retrieve in one API call.
oozie_jobs_count=100
+ # Use Cron format for defining the frequency of a Coordinator instead of the old frequency number/unit.
+ ## enable_cron_scheduling=true
+
###########################################################################
-# Settings to configure Beeswax
+# Settings to configure Beeswax with Hive
###########################################################################
[beeswax]
- # Host where Beeswax server Thrift daemon is running.
- # If Kerberos security is enabled, the fully-qualified domain name (FQDN) is
- # required, even if the Thrift daemon is running on the same host as Hue.
- ## beeswax_server_host=<FQDN of Beeswax Server>
+ # Host where HiveServer2 is running.
+ # If Kerberos security is enabled, use fully-qualified domain name (FQDN).
+ ## hive_server_host=localhost
- # The type of Thrift interface used for contacting the backend for sending
- # queries/metadata requests.
- # Choices are 'beeswax' (default), 'hiveserver2'.
- ## server_interface=beeswax
+ # Port where HiveServer2 Thrift server runs on.
+ ## hive_server_port=10000
- # Port where Beeswax Thrift server runs on.
- # Use 10000 when using the HiveServer2 interface.
- ## beeswax_server_port=8002
+ # Hive configuration directory, where hive-site.xml is located
+ ## hive_conf_dir=/etc/hive/conf
- # Host where internal metastore Thrift daemon is running.
- ## beeswax_meta_server_host=localhost
+ # Timeout in seconds for thrift calls to Hive service
+ ## server_conn_timeout=120
- # Configure the port the internal metastore daemon runs on.
- # Used only if hive.metastore.local is true.
- ## beeswax_meta_server_port=8003
+ # Set a LIMIT clause when browsing a partitioned table.
+ # A positive value will be set as the LIMIT. If 0 or negative, do not set any limit.
+ ## browse_partitioned_table_limit=250
- # Hive home directory
- ## hive_home_dir=/usr/lib/hive
+ # A limit to the number of rows that can be downloaded from a query.
+ # A value of -1 means there will be no limit.
+ # A maximum of 65,000 is applied to XLS downloads.
+ ## download_row_limit=1000000
- # Hive configuration directory, where hive-site.xml is located
- ## hive_conf_dir=/etc/hive/conf
+ # Hue will try to close the Hive query when the user leaves the editor page.
+ # This will free all the query resources in HiveServer2, but also make its results inaccessible.
+ ## close_queries=false
- # Timeout in seconds for thrift calls to beeswax service
- ## beeswax_server_conn_timeout=120
+ # Thrift version to use when communicating with HiveServer2
+ ## thrift_version=5
- # Timeout in seconds for thrift calls to the hive metastore
- ## metastore_conn_timeout=10
+ [[ssl]]
+ # SSL communication enabled for this server.
+ ## enabled=false
- # Maximum Java heapsize (in megabytes) used by Beeswax Server.
- # Note that the setting of HADOOP_HEAPSIZE in $HADOOP_CONF_DIR/hadoop-env.sh
- # may override this setting.
- ## beeswax_server_heapsize=1000
+ # Path to Certificate Authority certificates.
+ ## cacerts=/etc/hue/cacerts.pem
- # Share saved queries with all users. If set to false, saved queries are
- # visible only to the owner and administrators.
- ## share_saved_queries=true
+ # Path to the private key file.
+ ## key=/etc/hue/key.pem
- # Time in milliseconds for Beeswax to persist queries in its cache.
- # 7*24*60*60*1000 = 1 week
- ## beeswax_running_query_lifetime=604800000L
+ # Path to the public certificate file.
+ ## cert=/etc/hue/cert.pem
- # Set a LIMIT clause when browsing a partitioned table.
- # A positive value will be set as the LIMIT. If 0 or negative, do not set any limit.
- ## browse_partitioned_table_limit=250
+ # Choose whether Hue should validate certificates received from the server.
+ ## validate=true
###########################################################################
@@ -463,6 +794,8 @@
###########################################################################
[sqoop]
+ # For autocompletion, fill out the librdbms section.
+
# Sqoop server URL
server_url=<%= @sqoop_url %>
@@ -479,21 +812,63 @@
# Comma-separated list of regular expressions,
# which match any prefix of 'host:port/path' of requested proxy target.
# This does not support matching GET parameters.
- ## blacklist=()
+ ## blacklist=
+
+
+###########################################################################
+# Settings to configure Impala
+###########################################################################
+
+[impala]
+ # Host of the Impala Server (one of the Impalad)
+ ## server_host=localhost
+
+ # Port of the Impala Server
+ ## server_port=21050
+
+ # Kerberos principal
+ ## impala_principal=impala/hostname.foo.com
+
+ # Turn on/off impersonation mechanism when talking to Impala
+ ## impersonation_enabled=False
+
+ # Number of initial rows of a result set to ask Impala to cache in order
+ # to support re-fetching them for downloading them.
+ # Set to 0 for disabling the option and backward compatibility.
+ ## querycache_rows=50000
+
+ # Timeout in seconds for thrift calls
+ ## server_conn_timeout=120
+
+ # Hue will try to close the Impala query when the user leaves the editor page.
+ # This will free all the query resources in Impala, but also make its results inaccessible.
+ ## close_queries=true
+
+ # If QUERY_TIMEOUT_S > 0, the query will be timed out (i.e. cancelled) if Impala does not do any work
+ # (compute or send back results) for that query within QUERY_TIMEOUT_S seconds.
+ ## query_timeout_s=600
###########################################################################
-# Settings to configure Hbase
+# Settings to configure HBase Browser
###########################################################################
[hbase]
- # Comma-separated list of HBase Thrift servers for
- # clusters in the format of '(name|host:port)'.
+ # Comma-separated list of HBase Thrift servers for clusters in the format of '(name|host:port)'.
+ # Use full hostname with security.
hbase_clusters=(Bigtop|<%= @hbase_thrift_url %>)
+ # HBase configuration directory, where hbase-site.xml is located.
+ ## hbase_conf_dir=/etc/hbase/conf
+
# Hard limit of rows or columns per row fetched before truncating.
## truncate_limit = 500
+ # 'buffered' is the default of the HBase Thrift Server and supports security.
+ # 'framed' can be used to chunk up responses,
+ # which is useful when used in conjunction with the nonblocking server in Thrift.
+ ## thrift_transport=buffered
+
###########################################################################
# Settings to configure Solr Search
@@ -512,12 +887,29 @@
###########################################################################
+# Settings to configure Solr Indexer
+###########################################################################
+
+[indexer]
+
+ # Location of the solrctl binary.
+ ## solrctl_path=/usr/bin/solrctl
+
+ # Location of the solr home.
+ ## solr_home=/usr/lib/solr
+
+ # Zookeeper ensemble.
+ ## solr_zk_ensemble=localhost:2181/solr
+
+ # The contents of this directory will be copied over to the solrctl host to its temporary directory.
+ ## config_template_path=/../hue/desktop/libs/indexer/src/data/solr_configs
+
+
+###########################################################################
# Settings to configure Job Designer
###########################################################################
[jobsub]
- # Location on HDFS where the jobsub examples and templates are stored.
- ## remote_data_dir=/user/hue/jobsub
# Location on local FS where examples and template are stored.
## local_data_dir=..../data
@@ -527,7 +919,7 @@
###########################################################################
-# Settings to configure Job Browser
+# Settings to configure Job Browser.
###########################################################################
[jobbrowser]
@@ -537,73 +929,29 @@
###########################################################################
-# Settings to configure the Shell application
+# Settings to configure the Zookeeper application.
###########################################################################
-[shell]
- # The shell_buffer_amount specifies the number of bytes of output per shell
- # that the Shell app will keep in memory. If not specified, it defaults to
- # 524288 (512 MiB).
- ## shell_buffer_amount=100
-
- # If you run Hue against a Hadoop cluster with Kerberos security enabled, the
- # Shell app needs to acquire delegation tokens for the subprocesses to work
- # correctly. These delegation tokens are stored as temporary files in some
- # directory. You can configure this directory here. If not specified, it
- # defaults to /tmp/hue_delegation_tokens.
- ## shell_delegation_token_dir=/tmp/hue_delegation_tokens
-
- [[ shelltypes ]]
-
- # Define and configure a new shell type "pig"
- # ------------------------------------------------------------------------
- [[[ pig ]]]
- nice_name = "Pig Shell (Grunt)"
- command = "/usr/bin/pig -l /dev/null"
- help = "The command-line interpreter for Pig"
+[zookeeper]
- [[[[ environment ]]]]
- # You can specify environment variables for the Pig shell
- # in this section. Note that JAVA_HOME must be configured
- # for the Pig shell to run.
+ [[clusters]]
- # [[[[[ JAVA_HOME ]]]]]
- # value = "/usr/lib/jvm/java-6-sun"
- [[[[[ PATH ]]]]]
- value = "/bin:/usr/bin:/sbin:/usr/sbin"
-
- # Define and configure a new shell type "hbase"
- # ------------------------------------------------------------------------
- [[[ hbase ]]]
- nice_name = "HBase Shell"
- command = "/usr/bin/hbase shell"
- help = "The command-line HBase client interface."
+ [[[default]]]
+ # Zookeeper ensemble. Comma separated list of Host/Port.
+ # e.g. localhost:2181,localhost:2182,localhost:2183
+ ## host_ports=localhost:2181
- [[[[ environment ]]]]
- # You can configure environment variables for the HBase shell
- # in this section.
- [[[[[ PATH ]]]]]
- value = "/bin:/usr/bin:/sbin:/usr/sbin"
+ # The URL of the REST contrib service (required for znode browsing)
+ ## rest_url=http://localhost:9998
- # Define and configure a new shell type "Sqoop 2"
- # ------------------------------------------------------------------------
- [[[ sqoop2 ]]]
- nice_name = "Sqoop 2 Shell"
- command = "/usr/bin/sqoop"
- help = "The command-line Sqoop 2 client."
- [[[[ environment ]]]]
- # You can configure environment variables for the Sqoop 2 shell
- # in this section.
- [[[[[ PATH ]]]]]
- value = "/bin:/usr/bin:/sbin:/usr/sbin"
+###########################################################################
+# Settings to configure the Spark application.
+###########################################################################
- # Define and configure a new shell type "bash" for testing only
- # ------------------------------------------------------------------------
- [[[ bash ]]]
- nice_name = "Bash (Test only!!!)"
- command = "/bin/bash"
- help = "A shell that does not depend on Hadoop components"
+[spark]
+ # URL of the REST Spark Job Server.
+ ## server_url=http://localhost:8090/
###########################################################################
@@ -613,3 +961,18 @@
[useradmin]
# The name of the default user group that users will be a member of
## default_user_group=default
+
+
+###########################################################################
+# Settings for the Sentry lib
+###########################################################################
+
+[libsentry]
+ # Hostname or IP of server.
+ ## hostname=localhost
+
+ # Port the sentry service is running on.
+ ## port=8038
+
+ # Sentry configuration directory, where sentry-site.xml is located.
+ ## sentry_conf_dir=/etc/sentry/conf