diff options
author | Mahadev Konar <mahadev@apache.org> | 2012-10-08 01:37:59 +0000 |
---|---|---|
committer | Mahadev Konar <mahadev@apache.org> | 2012-10-08 01:37:59 +0000 |
commit | 0179792b0c265d6b0d074792e6a04846a090ae74 (patch) | |
tree | 463c0620f8fbefb1d0f6a9c7e1adc8d909068ac9 /ambari-agent | |
parent | cfe53d470e3ac7bd6170bee36e5a1b6159e5c204 (diff) |
Committing AMBARI-812, AMBARI-820
git-svn-id: https://svn.apache.org/repos/asf/incubator/ambari/branches/AMBARI-666@1395430 13f79535-47bb-0310-9956-ffa450edef68
Diffstat (limited to 'ambari-agent')
15 files changed, 561 insertions, 511 deletions
diff --git a/ambari-agent/src/main/puppet/manifestloader/site.pp b/ambari-agent/src/main/puppet/manifestloader/site.pp
index fa6829a144..c7f531c87e 100644
--- a/ambari-agent/src/main/puppet/manifestloader/site.pp
+++ b/ambari-agent/src/main/puppet/manifestloader/site.pp
@@ -19,250 +19,290 @@ #
#
-$hdp_hadoop_mapred_queue_acls_props => {'mapred.queue.default.acl-submit-job' => '*',
- 'mapred.queue.default.acl-administer-jobs' => '*',}
+configgenerator::configfile::configuration {'hdp_hadoop__mapred_queue_acls':
+ filename => 'mapred-queue-acls.xml',
+ module => 'hdp-hadoop',
+ properties => {'mapred.queue.default.acl-submit-job' => '*',
+ 'mapred.queue.default.acl-administer-jobs' => '*',}
+ }
-$hdp_hadoop_policy_props => {'security.client.protocol.acl' => '*',
- 'security.client.datanode.protocol.acl' => '*',
- 'security.datanode.protocol.acl' => '*',
- 'security.inter.datanode.protocol.acl' => '*',
- 'security.namenode.protocol.acl' => '*',
- 'security.inter.tracker.protocol.acl' => '*',
- 'security.job.submission.protocol.acl' => '*',
- 'security.task.umbilical.protocol.acl' => '*',
- 'security.admin.operations.protocol.acl' => '',
- 'security.refresh.usertogroups.mappings.protocol.acl' => '',
- 'security.refresh.policy.protocol.acl' => '',}
+configgenerator::configfile::configuration {'hdp_hadoop__hadoop_policy':
+ filename => 'hadoop-policy.xml',
+ module => 'hdp-hadoop',
+ properties=> {'security.client.protocol.acl' => '*',
+ 'security.client.datanode.protocol.acl' => '*',
+ 'security.datanode.protocol.acl' => '*',
+ 'security.inter.datanode.protocol.acl' => '*',
+ 'security.namenode.protocol.acl' => '*',
+ 'security.inter.tracker.protocol.acl' => '*',
+ 'security.job.submission.protocol.acl' => '*',
+ 'security.task.umbilical.protocol.acl' => '*',
+ 'security.admin.operations.protocol.acl' => '',
+ 'security.refresh.usertogroups.mappings.protocol.acl' => '',
+ 'security.refresh.policy.protocol.acl' => '',}
+ }
-$hdp_hadoop_core_site_props => {'io.file.buffer.size' => '131072',
- 'io.serializations' => 'org.apache.hadoop.io.serializer.WritableSerialization',
- 'io.compression.codecs' => '',
- 'io.compression.codec.lzo.class' => 'com.hadoop.compression.lzo.LzoCodec',
- 'fs.default.name' => '',
- 'fs.trash.interval' => '360',
- 'fs.checkpoint.dir' => '',
- 'fs.checkpoint.edits.dir' => '',
- 'fs.checkpoint.period' => '21600',
- 'fs.checkpoint.size' => '536870912',
- 'ipc.client.idlethreshold' => '8000',
- 'ipc.client.connection.maxidletime' => '30000',
- 'ipc.client.connect.max.retries' => '50',
- 'webinterface.private.actions' => 'false',
- 'hadoop.security.authentication' => '',
- 'hadoop.security.authorization' => '',
- 'hadoop.security.auth_to_local' => '',}
+configgenerator::configfile::configuration {'hdp_hadoop__core_site':
+ filename => 'core-site.xml',
+ module => 'hdp-hadoop',
+ properties => {'io.file.buffer.size' => '131072',
+ 'io.serializations' => 'org.apache.hadoop.io.serializer.WritableSerialization',
+ 'io.compression.codecs' => '',
+ 'io.compression.codec.lzo.class' => 'com.hadoop.compression.lzo.LzoCodec',
+ 'fs.default.name' => '',
+ 'fs.trash.interval' => '360',
+ 'fs.checkpoint.dir' => '',
+ 'fs.checkpoint.edits.dir' => '',
+ 'fs.checkpoint.period' => '21600',
+ 'fs.checkpoint.size' => '536870912',
+ 'ipc.client.idlethreshold' => '8000',
+ 'ipc.client.connection.maxidletime' => '30000',
+ 'ipc.client.connect.max.retries' => '50',
+ 'webinterface.private.actions' => 'false',
+ 'hadoop.security.authentication' => '',
+ 'hadoop.security.authorization' => '',
+ 'hadoop.security.auth_to_local' => '',}
+ }
-$hdp_hadoop_mapred_site_props => {'io.sort.mb' => '',
- 'io.sort.record.percent' => '.2',
- 'io.sort.spill.percent' => '',
- 'io.sort.factor' => '100',
- 'mapred.tasktracker.tasks.sleeptime-before-sigkill' => '250',
- 'mapred.job.tracker.handler.count' => '50',
- 'mapred.system.dir' => '',
- 'mapred.job.tracker' => '',
- 'mapred.job.tracker.http.address' => '',
- 'mapred.local.dir' => '',
- 'mapreduce.cluster.administrators' => ' hadoop',
- 'mapred.reduce.parallel.copies' => '30',
- 'mapred.tasktracker.map.tasks.maximum' => '',
- 'mapred.tasktracker.reduce.tasks.maximum' => '',
- 'tasktracker.http.threads' => '50',
- 'mapred.map.tasks.speculative.execution' => 'false',
- 'mapred.reduce.tasks.speculative.execution' => 'false',
- 'mapred.reduce.slowstart.completed.maps' => '0.05',
- 'mapred.inmem.merge.threshold' => '1000',
- 'mapred.job.shuffle.merge.percent' => '0.66',
- 'mapred.job.shuffle.input.buffer.percent' => '0.7',
- 'mapred.map.output.compression.codec' => '',
- 'mapred.output.compression.type' => 'BLOCK',
- 'mapred.jobtracker.completeuserjobs.maximum' => '0',
- 'mapred.jobtracker.taskScheduler' => '',
- 'mapred.jobtracker.restart.recover' => 'false',
- 'mapred.job.reduce.input.buffer.percent' => '0.0',
- 'mapreduce.reduce.input.limit' => '10737418240',
- 'mapred.compress.map.output' => '',
- 'mapred.task.timeout' => '600000',
- 'jetty.connector' => 'org.mortbay.jetty.nio.SelectChannelConnector',
- 'mapred.task.tracker.task-controller' => '',
- 'mapred.child.root.logger' => 'INFO,TLA',
- 'mapred.child.java.opts' => '',
- 'mapred.cluster.map.memory.mb' => '',
- 'mapred.cluster.reduce.memory.mb' => '',
- 'mapred.job.map.memory.mb' => '',
- 'mapred.job.reduce.memory.mb' => '',
- 'mapred.cluster.max.map.memory.mb' => '',
- 'mapred.cluster.max.reduce.memory.mb' => '',
- 'mapred.hosts' => '',
- 'mapred.hosts.exclude' => '',
- 'mapred.max.tracker.blacklists' => '16',
- 'mapred.healthChecker.script.path' => '',
- 'mapred.healthChecker.interval' => '135000',
- 'mapred.healthChecker.script.timeout' => '60000',
- 'mapred.job.tracker.persist.jobstatus.active' => 'false',
- 'mapred.job.tracker.persist.jobstatus.hours' => '1',
- 'mapred.job.tracker.persist.jobstatus.dir' => '',
- 'mapred.jobtracker.retirejob.check' => '10000',
- 'mapred.jobtracker.retirejob.interval' => '0',
- 'mapred.job.tracker.history.completed.location' => '/mapred/history/done',
- 'mapred.task.maxvmem' => '',
- 'mapred.jobtracker.maxtasks.per.job' => '',
- 'mapreduce.fileoutputcommitter.marksuccessfuljobs' => 'false',
- 'mapred.userlog.retain.hours' => '',
- 'mapred.job.reuse.jvm.num.tasks' => '1',
- 'mapreduce.jobtracker.kerberos.principal' => '',
- 'mapreduce.tasktracker.kerberos.principal' => '',
- 'hadoop.job.history.user.location' => 'none',
- 'mapreduce.jobtracker.keytab.file' => '',
- 'mapreduce.tasktracker.keytab.file' => '',
- 'mapreduce.jobtracker.staging.root.dir' => '/user',
- 'mapreduce.tasktracker.group' => 'hadoop',
- 'mapreduce.jobtracker.split.metainfo.maxsize' => '50000000',
- 'mapreduce.history.server.embedded' => 'false',
- 'mapreduce.history.server.http.address' => '',
- 'mapreduce.jobhistory.kerberos.principal' => '',
- 'mapreduce.jobhistory.keytab.file' => '',
- 'mapred.jobtracker.blacklist.fault-timeout-window' => '180',
- 'mapred.jobtracker.blacklist.fault-bucket-width' => '15',
- 'mapred.queue.names' => 'default',}
+configgenerator::configfile::configuration {'hdp_hadoop__mapred_site':
+ filename => 'mapred-site.xml',
+ module => 'hdp-hadoop',
+ properties => {'io.sort.mb' => '',
+ 'io.sort.record.percent' => '.2',
+ 'io.sort.spill.percent' => '',
+ 'io.sort.factor' => '100',
+ 'mapred.tasktracker.tasks.sleeptime-before-sigkill' => '250',
+ 'mapred.job.tracker.handler.count' => '50',
+ 'mapred.system.dir' => '',
+ 'mapred.job.tracker' => '',
+ 'mapred.job.tracker.http.address' => '',
+ 'mapred.local.dir' => '',
+ 'mapreduce.cluster.administrators' => ' hadoop',
+ 'mapred.reduce.parallel.copies' => '30',
+ 'mapred.tasktracker.map.tasks.maximum' => '',
+ 'mapred.tasktracker.reduce.tasks.maximum' => '',
+ 'tasktracker.http.threads' => '50',
+ 'mapred.map.tasks.speculative.execution' => 'false',
+ 'mapred.reduce.tasks.speculative.execution' => 'false',
+ 'mapred.reduce.slowstart.completed.maps' => '0.05',
+ 'mapred.inmem.merge.threshold' => '1000',
+ 'mapred.job.shuffle.merge.percent' => '0.66',
+ 'mapred.job.shuffle.input.buffer.percent' => '0.7',
+ 'mapred.map.output.compression.codec' => '',
+ 'mapred.output.compression.type' => 'BLOCK',
+ 'mapred.jobtracker.completeuserjobs.maximum' => '0',
+ 'mapred.jobtracker.taskScheduler' => '',
+ 'mapred.jobtracker.restart.recover' => 'false',
+ 'mapred.job.reduce.input.buffer.percent' => '0.0',
+ 'mapreduce.reduce.input.limit' => '10737418240',
+ 'mapred.compress.map.output' => '',
+ 'mapred.task.timeout' => '600000',
+ 'jetty.connector' => 'org.mortbay.jetty.nio.SelectChannelConnector',
+ 'mapred.task.tracker.task-controller' => '',
+ 'mapred.child.root.logger' => 'INFO,TLA',
+ 'mapred.child.java.opts' => '',
+ 'mapred.cluster.map.memory.mb' => '',
+ 'mapred.cluster.reduce.memory.mb' => '',
+ 'mapred.job.map.memory.mb' => '',
+ 'mapred.job.reduce.memory.mb' => '',
+ 'mapred.cluster.max.map.memory.mb' => '',
+ 'mapred.cluster.max.reduce.memory.mb' => '',
+ 'mapred.hosts' => '',
+ 'mapred.hosts.exclude' => '',
+ 'mapred.max.tracker.blacklists' => '16',
+ 'mapred.healthChecker.script.path' => '',
+ 'mapred.healthChecker.interval' => '135000',
+ 'mapred.healthChecker.script.timeout' => '60000',
+ 'mapred.job.tracker.persist.jobstatus.active' => 'false',
+ 'mapred.job.tracker.persist.jobstatus.hours' => '1',
+ 'mapred.job.tracker.persist.jobstatus.dir' => '',
+ 'mapred.jobtracker.retirejob.check' => '10000',
+ 'mapred.jobtracker.retirejob.interval' => '0',
+ 'mapred.job.tracker.history.completed.location' => '/mapred/history/done',
+ 'mapred.task.maxvmem' => '',
+ 'mapred.jobtracker.maxtasks.per.job' => '',
+ 'mapreduce.fileoutputcommitter.marksuccessfuljobs' => 'false',
+ 'mapred.userlog.retain.hours' => '',
+ 'mapred.job.reuse.jvm.num.tasks' => '1',
+ 'mapreduce.jobtracker.kerberos.principal' => '',
+ 'mapreduce.tasktracker.kerberos.principal' => '',
+ 'hadoop.job.history.user.location' => 'none',
+ 'mapreduce.jobtracker.keytab.file' => '',
+ 'mapreduce.tasktracker.keytab.file' => '',
+ 'mapreduce.jobtracker.staging.root.dir' => '/user',
+ 'mapreduce.tasktracker.group' => 'hadoop',
+ 'mapreduce.jobtracker.split.metainfo.maxsize' => '50000000',
+ 'mapreduce.history.server.embedded' => 'false',
+ 'mapreduce.history.server.http.address' => '',
+ 'mapreduce.jobhistory.kerberos.principal' => '',
+ 'mapreduce.jobhistory.keytab.file' => '',
+ 'mapred.jobtracker.blacklist.fault-timeout-window' => '180',
+ 'mapred.jobtracker.blacklist.fault-bucket-width' => '15',
+ 'mapred.queue.names' => 'default',}
+ }
-$hdp_hadoop_capacity_scheduler_props => {'mapred.capacity-scheduler.queue.default.capacity' => '100',
- 'mapred.capacity-scheduler.queue.default.supports-priority' => 'false',
- 'mapred.capacity-scheduler.queue.default.minimum-user-limit-percent' => '100',
- 'mapred.capacity-scheduler.queue.default.maximum-initialized-jobs-per-user' => '25',}
+configgenerator::configfile::configuration {'hdp_hadoop__capacity_scheduler':
+ filename => 'capacity-scheduler.xml',
+ module => 'hdp-hadoop',
+ properties => {'mapred.capacity-scheduler.queue.default.capacity' => '100',
+ 'mapred.capacity-scheduler.queue.default.supports-priority' => 'false',
+ 'mapred.capacity-scheduler.queue.default.minimum-user-limit-percent' => '100',
+ 'mapred.capacity-scheduler.queue.default.maximum-initialized-jobs-per-user' => '25',}
+ }
-$hdp_hadoop_hdfs_site_props => {'dfs.name.dir' => '',
- 'dfs.support.append' => '',
- 'dfs.webhdfs.enabled' => '',
- 'dfs.datanode.failed.volumes.tolerated' => '',
- 'dfs.block.local-path-access.user' => '',
- 'dfs.data.dir' => '',
- 'dfs.hosts.exclude' => '',
- 'dfs.hosts' => '',
- 'dfs.replication.max' => '50',
- 'dfs.replication' => '',
- 'dfs.heartbeat.interval' => '3',
- 'dfs.safemode.threshold.pct' => '1.0f',
- 'dfs.balance.bandwidthPerSec' => '6250000',
- 'dfs.datanode.address' => '',
- 'dfs.datanode.http.address' => '',
- 'dfs.block.size' => '134217728',
- 'dfs.http.address' => '',
- 'dfs.datanode.du.reserved' => '',
- 'dfs.datanode.ipc.address' => '0.0.0.0:8010',
- 'dfs.blockreport.initialDelay' => '120',
- 'dfs.datanode.du.pct' => '0.85f',
- 'dfs.namenode.handler.count' => '40',
- 'dfs.datanode.max.xcievers' => '1024',
- 'dfs.umaskmode' => '077',
- 'dfs.web.ugi' => 'gopher,gopher',
- 'dfs.permissions' => 'true',
- 'dfs.permissions.supergroup' => 'hdfs',
- 'dfs.namenode.handler.count' => '100',
- 'ipc.server.max.response.size' => '5242880',
- 'dfs.block.access.token.enable' => 'true',
- 'dfs.namenode.kerberos.principal' => '',
- 'dfs.secondary.namenode.kerberos.principal' => '',
- 'dfs.namenode.kerberos.https.principal' => '',
- 'dfs.secondary.namenode.kerberos.https.principal' => '',
- 'dfs.secondary.http.address' => '',
- 'dfs.secondary.https.port' => '50490',
- 'dfs.web.authentication.kerberos.principal' => '',
- 'dfs.web.authentication.kerberos.keytab' => '',
- 'dfs.datanode.kerberos.principal' => '',
- 'dfs.namenode.keytab.file' => '',
- 'dfs.secondary.namenode.keytab.file' => '',
- 'dfs.datanode.keytab.file' => '',
- 'dfs.https.port' => '50470',
- 'dfs.https.address' => '',
- 'dfs.datanode.data.dir.perm' => '',
- 'dfs.access.time.precision' => '0',
- 'dfs.cluster.administrators' => ' hdfs',
- 'ipc.server.read.threadpool.size' => '5',
- 'dfs.namenode.kerberos.internal.spnego.principal' => '',
- 'dfs.secondary.namenode.kerberos.internal.spnego.principal' => '',}
+configgenerator::configfile::configuration {'hdp_hadoop__hdfs_site':
+ filename => 'hdfs-site.xml',
+ module => 'hdp-hadoop',
+ properties => {'dfs.name.dir' => '',
+ 'dfs.support.append' => '',
+ 'dfs.webhdfs.enabled' => '',
+ 'dfs.datanode.failed.volumes.tolerated' => '',
+ 'dfs.block.local-path-access.user' => '',
+ 'dfs.data.dir' => '',
+ 'dfs.hosts.exclude' => '',
+ 'dfs.hosts' => '',
+ 'dfs.replication.max' => '50',
+ 'dfs.replication' => '',
+ 'dfs.heartbeat.interval' => '3',
+ 'dfs.safemode.threshold.pct' => '1.0f',
+ 'dfs.balance.bandwidthPerSec' => '6250000',
+ 'dfs.datanode.address' => '',
+ 'dfs.datanode.http.address' => '',
+ 'dfs.block.size' => '134217728',
+ 'dfs.http.address' => '',
+ 'dfs.datanode.du.reserved' => '',
+ 'dfs.datanode.ipc.address' => '0.0.0.0:8010',
+ 'dfs.blockreport.initialDelay' => '120',
+ 'dfs.datanode.du.pct' => '0.85f',
+ 'dfs.namenode.handler.count' => '40',
+ 'dfs.datanode.max.xcievers' => '1024',
+ 'dfs.umaskmode' => '077',
+ 'dfs.web.ugi' => 'gopher,gopher',
+ 'dfs.permissions' => 'true',
+ 'dfs.permissions.supergroup' => 'hdfs',
+ 'dfs.namenode.handler.count' => '100',
+ 'ipc.server.max.response.size' => '5242880',
+ 'dfs.block.access.token.enable' => 'true',
+ 'dfs.namenode.kerberos.principal' => '',
+ 'dfs.secondary.namenode.kerberos.principal' => '',
+ 'dfs.namenode.kerberos.https.principal' => '',
+ 'dfs.secondary.namenode.kerberos.https.principal' => '',
+ 'dfs.secondary.http.address' => '',
+ 'dfs.secondary.https.port' => '50490',
+ 'dfs.web.authentication.kerberos.principal' => '',
+ 'dfs.web.authentication.kerberos.keytab' => '',
+ 'dfs.datanode.kerberos.principal' => '',
+ 'dfs.namenode.keytab.file' => '',
+ 'dfs.secondary.namenode.keytab.file' => '',
+ 'dfs.datanode.keytab.file' => '',
+ 'dfs.https.port' => '50470',
+ 'dfs.https.address' => '',
+ 'dfs.datanode.data.dir.perm' => '',
+ 'dfs.access.time.precision' => '0',
+ 'dfs.cluster.administrators' => ' hdfs',
+ 'ipc.server.read.threadpool.size' => '5',
+ 'dfs.namenode.kerberos.internal.spnego.principal' => '',
+ 'dfs.secondary.namenode.kerberos.internal.spnego.principal' => '',}
+ }
-$hdp_hcat_old_hive_site_props => {'hive.metastore.local' => 'false',
- 'javax.jdo.option.ConnectionURL' => '',
- 'javax.jdo.option.ConnectionDriverName' => 'com.mysql.jdbc.Driver',
- 'javax.jdo.option.ConnectionUserName' => '',
- 'javax.jdo.option.ConnectionPassword' => '',
- 'hive.metastore.warehouse.dir' => '/apps/hive/warehouse',
- 'hive.metastore.sasl.enabled' => '',
- 'hive.metastore.kerberos.keytab.file' => '',
- 'hive.metastore.kerberos.principal' => '',
- 'hive.metastore.cache.pinobjtypes' => 'Table,Database,Type,FieldSchema,Order',
- 'hive.metastore.uris' => '',
- 'hive.semantic.analyzer.factory.impl' => 'org.apache.hcatalog.cli.HCatSemanticAnalyzerFactory',
- 'hadoop.clientside.fs.operations',
- 'hive.metastore.client.socket.timeout' => '60',
- 'hive.metastore.execute.setugi' => 'true',}
+configgenerator::configfile::configuration {'hdp_hcat_old__hive_site':
+ filename => 'hive-site.xml',
+ module => 'hdp-hcat-old',
+ properties => {'hive.metastore.local' => 'false',
+ 'javax.jdo.option.ConnectionURL' => '',
+ 'javax.jdo.option.ConnectionDriverName' => 'com.mysql.jdbc.Driver',
+ 'javax.jdo.option.ConnectionUserName' => '',
+ 'javax.jdo.option.ConnectionPassword' => '',
+ 'hive.metastore.warehouse.dir' => '/apps/hive/warehouse',
+ 'hive.metastore.sasl.enabled' => '',
+ 'hive.metastore.kerberos.keytab.file' => '',
+ 'hive.metastore.kerberos.principal' => '',
+ 'hive.metastore.cache.pinobjtypes' => 'Table,Database,Type,FieldSchema,Order',
+ 'hive.metastore.uris' => '',
+ 'hive.semantic.analyzer.factory.impl' => 'org.apache.hcatalog.cli.HCatSemanticAnalyzerFactory',
+ 'hadoop.clientside.fs.operations' => '',
+ 'hive.metastore.client.socket.timeout' => '60',
+ 'hive.metastore.execute.setugi' => 'true',}
+ }
-$hdp_hive_hive_site_props => {'hive.metastore.local' => 'false',
- 'javax.jdo.option.ConnectionURL' => '',
- 'javax.jdo.option.ConnectionDriverName' => 'com.mysql.jdbc.Driver',
- 'javax.jdo.option.ConnectionUserName' => '',
- 'javax.jdo.option.ConnectionPassword' => '',
- 'hive.metastore.warehouse.dir' => '/apps/hive/warehouse',
- 'hive.metastore.sasl.enabled' => '',
- 'hive.metastore.kerberos.keytab.file' => '',
- 'hive.metastore.kerberos.principal' => '',
- 'hive.metastore.cache.pinobjtypes' => 'Table,Database,Type,FieldSchema,Order',
- 'hive.metastore.uris' => '',
- 'hive.semantic.analyzer.factory.impl' => 'org.apache.hivealog.cli.HCatSemanticAnalyzerFactory',
- 'hadoop.clientside.fs.operations' => 'true',
- 'hive.metastore.client.socket.timeout' => '60',
- 'hive.metastore.execute.setugi' => 'true',
- 'hive.security.authorization.enabled' => 'true',
- 'hive.security.authorization.manager' => 'org.apache.hcatalog.security.HdfsAuthorizationProvider',}
+configgenerator::configfile::configuration {'hdp_hive__hive_site':
+ filename => 'hive-site.xml',
+ module => 'hdp-hive',
+ properties => {'hive.metastore.local' => 'false',
+ 'javax.jdo.option.ConnectionURL' => '',
+ 'javax.jdo.option.ConnectionDriverName' => 'com.mysql.jdbc.Driver',
+ 'javax.jdo.option.ConnectionUserName' => '',
+ 'javax.jdo.option.ConnectionPassword' => '',
+ 'hive.metastore.warehouse.dir' => '/apps/hive/warehouse',
+ 'hive.metastore.sasl.enabled' => '',
+ 'hive.metastore.kerberos.keytab.file' => '',
+ 'hive.metastore.kerberos.principal' => '',
+ 'hive.metastore.cache.pinobjtypes' => 'Table,Database,Type,FieldSchema,Order',
+ 'hive.metastore.uris' => '',
+ 'hive.semantic.analyzer.factory.impl' => 'org.apache.hcatalog.cli.HCatSemanticAnalyzerFactory',
+ 'hadoop.clientside.fs.operations' => 'true',
+ 'hive.metastore.client.socket.timeout' => '60',
+ 'hive.metastore.execute.setugi' => 'true',
+ 'hive.security.authorization.enabled' => 'true',
+ 'hive.security.authorization.manager' => 'org.apache.hcatalog.security.HdfsAuthorizationProvider',}
+ }
-$hdp_oozie_oozie_site_props => {'oozie.base.url' => '',
- 'oozie.system.id' => '',
- 'oozie.systemmode' => 'NORMAL',
- 'oozie.service.AuthorizationService.security.enabled' => 'true',
- 'oozie.service.PurgeService.older.than' => '30',
- 'oozie.service.PurgeService.purge.interval' => '3600',
- 'oozie.service.CallableQueueService.queue.size' => '1000',
- 'oozie.service.CallableQueueService.threads' => '10',
- 'oozie.service.CallableQueueService.callable.concurrency' => '3',
- 'oozie.service.coord.normal.default.timeout' => '120',
- 'oozie.db.schema.name' => 'oozie',
- 'oozie.service.StoreService.create.db.schema' => 'true',
- 'oozie.service.StoreService.jdbc.driver' => 'org.apache.derby.jdbc.EmbeddedDriver',
- 'oozie.service.StoreService.jdbc.url' => '',
- 'oozie.service.StoreService.jdbc.username' => 'sa',
- 'oozie.service.StoreService.jdbc.password' => ' ',
- 'oozie.service.StoreService.pool.max.active.conn' => '10',
- 'oozie.service.HadoopAccessorService.kerberos.enabled' => '',
- 'local.realm' => '',
- 'oozie.service.HadoopAccessorService.keytab.file' => '',
- 'oozie.service.HadoopAccessorService.kerberos.principal' => '',
- 'oozie.service.HadoopAccessorService.jobTracker.whitelist' => ' ',
- 'oozie.authentication.type' => '',
- 'oozie.authentication.kerberos.principal' => '',
- 'oozie.authentication.kerberos.keytab' => '',
- 'oozie.service.HadoopAccessorService.nameNode.whitelist' => ' ',
- 'oozie.service.WorkflowAppService.system.libpath' => '',
- 'use.system.libpath.for.mapreduce.and.pig.jobs' => 'false',
- 'oozie.authentication.kerberos.name.rules' => '',}
+configgenerator::configfile::configuration {'hdp_oozie__oozie_site':
+ filename => 'oozie-site.xml',
+ module => 'hdp-oozie',
+ properties => {'oozie.base.url' => '',
+ 'oozie.system.id' => '',
+ 'oozie.systemmode' => 'NORMAL',
+ 'oozie.service.AuthorizationService.security.enabled' => 'true',
+ 'oozie.service.PurgeService.older.than' => '30',
+ 'oozie.service.PurgeService.purge.interval' => '3600',
+ 'oozie.service.CallableQueueService.queue.size' => '1000',
+ 'oozie.service.CallableQueueService.threads' => '10',
+ 'oozie.service.CallableQueueService.callable.concurrency' => '3',
+ 'oozie.service.coord.normal.default.timeout' => '120',
+ 'oozie.db.schema.name' => 'oozie',
+ 'oozie.service.StoreService.create.db.schema' => 'true',
+ 'oozie.service.StoreService.jdbc.driver' => 'org.apache.derby.jdbc.EmbeddedDriver',
+ 'oozie.service.StoreService.jdbc.url' => '',
+ 'oozie.service.StoreService.jdbc.username' => 'sa',
+ 'oozie.service.StoreService.jdbc.password' => ' ',
+ 'oozie.service.StoreService.pool.max.active.conn' => '10',
+ 'oozie.service.HadoopAccessorService.kerberos.enabled' => '',
+ 'local.realm' => '',
+ 'oozie.service.HadoopAccessorService.keytab.file' => '',
+ 'oozie.service.HadoopAccessorService.kerberos.principal' => '',
+ 'oozie.service.HadoopAccessorService.jobTracker.whitelist' => ' ',
+ 'oozie.authentication.type' => '',
+ 'oozie.authentication.kerberos.principal' => '',
+ 'oozie.authentication.kerberos.keytab' => '',
+ 'oozie.service.HadoopAccessorService.nameNode.whitelist' => ' ',
+ 'oozie.service.WorkflowAppService.system.libpath' => '',
+ 'use.system.libpath.for.mapreduce.and.pig.jobs' => 'false',
+ 'oozie.authentication.kerberos.name.rules' => '',}
+ }
-$hdp_templeton_templeton_site_props => {'templeton.port' => '50111',
- 'templeton.hadoop.conf.dir' => '',
- 'templeton.jar' => '',
- 'templeton.libjars' => '',
- 'templeton.hadoop' => '',
- 'templeton.pig.archive' => '',
- 'templeton.pig.path' => '',
- 'templeton.hcat' => '',
- 'templeton.hive.archive' => '',
- 'templeton.hive.path' => '',
- 'templeton.hive.properties' => '',
- 'templeton.zookeeper.hosts' => '',
- 'templeton.storage.class' => 'org.apache.hcatalog.templeton.tool.ZooKeeperStorage',
- 'templeton.override.enabled' => 'false',
- 'templeton.streaming.jar' => 'hdfs:///apps/templeton/hadoop-streaming.jar',
- 'templeton.kerberos.principal' => '',
- 'templeton.kerberos.keytab' => '',
- 'templeton.kerberos.secret' => 'secret',}
+configgenerator::configfile::configuration {'hdp_templeton__templeton_site':
+ filename => 'templeton-site.xml',
+ module => 'hdp-templeton',
+ properties => {'templeton.port' => '50111',
+ 'templeton.hadoop.conf.dir' => '',
+ 'templeton.jar' => '',
+ 'templeton.libjars' => '',
+ 'templeton.hadoop' => '',
+ 'templeton.pig.archive' => '',
+ 'templeton.pig.path' => '',
+ 'templeton.hcat' => '',
+ 'templeton.hive.archive' => '',
+ 'templeton.hive.path' => '',
+ 'templeton.hive.properties' => '',
+ 'templeton.zookeeper.hosts' => '',
+ 'templeton.storage.class' => 'org.apache.hcatalog.templeton.tool.ZooKeeperStorage',
+ 'templeton.override.enabled' => 'false',
+ 'templeton.streaming.jar' => 'hdfs:///apps/templeton/hadoop-streaming.jar',
+ 'templeton.kerberos.principal' => '',
+ 'templeton.kerberos.keytab' => '',
+ 'templeton.kerberos.secret' => 'secret',}
+ }
class manifestloader () {
file { '/etc/puppet/agent/modules.tgz':
diff --git a/ambari-agent/src/main/puppet/modules/configgenerator/manifests/configfile.pp b/ambari-agent/src/main/puppet/modules/configgenerator/manifests/configfile.pp
index 9fb7b55028..8df71b6b93 100644
--- a/ambari-agent/src/main/puppet/modules/configgenerator/manifests/configfile.pp
+++ b/ambari-agent/src/main/puppet/modules/configgenerator/manifests/configfile.pp
@@ -43,9 +43,9 @@ # Note: Set correct $modulespath in the configgenerator (or pass it as parameter)
#
-define configgenerator::configfile ($configname=$title, $modulespath='/etc/puppet/modules', $module, $properties) {
+define configgenerator::configfile ($modulespath='/etc/puppet/modules', $configuration) {
$configcontent = inline_template('<configuration>
- <% properties.each do |key,value| -%>
+ <% configuration.props.each do |key,value| -%>
<property>
<name><%=key %></name>
<value><%=value %></value>
@@ -56,6 +56,12 @@ define configgenerator::configfile ($configname=$title, $modulespath='/etc/puppe
 file {'config':
ensure => present,
content => $configcontent,
- path => "${modulespath}/${module}/templates/${configname}",
+ path => "${modulespath}/${configuration::modulename}/templates/${configuration::configfile}",
}
}
+
+define configgenerator::configfile::configuration($filename, $module, $properties) {
+ $configfile = $filename
+ $modulename = $module
+ $props = $properties
+}
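For reference, the inline_template in the define above expands each properties pair into Hadoop's `<configuration>` XML format. A minimal Python sketch of that same rendering (the function name is illustrative, not part of the patch):

```python
def props_to_xml(properties):
  # One <property> element per key/value pair, inside a <configuration> root,
  # matching the ERB template in configgenerator::configfile.
  lines = ['<configuration>']
  for key, value in properties.items():
    lines.append('  <property>')
    lines.append('    <name>%s</name>' % key)
    lines.append('    <value>%s</value>' % value)
    lines.append('  </property>')
  lines.append('</configuration>')
  return '\n'.join(lines)

print props_to_xml({'mapred.queue.default.acl-submit-job' : '*'})
```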
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp
index 11de72d870..1bd89dd531 100644
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp
+++ b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp
@@ -41,34 +41,28 @@ class hdp-hadoop::initialize()
 #Configs generation
include manifestloader
- configgenerator::configfile{'hdfs-site.xml':
- module => 'hdp-hadoop',
- properties => $manifestloader::hdp_hadoop_hdfs_site_props
+ configgenerator::configfile{'mapred_queue_acls_xml':
+ configuration => $manifestloader::Hdp_hadoop__mapred_queue_acls
}
-
- configgenerator::configfile{'capacity-scheduler.xml':
- module => 'hdp-hadoop',
- properties => $manifestloader::hdp_hadoop_capacity_scheduler_props
+
+ configgenerator::configfile{'hadoop_policy_xml':
+ configuration => $manifestloader::Hdp_hadoop__hadoop_policy
}
-
- configgenerator::configfile{'mapred-site.xml':
- module => 'hdp-hadoop',
- properties => $manifestloader::hdp_hadoop_mapred_site_props
+
+ configgenerator::configfile{'core_site_xml':
+ configuration => $manifestloader::Hdp_hadoop__core_site
}
-
- configgenerator::configfile{'core-site.xml':
- module => 'hdp-hadoop',
- properties => $manifestloader::hdp_hadoop_core_site_props
+
+ configgenerator::configfile{'mapred_site_xml':
+ configuration => $manifestloader::Hdp_hadoop__mapred_site
}
-
- configgenerator::configfile{'hadoop-policy.xml':
- module => 'hdp-hadoop',
- properties => $manifestloader::hdp_hadoop_policy_props
+
+ configgenerator::configfile{'capacity_scheduler_xml':
+ configuration => $manifestloader::Hdp_hadoop__capacity_scheduler
}
-
- configgenerator::configfile{'mapred-queue-acls.xml.erb':
- module => 'hdp-hadoop',
- properties => $manifestloader::hdp_hadoop_mapred_queue_acls_props
+
+ configgenerator::configfile{'hdfs_site_xml':
+ configuration => $manifestloader::Hdp_hadoop__hdfs_site
}
}
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/init.pp b/ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/init.pp
index 0eda4b0b89..9a6560bee9 100644
--- a/ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/init.pp
+++ b/ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/init.pp
@@ -27,9 +27,8 @@ class hdp-hcat(
 #Configs generation
- configgenerator::configfile{'hive-site.xml':
- module => 'hdp-hcat-old',
- properties => $manifestloader::hdp_hcat_old_hive_site_props
+ configgenerator::configfile{'hive_site_xml':
+ configuration => $manifestloader::Hdp_hcat_old__hive_site
}
$hcat_user = $hdp::params::hcat_user
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/init.pp b/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/init.pp
index 77d8fe875d..772a6f6cec 100644
--- a/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/init.pp
+++ b/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/init.pp
@@ -29,9 +29,8 @@ class hdp-hive(
 $hive_user = $hdp-hive::params::hive_user
$hive_config_dir = $hdp-hive::params::hive_conf_dir
- configgenerator::configfile{'hive-site.xml.erb':
- module => 'hdp-hive',
- properties => $manifestloader::hdp_hive_hive_site_props
+ configgenerator::configfile{'hive_site_xml':
+ configuration => $manifestloader::Hdp_hive__hive_site
}
anchor { 'hdp-hive::begin': }
diff --git a/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/init.pp b/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/init.pp
index 44d019af19..c8406623bd 100644
--- a/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/init.pp
+++ b/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/init.pp
@@ -27,12 +27,11 @@ class hdp-oozie(
 include hdp-oozie::params
include manifestloader
- configgenerator::configfile{'oozie-site.xml':
- module => 'hdp-oozie',
- properties => $manifestloader::hdp_oozie_oozie_site_props
+ configgenerator::configfile{'oozie_site_xml':
+ configuration => $manifestloader::Hdp_oozie__oozie_site
}
- $oozie_user = $hdp-oozie::params::oozie_user
+ $oozie_user = $hdp-oozie::params::oozie_user
$oozie_config_dir = $hdp-oozie::params::conf_dir
if ($service_state == 'uninstalled') {
diff --git a/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/init.pp b/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/init.pp
index 0bf1739b59..b8df72bb43 100644
--- a/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/init.pp
+++ b/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/init.pp
@@ -25,9 +25,8 @@ class hdp-templeton(
 {
include manifestloader
- configgenerator::configfile{'templeton-site.xml:
- module => 'hdp-templeton',
- properties => $manifestloader::hdp_templeton_templeton_site_props
+ configgenerator::configfile{'templeton_site_xml':
+ configuration => $manifestloader::Hdp_templeton__templeton_site
}
include hdp-templeton::params
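Across all six modules above the refactor follows one pattern: the per-module `*_props` hashes move out of the individual manifests into named configgenerator::configfile::configuration blocks in site.pp, and callers reference them by qualified name. A rough Python analogy of that registry indirection (all names here are invented for illustration, not part of the patch):

```python
REGISTRY = {}

def declare_configuration(name, filename, module, properties):
  # Analogue of a configgenerator::configfile::configuration block in site.pp.
  REGISTRY[name] = {'filename' : filename, 'module' : module,
                    'properties' : properties}

def template_path(name, modulespath='/etc/puppet/modules'):
  # Analogue of the path the configfile define builds from the block.
  conf = REGISTRY[name]
  return '%s/%s/templates/%s' % (modulespath, conf['module'], conf['filename'])

declare_configuration('hdp_templeton__templeton_site', 'templeton-site.xml',
                      'hdp-templeton', {'templeton.port' : '50111'})
print template_path('hdp_templeton__templeton_site')
```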
diff --git a/ambari-agent/src/main/python/ambari_agent/ActionQueue.py b/ambari-agent/src/main/python/ambari_agent/ActionQueue.py
index 231fa82474..a50c66e7c4 100644
--- a/ambari-agent/src/main/python/ambari_agent/ActionQueue.py
+++ b/ambari-agent/src/main/python/ambari_agent/ActionQueue.py
@@ -28,6 +28,7 @@ from shell import shellRunner
 from FileUtil import writeFile, createStructure, deleteStructure, getFilePath, appendToFile
 from shell import shellRunner
 import json
+import pprint
 import os
 import time
 import subprocess
@@ -37,14 +38,11 @@ logger = logging.getLogger()
 installScriptHash = -1
 
 class ActionQueue(threading.Thread):
-  global q, r, clusterId, clusterDefinitionRevision
-  q = Queue.Queue()
-  r = Queue.Queue()
-  clusterId = 'unknown'
-  clusterDefinitionRevision = 0
-
+  global commandQueue, resultQueue
+  commandQueue = Queue.Queue()
+  resultQueue = Queue.Queue()
+
   def __init__(self, config):
-    global clusterId, clusterDefinitionRevision
     super(ActionQueue, self).__init__()
     #threading.Thread.__init__(self)
     self.config = config
@@ -59,219 +57,80 @@ class ActionQueue(threading.Thread):
   def stopped(self):
     return self._stop.isSet()
 
-  #For unittest
   def getshellinstance(self):
+    """ For Testing purpose only."""
     return self.sh
 
-  def put(self, response):
-    if 'actions' in response:
-      actions = response['actions']
-      logger.debug(actions)
-      # for the servers, take a diff of what's running, and what the controller
-      # asked the agent to start. Kill all those servers that the controller
-      # didn't ask us to start
-      sh = shellRunner()
-      runningServers = sh.getServerTracker()
-
-      # get the list of servers the controller wants running
-      serversToRun = {}
-      for action in actions:
-        if action['kind'] == 'START_ACTION':
-          processKey = sh.getServerKey(action['clusterId'],action['clusterDefinitionRevision'],
-            action['component'], action['role'])
-          serversToRun[processKey] = 1
-
-      # create stop actions for the servers that the controller wants stopped
-      for server in runningServers.keys():
-        if server not in serversToRun:
-          sh.stopProcess(server)
-      # now put all the actions in the queue. The ordering is important (we stopped
-      # all unneeded servers first)
-      for action in actions:
-        q.put(action)
+  def put(self, command):
+    logger.info("The command from the server is \n" + pprint.pformat(command))
+    commandQueue.put(command)
+    pass
 
   def run(self):
-    global clusterId, clusterDefinitionRevision
+    result = []
     while not self.stopped():
-      while not q.empty():
-        action = q.get()
-        switches = {
-                     'START_ACTION'              : self.startAction,
-                     'RUN_ACTION'                : self.runAction,
-                     'CREATE_STRUCTURE_ACTION'   : self.createStructureAction,
-                     'DELETE_STRUCTURE_ACTION'   : self.deleteStructureAction,
-                     'WRITE_FILE_ACTION'         : self.writeFileAction,
-                     'INSTALL_AND_CONFIG_ACTION' : self.installAndConfigAction,
-                     'NO_OP_ACTION'              : self.noOpAction
-                   }
-
-        exitCode = 1
-        retryCount = 1
-        while (exitCode != 0 and retryCount <= self.maxRetries):
-          result={}
-          try:
-            #pass a copy of action since we don't want anything to change in the
-            #action dict
-            actionCopy = copy.copy(action)
-            result = switches.get(action['kind'], self.unknownAction)(actionCopy)
-            if ('commandResult' in result):
-              commandResult = result['commandResult']
-              exitCode = commandResult['exitCode']
-              if (exitCode == 0):
-                break
-              else:
-                logger.warn(str(action) + " exited with code " + str(exitCode))
-            else:
-              #Really, no commandResult? Is this possible?
-              #TODO: check
-              exitCode = 0
-              break
-          except Exception, err:
-            traceback.print_exc()
-            logger.warn(err)
-            if ('commandResult' in result):
-              commandResult = result['commandResult']
-              if ('exitCode' in commandResult):
-                exitCode = commandResult['exitCode']
-              else:
-                exitCode = 1
-            else:
-              result['commandResult'] = {'exitCode': 1, 'output':"", 'error':""}
-
-          #retry in some time
-          logger.warn("Retrying %s in %d seconds" % (str(action),self.sleepInterval))
-          time.sleep(self.sleepInterval)
-          retryCount += 1
+      while not commandQueue.empty():
+        command = commandQueue.get()
+        try:
+          #pass a copy of action since we don't want anything to change in the
+          #action dict
+          commandCopy = copy.copy(command)
+          result = self.executeCommand(commandCopy)
 
-        if (exitCode != 0):
-          result['exitCode']=exitCode
-          result['retryActionCount'] = retryCount - 1
-        else:
-          result['retryActionCount'] = retryCount
-        # Update the result
-        r.put(result)
+        except Exception, err:
+          traceback.print_exc()
+          logger.warn(err)
+          pass
+
+        for entry in result:
+          resultQueue.put(entry)
+        pass
       if not self.stopped():
         time.sleep(5)
 
   # Store action result to agent response queue
   def result(self):
     result = []
-    while not r.empty():
-      result.append(r.get())
+    while not resultQueue.empty():
+      result.append(resultQueue.get())
     return result
 
-  # Generate default action response
-  def genResult(self, action):
-    result={}
-    if (action['kind'] == 'INSTALL_AND_CONFIG_ACTION' or action['kind'] == 'NO_OP_ACTION'):
-      result = {
-        'id'   : action['id'],
-        'kind' : action['kind'],
-      }
-    else:
-      result = {
-        'id'                        : action['id'],
-        'clusterId'                 : action['clusterId'],
-        'kind'                      : action['kind'],
-        'clusterDefinitionRevision' : action['clusterDefinitionRevision'],
-        'componentName'             : action['component'],
-        'role'                      : action['role']
-      }
+  def registerCommand(self, command):
+    return {}
+
+  def statusCommand(self, command):
+    return {}
+
+  def executeCommand(self, command):
+    logger.info("Executing command \n" + pprint.pformat(command))
+    clusterName = command['clusterName']
+    commandId = command['commandId']
+    hostname = command['hostname']
+    params = command['params']
+    clusterHostInfo = command['clusterHostInfo']
+    roleCommands = command['roleCommands']
+    configurations = command['configurations']
+    result = []
+    for roleCommand in roleCommands:
+      # assume some puppet plugin to run these commands
+      roleResult = {'role' : roleCommand['role'],
+                    'actionId' : commandId,
+                    'stdout' : "DONE",
+                    'stderr' : "DONE",
+                    'exitCode' : 0,
+                    'status' : "COMPLETED"}
+      result.append(roleResult)
+      pass
     return result
 
-  # Run start action, start a server process and
-  # track the liveness of the children process
-  def startAction(self, action):
-    result = self.genResult(action)
-    return self.sh.startProcess(action['clusterId'],
-      action['clusterDefinitionRevision'],
-      action['component'],
-      action['role'],
-      action['command'],
-      action['user'], result)
-
-  # Write file action
-  def writeFileAction(self, action, fileName=""):
-    result = self.genResult(action)
-    return writeFile(action, result, fileName)
-
-  # get the install file
-  def getInstallFilename(self,id):
-    return "ambari-install-file-"+id
-
-  # Install and configure action
-  def installAndConfigAction(self, action):
-    global installScriptHash
-    r=self.genResult(action)
-    w = self.writeFileAction(action,self.getInstallFilename(action['id']))
-    commandResult = {}
-    if w['exitCode']!=0:
-      commandResult['error'] = w['stderr']
-      commandResult['exitCode'] = w['exitCode']
-      r['commandResult'] = commandResult
-      return r
-
-    if 'command' not in action:
-      # this is hardcoded to do puppet specific stuff for now
-      # append the content of the puppet file to the file written above
-      filepath = getFilePath(action,self.getInstallFilename(action['id']))
-      logger.info("File path for puppet top level script: " + filepath)
-      p = self.sh.run(['/bin/cat',AmbariConfig.config.get('puppet','driver')])
-      if p['exitCode']!=0:
-        commandResult['error'] = p['error']
-        commandResult['exitCode'] = p['exitCode']
-        r['commandResult'] = commandResult
-        return r
-      logger.debug("The contents of the static file " + p['output'])
-      appendToFile(p['output'],filepath)
-      arr = [AmbariConfig.config.get('puppet','commandpath') , filepath]
-      logger.debug(arr)
-      action['command'] = arr
-    logger.debug(action['command'])
-    commandResult = self.sh.run(action['command'])
-    logger.debug("PUPPET COMMAND OUTPUT: " + commandResult['output'])
-    logger.debug("PUPPET COMMAND ERROR: " + commandResult['error'])
-    if commandResult['exitCode'] == 0:
-      installScriptHash = action['id']
-    r['commandResult'] = commandResult
-    return r
-
-  # Run command action
-  def runAction(self, action):
-    result = self.genResult(action)
-    return self.sh.runAction(action['clusterId'],
-      action['component'],
-      action['role'],
-      action['user'],
-      action['command'],
-      action['cleanUpCommand'], result)
-
-  # Create directory structure for cluster
-  def createStructureAction(self, action):
-    result = self.genResult(action)
-    result['exitCode'] = 0
-    return createStructure(action, result)
-
-  # Delete directory structure for cluster
-  def deleteStructureAction(self, action):
-    result = self.genResult(action)
-    result['exitCode'] = 0
-    return deleteStructure(action, result)
-
-  def noOpAction(self, action):
-    r = {'id' : action['id']}
-    return r
+  def noOpCommand(self, command):
+    result = {'commandId' : command['Id']}
+    return result
 
-  # Handle unknown action
   def unknownAction(self, action):
     logger.error('Unknown action: %s' % action['id'])
     result = { 'id': action['id'] }
     return result
 
-  # Discover agent idle state
   def isIdle(self):
-    return q.empty()
-
-  # Get the hash of the script currently used for install/config
-  def getInstallScriptHash(self):
-    return installScriptHash
+    return commandQueue.empty()
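The net effect of the ActionQueue rewrite is a much simpler loop: commands go into one queue, each roleCommand yields a stubbed roleResult, and results drain into a second queue. A self-contained Python 2 sketch of that flow (the command payload below is hypothetical; real commands arrive from the server):

```python
import Queue
import pprint

commandQueue = Queue.Queue()
resultQueue = Queue.Queue()

def executeCommand(command):
  # One stubbed roleResult per roleCommand, mirroring the new ActionQueue.
  result = []
  for roleCommand in command['roleCommands']:
    result.append({'role' : roleCommand['role'],
                   'actionId' : command['commandId'],
                   'stdout' : "DONE", 'stderr' : "DONE",
                   'exitCode' : 0, 'status' : "COMPLETED"})
  return result

command = {'commandId' : '1-1',
           'roleCommands' : [{'role' : 'DATANODE', 'roleCommand' : 'START'}]}
commandQueue.put(command)
while not commandQueue.empty():
  for entry in executeCommand(commandQueue.get()):
    resultQueue.put(entry)
pprint.pprint(resultQueue.get())
```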
diff --git a/ambari-agent/src/main/python/ambari_agent/Controller.py b/ambari-agent/src/main/python/ambari_agent/Controller.py
index 445399cc73..ac643a0b89 100644
--- a/ambari-agent/src/main/python/ambari_agent/Controller.py
+++ b/ambari-agent/src/main/python/ambari_agent/Controller.py
@@ -51,11 +51,6 @@ class Controller(threading.Thread):
     self.safeMode = True
     self.credential = None
     self.config = config
-    #Disabled security until we have fix for AMBARI-157
-    #if(config.get('controller', 'user')!=None and config.get('controller', 'password')!=None):
-    #  self.credential = { 'user' : config.get('controller', 'user'),
-    #                      'password' : config.get('controller', 'password')
-    #  }
     self.hostname = socket.gethostname()
     self.registerUrl = config.get('server', 'secured_url') + \
       '/agent/register/' + self.hostname
@@ -82,6 +77,7 @@ class Controller(threading.Thread):
     while registered == False:
       try:
         data = json.dumps(self.register.build(id))
+        logger.info("Registering with the server " + pprint.pformat(data))
         req = urllib2.Request(self.registerUrl, data, {'Content-Type': 'application/json'})
         stream = security.secured_url_open(req)
@@ -99,6 +95,23 @@ class Controller(threading.Thread):
       pass
     return ret
+
+  def addToQueue(self, commands):
+    """Add to the queue for running the commands """
+    """ Put the required actions into the Queue """
+    """ Verify if the action is to reboot or not """
+    if not commands:
+      logger.info("No commands from the server.")
+    else:
+      """Only add to the queue if not empty list """
+      for command in commands:
+        logger.info("Adding command to the action queue: \n" +
+                    pprint.pformat(command))
+        self.actionQueue.put(command)
+        pass
+      pass
+    pass
+
   def heartbeatWithServer(self):
     retry = False
     #TODO make sure the response id is monotonically increasing
@@ -109,29 +122,33 @@ class Controller(threading.Thread):
         data = json.dumps(self.heartbeat.build(id))
         pass
       logger.info("Sending HeartBeat " + pprint.pformat(data))
-      req = urllib2.Request(self.heartbeatUrl, data, {'Content-Type': 'application/json'})
-
-      logger.info(data)
-
+      req = urllib2.Request(self.heartbeatUrl, data, {'Content-Type':
+                                                      'application/json'})
       f = security.secured_url_open(req)
       response = f.read()
       f.close()
-      data = json.loads(response)
-      id=int(data['responseId'])
-      logger.info("HeartBeat Response from Server: \n" + pprint.pformat(data))
+      response = json.loads(response)
+      id=int(response['responseId'])
+
+      if 'executionCommands' in response.keys():
+        self.addToQueue(response['executionCommands'])
+        pass
+      else:
+        logger.info("No commands sent from the Server.")
+        pass
       retry=False
     except Exception, err:
       retry=True
       if "code" in err:
        logger.error(err.code)
      else:
-        logger.error("Unable to connect to: "+self.heartbeatUrl,exc_info=True)
+        logger.error("Unable to connect to: "+
+                     self.heartbeatUrl,exc_info=True)
      if self.actionQueue.isIdle():
-        time.sleep(30)
+        time.sleep(3)
      else:
        time.sleep(1)
      pass
-
   def run(self):
     opener = urllib2.build_opener()
@@ -148,7 +165,8 @@ def main(argv=None):
   signal.signal(signal.SIGINT, signal.SIG_DFL)
 
   logger.setLevel(logging.INFO)
-  formatter = logging.Formatter("%(asctime)s %(filename)s:%(lineno)d - %(message)s")
+  formatter = logging.Formatter("%(asctime)s %(filename)s:%(lineno)d - \
+    %(message)s")
   stream_handler = logging.StreamHandler()
   stream_handler.setFormatter(formatter)
   logger.addHandler(stream_handler)
diff --git a/ambari-agent/src/main/python/ambari_agent/Hardware.py b/ambari-agent/src/main/python/ambari_agent/Hardware.py
index 8f477ab2ea..cc9fd62504 100644
--- a/ambari-agent/src/main/python/ambari_agent/Hardware.py
+++ b/ambari-agent/src/main/python/ambari_agent/Hardware.py
@@ -96,6 +96,14 @@ class Hardware:
       pass
     pass
     pass
+    """ Convert the needed types to the true values """
+    if 'physicalprocessorcount' in retDict.keys():
+      retDict['physicalprocessorcount'] = int(retDict['physicalprocessorcount'])
+      pass
+    if 'is_virtual' in retDict.keys():
+      retDict['is_virtual'] = ("true" == retDict['is_virtual'])
+      pass
+    logger.info("Facter info : \n" + pprint.pformat(retDict))
     return retDict
diff --git a/ambari-agent/src/main/python/ambari_agent/Heartbeat.py b/ambari-agent/src/main/python/ambari_agent/Heartbeat.py
index e9215f766f..e34a8908d0 100644
--- a/ambari-agent/src/main/python/ambari_agent/Heartbeat.py
+++ b/ambari-agent/src/main/python/ambari_agent/Heartbeat.py
@@ -39,17 +39,19 @@ class Heartbeat:
     timestamp = int(time.time()*1000)
     queueResult = self.actionQueue.result()
     installedRoleStates = serverStatus.build()
+    nodeStatus = { "status" : "HEALTHY", "cause" : "NONE"}
 
     heartbeat = { 'responseId'        : int(id),
                   'timestamp'         : timestamp,
                   'hostname'          : socket.gethostname(),
-                  # 'reports'         : self.reports,
                   # 'componentStatus' : self.componentStatus,
                   'nodeStatus'        : nodeStatus
                 }
-
+    if len(queueResult) != 0:
+      heartbeat['reports'] = queueResult
+      pass
     return heartbeat
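After this change the heartbeat only carries a `reports` list when the action queue actually produced results. A minimal sketch of the payload Heartbeat.build() now assembles (the standalone function below is illustrative, not the class itself):

```python
import socket
import time

def build_heartbeat(response_id, queue_result):
  heartbeat = {'responseId' : int(response_id),
               'timestamp' : int(time.time()*1000),
               'hostname' : socket.gethostname(),
               'nodeStatus' : {'status' : 'HEALTHY', 'cause' : 'NONE'}}
  if len(queue_result) != 0:     # 'reports' is attached only when non-empty
    heartbeat['reports'] = queue_result
  return heartbeat

print build_heartbeat(3, [])     # no 'reports' key while the agent is idle
print build_heartbeat(4, [{'role' : 'DATANODE', 'exitCode' : 0}])
```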
diff --git a/ambari-agent/src/main/python/ambari_agent/security.py b/ambari-agent/src/main/python/ambari_agent/security.py
index 924c03a953..370ba8a8e8 100644
--- a/ambari-agent/src/main/python/ambari_agent/security.py
+++ b/ambari-agent/src/main/python/ambari_agent/security.py
@@ -119,7 +119,7 @@ class CertificateManager():
     response = f.read()
     f.close()
     data = json.loads(response)
-    logger.info("Sign response from Server: \n" + pprint.pformat(data))
+    logger.debug("Sign response from Server: \n" + pprint.pformat(data))
     result=data['result']
     if result == 'OK':
       agentCrtContent=data['signedCa']
diff --git a/ambari-agent/src/main/python/ambari_agent/shell.py b/ambari-agent/src/main/python/ambari_agent/shell.py
index 75541208a8..a44079b449 100644
--- a/ambari-agent/src/main/python/ambari_agent/shell.py
+++ b/ambari-agent/src/main/python/ambari_agent/shell.py
@@ -88,18 +88,19 @@ class shellRunner:
     code = 0
     cmd = " "
     cmd = cmd.join(script)
-    p = subprocess.Popen(cmd, preexec_fn=changeUid, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, close_fds=True)
+    p = subprocess.Popen(cmd, preexec_fn=changeUid, stdout=subprocess.PIPE,
+                         stderr=subprocess.PIPE, shell=True, close_fds=True)
     out, err = p.communicate()
     code = p.wait()
     logger.debug("Exitcode for %s is %d" % (cmd,code))
     return {'exitCode': code, 'output': out, 'error': err}
 
   # dispatch action types
-  def runAction(self, clusterId, component, role, user, command, cleanUpCommand, result):
+  def runAction(self, clusterId, component, role,
+                user, command, cleanUpCommand, result):
     oldDir = os.getcwd()
     #TODO: handle this better. Don't like that it is doing a chdir for the main process
     os.chdir(self.getWorkDir(clusterId, role))
-
     oldUid = os.getuid()
     try:
       if user is not None:
         user=getpwnam(user)[2]
@@ -107,7 +108,8 @@ class shellRunner:
         user = oldUid
       threadLocal.uid = user
     except Exception:
-      logger.warn("%s %s %s can not switch user for RUN_ACTION." % (clusterId, component, role))
+      logger.warn("%s %s %s can not switch user for RUN_ACTION."
+                  % (clusterId, component, role))
     code = 0
     cmd = sys.executable
     tempfilename = tempfile.mktemp()
@@ -116,7 +118,8 @@ class shellRunner:
     tmp.close()
     cmd = "%s %s %s" % (cmd, tempfilename, " ".join(command['param']))
     commandResult = {}
-    p = subprocess.Popen(cmd, preexec_fn=changeUid, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, close_fds=True)
+    p = subprocess.Popen(cmd, preexec_fn=changeUid, stdout=subprocess.PIPE,
+                         stderr=subprocess.PIPE, shell=True, close_fds=True)
     out, err = p.communicate()
     code = p.wait()
     if code != 0:
@@ -134,7 +137,8 @@ class shellRunner:
       cmd = "%s %s %s" % (cmd, tempfilename, " ".join(cleanUpCommand['param']))
       cleanUpCode = 0
       cleanUpResult = {}
-      p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, close_fds=True)
+      p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+                           shell=True, close_fds=True)
       out, err = p.communicate()
       cleanUpCode = p.wait()
       if cleanUpCode != 0:
@@ -147,17 +151,20 @@ class shellRunner:
     try:
       os.chdir(oldDir)
     except Exception:
-      logger.warn("%s %s %s can not restore environment for RUN_ACTION." % (clusterId, component, role))
+      logger.warn("%s %s %s can not restore environment for RUN_ACTION."
+                  % (clusterId, component, role))
     return result
 
   # Start a process and presist its state
-  def startProcess(self, clusterId, clusterDefinitionRevision, component, role, script, user, result):
+  def startProcess(self, clusterId, clusterDefinitionRevision, component,
+                   role, script, user, result):
     global serverTracker
     oldDir = os.getcwd()
     try:
       os.chdir(self.getWorkDir(clusterId,role))
     except Exception:
-      logger.warn("%s %s %s can not switch dir for START_ACTION." % (clusterId, component, role))
+      logger.warn("%s %s %s can not switch dir for START_ACTION."
+                  % (clusterId, component, role))
     oldUid = os.getuid()
     try:
       if user is not None:
@@ -166,10 +173,12 @@ class shellRunner:
         user = os.getuid()
       threadLocal.uid = user
     except Exception:
-      logger.warn("%s %s %s can not switch user for START_ACTION." % (clusterId, component, role))
+      logger.warn("%s %s %s can not switch user for START_ACTION."
+                  % (clusterId, component, role))
     code = 0
     commandResult = {}
-    process = self.getServerKey(clusterId,clusterDefinitionRevision,component,role)
+    process = self.getServerKey(clusterId,clusterDefinitionRevision,
+                                component,role)
     if not process in serverTracker:
       try:
         plauncher = processlauncher(script,user)
@@ -177,7 +186,8 @@ class shellRunner:
         plauncher.blockUntilProcessCreation()
       except Exception:
         traceback.print_exc()
-        logger.warn("Can not launch process for %s %s %s" % (clusterId, component, role))
+        logger.warn("Can not launch process for %s %s %s"
+                    % (clusterId, component, role))
         code = -1
       serverTracker[process] = plauncher
     commandResult['exitCode'] = code
@@ -185,16 +195,19 @@ class shellRunner:
     try:
       os.chdir(oldDir)
     except Exception:
-      logger.warn("%s %s %s can not restore environment for START_ACTION." % (clusterId, component, role))
+      logger.warn("%s %s %s can not restore environment for START_ACTION." \
+                  % (clusterId, component, role))
     return result
 
   # Stop a process and remove presisted state
   def stopProcess(self, processKey):
     global serverTracker
     keyFragments = processKey.split('/')
-    process = self.getServerKey(keyFragments[0],keyFragments[1],keyFragments[2],keyFragments[3])
+    process = self.getServerKey(keyFragments[0],keyFragments[1],
+                                keyFragments[2],keyFragments[3])
     if process in serverTracker:
-      logger.info ("Sending %s with PID %d the SIGTERM signal" % (process,serverTracker[process].getpid()))
+      logger.info ("Sending %s with PID %d the SIGTERM signal"
+                   % (process,serverTracker[process].getpid()))
       killprocessgrp(serverTracker[process].getpid())
       del serverTracker[process]
 
@@ -227,10 +240,13 @@ class processlauncher(threading.Thread):
       tmp.write(self.script['script'])
       tmp.close()
       threadLocal.uid = self.uid
-      self.cmd = "%s %s %s" % (pythoncmd, tempfilename, " ".join(self.script['param']))
+      self.cmd = "%s %s %s" % (pythoncmd, tempfilename,
+                               " ".join(self.script['param']))
       logger.info("Launching %s as uid %d" % (self.cmd,self.uid) )
-      p = subprocess.Popen(self.cmd, preexec_fn=self.changeUidAndSetSid, stdout=subprocess.PIPE,
-                           stderr=subprocess.PIPE, shell=True, close_fds=True)
+      p = subprocess.Popen(self.cmd,
+                           preexec_fn=self.changeUidAndSetSid,
+                           stdout=subprocess.PIPE,
+                           stderr=subprocess.PIPE, shell=True, close_fds=True)
       logger.info("Launched %s; PID %d" % (self.cmd,p.pid))
       self.serverpid = p.pid
       self.out, self.err = p.communicate()
@@ -253,7 +269,8 @@ class processlauncher(threading.Thread):
       time.sleep(1)
       logger.info("Waiting for process %s to start" % self.cmd)
       if sleepCount > 10:
-        logger.warn("Couldn't start process %s even after %d seconds" % (self.cmd,sleepCount))
+        logger.warn("Couldn't start process %s even after %d seconds"
+                    % (self.cmd,sleepCount))
         os._exit(1)
     return self.serverpid
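The shell.py changes above are almost entirely line-wrapping, and they all funnel through the same Popen pattern. A stripped-down sketch of shellRunner.run() (omitting the preexec_fn uid switch the real code applies):

```python
import subprocess

def run(script):
  # Join the argv list into one string and run it through a shell,
  # capturing stdout, stderr and the exit code.
  cmd = " ".join(script)
  p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                       stderr=subprocess.PIPE, shell=True, close_fds=True)
  out, err = p.communicate()
  return {'exitCode': p.wait(), 'output': out, 'error': err}

print run(['echo', 'hello'])['output']
```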
diff --git a/ambari-agent/src/main/python/manifestGenerator/imports.txt b/ambari-agent/src/main/python/manifestGenerator/imports.txt
new file mode 100644
index 0000000000..7a1f7de44a
--- /dev/null
+++ b/ambari-agent/src/main/python/manifestGenerator/imports.txt
@@ -0,0 +1,12 @@
+import "/etc/puppet/agent/modules/hdp/manifests/*.pp"
+import "/etc/puppet/agent/modules/hdp-hadoop/manifests/*.pp"
+import "/etc/puppet/agent/modules/hdp-hbase/manifests/*.pp"
+import "/etc/puppet/agent/modules/hdp-zookeeper/manifests/*.pp"
+import "/etc/puppet/agent/modules/hdp-oozie/manifests/*.pp"
+import "/etc/puppet/agent/modules/hdp-pig/manifests/*.pp"
+import "/etc/puppet/agent/modules/hdp-sqoop/manifests/*.pp"
+import "/etc/puppet/agent/modules/hdp-templeton/manifests/*.pp"
+import "/etc/puppet/agent/modules/hdp-hive/manifests/*.pp"
+import "/etc/puppet/agent/modules/hdp-hcat/manifests/*.pp"
+import "/etc/puppet/agent/modules/hdp-mysql/manifests/*.pp"
+import "/etc/puppet/agent/modules/hdp-monitor-webserver/manifests/*.pp"
diff --git a/ambari-agent/src/main/python/manifestGenerator/manifestGenerator.py b/ambari-agent/src/main/python/manifestGenerator/manifestGenerator.py
new file mode 100644
index 0000000000..5442d6f9ac
--- /dev/null
+++ b/ambari-agent/src/main/python/manifestGenerator/manifestGenerator.py
@@ -0,0 +1,98 @@
+import json
+
+def generateManifest(inputJsonStr):
+#reading json
+  parsedJson = json.loads(inputJsonStr)
+  hostname = parsedJson['hostname']
+  clusterHostInfo = parsedJson['clusterHostInfo']
+  params = parsedJson['params']
+  configurations = parsedJson['configurations']
+  hostAttributes = parsedJson['hostAttributes']
+  roles = parsedJson['roles']
+
+#writing manifest
+  manifest = open('site.pp', 'w')
+
+  #writing imports from external static file
+  writeImports(manifest)
+
+  #writing nodes
+  writeNodes(manifest, clusterHostInfo)
+
+  #writing params from map
+  writeParams(manifest, params)
+
+  #writing config maps
+  writeConfigurations(manifest, configurations)
+
+  #cycle here - writing host attributes
+  writeHostAttributes(manifest, hostAttributes)
+
+  #writing task definitions
+  writeTasks(manifest, roles)
+
+  manifest.close()
+
+
+#read static imports from file and write them to manifest
+def writeImports(outputFile, inputFileName='imports.txt'):
+  inputFile = open(inputFileName, 'r')
+
+  for line in inputFile:
+    outputFile.write(line)
+
+  inputFile.close()
+
+#write nodes
+def writeNodes(outputFile, clusterHostInfo):
+  for node in clusterHostInfo.iterkeys():
+    outputFile.write('$' + node + '= [')
+
+    coma = ''
+    for host in node:
+      outputFile.write(coma + '\'' + host + '\'')
+      coma = ', '
+
+    outputFile.write(']\n')
+
+#write params
+def writeParams(outputFile, params):
+  for param in params.iterkeys():
+    outputFile.write('$' + param + '="' + params[param] + '"\n')
+
+#write host attributes
+def writeHostAttributes(outputFile, hostAttributes):
+  outputFile.write('$hostAttributes={\n')
+
+  coma = ''
+  for attribute in hostAttributes.iterkeys():
+    outputFile.write(coma + '"' + attribute + '" => "{' + hostAttributes[attribute] + '"}')
+    coma = ',\n'
+
+  outputFile.write('}\n')
+
+#write configurations
+def writeConfigurations(outputFile, configs):
+  outputFile.write('$configuration = {\n')
+
+  for configName in configs.iterkeys():
+    outputFile.write('$' + configName + '=> {\n')
+    config = configs[configName]
+
+    coma = ''
+    for configParam in config.iterkeys():
+      outputFile.write(coma + '"' + configParam + '" => "' + config[configParam] + '"')
+      coma = ',\n'
+
+    outputFile.write('\n}\n')
+
+  outputFile.write('\n}\n')
+
+#write node tasks
+def writeTasks(outputFile, tasks):
+  for task in tasks :
+    nodename = task['role']
+    command = task['roleCommand']
+    taskParams = task['params']
+    #TODO: write node task to file
\ No newline at end of file
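A hypothetical invocation of the new generator, assuming manifestGenerator.py is importable and imports.txt is present in the working directory. The payload keys mirror exactly what generateManifest() reads; the values are invented, and since the generator is still scaffolding (writeTasks is a TODO), the emitted site.pp is illustrative only:

```python
import json
from manifestGenerator import generateManifest

payload = {'hostname' : 'h0.example.com',
           'clusterHostInfo' : {'namenode_host' : ['h0.example.com']},
           'params' : {'jdk_location' : '/usr/jdk64'},
           'configurations' : {'core_site' : {'fs.default.name' : 'hdfs://h0:8020'}},
           'hostAttributes' : {'os' : 'centos6'},
           'roles' : [{'role' : 'NAMENODE', 'roleCommand' : 'START', 'params' : {}}]}

generateManifest(json.dumps(payload))   # writes site.pp to the working directory
```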