.chef/knife.rb
.berkshelf/config.json
+nodes/*tmp.json
+roles/*tmp.rb
cookbooks/dsh/files/default/rpms/*.rpm
# Berkshelf
equivalent_apache_hadoop_major_version \
= equivalent_apache_hadoop_middle_version.split('.')[0]
-if version >= '5.0.0' then
+if middle_version >= '5.0' then
conf_files_v2_yarn = conf_files_v2_yarn + [
'mapred-env.sh'
]
})
end
- # N/A yet!
- #setup_cgroup(equivalent_apache_hadoop_middle_version)
+ if middle_version >= '5.0' then
+ setup_cgroup(equivalent_apache_hadoop_middle_version)
+ end
when 'MRv1'
# hadoop-0.20-mapreduce package includes task-controller
# (Note: supports both i386 and x86_64.)
*.sink.ganglia.dmax=<%= node['hadoop']['metrics2']['*.sink.ganglia.dmax'] %>
<%
-%w{
- namenode
- datanode
- resourcemanager
- nodemanager
-}.each do |prefix|
+hadoop_daemons = [
+ 'namenode',
+ 'datanode',
+ 'resourcemanager',
+ 'nodemanager'
+]
+
+if node['hadoop']['install_flavor'] == 'cdh' \
+ && node['hadoop']['cdh']['resource_negotiator_framework'] == 'MRv1' then
+ hadoop_daemons = [
+ 'namenode',
+ 'datanode',
+ 'jobtracker',
+ 'tasktracker',
+ 'maptask',
+ 'reducetask'
+ ]
+end
+
+hadoop_daemons.each do |prefix|
servers = node['hadoop']['metrics2']["#{prefix}.sink.ganglia.servers"]
if !servers.nil? && !servers.empty? then
-%>
*.sink.ganglia.dmax=<%= node['hadoop']['metrics2']['*.sink.ganglia.dmax'] %>
<%
-%w{
- namenode
- datanode
- resourcemanager
- nodemanager
-}.each do |prefix|
+hadoop_daemons = [
+ 'namenode',
+ 'datanode',
+ 'resourcemanager',
+ 'nodemanager'
+]
+
+if node['hadoop']['install_flavor'] == 'cdh' \
+ && node['hadoop']['cdh']['resource_negotiator_framework'] == 'MRv1' then
+ hadoop_daemons = [
+ 'namenode',
+ 'datanode',
+ 'jobtracker',
+ 'tasktracker',
+ 'maptask',
+ 'reducetask'
+ ]
+end
+
+hadoop_daemons.each do |prefix|
servers = node['hadoop']['metrics2']["#{prefix}.sink.ganglia.servers"]
if !servers.nil? && !servers.empty? then
-%>
*.sink.ganglia.dmax=<%= node['hadoop']['metrics2']['*.sink.ganglia.dmax'] %>
<%
-%w{
- namenode
- datanode
- resourcemanager
- nodemanager
-}.each do |prefix|
+hadoop_daemons = [
+ 'namenode',
+ 'datanode',
+ 'resourcemanager',
+ 'nodemanager'
+]
+
+if node['hadoop']['install_flavor'] == 'cdh' \
+ && node['hadoop']['cdh']['resource_negotiator_framework'] == 'MRv1' then
+ hadoop_daemons = [
+ 'namenode',
+ 'datanode',
+ 'jobtracker',
+ 'tasktracker',
+ 'maptask',
+ 'reducetask'
+ ]
+end
+
+hadoop_daemons.each do |prefix|
servers = node['hadoop']['metrics2']["#{prefix}.sink.ganglia.servers"]
if !servers.nil? && !servers.empty? then
-%>
--- /dev/null
+{
+ "run_list": [
+ "role[hadoop-pseudo-distributed-cdh4-with-ganglia]"
+ ]
+}
--- /dev/null
+name 'hadoop-pseudo-distributed-cdh4-with-ganglia'
+description 'Hadoop pseudo distributed mode configurations for CDH 4'
+
+run_list(
+ 'role[node_commons]',
+ 'role[ganglia-all-in-one-localhost]',
+ 'role[hadoop-pseudo-distributed-cdh4]'
+)
+
+#env_run_lists()
+
+default_attributes(
+ 'hadoop' => {
+ 'metrics2' => {
+ 'namenode.sink.ganglia.servers' => 'localhost:8649',
+ 'datanode.sink.ganglia.servers' => 'localhost:8649',
+ # for MapReduce 1.x only
+ 'jobtracker.sink.ganglia.servers' => 'localhost:8649',
+ 'tasktracker.sink.ganglia.servers' => 'localhost:8649',
+ 'maptask.sink.ganglia.servers' => 'localhost:8649',
+ 'reducetask.sink.ganglia.servers' => 'localhost:8649'
+ }
+ }
+)
+
+override_attributes(
+)
+
'container-executor' => {
'min.user.id' => '500'
},
- #'yarn.nodemanager.linux-container-executor.resources-handler.class' \
- # => 'org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler', # N/A on the CDH4
+ 'yarn.nodemanager.linux-container-executor.resources-handler.class' \
+ => 'org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler', # available on CDH 5.0+ (was N/A on CDH 4)
# for 1.x only
'taskcontroller' => {
'min.user.id' => '500'