OSDN Git Service

add the dsh cookbook.
authorwhitestar <whitestar@gaea.test>
Sun, 15 Sep 2013 07:32:54 +0000 (16:32 +0900)
committerwhitestar <whitestar@gaea.test>
Sun, 15 Sep 2013 07:32:54 +0000 (16:32 +0900)
19 files changed:
cookbooks/cdh/recipes/yum_repo.rb
cookbooks/dsh/CHANGELOG.md [new file with mode: 0644]
cookbooks/dsh/README.md [new file with mode: 0644]
cookbooks/dsh/attributes/default.rb [new file with mode: 0644]
cookbooks/dsh/files/default/rpms/.gitkeep [new file with mode: 0644]
cookbooks/dsh/metadata.rb [new file with mode: 0644]
cookbooks/dsh/recipes/default.rb [new file with mode: 0644]
cookbooks/grid/recipes/ops-utils.rb [new file with mode: 0644]
cookbooks/grid/templates/default/etc/profile.d/gridctl.sh [new file with mode: 0755]
cookbooks/grid/templates/default/usr/local/bin/dsh_ipass [new file with mode: 0755]
cookbooks/grid/templates/default/usr/local/bin/gridctl [new file with mode: 0755]
cookbooks/hadoop/templates/default/conf-1.1/hadoop-env.sh
cookbooks/hadoop/templates/default/conf-1.2/hadoop-env.sh
roles/dsh.rb [new file with mode: 0644]
roles/grid-ops-utils.rb [new file with mode: 0644]
roles/test-ah2-on-localhost.rb
roles/test-cdh-on-localhost.rb
roles/test-hdp-on-localhost.rb
roles/test-on-localhost.rb

index 2554fce..3ce90c1 100644 (file)
@@ -85,7 +85,7 @@ if node[:platform_family] == 'rhel' then
 
   remote_file downloaded_repo_pkg do
     source cdh_repo_url
-    not_if "rpm -qi #{cdh_repo_pkg}" unless node['hdp']['yum_repo']['update']
+    not_if "rpm -qi #{cdh_repo_pkg}" unless node['cdh']['yum_repo']['update']
     action :create_if_missing
   end
 
@@ -94,7 +94,7 @@ if node[:platform_family] == 'rhel' then
   package cdh_repo_pkg do
     source downloaded_repo_pkg
     options '--nogpgcheck'
-    not_if "rpm -qi #{cdh_repo_pkg}" unless node['hdp']['yum_repo']['update']
+    not_if "rpm -qi #{cdh_repo_pkg}" unless node['cdh']['yum_repo']['update']
     action :install
   end
 
@@ -122,7 +122,7 @@ if node[:platform_family] == 'rhel' then
       action :install
     end
 
-    if node['hdp']['yum_repo']['update'] then
+    if node['cdh']['yum_repo']['update'] then
       file gplextras_repo_file do
         action :delete
       end
diff --git a/cookbooks/dsh/CHANGELOG.md b/cookbooks/dsh/CHANGELOG.md
new file mode 100644 (file)
index 0000000..a521076
--- /dev/null
@@ -0,0 +1,12 @@
+# CHANGELOG for dsh
+
+This file is used to list changes made in each version of dsh.
+
+## 0.1.0:
+
+* Initial release of dsh
+
+- - -
+Check the [Markdown Syntax Guide](http://daringfireball.net/projects/markdown/syntax) for help with Markdown.
+
+The [Github Flavored Markdown page](http://github.github.com/github-flavored-markdown/) describes the differences between markdown on github and standard markdown.
diff --git a/cookbooks/dsh/README.md b/cookbooks/dsh/README.md
new file mode 100644 (file)
index 0000000..567c6c3
--- /dev/null
@@ -0,0 +1,68 @@
+dsh Cookbook
+============
+TODO: Enter the cookbook description here.
+
+e.g.
+This cookbook makes your favorite breakfast sandwich.
+
+Requirements
+------------
+TODO: List your cookbook requirements. Be sure to include any requirements this cookbook has on platforms, libraries, other cookbooks, packages, operating systems, etc.
+
+e.g.
+#### packages
+- `toaster` - dsh needs toaster to brown your bagel.
+
+Attributes
+----------
+TODO: List your cookbook attributes here.
+
+e.g.
+#### dsh::default
+<table>
+  <tr>
+    <th>Key</th>
+    <th>Type</th>
+    <th>Description</th>
+    <th>Default</th>
+  </tr>
+  <tr>
+    <td><tt>['dsh']['bacon']</tt></td>
+    <td>Boolean</td>
+    <td>whether to include bacon</td>
+    <td><tt>true</tt></td>
+  </tr>
+</table>
+
+Usage
+-----
+#### dsh::default
+TODO: Write usage instructions for each cookbook.
+
+e.g.
+Just include `dsh` in your node's `run_list`:
+
+```json
+{
+  "name":"my_node",
+  "run_list": [
+    "recipe[dsh]"
+  ]
+}
+```
+
+Contributing
+------------
+TODO: (optional) If this is a public cookbook, detail the process for contributing. If this is a private cookbook, remove this section.
+
+e.g.
+1. Fork the repository on Github
+2. Create a named feature branch (like `add_component_x`)
+3. Write your change
+4. Write tests for your change (if applicable)
+5. Run the tests, ensuring they all pass
+6. Submit a Pull Request using Github
+
+License and Authors
+-------------------
+Authors: TODO: List authors
diff --git a/cookbooks/dsh/attributes/default.rb b/cookbooks/dsh/attributes/default.rb
new file mode 100644 (file)
index 0000000..35b320c
--- /dev/null
@@ -0,0 +1,26 @@
+#
+# Cookbook Name:: dsh
+# Attributes:: default
+#
+# Copyright 2013, whitestar
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Please deploy dsh RPM packages as Chef cookbook_file.
+default['dsh']['rpm']['cookbook_file_dir'] = 'rpms'  # files/default/rpms
+default['dsh']['rpm']['package_names'] = [
+  'libdshconfig1-0.20.13-1',
+  'dsh-0.25.10-1'
+]
+
diff --git a/cookbooks/dsh/files/default/rpms/.gitkeep b/cookbooks/dsh/files/default/rpms/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/cookbooks/dsh/metadata.rb b/cookbooks/dsh/metadata.rb
new file mode 100644 (file)
index 0000000..27ff788
--- /dev/null
@@ -0,0 +1,7 @@
+name             'dsh'
+maintainer       'whitestar'
+maintainer_email ''
+license          'Apache 2.0'
+description      'Installs/Configures dsh'
+long_description IO.read(File.join(File.dirname(__FILE__), 'README.md'))
+version          '0.1.0'
diff --git a/cookbooks/dsh/recipes/default.rb b/cookbooks/dsh/recipes/default.rb
new file mode 100644 (file)
index 0000000..115c11f
--- /dev/null
@@ -0,0 +1,49 @@
+#
+# Cookbook Name:: dsh
+# Recipe:: default
+#
+# Copyright 2013, whitestar
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+case node[:platform_family]
+when 'debian'
+  pkg = 'dsh'
+  resources(:package => pkg) rescue package pkg do
+    action :install
+  end
+when 'rhel'
+  file_cache_path = Chef::Config[:file_cache_path]
+  kernel_machine = (node[:kernel][:machine] == 'i686') \
+    ? 'i386' \
+    : node[:kernel][:machine]
+
+  node['dsh']['rpm']['package_names'].each {|rpm_name|
+    rpm_pkg = "#{rpm_name}.#{kernel_machine}.rpm"
+    cookbook_file "#{file_cache_path}/#{rpm_pkg}" do
+      source "#{node['dsh']['rpm']['cookbook_file_dir']}/#{rpm_pkg}"
+      owner 'root'
+      group 'root'
+      mode '0644'
+    end
+
+    rpm_package rpm_name do
+      source "#{file_cache_path}/#{rpm_pkg}"
+      options '-vh --nodeps'
+      not_if "rpm -qi #{rpm_pkg.split('-').first}"
+      action :install
+    end
+  }
+end
+
diff --git a/cookbooks/grid/recipes/ops-utils.rb b/cookbooks/grid/recipes/ops-utils.rb
new file mode 100644 (file)
index 0000000..4ef5fe3
--- /dev/null
@@ -0,0 +1,31 @@
+#
+# Cookbook Name:: grid
+# Recipe:: ops-utils
+#
+# Copyright 2013, whitestar
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+%w{
+  dsh_ipass
+  gridctl
+}.each {|command|
+  template "/usr/local/bin/#{command}" do
+    source "usr/local/bin/#{command}"
+    owner 'root'
+    group 'root'
+    mode '0755'
+  end
+}
+
diff --git a/cookbooks/grid/templates/default/etc/profile.d/gridctl.sh b/cookbooks/grid/templates/default/etc/profile.d/gridctl.sh
new file mode 100755 (executable)
index 0000000..8bac820
--- /dev/null
@@ -0,0 +1,60 @@
+#
+# Copyright 2013 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# Settings
+DOMAIN=${DOMAIN:-''}
+#DOMAIN='grid.example.com'
+
+# Hadoop (HDFS, MapReduce)
+HADOOP_HOME=${HADOOP_HOME:-'/grid/usr/hadoop'}
+HADOOP_PREFIX=${HADOOP_PREFIX:-"${HADOOP_HOME}"}
+HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-''}
+HDFS_USER=${HDFS_USER:-'hdfs'}
+MAPRED_USER=${MAPRED_USER:-'mapred'}
+DN_INIT_USER=${DN_INIT_USER:-'root'}
+NAMENODE=${NAMENODE:-'localhost'}
+SECONDARYNAMENODE=${SECONDARYNAMENODE:-'localhost'}
+DATANODES=${DATANODES:-'localhost'}
+BALANCER=${BALANCER:-'localhost'}
+JOBTRACKER=${JOBTRACKER:-'localhost'}
+TASKTRACKERS=${TASKTRACKERS:-"$DATANODES"}
+HISTORYSERVER=${HISTORYSERVER:-'localhost'}
+#NAMENODE='nn00'
+#SECONDARYNAMENODE='nn02'
+#DATANODES='dn00000 dn00001'
+#JOBTRACKER='jt00'
+# YARN
+YARN_USER=${YARN_USER:-'yarn'}
+RESOURCEMANAGER=${RESOURCEMANAGER:-'localhost'}
+NODEMANAGERS=${NODEMANAGERS:-"$DATANODES"}
+#RESOURCEMANAGER='rm00'
+
+# HBase
+HBASE_HOME=${HBASE_HOME:-'/grid/usr/hbase'}
+HBASE_USER=${HBASE_USER:-'hbase'}
+HMASTERS=${HMASTERS:-'localhost'}
+HREGIONSERVERS=${HREGIONSERVERS:-'localhost'}
+#HMASTERS='hm00 hm01'
+#HREGIONSERVERS='dn00000 dn00001'
+
+# ZooKeeper
+ZOOKEEPER_PREFIX=${ZOOKEEPER_PREFIX:-'/grid/usr/zookeeper'}
+ZOOCFGDIR=${ZOOCFGDIR:-"${ZOOKEEPER_PREFIX}/conf"}
+ZOOKEEPER_USER=${ZOOKEEPER_USER:-'zookeeper'}
+ZOOKEEPER_PEERS=${ZOOKEEPER_PEERS:-'localhost'}
+#ZOOKEEPER_PEERS='zk00 zk01 zk02'
+
diff --git a/cookbooks/grid/templates/default/usr/local/bin/dsh_ipass b/cookbooks/grid/templates/default/usr/local/bin/dsh_ipass
new file mode 100755 (executable)
index 0000000..adab04e
--- /dev/null
@@ -0,0 +1,85 @@
+#!/bin/bash
+#
+# dsh_ipass:   DSH with Password Prompt for sudo
+#
+# $Id: dsh_ipass 492 2013-05-10 11:16:15Z whitestar $
+#
+# Copyright 2012-2013 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+PATH="/sbin:/usr/sbin:/bin:/usr/bin"
+NAME="dsh_ipass"
+DESC="DSH with Password Prompt for sudo"
+
+# Settings
+
+
+ARGC="$#"
+ARGS="$@"
+COMMAND=$1
+
+
+help() {
+       echo "Usage: dsh_ipass [-P password] [argument(s) with 'sudo -S' for dsh]"
+       echo 'e.g. dsh_ipass -m target.example.com -- sudo -S uptime'
+}
+
+
+while getopts :P:h OPT; do
+       case $OPT in
+               'h') 
+                       help
+                       exit 0
+                       ;;
+               'P') PASSWD=$OPTARG;;
+       esac
+done
+
+
+# Validation
+if [ $ARGC -eq 0 ]; then
+       help
+       exit 1
+elif [ x"$COMMAND" = x'help' ]; then
+       help
+       exit 0
+fi
+
+# Main
+if [ x"$PASSWD" = x'' ]; then
+       echo -n '[sudo] password via SSH: '
+       stty -echo
+       read PASSWD
+       stty echo
+       echo ''
+else
+       NORMALIZED_ARGS=()
+       REMOVE_ELEMENT='false'
+       for i in $ARGS; do
+               if [ x"$i" = x'-P' ]; then
+                       REMOVE_ELEMENT='true'
+               elif [ x"$REMOVE_ELEMENT" = x'true' ]; then
+                       REMOVE_ELEMENT='false'
+               else
+                       NORMALIZED_ARGS+=("$i")
+               fi
+       done
+       ARGS="${NORMALIZED_ARGS[@]}"
+fi
+
+DSH_COMMAND="dsh -i -c -r ssh $ARGS"
+echo "DSH_COMMAND: ${DSH_COMMAND}"
+echo $PASSWD | $DSH_COMMAND
+exit $?
diff --git a/cookbooks/grid/templates/default/usr/local/bin/gridctl b/cookbooks/grid/templates/default/usr/local/bin/gridctl
new file mode 100755 (executable)
index 0000000..38238e6
--- /dev/null
@@ -0,0 +1,505 @@
+#!/bin/sh
+#
+# gridctl:     Control grid
+#
+# $Id: gridctl 527 2013-05-16 10:49:40Z whitestar $
+#
+# Copyright 2013 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+PATH=.:/sbin:/usr/sbin:/bin:/usr/bin:/usr/local/bin:`dirname $0`
+
+# Settings
+DOMAIN=${DOMAIN:-''}
+#DOMAIN='grid.example.com'
+
+# Hadoop (HDFS, MapReduce)
+HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-''}
+if [ x"$HADOOP_CONF_DIR" != x'' ]; then
+       HADOOP_CONF_OPT="--config ${HADOOP_CONF_DIR}"
+else
+       HADOOP_CONF_OPT=''
+fi
+HDFS_USER=${HDFS_USER:-'hdfs'}
+MAPRED_USER=${MAPRED_USER:-'mapred'}
+DN_INIT_USER=${DN_INIT_USER:-'root'}
+NAMENODE=${NAMENODE:-'localhost'}
+SECONDARYNAMENODE=${SECONDARYNAMENODE:-'localhost'}
+DATANODES=${DATANODES:-'localhost'}
+BALANCER=${BALANCER:-'localhost'}
+JOBTRACKER=${JOBTRACKER:-'localhost'}
+TASKTRACKERS=${TASKTRACKERS:-"$DATANODES"}
+HISTORYSERVER=${HISTORYSERVER:-'localhost'}
+#NAMENODE='nn00'
+#SECONDARYNAMENODE='nn02'
+#DATANODES='dn00000 dn00001'
+#JOBTRACKER='jt00'
+# YARN
+YARN_USER=${YARN_USER:-'yarn'}
+RESOURCEMANAGER=${RESOURCEMANAGER:-'localhost'}
+NODEMANAGERS=${NODEMANAGERS:-"$DATANODES"}
+#RESOURCEMANAGER='rm00'
+
+# HBase
+HBASE_HOME=${HBASE_HOME:-'/grid/usr/hbase'}
+HBASE_USER=${HBASE_USER:-'hbase'}
+HMASTERS=${HMASTERS:-'localhost'}
+HREGIONSERVERS=${HREGIONSERVERS:-'localhost'}
+#HMASTERS='hm00 hm01'
+#HREGIONSERVERS='dn00000 dn00001'
+
+# ZooKeeper
+ZOOKEEPER_PREFIX=${ZOOKEEPER_PREFIX:-'/grid/usr/zookeeper'}
+ZOOCFGDIR=${ZOOCFGDIR:-"${ZOOKEEPER_PREFIX}/conf"}
+ZOOKEEPER_USER=${ZOOKEEPER_USER:-'zookeeper'}
+ZOOKEEPER_PEERS=${ZOOKEEPER_PEERS:-'localhost'}
+#ZOOKEEPER_PEERS='zk00 zk01 zk02'
+
+
+help() {
+       echo 'Usage: gridctl SERVICE,... {start|stop|status}'
+       echo ''
+       echo 'Services: '
+       echo '    hdfs |balancer |mapred|historyserver  (Hadoop 1.x or before)'
+       echo '    hdfs2|balancer2|yarn  |historyserver2 (Hadoop 2.x or later)'
+       echo '    hbase|zookeeper'
+       echo 'Commands: '
+       echo '    start|stop: control the grid daemons'
+       echo '    status:     print the Java process(es)'
+}
+
+
+# $1: target node list (e.g. 'zk00 zk01 zk02')
+get_targets() {
+       TARGETS=''
+       HOSTS=$1
+       for HOST in $HOSTS; do
+               if [ x"$DOMAIN" != x'' ]; then
+                       FQDN=${HOST}.${DOMAIN}
+               else
+                       FQDN=${HOST}
+               fi
+               if [ x"$TARGETS" = x'' ]; then
+                       TARGETS=$FQDN
+               else
+                       TARGETS="${TARGETS},${FQDN}"
+               fi
+       done
+
+       echo $TARGETS
+}
+
+
+get_passwd() {
+       echo -n '[sudo] password via SSH: '
+       stty -echo
+       read PASSWD
+       stty echo
+       echo ''
+}
+
+
+ctl_hdfs() {
+       DAEMON_COMMAND=${DAEMON_COMMAND:-"${HADOOP_PREFIX}/bin/hadoop-daemon.sh"}
+       SERVICE_PREFIX=${SERVICE_PREFIX:-'hadoop-'}
+       _NAMENODE=`get_targets "$NAMENODE"`
+       _SECONDARYNAMENODE=`get_targets "$SECONDARYNAMENODE"`
+       _DATANODES=`get_targets "$DATANODES"`
+       echo "[Grid ${COMMAND}: ${DESC}]"
+       echo "HADOOP_CONF_DIR: ${HADOOP_CONF_DIR}"
+       echo "Daemon command: ${DAEMON_COMMAND}"
+       echo "NameNode: ${_NAMENODE}"
+       echo "SecondaryNameNode: ${_SECONDARYNAMENODE}"
+       echo "DataNodes: ${_DATANODES}"
+       get_passwd
+       case "$COMMAND" in
+               start)
+                       echo "Starting ${DESC} ..."
+                       if [ $ARTIFACT_TYPE = 'tarball' ]; then
+                               dsh_ipass -P $PASSWD -m $_NAMENODE -M -- sudo -S -p \'\' -u $HDFS_USER $DAEMON_COMMAND $HADOOP_CONF_OPT start namenode
+                               dsh_ipass -P $PASSWD -m $_SECONDARYNAMENODE -M -- sudo -S -p \'\' -u $HDFS_USER $DAEMON_COMMAND $HADOOP_CONF_OPT start secondarynamenode
+                               dsh_ipass -P $PASSWD -m $_DATANODES -M -- sudo -S -p \'\' -u $DN_INIT_USER $DAEMON_COMMAND $HADOOP_CONF_OPT start datanode
+                       elif [ $ARTIFACT_TYPE = 'package' ]; then
+                               dsh_ipass -P $PASSWD -m $_NAMENODE -M -- sudo -S -p \'\' service ${SERVICE_PREFIX}namenode start
+                               dsh_ipass -P $PASSWD -m $_SECONDARYNAMENODE -M -- sudo -S -p \'\' service ${SERVICE_PREFIX}secondarynamenode start
+                               dsh_ipass -P $PASSWD -m $_DATANODES -M -- sudo -S -p \'\' service ${SERVICE_PREFIX}datanode start
+                       fi
+                       ;;
+               stop)
+                       echo "Stopping ${DESC} ..."
+                       if [ $ARTIFACT_TYPE = 'tarball' ]; then
+                               dsh_ipass -P $PASSWD -m $_DATANODES -M -- sudo -S -p \'\' -u $DN_INIT_USER $DAEMON_COMMAND $HADOOP_CONF_OPT stop datanode
+                               dsh_ipass -P $PASSWD -m $_SECONDARYNAMENODE -M -- sudo -S -p \'\' -u $HDFS_USER $DAEMON_COMMAND $HADOOP_CONF_OPT stop secondarynamenode
+                               dsh_ipass -P $PASSWD -m $_NAMENODE -M -- sudo -S -p \'\' -u $HDFS_USER $DAEMON_COMMAND $HADOOP_CONF_OPT stop namenode
+                       elif [ $ARTIFACT_TYPE = 'package' ]; then
+                               dsh_ipass -P $PASSWD -m $_DATANODES -M -- sudo -S -p \'\' service ${SERVICE_PREFIX}datanode stop
+                               dsh_ipass -P $PASSWD -m $_SECONDARYNAMENODE -M -- sudo -S -p \'\' service ${SERVICE_PREFIX}secondarynamenode stop
+                               dsh_ipass -P $PASSWD -m $_NAMENODE -M -- sudo -S -p \'\' service ${SERVICE_PREFIX}namenode stop
+                       fi
+                       ;;
+               status)
+                       echo "${DESC} Status"
+                       if [ $ARTIFACT_TYPE = 'tarball' ]; then
+                               dsh_ipass -P $PASSWD -m $_NAMENODE -M -- sudo -S -p \'\' -u $HDFS_USER jps -mv
+                               dsh_ipass -P $PASSWD -m $_SECONDARYNAMENODE -M -- sudo -S -p \'\' -u $HDFS_USER jps -mv
+                               dsh_ipass -P $PASSWD -m $_DATANODES -M -- sudo -S -p \'\' -u $HDFS_USER jps -mv
+                       elif [ $ARTIFACT_TYPE = 'package' ]; then
+                               dsh_ipass -P $PASSWD -m $_DATANODES -M -- sudo -S -p \'\' service ${SERVICE_PREFIX}datanode status
+                               dsh_ipass -P $PASSWD -m $_SECONDARYNAMENODE -M -- sudo -S -p \'\' service ${SERVICE_PREFIX}secondarynamenode status
+                               dsh_ipass -P $PASSWD -m $_NAMENODE -M -- sudo -S -p \'\' service ${SERVICE_PREFIX}namenode status
+                       fi
+                       ;;
+       esac
+       DAEMON_COMMAND=''
+       SERVICE_PREFIX=''
+}
+
+
+ctl_hdfs2() {
+       DAEMON_COMMAND=${DAEMON_COMMAND:-"${HADOOP_PREFIX}/sbin/hadoop-daemon.sh"}
+       SERVICE_PREFIX=${SERVICE_PREFIX:-'hadoop-hdfs-'}
+       ctl_hdfs
+}
+
+
+ctl_balancer() {
+       DAEMON_COMMAND=${DAEMON_COMMAND:-"${HADOOP_PREFIX}/bin/hadoop-daemon.sh"}
+       USER=$HDFS_USER
+       _BALANCER=`get_targets "$BALANCER"`
+       echo "[Grid ${COMMAND}: ${DESC}]"
+       echo "HADOOP_CONF_DIR: ${HADOOP_CONF_DIR}"
+       echo "Daemon command: ${DAEMON_COMMAND}"
+       echo "Balancer: ${_BALANCER}"
+       get_passwd
+       case "$COMMAND" in
+               start)
+                       echo "Starting ${DESC} ..."
+                       dsh_ipass -P $PASSWD -m $_BALANCER -M -- sudo -S -p \'\' -u $USER $DAEMON_COMMAND $HADOOP_CONF_OPT start balancer
+                       ;;
+               stop)
+                       echo "Stopping ${DESC} ..."
+                       dsh_ipass -P $PASSWD -m $_BALANCER -M -- sudo -S -p \'\' -u $USER $DAEMON_COMMAND $HADOOP_CONF_OPT stop balancer
+                       ;;
+               status)
+                       echo "${DESC} Status"
+                       dsh_ipass -P $PASSWD -m $_BALANCER -M -- sudo -S -p \'\' -u $USER jps -mv
+                       ;;
+       esac
+       DAEMON_COMMAND=''
+       SERVICE_PREFIX=''
+}
+
+
+ctl_balancer2() {
+       DAEMON_COMMAND=${DAEMON_COMMAND:-"${HADOOP_PREFIX}/sbin/hadoop-daemon.sh"}
+       ctl_balancer
+}
+
+
+ctl_mapred() {
+       DAEMON_COMMAND="${HADOOP_PREFIX}/bin/hadoop-daemon.sh"
+       SERVICE_PREFIX=${SERVICE_PREFIX:-'hadoop-'}
+       USER=$MAPRED_USER
+       _JOBTRACKER=`get_targets "$JOBTRACKER"`
+       _TASKTRACKERS=`get_targets "$TASKTRACKERS"`
+       echo "[Grid ${COMMAND}: ${DESC}]"
+       echo "HADOOP_CONF_DIR: ${HADOOP_CONF_DIR}"
+       echo "Daemon command: ${DAEMON_COMMAND}"
+       echo "JobTracker: ${_JOBTRACKER}"
+       echo "TaskTrackers: ${_TASKTRACKERS}"
+       get_passwd
+       case "$COMMAND" in
+               start)
+                       echo "Starting ${DESC} ..."
+                       if [ $ARTIFACT_TYPE = 'tarball' ]; then
+                               dsh_ipass -P $PASSWD -m $_JOBTRACKER -M -- sudo -S -p \'\' -u $USER $DAEMON_COMMAND $HADOOP_CONF_OPT start jobtracker
+                               dsh_ipass -P $PASSWD -m $_TASKTRACKERS -M -- sudo -S -p \'\' -u $USER $DAEMON_COMMAND $HADOOP_CONF_OPT start tasktracker
+                       elif [ $ARTIFACT_TYPE = 'package' ]; then
+                               dsh_ipass -P $PASSWD -m $_JOBTRACKER -M -- sudo -S -p \'\' service ${SERVICE_PREFIX}jobtracker start
+                               dsh_ipass -P $PASSWD -m $_TASKTRACKERS -M -- sudo -S -p \'\' service ${SERVICE_PREFIX}tasktracker start
+                       fi
+                       ;;
+               stop)
+                       echo "Stopping ${DESC} ..."
+                       if [ $ARTIFACT_TYPE = 'tarball' ]; then
+                               dsh_ipass -P $PASSWD -m $_TASKTRACKERS -M -- sudo -S -p \'\' -u $USER $DAEMON_COMMAND $HADOOP_CONF_OPT stop tasktracker
+                               dsh_ipass -P $PASSWD -m $_JOBTRACKER -M -- sudo -S -p \'\' -u $USER $DAEMON_COMMAND $HADOOP_CONF_OPT stop jobtracker
+                       elif [ $ARTIFACT_TYPE = 'package' ]; then
+                               dsh_ipass -P $PASSWD -m $_TASKTRACKERS -M -- sudo -S -p \'\' service ${SERVICE_PREFIX}tasktracker stop
+                               dsh_ipass -P $PASSWD -m $_JOBTRACKER -M -- sudo -S -p \'\' service ${SERVICE_PREFIX}jobtracker stop
+                       fi
+                       ;;
+               status)
+                       echo "${DESC} Status"
+                       if [ $ARTIFACT_TYPE = 'tarball' ]; then
+                               dsh_ipass -P $PASSWD -m $_JOBTRACKER -M -- sudo -S -p \'\' -u $USER jps -mv
+                               dsh_ipass -P $PASSWD -m $_TASKTRACKERS -M -- sudo -S -p \'\' -u $USER jps -mv
+                       elif [ $ARTIFACT_TYPE = 'package' ]; then
+                               dsh_ipass -P $PASSWD -m $_JOBTRACKER -M -- sudo -S -p \'\' service ${SERVICE_PREFIX}jobtracker status
+                               dsh_ipass -P $PASSWD -m $_TASKTRACKERS -M -- sudo -S -p \'\' service ${SERVICE_PREFIX}tasktracker status
+                       fi
+                       ;;
+       esac
+}
+
+
+ctl_historyserver() {
+       DAEMON_COMMAND=${DAEMON_COMMAND:-"${HADOOP_PREFIX}/bin/hadoop-daemon.sh"}
+       SERVICE_PREFIX=${SERVICE_PREFIX:-'hadoop-'}
+       USER=$MAPRED_USER
+       _HISTORYSERVER=`get_targets "$HISTORYSERVER"`
+       echo "[Grid ${COMMAND}: ${DESC}]"
+       echo "HADOOP_CONF_DIR: ${HADOOP_CONF_DIR}"
+       echo "Daemon command: ${DAEMON_COMMAND}"
+       echo "HistoryServer: ${_HISTORYSERVER}"
+       get_passwd
+       case "$COMMAND" in
+               start)
+                       echo "Starting ${DESC} ..."
+                       if [ $ARTIFACT_TYPE = 'tarball' ]; then
+                               dsh_ipass -P $PASSWD -m $_HISTORYSERVER -M -- sudo -S -p \'\' -u $USER $DAEMON_COMMAND $HADOOP_CONF_OPT start historyserver
+                       elif [ $ARTIFACT_TYPE = 'package' ]; then
+                               dsh_ipass -P $PASSWD -m $_HISTORYSERVER -M -- sudo -S -p \'\' service ${SERVICE_PREFIX}historyserver start
+                       fi
+                       ;;
+               stop)
+                       echo "Stopping ${DESC} ..."
+                       if [ $ARTIFACT_TYPE = 'tarball' ]; then
+                               dsh_ipass -P $PASSWD -m $_HISTORYSERVER -M -- sudo -S -p \'\' -u $USER $DAEMON_COMMAND $HADOOP_CONF_OPT stop historyserver
+                       elif [ $ARTIFACT_TYPE = 'package' ]; then
+                               dsh_ipass -P $PASSWD -m $_HISTORYSERVER -M -- sudo -S -p \'\' service ${SERVICE_PREFIX}historyserver stop
+                       fi
+                       ;;
+               status)
+                       echo "${DESC} Status"
+                       if [ $ARTIFACT_TYPE = 'tarball' ]; then
+                               dsh_ipass -P $PASSWD -m $_HISTORYSERVER -M -- sudo -S -p \'\' -u $USER jps -mv
+                       elif [ $ARTIFACT_TYPE = 'package' ]; then
+                               dsh_ipass -P $PASSWD -m $_HISTORYSERVER -M -- sudo -S -p \'\' service ${SERVICE_PREFIX}historyserver status
+                       fi
+                       ;;
+       esac
+       DAEMON_COMMAND=''
+       SERVICE_PREFIX=''
+}
+
+
+ctl_historyserver2() {
+       DAEMON_COMMAND="${HADOOP_PREFIX}/sbin/mr-jobhistory-daemon.sh"
+       SERVICE_PREFIX=${SERVICE_PREFIX:-'hadoop-mapreduce-'}
+       ctl_historyserver
+}
+
+
+ctl_yarn() {
+       DAEMON_COMMAND="${HADOOP_PREFIX}/sbin/yarn-daemon.sh"
+       SERVICE_PREFIX=${SERVICE_PREFIX:-'hadoop-yarn-'}
+       USER=$YARN_USER
+       _RESOURCEMANAGER=`get_targets "$RESOURCEMANAGER"`
+       _NODEMANAGERS=`get_targets "$NODEMANAGERS"`
+       echo "[Grid ${COMMAND}: ${DESC}]"
+       echo "HADOOP_CONF_DIR: ${HADOOP_CONF_DIR}"
+       echo "Daemon command: ${DAEMON_COMMAND}"
+       echo "ResourceManager: ${_RESOURCEMANAGER}"
+       echo "NodeManagers: ${_NODEMANAGERS}"
+       get_passwd
+       case "$COMMAND" in
+               start)
+                       echo "Starting ${DESC} ..."
+                       if [ $ARTIFACT_TYPE = 'tarball' ]; then
+                               dsh_ipass -P $PASSWD -m $_RESOURCEMANAGER -M -- sudo -S -p \'\' -u $USER $DAEMON_COMMAND $HADOOP_CONF_OPT start resourcemanager
+                               dsh_ipass -P $PASSWD -m $_NODEMANAGERS -M -- sudo -S -p \'\' -u $USER $DAEMON_COMMAND $HADOOP_CONF_OPT start nodemanager
+                       elif [ $ARTIFACT_TYPE = 'package' ]; then
+                               dsh_ipass -P $PASSWD -m $_RESOURCEMANAGER -M -- sudo -S -p \'\' service ${SERVICE_PREFIX}resourcemanager start
+                               dsh_ipass -P $PASSWD -m $_NODEMANAGERS -M -- sudo -S -p \'\' service ${SERVICE_PREFIX}nodemanager start
+                       fi
+                       ;;
+               stop)
+                       echo "Stopping ${DESC} ..."
+                       if [ $ARTIFACT_TYPE = 'tarball' ]; then
+                               dsh_ipass -P $PASSWD -m $_NODEMANAGERS -M -- sudo -S -p \'\' -u $USER $DAEMON_COMMAND $HADOOP_CONF_OPT stop nodemanager
+                               dsh_ipass -P $PASSWD -m $_RESOURCEMANAGER -M -- sudo -S -p \'\' -u $USER $DAEMON_COMMAND $HADOOP_CONF_OPT stop resourcemanager
+                       elif [ $ARTIFACT_TYPE = 'package' ]; then
+                               dsh_ipass -P $PASSWD -m $_NODEMANAGERS -M -- sudo -S -p \'\' service ${SERVICE_PREFIX}nodemanager stop
+                               dsh_ipass -P $PASSWD -m $_RESOURCEMANAGER -M -- sudo -S -p \'\' service ${SERVICE_PREFIX}resourcemanager stop
+                       fi
+                       ;;
+               status)
+                       echo "${DESC} Status"
+                       if [ $ARTIFACT_TYPE = 'tarball' ]; then
+                               dsh_ipass -P $PASSWD -m $_RESOURCEMANAGER -M -- sudo -S -p \'\' -u $USER jps -mv
+                               dsh_ipass -P $PASSWD -m $_NODEMANAGERS -M -- sudo -S -p \'\' -u $USER jps -mv
+                       elif [ $ARTIFACT_TYPE = 'package' ]; then
+                               dsh_ipass -P $PASSWD -m $_NODEMANAGERS -M -- sudo -S -p \'\' service ${SERVICE_PREFIX}nodemanager status
+                               dsh_ipass -P $PASSWD -m $_RESOURCEMANAGER -M -- sudo -S -p \'\' service ${SERVICE_PREFIX}resourcemanager status
+                       fi
+                       ;;
+       esac
+}
+
+
+ctl_hbase() {
+       DAEMON_COMMAND="${HBASE_HOME}/bin/hbase-daemon.sh"
+       USER=$HBASE_USER
+       _HMASTERS=`get_targets "$HMASTERS"`
+       _HREGIONSERVERS=`get_targets "$HREGIONSERVERS"`
+       echo "[Grid ${COMMAND}: ${DESC}]"
+       echo "HBASE_HOME: ${HBASE_HOME}"
+       echo "Daemon command: ${DAEMON_COMMAND}"
+       echo "HMasters: ${_HMASTERS}"
+       echo "HRegionServers: ${_HREGIONSERVERS}"
+       get_passwd
+       case "$COMMAND" in
+               start)
+                       echo "Starting ${DESC} ..."
+                       dsh_ipass -P $PASSWD -m $_HMASTERS -M -- sudo -S -p \'\' -u $USER $DAEMON_COMMAND start master
+                       dsh_ipass -P $PASSWD -m $_HREGIONSERVERS -M -- sudo -S -p \'\' -u $USER $DAEMON_COMMAND start regionserver
+                       ;;
+               stop)
+                       echo "Stopping ${DESC} ..."
+                       dsh_ipass -P $PASSWD -m $_HREGIONSERVERS -M -- sudo -S -p \'\' -u $USER $DAEMON_COMMAND stop regionserver
+                       dsh_ipass -P $PASSWD -m $_HMASTERS -M -- sudo -S -p \'\' -u $USER $DAEMON_COMMAND stop master
+                       ;;
+               status)
+                       echo "${DESC} Status"
+                       dsh_ipass -P $PASSWD -m $_HMASTERS -M -- sudo -S -p \'\' -u $USER jps -mv
+                       dsh_ipass -P $PASSWD -m $_HREGIONSERVERS -M -- sudo -S -p \'\' -u $USER jps -mv
+                       ;;
+       esac
+}
+
+
+ctl_zookeeper() {
+       DAEMON_COMMAND="${ZOOKEEPER_PREFIX}/bin/zkServer.sh"
+       USER=$ZOOKEEPER_USER
+       PEERS=`get_targets "$ZOOKEEPER_PEERS"`
+       echo "[Grid ${COMMAND}: ${DESC}]"
+       echo "ZOOKEEPER_PREFIX: ${ZOOKEEPER_PREFIX}"
+       echo "ZOOCFGDIR: ${ZOOCFGDIR}"
+       echo "Daemon command: ${DAEMON_COMMAND}"
+       echo "ZooKeeper Peers: ${PEERS}"
+       get_passwd
+       case "$COMMAND" in
+               start)
+                       echo "Starting ${DESC} by ${USER} user ..."
+                       DAEMON_COMMAND="export ZOOCFGDIR=${ZOOCFGDIR};${DAEMON_COMMAND} start"
+                       dsh_ipass -P $PASSWD -m $PEERS -M -- sudo -S -p \'\' -u $USER sh -c "'"$DAEMON_COMMAND"'"
+                       ;;
+               stop)
+                       echo "Stopping ${DESC} by ${USER} user ..."
+                       DAEMON_COMMAND="export ZOOCFGDIR=${ZOOCFGDIR};${DAEMON_COMMAND} stop"
+                       dsh_ipass -P $PASSWD -m $PEERS -M -- sudo -S -p \'\' -u $USER sh -c "'"$DAEMON_COMMAND"'"
+                       ;;
+               status)
+                       echo "${DESC} Status"
+                       dsh_ipass -P $PASSWD -m $PEERS -M -- sudo -S -p \'\' -u $USER jps -mv
+                       ;;
+       esac
+}
+
+
+# Main
+ARGS="$@"
+SERVICES=`echo $1 | tr ',' ' '`
+COMMAND=$2
+
+# Validation
+if [ $# != 2 ]; then
+       help
+       exit 1
+fi
+
+if [ x"$HADOOP_HOME" != x'' ]; then
+       HADOOP_PREFIX=$HADOOP_HOME
+else
+       if [ x"$HADOOP_PREFIX" != x'' ]; then
+               HADOOP_HOME=$HADOOP_PREFIX
+       else
+               if [ -d '/usr/lib/hadoop' ]; then
+                       HADOOP_HOME='/usr/lib/hadoop'
+                       HADOOP_PREFIX=$HADOOP_HOME
+               elif [ -d '/grid/usr/hadoop' ]; then
+                       HADOOP_HOME='/grid/usr/hadoop'
+                       HADOOP_PREFIX=$HADOOP_HOME
+               else
+                       echo 'HADOOP_HOME or HADOOP_PREFIX could not be specified!'
+                       exit 1
+               fi
+       fi
+fi
+
+if [ x"$ARTIFACT_TYPE" = x'' ]; then
+       if [ $HADOOP_PREFIX = '/usr/lib/hadoop' ]; then
+               ARTIFACT_TYPE='package'
+       else
+               ARTIFACT_TYPE='tarball'
+       fi
+fi
+
+echo "HADOOP_HOME: ${HADOOP_HOME}"
+echo "HADOOP_PREFIX: ${HADOOP_PREFIX}"
+echo "ARTIFACT_TYPE: ${ARTIFACT_TYPE}"
+
+for SERVICE in $SERVICES; do
+       case "$SERVICE" in
+               hdfs)
+                       DESC='HDFS Cluster'
+                       ctl_hdfs
+                       ;;
+               hdfs2)
+                       DESC='HDFS-2 Cluster'
+                       ctl_hdfs2
+                       ;;
+               balancer)
+                       DESC='HDFS Balancer'
+                       ctl_balancer
+                       ;;
+               balancer2)
+                       DESC='HDFS-2 Balancer'
+                       ctl_balancer2
+                       ;;
+               yarn)
+                       DESC='YARN Cluster'
+                       ctl_yarn
+                       ;;
+               mapred)
+                       DESC='MapReduce Cluster'
+                       ctl_mapred
+                       ;;
+               historyserver)
+                       DESC='MapReduce JobHistoryServer'
+                       ctl_historyserver
+                       ;;
+               historyserver2)
+                       DESC='MapReduce JobHistoryServer2'
+                       ctl_historyserver2
+                       ;;
+               hbase)
+                       DESC='HBase Cluster'
+                       ctl_hbase
+                       ;;
+               zookeeper)
+                       DESC='ZooKeeper Ensemble'
+                       ctl_zookeeper
+                       ;;
+               *)
+                       help
+                       exit 1
+                       ;;
+       esac
+done
+
+exit 0
index 2984bdf..2b2a21c 100644 (file)
@@ -14,7 +14,18 @@ if node['hadoop']['with_security'] \
 export HADOOP_SECURE_DN_USER=<%= node['hadoop']['HADOOP_SECURE_DN_USER'] %>
 export HADOOP_SECURE_DN_PID_DIR=<%= node['hadoop']['HADOOP_SECURE_DN_PID_DIR'] %>
 export HADOOP_SECURE_DN_LOG_DIR=<%= node['hadoop']['HADOOP_SECURE_DN_LOG_DIR'] %>
-<% end -%>
+<%
+  if node['hadoop']['install_flavor'] == 'hdp' then
+-%>
+# HDP only. workaround for the bug of the secure datanode init script.
+if [ x"$TARGET_USER_NAME" = x'HADOOP_DATANODE_USER' -a -n "$HADOOP_SECURE_DN_USER" ]; then
+    TARGET_USER=root
+    HADOOP_IDENT_STRING=$HADOOP_SECURE_DN_USER
+fi
+<%
+  end
+end
+-%>
 
 # Extra Java CLASSPATH elements.  Optional.
 if [ x"$HADOOP_CLASSPATH" = x ]; then
index 75197f8..10edccc 100644 (file)
@@ -14,7 +14,18 @@ if node['hadoop']['with_security'] \
 export HADOOP_SECURE_DN_USER=<%= node['hadoop']['HADOOP_SECURE_DN_USER'] %>
 export HADOOP_SECURE_DN_PID_DIR=<%= node['hadoop']['HADOOP_SECURE_DN_PID_DIR'] %>
 export HADOOP_SECURE_DN_LOG_DIR=<%= node['hadoop']['HADOOP_SECURE_DN_LOG_DIR'] %>
-<% end -%>
+<%
+  if node['hadoop']['install_flavor'] == 'hdp' then
+-%>
+# HDP only. workaround for the bug of the secure datanode init script.
+if [ x"$TARGET_USER_NAME" = x'HADOOP_DATANODE_USER' -a -n "$HADOOP_SECURE_DN_USER" ]; then
+    TARGET_USER=root
+    HADOOP_IDENT_STRING=$HADOOP_SECURE_DN_USER
+fi
+<%
+  end
+end
+-%>
 
 # Extra Java CLASSPATH elements.  Optional.
 if [ x"$HADOOP_CLASSPATH" = x ]; then
diff --git a/roles/dsh.rb b/roles/dsh.rb
new file mode 100644 (file)
index 0000000..740d1db
--- /dev/null
@@ -0,0 +1,14 @@
+name 'dsh'
+description 'Distributed Shell / Dancer\'s shell installation.'
+
+run_list(
+  'role[apt]',
+  'recipe[dsh::default]',
+)
+
+#env_run_lists()
+
+default_attributes(
+)
+
+#override_attributes()
diff --git a/roles/grid-ops-utils.rb b/roles/grid-ops-utils.rb
new file mode 100644 (file)
index 0000000..48fa126
--- /dev/null
@@ -0,0 +1,14 @@
+name 'grid-ops-utils'
+description 'Utilities for Grid operations.'
+
+run_list(
+  'role[dsh]',
+  'recipe[grid::ops-utils]',
+)
+
+#env_run_lists()
+
+default_attributes(
+)
+
+#override_attributes()
index d2cd809..bc8c940 100644 (file)
@@ -1,8 +1,8 @@
-name 'test-on-localhost'
+name 'test-ah2-on-localhost'
 description 'Testing Apache Hadoop 2.x on local machine'
 
 run_list(
-  'role[test-ah2-on-localhost]',
+  'role[test-on-localhost]',
 )
 
 #env_run_lists()
index c55aae4..de3a793 100644 (file)
@@ -13,7 +13,8 @@ run_list(
   'role[hadoop-pseudo-distributed-with-security]',
   #'role[zookeeper-pseudo-replicated-with-security]',
   #'role[hbase-pseudo-distributed-with-security]',
-  ##'role[pig]'
+  #'role[pig]',
+  'role[grid-ops-utils]'
 )
 
 #env_run_lists()
index 36ed96b..4751f0f 100644 (file)
@@ -13,7 +13,8 @@ run_list(
   'role[hadoop-pseudo-distributed-with-security]',
   #'role[zookeeper-pseudo-replicated-with-security]',
   #'role[hbase-pseudo-distributed-with-security]',
-  ##'role[pig]'
+  #'role[pig]',
+  'role[grid-ops-utils]'
 )
 
 #env_run_lists()
index 9f2471f..28c6f05 100644 (file)
@@ -13,7 +13,8 @@ run_list(
   'role[hadoop-pseudo-distributed-with-security]',
   'role[zookeeper-pseudo-replicated-with-security]',
   'role[hbase-pseudo-distributed-with-security]',
-  'role[pig]'
+  'role[pig]',
+  'role[grid-ops-utils]'
 )
 
 #env_run_lists "prod" => ["recipe[apache2]"], "staging" => ["recipe[apache2::staging]"], "_default" => []