--- /dev/null
+# CHANGELOG for grid
+
+This file is used to list changes made in each version of grid.
+
+## 0.1.0:
+
+* Initial release of grid
+
+- - -
+Check the [Markdown Syntax Guide](http://daringfireball.net/projects/markdown/syntax) for help with Markdown.
+
+The [Github Flavored Markdown page](http://github.github.com/github-flavored-markdown/) describes the differences between markdown on github and standard markdown.
--- /dev/null
+grid Cookbook
+=============
+TODO: Enter the cookbook description here.
+
+e.g.
+This cookbook makes your favorite breakfast sandwich.
+
+Requirements
+------------
+TODO: List your cookbook requirements. Be sure to include any requirements this cookbook has on platforms, libraries, other cookbooks, packages, operating systems, etc.
+
+e.g.
+#### packages
+- `toaster` - grid needs toaster to brown your bagel.
+
+Attributes
+----------
+TODO: List your cookbook attributes here.
+
+e.g.
+#### grid::default
+<table>
+ <tr>
+ <th>Key</th>
+ <th>Type</th>
+ <th>Description</th>
+ <th>Default</th>
+ </tr>
+ <tr>
+ <td><tt>['grid']['bacon']</tt></td>
+ <td>Boolean</td>
+ <td>whether to include bacon</td>
+ <td><tt>true</tt></td>
+ </tr>
+</table>
+
+Usage
+-----
+#### grid::default
+TODO: Write usage instructions for each cookbook.
+
+e.g.
+Just include `grid` in your node's `run_list`:
+
+```json
+{
+ "name":"my_node",
+ "run_list": [
+ "recipe[grid]"
+ ]
+}
+```
+
+Contributing
+------------
+TODO: (optional) If this is a public cookbook, detail the process for contributing. If this is a private cookbook, remove this section.
+
+e.g.
+1. Fork the repository on Github
+2. Create a named feature branch (like `add_component_x`)
+3. Write your change
+4. Write tests for your change (if applicable)
+5. Run the tests, ensuring they all pass
+6. Submit a Pull Request using Github
+
+License and Authors
+-------------------
+Authors: TODO: List authors
--- /dev/null
+#
+# Cookbook Name:: grid
+# Attributes:: default
+#
+# Copyright 2013, whitestar
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+default['grid']['etc_root'] = '/grid/etc'
+default['grid']['app_root'] = '/grid/usr'
+default['grid']['vol_root'] = '/grid/vol'
+default['grid']['max_vol_nums'] = '1'
+
--- /dev/null
+# Cookbook metadata for 'grid': provides the shared directory-layout
+# attributes and base directories used by the hadoop/hbase cookbooks.
+name 'grid'
+maintainer 'whitestar'
+maintainer_email ''
+license 'Apache 2.0'
+description 'Installs/Configures grid'
+# README.md is resolved relative to this metadata.rb at cookbook load time.
+long_description IO.read(File.join(File.dirname(__FILE__), 'README.md'))
+version '0.1.0'
--- /dev/null
+#
+# Cookbook Name:: grid
+# Recipe:: default
+#
+# Copyright 2013, whitestar
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+directory node['grid']['etc_root'] do
+ owner 'root'
+ group 'root'
+ mode '0755'
+ action :create
+ recursive true
+end
+
+directory node['grid']['app_root'] do
+ owner 'root'
+ group 'root'
+ mode '0755'
+ action :create
+ recursive true
+end
+
+directory node['grid']['vol_root'] do
+ owner 'root'
+ group 'root'
+ mode '0755'
+ action :create
+ recursive true
+end
+
# limitations under the License.
#
-default['grid']['etc_root'] = '/grid/etc'
-default['grid']['app_root'] = '/grid/usr'
-default['grid']['vol_root'] = '/grid/vol'
-default['grid']['max_vol_nums'] = '1'
-
# default: for pseudo-distributed
default['hadoop']['install_flavor'] = 'apache'
default['hadoop']['version'] = '1.1.2'
default['hadoop']['archive_url'] = 'http://archive.apache.org/dist/hadoop/core'
default['hadoop']['with_security'] = false
+default['hadoop']['with_hbase'] = false
## hadoop-env.sh
default['hadoop']['HADOOP_PREFIX'] = "#{node['grid']['app_root']}/hadoop"
default['hadoop']['HADOOP_CLASSPATH'] = ''
## extra settings
default['hadoop']['extra_configs'] = {
# e.g. 'core-site.xml' => {'k1' => 'v1', 'k2' => 'v2'},
+ 'hadoop-env.sh' => {},
'core-site.xml' => {},
'hdfs-site.xml' => {},
'mapred-site.xml' => {}
supports :manage_home => false
end
-directory node['grid']['app_root'] do
- owner 'root'
- group 'root'
- mode '0755'
- action :create
- recursive true
-end
-
active_vol_nums = 0
node['grid']['max_vol_nums'].to_i.times {|vol_num|
target_vol_dir = "#{node['grid']['vol_root']}/#{vol_num}"
# The scheduling priority for daemon processes. See 'man nice'.
# export HADOOP_NICENESS=10
+
+
+<%
+this_file = 'hadoop-env.sh'
+if defined? node['hadoop']['extra_configs'][this_file] \
+ && node['hadoop']['extra_configs'][this_file] != nil then
+ node['hadoop']['extra_configs'][this_file].each do |key, value|
+%>
+export <%= key %>=<%= value %>
+<%
+ end
+end
+%>
+
</property>
<% end %>
+<% if node['hadoop']['with_hbase'] then %>
+ <!-- for HBase -->
+ <property>
+ <name>dfs.support.append</name>
+ <value>true</value>
+ </property>
+ <property>
+ <name>dfs.datanode.max.xcievers</name>
+ <value>4096</value>
+ </property>
+<% end %>
+
<%
this_file = 'hdfs-site.xml'
if defined? node['hadoop']['extra_configs'][this_file] \
--- /dev/null
+# CHANGELOG for hbase
+
+This file is used to list changes made in each version of hbase.
+
+## 0.1.0:
+
+* Initial release of hbase
+
+- - -
+Check the [Markdown Syntax Guide](http://daringfireball.net/projects/markdown/syntax) for help with Markdown.
+
+The [Github Flavored Markdown page](http://github.github.com/github-flavored-markdown/) describes the differences between markdown on github and standard markdown.
--- /dev/null
+hbase Cookbook
+==============
+TODO: Enter the cookbook description here.
+
+e.g.
+This cookbook makes your favorite breakfast sandwich.
+
+Requirements
+------------
+TODO: List your cookbook requirements. Be sure to include any requirements this cookbook has on platforms, libraries, other cookbooks, packages, operating systems, etc.
+
+e.g.
+#### packages
+- `toaster` - hbase needs toaster to brown your bagel.
+
+Attributes
+----------
+TODO: List your cookbook attributes here.
+
+e.g.
+#### hbase::default
+<table>
+ <tr>
+ <th>Key</th>
+ <th>Type</th>
+ <th>Description</th>
+ <th>Default</th>
+ </tr>
+ <tr>
+ <td><tt>['hbase']['bacon']</tt></td>
+ <td>Boolean</td>
+ <td>whether to include bacon</td>
+ <td><tt>true</tt></td>
+ </tr>
+</table>
+
+Usage
+-----
+#### hbase::default
+TODO: Write usage instructions for each cookbook.
+
+e.g.
+Just include `hbase` in your node's `run_list`:
+
+```json
+{
+ "name":"my_node",
+ "run_list": [
+ "recipe[hbase]"
+ ]
+}
+```
+
+Contributing
+------------
+TODO: (optional) If this is a public cookbook, detail the process for contributing. If this is a private cookbook, remove this section.
+
+e.g.
+1. Fork the repository on Github
+2. Create a named feature branch (like `add_component_x`)
+3. Write your change
+4. Write tests for your change (if applicable)
+5. Run the tests, ensuring they all pass
+6. Submit a Pull Request using Github
+
+License and Authors
+-------------------
+Authors: TODO: List authors
--- /dev/null
+#
+# Cookbook Name:: hbase
+# Attributes:: default
+#
+# Copyright 2013, whitestar
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# default: for pseudo-distributed
+# default: for pseudo-distributed
+default['hbase']['install_flavor'] = 'apache'
+default['hbase']['version'] = '0.94.8'
+default['hbase']['archive_url'] = 'http://archive.apache.org/dist/hbase'
+# When true, the recipe appends the hbase user to the hadoop group.
+default['hbase']['member_of_hadoop'] = false
+# When true, the recipe renders the jaas-*.conf templates and creates
+# the keytab directory, and the templates emit Kerberos settings.
+default['hbase']['with_security'] = false
+## hbase-env.sh
+default['hbase']['HBASE_PREFIX'] = "#{node['grid']['app_root']}/hbase"
+default['hbase']['HBASE_CLASSPATH'] = "#{node['hadoop']['HADOOP_PREFIX']}/conf"
+default['hbase']['HBASE_MANAGES_ZK'] = 'false'
+## hbase-site.xml
+default['hbase']['this.cluster.name'] = 'localhost'
+default['hbase']['this.domain'] = 'localhost'
+default['hbase']['this.realm'] = 'LOCALDOMAIN'
+# Keytab directory for this cluster; used by the recipe's security
+# section and referenced as ${this.keytab.dir} in hbase-site.xml.
+default['hbase']['this.keytab.dir'] = "#{node['grid']['etc_root']}/keytabs/#{node['hbase']['this.cluster.name']}"
+default['hbase']['this.namenode.fqdn'] = '${this.cluster.name}-nn.${this.domain}'
+default['hbase']['hbase.rootdir'] = 'hdfs://${this.namenode.fqdn}:9000/hbase'
+default['hbase']['hbase.cluster.distributed'] = 'true'
+default['hbase']['hbase.zookeeper.property.clientPort'] = '2181'
+default['hbase']['hbase.zookeeper.quorum'] = 'localhost'
+### if with_security
+
+## extra settings
+# Free-form key/value pairs appended verbatim to the corresponding
+# rendered configuration file (see the templates' extra_configs loops).
+default['hbase']['extra_configs'] = {
+  # e.g. 'hbase-site.xml' => {'k1' => 'v1', 'k2' => 'v2'},
+  'hbase-env.sh' => {},
+  'hbase-site.xml' => {}
+}
+#default['hbase'][''] =
+
+=begin
+# e.g. for full-distributed
+default['hbase']['this.cluster.name'] = 'pleiades'
+default['hbase']['this.domain'] = 'grid.example.com'
+default['hbase']['this.realm'] = 'GRID.EXAMPLE.COM'
+=end
--- /dev/null
+name 'hbase'
+maintainer 'whitestar'
+maintainer_email ''
+license 'Apache 2.0'
+description 'Installs/Configures hbase'
+long_description IO.read(File.join(File.dirname(__FILE__), 'README.md'))
+version '0.1.0'
--- /dev/null
+#
+# Cookbook Name:: hbase
+# Recipe:: default
+#
+# Copyright 2013, whitestar
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Parse the configured HBase version into major and major.middle
+# strings; abort the Chef run if it is not of the form X.Y.Z.
+hbase_major_version = ''
+hbase_middle_version = ''
+
+if /^(\d+)\.(\d+)\.(\d+)$/ =~ node['hbase']['version'] then
+  hbase_major_version = $1
+  hbase_middle_version = "#{$1}.#{$2}"
+else
+  Chef::Application.fatal!("Invalid HBase version: #{node['hbase']['version']}")
+end
+
+# Service accounts with fixed UIDs/GIDs so they match across nodes.
+users = {
+  :hadoop => {:name => 'hadoop', :uid => 10001},
+  :hbase => {:name => 'hbase', :uid => 10030}
+}
+
+# Render each file in conf_files under conf_dir from the cookbook's
+# templates, passing tpl_vars through to the template.
+# NOTE(review): recipe-local variables such as hbase_middle_version are
+# not visible inside a `def`; the commented-out source line would need
+# that value passed in as an argument to work — confirm before enabling.
+def conf_template(conf_dir, conf_files, tpl_vars)
+  conf_files.each {|conf_file|
+    template "#{conf_dir}/#{conf_file}" do
+      source "conf/#{conf_file}"
+      #source "conf-#{hbase_middle_version}/#{conf_file}"
+      owner 'root'
+      group 'root'
+      mode '0644'
+      variables(tpl_vars)
+    end
+  }
+end
+
+# Create the hbase service group and user with the fixed IDs above.
+# The :hadoop entry is skipped here; that account is expected to be
+# managed elsewhere (by the hadoop cookbook).
+users.each {|key, user|
+if key != :hadoop
+  group user[:name] do
+    gid user[:uid]
+    members []
+    action :create
+  end
+
+  user user[:name] do
+    uid user[:uid]
+    gid user[:uid]
+    home "/home/#{user[:name]}"
+    shell '/bin/sh'
+    password nil
+    supports :manage_home => false
+  end
+end
+}
+
+# Optionally add hbase as a supplementary member of the hadoop group
+# (append true preserves the group's existing members).
+if node['hbase']['member_of_hadoop'] then
+  group 'add hbase to hadoop' do
+    group_name users[:hadoop][:name]
+    gid users[:hadoop][:uid]
+    members ['hbase']
+    append true
+    action :create
+  end
+end
+
+# Download and unpack the HBase release tarball into the shared app
+# root, then point the HBASE_PREFIX symlink at the versioned directory.
+hbase_install_root = "#{node['grid']['app_root']}/hbase-#{node['hbase']['version']}"
+hbase_tarball = "hbase-#{node['hbase']['version']}.tar.gz"
+downloaded_hbase_tarball = "#{Chef::Config[:file_cache_path]}/#{hbase_tarball}"
+
+# NOTE(review): this guard runs at recipe compile time, so the whole
+# install section (including the link resource) is skipped on runs where
+# the directory already exists — confirm this is intended.
+if ! FileTest.directory? hbase_install_root then
+  remote_file downloaded_hbase_tarball do
+    source "#{node['hbase']['archive_url']}/hbase-#{node['hbase']['version']}/#{hbase_tarball}"
+    action :create_if_missing
+  end
+
+  package 'tar' do
+    action :install
+  end
+
+  bash 'install_hbase' do
+    code <<-EOC
+      tar xvzf #{downloaded_hbase_tarball} -C #{node['grid']['app_root']}
+    EOC
+    creates hbase_install_root
+  end
+
+  link node['hbase']['HBASE_PREFIX'] do
+    to hbase_install_root
+  end
+end
+
+# Configuration files rendered for every install.
+conf_files = [
+  'hadoop-metrics.properties',
+  'hbase-env.sh',
+  'hbase-policy.xml',
+  'hbase-site.xml',
+  'log4j.properties',
+  'regionservers'
+]
+
+# JAAS login configurations, rendered only when with_security is true
+# (see the security section below).
+jaas_conf_files = [
+  'jaas-client.conf',
+  'jaas-hm.conf',
+  'jaas-hr.conf'
+]
+
+conf_dir = "#{hbase_install_root}/conf"
+tpl_vars = {}
+conf_template(conf_dir, conf_files, tpl_vars)
+
+# Log and pid directories on the first data volume, owned by the hbase
+# service account (matches HBASE_LOG_DIR / HBASE_PID_DIR in the
+# hbase-env.sh template).
+%w{log run}.each {|dir|
+  directory "#{node['grid']['vol_root']}/0/var/#{dir}/hbase" do
+    owner 'hbase'
+    group 'hbase'
+    mode '0755'
+    action :create
+    recursive true
+  end
+}
+
+# Manual post-install steps: the /hbase HDFS root must be created by the
+# HDFS superuser before the HBase daemons can start.
+log <<-EOM
+Note:
+1. setup directories on HDFS:
+  $ cd $HADOOP_PREFIX
+  $ sudo -u hdfs ./bin/hadoop fs -mkdir /hbase
+  $ sudo -u hdfs ./bin/hadoop fs -chown hbase:hbase /hbase
+  $ sudo -u hdfs ./bin/hadoop fs -chmod 700 /hbase
+2. Start command:
+  $ cd $HBASE_PREFIX
+  $ sudo -u hbase ./bin/hbase-daemon.sh start master
+  $ sudo -u hbase ./bin/hbase-daemon.sh start regionserver
+EOM
+
+# with security
+if node['hbase']['with_security'] then
+
+ tpl_vars = {}
+ conf_template(conf_dir, jaas_conf_files, tpl_vars)
+
+ directory "#{node['hbase']['keytab_dir']} for hbase" do
+ path node['hbase']['keytab_dir']
+ owner 'root'
+ group 'root'
+ mode '0755'
+ action :create
+ recursive true
+ end
+
+end
+
--- /dev/null
+# See http://wiki.apache.org/hadoop/GangliaMetrics
+# Make sure you know whether you are using ganglia 3.0 or 3.1.
+# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
+# And, yes, this file is named hadoop-metrics.properties rather than
+# hbase-metrics.properties because we're leveraging the hadoop metrics
+# package and hadoop-metrics.properties is a hardcoded name, at least
+# for the moment.
+#
+# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
+# GMETADHOST_IP is the hostname (or) IP address of the server on which the ganglia
+# meta daemon (gmetad) service is running
+
+# Configuration of the "hbase" context for NullContextWithUpdateThread
+# NullContextWithUpdateThread is a null context which has a thread calling
+# periodically when monitoring is started. This keeps the data sampled
+# correctly.
+hbase.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
+hbase.period=10
+
+# Configuration of the "hbase" context for file
+# hbase.class=org.apache.hadoop.hbase.metrics.file.TimeStampingFileContext
+# hbase.fileName=/tmp/metrics_hbase.log
+
+# HBase-specific configuration to reset long-running stats (e.g. compactions)
+# If this variable is left out, then the default is no expiration.
+hbase.extendedperiod = 3600
+
+# Configuration of the "hbase" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+# hbase.period=10
+# hbase.servers=GMETADHOST_IP:8649
+
+# Configuration of the "jvm" context for null
+jvm.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
+jvm.period=10
+
+# Configuration of the "jvm" context for file
+# jvm.class=org.apache.hadoop.hbase.metrics.file.TimeStampingFileContext
+# jvm.fileName=/tmp/metrics_jvm.log
+
+# Configuration of the "jvm" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+# jvm.period=10
+# jvm.servers=GMETADHOST_IP:8649
+
+# Configuration of the "rpc" context for null
+rpc.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
+rpc.period=10
+
+# Configuration of the "rpc" context for file
+# rpc.class=org.apache.hadoop.hbase.metrics.file.TimeStampingFileContext
+# rpc.fileName=/tmp/metrics_rpc.log
+
+# Configuration of the "rpc" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+# rpc.period=10
+# rpc.servers=GMETADHOST_IP:8649
+
+# Configuration of the "rest" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# rest.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# rest.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+# rest.period=10
+# rest.servers=GMETADHOST_IP:8649
--- /dev/null
+#
+#/**
+# * Copyright 2007 The Apache Software Foundation
+# *
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Set environment variables here.
+
+# This script sets variables multiple times over the course of starting an hbase process,
+# so try to keep things idempotent unless you want to take an even deeper look
+# into the startup scripts (bin/hbase, etc.)
+
+
+export JAVA_HOME=<%= node['java']['java_home'] %>
+
+# The directory where pid files are stored. /tmp by default.
+export HBASE_PID_DIR=<%= node['grid']['vol_root'] %>/0/var/run/${USER}
+# Where log files are stored. $HBASE_HOME/logs by default.
+export HBASE_LOG_DIR=<%= node['grid']['vol_root'] %>/0/var/log/${USER}
+
+# export HADOOP_HOME or HADOOP_PREFIX
+# for adding hadoop native library path to java.library.path. see bin/hbase.
+# Note: HBASE_LIBRARY_PATH is N/A!
+export HADOOP_PREFIX=<%= node['hadoop']['HADOOP_PREFIX'] %>
+export HBASE_CLASSPATH=<%= node['hbase']['HBASE_CLASSPATH'] %>
+
+export HBASE_OPTS="-XX:+UseConcMarkSweepGC"
+
+# Tell HBase whether it should manage its own instance of Zookeeper or not.
+export HBASE_MANAGES_ZK=<%= node['hbase']['HBASE_MANAGES_ZK'] %>
+
+<% if node['hbase']['with_security'] then %>
+FQDN=`hostname --fqdn`
+export HBASE_OPTS="${HBASE_OPTS} -Djava.security.auth.login.config=${HBASE_HOME}/conf/jaas-client.conf"
+export HBASE_MASTER_OPTS="${HBASE_MASTER_OPTS} -Dthis.fqdn=${FQDN} -Djava.security.auth.login.config=${HBASE_HOME}/conf/jaas-hm.conf"
+export HBASE_REGIONSERVER_OPTS="${HBASE_REGIONSERVER_OPTS} -Dthis.fqdn=${FQDN} -Djava.security.auth.login.config=${HBASE_HOME}/conf/jaas-hr.conf"
+<% end %>
+
+
+# The java implementation to use. Java 1.6 required.
+# export JAVA_HOME=/usr/java/jdk1.6.0/
+
+# Extra Java CLASSPATH elements. Optional.
+# export HBASE_CLASSPATH=
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+# export HBASE_HEAPSIZE=1000
+
+# Extra Java runtime options.
+# Below are what we set by default. May only work with SUN JVM.
+# For more on why as well as other possible settings,
+# see http://wiki.apache.org/hadoop/PerformanceTuning
+#export HBASE_OPTS="-XX:+UseConcMarkSweepGC"
+
+# Uncomment one of the below three options to enable java garbage collection logging for the server-side processes.
+
+# This enables basic gc logging to the .out file.
+# export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps"
+
+# This enables basic gc logging to its own file.
+# If FILE-PATH is not replaced, the log file(.gc) would still be generated in the HBASE_LOG_DIR .
+# export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:<FILE-PATH>"
+
+# This enables basic GC logging to its own file with automatic log rolling. Only applies to jdk 1.6.0_34+ and 1.7.0_2+.
+# If FILE-PATH is not replaced, the log file(.gc) would still be generated in the HBASE_LOG_DIR .
+# export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:<FILE-PATH> -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=1 -XX:GCLogFileSize=512M"
+
+# Uncomment one of the below three options to enable java garbage collection logging for the client processes.
+
+# This enables basic gc logging to the .out file.
+# export CLIENT_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps"
+
+# This enables basic gc logging to its own file.
+# If FILE-PATH is not replaced, the log file(.gc) would still be generated in the HBASE_LOG_DIR .
+# export CLIENT_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:<FILE-PATH>"
+
+# This enables basic GC logging to its own file with automatic log rolling. Only applies to jdk 1.6.0_34+ and 1.7.0_2+.
+# If FILE-PATH is not replaced, the log file(.gc) would still be generated in the HBASE_LOG_DIR .
+# export CLIENT_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:<FILE-PATH> -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=1 -XX:GCLogFileSize=512M"
+
+# Uncomment below if you intend to use the EXPERIMENTAL off heap cache.
+# export HBASE_OPTS="$HBASE_OPTS -XX:MaxDirectMemorySize="
+# Set hbase.offheapcache.percentage in hbase-site.xml to a nonzero value.
+
+
+# Uncomment and adjust to enable JMX exporting
+# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
+# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
+#
+# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
+# export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10101"
+# export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10102"
+# export HBASE_THRIFT_OPTS="$HBASE_THRIFT_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
+# export HBASE_ZOOKEEPER_OPTS="$HBASE_ZOOKEEPER_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
+
+# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
+# export HBASE_REGIONSERVERS=${HBASE_HOME}/conf/regionservers
+
+# File naming hosts on which backup HMaster will run. $HBASE_HOME/conf/backup-masters by default.
+# export HBASE_BACKUP_MASTERS=${HBASE_HOME}/conf/backup-masters
+
+# Extra ssh options. Empty by default.
+# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
+
+# Where log files are stored. $HBASE_HOME/logs by default.
+# export HBASE_LOG_DIR=${HBASE_HOME}/logs
+
+# Enable remote JDWP debugging of major HBase processes. Meant for Core Developers
+# export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8070"
+# export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8071"
+# export HBASE_THRIFT_OPTS="$HBASE_THRIFT_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8072"
+# export HBASE_ZOOKEEPER_OPTS="$HBASE_ZOOKEEPER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8073"
+
+# A string representing this instance of hbase. $USER by default.
+# export HBASE_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes. See 'man nice'.
+# export HBASE_NICENESS=10
+
+# The directory where pid files are stored. /tmp by default.
+# export HBASE_PID_DIR=/var/hadoop/pids
+
+# Seconds to sleep between slave commands. Unset by default. This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HBASE_SLAVE_SLEEP=0.1
+
+# Tell HBase whether it should manage its own instance of Zookeeper or not.
+# export HBASE_MANAGES_ZK=true
+
+
+<%
+this_file = 'hbase-env.sh'
+if defined? node['hbase']['extra_configs'][this_file] \
+ && node['hbase']['extra_configs'][this_file] != nil then
+ node['hbase']['extra_configs'][this_file].each do |key, value|
+%>
+export <%= key %>=<%= value %>
+<%
+ end
+end
+%>
+
--- /dev/null
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+ <property>
+ <name>security.client.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for HRegionInterface protocol implementations (ie.
+ clients talking to HRegionServers)
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.admin.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for HMasterInterface protocol implementation (ie.
+ clients talking to HMaster for admin operations).
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.masterregion.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for HMasterRegionInterface protocol implementations
+ (for HRegionServers communicating with HMaster)
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+</configuration>
--- /dev/null
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+ <property>
+ <name>this.cluster.name</name>
+ <value><%= node['hbase']['this.cluster.name'] %></value>
+ <!-- <value>pleiades</value> -->
+ </property>
+ <property>
+ <name>this.domain</name>
+ <value><%= node['hbase']['this.domain'] %></value>
+ <!-- <value>grid.example.com</value> -->
+ </property>
+ <property>
+ <name>this.realm</name>
+ <value><%= node['hbase']['this.realm'] %></value>
+ <!-- <value>GRID.EXAMPLE.COM</value> -->
+ </property>
+ <property>
+ <name>this.keytab.dir</name>
+ <value><%= node['hbase']['this.keytab.dir'] %></value>
+ </property>
+ <property>
+ <name>this.namenode.fqdn</name>
+ <value><%= node['hbase']['this.namenode.fqdn'] %></value>
+ <!-- <value>${this.cluster.name}-nn.${this.domain}</value> -->
+ </property>
+
+ <property>
+ <name>hbase.rootdir</name>
+ <value><%= node['hbase']['hbase.rootdir'] %></value>
+ <description>The directory shared by RegionServers.
+ </description>
+ </property>
+ <property>
+ <name>hbase.cluster.distributed</name>
+ <value><%= node['hbase']['hbase.cluster.distributed'] %></value>
+ <description>The mode the cluster will be in. Possible values are
+ false: standalone and pseudo-distributed setups with managed Zookeeper
+ true: fully-distributed with unmanaged Zookeeper Quorum (see hbase-env.sh)
+ </description>
+ </property>
+
+ <property>
+ <name>hbase.zookeeper.property.clientPort</name>
+ <value><%= node['hbase']['hbase.zookeeper.property.clientPort'] %></value>
+ <description>Property from ZooKeeper's config zoo.cfg.
+ The port at which the clients will connect.
+ </description>
+ </property>
+ <property>
+ <name>hbase.zookeeper.quorum</name>
+ <value><%= node['hbase']['hbase.zookeeper.quorum'] %></value>
+ <!-- <value>zk00.grid.example.com,zk01.grid.example.com,zk02.grid.example.com</value> -->
+ <description>Comma separated list of servers in the ZooKeeper Quorum.
+ For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
+ By default this is set to localhost for local and pseudo-distributed modes
+ of operation. For a fully-distributed setup, this should be set to a full
+ list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
+ this is the list of servers which we will start/stop ZooKeeper on.
+ </description>
+ </property>
+
+<% if node['hbase']['with_security'] then %>
+ <property>
+ <name>hbase.security.authentication</name>
+ <value>kerberos</value>
+ </property>
+ <property>
+ <name>hbase.security.authorization</name>
+ <value>true</value>
+ </property>
+ <property>
+ <name>hbase.rpc.engine</name>
+ <value>org.apache.hadoop.hbase.ipc.SecureRpcEngine</value>
+ </property>
+ <property>
+ <name>hbase.superuser</name>
+ <value>hbase</value>
+ </property>
+ <property>
+ <name>hbase.data.umask.enable</name>
+ <value>true</value>
+ </property>
+ <property>
+ <name>hbase.data.umask</name>
+ <value>077</value>
+ </property>
+ <property>
+ <name>hbase.coprocessor.master.classes</name>
+ <value>org.apache.hadoop.hbase.security.access.AccessController</value>
+ </property>
+ <property>
+ <name>hbase.coprocessor.region.classes</name>
+ <value>org.apache.hadoop.hbase.security.token.TokenProvider,
+ org.apache.hadoop.hbase.security.access.AccessController</value>
+ </property>
+ <property>
+ <name>hbase.master.kerberos.principal</name>
+ <value>hbase/localhost@${this.realm}</value>
+ <!-- <value>hbase/_HOST@${this.realm}</value> -->
+ </property>
+ <property>
+ <name>hbase.master.keytab.file</name>
+ <value>${this.keytab.dir}/hm.keytab</value>
+ </property>
+ <property>
+ <name>hbase.regionserver.kerberos.principal</name>
+ <value>hbase/_HOST@${this.realm}</value>
+ </property>
+ <property>
+ <name>hbase.regionserver.keytab.file</name>
+ <value>${this.keytab.dir}/hr.keytab</value>
+ </property>
+
+ <!--
+ <property>
+ <name>hbase.rpc.protection</name>
+ <value>privacy</value>
+ </property>
+ -->
+<% end %>
+
+<%
+this_file = 'hbase-site.xml'
+# NOTE(review): in Ruby, `defined? a && b` parses as `defined?(a && b)` and
+# returns the truthy string "expression" regardless, so this guard is
+# effectively a no-op; the `!= nil` comparison below is what actually
+# prevents calling #each on nil. Confirm that
+# node['hbase']['extra_configs'] itself can never be nil here, otherwise
+# the subscript on the next line would raise before the nil check runs.
+if defined? node['hbase']['extra_configs'][this_file] \
+ && node['hbase']['extra_configs'][this_file] != nil then
+ node['hbase']['extra_configs'][this_file].each do |key,value|
+%>
+ <property>
+ <name><%= key %></name>
+ <value><%= value %></value>
+ </property>
+<%
+ end
+end
+%>
+</configuration>
--- /dev/null
+Client {
+ com.sun.security.auth.module.Krb5LoginModule required
+ useKeyTab=false
+ useTicketCache=true;
+};
--- /dev/null
+Client {
+ com.sun.security.auth.module.Krb5LoginModule required
+ useKeyTab=true
+ useTicketCache=false
+ keyTab="/grid/etc/keytabs/localhost/hm.keytab"
+ principal="hbase/localhost@LOCALDOMAIN";
+ // for distributed mode.
+ /*
+ principal="hbase/${this.fqdn}@GRID.EXAMPLE.COM";
+ principal="hbase/hm00.grid.example.com@GRID.EXAMPLE.COM";
+ principal="hbase/hm01.grid.example.com@GRID.EXAMPLE.COM";
+ */
+};
--- /dev/null
+Client {
+ com.sun.security.auth.module.Krb5LoginModule required
+ useKeyTab=true
+ useTicketCache=false
+ keyTab="/grid/etc/keytabs/localhost/hr.keytab"
+ principal="hbase/localhost@LOCALDOMAIN";
+ // for distributed mode.
+ /*
+ principal="hbase/${this.fqdn}@GRID.EXAMPLE.COM";
+ principal="hbase/dn00000.grid.example.com@GRID.EXAMPLE.COM";
+ principal="hbase/dn00001.grid.example.com@GRID.EXAMPLE.COM";
+ */
+};
--- /dev/null
+# Define some default values that can be overridden by system properties
+hbase.root.logger=INFO,console
+hbase.security.logger=INFO,console
+hbase.log.dir=.
+hbase.log.file=hbase.log
+
+# Define the root logger to the system property "hbase.root.logger".
+log4j.rootLogger=${hbase.root.logger}
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# Daily Rolling File Appender
+#
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+#
+# Security audit appender
+#
+hbase.security.log.file=SecurityAuth.audit
+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAS.File=${hbase.log.dir}/${hbase.security.log.file}
+log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.category.SecurityLogger=${hbase.security.logger}
+log4j.additivity.SecurityLogger=false
+#log4j.logger.SecurityLogger.org.apache.hadoop.hbase.security.access.AccessController=TRACE
+
+#
+# Null Appender
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+# Custom Logging levels
+
+log4j.logger.org.apache.zookeeper=INFO
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.hbase=DEBUG
+# Make these two classes INFO-level. Make them DEBUG to see more zk debug.
+log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO
+log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO
+#log4j.logger.org.apache.hadoop.dfs=DEBUG
+# Set this class to log INFO only otherwise it's OTT
+
+# Uncomment this line to enable tracing on _every_ RPC call (this can be a lot of output)
+#log4j.logger.org.apache.hadoop.ipc.HBaseServer.trace=DEBUG
+
+# Uncomment the below if you want to remove logging of client region caching
+# and scan of .META. messages
+# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=INFO
+# log4j.logger.org.apache.hadoop.hbase.client.MetaScanner=INFO
# limitations under the License.
#
-default['grid']['etc_root'] = '/grid/etc'
-default['grid']['app_root'] = '/grid/usr'
-default['grid']['vol_root'] = '/grid/vol'
-
# default: for pseudo-replicated
default['zookeeper']['install_flavor'] = 'apache'
default['zookeeper']['version'] = '3.4.5'
}
if node['zookeeper']['member_of_hadoop'] then
- group users[:hadoop][:name] do
+ group 'add_zookeeper_to_hadoop' do
+ group_name users[:hadoop][:name]
gid users[:hadoop][:uid]
members ['zookeeper']
append true
end
end
-directory node['grid']['app_root'] do
- owner 'root'
- group 'root'
- mode '0755'
- action :create
- recursive true
-end
-
zookeeper_install_root = "#{node['grid']['app_root']}/zookeeper-#{node['zookeeper']['version']}"
zookeeper_tarball = "zookeeper-#{node['zookeeper']['version']}.tar.gz"
downloaded_zookeeper_tarball = "#{Chef::Config[:file_cache_path]}/#{zookeeper_tarball}"
# with security
if node['zookeeper']['with_security'] then
-directory node['zookeeper']['keytab_dir'] do
+directory "#{node['zookeeper']['keytab_dir']} for zookeeper" do
+ path node['zookeeper']['keytab_dir']
owner 'root'
group 'root'
mode '0755'
run_list(
#'recipe[yum]',
'role[java]',
+ 'recipe[grid]',
'recipe[hadoop]'
)
--- /dev/null
+# Chef role: single-node HBase setup. Delegates the full recipe chain to
+# role[hbase] and flips the Hadoop attribute needed for HBase support.
+name 'hbase-pseudo-distributed'
+description 'HBase pseudo distributed node'
+
+run_list(
+ #'recipe[yum]',
+ 'role[hbase]'
+)
+
+#env_run_lists "prod" => ["recipe[apache2]"], "staging" => ["recipe[apache2::staging]"], "_default" => []
+
+default_attributes(
+ 'hadoop' => {
+ # Tells the hadoop cookbook to configure itself for an HBase deployment.
+ 'with_hbase' => true
+ }
+)
+
+#override_attributes "apache2" => { "max_children" => "50" }
--- /dev/null
+# Chef role: base HBase node. Orders the run_list so Java, the grid layout
+# cookbook, and Hadoop are converged before the hbase recipe runs.
+name 'hbase'
+description 'HBase node'
+
+run_list(
+ #'recipe[yum]',
+ 'role[java]',
+ 'recipe[grid]',
+ 'recipe[hadoop]',
+ 'recipe[hbase]'
+)
+
+#env_run_lists "prod" => ["recipe[apache2]"], "staging" => ["recipe[apache2::staging]"], "_default" => []
+
+default_attributes(
+ 'hadoop' => {
+ # Tells the hadoop cookbook to configure itself for an HBase deployment.
+ 'with_hbase' => true
+ }
+)
+
+#override_attributes "apache2" => { "max_children" => "50" }
run_list(
'role[mocker]',
'role[hadoop-pseudo-distributed-with-security]',
- 'role[zookeeper-pseudo-replicated-with-security]'
+ 'role[zookeeper-pseudo-replicated-with-security]',
+ 'role[hbase-pseudo-distributed]'
)
#env_run_lists "prod" => ["recipe[apache2]"], "staging" => ["recipe[apache2::staging]"], "_default" => []
run_list(
#'recipe[yum]',
'role[java]',
+ 'recipe[grid]',
'recipe[zookeeper]'
)