# Initialization:
. ${OCF_ROOT}/resource.d/heartbeat/.ocf-shellfuncs
-#. /usr/lib/ocf/resource.d/heartbeat/.ocf-shellfuncs
+
+L7DIRCONF=${OCF_RESKEY_configfile:-/etc/ha.d/conf/l7directord.cf}
+L7DIRECTORD=${OCF_RESKEY_l7directord:-/usr/sbin/l7directord}
#######################################################################
<content type="string" default="" />
</parameter>
+<parameter name="configfile" unique="1">
+<longdesc lang="en">
+The full pathname of the l7directord configuration file.
+</longdesc>
+<shortdesc lang="en">configuration file path</shortdesc>
+<content type="string" default="/etc/ha.d/conf/l7directord.cf" />
+</parameter>
+
+<parameter name="l7directord">
+<longdesc lang="en">
+The full pathname of the l7directord.
+</longdesc>
+<shortdesc lang="en">l7directord binary path</shortdesc>
+<content type="string" default="/usr/sbin/l7directord" />
+</parameter>
+
</parameters>
<actions>
###############################
isRunning(){
RET=0
- RET=`pgrep -fox "/usr/sbin/l7directord start" | wc -l`
+ RET=`pgrep -fox "$L7DIRECTORD $L7DIRCONF start" | wc -l`
return $RET
}
# Get Resource Status Method
###############################
l7directord_status(){
- /etc/init.d/l7directord status > /dev/null 2>&1
+ $L7DIRECTORD $L7DIRCONF status > /dev/null 2>&1
RET=$?
if [ $RET -eq 0 ]; then
return ${OCF_SUCCESS}
if [ $? -eq ${OCF_SUCCESS} ]; then
# status OK
return ${OCF_SUCCESS}
- else
- break
fi
elif [ $RET -eq 0 ]; then
MSG="l7direcotrd is not running."
outputLog info "l7directord starts."
return ${OCF_SUCCESS}
elif [ $RET -eq 0 ]; then
- /etc/init.d/l7directord start > /dev/null 2>&1
+ $L7DIRECTORD $L7DIRCONF start > /dev/null 2>&1
RET=$?
if [ $RET -ne 0 ]; then
MSG="l7directord start fatal error!: $RET"
outputLog err ${OCF_ERR_GENERIC} $MSG
+ return ${OCF_ERR_GENERIC}
fi
fi
sleep 1
l7vsd_flush
return ${OCF_SUCCESS}
elif [ $RET -eq 1 ]; then
- /etc/init.d/l7directord stop > /dev/null 2>&1
+ $L7DIRECTORD $L7DIRCONF stop > /dev/null 2>&1
RET=$?
if [ $RET -ne 0 ]; then
MSG="l7directord stop fatal error!: $RET"
# Initialization:
. ${OCF_ROOT}/resource.d/heartbeat/.ocf-shellfuncs
-#. /usr/lib/ocf/resource.d/heartbeat/.ocf-shellfuncs
+
+L7SOCKFILE=${OCF_RESKEY_socket:-/var/run/l7vs/l7vs}
#######################################################################
</longdesc>
<shortdesc lang="en">L7vsd resource agent</shortdesc>
-<parameter/>
-<parameters/>
+<parameters>
+<parameter name="socket" unique="0" required="0">
+<longdesc lang="en">
+The socket to be used for l7vsadm.
+</longdesc>
+<shortdesc lang="en">l7vsadm socket</shortdesc>
+<content type="string" default="/var/run/l7vs/l7vs"/>
+</parameter>
+</parameters>
<actions>
<action name="start" timeout="60" />
ocf_log info "l7vsd is already running."
return $OCF_SUCCESS
elif [ $RET -eq $OCF_NOT_RUNNING ]; then
- /etc/init.d/l7vsd start > /dev/null 2>&1
+	L7SOCKDIR=`dirname $L7SOCKFILE`
+ if [ ! -d $L7SOCKDIR ] ; then
+ ocf_log info "Creating l7vsadm socket dir: $L7SOCKDIR"
+ mkdir -p $L7SOCKDIR
+ fi
+ if [ -e $L7SOCKFILE ] ; then
+		ocf_log info "Delete l7vsadm socket file: $L7SOCKFILE"
+ rm -rf $L7SOCKFILE
+ fi
+ /usr/sbin/l7vsd > /dev/null 2>&1
RET=$?
if [ $RET -ne 0 ]; then
MSG="l7vsd start error!"
###############################
l7vsd_stop() {
ocf_log info "l7vsd stopping ..."
- isRunning;
+ l7vsd_status
RET=$?
- if [ $RET -eq 0 ]; then
+ if [ $RET -eq $OCF_NOT_RUNNING ]; then
ocf_log info "l7vsd stopped."
return $OCF_SUCCESS
fi
- /etc/init.d/l7vsd stop > /dev/null 2>&1
+ pkill -f "/usr/sbin/l7vsd"
count=0
while [ $count -le 10 ]
do
- isRunning;
+ l7vsd_status
RET=$?
- if [ $RET -eq 0 ]; then
+ if [ $RET -eq $OCF_NOT_RUNNING ]; then
ocf_log info "l7vsd stopped."
return $OCF_SUCCESS
fi
done
l7vsd_pkill
RET=$?
+ rm -rf $L7SOCKFILE
return $RET
}
while true
do
sleep 1
- isRunning;
+ l7vsd_status
RET=$?
- if [ $RET -eq 0 ]; then
+ if [ $RET -eq $OCF_NOT_RUNNING ]; then
# stop OK
ocf_log info "l7vsd process stopped!"
return $OCF_SUCCESS
}
###############################
-# Resource Running Check Method
-###############################
-isRunning(){
- RET=0
- RET=`pgrep -fox "/usr/sbin/l7vsd" | wc -l`
- return $RET
-}
-
-###############################
# Get Resource Status Method
###############################
l7vsd_status(){
- /etc/init.d/l7vsd status > /dev/null 2>&1
- RET=$?
- if [ $RET -eq 0 ]; then
+ RET=0
+ RET=`pgrep -fox "/usr/sbin/l7vsd" | wc -l`
+ if [ $RET -eq 1 ]; then
return $OCF_SUCCESS
+ elif [ $RET -eq 0 ]; then
+ MSG="l7vsd is not running."
+ outputLog $loglevel ${OCF_NOT_RUNNING} ${MSG}
+ return $OCF_NOT_RUNNING
else
- MSG="l7vsd status ERROR!: $RET"
+ MSG="l7vsd status ERROR!: (ps=$RET)"
outputLog err ${OCF_ERR_GENERIC} ${MSG}
return $OCF_ERR_GENERIC
fi
loglevel="info"
fi
- isRunning;
+ l7vsd_status
RET=$?
- if [ $RET -eq 1 ]; then
- # l7vsd is running
- l7vsd_status
- if [ $? -eq $OCF_SUCCESS ]; then
- # status OK
- return $OCF_SUCCESS
- else
- break
- fi
- elif [ $RET -eq 0 ]; then
- MSG="l7vsd is not running."
- outputLog $loglevel ${OCF_NOT_RUNNING} ${MSG}
+ if [ $RET -eq $OCF_SUCCESS ]; then
+ # status OK
+ return $OCF_SUCCESS
+ elif [ $RET -eq $OCF_NOT_RUNNING ]; then
+ # status NG
return $OCF_NOT_RUNNING
fi
- MSG="l7vsd does not work. (ps=$RET) "
+ MSG="l7vsd duplicated. "
outputLog err ${OCF_ERR_GENERIC} ${MSG}
return $OCF_ERR_GENERIC
}
HB2_TEMPDIR = ${prefix}/share/doc/$(UML7_VERSION)/heartbeat-ra
install:
- $(INSTALL) -b -m 644 -D ./logd.cf $(HB2_TEMPDIR)/logd.cf
- $(INSTALL) -b -m 644 -D ./ha.cf $(HB2_TEMPDIR)/ha.cf
- $(INSTALL) -b -m 600 -D ./authkeys $(HB2_TEMPDIR)/authkeys
- $(INSTALL) -b -m 600 -D ./cib.xml-sample $(HB2_TEMPDIR)/cib.xml
$(INSTALL) -b -m 600 -D ./sample.crm $(HB2_TEMPDIR)/sample.crm
$(INSTALL) -b -m 755 -D ./L7vsd $(HB2_TEMPDIR)/L7vsd
$(INSTALL) -b -m 755 -D ./L7directord $(HB2_TEMPDIR)/L7directord
- $(INSTALL) -b -m 755 -D ./VIPcheck $(HB2_TEMPDIR)/VIPcheck
== Files ==
L7directord: Resource Agent for l7directord.
L7vsd: Resource Agent for l7vsd.
-VIPcheck: Resource Agent for VIP.
-authkeys: Heartbeat authkey example.
-cib.xml-sample: Heartbeat cib.xml example.
-ha.cf: Heartbeat ha.cf example.
-logd.cf: Heartbeat logd.cf example.
+sample.crm: Pacemaker CRM example.
== Set up ==
-See Heartbeat2 install manual for UltraMonkey-L7. (Japanese)
-http://sourceforge.jp/projects/ultramonkey-l7/docs/?category_id=964
+See Pacemaker install manual for UltraMonkey-L7. (Japanese)
+https://osdn.jp/projects/ultramonkey-l7/docs/?category_id=1148
You can use Resource Agents as they are. But you must edit at
least IP addresses in configuration files.
+++ /dev/null
-auth 1
-1 sha1 ClusterKey01
+++ /dev/null
- <cib admin_epoch="0" epoch="0" num_updates="0">
- <configuration>
- <crm_config>
- <cluster_property_set id="cib-bootstrap-options">
- <attributes>
- <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
- <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
- <nvpair id="cib-bootstrap-options-default-resource-stickiness" name="default-resource-stickiness" value="INFINITY"/>
- <nvpair id="cib-bootstrap-options-default-resource-failure-stickiness" name="default-resource-failure-stickiness" value="-INFINITY"/>
- <nvpair id="cib-bootstrap-options-default-action-timeout" name="default-action-timeout" value="120s"/>
- </attributes>
- </cluster_property_set>
- </crm_config>
- <nodes/>
- <resources>
- <group id="grpUltraMonkey1">
- <primitive id="prmVIPcheck" class="ocf" type="VIPcheck" provider="heartbeat">
- <instance_attributes id="prmVIPcheck_instance_attrs">
- <attributes>
- <nvpair id="atr:VIPcheck:ex:VIPcheck:target_ip" name="target_ip" value="192.168.1.200"/>
- <nvpair id="atr:VIPcheck:ex:VIPcheck:count" name="count" value="1"/>
- <nvpair id="atr:VIPcheck:ex:VIPcheck:wait" name="wait" value="10"/>
- </attributes>
- </instance_attributes>
- <operations>
- <op id="op:VIPcheck:ex:VIPcheck:start" name="start" timeout="90s" on_fail="restart" start_delay="4s"/>
- </operations>
- </primitive>
- <primitive id="prmVIP" class="ocf" type="IPaddr2" provider="heartbeat">
- <instance_attributes id="prmVIP_instance_attrs">
- <attributes>
- <nvpair id="atr:VIP:ex:IPadder2:ip" name="ip" value="192.168.1.200"/>
- <nvpair id="atr:VIP:ex:IPadder2:nic" name="nic" value="eth0"/>
- <nvpair id="atr:VIP:ex:IPadder2:cidr_netmask" name="cidr_netmask" value="24"/>
- </attributes>
- </instance_attributes>
- <operations>
- <op id="op:VIP:ex:IPadder2:start" name="start" timeout="60s" on_fail="restart"/>
- <op id="op:VIP:ex:IPadder2:monitor" name="monitor" interval="10s" timeout="60s" on_fail="restart"/>
- <op id="op:VIP:ex:IPadder2:stop" name="stop" timeout="60s" on_fail="block"/>
- </operations>
- </primitive>
- <primitive id="prmL7directord" class="ocf" type="L7directord" provider="heartbeat">
- <operations>
- <op id="op:L7directord:ex:L7directord:start" name="start" timeout="60s" on_fail="restart"/>
- <op id="op:L7directord:ex:L7directord:monitor" name="monitor" interval="10s" timeout="60s" on_fail="restart"/>
- <op id="op:L7directord:ex:L7directord:stop" name="stop" timeout="60s" on_fail="block"/>
- </operations>
- </primitive>
- </group>
- <clone id="grpClone1" ordered="false" interleave="false" notify="false" globally_unique="false">
- <instance_attributes id="grpClone1_attrs">
- <attributes>
- <nvpair id="atr:Clone1:ex:clone:clone_max" name="clone_max" value="2"/>
- <nvpair id="atr:Clone1:ex:clone:clone_node_max" name="clone_node_max" value="1"/>
- </attributes>
- </instance_attributes>
- <primitive id="prmL7vsd" class="ocf" type="L7vsd" provider="heartbeat">
- <operations>
- <op id="op:L7vsd:ex:L7vsd:start" name="start" timeout="60s" on_fail="restart"/>
- <op id="op:L7vsd:ex:L7vsd:monitor" name="monitor" interval="10s" timeout="60s" on_fail="restart"/>
- <op id="op:L7vsd:ex:L7vsd:stop" name="stop" timeout="60s" on_fail="block"/>
- </operations>
- </primitive>
- </clone>
- </resources>
- <constraints>
- <rsc_colocation id="colocation_UltraMonkey1_and_Clone1" from="grpUltraMonkey1" to="grpClone1" score="INFINITY"/>
- <rsc_location id="location_node01_200" rsc="grpUltraMonkey1">
- <rule id="prefered_location_node01_200" score="200">
- <expression attribute="#uname" id="location:grpUltraMonkey:node01" operation="eq" value="LB01"/>
- </rule>
- </rsc_location>
- <rsc_location id="location_node02_100" rsc="grpUltraMonkey1">
- <rule id="prefered_location_node02_100" score="100">
- <expression attribute="#uname" id="location:grpUltraMonkey:node02" operation="eq" value="LB02"/>
- </rule>
- </rsc_location>
- <rsc_location id="rul_PN1_dsc" rsc="grpUltraMonkey1">
- <rule id="prefered_rul_PN1_dsc" score="-INFINITY" boolean_op="and">
- <expression attribute="default_ping_set" id="PN1_dsc:expr:defined" operation="defined"/>
- <expression attribute="default_ping_set" id="PN1_dsc:expr:lt" operation="lt" value="100"/>
- </rule>
- </rsc_location>
- <rsc_location id="rul_diskcheck_status_internal" rsc="grpUltraMonkey1">
- <rule id="preferd_diskcheck_status_internal" score="-INFINITY" boolean_op="and">
- <expression attribute="diskcheck_status_internal" id="diskcheck_status_internal:defined" operation="defined"/>
- <expression attribute="diskcheck_status_internal" id="diskcheck_status_internal:eq" operation="eq" value="ERROR"/>
- </rule>
- </rsc_location>
- <rsc_order id="order_UltraMonkey1_and_Clone1" from="grpClone1" action="start" type="before" to="grpUltraMonkey1" score="0"/>
- </constraints>
- </configuration>
- </cib>
+++ /dev/null
-crm on
-use_logd on
-
-debug 0
-udpport 694
-keepalive 2
-warntime 7
-deadtime 10
-initdead 48
-
-bcast eth2
-bcast eth3
-
-node hpww0101
-node hpww0201
-
-watchdog /dev/watchdog
-respawn root /usr/lib64/heartbeat/pingd -m 100 -a default_ping_set
-ping 192.168.43.14
-respawn root /usr/lib64/heartbeat/diskd -N /dev/sda -a diskcheck_status_internal -i 10
-respawn root /etc/ha.d/monitoring/heartbeat_logmoni.sh monitor
+++ /dev/null
-logfile /var/log/ha-log
-debugfile /var/log/ha-debug
-logfacility none
[Service]
Type=forking
+ExecStartPre=/bin/rm -rf /var/run/l7vs/l7vs
ExecStart=/usr/sbin/l7vsd
ExecStop=/bin/kill $MAINPID
-
+ExecStopPost=/bin/rm -rf /var/run/l7vs/l7vs
[Install]
WantedBy=multi-user.target