#!/bin/bash
#
# SAPHanaTopology
#
# Description:	Clone resource to analyze the SAP HANA topology
#
###########################################################################################################################
#
# SAPHanaTopology is a fork of SAPHana
# Thanks to Alexander Krauth for providing SAPInstance and SAPDatabase
#
# SAPHanaTopology: (short sht)
# Author:       Fabian Herschel, February 2014
# Support:      linux@sap.com
# License:      GNU General Public License (GPL)
# Copyright:    (c) 2014 SUSE Linux Products GmbH
#               (c) 2015-2016 SUSE Linux GmbH
#
# An example usage:
#      See usage() function below for more details...
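#      A minimal illustrative crm configuration (resource ids, SID and instance number
#      are placeholders; the ocf provider prefix depends on where this RA is installed):
#          primitive rsc_SAPHanaTopology_HA1_HDB10 ocf:suse:SAPHanaTopology \
#              params SID=HA1 InstanceNumber=10 \
#              op monitor interval=60 timeout=60
#          clone cln_SAPHanaTopology_HA1_HDB10 rsc_SAPHanaTopology_HA1_HDB10 \
#              meta clone-node-max=1 interleave=true
#      (clone-node-max=1 matches the check in is_clone() below)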
#
# OCF instance parameters:
#   OCF_RESKEY_SID              (LNX, NDB, SLE)
#   OCF_RESKEY_InstanceNumber   (00..99)
#   OCF_RESKEY_DIR_EXECUTABLE   (optional, well known directories will be searched by default)
#   OCF_RESKEY_SAPHanaFilter    (outdated, replaced by cluster property hana_${sid}_glob_filter)
#
#######################################################################
#
# Initialization:
SAPHanaVersion="0.152.17"
timeB=$(date '+%s')

: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs

#######################################################################
#
log_attributes=false
if ocf_is_true "$log_attributes"; then
    log_attr_file="/var/log/fhATTRIBUTES"
else
    log_attr_file="/dev/null"
fi

HANA_STATE_PRIMARY=0
HANA_STATE_SECONDARY=1
HANA_STATE_STANDALONE=2
HANA_STATE_DEFECT=3

debug_attributes=0
SH=/bin/sh

#
# function: super_ocf_log - wrapper function for ocf_log in order to catch the usual logging and route it into the super log
# params:   LOG_MESSAGE
# globals:  SUPER_LOG_PATH, SAPHanaFilter
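# example (as used throughout this RA):
#   super_ocf_log info "FLOW $FUNCNAME ($*)"
# dbg/warn/err messages are always passed to ocf_log; info messages are only logged
# if their type prefix (ACT, RA, FLOW, DEC, ...) matches the SAPHanaFilter setting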
function super_ocf_log() {
    local level="$1"
    local message="$2"
    local skip=1
    local mtype=""
    local search=0
    local shf="${SAPHanaFilter:-all}"
    #ocf_log "info" "super_ocf_log: f:$shf l:$level m:$message"
    # message levels: (dbg)|info|warn|err|error
    #
    # message types:  (ACT|RA|FLOW|DBG|LPA|DEC)
    case "$level" in
        dbg | debug | warn | err | error ) skip=0
        ;;
        info )
        case "$shf" in
            all) skip=0
            ;;
            none )
                skip=1
                ;;
            * ) mtype=${message%% *}
                mtype=${mtype%:}
                mtype=${mtype#fh}
                echo "$shf"|  grep -iq ${mtype}; search=$?
                if [ $search -eq 0 ]; then
                     skip=0
                else
                    skip=1
                fi
            ;;
        esac
        ;;
    esac
    if [ $skip -eq 0 ]; then
        ocf_log "$level" "$message"
    fi
}

#
# function: sht_usage - short usage info
# params:   -
# globals:  $0(r)
#
function sht_usage() {
    super_ocf_log info "FLOW $FUNCNAME ($*)"
    local rc=0
    methods=$(sht_methods)
    methods=$(echo $methods | tr ' ' '|')
  cat <<-!
	usage: $0 ($methods)

    $0 manages a SAP HANA Instance as an HA resource.

    The 'start'        operation starts the HANA instance or brings the "instance" to a WAITING (for primary) status
    The 'stop'         operation stops the HANA instance
    The 'status'       operation reports whether the HANA instance is running
    The 'monitor'      operation reports whether the HANA instance seems to be working; in master/slave mode it also needs to check the system replication status
    The 'notify'       operation always returns SUCCESS
    The 'validate-all' operation reports whether the parameters are valid
    The 'methods'      operation reports on the methods $0 supports

	!
	return $rc
}

#
# function: sht_meta_data - print resource agent meta-data for cluster
# params:   -
# globals:  -
#
function sht_meta_data() {
    super_ocf_log info "FLOW $FUNCNAME ($*)"
    local rc=0
	cat <<END
<?xml version="1.0"?>
<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
<resource-agent name="SAPHanaTopology">
    <version>$SAPHanaVersion</version>
    <shortdesc lang="en">Analyzes SAP HANA System Replication Topology.</shortdesc>
    <longdesc lang="en">This RA analyzes the SAP HANA topology and "sends" all findings via the node status attributes to
        all nodes in the cluster. These attributes are taken by the SAPHana RA to control the SAP Hana Databases.
        In addition it starts and monitors the local saphostagent.

1. Interface to monitor a HANA system: landscapeHostConfiguration.py
landscapeHostConfiguration.py has some detailed output about HANA system status
and node roles. For our monitor the overall status is relevant. This overall
status is reported by the returncode of the script:
0: Internal Fatal
1: ERROR
2: WARNING
3: INFO (maybe a switch of the resource running)
4: OK
The SAPHanaTopology resource agent will interpret returncode 1 as NOT-RUNNING (or a failure) and returncodes 2+3+4 as RUNNING.
SAPHanaTopology scans the output table of landscapeHostConfiguration.py to identify the roles of the cluster node. Roles means the configured and current role of the nameserver as well as of the indexserver.

2. Interface is hdbnsutil
   The interface hdbnsutil is used to check the "topology" of the system replication as well as the current configuration
   (primary/secondary) of a SAP HANA database instance. A second task of the interface is the possibility to run a
   system replication takeover (sr_takeover) or to register a former primary to a newer one (sr_register).

3. saphostctrl
   The interface saphostctrl uses the function ListInstances to figure out the virtual host name of the
   SAP HANA instance. This is the hostname used during the HANA installation.
    </longdesc>
<parameters>
    <parameter name="SID" unique="0" required="1">
        <longdesc lang="en">The SAP System Identifier (SID)</longdesc>
        <shortdesc lang="en">The SAP System Identifier (SID)</shortdesc>
        <content type="string" default="" />
    </parameter>
    <parameter name="InstanceNumber" unique="0" required="1">
        <longdesc lang="en">The SAP Instance Number</longdesc>
        <shortdesc lang="en">The SAP Instance Number</shortdesc>
        <content type="string" default="" />
    </parameter>
    <parameter name="DIR_EXECUTABLE" unique="0" required="0">
        <longdesc lang="en">Path to the SAP Hana Instance executable directory. If not set the RA tries /usr/sap/\$SID/\$InstanceName/exe.
        While InstanceName is the string of "HDB" and \$InstanceNumber for SAP Hana databases.
        </longdesc>
        <shortdesc lang="en">Path to the SAP Hana Instance executable directory.</shortdesc>
        <content type="string" default="" />
    </parameter>
    <parameter name="SAPHanaFilter" unique="0" required="0">
        <shortdesc lang="en">OUTDATED</shortdesc>
        <longdesc lang="en">OUTDATED</longdesc>
        <content type="string" default="" />
    </parameter>
</parameters>
<actions>
    <action name="start" timeout="180" />
    <action name="stop" timeout="60" />
    <action name="status" timeout="60" />
    <action name="monitor" depth="0" timeout="60" interval="60" />
    <action name="validate-all" timeout="5" />
    <action name="meta-data" timeout="5" />
    <action name="methods" timeout="5" />
</actions>
</resource-agent>
END
return $rc
}

#
# function: get_hana_attribute
# params:   NODE ATTR [STORE]
# globals:  -
#
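# example (as used in sht_init below):
#   hanaVHost=$(get_hana_attribute ${n1} ${ATTR_NAME_HANA_VHOST[@]})
# the ATTR_NAME_* arrays expand to "<attribute-name> <store>", so the optional
# STORE parameter (reboot|forever|props) is filled in automatically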
function get_hana_attribute()
{
    super_ocf_log info "FLOW $FUNCNAME ($*)"
    local rc=0
    local attr_node=$1
    local attr_name=$2
    local attr_store=${3:-reboot} # DONE: PRIO5 get this (optional) from parameter
    local attr_default=${4:-}
    local dstr
    dstr=$(date)
    case "$attr_store" in
        reboot | forever )
            echo "$dstr: SAPHanaTopology: crm_attribute -N ${attr_node} -G -n \"$attr_name\" -l $attr_store -q" >> $log_attr_file
            crm_attribute -N ${attr_node} -G -n "$attr_name" -l $attr_store -q -d "$attr_default" 2>>$log_attr_file; rc=$?
            ;;
        props )
            echo "$dstr: SAPHanaTopology: crm_attribute -G -n \"$attr_name\" -t crm_config -q" >> $log_attr_file
            crm_attribute -G -n "$attr_name" -t crm_config -q -d "$attr_default" 2>>$log_attr_file; rc=$?
            ;;
    esac
    super_ocf_log info "FLOW $FUNCNAME rc=$rc"
    return $rc
}

#
# function: set_hana_attribute - set the multi-state status of a node
# params:   NODE VALUE ATTR [STORE]
# globals:  -
#
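# example (as used in sht_monitor_clone below):
#   set_hana_attribute ${NODENAME} "$vName" ${ATTR_NAME_HANA_VHOST[@]}
# the attribute is only written if the value differs from the current one (see attr_old below)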
function set_hana_attribute()
{
    super_ocf_log info "FLOW $FUNCNAME ($*)"
    local attr_node=$1
    local attr_value=$2
    local attr_name=$3
    local attr_store=${4:-reboot} # DONE: PRIO5 get this (optional) from parameter
    local attr_default=${5:-}
    local rc=1
    local attr_old=""
    local dstr
    dstr=$(date)
    attr_old=$(get_hana_attribute $attr_node $attr_name $attr_store $attr_default); get_rc=$?
    if [ "$attr_old" != "$attr_value" ]; then
        super_ocf_log debug "DBG: SET attribute $attr_name for node ${attr_node} to ${attr_value} former ($attr_old) get_rc=$get_rc "
        case "$attr_store" in
            reboot | forever )
                echo "$dstr: SAPHanaTopology: crm_attribute -N $attr_node -v $attr_value -n \"$attr_name\" -l $attr_store" >> /var/log/fhATTRIBUTE
                crm_attribute -N $attr_node -v $attr_value -n "$attr_name" -l $attr_store 2>>/var/log/fhATTRIBUTE; rc=$?
                ;;
            props )
                echo "$dstr: SAPHanaTopology: crm_attribute -v $attr_value -n \"$attr_name\" -t crm_config -s SAPHanaSR" >> /var/log/fhATTRIBUTE
                crm_attribute -v $attr_value -n "$attr_name" -t crm_config  -s SAPHanaSR 2>>/var/log/fhATTRIBUTE; rc=$?
                ;;
        esac
    else
        super_ocf_log debug "DBG: LET attribute $attr_name for node ${attr_node} still be ${attr_value}"
        rc=0
    fi
    super_ocf_log info "FLOW $FUNCNAME rc=$rc"
    return $rc
}

#
# function: sht_methods - report supported cluster methods
# params:   -
# globals:  -
# methods: What methods/operations do we support?
#
function sht_methods() {
  super_ocf_log info "FLOW $FUNCNAME ($*)"
  local rc=0
  cat <<-!
    start
    stop
    status
    monitor
    notify
    validate-all
    methods
    meta-data
    usage
    admin-setup
	!
	return $rc
}

#
# function: dequote - filter: remove quotes (") from stdin
# params:   -
# globals:  -
function dequote()
{
    local rc=0; tr -d '"'; return $rc
}

# functions: ver_lt/ver_le/ver_gt/ver_ge - compare two HANA version strings
function ver_lt() {
    ocf_version_cmp $1 $2
    test $? -eq 0 && return 0 || return 1
}

function ver_le() {
    ocf_version_cmp $1 $2
    test $? -eq 0 -o $? -eq 1 && return 0 || return 1
}

function ver_gt() {
    ocf_version_cmp $1 $2
    test $? -eq 2 && return 0 || return 1
}

function ver_ge() {
    ocf_version_cmp $1 $2
    test $? -eq 2 -o $? -eq 1 && return 0 || return 1
}
#
# function: version: compare two HANA version strings
#
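# example (as used in sht_init below):
#   version "$hdbver" ">=" "1.00.111"
# with 5 arguments the comparisons are chained pairwise, e.g. (illustrative values):
#   version "1.00.100" "<=" "$hdbver" "<" "2.00.000"   # 1.00.100 <= hdbver  AND  hdbver < 2.00.000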
function version() {
    if [ $# -eq 3 ]; then
        case "$2" in
            LE | le | "<=" ) ver_le $1 $3;;
            LT | lt | "<" ) ver_lt $1 $3;;
            GE | ge | ">=" ) ver_ge $1 $3;;
            GT | gt | ">" ) ver_gt $1 $3;;
            * ) return 1;
        esac
    elif [ $# -ge 5 ]; then
        version $1 $2 $3 && shift 2 && version $*
    else
        return 1;
    fi
}
#
# function: is_clone - report, if resource is configured as a clone (also master/slave)
# params:   -
# globals:  OCF_*(r)
# descript: is_clone : find out if we are configured to run in a Master/Slave configuration
#   rc: 0: it is a clone
#       1: it is not a clone
#   Special EXIT of RA, if clone is misconfigured
#
function is_clone() {
    super_ocf_log info "FLOW $FUNCNAME ($*)"
    local rc=0
    #
    # is a clone config?
    #
    if [ -n "$OCF_RESKEY_CRM_meta_clone_max" ] \
       && [ "$OCF_RESKEY_CRM_meta_clone_max" -gt 0 ]; then
       #
       # yes it is a clone config - check, if its configured well
       #
        if [ "$OCF_RESKEY_CRM_meta_clone_node_max" -ne 1 ] ; then
                super_ocf_log err "ACT: Clone options misconfigured. (expect: clone_node_max=1)"
                exit $OCF_ERR_CONFIGURED
        fi
        rc=0;
    else
        rc=1;
    fi
    super_ocf_log info "FLOW $FUNCNAME rc=$rc"
    return $rc
}

#
# function: HANA_CALL
# params:   timeout-in-seconds cmd-line
# globals:  sid(r), SID(r), InstanceName(r)
#
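# example (as used in sht_init below):
#   hdbANSWER=$(HANA_CALL --timeout 60 --cmd "$hdbState --sapcontrol=1" 2>/dev/null)
# --timeout 0 (or "inf") runs the command without the timeout wrapper; --on-timeout
# names a command that is run once if the first call hits the timeout (rc 124)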
function HANA_CALL()
{
    #
    # TODO: PRIO 5: remove 'su - ${sidadm}' later, when SAP HANA resolved the issue with
    #       root-user-called hdbnsutil -sr_state (which creates root-owned shared memory file in /var/lib/hdb/SID/shmgrp)
    # TODO: PRIO 5: Maybe make "su" optional by a parameter
    local timeOut=0
    local onTimeOut=""
    local rc=0
    local use_su=1 # Default to be changed later (see TODO above)
    local pre_cmd=""
    local cmd=""
    local pre_script=""
    local output=""
    while [ $# -gt 0 ]; do
        case "$1" in
            --timeout ) timeOut=$2; shift;;
            --use-su  ) use_su=1;;
            --on-timeout ) onTimeOut="$2"; shift;;
            --cmd ) shift; cmd="$*"; break;;
        esac
        shift
    done

    if [ $use_su -eq 1 ]; then
        pre_cmd="su - ${sid}adm -c"
        pre_script="true"
    else
        # as root user we need the library path to the SAP kernel to be able to call sapcontrol
        # check, if we already added DIR_EXECUTABLE at the beginning of LD_LIBRARY_PATH
        if [ "${LD_LIBRARY_PATH%%*:}" != "$DIR_EXECUTABLE" ]
        then
            MY_LD_LIBRARY_PATH=$DIR_EXECUTABLE${LD_LIBRARY_PATH:+:}$LD_LIBRARY_PATH
        fi
        pre_cmd="bash -c"
        pre_script="LD_LIBRARY_PATH=$MY_LD_LIBRARY_PATH; export LD_LIBRARY_PATH"
    fi
    case "$timeOut" in
        0 | inf )
                  output=$($pre_cmd "$pre_script; /usr/sap/$SID/$InstanceName/HDBSettings.sh $cmd"); rc=$?
                  ;;
        *       )
                  output=$(timeout $timeOut $pre_cmd "$pre_script; /usr/sap/$SID/$InstanceName/HDBSettings.sh $cmd"); rc=$?
                  #
                  # on timeout ...
                  #
                  if [ $rc -eq 124 -a -n "$onTimeOut" ]; then
                      local second_output=""
                      second_output=$($pre_cmd "$pre_script; /usr/sap/$SID/$InstanceName/HDBSettings.sh $onTimeOut");
                  fi
                  ;;
    esac
    echo "$output"
    return $rc;
}

#
# function: sht_init - initialize variables for the resource agent
# params:   -
# globals:  OCF_*(r), SID(w), sid(rw), sidadm(w), InstanceName(w), InstanceNr(w),
# globals:  meta_notify_master_uname(w), HANA_SR_TOPOLOGY(w), sr_name(w)
# globals:  ATTR_NAME_HANA_SYNC_STATUS(w), ATTR_NAME_HANA_PRIMARY_AT(w), ATTR_NAME_HANA_CLONE_STATE(w)
# globals:  DIR_EXECUTABLE(w), SAPSTARTSRV(w), SAPCONTROL(w), DIR_PROFILE(w), SAPSTARTPROFILE(w), LD_LIBRARY_PATH(w), PATH(w), nodelist(w)
# globals:  NODENAME(w), hdbver(w)
# sht_init : Define global variables with default values, if optional parameters are not set
#
#
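# example of the derived values (assuming SID=HA1 and InstanceNumber=10):
#   InstanceName=HDB10, sidadm=ha1adm, DIR_EXECUTABLE=/usr/sap/HA1/HDB10/exe (unless overridden)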

function sht_init() {
    super_ocf_log info "FLOW $FUNCNAME ($*)"
    local myInstanceName=""
    local rc=$OCF_SUCCESS
    local hdbANSWER=""
    local siteID
    local siteNAME
    local chkMethod=""
    HOSTEXECNAME=saphostexec
    USRSAP=/usr/sap
    SAPSERVICE_PATH=${USRSAP}/sapservices
    SAPHOSTCTRL_PATH=${USRSAP}/hostctrl/exe
    HOSTEXEC_PATH=${SAPHOSTCTRL_PATH}/${HOSTEXECNAME}
    HOSTEXEC_PROFILE_PATH=${SAPHOSTCTRL_PATH}/host_profile
    NODENAME=$(crm_node -n)
    SID=$OCF_RESKEY_SID
    InstanceNr=$OCF_RESKEY_InstanceNumber
    myInstanceName="${SID}_HDB${InstanceNr}"
    InstanceName="HDB${InstanceNr}"
    super_ocf_log debug "DBG2: Used new method to get SID ($SID) and InstanceNr ($InstanceNr)"
    sid=$(echo "$SID" | tr '[:upper:]' '[:lower:]')
    sidadm="${sid}adm"
    ocf_env=$(env | grep 'OCF_RESKEY_CRM')
    super_ocf_log debug "DBG3: OCF: $ocf_env"
    ATTR_NAME_HANA_SYNC_STATUS=("hana_${sid}_sync_state" "reboot")  # SOK, SFAIL, UNKNOWN?
    ATTR_NAME_HANA_PRIMARY_AT=("hana_${sid}_primary_at" "reboot")   # Not really used
    ATTR_NAME_HANA_CLONE_STATE=("hana_${sid}_clone_state" "reboot") # UKNOWN?, DEMOTED, PROMOTED
    ATTR_NAME_HANA_REMOTEHOST=("hana_${sid}_remoteHost" "forever")
    ATTR_NAME_HANA_SITE=("hana_${sid}_site" "forever")
    ATTR_NAME_HANA_ROLES=("hana_${sid}_roles" "reboot")
    ATTR_NAME_HANA_SRMODE=("hana_${sid}_srmode" "forever")
    ATTR_NAME_HANA_VHOST=("hana_${sid}_vhost" "forever")
    ATTR_NAME_HANA_STATUS=("hana_${sid}_status" "reboot")
    #
    # new "central" attributes
    #
    ATTR_NAME_HANA_FILTER=("hana_${sid}_glob_filter" "props" "ra-act-dec-lpa")
    # optional OCF parameters, we try to guess which directories are correct

    SAPHanaFilter=$(get_hana_attribute "X" ${ATTR_NAME_HANA_FILTER[@]})

    if  [ -z "$OCF_RESKEY_DIR_EXECUTABLE" ]
    then
        DIR_EXECUTABLE="/usr/sap/$SID/$InstanceName/exe"
    else
        DIR_EXECUTABLE="$OCF_RESKEY_DIR_EXECUTABLE"
    fi

    if [ -z "$DIR_EXECUTABLE" ]; then
        super_ocf_log err "DEC: Can not determine DIR_EXECUTABLE. Please set this parameter. -> OCF_ERR_CONFIGURED"
        rc=$OCF_ERR_CONFIGURED
    fi

    if [ -z "$OCF_RESKEY_DIR_PROFILE" ]
    then
        DIR_PROFILE="/usr/sap/$SID/SYS/profile"
    else
        DIR_PROFILE="$OCF_RESKEY_DIR_PROFILE"
    fi


    PATH=${PATH}:${DIR_EXECUTABLE}
    #
    # figure-out all needed values from system replication status with ONE call
    # we need: mode=primary|sync|syncmem|...; site name=<site>; mapping/<me>=<site>/<node> (multiple lines)
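    # illustrative output lines matched by the awk patterns further below (values are examples only):
    #   mode=sync
    #   site id=2
    #   site name=WDF
    #   mapping/<me>=<site>/<node>   (one line per mapped host)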
    case $(crm_attribute --type crm_config --name cluster-infrastructure -q) in
       *corosync* ) nodelist=$(crm_node -l | awk '{ print $2 }');;
       *openais* ) nodelist=$(crm_node -l | awk '/member/ {print $2}');;
       *cman*    ) nodelist=$(crm_node -l);;
    esac
    #
    # get HANA version
    #
    local ges_ver
    ges_ver=$(HANA_CALL --timeout 10 --cmd "HDB version" | tr -d " " | awk -F: '$1 == "version" {print $2}')
    hdbver=${ges_ver%.*.*}
    #
    # since rev 111.00 we should use a new hdbnsutil option to get the -sr_state
    # since rev 112.03 the old option is changed and we should use -sr_stateConfiguration where ever possible
    #
    hdbState="hdbnsutil -sr_state"
    hdbMap="hdbnsutil -sr_state"
    if version "$hdbver" ">=" "1.00.111"; then
        hdbState="hdbnsutil -sr_stateConfiguration"
        hdbMap="hdbnsutil -sr_stateHostMapping"
    fi
    #### SAP-CALL
    # hdbnsutil was a bit unstable in some tests so we call the tool again, if it fails to report the srmode
    for chkMethod in  hU hU hU gP ; do
        # DONE: Limit the runtime of hdbnsutil.
        # TODO: Use getParameter.py if we get no answer
        # SAP_CALL
        #super_ocf_log debug "DBG2: hdbANSWER=$hdbANSWER"
        #srmode=$(echo "$hdbANSWER" | awk -F= '/mode/ {print $2}')
        case "$chkMethod" in
            gP ) # call getParameter (gP)
                local gpKeys=""
                gpKeys=$(echo --key=global.ini/system_replication/{mode,site_name,site_id})
                hdbANSWER=$(HANA_CALL --timeout 60 --cmd "HDBSettings.sh getParameter.py $gpKeys --sapcontrol=1" 2>&1 | awk -F/ 'BEGIN {out=0} /^SAPCONTROL-OK: <begin>/ { out=1 } /^SAPCONTROL-OK: <end>/ { out=0 } /=/ {if (out==1) {print $3} }')
                srmode=$(echo "$hdbANSWER" | awk -F= '$1=="mode" {print $2}')
                super_ocf_log info "ACT: hdbnsutil not answering - using global.ini as fallback - srmode=$srmode"
                ;;
            hU | * ) # call hdbnsUtil (hU) ( also for unknown chkMethod )
                # DONE: PRIO1: Beginning with SAP HANA rev 112.03, -sr_state is no longer supported
                hdbANSWER=$(HANA_CALL --timeout 60 --cmd "$hdbState --sapcontrol=1" 2>/dev/null)
                super_ocf_log debug "DBG2: hdbANSWER=$hdbANSWER"
                srmode=$(echo "$hdbANSWER" | awk -F= '$1=="mode" {print $2}')
                ;;
        esac
        case "$srmode" in
            primary | syncmem | sync | async | none )
              # we can leave the loop as we already got a result
              break
              ;;
            * )
              # let's pause a bit to give hdbnsutil a chance to answer next time
              sleep 2
              ;;
        esac
    done
    # TODO PRIO3: Implement a file lookup, if we did not get a result
    siteID=$(echo "$hdbANSWER" | awk -F= '/site.id/ {print $2}')       # allow 'site_id' AND 'site id'
    siteNAME=$(echo "$hdbANSWER" | awk -F= '/site.name/ {print $2}')
    site=$siteNAME
    srmode=$(echo "$hdbANSWER" | awk -F= '/mode/ {print $2}')
    #
    # for rev >= 111 we use the new mapping query
    #
    if version "$hdbver" ">=" "1.00.111"; then
        hdbANSWER=$(HANA_CALL --timeout 60 --cmd "$hdbMap --sapcontrol=1" 2>/dev/null)
    fi
    MAPPING=$(echo "$hdbANSWER" | awk -F[=/] '$1 == "mapping" && $3 != site { print $4 }' site=$site)
    super_ocf_log debug "DBG: site=$site, mode=$srmode, MAPPING=$MAPPING"
    if [ -n "$MAPPING" ]; then
        # we have a mapping from HANA, lets use it
        #
        # filter all non-cluster mappings
        #
        local hanaVHost=""
        local n1=""
        hanaRemoteHost=""
        for n1 in $nodelist; do
              hanaVHost=$(get_hana_attribute ${n1} ${ATTR_NAME_HANA_VHOST[@]})
              for n2 in $MAPPING; do
                 if [ "$hanaVHost" == "$n2" ]; then
                    hanaRemoteHost="$hanaVHost"
                 fi;
              done;
        done
        super_ocf_log info "DEC: site=$site, mode=$srmode, MAPPING=$MAPPING, hanaRemoteHost=$hanaRemoteHost"
        super_ocf_log debug "DBG: site=$site, mode=$srmode, MAPPING=$MAPPING, hanaRemoteHost=$hanaRemoteHost"
    else
        # HANA DID NOT TELL US THE MAPPING, LET'S TRY TO USE THE SITE ATTRIBUTES
        local n1=""
        local hanaSite=""
        for n1 in $nodelist; do
            # TODO: PRIO9 - For multi tier with more than 2 chain/star members IN the cluster we might need to be
            #       able to catch more than one remoteHost
            #       currently having more than 2 HANA in a chain/star members IN the cluster is not allowed, the third must be external
            if [ "$NODENAME" != "$n1" ]; then
                hanaSite=$(get_hana_attribute ${n1} ${ATTR_NAME_HANA_SITE[@]})
                hanaRemoteHost="$n1"
            fi
        done
        super_ocf_log info "DEC: site=$site, mode=$srmode, hanaRemoteHost=$hanaRemoteHost - found by remote site ($hanaSite)"
    fi
    super_ocf_log info "FLOW $FUNCNAME rc=$OCF_SUCCESS"
    return $OCF_SUCCESS
}

#
# function: check_for_primary - check if local SAP HANA is configured as primary
# params:   -
# globals:  HANA_STATE_PRIMARY(r), HANA_STATE_SECONDARY(r), HANA_STATE_DEFECT(r), HANA_STATE_STANDALONE(r)
#           srmode(r)
#
function check_for_primary() {
    super_ocf_log info "FLOW $FUNCNAME ($*)"
    local rc=0
    super_ocf_log debug "DBG: check_for_primary: srmode=$srmode"
    case "$srmode" in
       primary )
              super_ocf_log info "FLOW $FUNCNAME rc=HANA_STATE_PRIMARY"
              rc=$HANA_STATE_PRIMARY;;
       syncmem | sync | async )
              super_ocf_log info "FLOW $FUNCNAME rc=HANA_STATE_SECONDARY"
              rc=$HANA_STATE_SECONDARY;;
       none ) # have seen that mode on the secondary side BEFORE we registered it as a replica
              super_ocf_log info "FLOW $FUNCNAME rc=HANA_STATE_STANDALONE"
              rc=$HANA_STATE_STANDALONE;;
       * )
          dump=$( echo $srmode | hexdump -C );
          super_ocf_log err "ACT: check_for_primary:  we didn't expect srmode to be: DUMP: <$dump>"
          rc=$HANA_STATE_DEFECT
   esac;
   super_ocf_log info "FLOW $FUNCNAME rc=$rc"
   return $rc
}


#
# function: start_saphostagent
# params:   -
# globals:  HOSTEXEC_PATH(r), HOSTEXEC_PROFILE_PATH(r)
#
function start_saphostagent()
{
    ### SAP-CALL
    if [ -x "${HOSTEXEC_PATH}" ]; then
        ${HOSTEXEC_PATH} pf=${HOSTEXEC_PROFILE_PATH}
    fi
    return 0
}

#
# function: stop_saphostagent
# params:   -
# globals: HOSTEXEC_PATH(r)
#
function stop_saphostagent()
{
    ### SAP-CALL
    if [ -x "${HOSTEXEC_PATH}" ]; then
        ${HOSTEXEC_PATH} -stop
    fi
}

#
# function: check_saphostagent
# params:   -
# globals:
#
function check_saphostagent()
{
    local rc=1
    # TODO: PRIO3: should the path been removed like "saphostexec" instead of "/usr/sap/hostctrl/exe/saphostexec"
    #       or should we use ${HOSTEXEC_PATH} instead?
    pgrep -f /usr/sap/hostctrl/exe/saphostexec; rc=$?
    return $rc
}

#
#############################################################################
#
# function: sht_start - start a hana instance
# params:   -
# globals:  OCF_*
# sht_start : Start the SAP HANA instance
#
function sht_start() {
  super_ocf_log info "FLOW $FUNCNAME ($*)"

  local rc=$OCF_NOT_RUNNING
  local output=""
  local loopcount=0

  # TODO: PRIO3: move the string "$HA_RSCTMP/SAPHana/SAPTopologyON" to a variable
  # TODO: PRIO3: move the file to the clusters tmp directory?
  mkdir -p $HA_RSCTMP/SAPHana
  touch $HA_RSCTMP/SAPHana/SAPTopologyON
  if ! check_saphostagent; then
     start_saphostagent
  fi

  rc=$OCF_SUCCESS

  super_ocf_log info "FLOW $FUNCNAME rc=$rc"
  return $rc
}

#
# function: sht_stop - stop a hana instance
# params:   -
# globals:  OCF_*(r), SAPCONTROL(r), SID(r), InstanceName(r)
# sht_stop: Stop the SAP HANA Topology Resource
#
function sht_stop() {
  super_ocf_log info "FLOW $FUNCNAME ($*)"
  local output=""
  local rc=0

  rm $HA_RSCTMP/SAPHana/SAPTopologyON
  rc=$OCF_SUCCESS

  super_ocf_log info "FLOW $FUNCNAME rc=$rc"
  return $rc
}


#
# function: sht_monitor - monitor a hana topology instance
# params:   --
# globals:  OCF_*(r), SAPCONTROL(r), InstanceNr(r)
# sht_monitor: Can the given SAP instance do anything useful?
#
function sht_monitor() {
  super_ocf_log info "FLOW $FUNCNAME ($*)"
  local rc=0

  if [ -f $HA_RSCTMP/SAPHana/SAPTopologyON ]; then
     rc=$OCF_SUCCESS
  else
     rc=$OCF_NOT_RUNNING
  fi

  super_ocf_log info "FLOW $FUNCNAME rc=$rc"
  return $rc
}


#
# function: sht_status - get status of a hana instance (os tools only)
# params:   -
# globals:  SID(r), InstanceName(r), OCF_*(r), sidadm(r)
# sht_status: Lightweight check of SAP instance only with OS tools
#
function sht_status() {
    super_ocf_log info "FLOW $FUNCNAME ($*)"
    local rc=0

    sht_monitor; rc=$?
    return $rc
}


#
# function: sht_validate - validation of (some) variables/parameters
# params:   -
# globals:  OCF_*(r), SID(r), InstanceName(r), InstanceNr(r),
# sht_validate: Check the semantics of the input parameters
#
function sht_validate() {
    super_ocf_log info "FLOW $FUNCNAME ($*)"
    local rc=$OCF_SUCCESS
    if [ $(echo "$SID" | grep -c '^[A-Z][A-Z0-9][A-Z0-9]$') -ne 1 ]
    then
        super_ocf_log err "ACT: Parsing instance profile name: '$SID' is not a valid SID!"
        rc=$OCF_ERR_ARGS
    fi

    if [ $(echo "$InstanceNr" | grep -c '^[0-9][0-9]$') -ne 1 ]
    then
        super_ocf_log err "ACT: Parsing instance profile name: '$InstanceNr' is not a valid instance number!"
        rc=$OCF_ERR_ARGS
    fi

    super_ocf_log info "FLOW $FUNCNAME rc=$rc"
    return $rc
}

#
# function: sht_start_clone - start a hana clone instance
# params:   -
# globals:  OCF_*(r),
# sht_start_clone
#
function sht_start_clone() {
    super_ocf_log info "FLOW $FUNCNAME ($*)"
    local rc=$OCF_NOT_RUNNING
    sht_start; rc=$?
    return $rc
}

#
# function: sht_stop_clone - stop a hana clone instance
# params:   -
# globals:  NODENAME(r), HANA_STATE_*, ATTR_NAME_*
# sht_stop_clone
#
function sht_stop_clone() {
    super_ocf_log info "FLOW $FUNCNAME ($*)"
    local rc=0
    check_for_primary; primary_status=$?
    if [ $primary_status -eq $HANA_STATE_PRIMARY ]; then
        hanaPrim="P"
    elif [ $primary_status -eq $HANA_STATE_SECONDARY ]; then
        hanaPrim="S"
    elif [ $primary_status -eq $HANA_STATE_STANDALONE ]; then
        hanaPrim="N"
    else
        hanaPrim="-"
    fi
    set_hana_attribute "${NODENAME}" "1:$hanaPrim:-:-:-:-" ${ATTR_NAME_HANA_ROLES[@]}
    sht_stop; rc=$?
    return $rc
}

#
# function: sht_monitor_clone - monitor a hana clone instance
# params:   -
# globals:  OCF_*, SID, InstanceNr, InstanceName, MAPPING(r)
# sht_monitor_clone
#
function sht_monitor_clone() {
    super_ocf_log info "FLOW $FUNCNAME ($*)"
    #
	local rc=$OCF_ERR_GENERIC
	local promoted=0
    local init_attribute=0


	if ocf_is_probe; then
		super_ocf_log debug "DBG2: PROBE ONLY"
        sht_monitor; rc=$?
	else
		super_ocf_log debug "DBG2: REGULAR MONITOR"
        if ! check_saphostagent; then
             start_saphostagent
        fi
	#
	# First check, if we are PRIMARY or SECONDARY
	#
    super_ocf_log debug "DBG2: HANA SID $SID"
    super_ocf_log debug "DBG2: HANA InstanceName $InstanceName"
    super_ocf_log debug "DBG2: HANA InstanceNr $InstanceNr"
	check_for_primary; primary_status=$?
    if [ $primary_status -eq $HANA_STATE_PRIMARY ]; then
        hanaPrim="P"
        super_ocf_log debug "DBG2: HANA IS PRIMARY"
        sht_monitor; rc=$?
    else
        if [ $primary_status -eq $HANA_STATE_SECONDARY  ]; then
            hanaPrim="S"
            super_ocf_log debug "DBG2: HANA IS SECONDARY"
            sht_monitor; rc=$?
        elif [ $primary_status -eq $HANA_STATE_STANDALONE  ]; then
            hanaPrim="N"
            super_ocf_log debug "DBG2: HANA IS STANDALONE"
            sht_monitor; rc=$?
        else
            hanaPrim="-"
            super_ocf_log warn "ACT: sht_monitor_clone: HANA_STATE_DEFECT"
            rc=$OCF_ERR_CONFIGURED
        fi
    fi
    # DONE: PRIO1: ASK: Is the output format of ListInstances fixed? Could we take that as an API?
    # try to catch:  Inst Info : LNX - 42 - lv9041 - 740, patch 36, changelist 1444691
    # We rely on the following format: SID is word#4, NR is word#6, vHost is word#8
    #### SAP-CALL
    vName=$(/usr/sap/hostctrl/exe/saphostctrl -function ListInstances \
        | awk '$4 == SID && $6 == SYSNR { print $8 }' SID=$SID SYSNR=$InstanceNr 2>/dev/null )
    # super_ocf_log debug "DBG: ListInstances: $(/usr/sap/hostctrl/exe/saphostctrl -function ListInstances)"
    if [ -n "$vName" ]; then
       set_hana_attribute ${NODENAME} "$vName" ${ATTR_NAME_HANA_VHOST[@]}
    else
       vName=$(get_hana_attribute ${NODENAME} ${ATTR_NAME_HANA_VHOST[@]})
    fi
    #site=$(get_site_name)
    #### SAP-CALL
    # SAP_CALL
    #hanaANSWER=$(su - $sidadm -c "python exe/python_support/landscapeHostConfiguration.py" 2>/dev/null); hanalrc="$?"
    #
    # since rev 09x SAP has added the --sapcontrol option for the landscapeHostConfiguration interface
    # we begin to use --sapcontrol with rev 100
    # since rev 120 we need to use --sapcontrol, because SAP changed the tool output
    #
    if version "$hdbver" ">=" "1.00.100"; then
        hanaANSWER=$(HANA_CALL --timeout 60 --cmd "landscapeHostConfiguration.py --sapcontrol=1" 2>/dev/null); hanalrc="$?"
        # TODO: PRIO9: Do we need to check the lines: 'SAPCONTROL-OK: <begin>' and 'SAPCONTROL-OK: <end>'?
        hanarole=$(echo "$hanaANSWER" | tr -d ' ' | \
            awk -F= '$1 == "nameServerConfigRole"    {f1=$2}
                     $1 == "nameServerActualRole"    {f2=$2}
                     $1 == "indexServerConfigRole"   {f3=$2}
                     $1 == "indexServerActualRole"   {f4=$2}
                     END { printf "%s:%s:%s:%s\n", f1, f2, f3,f4 }')
    else
        #
        # old code for backward compatibility
        #
        hanaANSWER=$(HANA_CALL --timeout 60 --cmd "landscapeHostConfiguration.py" 2>/dev/null); hanalrc="$?"
        hanarole=$(echo "$hanaANSWER" | tr -d ' ' | awk -F'|' '$2 == host {  printf "%s:%s:%s:%s\n",$10,$11,$12,$13 }  ' host=${vName})
    fi
    #if [ -z "$MAPPING" ]; then
    #   super_ocf_log info "ACT: Did not find remote Host at this moment"
    #fi
    # FH TODO PRIO3: TRY TO GET RID OF "ATTR_NAME_HANA_REMOTEHOST"
    if [ -n "$hanaRemoteHost" ]; then
        set_hana_attribute ${NODENAME} "$hanaRemoteHost" ${ATTR_NAME_HANA_REMOTEHOST[@]}
    fi
    set_hana_attribute ${NODENAME} "$hanalrc:$hanaPrim:$hanarole" ${ATTR_NAME_HANA_ROLES[@]}
    if [ -n "$site" ]; then
        set_hana_attribute ${NODENAME} "$site" ${ATTR_NAME_HANA_SITE[@]}
    fi
    case "$hanaPrim" in
        P ) ;;
        S ) # only the secondary may propagate its sync status
        case $(crm_attribute --type crm_config --name cluster-infrastructure -q) in
           *corosync* ) nodelist=$(crm_node -l | awk '{ print $2 }');;
           *openais* ) nodelist=$(crm_node -l | awk '/member/ {print $2}');;
           *cman*    ) nodelist=$(crm_node -l);;
        esac

        for n in ${nodelist}; do
           set_hana_attribute ${n} "$srmode" ${ATTR_NAME_HANA_SRMODE[@]}
        done
        ;;
    esac
    #
    fi # end ocf_is_NOT_probe
    super_ocf_log info "FLOW $FUNCNAME rc=$rc"
    return $rc
}

#
# function: sht_notify - notify action
# params:   -
# globals:  OCF_*(r), ACTION(r), CLACT(r), NODENAME(r)
# sht_notify: Handle master scoring - to make sure a slave gets the next master
#
function sht_notify() {
    super_ocf_log info "FLOW $FUNCNAME ($*)"
    local rc=0
    super_ocf_log info "RA ==== end action $ACTION$CLACT (${n_type}/${n_op})===="
    return $rc
}

#
# function: main - main function to operate
# params:   ACTION
# globals:  OCF_*(r), SID(w), sidadm(w), InstanceName(w), DIR_EXECUTABLE(w), ACTION(w), CLACT(w), ra_rc(rw), $0(r), %ENV(r)
#

## GLOBALS
SID=""
sidadm=""
InstanceName=""
InstanceNr=""
DIR_EXECUTABLE=""
SAPHanaFilter="ra-act-dec-lpa"

if [ $# -ne 1 ]
then
  sht_usage
  exit $OCF_ERR_ARGS
fi

ACTION=$1
if [ "$ACTION" = "status" ]; then
    ACTION=monitor
fi

# These operations don't require OCF parameters to be set
case "$ACTION" in
    usage|methods)  sht_$ACTION
                    exit $OCF_SUCCESS;;
    meta-data)      sht_meta_data
                    exit $OCF_SUCCESS;;
    notify)         sht_notify
                    exit $OCF_SUCCESS;;
    admin-setup)    admin-setup
                    exit $OCF_SUCCESS;;
    *);;
esac
sht_init

if ! ocf_is_root
then
    super_ocf_log err "ACT: $0 must be run as root"
    exit $OCF_ERR_PERM
fi

# parameter check
if  [ -z "$OCF_RESKEY_SID" ]
then
    super_ocf_log err "ACT: Please set parameter SID!"
    exit $OCF_ERR_ARGS
fi

if  [ -z "$OCF_RESKEY_InstanceNumber" ]
then
    super_ocf_log err "ACT: Please set parameter InstanceNumber!"
    exit $OCF_ERR_ARGS
fi

if is_clone
then
    CLACT=_clone
else
    if [ "$ACTION" = "promote" -o "$ACTION" = "demote" ]
    then
        super_ocf_log err "ACT: $ACTION called in a non clone environment"
        exit $OCF_ERR_ARGS
    fi
fi

super_ocf_log info "RA ==== begin action $ACTION$CLACT ($SAPHanaVersion) ===="
ra_rc=$OCF_ERR_UNIMPLEMENTED
case "$ACTION" in
    start|stop|monitor) # Standard controlling actions
        sht_$ACTION$CLACT
        ra_rc=$?
        ;;
    validate-all)
        sht_validate
        ra_rc=$?
        ;;
    *)  # seems to be an unknown request
        sht_methods
        ra_rc=$OCF_ERR_UNIMPLEMENTED
        ;;
esac
timeE=$(date '+%s')
(( timeR = timeE - timeB ))
super_ocf_log info "RA ==== end action $ACTION$CLACT with rc=${ra_rc} ($SAPHanaVersion) (${timeR}s)===="
exit ${ra_rc}