From 42de0f9c0003dd44d805227907e132a8f44676d6 Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Mon, 10 Dec 2018 11:38:02 -0600
Subject: [PATCH 1/2] Test: CTS: lower remote connection failure detection time
Now that we freeze pacemaker-remoted instead of killing it, the failure
detection time can be up to the connection monitor interval plus the
connection monitor timeout. Lower those values to speed detection up (and
stay below the existing log watch timeout of 120s), and make the remote-rsc
monitor interval the same as the connection's.
---
cts/CTStests.py | 59 ++++++++++++++++++++++++++-------------------------------
1 file changed, 27 insertions(+), 32 deletions(-)
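
A rough back-of-the-envelope check of the timing (illustration only, not part
of the patch; the helper below is made up): if a dead pacemaker-remoted is only
noticed when an in-flight connection monitor times out, the worst case is one
monitor interval plus one monitor timeout. With the values introduced here
(20s interval, 45s timeout) that stays comfortably under the 120s log watch
timeout mentioned above:

    # Illustrative only -- not in CTStests.py. Worst-case detection time if the
    # failure is seen only when a running monitor operation times out.
    def worst_case_detection(interval_s, timeout_s):
        return interval_s + timeout_s

    LOG_WATCH_TIMEOUT = 120   # existing CTS log watch timeout (seconds)

    print(worst_case_detection(20, 45))                      # 65
    print(worst_case_detection(20, 45) < LOG_WATCH_TIMEOUT)  # True
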
diff --git a/cts/CTStests.py b/cts/CTStests.py
index f1d6cd38a..ddcb42e7f 100644
--- a/cts/CTStests.py
+++ b/cts/CTStests.py
@@ -2635,45 +2635,40 @@ class RemoteDriver(CTSTest):
 
     def add_primitive_rsc(self, node):
         rsc_xml = """
-<primitive class="ocf" id="%s" provider="heartbeat" type="Dummy">
-    <operations>
-        <op id="remote-rsc-monitor-interval-10s" interval="10s" name="monitor"/>
-    </operations>
-    <meta_attributes id="remote-meta_attributes"/>
-</primitive>""" % (self.remote_rsc)
+<primitive class="ocf" id="%(node)s" provider="heartbeat" type="Dummy">
+    <meta_attributes id="%(node)s-meta_attributes"/>
+    <operations>
+        <op id="%(node)s-monitor-interval-20s" interval="20s" name="monitor"/>
+    </operations>
+</primitive>""" % { "node": self.remote_rsc }
         self.add_rsc(node, rsc_xml)
         if not self.failed:
             self.remote_rsc_added = 1
 
     def add_connection_rsc(self, node):
+        rsc_xml = """
+<primitive class="ocf" id="%(node)s" provider="pacemaker" type="remote">
+    <instance_attributes id="%(node)s-instance_attributes">
+        <nvpair id="%(node)s-instance_attributes-server" name="server" value="%(server)s"/>
+""" % { "node": self.remote_node, "server": node }
+
         if self.remote_use_reconnect_interval:
-            # use reconnect interval and make sure to set cluster-recheck-interval as well.
-            rsc_xml = """
-<primitive class="ocf" id="%s" provider="pacemaker" type="remote">
-    <instance_attributes id="remote-instance_attributes"/>
-    <instance_attributes id="remote-instance_attributes">
-        <nvpair id="remote-instance_attributes-server" name="server" value="%s"/>
-        <nvpair id="remote-instance_attributes-reconnect_interval" name="reconnect_interval" value="60s"/>
-    </instance_attributes>
-    <operations>
-        <op id="remote-monitor-interval-60s" interval="60s" name="monitor"/>
-        <op id="remote-name-start-interval-0-timeout-120" interval="0" name="start" timeout="60"/>
-    </operations>
-</primitive>""" % (self.remote_node, node)
+            # Set cluster-recheck-interval lower
             self.rsh(self.get_othernode(node), self.templates["SetCheckInterval"] % ("45s"))
-        else:
-            # not using reconnect interval
-            rsc_xml = """
-<primitive class="ocf" id="%s" provider="pacemaker" type="remote">
-    <instance_attributes id="remote-instance_attributes"/>
-    <instance_attributes id="remote-instance_attributes">
-        <nvpair id="remote-instance_attributes-server" name="server" value="%s"/>
-    </instance_attributes>
-    <operations>
-        <op id="remote-monitor-interval-60s" interval="60s" name="monitor"/>
-        <op id="remote-name-start-interval-0-timeout-120" interval="0" name="start" timeout="120"/>
-    </operations>
-</primitive>""" % (self.remote_node, node)
+
+            # Set reconnect interval on resource
+            rsc_xml = rsc_xml + """
+        <nvpair id="%s-instance_attributes-reconnect_interval" name="reconnect_interval" value="60s"/>
+""" % (self.remote_node)
+
+        rsc_xml = rsc_xml + """
+    </instance_attributes>
+    <operations>
+        <op id="%(node)s-start" name="start" interval="0" timeout="120s"/>
+        <op id="%(node)s-monitor-20s" name="monitor" interval="20s" timeout="45s"/>
+    </operations>
+</primitive>
+""" % { "node": self.remote_node }
         self.add_rsc(node, rsc_xml)
         if not self.failed:
             self.remote_node_added = 1
--
2.16.4